Dataset schema (each record below lists these metadata fields, followed by the file `content`):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–327 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–91 |
| license_type | string | 2 classes |
| repo_name | string | length 5–134 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 – 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| github_id | int64 (nullable) | 19.4k – 671M |
| star_events_count | int64 | 0 – 40k |
| fork_events_count | int64 | 0 – 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-21 16:39:19 – 2023-09-14 21:52:42 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-25 01:21:32 – 2023-06-28 13:19:12 |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 – 9.18M |
| extension | string | 20 classes |
| filename | string | length 1–141 |
| content | string | length 7–9.18M |
---
repo_name: grattan/covid19.model.sa2 | path: /R/covid19.model.sa2-package.R | filename: covid19.model.sa2-package.R | extension: r
blob_id: fab7b06d24645c9d89ecdc69c4b13fb8374d5044 | directory_id: 1859f328ad9ff15d7ebc5491ef99879346d7e707 | content_id: 2f9c5c33d5a373488b809f80cbcb1ca2d4d77835
snapshot_id: c5671e92de0fa3caeaf32da51a62cc45856146b7 | revision_id: 980f73e7a14ead9caa921ec703928d5c6ed4f028 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 1,119 | is_vendor: false | is_generated: false
visit_date: 2022-11-27T06:37:19.072469 | revision_date: 2020-08-09T16:52:25 | committer_date: 2020-08-09T16:52:25
github_id: 255,219,008 | star_events_count: 4 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-06-25T10:43:55 | gha_created_at: 2020-04-13T03:07:13 | gha_language: TeX
#' @keywords internal
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
#' @import data.table
#' @importFrom Rcpp evalCpp
#' @importFrom checkmate vname
#' @importFrom dqrng dqsample
#' @importFrom fst read_fst
#' @importFrom fst write_fst
#' @importFrom glue glue
#' @importFrom hutils coalesce
#' @importFrom hutils drop_empty_cols
#' @importFrom hutils provide.dir
#' @importFrom hutils provide.file
#' @importFrom hutils weight2rows
#' @importFrom hutils XOR
#' @importFrom hutilscpp is_constant
#' @importFrom hutilscpp which_first
#' @importFrom fastmatch fmatch
#' @importFrom magrittr %>%
#' @importFrom magrittr %T>%
#' @importFrom stats complete.cases
#' @importFrom stats loess.smooth
#' @importFrom stats rbeta
#' @importFrom stats runif
#' @importFrom stats setNames
#' @importFrom stats weighted.mean
#' @importFrom utils packageName
#' @importFrom utils hasName
#' @importFrom utils combn
#' @importFrom utils tail
#'
#' @useDynLib covid19.model.sa2, .registration = TRUE
## usethis namespace: end
NULL
---
repo_name: GabrielHoffman/variancePartition | path: /man/varPartData.Rd | filename: varPartData.Rd | extension: rd
blob_id: 7a36ae955e7d908b1b07bb54774291c0c4940f71 | directory_id: 9720a2cbb7ee176eff8f4af605b0e5cfc5627c4a | content_id: f3bb859899c56262cbbe84b80860745bb9ddfea2
snapshot_id: 83b2bfb86a5d21ee04770ad70357c6561873436c | revision_id: 13919acdf745f526399d2563f8dc740714102e63 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 1,400 | is_vendor: false | is_generated: true
visit_date: 2023-09-03T23:32:21.753849 | revision_date: 2023-08-19T00:59:33 | committer_date: 2023-08-19T00:59:33
github_id: 113,884,414 | star_events_count: 43 | fork_events_count: 10 | gha_license_id: null | gha_event_created_at: 2023-06-15T14:55:00 | gha_created_at: 2017-12-11T16:51:12 | gha_language: R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_annotation.R
\docType{data}
\name{varPartData}
\alias{varPartData}
\alias{geneCounts}
\alias{info}
\alias{geneExpr}
\title{Simulation dataset for examples}
\format{
A dataset of 100 samples and 200 genes
}
\usage{
data(varPartData)
}
\description{
A simulated dataset of gene expression and metadata
A simulated dataset of gene counts
}
\details{
\itemize{
\item geneCounts gene expression in the form of RNA-seq counts
\item geneExpr gene expression on a continuous scale
\item info metadata about the study design
}
}
\keyword{datasets}
---
repo_name: RoReke/AppComercial | path: /App Comercial/R/mod_data.R | filename: mod_data.R | extension: r
blob_id: b1e0fb3f29c154e5a225cde1b1ffccf959fa30b7 | directory_id: c6211aa4980cc3b023136a9c04201a87d65f3d81 | content_id: 24c67fd557661922692cf0a25610b6b9384cca3e
snapshot_id: b7dd47c28101789c13d74038e2d91b2111bc2089 | revision_id: 31a3de226f230a3c8c5d7323b1b01abb97e7eb70 | branch_name: refs/heads/main
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 8,252 | is_vendor: false | is_generated: false
visit_date: 2023-05-15T19:26:54.437188 | revision_date: 2021-06-16T20:53:14 | committer_date: 2021-06-16T20:53:14
github_id: 334,972,245 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
dataUI <- function(id) {
ns <- NS(id)
column(
width = 4,
tags$style(
type = "text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"
),
tags$style("
.checkbox { /* checkbox is a div class*/
line-height: 20px;
margin-bottom: 40px; /*set the margin, so boxes don't overlap*/
}
input[type='checkbox']{ /* style for checkboxes */
width: 30px; /*Desired width*/
height: 20px; /*Desired height*/
line-height: 30px;
}
span {
margin-left: 0px; /*set the margin, so boxes don't overlap labels*/
line-height: 30px;
}
"),
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
),
# fluidRow(
box(
width = 400, title = tagList(shiny::icon("filter", class = "fa-lg"), "Filtros"),
solidHeader = T, collapsible = T, status = "primary",
pickerInput(ns("Aseguradora"), "Aseguradora", choices = unique(data_vigencias$Aseguradora), multiple = FALSE, selected = "51 - PROVINCIA ART", options = list(`actions-box` = TRUE), width = NULL),
pickerInput(ns("ge0"), "Segmento", choices = unique(data_vigencias$Segmento), selected = "Mas de 100", options = list(`actions-box` = TRUE), multiple = T, width = NULL),
pickerInput(ns("ge"), "Ciiu_1", choices = unique(data_vigencias$CIIU.V1.DESC), selected = unique(data_vigencias$CIIU.V1.DESC), options = list(`actions-box` = TRUE), multiple = T, width = NULL),
pickerInput(ns("ge1"), "Ciiu_6", choices = unique(data_vigencias$CIIU.V6.DESC), options = list(`actions-box` = TRUE), multiple = T, width = NULL),
pickerInput(ns("ge2"), "Provincia", choices = unique(data_vigencias$Provincia), selected = unique(data_vigencias$Provincia), options = list(`actions-box` = TRUE), multiple = T, width = NULL)
),
box(width = 400, searchInput(
inputId = ns("search"), label = "CUIT :",
placeholder = "Ingresar CUIT",
resetValue = "",
btnSearch = icon("search"),
btnReset = icon("remove"),
width = "450px"
)),
br(),
box(width = 400, switchButton(
inputId = ns("Switch.1"),
label = "Activar modelo BI.V1.01",
value = FALSE, col = "GB", type = "OO"
)),
br(),
box(width = 400, valueBoxOutput(ns("precio_m"), width = 400)),
DT::dataTableOutput(ns("table_enfoque")),
box(width = 400, switchButton(
inputId = ns("Switch.2"),
label = "Activar regla enfoque (ET < PM)",
value = FALSE, col = "GB", type = "OO"
)
)
)
}
dataServer <- function(id, data_vigencias, new_data, r) {
moduleServer(id, function(input, output, session) {
data_con_cambios_de_usuario <- reactive({
new_data$df$CUIT <- as.character(new_data$df$CUIT)
if (!is.null(new_data$df)) {
zz <- new_data$df %>%
group_by(CUIT) %>%
top_n(1, Fecha) %>%
ungroup() %>%
as.data.frame()
data <- left_join(data_vigencias, zz, by = "CUIT") %>%
mutate(
Observaciones = ifelse(is.na(Observaciones.y), Observaciones.x, Observaciones.y),
Contactado = if_else(!is.na(Contactado.y) & Contactado.y == 1, "Contactado",
if_else(!is.na(Contactado.y) & Contactado.y == 0, "No Contactado",
Contactado.x
)
),
color = ifelse(Observaciones != "", "purple",
ifelse(Contactado == "Contactado" & Estado.Actual.Cotiz. != "Rechazada", "orange",
color
)
),
precio_competencia = if_else(is.na(Precio_Competencia), precio_competencia, as.character(Precio_Competencia)),
Contactado_int = if_else(Contactado == "Contactado" , 1 , 0)
)
} else {
data <- data_vigencias
}
return(data)
})
## enfoque (focus) switch ----
switch_enfoque <- reactive({
if (input$Switch.2 == 0) {
z <- c(0, 1)
}
else {
z <- 1
}
return(z)
})
## filter logic ----
filteredData <- reactive({
if(input$search == ""){
a <- data_con_cambios_de_usuario() %>%
filter(Aseguradora %in% input$Aseguradora) %>%
filter(CIIU.V1.DESC %in% input$ge) %>%
filter(Segmento %in% input$ge0) %>%
filter(admin.name1 %in% input$ge2) %>%
filter(CIIU.V6.DESC %in% input$ge1) %>%
filter(model_predict %in% switch_()) %>%
filter(enfoque_swicht %in% switch_enfoque())
} else {
a <- data_con_cambios_de_usuario() %>% filter(CUIT %in% input$search)
}
return(a)
})
## commercial opportunities filter
switch_ <- reactive({
if (input$Switch.1 == 0) {
z <- c(0, 1)
}
else {
z <- 1
}
return(z)
})
observeEvent(input$ge,
{
ciiu6_ <- data_vigencias %>% filter(CIIU.V1.DESC %in% input$ge)
updatePickerInput(
session = session,
inputId = "ge1",
choices = unique(ciiu6_$CIIU.V6.DESC),
selected = unique(ciiu6_$CIIU.V6.DESC)
)
},
ignoreInit = TRUE
)
dd <- reactive({
req(r$map_data > 0)
df <- filteredData() %>%
filter(CUIT == r$map_data) %>%
select(alicuota)
df <- if (length(df) == 0) {
0
} else {
df
}
df1 <- if (is.na(df$alicuota)) {
0
} else {
df$alicuota
}
return(df1)
})
dd_aso <- reactive({
req(r$map_data > 0)
df <- filteredData() %>%
filter(CUIT == r$map_data) %>%
select(alicuota_mediana)
if (nrow(df) == 0) {
NULL
} else {
return(df$alicuota_mediana)
}
})
dd_enfq1 <- reactive({
req(r$map_data > 0)
df <- filteredData() %>%
filter(Trabajadores < 101) %>%
filter(Aseguradora %in% amigas) %>%
filter(CUIT == r$map_data) %>%
select(LS.20930, LS.33033, LS.52507, LS.9999999, Remuneracion, Canttrabajadores.Up) %>%
mutate(SP_Mercado = round(Remuneracion / Canttrabajadores.Up, 0)) %>%
select(LS.20930, LS.33033, LS.52507, LS.9999999, SP_Mercado)
if (nrow(df) == 0) {
return(NULL)
} else {
print("estoy aca") # debug output; Spanish for "I'm here"
df <- df %>% as.data.frame()
df2 <- data.table::transpose(df)
# get row and colnames in order
rownames(df2) <- colnames(df)
colnames(df2) <- "Tarifa Enfoque"
return(df2)
}
})
## market reference price ----
output$precio_m <- renderValueBox({
req(r$map_data > 0)
if (!isTruthy(dd_aso())) {
df <- ""
valueBox(
df,
"Precio de Referencia",
color = "olive",
width = 8
)
} else {
df2 <- paste(prettyNum(round(dd_aso(), 2), big.mark = ",", scientific = FALSE), "%")
valueBox(
formatC(df2, format = "d", big.mark = ","),
"Precio de Referencia",
icon = icon("check", lib = "glyphicon"),
color = "olive",
width = 8
)
}
})
## technical focus (enfoque) indicators ----
output$table_enfoque <- DT::renderDataTable(
dd_enfq1(),
options = list(
paging = FALSE,
searching = FALSE,
fixedColumns = FALSE,
autoWidth = TRUE,
ordering = FALSE,
bInfo = FALSE,
dom = "Bfrtip",
# buttons = c('excel'),
scrollX = FALSE,
class = "display"
)
)
return(filteredData)
})
}
---
repo_name: arrpak/SentinelAdmins | path: /01-InterpolacionLatLon.R | filename: 01-InterpolacionLatLon.R | extension: r
blob_id: 7c3b18e35872a3c6513325dc8afc7d9c38183e78 | directory_id: b5175e8438ca574ab67653a64574d1b3c27df7d4 | content_id: 36188392f4f96c3fe2318d39c9e15cd177ddc93d
snapshot_id: 48d89d7f1e16062ee0050449f7b0979a9a174d8b | revision_id: 388b49c61046a9d38ea31132ef41a7ea53aec316 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 2,399 | is_vendor: false | is_generated: false
visit_date: 2022-09-20T05:23:17.838917 | revision_date: 2020-06-02T15:51:18 | committer_date: 2020-06-02T15:51:18
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
## interpolation
library(leaflet)
library(tidyverse)
library(ggplot2)
library(plotly)
df = read.csv('Modelar_UH2020.txt', sep='|')
puntos_identificados <- data.frame(
Lugar = c("Retiro","Legazpi", "Plaza de Toros","Arguelles",
"Cuatro Caminos", "Zapateria Robledo","Tetuan"),
X = c(2209489320, 2209747825, 2219461192, 2196313578, 2201844156, 2196736748, 2203891600),
Y = c(165537783, 165397664, 165616041, 165611443, 165677624, 165795600, 165734820),
Lat = c(40.413588, 40.386163, 40.432474, 40.430805, 40.447087, 40.475766, 40.460913),
Lon = c(-3.683393, -3.680577, -3.663236, -3.716191, -3.703295, -3.71556, -3.698528)
)
puntos_identificados$Sur <- ifelse(165724537<puntos_identificados$Y, 1,0)
puntos_identificados$Oeste <- ifelse(2208137136<puntos_identificados$X, 1, 0)
df$Sur <- ifelse(165724537<df$Y, 1,0)
df$Oeste <- ifelse(2208137136<df$X, 1, 0)
model_lat <- lm(Lat ~ Y + Sur + Oeste, data = puntos_identificados)
model_lon <- lm(Lon ~ X + Sur + Oeste, data = puntos_identificados)
summary(model_lon)
summary(model_lat)
df$lat <- predict.lm(model_lat, df)
df$lon <- predict.lm(model_lon, df)
# To visualize the fit on a map
leaflet(data = df) %>%
addTiles(urlTemplate = 'https://tiles.stadiamaps.com/tiles/alidade_smooth_dark/{z}/{x}/{y}{r}.png') %>%
addCircles(~lon, ~lat, radius = 0.2, fillOpacity = 0.03)
leaflet(data = puntos_identificados) %>%
addTiles(urlTemplate = 'https://tiles.stadiamaps.com/tiles/alidade_smooth_dark/{z}/{x}/{y}{r}.png') %>%
addCircles(~Lon, ~Lat, radius = 0.2, fillOpacity = 0.03, label = ~as.character(Lugar))
df$Sur = NULL
df$Oeste = NULL
# Computing the distance to Sol
sol = c(lat = 40.418460, lon = -3.706529)
df$dist_eucl_sol = sqrt((df$lat - sol['lat'])**2 + (df$lon - sol['lon'])**2)
df$dist_taxi_sol = abs(df$lat - sol['lat']) + abs(df$lon - sol['lon']) # taxicab distance uses absolute differences
write.csv(df, 'dataset_train.csv')
# Interpolating for the Estimar (test) dataset
df_test = read.csv('Estimar_UH2020.txt', sep='|')
df_test$Sur <- ifelse(165724537<df_test$Y, 1,0)
df_test$Oeste <- ifelse(2208137136<df_test$X, 1, 0)
df_test$lat = predict.lm(model_lat, df_test)
df_test$lon = predict.lm(model_lon, df_test)
df_test$Sur = NULL
df_test$Oeste = NULL
df_test$dist_eucl_sol = sqrt((df_test$lat - sol['lat'])**2 + (df_test$lon - sol['lon'])**2)
df_test$dist_taxi_sol = abs(df_test$lat - sol['lat']) + abs(df_test$lon - sol['lon'])
write.csv(df_test, 'dataset_test.csv')
---
repo_name: chrisbrownlie/myFacebook | path: /man/get_messages.Rd | filename: get_messages.Rd | extension: rd
blob_id: 32809ac2cc556eab1839aeead3c006efaf2d01a3 | directory_id: 1fc02597b88e1046e5a414d3fbfaea2b86a7ff5a | content_id: 400537ade0da31b5f0b0e3c4da7839e40db1865c
snapshot_id: c69458bc617157ee753180cd6e6628798abd9a2f | revision_id: 488bb82f4759c294b3a63ad41e112dfa08d32dad | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 590 | is_vendor: false | is_generated: true
visit_date: 2022-12-13T20:16:15.930397 | revision_date: 2020-09-09T06:57:13 | committer_date: 2020-09-09T06:57:13
github_id: 293,470,871 | star_events_count: 2 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/messages.R
\name{get_messages}
\alias{get_messages}
\title{Get messages with an individual}
\usage{
get_messages(folder = "data", participant)
}
\arguments{
\item{folder}{the name of the data folder (in the project root directory)}
\item{participant}{the name of someone who you have sent or received messages from
(will search for this name in the data/messages/inbox folder)}
}
\value{
the messages sent to and received from the specified participant
}
\description{
Get messages with an individual
}
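% Illustrative usage only: the folder and participant name below are hypothetical.
\examples{
\dontrun{
get_messages(folder = "data", participant = "John Smith")
}
}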
---
repo_name: taylorpourtaheri/nr | path: /R/calc_diffusion.R | filename: calc_diffusion.R | extension: r
blob_id: 058018d96c3ed9f487612fbf1c548292ac01221c | directory_id: b8b3443e3b7021e9ac458bc12166f3e6f470843d | content_id: a4fb8d5a05435b2161f364f00e36a4092b76fc1e
snapshot_id: e745a5734ca244e642ef089d9dfd20b957f00852 | revision_id: 5c2710c197533ecf8b439d58d3d317bc203ac990 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | language: R | src_encoding: UTF-8 | length_bytes: 1,043 | is_vendor: false | is_generated: false
visit_date: 2023-07-14T04:53:48.773417 | revision_date: 2021-08-11T22:15:55 | committer_date: 2021-08-11T22:15:55
github_id: 386,658,478 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
#' @title Calculate network propagation by diffusion
#' @description Calculate various measures of diffusion
#' @import diffuStats
#' @param graph Graph of class '\code{igraph}'. Must include node
#' attributes '\code{name}' and '\code{seed}'.
#' @param method String. Kernel for diffusion. Passed to \code{diffusion()}
#' from the \code{diffuStats} package.
#' @return Graph of class '\code{igraph}' with new attribute, '\code{propagation_score}'.
#' @export
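#' @examples
#' \dontrun{
#' # illustrative sketch with hypothetical data: a small ring graph with a
#' # single seed node; the 'name' and 'seed' vertex attributes are required
#' g <- igraph::make_ring(10)
#' g <- igraph::set_vertex_attr(g, "name", value = paste0("n", 1:10))
#' g <- igraph::set_vertex_attr(g, "seed", value = c(1, rep(0, 9)))
#' g <- calc_diffusion(g, method = "raw")
#' igraph::vertex_attr(g, "propagation_score")
#' }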
calc_diffusion <- function(graph, method = 'raw') {
# simulate diffusion
scores <- as.data.frame(igraph::vertex_attr(graph))$seed
names(scores) <- as.data.frame(igraph::vertex_attr(graph))$name
diffusion_scores <- diffuStats::diffuse(graph = graph,
scores = scores,
method = method)
graph <- igraph::set_vertex_attr(graph,
name = 'propagation_score',
value = diffusion_scores)
return(graph)
}
---
repo_name: sureshnageswaran/ExData006-Project2 | path: /plot2.r | filename: plot2.r | extension: r
blob_id: 32ccf68d5e1cb1c046fcc6533765516545ac663b | directory_id: 3b9f039200307a5de8df02303e0be6665edfa74d | content_id: 1123f599b3d523fb9ab80ce1f6c980d246033e15
snapshot_id: f6413f3ef57eb73f1d94bc241e763a8951dc9ccc | revision_id: d99de8890b5ce47808d522f6714528bdacee54f0 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 4,847 | is_vendor: false | is_generated: false
visit_date: 2016-09-06T08:41:02.963269 | revision_date: 2014-09-21T22:50:15 | committer_date: 2014-09-21T22:50:15
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
# Please visit https://github.com/sureshnageswaran/ExData006-Project2
# View this readme file before using the code
# https://github.com/sureshnageswaran/ExData006-Project2/blob/master/README.md
# The source file mentioned below is no longer needed, since the function has been appended to the end of this code file.
#source("checkAndDownload.r") # Contains code to download the files in case absent
library(plyr) # For join(), which is faster than merge() for the size of the dataframe
library(ggplot2) # For qplot()
# Function : plot2
# Author : Suresh Nageswaran
# Input : Path to the dataset,
# Data frame with the NEI dataset if available
# Data frame with the SCC dataset if available
# Boolean flag indicating if the output should go to a file
# Output : File plot2.png with the output plot
# Dependencies : checkAndDownload.r
# How to invoke: On command line, simply type > plot2()
plot2 <- function(sPath="C:/projects/rwork/exdata006/project2/ExData006-Project2", dfNEI="", dfSCC="", bToFile = TRUE)
{
if(!is.data.frame(dfNEI) || !is.data.frame(dfSCC))
{
if ( checkAndDownload(sPath) == FALSE )
{
sPath = getwd()
}
# read in the NEI and SCC rds files into dataframes
print("Reading in NEI dataframe ...")
dfNEI <- readRDS("summarySCC_PM25.rds")
print("Reading in SCC dataframe ...")
dfSCC <- readRDS("Source_Classification_Code.rds")
}
# ------------------------------------------------------------------#
# Question: [2]
#
# Have total emissions from PM2.5 decreased in the Baltimore City,
# Maryland (fips == "24510") from 1999 to 2008? Use the base
# plotting system to make a plot answering this question.
#
# Answer:
# We will plot the emissions data over the years for Baltimore.
#
# Approach:
# For the second plot, we summarize the PM2.5 data for Baltimore
# tapply the 'sum' function on Emissions along the index year
# Finally invoke the plot function for the graph.
# Output is seen in plot2.png
# ------------------------------------------------------------------#
# For the second plot, we summarize the PM2.5 data for Baltimore
print("Running subset operation on dataframe ...")
dfBalt <- subset(dfNEI, fips == "24510", select = c(year, Emissions))
# tapply the 'sum' function on 'Emissions' along the index 'year'
lBalt <- tapply(dfBalt$Emissions, dfBalt$year, sum)
dfBalt <- data.frame(Year=as.numeric(names(lBalt)), Emissions=as.numeric(lBalt))
# Compute the % decrease from 1999 to 2008
iMin <- dfBalt$Emissions[dfBalt$Year==min(dfBalt$Year)]
iMax <- dfBalt$Emissions[dfBalt$Year==max(dfBalt$Year)]
dPercent <- round( ((iMax-iMin)/iMin) *-1*100, digits = 2)
iDiff <- round(iMax - iMin)
# This is the text for the graph
sLabel <- paste("Emissions (PM 2.5) in Baltimore declined by", iDiff)
sLabel <- paste(paste(sLabel, "\nThis is a net decrease of ", dPercent), "%.", sep="")
print("Creating plot on disk...")
if (bToFile == TRUE)
{
# Initialize the PNG device
png(filename=paste(sPath, "/plot2.png", sep=""), width=1000, height=480)
}
#Create the plot using the base plotting tools
par(pch=22, col="red")
plot(dfBalt$Year, dfBalt$Emissions, type="o", xlab="Year", ylab="Emissions", col="red")
title("Plot#2: Emissions vs. Year in Baltimore", sub="")
text(2004, 2600, sLabel, cex=.8)
# Turn off the device i.e. flush to disk
if (bToFile == TRUE) dev.off()
# cleanup
rm(dfBalt,lBalt)
print("Complete.")
return (TRUE)
}
# Function : checkAndDownload
# Author : Suresh Nageswaran
# Input : Path to the dataset,
# Output : Boolean
# True if the given path was valid; False otherwise
# Dependencies : None
# How to invoke: On command line, simply type > checkAndDownload("<Path>")
# Purpose : This function checks if the input files required are present.
# If absent, they are downloaded into the current folder.
checkAndDownload <- function(sPath="")
{
# List of required files
files <- c(
"summarySCC_PM25.rds", # NEI
"Source_Classification_Code.rds" # SCC
)
if( ! file.exists(sPath) )
{
# Either no path was given or path was wrong
# In this case, we set the current directory as the path
sPath = getwd()
setwd(sPath)
retVal <- FALSE
}
else
{
retVal <- TRUE
}
# Check if the files are in the working directory
if( !all(file.exists(files)) )
{
# download the files
temp <- tempfile()
fileURL <-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileURL,temp)
unzip(temp,files=files)
file.remove(temp)
}
return (retVal)
}
---
repo_name: Leonardo-Vela/lemonmarkets | path: /man/get_quotes.Rd | filename: get_quotes.Rd | extension: rd
blob_id: a927eb983d3055f977f993f0509221030ed3dd3d | directory_id: 30394cf5bc1389116d9297d2dd0fd1fc9af2a32c | content_id: f81cb1113c83d9a2df2735b24029d35c0ac4edb4
snapshot_id: a408b41c6fd8009539c7a71ea335d7f86d97b9fa | revision_id: c8259f462ac85c6a0ed3100e045842be0767edfa | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | language: R | src_encoding: UTF-8 | length_bytes: 299 | is_vendor: false | is_generated: true
visit_date: 2023-08-26T16:10:50.653488 | revision_date: 2021-10-21T12:56:03 | committer_date: 2021-10-21T12:56:03
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_quotes.R
\name{get_quotes}
\alias{get_quotes}
\title{Get Market data quotes}
\usage{
get_quotes(isin)
}
\arguments{
\item{isin}{character; ISIN of instrument to be retrieved.}
}
\description{
Get Market data quotes
}
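% Illustrative usage only: a hypothetical call assuming valid lemon.markets
% API credentials; the ISIN below is Apple Inc.'s.
\examples{
\dontrun{
get_quotes(isin = "US0378331005")
}
}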
---
repo_name: agawes/HNF1A_MAVE | path: /QC_plots.R | filename: QC_plots.R | extension: r
blob_id: 42356970d87be527386b655d6b539f3d25732547 | directory_id: dfdaf5f28dfa6704ee25c79cfaa449903c710fe0 | content_id: c06e390662d97ce34ea474ddb5c82c4bc4ca019f
snapshot_id: d0265125c9ef7b2d17192ae1bb89c259f22160e9 | revision_id: 62d5c144cabec363d555123363f0194456465718 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 1,082 | is_vendor: false | is_generated: false
visit_date: 2020-12-27T03:54:42.660390 | revision_date: 2020-02-02T11:02:28 | committer_date: 2020-02-02T11:02:28
github_id: 237,755,891 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
# shell setup (run in the terminal before starting R; not valid R code):
# module load R/3.2.5
# R
setwd("/well/mccarthy/users/agata/MAVE_HNF1A/TWIST-Jan2020/")
codon_files=list.files("codon_summary/")
twist=list()
for (f in codon_files){
name=gsub(".summary","",f)
twist[[name]]=read.table(paste0("codon_summary/",f))
}
### remove all the variants where there is no actual NT change
twist=lapply(twist, function(x) x[which(as.character(x$V4) != as.character(x$V6)),])
x_axis=1:200
col=rainbow(20)
pdf("TWIST-Jan2020.codon_coverage.pdf", width=10)
par(mar=c(5.1,5.1,5.1,1))
for (i in 1:length(twist)){
design_cov = sapply(x_axis, function(x) nrow(twist[[i]][with(twist[[i]], V9==1 & V8>=x),]))
nondesign_cov = sapply(x_axis, function(x) nrow(twist[[i]][with(twist[[i]], V9==0 & V8>=x),]))
plot(x_axis, design_cov, type="l", col="darkred", xlab="Coverage depth",
ylab="# variants detected", cex.axis=1.5, cex.lab=1.5, main=names(twist)[i], ylim=c(0, max(c(design_cov, nondesign_cov))))
lines(x_axis, nondesign_cov, type="l", col="coral")
legend("topright", c("in design","not in design"), col=c("darkred","coral"), lty=1, bty="n")
}
dev.off()
---
repo_name: devon12stone/recommender-system | path: /SteveDone/man/facto_based_rating.Rd | filename: facto_based_rating.Rd | extension: rd
blob_id: a9837d87c448f6bac4aaf2423deeb69bd8a107ac | directory_id: 527a78cd62dbff60a78ba3c3a1ff2d1b802dffcc | content_id: 4ce98b704b899787eac529a399c927e46ba56178
snapshot_id: 5875887346acaa199f5173e3c1df2f9385ebbccc | revision_id: 20b5f64139a327746830c035ce450f534c9ef620 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 1,500 | is_vendor: false | is_generated: true
visit_date: 2021-09-22T03:10:58.774366 | revision_date: 2018-09-05T14:20:25 | committer_date: 2018-09-05T14:20:25
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facto_based_rating.R
\name{facto_based_rating}
\alias{facto_based_rating}
\title{Matrix Factorization Based Rating}
\usage{
facto_based_rating(user, ratings, changed, k_in, user_col, item_col,
rating_col, a, b, items)
}
\arguments{
\item{user}{User ID.}
\item{ratings}{A dataset of users, items and ratings that has been removed of zero ratings.}
\item{changed}{A matrix in the form of the User ID, index of the user's first rating that was set to NA,
index of the user's second rating that was set to NA, original value of the user's first rating
and original value of the user's second rating.}
\item{user_col}{The name of the user ID column in the ratings dataframe.}
\item{item_col}{The name of the item ID column in the ratings dataframe.}
\item{rating_col}{The name of the ratings column in the ratings dataframe.}
\item{a}{The L2 regularisation of the W latent factors for the matrix decomposition.}
\item{b}{The L2 regularisation of the H latent factors for the matrix decomposition.}
\item{items}{Dataframe of items and information on the items.}
\item{k_in}{An integer giving the rank (number of latent factors) of the matrix decomposition.}
\value{
Returns a sorted dataframe of the top 10 rated items, the score of the item and information on each item.
}
\description{
Matrix Factorization Based Rating
}
\examples{
facto_based_rating(277042 ,book_ratings, changed_inds,5,'User.ID', 'ISBN', 'Book.Rating', 0.2,0.2, book_info)
}
---
repo_name: rennyatwork/ExploratoryDataAnalysis-Assignment02 | path: /plot1.R | filename: plot1.R | extension: r
blob_id: 251c21c39e53116657c08ff38810422ba172a33f | directory_id: fcaa962d33b99b37c3913d091f0ad44c7bfe9dfc | content_id: d641d709a9b4aa7f8d8127e6e790a91e9d6a538a
snapshot_id: cd5417e5f8450f1e561028207ea66624d4a2ec2b | revision_id: d9e7032a749dd7c8d94ea0a1be2b14e7ddd48c92 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 1,789 | is_vendor: false | is_generated: false
visit_date: 2021-01-19T18:53:17.621700 | revision_date: 2014-10-21T02:33:50 | committer_date: 2014-10-21T02:33:50
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
## Question1:
## Have total emissions from PM2.5 decreased in the United States
##from 1999 to 2008? Using the base plotting system, make a plot showing
##the total PM2.5 emission from all sources for each of the years
##1999, 2002, 2005, and 2008.
##initialize to use library sqldf
initialize <-function ()
{
library(sqldf)
}
##This function return the full path+ file name to be read
getFullPathFileName <-function(pFileName)
{
return (paste(paste(getwd(),paste("exdata_data_NEI_data", pFileName, sep="/"), sep="/"), ".rds" , sep=""))
}
## Gets the NEI Dt
getDt <- function()
{
if(!exists("lixo"))
{
lixo <<- readRDS(getFullPathFileName('summarySCC_PM25'))
}
return (lixo);
#transform(NEI, year = factor(year)
}
## saves and close the file
saveAndClose <- function(pName, pWidth=480, pHeight=480)
{
dev.copy(png, paste(pName, ".png", sep=""), width=pWidth, height=pHeight)
dev.off()
}
plot1<-function()
{
#initialize
initialize()
#read the data into variable NEI
NEI<-getDt()
#generate new datatable with data suitable for plotting
dt2 <- sqldf("select sum(Emissions), year from NEI group by year")
#plots the graph
plot(data.frame(dt2[2], dt2[1]/1000.00), type='l', xlab='year', ylab=' Emissions PM25 (x 1000)', main='Total Emissions PM25 (x 1000) per year')
#saves and closes
saveAndClose("plot1")
}
---
repo_name: agroebbe/r-learning | path: /using-rhadoop.R | filename: using-rhadoop.R | extension: r
blob_id: e6b22861c59082d9cacd6bb837f1d659b0804a43 | directory_id: dc33e3f1d99229b03a6ad64e3d25f1891e447937 | content_id: b870400fb1bf8c9e9f8de2a36f982b82f0aac614
snapshot_id: af4c1c083a4a5f9f9f3ddbc3016e8b54a8a98ea9 | revision_id: 705693e20d82aaf132a0c46648a489a5bc7b594f | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 2,008 | is_vendor: false | is_generated: false
visit_date: 2020-06-02T05:12:58.179979 | revision_date: 2015-11-05T09:55:55 | committer_date: 2015-11-05T09:55:55
github_id: 25,160,019 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
# note: the hadoop/java environment variables need to be set !!
system("R CMD javareconf -e # exports JAVA variables")
Sys.getenv()[grep("hadoop",Sys.getenv())]
Sys.getenv()[grep("java",Sys.getenv())]
library("rhdfs")
library("rmr2")
hdfs.init()
# quick hack to get access to hadoop.tmp.dir (=/usr/local/hadoop/data )
# hadoop fs -chmod -R a+rwx /tmp
# note: make a user directory in hadoop using 'hadoop fs ... command'
small.ints = to.dfs(1:10)
f <- function(k,v) {
lapply(seq_along(v), function(r) { x <- runif(v[[r]]); keyval(r,c(max(x),min(x))) })
}
dfsfile <- mapreduce(input=small.ints, map=f)
output <- from.dfs(dfsfile)
tbl <- do.call('rbind',lapply(output$val,"[[",2))
# maybe...
tbldf <- as.data.frame(tbl)
names(tbldf) <- c("maxval","minval")
str(tbldf)
#TODO continue here running wordcount on hadoop:
# see also:
# RMR2 example: https://github.com/RevolutionAnalytics/rmr2/blob/master/pkg/tests/wordcount.R
# define a function as a program:
wordcount = function (input, output = NULL, pattern = " ") {
wc.map = function(., lines) {
keyval(unlist(strsplit(x = lines,split = pattern)),1)
}
wc.reduce = function(word, counts) { keyval(word, sum(counts)) }
mapreduce(input = input, output = output, input.format = "text",
map = wc.map, reduce = wc.reduce, combine = T)
}
file.path(list.files("/usr/local/hadoop","*.txt"))
hdfs.ls("/user/vuser/txts")
hdfs.cat("/user/vuser/txts")
hdfs.rm("/user/vuser/txts")
fromfile <- file.path("/usr/local/hadoop/README.txt")
hdfs.put(fromfile,file.path("/user/vuser/txts",basename(fromfile)))
#multiple file transfer:
# ?list.files  # interactive help lookup, not needed when sourcing
fromfiles <- file.path(list.files("/usr/local/hadoop","*.txt", full.names = T))
str(fromfiles)
sapply(fromfiles, function(ff){ hdfs.put(ff,file.path("/user/vuser/txts",basename(ff))) })
# NOTE: when hadoop streaming failed with error code 5 --> file not found or permission problem
hdfile <- "/user/vuser/txts/README.txt"
hdfs.ls(hdfile)
wordcount(hdfile)
wordcount(to.dfs(keyval(NULL, "dit is een tekst"))) # Dutch: "this is a text"
---
repo_name: openfields/metapo | path: /R/benruc.r | filename: benruc.r | extension: r
blob_id: 5e6f6170ce04c1132356cdf1dea2410ae0accb26 | directory_id: 4fe58f307f7af8834859eaac3384236db6700a92 | content_id: 7e786784758f289a38dfb64bc957d231878e7fcf
snapshot_id: 02f7c79cdb35f4354e01e6b0cc48633c012b60b1 | revision_id: 990efa8f318dc68604748f46eecb129e0fc55cef | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 1,049 | is_vendor: false | is_generated: false
visit_date: 2021-01-16T21:10:44.572136 | revision_date: 2018-10-30T21:12:17 | committer_date: 2018-10-30T21:12:17
github_id: 62,419,815 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
# Script for Fort Benning/Fort Rucker region:
# 1. Red-cockaded woodpecker: 42, 5000m
# 2. Wood stork: 90, 500000m
# 3. Relict trillium: 42, 2m
# 4. Northern long-eared bat: 41/42/43/90, 100000m
# 5. Choctaw bean: river
# 6. Fuzzy pigtoe: river
# American alligator (excluded)
source('./R/mcmapcsv.r')
# export habitat network data to csv: need to export 90, 41/42/43/90
# import habitat network data for rcwo
read.csv(file="./data/benruc_65r42_5c.csv", header=TRUE) -> br42
# calculate rcwo network stats
system.time(mcmapcsv(dd=5000, vname=br42) -> br.rcwo)
# export data
# calculate trillium network stats
system.time(mcmapcsv(dd=2, vname=br42) -> br.retr)
# import & calculate for wood stork
read.csv(file="./data/benruc_65r90_5.csv", header=TRUE) -> br90
system.time(mcmapcsv(dd=500000, vname=br90) -> br.wost)
# import & calculate for northern long-eared bat
read.csv(file="./data/benruc_65r41424390.csv", header=TRUE) -> br41424390
system.time(mcmapcsv(dd=100000, vname=br41424390) -> br.nleb)
# join data: python script for GRASS
---
repo_name: sharpwaveripple/RUNDMC_LGM | path: /scripts/long_wmh_hv_cogn.R | filename: long_wmh_hv_cogn.R | extension: r
blob_id: 9ee488a0be697bebacb4f1c033a8fd316ae04d72 | directory_id: ea0dd080892bfebbf6ee0e0f2fe50b88607116fb | content_id: f4784f7d982de2620004845524d882ea1ff8d51a
snapshot_id: 212c7903e86245bf2fa01834d75594ed667e0e6d | revision_id: 8ccb8239adeebc639e53ba31ed25fa547c1d897f | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 4,173 | is_vendor: false | is_generated: false
visit_date: 2021-08-14T06:06:43.457735 | revision_date: 2017-11-14T11:36:55 | committer_date: 2017-11-14T11:36:55
github_id: 105,522,292 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
library(mice)
library(lavaan)
library(semTools)
library(psych)
library(semPlot)
library(ggraph)
library(ggplot2)
datafile <- "data/RUNDMC_datasheet_long.csv"
df <- read.csv(datafile, header=T)
df$hvratio06 <- (df$hv06 / df$tbv06)*1000
df$hvratio11 <- (df$hv11 / df$tbv11)*1000
df$hvratio15 <- (df$hv15 / df$tbv15)*1000
df$wmhratio06 <- log((df$wmh06 / df$tbv06)*100000)
df$wmhratio11 <- log((df$wmh11 / df$tbv11)*100000)
df$wmhratio15 <- log((df$wmh15 / df$tbv15)*100000)
df$mem06 <- df$wvlt123correctmean06 + df$wvltdelayrecall06 +
df$reyimmrecalltotalscore06 + df$reydelayrecalltotalscore06 +
df$pp2sat06 + df$pp3sat06
df$mem11 <- df$wvlt123correctmean11 + df$wvltdelayrecall11 +
df$reyimmrecalltotalscore11 + df$reydelayrecalltotalscore11 +
df$pp2sat11 + df$pp3sat11
df$mem15 <- df$wvlt123correctmean15 + df$wvltdelayrecall15 +
df$reyimmrecalltotalscore15 + df$reydelayrecalltotalscore15 +
df$pp2sat15 + df$pp3sat15
df$psexf06 <- df$pp1sat06 + df$stroop1sat06 + df$stroop2sat06 + df$ldstcorrect06 +
df$fluencyanimals06 + df$fluencyjobs06 + df$stroopinterference06 + df$vsattotalsat06
df$psexf11 <- df$pp1sat11 + df$stroop1sat11 + df$stroop2sat11 + df$ldstcorrect11 +
df$fluencyanimals11 + df$fluencyjobs11 + df$stroopinterference11 + df$vsattotalsat11
df$psexf15 <- df$pp1sat15 + df$stroop1sat15 + df$stroop2sat15 + df$ldstcorrect15 +
df$fluencysupermarket15 + df$fluencyjobs15 + df$stroopinterference15 + df$vsattotalsat15
df$ps06 <- df$pp1sat06 + df$stroop1sat06 + df$stroop2sat06 + df$ldstcorrect06
df$ps11 <- df$pp1sat11 + df$stroop1sat11 + df$stroop2sat11 + df$ldstcorrect11
df$ps15 <- df$pp1sat15 + df$stroop1sat15 + df$stroop2sat15 + df$ldstcorrect15
df$exf06 <- df$fluencyanimals06 + df$fluencyjobs06 + df$stroopinterference06 + df$vsattotalsat06
df$exf11 <- df$fluencyanimals11 + df$fluencyjobs11 + df$stroopinterference11 + df$vsattotalsat11
df$exf15 <- df$fluencysupermarket15 + df$fluencyjobs15 + df$stroopinterference15 + df$vsattotalsat15
variables <- c("wmhratio06", "hvratio06", "mem06", "psexf06",
"wmhratio11", "hvratio11", "mem11", "psexf11",
"wmhratio15", "hvratio15", "mem15", "psexf15")
df.var <- df[variables]
df.var.incl <- df.var[complete.cases(df.var), ]
# psexf
variables.psexf <- c("wmhratio06", "hvratio06", "psexf06",
"wmhratio11", "hvratio11", "psexf11",
"wmhratio15", "hvratio15", "psexf15")
df.var.psexf <- df[variables.psexf]
df.var.psexf.incl <- df.var.psexf[complete.cases(df.var.psexf), ]
modelfile.psexf <- "temp/long_wmh_hv_psexf.lav"
model.psexf <- readLines(modelfile.psexf)
fit.psexf <- growth(model.psexf, data=df.var.psexf.incl)
fitMeasures(fit.psexf)
summary(fit.psexf)
semPaths(fit.psexf)
# mem
variables.mem <- c("wmhratio06", "hvratio06", "mem06",
"wmhratio11", "hvratio11", "mem11",
"wmhratio15", "hvratio15", "mem15")
df.var.mem <- df[variables.mem]
df.var.mem.incl <- df.var.mem[complete.cases(df.var.mem), ]
modelfile.mem <- "temp/long_wmh_hv_mem.lav"
model.mem <- readLines(modelfile.mem)
fit.mem <- growth(model.mem, data=df.var.mem.incl)
fitMeasures(fit.mem)
summary(fit.mem)
semPaths(fit.mem)
# ps
variables.ps <- c("wmhratio06", "hvratio06", "ps06",
"wmhratio11", "hvratio11", "ps11",
"wmhratio15", "hvratio15", "ps15")
df.var.ps <- df[variables.ps]
df.var.ps.incl <- df.var.ps[complete.cases(df.var.ps), ]
modelfile.ps <- "temp/long_wmh_hv_ps.lav"
model.ps <- readLines(modelfile.ps)
fit.ps <- growth(model.ps, data=df.var.ps.incl)
fitMeasures(fit.ps)
summary(fit.ps)
semPaths(fit.ps)
# exf
variables.exf <- c("wmhratio06", "hvratio06", "exf06",
"wmhratio11", "hvratio11", "exf11",
"wmhratio15", "hvratio15", "exf15")
df.var.exf <- df[variables.exf]
df.var.exf.incl <- df.var.exf[complete.cases(df.var.exf), ]
modelfile.exf <- "temp/long_wmh_hv_exf.lav"
model.exf <- readLines(modelfile.exf)
fit.exf <- growth(model.exf, data=df.var.exf.incl)
fitMeasures(fit.exf)
summary(fit.exf)
semPaths(fit.exf)
---
repo_name: barnabywalker/tidyassessments | path: /man/iucnn.Rd | filename: iucnn.Rd | extension: rd
blob_id: 6168175b22d5cd26958ee7b463dc544c09eebdbb | directory_id: 868b0cdb5d5c62b827c5d257d50cddf0605507b4 | content_id: 68f382d7208082458368c7824152b2bb88a15a9b
snapshot_id: 0f79d14b1d416617494de50f71f5cacb2266bb56 | revision_id: 566e9bd523d039c397ff659a0ab54a1a2751ea85 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | language: R | src_encoding: UTF-8 | length_bytes: 1,360 | is_vendor: false | is_generated: true
visit_date: 2023-04-18T20:09:18.266416 | revision_date: 2022-03-30T21:32:56 | committer_date: 2022-03-30T21:32:56
github_id: 460,609,238 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iucnn.R
\name{iucnn}
\alias{iucnn}
\title{Neural Network classifier to automate occurrence-based conservation assessments}
\usage{
iucnn(
mode = "classification",
engine = "keras",
layers = NULL,
dropout = NULL,
epochs = NULL
)
}
\arguments{
\item{mode}{A single character string for the prediction outcome mode.
Only "classification" is allowed.}
\item{engine}{A single character string specifying the engine to use.}
\item{layers}{A string specification of the hidden units in each layer, e.g. "40_20"
for a two-layer network with a 40-unit layer then a 20-unit layer.}
\item{dropout}{A number between 0 (inclusive) and 1 denoting the proportion
of model parameters randomly set to zero during model training.}
\item{epochs}{An integer for the number of training iterations.}
}
\description{
\code{iucnn()} defines a neural network for predicting the conservation status of
species given species-level predictors calculated from occurrence records.
This is an implementation of the \href{https://doi.org/10.1111/ddi.13450}{IUCNN model}
so it works in the tidymodels framework.
}
\details{
Currently only the binary threatened/not threatened classification is implemented.
}
\examples{
parsnip::show_engines("iucnn")
iucnn(layers="40_20", dropout=0.3, epochs=10)
}
---
repo_name: cran/mem | path: /man/percentage.added.Rd | filename: percentage.added.Rd | extension: rd
blob_id: e11937abc92a6b3662a699574cef874a3214c60f | directory_id: aec46c5edcc8cb807ad5d5acd283492519cda063 | content_id: 4d78c15a8d45f3d77e80a8940a7a643ba2eebf7d
snapshot_id: e8f73b6aed326db7a985664af037446881ef9d0b | revision_id: bcda3b3d4b5537ffdecc17503ade5c37a002a56e | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 317 | is_vendor: false | is_generated: true
visit_date: 2023-06-22T13:13:07.407467 | revision_date: 2023-06-20T10:30:03 | committer_date: 2023-06-20T10:30:03
github_id: 17,697,395 | star_events_count: 0 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/percentage.added.R
\name{percentage.added}
\alias{percentage.added}
\title{For use with transformseries.multiple}
\usage{
percentage.added(i.data, i.n)
}
\description{
For use with transformseries.multiple
}
\keyword{internal}
---
repo_name: matteodefelice/ECAD-data-browser | path: /create_database.R | filename: create_database.R | extension: r
blob_id: c95b15b640b94648a915cc10504d10c915a16b26 | directory_id: c6472aef2e2ef8d13ab681a64aef4663d977ff7c | content_id: e08153c975a4719a18d506396ffc9674ed65f8cf
snapshot_id: af9b5ca5fd9ad7129fc6c33799d9cde7584d6727 | revision_id: 3f93e1c1a51264565a41678063f9591b429b07a8 | branch_name: refs/heads/master
detected_licenses: ["CC-BY-4.0"] | license_type: permissive | language: R | src_encoding: UTF-8 | length_bytes: 2,817 | is_vendor: false | is_generated: false
visit_date: 2021-06-20T22:05:49.796505 | revision_date: 2021-01-07T10:14:34 | committer_date: 2021-01-07T10:14:34
github_id: 151,147,789 | star_events_count: 2 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
library(tidyverse)
library(lubridate)
# Read a source file ------------------------------------------------------
read_source_file <- function(filename) {
read_data <- read.table(filename,
sep = "\n",
skip = 23 + 1, stringsAsFactors = FALSE,
quote = ""
) %>%
mutate(
STAID = str_sub(V1, 1, 5) %>% as.numeric(),
SOUID = str_sub(V1, 7, 12) %>% as.numeric(),
SOUNAME = str_sub(V1, 14, 53),
CN = str_sub(V1, 55, 56),
LAT = str_sub(V1, 58, 66),
LON = str_sub(V1, 68, 77),
HGTH = str_sub(V1, 79, 82) %>% as.numeric(),
ELEI = str_sub(V1, 84, 87),
START = str_sub(V1, 89, 96) %>% parse_date(format = "%Y%m%d"),
STOP = str_sub(V1, 98, 105) %>% parse_date(format = "%Y%m%d"),
PARID = str_sub(V1, 107, 111),
PARNAME = str_sub(V1, 113, 163)
) %>%
select(-V1) %>%
as_tibble()
return(read_data)
}
# Get the list of source files
source_files <- list.files("data",
pattern = glob2rx("ECA_blend_source*txt"),
full.names = TRUE
)
# Read all the source files
source_data <- lapply(source_files, read_source_file) %>%
bind_rows()
# Read station file ---------------------------------------------------
read_station_file <- function(filename) {
read_data <- read_csv(filename,
skip = 17
)
return(read_data)
}
# Get the list of station files
stn_files <- list.files("data",
pattern = glob2rx("ECA_blend_station*txt"),
full.names = TRUE
)
# Read all the station files
stn_data <- lapply(stn_files, read_station_file) %>%
bind_rows() %>%
distinct()
## Create single structure ------------------------------------------
eobs_data <- source_data %>%
mutate(base_ELEI = str_trim(ELEI) %>% str_sub(1, 2)) %>%
group_by(STAID, base_ELEI) %>%
summarise(
n_sources = n(),
START = min(START),
STOP = max(STOP)
) %>%
inner_join(stn_data) %>%
rowwise() %>%
mutate(
filename = sprintf(
"%s_STAID%06d.txt",
base_ELEI,
STAID
),
lat_dec = as.numeric(str_split(LAT, ":", simplify = TRUE)[1]) +
sign(as.numeric(str_split(LAT, ":", simplify = TRUE)[1]))*
as.numeric(str_split(LAT, ":", simplify = TRUE)[2]) / 60 +
sign(as.numeric(str_split(LAT, ":", simplify = TRUE)[1]))*
as.numeric(str_split(LAT, ":", simplify = TRUE)[3]) / 3600,
lon_dec = as.numeric(str_split(LON, ":", simplify = TRUE)[1]) +
sign(as.numeric(str_split(LON, ":", simplify = TRUE)[1]))*
as.numeric(str_split(LON, ":", simplify = TRUE)[2]) / 60 +
sign(as.numeric(str_split(LON, ":", simplify = TRUE)[1]))*
as.numeric(str_split(LON, ":", simplify = TRUE)[3]) / 3600,
years_length = round(as.numeric(difftime(STOP, START, units = "days")) / 365.25)
)
# Save the data for the Shiny application
write_rds(eobs_data, "eobs-database-stations.rds")
---
repo_name: akhikolla/TestedPackages-NoIssues | path: /resemble/R/mbl.R | filename: mbl.R | extension: r
blob_id: 686fa1db325a1c24299cdd09be76868d8008282c | directory_id: 72d9009d19e92b721d5cc0e8f8045e1145921130 | content_id: 0c86853ff7afc6dbc323a589f8be7d42f6c52fca
snapshot_id: be46c49c0836b3f0cf60e247087089868adf7a62 | revision_id: eb8d498cc132def615c090941bc172e17fdce267 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | language: R | src_encoding: UTF-8 | length_bytes: 60,412 | is_vendor: false | is_generated: false
visit_date: 2023-03-01T09:10:17.227119 | revision_date: 2021-01-25T19:44:44 | committer_date: 2021-01-25T19:44:44
github_id: 332,027,727 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
#' @title A function for memory-based learning (mbl)
#' @description
#' \loadmathjax
#' This function is implemented for memory-based learning (a.k.a.
#' instance-based learning or local regression) which is a non-linear lazy
#' learning approach for predicting a given response variable from a set of
#' predictor variables. For each observation in a prediction set, a specific
#' local regression is carried out based on a subset of similar observations
#' (nearest neighbors) selected from a reference set. The local model is
#' then used to predict the response value of the target (prediction)
#' observation. Therefore this function does not yield a global
#' regression model.
#' @usage
#' mbl(Xr, Yr, Xu, Yu = NULL, k, k_diss, k_range, spike = NULL,
#' method = local_fit_wapls(min_pls_c = 3, max_pls_c = min(dim(Xr), 15)),
#' diss_method = "pca", diss_usage = "predictors",
#' gh = TRUE, pc_selection = list(method = "opc", value = min(dim(Xr), 40)),
#' control = mbl_control(), group = NULL,
#' center = TRUE, scale = FALSE, verbose = TRUE,
#' documentation = character(), ...)
#'
#' @param Xr a matrix of predictor variables of the reference data
#' (observations in rows and variables in columns).
#' @param Yr a numeric matrix of one column containing the values of the
#' response variable corresponding to the reference data.
#' @param Xu a matrix of predictor variables of the data to be predicted
#' (observations in rows and variables in columns).
#' @param Yu an optional matrix of one column containing the values of the
#' response variable corresponding to the data to be predicted. Default is
#' \code{NULL}.
#' @param k a vector of integers specifying the sequence of k-nearest
#' neighbors to be tested. Either \code{k} or \code{k_diss} must be specified.
#' This vector will be automatically sorted into ascending order. If
#' non-integer numbers are passed, they will be coerced to the next upper
#' integers.
#' @param k_diss a numeric vector specifying the sequence of dissimilarity
#' thresholds to be tested for the selection of the nearest neighbors found in
#' \code{Xr} around each observation in \code{Xu}. These thresholds depend on
#' the corresponding dissimilarity measure specified in the object passed to
#' \code{control}. Either \code{k} or \code{k_diss} must be specified.
#' @param k_range an integer vector of length 2 which specifies the minimum
#' (first value) and the maximum (second value) number of neighbors to be
#' retained when the \code{k_diss} is given.
#' @param spike an integer vector indicating the indices of observations in
#' \code{Xr} that must be forced into the neighborhoods of every \code{Xu}
#' observation. Default is \code{NULL} (i.e. no observations are forced). Note
#' that this argument is not intended for increasing the neighborhood size which
#' is only controlled by \code{k} or \code{k_diss} and \code{k_range}. By
#' forcing observations into the neighborhood, some observations will be forced
#' out of the neighborhood. See details.
#' @param method an object of class \code{\link{local_fit}} which indicates the
#' type of regression to conduct at each local segment as well as additional
#' parameters affecting this regression. See \code{\link{local_fit}} function.
#' @param diss_method a character string indicating the spectral dissimilarity
#' metric to be used in the selection of the nearest neighbors of each
#' observation. Options are:
#' \itemize{
#' \item{\code{"pca"} (Default):}{ Mahalanobis distance
#' computed on the matrix of scores of a Principal Component (PC)
#' projection of \code{Xr} and \code{Xu}. PC projection is done using the
#' singular value decomposition (SVD) algorithm.
#' See \code{\link{ortho_diss}} function.}
#'
#' \item{\code{"pca.nipals"}}{ Mahalanobis distance
#' computed on the matrix of scores of a Principal Component (PC)
#' projection of \code{Xr} and \code{Xu}. PC projection is done using the
#' non-linear iterative partial least squares (nipals) algorithm.
#' See \code{\link{ortho_diss}} function.}
#'
#' \item{\code{"pls"}}{ Mahalanobis distance
#' computed on the matrix of scores of a partial least squares projection
#' of \code{Xr} and \code{Xu}. In this case, \code{Yr} is always
#' required. See \code{\link{ortho_diss}} function.}
#'
#' \item{\code{"cor"}}{ correlation coefficient
#' between observations. See \code{\link{cor_diss}} function.}
#'
#' \item{\code{"euclid"}}{ Euclidean distance
#' between observations. See \code{\link{f_diss}} function.}
#'
#' \item{\code{"cosine"}}{ Cosine distance
#' between observations. See \code{\link{f_diss}} function.}
#'
#' \item{\code{"sid"}}{ spectral information divergence between
#' observations. See \code{\link{sid}} function.}
#' }
#' Alternatively, a matrix of dissimilarities can also be passed to this
#' argument. This matrix is supposed to be a user-defined matrix
#' representing the dissimilarities between observations in \code{Xr} and
#' \code{Xu}. When \code{diss_usage = "predictors"}, this matrix must be squared
#' (derived from a matrix of the form \code{rbind(Xr, Xu)}) for which the
#' diagonal values are zeros (since the dissimilarity between an object and
#' itself must be 0). On the other hand, if \code{diss_usage} is set to either
#' \code{"weights"} or \code{"none"}, it must be a matrix representing the
#' dissimilarity of each observation in \code{Xu} to each observation in
#' \code{Xr}. The number of columns of the input matrix must be equal to the
#' number of rows in \code{Xu} and the number of rows equal to the number of
#' rows in \code{Xr}.
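#' A minimal sketch of the latter case (hypothetical precomputed matrix):
#' \preformatted{
#' # rows correspond to Xr observations, columns to Xu observations
#' d_mat <- matrix(runif(nrow(Xr) * nrow(Xu)), nrow(Xr), nrow(Xu))
#' # usable with diss_usage = "weights" or diss_usage = "none"
#' }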
#' @param diss_usage a character string specifying how the dissimilarity
#' information shall be used. The possible options are: \code{"predictors"},
#' \code{"weights"} and \code{"none"} (see details below).
#' Default is \code{"predictors"}.
#' @param control a list created with the \code{\link{mbl_control}} function
#' which contains additional parameters that control some few aspects of the
#' \code{mbl} function (cross-validation, parameter tuning, etc).
#' The default list is as returned by \code{mbl_control()}.
#' See the \code{\link{mbl_control}} function for more details.
#' @param gh a logical indicating if the global Mahalanobis distance (in the pls
#' score space) between each observation and the pls mean (centre) must be
#' computed. This metric is known as the GH distance in the literature. Note
#' that this computation is based on the number of pls components determined by
#' using the \code{pc_selection} argument. See details.
#' @param pc_selection a list of length 2 used for the computation of GH (if
#' \code{gh = TRUE}) as well as in the computation of the dissimilarity methods
#' based on \code{\link{ortho_diss}} (i.e. when \code{diss_method} is one of:
#' \code{"pca"}, \code{"pca.nipals"} or \code{"pls"}) or when \code{gh = TRUE}.
#' This argument is used for optimizing the number of components (principal
#' components or pls factors) to be retained for dissimilarity/distance
#' computation purposes only (i.e not for regression).
#' This list must contain two elements in the following order:
#' \code{method} (a character indicating the method for selecting the number of
#' components) and \code{value} (a numerical value that complements the selected
#' method). The methods available are:
#' \itemize{
#' \item{\code{"opc"}:} { optimized principal component selection based
#' on Ramirez-Lopez et al. (2013a, 2013b). The optimal number of
#' components (of a given set of observations) is the one for which its distance
#' matrix minimizes the differences between the \code{Yr} value of each
#' observation and the \code{Yr} value of its closest observation. In
#' this case \code{value} must be a value (larger than 0 and
#' below the minimum dimension of \code{Xr} or \code{Xr} and \code{Xu}
#' combined) indicating the maximum
#' number of principal components to be tested. See the
#' \code{\link{ortho_projection}} function for more details.}
#'
#' \item{\code{"cumvar"}:}{ selection of the principal components based
#' on a given cumulative amount of explained variance. In this case,
#' \code{value} must be a value (larger than 0 and below or equal to 1)
#' indicating the minimum amount of cumulative variance that the
#' combination of retained components should explain.}
#'
#' \item{\code{"var"}:}{ selection of the principal components based
#' on a given amount of explained variance. In this case,
#' \code{value} must be a value (larger than 0 and below or equal to 1)
#' indicating the minimum amount of variance that a single component
#' should explain in order to be retained.}
#'
#' \item{\code{"manual"}:}{ for manually specifying a fixed number of
#' principal components. In this case, \code{value} must be a value
#' (larger than 0 and below the minimum dimension of \code{Xr} or
#' \code{Xr} and \code{Xu} combined) indicating the number of
#' components to be retained.}
#' }
#' The list
#' \code{list(method = "opc", value = min(dim(Xr), 40))} is the default.
#' Optionally, the \code{pc_selection} argument admits \code{"opc"} or
#' \code{"cumvar"} or \code{"var"} or \code{"manual"} as a single character
#' string. In such a case the default \code{"value"} when either \code{"opc"} or
#' \code{"manual"} are used is 40. When \code{"cumvar"} is used the default
#' \code{"value"} is set to 0.99 and when \code{"var"} is used, the default
#' \code{"value"} is set to 0.01.
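#' For instance, a specification retaining the components that jointly
#' explain 99\% of the variance would be:
#' \preformatted{
#' pc_selection = list(method = "cumvar", value = 0.99)
#' }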
#' @param group an optional factor (or character vector
#' that can be coerced to \code{\link[base]{factor}} by \code{as.factor}) that
#' assigns a group/class label to each observation in \code{Xr}
#' (e.g. groups can be given by spectra collected from the same batch of
#' measurements, from the same observation, from observations with very similar
#' origin, etc). This is taken into account for internal leave-group-out cross
#' validation for pls tuning (factor optimization) to avoid pseudo-replication.
#' When one observation is selected for cross-validation, all observations of
#' the same group are removed together and assigned to validation. The length
#' of the vector must be equal to the number of observations in the
#' reference/training set (i.e. \code{nrow(Xr)}). See details.
#' @param center a logical if the predictor variables must be centred at each
#' local segment (before regression). In addition, if \code{TRUE}, \code{Xr}
#' and \code{Xu} will be centred for dissimilarity computations.
#' @param scale a logical indicating if the predictor variables must be scaled
#' to unit variance at each local segment (before regression). In addition, if
#' \code{TRUE}, \code{Xr} and \code{Xu} will be scaled for dissimilarity
#' computations.
#' @param verbose a logical indicating whether or not to print a progress bar
#' for each observation to be predicted. Default is \code{TRUE}. Note: In case
#' parallel processing is used, these progress bars will not be printed.
#' @param documentation an optional character string that can be used to
#' describe anything related to the \code{mbl} call (e.g. description of the
#' input data). Default: \code{character()}. NOTE: this is an experimental
#' argument.
#' @param ... further arguments to be passed to the \code{\link{dissimilarity}}
#' function. See details.
#'
#' @details
#' The argument \code{spike} can be used to indicate what reference observations
#' in \code{Xr} must be kept in the neighborhood of every single \code{Xu}
#' observation. If a vector of length \mjeqn{m}{m} is passed to this argument,
#' this means that the \mjeqn{m}{m} original neighbors with the largest
#' dissimilarities to the target observations will be forced out of the
#' neighborhood. Spiking might be useful in cases where
#' some reference observations are known to be somehow related to the ones in
#' \code{Xu} and therefore might be relevant for fitting the local models. See
#' Guerrero et al. (2010) for an example on the benefits of spiking.
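#' A minimal sketch (hypothetical row indices):
#' \preformatted{
#' # force Xr rows 2, 15 and 30 into the neighborhood of every Xu observation
#' mbl(Xr, Yr, Xu, k = seq(40, 100, by = 20), spike = c(2, 15, 30))
#' }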
#'
#' The \code{mbl} function uses the \code{\link{dissimilarity}} function to
#' compute the dissimilarities between \code{Xr} and \code{Xu}. The dissimilarity
#' method to be used is specified in the \code{diss_method} argument.
#' Arguments to \code{\link{dissimilarity}} as well as further arguments to the
#' functions used inside \code{\link{dissimilarity}}
#' (i.e. \code{\link{ortho_diss}}, \code{\link{cor_diss}}, \code{\link{f_diss}},
#' \code{\link{sid}}) can be passed to those functions by using \code{...}.
#'
#' The \code{diss_usage} argument is used to specify whether the dissimilarity
#' information must be used within the local regressions and, if so, how.
#' When \code{diss_usage = "predictors"} the local (square symmetric)
#' dissimilarity matrix corresponding the selected neighborhood is used as
#' source of additional predictors (i.e the columns of this local matrix are
#' treated as predictor variables). In some cases this results in an improvement
#' of the prediction performance (Ramirez-Lopez et al., 2013a).
#' If \code{diss_usage = "weights"}, the neighbors of the query point
#' (\mjeqn{xu_{j}}{xu_j}) are weighted according to their dissimilarity to
#' \mjeqn{xu_{j}}{xu_j} before carrying out each local regression. The following
#' tricubic function (Cleveland and Delvin, 1988; Naes et al., 1990) is used for
#' computing the final weights based on the measured dissimilarities:
#'
#' \mjdeqn{W_{j} = (1 - v^{3})^{3}}{W_j = (1 - v^3)^3}
#'
#' where if \mjeqn{{xr_{i} \in }}{xr_i in} neighbors of \mjeqn{xu_{j}}{xu_j}:
#'
#' \mjdeqn{v_{j}(xu_{j}) = d(xr_{i}, xu_{j})}{v_j(xu_j) = d(xr_i, xu_j)}
#'
#' otherwise:
#'
#' \mjdeqn{v_{j}(xu_{j}) = 0}{v_j(xu_j) = 0}
#'
#' In the above formulas \mjeqn{d(xr_{i}, xu_{j})}{d(xr_i, xu_j)} represents the
#' dissimilarity between the query point and each object in \mjeqn{Xr}{Xr}.
#' When \code{diss_usage = "none"} is chosen the dissimilarity information is
#' not used.
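#' A minimal sketch of this weighting (arbitrary example values):
#' \preformatted{
#' v <- c(0.2, 0.5, 0.9, 0)  # scaled dissimilarities to the query point
#' w <- (1 - v^3)^3          # tricubic weights
#' }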
#'
#' The global Mahalanobis distance (a.k.a GH) is computed based on the scores
#' of a pls projection. A pls projection model is built with \code{Yr} and
#' \code{Xr} and this models is used to obtain the pls scores of the \code{Xu}
#' observations. The Mahalanobis distance between each \code{Xu} observation in
#' (the pls space) and the centre of \code{Xr} is then computed. The number of
#' pls components is optimized based on the parameters passed to the
#' \code{pc_selection} argument. In addition, the \code{mbl} function also
#' reports the GH distance for the observations in \code{Xr}.
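#'
#' In code, the GH computation can be sketched as follows (simplified from the
#' internals of this function):
#' \preformatted{
#' # scores <- pls_projection(Xr = Xr, Xu = Xu, Yr = Yr, ...)$scores
#' # gh <- f_diss(scores, Xu = t(colMeans(scores)),
#' #              diss_method = "mahalanobis",
#' #              center = FALSE, scale = FALSE)
#' }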
#'
#' Some aspects of the mbl process, such as the type of internal validation,
#' parameter tuning, what extra objects to return, permission for parallel
#' execution, prediction limits, etc, can be specified by using the
#' \code{\link{mbl_control}} function.
#'
#' By using the \code{group} argument one can specify groups of observations
#' that have something in common (e.g. observations with very similar origin).
#' The purpose of \code{group} is to avoid biased cross-validation results due
#' to pseudo-replication. This argument allows selecting calibration points
#' that are independent from the validation ones. In this regard, when
#' \code{validation_type = "local_cv"} (used in \code{\link{mbl_control}}
#' function), then the \code{p} argument refers to the percentage of groups of
#' observations (rather than single observations) to be retained in each
#' sampling iteration at each local segment.
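#'
#' For instance, a minimal grouping factor could be built as follows (object
#' names are illustrative only):
#' \preformatted{
#' # 100 reference observations collected at 20 sites (5 observations each)
#' my_groups <- as.factor(rep(1:20, each = 5))
#' }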
#'
#' @return a \code{list} of class \code{mbl} with the following components
#' (sorted either by \code{k} or \code{k_diss}):
#'
#' \itemize{
#' \item{\code{call}:}{ the call to mbl.}
#' \item{\code{cntrl_param}:}{ the list with the control parameters passed to
#' control.}
#' \item{\code{Xu_neighbors}:}{ a list containing two elements: a matrix of
#' \code{Xr} indices corresponding to the neighbors of \code{Xu} and a matrix
#' of dissimilarities between each \code{Xu} observation and its corresponding
#' neighbor in \code{Xr}.}
#' \item{\code{dissimilarities}:}{ a list with the method used to obtain the
#' dissimilarity matrices and the dissimilarity matrix corresponding to
#' \mjeqn{D(Xr, Xu)}{D(Xr, Xu)}. This object is returned only if the
#' \code{return_dissimilarity} argument in the \code{control} list was set
#' to \code{TRUE}.}
#' \item{\code{n_predictions}}{ the total number of observations predicted.}
#' \item{\code{gh}:}{ if \code{gh = TRUE}, a list containing the global
#' Mahalanobis distance values for the observations in \code{Xr} and \code{Xu}
#' as well as the results of the global pls projection object used to obtain
#' the GH values.}
#' \item{\code{validation_results}:}{ a list of validation results for
#' "local cross validation" (returned if the \code{validation_type} in
#' \code{control} list was set to \code{"local_cv"}),
#' "nearest neighbor validation" (returned if the \code{validation_type}
#' in \code{control} list was set to \code{"NNv"}) and
#' "Yu prediction statistics" (returned if \code{Yu} was supplied).}``
#' \item{\code{results}:}{ a list of data tables containing the results of the
#' predictions for each either \code{k} or \code{k_diss}. Each data table
#' contains the following columns:}
#' \itemize{
#' \item{\code{o_index}:}{ The index of the predicted observation.}
#' \item{\code{k_diss}:}{ This column is only output if the \code{k_diss}
#' argument is used. It indicates the corresponding dissimilarity threshold
#' for selecting the neighbors.}
#' \item{\code{k_original}:}{ This column is only output if the \code{k_diss}
#' argument is used. It indicates the number of neighbors that were originally
#' found when the given dissimilarity threshold is used.}
#' \item{\code{k}:}{ This column indicates the final number of neighbors
#' used.}
#' \item{\code{npls}:}{ This column is only output if the \code{pls}
#' regression method was used. It indicates the final number of pls
#' components used.}
#' \item{\code{min_pls}:}{ This column is only output if \code{wapls}
#' regression method was used. It indicates the final number of minimum pls
#' components used. If no optimization was set, it retrieves the original
#' minimum pls components passed to the \code{method} argument.}
#' \item{\code{max_pls}:}{ This column is only output if the \code{wapls}
#' regression method was used. It indicates the final number of maximum pls
#' components used. If no optimization was set, it retrieves the original
#' maximum pls components passed to the \code{method} argument.}
#' \item{\code{yu_obs}:}{ The input values given in \code{Yu} (the response
#' variable corresponding to the data to be predicted). If \code{Yu = NULL},
#' then \code{NA}s are retrieved.}
#' \item{\code{pred}:}{ The predicted Yu values.}
#' \item{\code{yr_min_obs}:}{ The minimum reference value (of the response
#' variable) in the neighborhood.}
#' \item{\code{yr_max_obs}:}{ The maximum reference value (of the response
#' variable) in the neighborhood.}
#' \item{\code{index_nearest_in_Xr}}{ The index of the nearest neighbor found
#' in \code{Xr}.}
#' \item{\code{index_farthest_in_Xr}}{ The index of the farthest neighbor
#' found in \code{Xr}.}
#' \item{\code{y_nearest}:}{ The reference value (\code{Yr}) corresponding to
#' the nearest neighbor found in \code{Xr}.}
#' \item{\code{y_nearest_pred}:}{ This column is only output if the
#' validation method in the object passed to \code{control} was set to
#' \code{"NNv"}. It represents the predicted value of the nearest neighbor
#' observation found in \code{Xr}. This prediction comes from a model fitted
#' with the remaining observations in the neighborhood of the target
#' observation in \code{Xu}.}
#' \item{\code{loc_rmse_cv}:}{ This column is only output if the validation
#' method in the object passed to \code{control} was set to
#' \code{'local_cv'}. It represents the RMSE of the cross-validation
#' computed for the neighborhood of the target observation in \code{Xu}.}
#' \item{\code{loc_st_rmse_cv}:}{ This column is only output if the
#' validation method in the object passed to \code{control} was set to
#' \code{'local_cv'}. It represents the standardized RMSE of the
#' cross-validation computed for the neighborhood of the target observation
#' in \code{Xu}.}
#' \item{\code{dist_nearest}:}{ The distance to the nearest neighbor.}
#' \item{\code{dist_farthest}:}{ The distance to the farthest neighbor.}
#' \item{\code{loc_n_components}:}{ This column is only output if the
#' dissimilarity method used is one of \code{"pca"}, \code{"pca.nipals"} or
#' \code{"pls"} and in addition the dissimilarities are requested to be
#' computed locally by passing \code{.local = TRUE} to the \code{mbl}
#' function.
#' See \code{.local} argument in the \code{\link{ortho_diss}} function.}
#' }
#' \item{\code{documentation}}{ A character string with the documentation
#' added.}
#' }
#' When the \code{k_diss} argument is used, the printed results show a table
#' with a column named \code{p_bounded}. It represents the percentage of
#' observations for which the neighbors selected by the given dissimilarity
#' threshold were outside the boundaries specified in the \code{k_range}
#' argument.
#' @author \href{https://orcid.org/0000-0002-5369-5120}{Leonardo Ramirez-Lopez}
#' and Antoine Stevens
#' @references
#' Cleveland, W. S., and Devlin, S. J. 1988. Locally weighted regression: an
#' approach to regression analysis by local fitting. Journal of the American
#' Statistical Association, 83, 596-610.
#'
#' Guerrero, C., Zornoza, R., Gómez, I., Mataix-Beneyto, J. 2010. Spiking of
#' NIR regional models using observations from target sites: Effect of model
#' size on prediction accuracy. Geoderma, 158(1-2), 66-77.
#'
#' Naes, T., Isaksson, T., Kowalski, B. 1990. Locally weighted regression and
#' scatter correction for near-infrared reflectance data. Analytical Chemistry
#' 62, 664-673.
#'
#' Ramirez-Lopez, L., Behrens, T., Schmidt, K., Stevens, A., Dematte, J.A.M.,
#' Scholten, T. 2013a. The spectrum-based learner: A new local approach for
#' modeling soil vis-NIR spectra of complex data sets. Geoderma 195-196,
#' 268-279.
#'
#' Ramirez-Lopez, L., Behrens, T., Schmidt, K., Viscarra Rossel, R., Dematte,
#' J. A. M., Scholten, T. 2013b. Distance and similarity-search metrics for
#' use with soil vis-NIR spectra. Geoderma 199, 43-53.
#'
#' Rasmussen, C.E., Williams, C.K. Gaussian Processes for Machine Learning.
#' Massachusetts Institute of Technology: MIT-Press, 2006.
#'
#' Shenk, J., Westerhaus, M., and Berzaghi, P. 1997. Investigation of a LOCAL
#' calibration procedure for near infrared instruments. Journal of Near
#' Infrared Spectroscopy, 5, 223-232.
#'
#' @seealso \code{\link{mbl_control}}, \code{\link{f_diss}},
#' \code{\link{cor_diss}}, \code{\link{sid}}, \code{\link{ortho_diss}},
#' \code{\link{search_neighbors}}
#' @examples
#' \donttest{
#' library(prospectr)
#' data(NIRsoil)
#'
#' # Preprocess the data using detrend plus first derivative with Savitzky and
#' # Golay smoothing filter
#' sg_det <- savitzkyGolay(
#' detrend(NIRsoil$spc,
#' wav = as.numeric(colnames(NIRsoil$spc))
#' ),
#' m = 1,
#' p = 1,
#' w = 7
#' )
#'
#' NIRsoil$spc_pr <- sg_det
#'
#' # split into training and testing sets
#' test_x <- NIRsoil$spc_pr[NIRsoil$train == 0 & !is.na(NIRsoil$CEC), ]
#' test_y <- NIRsoil$CEC[NIRsoil$train == 0 & !is.na(NIRsoil$CEC)]
#'
#' train_y <- NIRsoil$CEC[NIRsoil$train == 1 & !is.na(NIRsoil$CEC)]
#' train_x <- NIRsoil$spc_pr[NIRsoil$train == 1 & !is.na(NIRsoil$CEC), ]
#'
#' # Example 1
#' # A mbl implemented in Ramirez-Lopez et al. (2013,
#' # the spectrum-based learner)
#' # Example 1.1
#' # An example where Yu is supposed to be unknown, but the Xu
#' # (spectral variables) are known
#' my_control <- mbl_control(validation_type = "NNv")
#'
#' ## The neighborhood sizes to test
#' ks <- seq(40, 140, by = 20)
#'
#' sbl <- mbl(
#' Xr = train_x,
#' Yr = train_y,
#' Xu = test_x,
#' k = ks,
#' method = local_fit_gpr(),
#' control = my_control,
#' scale = TRUE
#' )
#' sbl
#' plot(sbl)
#' get_predictions(sbl)
#'
#' # Example 1.2
#' # If Yu is actually known...
#' sbl_2 <- mbl(
#' Xr = train_x,
#' Yr = train_y,
#' Xu = test_x,
#' Yu = test_y,
#' k = ks,
#' method = local_fit_gpr(),
#' control = my_control
#' )
#' sbl_2
#' plot(sbl_2)
#'
#' # Example 2
#' # the LOCAL algorithm (Shenk et al., 1997)
#' local_algorithm <- mbl(
#' Xr = train_x,
#' Yr = train_y,
#' Xu = test_x,
#' Yu = test_y,
#' k = ks,
#' method = local_fit_wapls(min_pls_c = 3, max_pls_c = 15),
#' diss_method = "cor",
#' diss_usage = "none",
#' control = my_control
#' )
#' local_algorithm
#' plot(local_algorithm)
#'
#' # Example 3
#' # A variation of the LOCAL algorithm (using the optimized pc
#' # dissimilarity matrix) with the dissimilarity matrix used as source of
#' # additional predictors
#' local_algorithm_2 <- mbl(
#' Xr = train_x,
#' Yr = train_y,
#' Xu = test_x,
#' Yu = test_y,
#' k = ks,
#' method = local_fit_wapls(min_pls_c = 3, max_pls_c = 15),
#' diss_method = "pca",
#' diss_usage = "predictors",
#' control = my_control
#' )
#' local_algorithm_2
#' plot(local_algorithm_2)
#'
#' # Example 4
#' # Running the mbl function in parallel with example 2
#'
#' n_cores <- 2
#'
#' if (parallel::detectCores() < 2) {
#' n_cores <- 1
#' }
#'
#' # Alternatively:
#' # n_cores <- parallel::detectCores() - 1
#' # if (n_cores == 0) {
#' # n_cores <- 1
#' # }
#'
#' library(doParallel)
#' clust <- makeCluster(n_cores)
#' registerDoParallel(clust)
#'
#' # Alternatively:
#' # library(doSNOW)
#' # clust <- makeCluster(n_cores, type = "SOCK")
#' # registerDoSNOW(clust)
#' # getDoParWorkers()
#'
#' local_algorithm_par <- mbl(
#' Xr = train_x,
#' Yr = train_y,
#' Xu = test_x,
#' Yu = test_y,
#' k = ks,
#' method = local_fit_wapls(min_pls_c = 3, max_pls_c = 15),
#' diss_method = "cor",
#' diss_usage = "none",
#' control = my_control
#' )
#' local_algorithm_par
#'
#' registerDoSEQ()
#' try(stopCluster(clust))
#'
#' # Example 5
#' # Using local pls distances
#' with_local_diss <- mbl(
#' Xr = train_x,
#' Yr = train_y,
#' Xu = test_x,
#' Yu = test_y,
#' k = ks,
#' method = local_fit_wapls(min_pls_c = 3, max_pls_c = 15),
#' diss_method = "pls",
#' diss_usage = "predictors",
#' control = my_control,
#' .local = TRUE,
#'   pre_k = 150
#' )
#' with_local_diss
#' plot(with_local_diss)
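#'
#' # Example 6
#' # A minimal sketch (not among the original examples) combining two features
#' # described in the details section: spiking and dissimilarity-based neighbor
#' # weighting. It reuses train_x, train_y, test_x, ks and my_control from
#' # above; the spiked indices are arbitrary.
#' spiked_mbl <- mbl(
#'   Xr = train_x,
#'   Yr = train_y,
#'   Xu = test_x,
#'   k = ks,
#'   spike = c(1, 5, 10), # force these Xr rows into every neighborhood
#'   method = local_fit_gpr(),
#'   diss_usage = "weights", # tricubic weighting, see details
#'   control = my_control
#' )
#' spiked_mbl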
#' }
#' @export
######################################################################
# resemble
# Copyright (C) 2014 Leonardo Ramirez-Lopez and Antoine Stevens
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
######################################################################
## History:
## 09.03.2014 Leo     Doc examples were formatted with a max. line width
## 13.03.2014 Antoine The explanation of the cores argument was modified
## 23.04.2014 Leo Added default variable names when they are missing
## 08.09.2014 Leo A bug related with the computations of the weights
## for wapls2 was fixed
## 23.09.2014 Leo A bug that prevented the mbl function of using
## the 'dissimilarityM' argument was fixed
## 03.10.2014 Antoine Fix bug when scale = T and add allow_parallel argument
## 12.10.2014 Leo noise_variance was missing in the locFit function used for
## the nearest neighbor validation
## 16.11.2015 Leo     Now the scale argument for gaussian process
##                    indicates that both x and y variables must be scaled
##                    to zero mean and unit variance. Before, only the x
##                    variables were scaled to unit variance
## 18.11.2015 Leo The mbl examples were corrected (unnecessary arguments
## were deleted)
## 01.12.2015 Leo The wapls2 was removed from the options of methods
## 10.12.2015 Leo The locFit function has been renamed to locFitnpred
## and now it always performs a prediction.
## 10.12.2015 Leo     Several redundant/repeated sanity checks (occurring
##                    when combining functions) were deactivated. This does not
##                    impact the final sanity checks of the overall mbl
##                    function.
## 11.12.2015 Leo A bug when the k_diss argument was used was corrected.
##                    The results about the percentage of observations that
##                    were bounded by k_range were not correct.
## 11.12.2015 Leo The explanation of the output variables in the results
## element of the mbl objects was extended. The rep variable
## is not output anymore in the results element.
## 03.01.2016 Leo Now it is possible to optimize the max and min pls
## components of wapls1
## 04.02.2016 Leo     A strange bug was fixed. The object pred_obs
##                    (in the parallel loop) had a variable named pls_c
##                    (pred_obs$pls_c). When method = "gpr" was used
##                    and mbl was running in parallel, it returned an error
##                    saying that pls_c was missing! This was perhaps due to
##                    the fact that pls_c was both a variable (in pred_obs)
##                    and an argument.
## 16.02.2016 Leo Bug fixed. It caused the mbl function to return an error
## (sometimes) when the group argument was used together
## with local cross-validation. The errors occurred when
## groups containing very few observations (e.g. 1 or 2) were used.
## 09.03.2018 Leo A new output (XuneighborList) has been added. It was
## requested by Eva Ampe and Miriam de Winter.
## 16.05.2018 Leo A parameter called documentation has been added.
## 21.06.2020 Leo - pls.max.iter, pls.tol and noise.v were moved to mbl from
## mbl_control()
## - Argument scaled (from mbl_control()) renamed to .scale
## and moved to mbl
## - new arguments: gh and spike
## - order of the Yr, Xr, Yu and Xu arguments has changed to
## Xr, Yr, Xu and Yu
## - input type for the argument method has changed.
## Previously it received a character string indicating
## the type of local regression (i.e. "pls",
## "wapls1" or "gpr"). Now it receives an object of class
## local_fit which is output by the new local_fit functions.
## - dissimilarityM has been deprecated. It was used to pass
## a dissimilarity matrix computed outside the mbl
## function. This can be done now with the new argument
## diss_method of mbl which was previously named "sm" and
## it was in mbl_control()
## - the warning message coming from the foreach loop about no
## parallel backend registered is now avoided by checking
## first if there is any parallel backend registered
## 22.06.2020 Leo - Updated examples
## TODO:
## A bug was detected in ortho_diss.
## Use the following code to reproduce it
# mfr <- read.nircal("C:/raml/polco/FranceMIlk.nir")
#
# mfr <- mfr[mfr$Protein < 17,]
# mfr <- mfr[mfr$Protein >7,]
# cdist <- ortho_diss(Xr = savitzkyGolay(standardNormalVariate(mfr$spc[,as.character(wavs[wavs<9000])]), m = 1, p = 1, w = 5),
# X2 = savitzkyGolay(standardNormalVariate(colanta$spc[,as.character(wavs[wavs<9000])]), m = 1, p = 1, w = 5),
# Yr = mfr$Moisture,
# pc_selection = list("opc", 10),
# method = "pca",
# local = FALSE,
# center = TRUE, scaled = FALSE,
# compute_all = FALSE, cores = 1)
## The error thrown is:
## Error in svd(x = X0) : infinite or missing values in 'x'
mbl <- function(Xr, Yr, Xu, Yu = NULL,
k, k_diss, k_range,
spike = NULL,
method = local_fit_wapls(
min_pls_c = 3,
max_pls_c = min(dim(Xr), 15)
),
diss_method = "pca",
diss_usage = "predictors",
gh = TRUE,
pc_selection = list(
method = "opc",
value = min(dim(Xr), 40)
),
control = mbl_control(),
group = NULL,
center = TRUE,
scale = FALSE,
verbose = TRUE,
documentation = character(),
...) {
f_call <- match.call()
"%mydo%" <- get("%do%")
if (control$allow_parallel & getDoParRegistered()) {
"%mydo%" <- get("%dopar%")
}
if (!is.logical(verbose)) {
stop("'verbose' must be logical")
}
if (missing(k)) {
k <- NULL
}
if (missing(k_diss)) {
k_diss <- NULL
}
if (missing(k_range)) {
k_range <- NULL
}
input_dots <- list(...)
ini_cntrl <- control
ortho_diss_methods <- c("pca", "pca.nipals", "pls")
if (".local" %in% names(input_dots)) {
if (isTRUE(input_dots$.local)) {
if (!"pre_k" %in% names(input_dots)) {
stop("When '.local = TRUE', argument 'pre_k' needs to be provided. See ortho_diss documentation")
}
if (!is.null(k)) {
if (input_dots$pre_k < max(k)) {
stop("'k' cannot be larger than 'pre_k'")
}
}
}
}
# Sanity checks
if (!is.logical(center)) {
stop("'center' argument must be logical")
}
if (!is.logical(scale)) {
stop("'scale' argument must be logical")
}
if (ncol(Xr) != ncol(Xu)) {
stop("The number of predictor variables in Xr must be equal to the number of variables in Xu")
}
if (ncol(Xr) < 4) {
stop("This function works only with matrices containing more than 3 predictor variables")
}
if (length(Yr) != nrow(Xr)) {
stop("length(Yr) must be equal to nrow(Xr)")
}
if (any(is.na(Yr))) {
stop("The current version of the mbl function does not handle NAs in the response variable of the reference observations (Yr)")
}
Xr <- as.matrix(Xr)
Xu <- as.matrix(Xu)
Yr <- as.matrix(Yr)
n_xr <- nrow(Xr)
n_xu <- nrow(Xu)
n_total <- n_xr + n_xu
rownames(Xr) <- 1:nrow(Xr)
rownames(Xu) <- 1:nrow(Xu)
if (is.null(colnames(Xr))) {
colnames(Xr) <- 1:ncol(Xr)
}
if (is.null(colnames(Xu))) {
colnames(Xu) <- 1:ncol(Xu)
}
if (sum(!colnames(Xu) == colnames(Xr)) != 0) {
stop("Variable names in Xr do not match those in Xu")
}
diss_methods <- c(
"pca", "pca.nipals", "pls", "cor",
"euclid", "cosine", "sid"
)
if (!is.character(diss_method) & !is.matrix(diss_method)) {
mtds <- paste(diss_methods, collapse = ", ")
stop(paste0(
"'diss_method' must be one of: ",
mtds,
" or a matrix"
))
}
if (!is.null(group)) {
if (length(group) != nrow(Xr)) {
stop(paste0(
"The length of 'group' must be equal to the number of ",
"observations in 'Xr'"
))
}
}
if (length(pc_selection) != 2 | !is.list(pc_selection)) {
stop("'pc_selection' must be a list of length 2")
}
if (!all(names(pc_selection) %in% c("method", "value")) | is.null(names(pc_selection))) {
names(pc_selection)[sapply(pc_selection, FUN = is.character)] <- "method"
names(pc_selection)[sapply(pc_selection, FUN = is.numeric)] <- "value"
}
pc_sel_method <- match.arg(pc_selection$method, c(
"opc",
"var",
"cumvar",
"manual"
))
pc_threshold <- pc_selection$value
if (pc_sel_method %in% c("opc", "manual") & pc_selection$value > min(n_total, ncol(Xr))) {
warning(paste0(
"When pc_selection$method is 'opc' or 'manual', the value ",
"specified in \npc_selection$value cannot be larger than ",
"min(nrow(Xr) + nrow(Xu), ncol(Xr)) \n(i.e ",
min(n_total, ncol(Xr)),
"). Therefore the value was reset to ",
min(n_total, ncol(Xr))
))
pc_threshold <- min(n_total, ncol(Xr))
}
match.arg(diss_usage, c("predictors", "weights", "none"))
if (is.null(k) & is.null(k_diss)) {
stop("Either k or k_diss must be specified")
}
k_max <- NULL
if (!is.null(k)) {
if (!is.null(k_diss)) {
stop("Only one of k or k_diss can be specified")
}
if (!is.numeric(k)) {
stop("k must be a vector of integers")
} else {
k <- unique(sort(ceiling(k)))
}
k <- sort(k)
k_max <- max(k)
}
k_diss_max <- NULL
if (!is.null(k_diss)) {
k_diss <- unique(sort(k_diss))
if (is.null(k_range)) {
stop("If the k_diss argument is used, k_range must be specified")
}
if (length(k_range) != 2 | !is.numeric(k_range) | any(k_range < 1)) {
stop("k_range must be a vector of length 2 which specifies the minimum (first value, larger than 0) and the maximum (second value) number of neighbors")
}
k_range <- sort(k_range)
k_min_range <- as.integer(k_range[1])
k_max_range <- as.integer(k_range[2])
if (k_min_range < 4) {
stop("Minimum number of nearest neighbors allowed is 4")
}
if (k_max_range > nrow(Xr)) {
stop("Maximum number of nearest neighbors cannot exceed the number of reference observations")
}
k_diss_max <- max(k_diss)
}
if (".local" %in% names(input_dots)) {
    if (isTRUE(input_dots$.local)) {
if (!"pre_k" %in% names(input_dots)) {
stop(paste0(
"When .local = TRUE (passed to the ortho_diss method), the ",
"'pre_k' argument must be specified"
))
}
      if (input_dots$pre_k < max(k_max, k_range)) {
stop(paste0(
"pre_k must be larger than ",
ifelse(is.null(k), "max(k_range)", "max(k)")
))
}
}
}
if (!"local_fit" %in% class(method)) {
stop("Object passed to method must be of class local_fit")
}
validation_type <- control$validation_type
is_local_cv <- "local_cv" %in% validation_type
is_nnv_val <- "NNv" %in% validation_type
if (all(c("local_cv", "NNv") %in% control$validation_type)) {
validation_type <- "both"
}
if (validation_type %in% c("NNv", "both") & nrow(Xu) < 3) {
stop(paste0(
"For nearest neighbor validation (control$validation_type == 'NNv')",
" Xu must contain at least 3 observations"
))
}
if (!is.null(Yu)) {
Yu <- as.matrix(Yu)
if (length(Yu) != nrow(Xu)) {
stop("Number of observations in Yu and Xu differ")
}
}
if (!is.null(k)) {
k <- as.integer(k)
if (min(k) < 4) {
stop("Minimum number of nearest neighbors allowed is 3")
}
if (max(k) > nrow(Xr)) {
stop(paste0(
"The number of nearest neighbors cannot exceed the number ",
"of observations in Xr"
))
}
}
has_projection <- FALSE
if (!is.matrix(diss_method)) {
# when .local = TRUE, k_max is replaced with k_pre inside get_neighbor_info()
neighborhoods <- get_neighbor_info(
Xr = Xr, Xu = Xu,
diss_method = diss_method, Yr = Yr,
k = k_max, k_diss = k_diss_max,
k_range = k_range,
spike = spike, pc_selection = pc_selection,
return_dissimilarity = control$return_dissimilarity,
center = center, scale = scale,
gh = gh, diss_usage = diss_usage,
allow_parallel = control$allow_parallel,
...
)
diss_xr_xu <- neighborhoods$dissimilarity
if (!is.null(neighborhoods$projection)) {
diss_xr_xu_projection <- neighborhoods$projection
has_projection <- TRUE
}
} else {
diss_xr_xr <- NULL
dim_diss <- dim(diss_method)
if (diss_usage == "predictors") {
if (diff(dim_diss) != 0 | dim_diss[1] != n_total | any(diag(diss_method) != 0)) {
stop(paste0(
"If a matrix is passed to 'diss_method' ",
"and diss_usage = 'predictors', this matrix must be ",
"squared symmetric zeroes in its diagonal"
))
}
diss_xr_xr <- diss_method[1:nrow(Xr), 1:nrow(Xr)]
diss_xr_xu <- diss_method[1:nrow(Xr), (1 + nrow(Xr)):ncol(diss_method)]
}
if (diss_usage %in% c("weights", "none")) {
if (dim_diss[1] != n_xr & dim_diss[2] != n_xu) {
stop(paste0(
"If a matrix is passed to 'diss_method' ",
"and 'diss_usage' argument is set to either 'weights' or ",
"'none', the number of rows and columns of this matrix ",
"must be equal to the number of rows of 'Xr' and the ",
"number of rows of 'Xu' respectively"
))
}
      diss_xr_xu <- diss_method
    }
    # build the neighborhoods from the user-supplied dissimilarity matrix
    neighborhoods <- diss_to_neighbors(diss_xr_xu,
      k = k, k_diss = k_diss, k_range = k_range,
      spike = NULL,
      return_dissimilarity = control$return_dissimilarity
    )
    if (gh) {
neighborhoods$gh$projection <- pls_projection(
Xr = Xr, Xu = Xu,
Yr = Yr,
pc_selection = pc_selection,
scale = scale, ...
)
neighborhoods$gh$gh_Xr <- f_diss(neighborhoods$gh$projection$scores,
Xu = t(colMeans(neighborhoods$gh$projection$scores)),
diss_method = "mahalanobis",
center = FALSE, scale = FALSE
)
neighborhoods$gh$gh_Xu <- neighborhoods$gh$gh_Xr[-c(1:nrow(Xr))]
neighborhoods$gh$gh_Xr <- neighborhoods$gh$gh_Xr[c(1:nrow(Xr))]
neighborhoods$gh <- neighborhoods$gh[c("gh_Xr", "gh_Xu", "projection")]
}
neighborhoods$diss_xr_xr <- diss_xr_xr
rm(diss_xr_xr)
gc()
}
if (!is.null(k)) {
smallest_neighborhood <- neighborhoods$neighbors[1:min(k), , drop = FALSE]
smallest_n_neighbors <- colSums(!is.na(smallest_neighborhood))
}
if (!is.null(k_diss)) {
min_diss <- neighborhoods$neighbors_diss <= min(k_diss)
if (!is.null(spike)) {
min_diss[1:length(spike), ] <- TRUE
}
smallest_neighborhood <- neighborhoods$neighbors
smallest_neighborhood[!min_diss] <- NA
smallest_n_neighbors <- colSums(!is.na(smallest_neighborhood))
smallest_n_neighbors[smallest_n_neighbors < min(k_range)] <- min(k_range)
smallest_n_neighbors[smallest_n_neighbors > max(k_range)] <- max(k_range)
}
if (is_local_cv) {
min_n_samples <- floor(min(smallest_n_neighbors) * control$p) - 1
min_cv_samples <- floor(min(k, k_range) * (1 - control$p))
if (min_cv_samples < 3) {
stop(paste0(
"Local cross-validation requires at least 3 observations in ",
"the hold-out set, the current cross-validation parameters ",
"leave less than 3 observations in some neighborhoods."
))
}
} else {
min_n_samples <- smallest_n_neighbors - 1
}
if (method$method %in% c("pls", "wapls")) {
max_pls <- max(method$pls_c)
if (any(min_n_samples < max_pls)) {
stop(paste0(
"More pls components than observations in some neighborhoods.\n",
"If 'local_cv' is being used, consider that some ",
"observations \nin the neighborhoods are hold-out for local ",
"validation"
))
}
}
if (!".local" %in% names(input_dots)) {
iter_neighborhoods <- ith_mbl_neighbor(
Xr = Xr, Xu = Xu, Yr = Yr, Yu = Yu,
diss_usage = diss_usage,
neighbor_indices = neighborhoods$neighbors,
neighbor_diss = neighborhoods$neighbors_diss,
diss_xr_xr = neighborhoods$diss_xr_xr,
group = group
)
} else {
iter_neighborhoods <- ith_mbl_neighbor(
Xr = Xr, Xu = Xu, Yr = Yr, Yu = Yu,
diss_usage = "none",
neighbor_indices = neighborhoods$neighbors,
neighbor_diss = neighborhoods$neighbors_diss,
group = group
)
}
r_fields <- c(
"o_index", "k_diss", "k_original", "k", "npls", "min_pls", "max_pls",
"yu_obs", "pred", "yr_min_obs", "yr_max_obs",
"index_nearest_in_Xr", "index_farthest_in_Xr",
"y_nearest", "y_nearest_pred",
"y_farthest", "diss_nearest", "diss_farthest",
"loc_rmse_cv", "loc_st_rmse_cv", "loc_n_components", "rep"
)
n_ith_result <- ifelse(is.null(k_diss), length(k), length(k_diss))
template_pred_results <- data.table(matrix(NA, n_ith_result, length(r_fields),
dimnames = list(NULL, r_fields)
))
template_pred_results$rep[1] <- 0
if (!is.null(k_diss)) {
template_pred_results$k_diss <- k_diss
} else {
template_pred_results$k <- k
}
pg_bar_width <- 10
# to_erase <- getOption("width") - pg_bar_width - (2 * nchar(nrow(Xu))) - 2
to_erase <- pg_bar_width + (2 * nchar(nrow(Xu))) + 8
to_erase <- paste(rep(" ", to_erase), collapse = "")
if (verbose){
cat("\033[32m\033[3mPredicting...\n\033[23m\033[39m")
}
n_iter <- nrow(Xu)
pred_obs <- foreach(
i = 1:n_iter, ith_observation = iter_neighborhoods,
.inorder = FALSE,
.export = c(
"ortho_diss", "fit_and_predict", "pls_cv",
"get_col_sds", "get_wapls_weights"
),
.noexport = c("Xr", "Xu")
) %mydo% {
ith_pred_results <- template_pred_results
additional_results <- NULL
ith_pred_results$o_index[] <- i
if (".local" %in% names(input_dots) & diss_method %in% ortho_diss_methods) {
ith_observation <- get_ith_local_neighbors(
ith_xr = ith_observation$ith_xr,
ith_xu = ith_observation$ith_xu,
ith_yr = ith_observation$ith_yr,
ith_yu = ith_observation$ith_yu,
diss_usage = diss_usage,
ith_neig_indices = ith_observation$ith_neig_indices,
k = k_max, k_diss = k_diss_max,
k_range = k_range,
spike = spike,
diss_method = diss_method,
pc_selection = pc_selection,
center = center, scale = scale,
ith_group = ith_observation$ith_group,
...
)
ith_pred_results$loc_n_components[] <- ith_observation$ith_components
additional_results$ith_neig_indices <- ith_observation$ith_neig_indices
additional_results$ith_neigh_diss <- ith_observation$ith_neigh_diss
}
if (verbose) {
cat(paste0("\033[34m\033[3m", i, "/", n_iter, "\033[23m\033[39m"))
pb <- txtProgressBar(width = pg_bar_width, char = "\033[34m_\033[39m")
}
if (!is.null(k_diss)) {
ith_diss <- ith_observation$ith_neigh_diss
if (!is.null(spike)) {
ith_diss[1:length(spike)] <- 0
}
ith_pred_results$k_original <- sapply(k_diss, FUN = function(x, d) sum(d < x), d = ith_diss)
ith_pred_results$k <- ith_pred_results$k_original
ith_pred_results$k[ith_pred_results$k_original < min(k_range)] <- min(k_range)
ith_pred_results$k[ith_pred_results$k_original > max(k_range)] <- max(k_range)
} else {
ith_pred_results$k <- k
}
for (kk in 1:nrow(ith_pred_results)) {
if (verbose) {
setTxtProgressBar(pb, kk / nrow(ith_pred_results))
}
# If the sample has not been predicted before,
# then create a model and predict it (useful only when k_diss is used)
current_k <- ith_pred_results$k[kk]
if (current_k != ifelse(kk == 1, 0, ith_pred_results$k[kk - 1])) {
if (diss_usage == "predictors") {
keep_cols <- c(
1:current_k,
(1 + ith_observation$n_k):ncol(ith_observation$ith_xr)
)
i_k_xr <- ith_observation$ith_xr[1:current_k, keep_cols]
i_k_xu <- ith_observation$ith_xu[, keep_cols, drop = FALSE]
} else {
i_k_xr <- ith_observation$ith_xr[1:current_k, ]
i_k_xu <- ith_observation$ith_xu
}
i_k_yr <- ith_observation$ith_yr[1:current_k, , drop = FALSE]
i_k_yu <- ith_observation$ith_yu
kth_diss <- ith_observation$ith_neigh_diss[1:current_k]
ith_pred_results$rep[kk] <- 0
ith_yr_range <- range(i_k_yr)
ith_pred_results$yr_min_obs[kk] <- ith_yr_range[1]
ith_pred_results$yr_max_obs[kk] <- ith_yr_range[2]
ith_pred_results$diss_farthest[kk] <- max(kth_diss)
ith_pred_results$diss_nearest[kk] <- min(kth_diss)
ith_pred_results$y_farthest[kk] <- i_k_yr[which.max(kth_diss)]
ith_pred_results$y_nearest[kk] <- i_k_yr[which.min(kth_diss)]
ith_pred_results$index_nearest_in_Xr[kk] <- ith_observation$ith_neig_indices[which.min(kth_diss)]
ith_pred_results$index_farthest_in_Xr[kk] <- ith_observation$ith_neig_indices[which.max(kth_diss)]
if (!is.null(group)) {
i_k_group <- factor(ith_observation$ith_group[1:current_k])
} else {
i_k_group <- NULL
}
if (diss_usage == "weights") {
# Weights are defined according to a tricubic function
# as in Cleveland and Devlin (1988) and Naes and Isaksson (1990).
std_kth_diss <- kth_diss / max(kth_diss)
kth_weights <- (1 - (std_kth_diss^3))^3
kth_weights[which(kth_weights == 0)] <- 1e-04
} else {
kth_weights <- rep(1, current_k)
}
# local fit
i_k_pred <- fit_and_predict(
x = i_k_xr,
y = i_k_yr,
pred_method = method$method,
scale = scale,
pls_c = method$pls_c,
weights = kth_weights,
newdata = i_k_xu,
CV = is_local_cv,
tune = control$tune_locally,
group = i_k_group,
p = control$p,
number = control$number,
noise_variance = method$noise_variance,
range_prediction_limits = control$range_prediction_limits,
pls_max_iter = 1,
pls_tol = 1e-6
)
ith_pred_results$pred[kk] <- i_k_pred$prediction
selected_pls <- NULL
if (is_local_cv) {
if (control$tune_locally) {
best_row <- which.min(i_k_pred$validation$cv_results$rmse_cv)
} else {
best_row <- ifelse(method$method == "pls", method$pls_c, 1)
}
if (method$method == "pls") {
ith_pred_results$npls[kk] <- i_k_pred$validation$cv_results$npls[best_row]
selected_pls <- ith_pred_results$npls[kk]
}
if (method$method == "wapls") {
ith_pred_results$min_pls[kk] <- i_k_pred$validation$cv_results$min_component[best_row]
ith_pred_results$max_pls[kk] <- i_k_pred$validation$cv_results$max_component[best_row]
selected_pls <- i_k_pred$validation$cv_results[best_row, 1:2]
}
ith_pred_results$loc_rmse_cv[kk] <- i_k_pred$validation$cv_results$rmse_cv[best_row]
ith_pred_results$loc_st_rmse_cv[kk] <- i_k_pred$validation$cv_results$st_rmse_cv[best_row]
} else {
if (method$method == "pls") {
ith_pred_results$npls[kk] <- method$pls_c
selected_pls <- ith_pred_results$npls[kk]
}
if (method$method == "wapls") {
ith_pred_results$min_pls[kk] <- method$pls_c[[1]]
ith_pred_results$max_pls[kk] <- method$pls_c[[2]]
selected_pls <- method$pls_c
}
}
if (is_nnv_val) {
if (!is.null(group)) {
out_group <- which(i_k_group == i_k_group[[ith_observation$local_index_nearest]])
} else {
out_group <- ith_observation$local_index_nearest
}
nearest_pred <- fit_and_predict(
x = i_k_xr[-out_group, ],
y = i_k_yr[-out_group, , drop = FALSE],
pred_method = method$method,
scale = scale,
pls_c = selected_pls,
noise_variance = method$noise_variance,
newdata = i_k_xr[ith_observation$local_index_nearest, , drop = FALSE],
CV = FALSE,
tune = FALSE,
range_prediction_limits = control$range_prediction_limits,
pls_max_iter = 1,
pls_tol = 1e-6
)$prediction
ith_pred_results$y_nearest_pred[kk] <- nearest_pred / kth_weights[1]
}
} else {
ith_k_diss <- ith_pred_results$k_diss[kk]
ith_pred_results[kk, ] <- ith_pred_results[kk - 1, ]
ith_pred_results$rep[kk] <- 1
ith_pred_results$k_diss[kk] <- ith_k_diss
}
}
if (verbose) {
if (kk == nrow(ith_pred_results) & i != n_iter) {
cat("\r", to_erase, "\r")
}
if (i == n_iter) {
cat("\n")
}
# do not use close() (it prints a new line)
## close(pb)
}
list(
results = ith_pred_results,
additional_results = additional_results
)
}
iteration_order <- sapply(pred_obs,
FUN = function(x) x$results$o_index[1]
)
pred_obs <- pred_obs[order(iteration_order, decreasing = FALSE)]
results_table <- do.call("rbind", lapply(pred_obs,
FUN = function(x) x$results
))
if (".local" %in% names(input_dots) & diss_method %in% ortho_diss_methods) {
diss_xr_xu <- do.call(
"cbind",
      lapply(seq_along(pred_obs),
FUN = function(x, m, ii) {
idc <- x[[ii]]$additional_results$ith_neig_indices
d <- x[[ii]]$additional_results$ith_neigh_diss
m[idc] <- d
m
},
x = pred_obs,
m = matrix(NA, nrow(Xr), 1)
)
)
class(diss_xr_xu) <- c("local_ortho_diss", "matrix")
dimnames(diss_xr_xu) <- list(
paste0("Xr_", 1:nrow(diss_xr_xu)),
paste0("Xu_", 1:ncol(diss_xr_xu))
)
neighborhoods$neighbors <- do.call(
"cbind", lapply(iteration_order,
FUN = function(x, m, ii) {
idc <- x[[ii]]$additional_results$ith_neig_indices
m[1:length(idc)] <- idc
m
},
x = pred_obs,
m = matrix(NA, max(results_table$k), 1)
)
)
}
out <- c(
if (is.null(Yu)) {
"yu_obs"
},
if (all(is.na(results_table$k_original))) {
"k_original"
},
if (!(validation_type %in% c("NNv", "both"))) {
"y_nearest_pred"
},
if (method$method != "wapls") {
c("min_pls", "max_pls")
},
if (method$method != "pls") {
"npls"
},
if (!(validation_type %in% c("local_cv", "both"))) {
c("loc_rmse_cv", "loc_st_rmse_cv")
},
"rep"
)
results_table[, (out) := NULL]
if (!is.null(k_diss)) {
param <- "k_diss"
results_table <- lapply(get(param),
FUN = function(x, sel, i) x[x[[sel]] == i, ],
x = results_table,
sel = param
)
names(results_table) <- paste0("k_diss_", k_diss)
p_bounded <- sapply(results_table,
FUN = function(x, k_range) {
sum(x$k_original <= k_range[1] | x$k_original >= k_range[2])
},
k_range = k_range
)
col_ks <- data.table(
k_diss = k_diss,
p_bounded = paste0(round(100 * p_bounded / nrow(Xu), 3), "%")
)
} else {
param <- "k"
results_table <- lapply(get(param),
FUN = function(x, sel, i) x[x[[sel]] == i, ],
x = results_table,
sel = param
)
names(results_table) <- paste0("k_", k)
col_ks <- data.table(k = k)
}
if (validation_type %in% c("NNv", "both")) {
nn_stats <- function(x) {
nn_rmse <- (mean((x$y_nearest - x$y_nearest_pred)^2))^0.5
nn_st_rmse <- nn_rmse / diff(range(x$y_nearest))
nn_rsq <- (cor(x$y_nearest, x$y_nearest_pred))^2
c(nn_rmse = nn_rmse, nn_st_rmse = nn_st_rmse, nn_rsq = nn_rsq)
}
loc_nn_res <- do.call("rbind", lapply(results_table, FUN = nn_stats))
loc_nn_res <- cbind(col_ks,
rmse = loc_nn_res[, "nn_rmse"],
st_rmse = loc_nn_res[, "nn_st_rmse"],
r2 = loc_nn_res[, "nn_rsq"]
)
} else {
loc_nn_res <- NULL
}
if (validation_type %in% c("local_cv", "both")) {
mean_loc_res <- function(x) {
mean_loc_rmse <- mean(x$loc_rmse_cv)
mean_loc_st_rmse <- mean(x$loc_st_rmse_cv)
c(loc_rmse = mean_loc_rmse, loc_st_rmse = mean_loc_st_rmse)
}
loc_res <- do.call("rbind", lapply(results_table, mean_loc_res))
loc_res <- cbind(col_ks,
rmse = loc_res[, "loc_rmse"],
st_rmse = loc_res[, "loc_st_rmse"]
)
} else {
loc_res <- NULL
}
if (!is.null(Yu)) {
for (i in 1:length(results_table)) {
results_table[[i]]$yu_obs <- Yu
}
yu_stats <- function(x) {
yu_rmse <- mean((x$yu_obs - x$pred)^2, na.rm = TRUE)^0.5
yu_st_rmse <- yu_rmse / diff(range(x$yu_obs, na.rm = TRUE))
yu_rsq <- cor(x$yu_obs, x$pred, use = "complete.obs")^2
c(yu_rmse = yu_rmse, yu_st_rmse = yu_st_rmse, yu_rsq = yu_rsq)
}
pred_res <- do.call("rbind", lapply(results_table, yu_stats))
pred_res <- cbind(col_ks,
rmse = pred_res[, "yu_rmse"],
st_rmse = pred_res[, "yu_st_rmse"],
r2 = pred_res[, "yu_rsq"]
)
} else {
pred_res <- NULL
}
if ("local_ortho_diss" %in% class(diss_xr_xu)) {
diss_method <- paste0(diss_method, " (locally computed)")
}
if (control$return_dissimilarity) {
diss_list <- list(
diss_method = diss_method,
diss_xr_xu = diss_xr_xu
)
if (has_projection) {
diss_list$global_projection <- diss_xr_xu_projection
}
} else {
diss_list <- NULL
}
colnames(neighborhoods$neighbors) <- paste0("Xu_", 1:nrow(Xu))
rownames(neighborhoods$neighbors) <- paste0("k_", 1:nrow(neighborhoods$neighbors))
results_list <- list(
call = f_call,
cntrl_param = control,
dissimilarities = diss_list,
Xu_neighbors = list(
neighbors = neighborhoods$neighbors,
neighbors_diss = neighborhoods$neighbors_diss
),
n_predictions = nrow(Xu),
gh = neighborhoods$gh,
validation_results = list(
local_cross_validation = loc_res,
nearest_neighbor_validation = loc_nn_res,
Yu_prediction_statistics = pred_res
),
results = results_table,
documentation = documentation
)
attr(results_list, "call") <- f_call
class(results_list) <- c("mbl", "list")
results_list
}
|
e8265f6adc9548b34f58b06f523d41cdfc085025
|
d5bbf2b2d2186c677071019855838c56e93ffa2e
|
/night_crew_test.R
|
f5f64d034f096d00453d7699084d2f6f107a6a29
|
[] |
no_license
|
pleunipennings/SharedDataBioinformatics
|
781ce843c7136f6a1c89de91dcc8a832b6fc7bd9
|
aa1c144eecd3d40d6f7b05f8fee22fd97d282238
|
refs/heads/master
| 2022-08-01T22:46:46.508613
| 2020-05-26T00:40:57
| 2020-05-26T00:40:57
| 105,822,431
| 3
| 6
| null | 2017-10-10T04:35:57
| 2017-10-04T21:53:51
| null |
UTF-8
|
R
| false
| false
| 9,677
|
r
|
night_crew_test.R
|
# testerino
#group 6 Victoria, Sam, Mordy, Gordan
#Meeting Wednesday 7-9
# BK polyoma
#Due October 31
setwd("~/Desktop/Git")
setwd("~/Desktop")
install.packages('ggplot2')
install.packages("gridExtra")
install.packages("dplyr")
library(ggplot2)
library(gridExtra)
library(dplyr)
data<-read.csv('OverviewSelCoeff_BachelerFilter.csv')
#Example
library(ggplot2)
?theme_set()
# test plot sample
#g <- ggplot(mpg, aes(manufacturer, cty))
#g + geom_boxplot() +
# geom_dotplot(binaxis='y',
# stackdir='center',
# dotsize = .5,
# fill="red") +
# theme(axis.text.x = element_text(angle=65, vjust=0.6)) +
# labs(title="Box plot + Dot plot",
# subtitle="City Mileage vs Class: Each dot represents 1 row in source data",
# caption="Source: mpg",
# x="Class of Vehicle",
# y="City Mileage")
#building the combo lines to help sort
data$combo<- (data$bigAAChange*3) + (data$makesCpG*2)
data$combo<- as.factor(data$combo)
levels(data$combo) <- gsub("0", "noAA noCPG", levels(data$combo))
levels(data$combo) <- gsub("2", "noAA yesCPG", levels(data$combo))
levels(data$combo) <- gsub("3", "yesAA noCPG", levels(data$combo))
levels(data$combo) <- gsub("5", "yesAA yesCPG", levels(data$combo))
#building numbers for letter might not be needed
data$number<- data$WTnt
data$number<- as.factor(data$number)
levels(data$number) <- gsub("a", as.numeric("1"), levels(data$number))
levels(data$number) <- gsub("c", as.numeric("2"), levels(data$number))
levels(data$number) <- gsub("g", as.numeric("3"), levels(data$number))
levels(data$number) <- gsub("t", as.numeric("4"), levels(data$number))
syn <- which(data$TypeOfSite=="syn")
non <- which(data$TypeOfSite == "nonsyn")
cols<-c("red","yellow","blue","green")
#colsyn<-cols[syndata$combo]
#syn subset stuff
syndata <- subset(data, TypeOfSite=="syn")
#syn subset for no AA and no CPG
synNNdata <- subset(syndata, combo=="noAA noCPG")
#syn subset for no AA and no CPG for a, c, g, t (for HIV all 4 should be here)
synNNa <- subset(synNNdata, WTnt=="a")
synNNc <- subset(synNNdata, WTnt=="c")
synNNg <- subset(synNNdata, WTnt=="g")
synNNt <- subset(synNNdata, WTnt=="t")
#syn subset for no AA and yes CPG
synNYdata<- subset(syndata, combo=="noAA yesCPG")
#syn subset for no AA and yes CPG for a, c, g, t (for HIV a, t )
synNYa <- subset(synNYdata, WTnt=="a")
synNYc <- subset(synNYdata, WTnt=="c")
synNYg <- subset(synNYdata, WTnt=="g")
synNYt <- subset(synNYdata, WTnt=="t")
#syn subset for yes AA and no CPG
synYNdata <- subset(syndata, combo=="yesAA noCPG")
#syn subset for yes AA and no CPG for a, c, g, t (none for HIV)
synYNa <- subset(synYNdata, WTnt=="a")
synYNc <- subset(synYNdata, WTnt=="c")
synYNg <- subset(synYNdata, WTnt=="g")
synYNt <- subset(synYNdata, WTnt=="t")
#syn subset for yes AA and yes CPG
synYYdata <- subset(syndata, combo=="yesAA yesCPG")
#syn subset for yes AA and yes CPG for a, c, g, t (none for HIV)
synYYa <- subset(synYYdata, WTnt=="a")
synYYc <- subset(synYYdata, WTnt=="c")
synYYg <- subset(synYYdata, WTnt=="g")
synYYt <- subset(synYYdata, WTnt=="t")
#graph 1 Synonymous Sites
#still working
meansyn <- ((syndata$lowerConf+syndata$upperConf)/2)
# assign the base plot so the boxplot layers below can be added to it
graph <- ggplot(aes(factor(WTnt), MeanFreq, colour = combo), data = syndata)+
#geom_jitter()+
geom_point()+
geom_errorbar(aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))
#not yet done
graph + geom_boxplot() +
geom_dotplot(binaxis='y',
stackdir='center',
dotsize = .5,
fill="blue") +
theme(axis.text.x = element_text(angle=65, vjust=0.6))
#building the subset plot
ggplot(aes(factor(WTnt), MeanFreq), data = synNYa)+
geom_jitter(col = "red") +
geom_errorbar(aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(aes('a',median(c(median(lowerConf),median(upperConf)))))
# need to find lower and upper conf for each individual nuc (a,c,g,t)
ggplot(aes(factor(WTnt), MeanFreq), data = synNYdata)+ # the CpG-creating syn sites
geom_jitter(color ="blue")
ggplot()+
#geom_jitter(data = synNYa, aes(factor(WTnt), MeanFreq),col = "red") +
geom_errorbar(data = synNYa, aes(ymin = median(lowerConf), ymax = median(upperConf),position = 1, width = 0.2))+
geom_point(aes('a',median(c(median(lowerConf),median(upperConf)))),data = synNYa)
?geom_errorbar
#created 10/11
# per-nucleotide subsets over all syn combos (used below but never defined above)
alla <- subset(syndata, WTnt=="a")
allc <- subset(syndata, WTnt=="c")
allg <- subset(syndata, WTnt=="g")
allt <- subset(syndata, WTnt=="t")
ggplot(aes(factor(WTnt), MeanFreq), data = synNNdata)+
geom_jitter(fill=5, col = "red") +
geom_errorbar(data = alla, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =alla, aes('a',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = allc, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =allc, aes('c',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = allg, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =allg, aes('g',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = allt, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =allt, aes('t',median(c(median(lowerConf),median(upperConf)))))
# combinging the differt colors
#In HIV data there is no C,G change for synNY values
ggplot(aes(factor(WTnt), MeanFreq), data = syndata)+
#synNNdata
geom_jitter(data = synNNdata, aes(factor(WTnt), MeanFreq),fill=5, col = "red") +
geom_errorbar(data = synNNa, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNNa, aes('a',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = synNNc, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNNc, aes('c',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = synNNg, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNNg, aes('g',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = synNNt, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNNt, aes('t',median(c(median(lowerConf),median(upperConf)))))+
#synNYdata
geom_jitter(data = synNYdata, aes(factor(WTnt), MeanFreq),fill=5, col = "blue")+
geom_errorbar(data = synNYa, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNYa, aes('a',median(c(median(lowerConf),median(upperConf)))))+
#geom_errorbar(data = synNYc, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
#geom_point(data =synNYc, aes('c',median(c(median(lowerConf),median(upperConf)))))+
# geom_errorbar(data = synNYg, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
# geom_point(data =synNYg, aes('g',median(c(median(lowerConf),median(upperConf)))))+
geom_errorbar(data = synNYt, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNYt, aes('t',median(c(median(lowerConf),median(upperConf)))))
#trying out numbers
ggplot(aes(factor(number), MeanFreq), data = syndata)+
#synNNdata
geom_jitter(data = synNNdata, aes(factor(number), MeanFreq),fill=5, col = "red") +
geom_errorbar(data = synNNa, aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
geom_point(data =synNNa, aes('1',median(c(median(lowerConf),median(upperConf)))))+
  geom_jitter(data = synNYdata, aes(factor(number), MeanFreq),fill=5, col = "blue")
# stray fragment from an earlier chain (it has no ggplot object to attach to):
#  geom_jitter(col = "red") +
#  geom_errorbar(aes(ymin = median(lowerConf), ymax = median(upperConf), width = 0.2))+
#  geom_point(aes('a',median(c(median(lowerConf),median(upperConf)))))
#nonsyn subset stuff
nonsyndata <- subset(data, TypeOfSite=="nonsyn")
nonyescpGdata<- subset(nonsyndata, combo=="noAA yesCPG")
#nonsyn subset for no AA and no CPG
nonNNdata <- subset(nonsyndata, combo=="noAA noCPG")
#nonsyn subset for no AA and no CPG for a, c, g, t (for HIV all 4 should be here)
nonsynNNa <- subset(nonNNdata, WTnt=="a")
nonsynNNc <- subset(nonNNdata, WTnt=="c")
nonsynNNg <- subset(nonNNdata, WTnt=="g")
nonsynNNt <- subset(nonNNdata, WTnt=="t")
#nonsyn subset for no AA and yes CPG
nonNYdata<- subset(nonsyndata, combo=="noAA yesCPG")
#nonsyn subset for no AA and yes CPG for a, c, g, t (for HIV a, t )
nonsynNYa <- subset(nonNYdata, WTnt=="a")
nonsynNYc <- subset(nonNYdata, WTnt=="c")
nonsynNYg <- subset(nonNYdata, WTnt=="g")
nonsynNYt <- subset(nonNYdata, WTnt=="t")
#nonsyn subset for yes AA and no CPG
nonYNdata <- subset(nonsyndata, combo=="yesAA noCPG")
#nonsyn subset for yes AA and no CPG for a, c, g, t (for HIV all 4 should be here)
nonsynYNa <- subset(nonYNdata, WTnt=="a")
nonsynYNc <- subset(nonYNdata, WTnt=="c")
nonsynYNg <- subset(nonYNdata, WTnt=="g")
nonsynYNt <- subset(nonYNdata, WTnt=="t")
#nonsyn subset for yes AA and yes CPG
nonYYdata <- subset(nonsyndata, combo=="yesAA yesCPG")
#nonsyn subset for yes AA and yes CPG for a, c, g, t (for HIV a, t)
nonsynYYa <- subset(nonYYdata, WTnt=="a")
nonsynYYc <- subset(nonYYdata, WTnt=="c")
nonsynYYg <- subset(nonYYdata, WTnt=="g")
nonsynYYt <- subset(nonYYdata, WTnt=="t")
# not using ggplot
colsyn <- cols[syndata$combo]
dotchart(syndata$MeanFreq, col = colsyn)
plot(jitter(as.numeric(syndata$number)), syndata$MeanFreq, type="p", col=colsyn, pch=16)
boxplot(MeanFreq~WTnt, data=syndata, col=cols)
?boxplot()
#hist(pdg$Course.total[IndsMen], prob = TRUE, breaks = 30, col= rgb(0,0.,1,0.5), add = TRUE)
#graph 2 Non-synomymous Sites
|
66ea153aec21b0fbede08b9c416f34eb7d1ecadf
|
e3ccb3f761f337d327519a2d5182acd0aa045634
|
/R/groupedhist.R
|
6dda9c827e7ec68f65eb8eb3393450d489e2d659
|
[] |
no_license
|
alexandriaross/IDPReport
|
5e07c5814cadbdb7ef6a3be934ff53ad19aee312
|
ac0ae1d5ed6f5b71a08ddef6409cbea32e1861f8
|
refs/heads/master
| 2022-04-14T07:41:40.441651
| 2020-04-11T13:11:13
| 2020-04-11T13:11:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,636
|
r
|
groupedhist.R
|
#' Displays VLXT, VSL2 and PONDRFIT next to each other for low, medium and high disorder categories
#'
#' @param dataset A dataset containing the columns VLXT, VSL2 and PONDRFIT
#' @return A side-by-side grouped histogram
#' @examples
#' groupedhist(TPRdataset)
#' @importFrom dplyr mutate %>%
#' @import ggplot2
groupedhist <- function(dataset) {
# grouped frequency. Displays VLXT, VSL2 and PONDRFIT next to each other for the 3 groups
predictor <- "VLXT"
dataset$groupedVLXT <- cut(dataset$VLXT, c(-Inf, 10,30, Inf),
labels=c("0 to 10", "10 to 30", "greater 30"))
groups <- "0 to 10"
occurences <- sum(dataset$groupedVLXT == "0 to 10")
groups <- c(groups, "10 to 30")
occurences <- c(occurences, sum(dataset$groupedVLXT == "10 to 30"))
groups <- c(groups, "greater 30")
occurences <- c(occurences, sum(dataset$groupedVLXT == "greater 30"))
groupedplot <- data.frame(groups, occurences, predictor)
#VSL2-------------------------------------------------------
predictor <- "VSL2"
dataset$groupedVSL2 <- cut(dataset$VSL2, c(-Inf, 10,30, Inf),
labels=c("0 to 10", "10 to 30", "greater 30"))
#groups <- "0 to 10"
occurences <- sum(dataset$groupedVSL2 == "0 to 10")
#groups <- c(groups, "10 to 30")
occurences <- c(occurences, sum(dataset$groupedVSL2 == "10 to 30"))
#groups <- c(groups, "greater 30")
occurences <- c(occurences, sum(dataset$groupedVSL2 == "greater 30"))
groupedVSL2 <- data.frame(groups, occurences, predictor)
# rbind adds rows to data frame
groupedplot <- rbind(groupedplot, groupedVSL2)
#PONDR FIT-------------------------------------------
predictor <- "PONDRFIT"
  # either cut at 0.1/0.3 or multiply PONDRFIT by 100, because in PONDRFIT 30% is given as 0.3
#dataset$groupedPONDRFIT <- cut(dataset$PONDRFIT, c(-Inf, 10,30, Inf),
dataset$groupedPONDRFIT <- cut(dataset$PONDRFIT, c(-Inf, 0.1, 0.3, Inf),
labels=c("0 to 10", "10 to 30", "greater 30"))
#groups <- "0 to 10"
occurences <- sum(dataset$groupedPONDRFIT == "0 to 10")
#groups <- c(groups, "10 to 30")
occurences <- c(occurences, sum(dataset$groupedPONDRFIT == "10 to 30"))
#groups <- c(groups, "greater 30")
occurences <- c(occurences, sum(dataset$groupedPONDRFIT == "greater 30"))
groupedPONDRFIT <- data.frame(groups, occurences, predictor)
# rbind adds rows to data frame
groupedplot <- rbind(groupedplot, groupedPONDRFIT)
plotdata <- groupedplot %>%
mutate(predictor = factor(predictor), groups = factor(groups))
ggplot(plotdata, aes(fill=predictor, y=occurences, x=groups)) +
geom_bar(stat="identity", position="dodge")
}
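# Usage sketch (hypothetical data, not shipped with any package). VLXT and VSL2
# are given as percentages and PONDRFIT as a fraction, matching the cut points
# used inside the function:
# library(dplyr); library(ggplot2)
# demo <- data.frame(VLXT = runif(60, 0, 60), VSL2 = runif(60, 0, 60),
#                    PONDRFIT = runif(60, 0, 0.6))
# groupedhist(demo)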
|
ffe9b62c4f328eac95b2a381f70549e14281396e
|
ecb8ee97d6486860871c5387588ccc644b7f185e
|
/dev/experiments/accordion_ui/simple accordion using shinydashboard.R
|
27127be3816877e301fec703a74be05f26f9dc02
|
[
"MIT"
] |
permissive
|
ClinicoPath/shinyPivot
|
a18834ad249b890d19d2b58fb2dc9ccbc36a26bc
|
372b243fc6097c37488a94c461db5315b41ca26f
|
refs/heads/master
| 2022-02-13T12:49:20.186449
| 2019-08-25T01:15:03
| 2019-08-25T01:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,350
|
r
|
simple accordion using shinydashboard.R
|
library(shiny)
library(shinydashboard)
library(shinydashboardPlus)
shinyApp(
ui = dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
box(
title = "Accordion Demo",
accordion(
accordionItem(
id = 1,
title = "Accordion Item 1",
color = "danger",
collapsed = TRUE,
"This is some text!"
),
accordionItem(
id = 2,
title = "Accordion Item 2",
color = "warning",
collapsed = FALSE,
"This is some text!"
),
accordionItem(
id = 3,
title = "Accordion Item 3",
color = "info",
collapsed = FALSE,
"This is some text!"
)
)
)
),
title = "Accordion"
),
server = function(input, output) { }
)
|
2d6a1e265d8beb061d65b533d2069dfb2add6a59
|
1443e812411278d1f776f8f7d1196add8e2dcc31
|
/man/plotZerosVsDepth.Rd
|
d4a0886a57bdb1ed770de8d3d056afe262cefc52
|
[
"MIT"
] |
permissive
|
WeiSong-bio/roryk-bcbioSinglecell
|
e96f5ab1cb99cf1c59efd728a394aaea104d82b2
|
2b090f2300799d17fafe086bd03a943d612c809f
|
refs/heads/master
| 2020-06-15T23:38:23.802177
| 2018-07-03T21:01:07
| 2018-07-03T21:01:07
| 195,422,697
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,765
|
rd
|
plotZerosVsDepth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods-plotZerosVsDepth.R
\docType{methods}
\name{plotZerosVsDepth}
\alias{plotZerosVsDepth}
\alias{plotZerosVsDepth}
\alias{plotZerosVsDepth,SingleCellExperiment-method}
\alias{plotZerosVsDepth,seurat-method}
\title{Plot Percentage of Zeros vs. Library Depth}
\usage{
plotZerosVsDepth(object, ...)
\S4method{plotZerosVsDepth}{SingleCellExperiment}(object, interestingGroups,
color = NULL, title = "zeros vs. depth")
\S4method{plotZerosVsDepth}{seurat}(object, interestingGroups, color = NULL,
title = "zeros vs. depth")
}
\arguments{
\item{object}{Object.}
\item{...}{Additional arguments.}
\item{interestingGroups}{Character vector of interesting groups. Must be
formatted in camel case and intersect with \code{\link[=sampleData]{sampleData()}} colnames.}
\item{color}{Desired ggplot color scale. Must supply discrete values. When
set to \code{NULL}, the default ggplot2 color palette will be used. If manual
color definitions are desired, we recommend using
\code{\link[ggplot2:scale_color_manual]{ggplot2::scale_color_manual()}}.}
\item{title}{Plot title.}
}
\value{
\code{ggplot}.
}
\description{
This function helps us visualize the dropout rate.
}
\examples{
# SingleCellExperiment ====
plotZerosVsDepth(cellranger_small)
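# sketch with a grouping column; "sampleName" is an assumed column in
# sampleData(cellranger_small):
# plotZerosVsDepth(cellranger_small, interestingGroups = "sampleName")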
}
\seealso{
Other Quality Control Functions: \code{\link{barcodeRanksPerSample}},
\code{\link{filterCells}}, \code{\link{metrics}},
\code{\link{plotCellCounts}},
\code{\link{plotGenesPerCell}},
\code{\link{plotMitoRatio}},
\code{\link{plotMitoVsCoding}},
\code{\link{plotNovelty}}, \code{\link{plotQC}},
\code{\link{plotReadsPerCell}},
\code{\link{plotUMIsPerCell}}
}
\author{
Rory Kirchner, Michael Steinbaugh
}
|
7274a2267a2ca7416c51ae2c37117d66780a3baf
|
ec3a132d0efb9fdde93b08e34a0a23dc9de513cd
|
/04_SM_regressions.R
|
2cecdf8ba85ee985b79749543f59ddcabab063ba
|
[] |
no_license
|
juanrocha/BEST
|
9aac71b0f2499a4df97df16b6f848dfd6848d305
|
0983f2c1a54752a5030ae5a416f38d24cb1a0ef5
|
refs/heads/master
| 2022-03-09T20:13:53.993478
| 2022-02-18T09:31:38
| 2022-02-18T09:31:38
| 55,783,122
| 0
| 1
| null | 2020-09-05T02:32:00
| 2016-04-08T14:00:46
|
HTML
|
UTF-8
|
R
| false
| false
| 12,559
|
r
|
04_SM_regressions.R
|
## Analysis paper II
## Juan Rocha
## juan.rocha@su.se
##
## Regressions for SM
## Game data regressions on:
## individual extraction
# simple
p1 <- plm(ind_extraction ~ Treatment + part + as.numeric(Round) + StockSizeBegining ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
p2 <- plm(ind_extraction ~ Treatment + part + as.numeric(Round) + StockSizeBegining ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# simple
p3 <- plm(ind_extraction ~ Treatment + part + Treatment * part + as.numeric(Round) + StockSizeBegining ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
p4 <- plm(ind_extraction ~ Treatment + part +Treatment * part + as.numeric(Round) + StockSizeBegining ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# simple
p5 <- plm(ind_extraction ~ Treatment + as.numeric(Round) + StockSizeBegining ,
data = pdata.frame(dat %>% filter(part == TRUE), index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
p6 <- plm(ind_extraction ~ Treatment + as.numeric(Round) + StockSizeBegining ,
data = pdata.frame(dat %>% filter(part == TRUE), index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
## proportion of stock extracted
# simple
q1 <- plm(prop ~ Treatment + part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
q2 <- plm(prop ~ Treatment + part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# simple
q3 <- plm(prop ~ Treatment + part + Treatment * part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "individual" )
# clustered:
q4 <- plm(prop ~ Treatment + part +Treatment * part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# simple
q5 <- plm(prop ~ Treatment + as.numeric(Round) ,
data = pdata.frame(dat %>% filter(part == TRUE), index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
q6 <- plm(prop ~ Treatment + as.numeric(Round) ,
data = pdata.frame(dat %>% filter(part == TRUE), index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
## and cooperation
# simple
c1 <- plm(cooperation2 ~ Treatment + part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
c2 <- plm(cooperation2 ~ Treatment + part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# simple
c3 <- plm(cooperation2 ~ Treatment + part + Treatment * part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "individual" )
# clustered:
c4 <- plm(cooperation2 ~ Treatment + part +Treatment * part + as.numeric(Round) ,
data = pdata.frame(dat, index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# simple
c5 <- plm(cooperation2 ~ Treatment + as.numeric(Round) ,
data = pdata.frame(dat %>% filter(part == TRUE), index = c('ID_player' ,'Round', "group")),
model = "random",
effect = "individual" )
# clustered:
c6 <- plm(cooperation2 ~ Treatment + as.numeric(Round) ,
data = pdata.frame(dat %>% filter(part == TRUE), index = c('ID_player' ,'Round', "group")),
model = "random", random.method = "walhus",
effect = "nested" )
# stargazer::stargazer(
# # clustered and robust:
# coeftest(p4, vcov.=function(x) vcovHC(x, method="white2", type="HC1", cluster = "group")),
# # clustered and robust:
# coeftest(p4, vcov.=function(x) vcovHC(x, method="white2", type="HC2", cluster = "group")),
# # clustered and robust:
# coeftest(p4, vcov.=function(x) vcovHC(x, method="white2", type="HC3", cluster = "group")),
# # clustered and robust:
# coeftest(p4, vcov.=function(x) vcovHC(x, method="white2", type="HC4", cluster = "group")),
# # last model with NW and HC4, more strict
# coeftest(p4, vcov.=function(x) vcovNW(x, type="HC4", cluster = "group")),
# type = "latex", multicolumn = FALSE, header = FALSE, intercept.bottom = FALSE,
# model.names= FALSE, font.size = "small", digits = 2,
# float = TRUE, no.space = TRUE, single.row = FALSE, align = TRUE,
# dep.var.caption = "", dep.var.labels.include = FALSE, df = FALSE,
# title = "Clustered and robust standard errors estimation with White method and (1) HC1, (2) HC2, (3) HC3, (4) HC4 weighting schemes, and (5) Newey and West method with HC4 scheme. The response variable is individual extraction."
# )
### Regressions on surveys:
ind_coop <- ind_coop %>%
# coordination_all is now the coordination score for all rounds, while coordination_2 is for second part
rename(coordination_all = coordination) %>%
# step added to avoid using place names
mutate(Place = fct_recode(Place, A = "Buenavista", B = "Las Flores", C = "Taganga", D = "Tasajera"))
names(ind_coop) <- str_remove_all(names(ind_coop), pattern = "2" )
# write_csv(ind_coop, path = "ind_coop.csv") # file for Caroline to play around with regressions
#
y_vars <- c("mean_extraction", "mean_prop_extr", "med_coop", "variance", "coordination", "var_extraction", "var_prop_extr")
x_vars <- c("Treatment + Place + education_yr + BD_how_often + fishing_children + Risk + Amb + prop_ag")
out1 <- map2(x_vars, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup(),
se_type = 'stata', cluster = group)
)
x_vars <- c( "Treatment + education_yr + BD_how_often + fishing_children + Risk + Amb + prop_ag")
out2 <- map2(x_vars, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup(),
se_type = 'stata', cluster = group)
)
x_vars <- c( "Treatment + Place")
out3 <- map2(x_vars, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup(),
se_type = 'stata', cluster = group)
)
x_vars <- c( "Treatment + education_yr + BD_how_often + fishing_children + fishing_future + Risk + group_fishing + Amb + prop_ag ")
out4 <- map2(x_vars, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup(),
se_type = 'stata', cluster = group)
)
df_rsqr <- tibble(
original = out1 %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
no_place = out2 %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
just_place = out3 %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
extras = out4 %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
vars = y_vars
)
y_vars <- c("mean_extraction", "mean_prop_extr", "med_coop", "variance", "coordination", "var_extraction", "var_prop_extr")
x_vars <- c("Treatment + Place + education_yr + BD_how_often + fishing_children + Risk + Amb + prop_ag")
out5 <- map2(x_vars, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup()
)
)
x_vars <- c( "Treatment + education_yr + BD_how_often + fishing_children + Risk + Amb + prop_ag")
out6 <- map2(x_vars, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup()
)
)
x_vars <- c( "Treatment + Place")
out7 <- map2(x_vars, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup()
)
)
x_vars <- c( "Treatment + education_yr + BD_how_often + fishing_children + fishing_future + Risk + group_fishing + sharing_art + Amb + prop_ag ")
out8 <- map2(x_vars, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% filter(part == T) %>% ungroup()
)
)
## Manual regressions to implement Caroline's suggestion of using part one as predictor of part 2.
## The final regression for the survey will contain a control variable for behaviour in the first part of the game
ind_coop <- ind_coop %>%
select(-part) %>% ungroup() %>%
left_join(ind_coop1 %>% select(-Place))
# history_rs?
y_vars <- c("mean_extraction", "mean_prop_extr", "med_coop", "variance", "var_extraction", "var_prop_extr", "coordination")
x_vars <- c("Treatment + Place + education_yr + BD_how_often + fishing_children + sharing_art + group_fishing + Risk + Amb + prop_ag")
z_vars <- c("mean_extraction1", "mean_prop_extr1", "med_coop1", "variance1", "var_extraction1", "var_prop_extr1", "coordination1")
rhs <- map2(.x = x_vars, .y = z_vars,
~ paste(.x, .y, sep = " + "))
out <- map2(rhs, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% ungroup(),
se_type = 'CR2', cluster = group)
)
## Note 191106: stargazer does not currently recognize objects of the class lm_robust.
## To circumvent that problem I create the same regression with lm and then manually
## modify coefficients, se, and p-values. See code below. The lm results are only used as
## template, table results are replaced manually.
out_lm2 <- map2(rhs, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% ungroup())
)
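# Sketch of that workaround (illustrative, not from the original script; it
# assumes stargazer's documented coef/se/p arguments, which accept lists of
# vectors, and the coefficients/std.error/p.value fields of lm_robust fits):
# stargazer::stargazer(
#   out_lm2,
#   coef = purrr::map(out, "coefficients"),
#   se   = purrr::map(out, "std.error"),
#   p    = purrr::map(out, "p.value"),
#   type = "latex")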
## without place
x_vars <- c("Treatment + education_yr + BD_how_often + fishing_children + sharing_art + group_fishing + Risk + Amb + prop_ag")
rhs <- map2(.x = x_vars, .y = z_vars,
~ paste(.x, .y, sep = " + "))
out_noplace <- map2(rhs, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% ungroup(),
se_type = 'CR2', cluster = group)
)
out_lm_noplace <- map2(rhs, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% ungroup())
)
## only place
x_vars <- c( "Treatment + Place")
rhs <- map2(.x = x_vars, .y = z_vars,
~ paste(.x, .y, sep = " + "))
out_onlyplace <- map2(rhs, y_vars,
~ lm_robust(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% ungroup(),
se_type = 'CR2', cluster = group)
)
out_lm_onlyplace <- map2(rhs, y_vars,
~ lm(as.formula(paste(.y, "~", .x)),
data = ind_coop %>% ungroup())
)
df_rsqr <- tibble(
original = out %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
no_place = out_noplace %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
just_place = out_onlyplace %>% map(., summary) %>% map(.,"adj.r.squared") %>% unlist(),
vars = y_vars
)
# save.image(file = "Regressions_paper2_200527.RData", safe = TRUE)
#J200908: regression suggested by reviewer2:
edu <- lm_robust(
prop_ag ~ Treatment + Place + education_yr + BD_how_often + fishing_children
+ Risk + Amb,
data = ind_coop %>% filter(part == T) %>% ungroup(),
se_type = 'stata', cluster = group)
|
ae99208576db06c379ccde3c849db53acb7fbc0e
|
7d5968837bec87fcc42bab82f82db8bfa169e7c7
|
/man/intercross.point.Rd
|
9bc1ff8230f7f46100377491b2eed1414189d0fc
|
[] |
no_license
|
liuguofang/figsci
|
ddadb01fae7c208b4ac3505eed5dc831d7de0743
|
076f7dd70711836f32f9c2118ad0db21ce182ea2
|
refs/heads/master
| 2021-06-04T19:23:34.065124
| 2020-02-12T04:22:11
| 2020-02-12T04:22:11
| 107,945,277
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 969
|
rd
|
intercross.point.Rd
|
\name{intercross.point}
\alias{intercross.point}
\title{Find an intercross point between two lines}
\usage{
intercross.point(x1, y1, x2, y2, x3, y3, x4, y4)
}
\description{
Find the intercross point between two lines (AB, CD).
}
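\details{
For reference (a standard result, not quoted from the package source), the
intersection of the infinite lines through A--B and C--D is given by the
determinant formula
\deqn{x = \frac{(x_1 y_2 - y_1 x_2)(x_3 - x_4) - (x_1 - x_2)(x_3 y_4 - y_3 x_4)}{(x_1 - x_2)(y_3 - y_4) - (y_1 - y_2)(x_3 - x_4)}}
with \eqn{y} computed analogously.
}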
\arguments{
\item{x1}{the x value of point A.}
\item{y1}{the y value of point A.}
\item{x2}{the x value of point B.}
\item{y2}{the y value of point B.}
\item{x3}{the x value of point C.}
\item{y3}{the y value of point C.}
\item{x4}{the x value of point D.}
\item{y4}{the y value of point D.}
}
\examples{
d <- data.frame(x = c(2, 5, 3, 8), y = c(8, 3, 2, 7))
with(d, plot(x, y, ylim = c(0, 8)))
segments(d$x[1], d$y[1], d$x[2], d$y[2])
segments(d$x[3], d$y[3], d$x[4], d$y[4])
#p is the point of intersection
p <- intercross.point(2, 8, 5, 3, 3, 2, 8, 7)
points(p[1], p[2], col = 2)
polygon(c(d$x[1], d$x[3], p[1]), c(d$y[1], d$y[3], p[2]), col = 2)
polygon(c(d$x[2], d$x[4], p[1]), c(d$y[2], d$y[4], p[2]), col = 3)
}
|
a23707113e5f80ee7670ba99b5359e26951e5d80
|
751426c4635f5763ba1ef4911c285753dce76d89
|
/Traffic Analysis/Seasonal/SEASTRAFFIC.R
|
2a94ca8884980674ec8d834fd5a55c923c3c623f
|
[] |
no_license
|
SuperMarioGiacomazzo/BAYESIAN_SUBSET_TAR
|
081836786d1134859efc6362b0b44c42156c0a5c
|
51864a4fb5eab3c2f9a89ca7c11216de52fec2e0
|
refs/heads/master
| 2020-04-07T11:31:06.717119
| 2018-11-20T05:28:16
| 2018-11-20T05:28:16
| 158,329,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,404
|
r
|
SEASTRAFFIC.R
|
#Libraries Used
library(doParallel)
library(forecast)
library(foreach)
library(runjags)
library(bayesreg)
library(coda)
options(scipen=999)
#Open Data Directory
setwd("D:/Mario Documents/Research/JAS/BAYESIAN_SUBSET_TAR/Traffic Analysis/Source Code")
#Gather Data from Source Code
source("APRIL_SOURCE.R")
#Directory for Saving
setwd("D:/Mario Documents/Research/JAS/BAYESIAN_SUBSET_TAR/Traffic Analysis/Seasonal")
##############################
# Lag Function
##############################
lag.func<-function(x,k=1){
t=length(x)
y=c(rep(NA,t))
for(i in (k+1):t){
y[i]=x[i-k]
}
return(y)
}
seasdiff.func<-function(x,d=1){
  # seasonal difference y[t] = x[t] - x[t-d], padded with d leading NAs
  t=length(x)
  y=diff(x,lag=d)
  y=c(rep(NA,d),y)
  return(y)
}
##############################
# Logit Functions
##############################
logit.func<-function(x) return(log(x/(1-x)))
revlogit.func<-function(x) return(exp(x)/(1+exp(x)))
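# quick round-trip sanity check (illustrative, not part of the original run)
stopifnot(isTRUE(all.equal(revlogit.func(logit.func(0.25)), 0.25)))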
###################################
# Data Functions
###################################
# Shared builder for the harmonic-regression design matrix; the original
# seas.data.func/seas.data.func2 pair differed only in which rows they kept.
# rows = 1:1440 selects the first (training) day; rows = -(1:1440) the rest.
seas.data.base=function(day,nfreq,series,rows){
  # detectors around L103 (before: L108, after: L101) plus the rest,
  # ordered to match the original series indexing (A..G)
  detectors=c("L101","L106","L108","L102","L104","L107","L103")
  # occupancy as a proportion, clamped away from exactly 0 and 1 for the logit
  occ=lapply(detectors,function(d){
    x=April.3.Day[[day]][[paste0(d,"_occupancy")]][rows]/100
    x[x==1]=0.9999
    x[x==0]=0.0001
    x
  })
  #Logit Transformed Data
  y=logit.func(occ[[series]])
  #Time Variable
  time=(1:length(y))
  #Creation of Harmonic Matrices (480-minute base period)
  N=length(y)
  X=matrix(NA,N,nfreq*2)
  for (j in 1:nfreq){
    X[,(2*j-1):(2*j)]=cbind(cos(2*pi*time*j/480),sin(2*pi*time*j/480))
  }
  data=data.frame(y=y,X=X)
  return(data)
}
seas.data.func=function(day,nfreq,series) seas.data.base(day,nfreq,series,rows=1:1440)
seas.data.func2=function(day,nfreq,series) seas.data.base(day,nfreq,series,rows=-(1:1440))
####################################
# Create Empty Lists to Save Results
####################################
SEASMOD.RESULTS=list()
for(day in 1:5){
SEASMOD.RESULTS[[day]]=foreach(series=1:7)%do%{
#Initial Seasonal Model Using JAGS
seasmod=bayesreg(y~.,data=seas.data.func(day=day,nfreq=150,series=series),
prior="hs+",nsamples=2000,burnin=5000,thin=10)
eff.size=rep(NA,300)
for(k in 1:300){
eff.size[k]=effectiveSize(c(seasmod$beta[k,]))
}
min.eff.size=min(eff.size)
seas.mean=seasmod$muBeta0
seas.coef=seasmod$muBeta
train.data=seas.data.func(day=day,nfreq=150,series=series)
train.predict=as.numeric(c(seas.mean)+as.matrix(train.data[,-1])%*%seas.coef)
test.data=seas.data.func2(day=day,nfreq=150,series=series)
test.predict=as.numeric(c(seas.mean)+as.matrix(test.data[,-1])%*%seas.coef)
seas.profile=c(train.predict,test.predict)
actual.data=c(train.data$y,test.data$y)
seas.dev=actual.data-seas.profile
seas.dev2=seas.dev^2
seas.data=data.frame(cbind(actual.data,seas.profile,seas.dev,seas.dev2))
names(seas.data)=c("Actual","Seas","Seas.Dev","Seas.Dev2")
#Optimal Seasonal Model
out=list(seas.data=seas.data,s.mu.p=as.numeric(seasmod$beta0),s.coef.p=seasmod$beta,s.s2.p=as.numeric(seasmod$sigma2))
out
}
save.image("SEASTRAFFIC.Rdata")
}
|
2befe8f89716200446ce66455426c10f0bb08996
|
d67fbbef3d2d35575c4bf377bc2553f445f1db29
|
/man/course.Rd
|
9b96badab9624a6eb33484bf0679ed10a5eac930
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MIDFIELDR/midfielddata
|
7fda1709529700d620a793f9321920bebc3f5ab9
|
aa48c2965eb6e9e25accbe7a6d162fa1de3fd38e
|
refs/heads/main
| 2022-12-15T04:46:50.978394
| 2022-12-06T17:24:42
| 2022-12-06T17:24:42
| 136,339,674
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,264
|
rd
|
course.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/course.R
\docType{data}
\name{course}
\alias{course}
\title{Student-level course data}
\format{
A \code{data.frame} and \code{data.table} with 12 variables and approximately
3.3M observations of 97,555 unique students occupying 325 MB of memory:
\describe{
\item{\code{mcid}}{Character, anonymized student identifier, e.g.,
\code{MCID3111142225}.}
\item{\code{institution}}{Character, anonymized institution name, e.g.,
\verb{Institution B}.}
\item{\code{term_course}}{Character, academic year and term, format \code{YYYYT}.}
\item{\code{course}}{Character, course name, e.g., \verb{Astrophysics III},
\verb{Calculus For Social Science And Business}, \verb{Corp Financial Rprtng 1},
\verb{Environmental Sanitation II}, \verb{Fitness and Wellness},
\verb{Introductory Astronomy 2}, \verb{Our Changing Environment}, etc.}
\item{\code{abbrev}}{Character, course alpha identifier, e.g. \code{AA}, \code{MATH},
\code{ACCT}, \code{EH}, \code{HES}, \code{ASTR}, etc.}
\item{\code{number}}{Character, course numeric identifier, e.g. \code{1104}, \code{1209},
\code{228}, \code{4047}, etc.}
\item{\code{section}}{Character, course section identifier, from one to four
characters, e.g., \code{1}, \code{2}, \code{01}, \code{14}, \code{001}, \code{040}, \code{785},
\code{H02}, \code{R01}, \verb{300E}, \verb{888R}, etc.}
\item{\code{type}}{Character, predominant delivery method for this section, e.g.,
\code{Blended}, \verb{Distance Education}, \code{Face-to-Face}, \code{Online}, etc.}
\item{\code{faculty_rank}}{Character, academic rank of the person teaching the
course, e.g., \verb{Assistant Professor}, \verb{Associate Professor},
\verb{Graduate Assistant}, \verb{Visiting Faculty}, etc.}
\item{\code{hours_course}}{Numeric, number of credit-hours for successful course
completion.}
\item{\code{grade}}{Character, course grade, e.g., \verb{A+}, \code{A}, \verb{A-}, \verb{B+},
\code{I}, \code{NG}, etc.}
\item{\code{discipline_midfield}}{Character, a variable for grouping courses by
academic discipline assigned by the MIDFIELD data curator, e.g.,
\code{Anthropology}, \code{Business}, \verb{Computer Science},
\code{Engineering}, \verb{Language and Literature}, \code{Mathematics},
\verb{Visual and Performing Arts}, etc.}
}
}
\source{
2022 \href{https://midfield.online/}{MIDFIELD} database
}
\usage{
data(course)
}
\description{
Student-level course information for approximately 98,000 undergraduates,
keyed by student ID. Data at the "student-level" refers to information
collected by undergraduate institutions about individual students, for
example, course name and number, credit hours, and student grades.
}
\details{
Course data are structured in block-record form, that is, records associated
with a particular ID can span multiple rows---one record per student per
course per term.
Terms are encoded \code{YYYYT}, where \code{YYYY} is the year at the start of the
academic year and \code{T} encodes the semester or quarter within an academic year
as Fall (\code{1}), Winter (\code{2}), Spring (\code{3}), and Summer (\code{4}, \code{5}, and \code{6}).
For example, for academic year 1995--96, Fall 95--96 is encoded \code{19951},
Spring 95--96 is encoded \code{19953}, and the first Summer 95-96 term is encoded
\code{19954}. The source database includes special month-long sessions encoded
with letters \code{A}, \code{B}, \code{C}, etc., though none are included in this sample.
The data in \code{midfielddata} are a proportionate stratified sample of the
MIDFIELD database, but are not suitable for drawing inferences about program
attributes or student experiences---\code{midfielddata} provides practice data,
not research data.
}
\examples{
\dontrun{
# Load data
data(course)
# Select specific rows and columns
rows_we_want <- course$mcid == "MCID3112192438"
cols_we_want <- c("mcid", "term_course", "course", "grade")
# View observations for this ID
course[rows_we_want, cols_we_want]
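# Decode a term code using the YYYYT scheme from Details (illustrative)
term_code <- "19953"
substr(term_code, 1, 4)  # academic year start: "1995"
substr(term_code, 5, 5)  # "3" = Spring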
}
}
\seealso{
Package \href{https://midfieldr.github.io/midfieldr/}{\code{midfieldr}}
for tools and methods for working with MIDFIELD data in \code{R}.
Other datasets:
\code{\link{degree}},
\code{\link{student}},
\code{\link{term}}
}
\concept{datasets}
\keyword{datasets}
|
772622a511d9820caeb2048ba5081ce8ee81c044
|
f9026a29bfd23f24aa798f2968611b40c4541308
|
/knapsack_brute_force_test.R
|
4e8f5aae5c4d65d80aecd3d0d05f257cf4d014d0
|
[
"MIT"
] |
permissive
|
aleka769/A94Lab6
|
ab288b28cd582cf1ae2454ba6a8e7301eb156db1
|
6e687b216d4d94cd73307e0d217f99f5df7d6274
|
refs/heads/master
| 2020-03-31T12:36:11.428186
| 2018-10-15T11:28:30
| 2018-10-15T11:28:30
| 152,221,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
knapsack_brute_force_test.R
|
W <- 25
set.seed(831117)
n <- 1000000
# random knapsack instance: weights 1-10, values 4-12
stuff <- data.frame(w = sample(1:10, n, replace = TRUE),
                    v = sample(4:12, n, replace = TRUE))
# time each solver; brute force is exponential in n, so it only gets 16 items
system.time(brute_force_knapsack(stuff[1:16,],25))
system.time(dynamic_knapsack(stuff[1:500,],25))
system.time(greedy_knapsack(stuff,25))
system.time(brute_force_knapsack(stuff,25,parallel = TRUE))
|
b15e57bef3931df380b065844b9c4820b0380a91
|
8175c4d86339cb29a56969a281b0b7fb8f975eaa
|
/barkRead/graph_deltas_raw_July.R
|
40ae506cf692cadcd08f3f5d6d980510ee51ae5a
|
[] |
no_license
|
TerefiGimeno/bark
|
58a55231078d77d59e56ad240ce6934f7ead08e7
|
4f0985f933efc06a7dfab60f5ed67eb445d531fb
|
refs/heads/master
| 2023-02-03T03:31:25.504048
| 2023-01-31T09:53:54
| 2023-01-31T09:53:54
| 191,321,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,321
|
r
|
graph_deltas_raw_July.R
|
library(lubridate)
library(data.table)
dfJ <- read.table('barkData/July24_complete.csv', sep = ';', header = TRUE)
# recalculate transpiration (in mmol m-2 s-1) following Zsofia's email on 1-March-2021
dfJ$TrA_old <- dfJ$TrA
dfJ$TrA <- 1000*((dfJ$FlowOut/dfJ$Area)*((dfJ$H2Oout_G - dfJ$H2Oin_G)/(dfJ$ATP - dfJ$H2Oin_G)))
dfJ$DT <- as.POSIXct(dfJ$DT, format="%Y-%m-%d %H:%M:%S")
dfJ$DOY <- yday(dfJ$DT)
dfJ$timeDec <- hour(dfJ$DT) + (minute(dfJ$DT)/60)
wiJ <- read.csv("barkData/July24_xylem_wi.csv")
dfJ <- dplyr::left_join(dfJ, wiJ, by = 'MpNo')
dfJ$time <- yday(dfJ$DT) + (hour(dfJ$DT)+ minute(dfJ$DT)/60)/24
# calculate deltas (d18O and d2H) of transpired water (d_E), according to:
dfJ$d18O_E <- (dfJ$FlowOut*dfJ$H2Oout_G*dfJ$d18O_out*0.001 - dfJ$FlowIn*dfJ$H2Oin_G*dfJ$d18O_in*0.001)*1000/
(dfJ$FlowOut*dfJ$H2Oout_G - dfJ$FlowIn*dfJ$H2Oin_G)
dfJ$ss <- ifelse(dfJ$d18O_E <= dfJ$d18_up_lim, 'yes', 'no')
dfJ$d2H_E <- (dfJ$FlowOut*dfJ$H2Oout_G*dfJ$dDH_out*0.001 - dfJ$FlowIn*dfJ$H2Oin_G*dfJ$dDH_in*0.001)*1000/
(dfJ$FlowOut*dfJ$H2Oout_G - dfJ$FlowIn*dfJ$H2Oin_G)
dfJ$dDH_ex <- dfJ$dDH_out - 8*dfJ$d18O_out
dfJ$dDH_ex_a <- dfJ$dDH_in - 8*dfJ$d18O_in
windows(12, 14)
par(mfrow=c(3, 2), mar = c(0, 5, 4, 0), cex = 1.1)
plot(subset(dfJ, MpNo == 2)$d18O_in ~ subset(dfJ, MpNo == 2)$DT,
ylim = c(-22, 30), xlim = c(min(dfJ$DT), max(dfJ$DT)),
main = 'Cuvette B', axes = F, pch =19, col = 'blue',
ylab = expression(paste(delta^{18}, "O (\u2030)")), xlab = '', cex.lab = 1.6)
points(subset(dfJ, MpNo == 2)$d18O_out ~ subset(dfJ, MpNo == 2)$DT, pch =19, col = 'red')
points(subset(dfJ, MpNo == 2 & ss == 'yes')$d18O_E ~ subset(dfJ, MpNo == 2 & ss == 'yes')$DT,
pch =19, col = 'black')
points(subset(dfJ, MpNo == 2 & ss == 'no')$d18O_E ~ subset(dfJ, MpNo == 2 & ss == 'no')$DT,
pch =1, col = 'black')
abline(subset(dfJ, MpNo == 2)$d18O_b[1], 0, lty = 2)
# no data available for the exact segment, use the tree average
abline(subset(dfJ, MpNo == 2)$d18O_a_tree[1], 0, lty = 3)
abline(subset(dfJ, MpNo == 2)$d18_up_lim[1], 0)
axis(side = 2, at = seq(-20, 30, 10), labels = seq(-20, 30, 10))
axis(side = 1, at = seq(min(dfJ$DT), max(dfJ$DT), 'hour'), labels = F)
box()
legend('topleft', expression(bold((a))), bty = 'n', cex = 1.2, pt.cex = 1)
legend('topright', legend = c('Xyl. Up', 'Xyl. Down'), bty = 'n', lty = c(2, 3),
cex = 1.2, pt.cex = 1)
par(mar = c(0, 2, 4, 3))
plot(subset(dfJ, MpNo == 7)$d18O_in ~ subset(dfJ, MpNo == 7)$DT,
ylim = c(-22, 30), xlim = c(min(dfJ$DT), max(dfJ$DT)),
pch =19, col = 'blue', axes = F, ylab = '', xlab = '', main = 'Cuvette C')
points(subset(dfJ, MpNo == 7)$d18O_out ~ subset(dfJ, MpNo == 7)$DT, pch =19, col = 'red')
points(subset(dfJ, MpNo == 7 & ss == 'yes')$d18O_E ~ subset(dfJ, MpNo == 7 & ss == 'yes')$DT,
pch =19, col = 'black')
points(subset(dfJ, MpNo == 7 & ss == 'no')$d18O_E ~ subset(dfJ, MpNo == 7 & ss == 'no')$DT,
pch =1, col = 'black')
abline(subset(dfJ, MpNo == 7)$d18O_b[1], 0, lty = 2)
abline(subset(dfJ, MpNo == 7)$d18O_a[1], 0, lty = 3)
abline(subset(dfJ, MpNo == 7)$d18_up_lim[1], 0)
axis(side = 2, at = seq(-20, 30, 10), labels = seq(-20, 30, 10))
axis(side = 1, at = seq(min(dfJ$DT), max(dfJ$DT), 'hour'), labels = F)
box()
legend('topleft', expression(bold((b))), bty = 'n', cex = 1.2, pt.cex = 1)
legend('topright', pch = c(19, 19, 1), legend = c(expression(delta['in']), expression(delta[out]),
expression(delta[italic(E)])),
col = c('blue', 'red', 'black'), bty = 'n', cex = 1.3, pt.cex = 1)
par(mar = c(2, 5, 2, 0))
plot(subset(dfJ, MpNo == 2)$dDH_in ~ subset(dfJ, MpNo == 2)$DT,
ylim = c(-150, 1250), xlim = c(min(dfJ$DT), max(dfJ$DT)),
pch =19, col = 'blue', axes = F,
ylab = expression(paste(delta^{2}, "H (\u2030)")), xlab = '', cex.lab = 1.6)
points(subset(dfJ, MpNo == 2)$dDH_out ~ subset(dfJ, MpNo == 2)$DT, pch =19, col = 'red')
points(subset(dfJ, MpNo == 2 & ss == 'yes')$d2H_E ~ subset(dfJ, MpNo == 2 & ss == 'yes')$DT,
pch =19, col = 'black')
points(subset(dfJ, MpNo == 2 & ss == 'no')$d2H_E ~ subset(dfJ, MpNo == 2 & ss == 'no')$DT,
pch =1, col = 'black')
abline(subset(dfJ, MpNo == 2)$d2H_b[1], 0, lty = 2)
# no data available for the exact segment, use the tree average
abline(subset(dfJ, MpNo == 2)$d2H_a_tree[1], 0, lty = 3)
axis(side = 2, at = seq(-100, 1200, 200), labels = seq(-100, 1200, 200))
axis(side = 1, at = seq(min(subset(dfJ, MpNo == 2)$DT), max(subset(dfJ, MpNo == 2)$DT), 'hour'),
labels = F)
box()
legend('topleft', expression(bold((c))), bty = 'n', cex = 1.2, pt.cex = 1)
par(mar = c(2, 2, 2, 3))
plot(subset(dfJ, MpNo == 7)$dDH_in ~ subset(dfJ, MpNo == 7)$DT,
ylim = c(-150, 1250), xlim = c(min(dfJ$DT), max(dfJ$DT)),
pch =19, col = 'blue', axes = F, ylab = '', xlab = '')
points(subset(dfJ, MpNo == 7)$dDH_out ~ subset(dfJ, MpNo == 7)$DT, pch =19, col = 'red')
points(subset(dfJ, MpNo == 7 & ss == 'yes')$d2H_E ~ subset(dfJ, MpNo == 7 & ss == 'yes')$DT,
pch =19, col = 'black')
points(subset(dfJ, MpNo == 7 & ss == 'no')$d2H_E ~ subset(dfJ, MpNo == 7 & ss == 'no')$DT,
pch =1, col = 'black')
abline(subset(dfJ, MpNo == 7)$d2H_b[1], 0, lty = 2)
abline(subset(dfJ, MpNo == 7)$d2H_a[1], 0, lty = 3)
axis(side = 2, at = seq(-100, 1200, 200), labels = seq(-100, 1200, 200))
axis(side = 1, at = seq(min(subset(dfJ, MpNo == 7)$DT), max(subset(dfJ, MpNo == 7)$DT), 'hour'),
labels = F)
box()
legend('topleft', expression(bold((d))), bty = 'n', cex = 1.2, pt.cex = 1)
par(mar = c(4, 5, 0, 0))
plot(subset(dfJ, MpNo == 2)$TrA ~ subset(dfJ, MpNo == 2)$DT,
ylim = c(0, 2), xlim = c(min(dfJ$DT), max(dfJ$DT)),
pch = 19, col = 'darkgreen', cex.lab = 1.4,
ylab = expression(italic(E)[leaf]~(mmol~m^-2~s^-1)), xlab = '')
lines(subset(dfJ, MpNo == 2)$TrA ~ subset(dfJ, MpNo == 2)$DT, col ='darkgreen')
legend('topleft', expression(bold((e))), bty = 'n', cex = 1.2, pt.cex = 1)
par(mar = c(4, 2, 0, 3))
plot(subset(dfJ, MpNo == 7)$TrA ~ subset(dfJ, MpNo == 7)$DT,
ylim = c(0, 2), xlim = c(min(dfJ$DT), max(dfJ$DT)),
pch = 19, col = 'darkgreen', ylab = '', xlab = '')
lines(subset(dfJ, MpNo == 7)$TrA ~ subset(dfJ, MpNo == 7)$DT, col ='darkgreen')
legend('topleft', expression(bold((f))), bty = 'n', cex = 1.2, pt.cex = 1)
|
86ebc9c2afc8a33b81ddf21b645aa2a4df9b36b8
|
f9f0aad0f238726ab8f44aeb38f075602b92909e
|
/rankhospital.R
|
391ed64f0bb789d71741391553f664d3d4f78208
|
[] |
no_license
|
agenkin/datasciencecoursera
|
8fd3853123ae103fe3f2d41dbb37fadc36f7c563
|
1ba1d471ebd5f63f75f803ab979826ad64b2ffea
|
refs/heads/master
| 2023-05-14T12:45:09.565671
| 2023-05-05T07:04:54
| 2023-05-05T07:04:54
| 50,101,881
| 0
| 0
| null | 2017-03-31T05:01:04
| 2016-01-21T11:03:00
|
R
|
UTF-8
|
R
| false
| false
| 1,345
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data
  data_file <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Map each valid outcome to its mortality-rate column
  outcome_cols <- c(
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  ## Check that state and outcome are valid
  if (!any(unique(data_file$State) == state)) stop("invalid state")
  if (!any(names(outcome_cols) == outcome)) stop("invalid outcome")
  ## Keep this state's hospitals, order by rate then name, drop missing rates
  data <- data_file[data_file$State == state,
                    c("Hospital.Name", outcome_cols[[outcome]])]
  data <- data[order(suppressWarnings(as.double(data[, 2])), data[, 1],
                     na.last = NA), ]
  ## Resolve "best"/"worst" to row positions
  if (num == "best")  num <- 1
  if (num == "worst") num <- nrow(data)
  data[num, 1]
}
|
5230341dc9c666344df585b7c45e9f99ed87903b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/enpls/examples/plot.enpls.od.Rd.R
|
b424b933ad4f57af7c4835a0d145a189766c10c4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
plot.enpls.od.Rd.R
|
library(enpls)
### Name: plot.enpls.od
### Title: Plot enpls.od object
### Aliases: plot.enpls.od
### ** Examples
data("alkanes")
x = alkanes$x
y = alkanes$y
set.seed(42)
od = enpls.od(x, y, reptimes = 50)
plot(od, criterion = "quantile")
plot(od, criterion = "sd")
|
f9e1a1010eb4a6a9681926ecb61c73c65cefce6b
|
3bbdd4120d653ce9999009de9680665af9910e9e
|
/Imputation.R
|
04d4e284b12420d7c5049cab7309a0c83ef2af69
|
[] |
no_license
|
sysbiomed/TraumaRDB
|
b5c5411d38244831aa92cdf764e58db62c4a2500
|
0608e28816363454a873ba24c41c4f6a5badd837
|
refs/heads/main
| 2023-01-06T01:58:22.711389
| 2020-11-02T21:42:15
| 2020-11-02T21:42:15
| 304,031,868
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,075
|
r
|
Imputation.R
|
#Automated code used for the new imputation method (a combination of LOCF and NOCB)
#Cláudia Constantino, MSc
install.packages("xts", repos="http://cloud.r-project.org")
install.packages("imputeTS")
library(dplyr)
library(tidyr)
library(zoo)
library(xts)
library(lubridate)
library(imputeTS)
library(ggplot2)
library(ggpubr)
library(Rcpp)
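# Toy illustration (not part of the original analysis) of the LOCF/NOCB
# building blocks that the script below combines manually via within()/ifelse():
toy <- c(NA, 2, NA, NA, 5, NA)
na_locf(toy, option = "locf", na_remaining = "keep")  # NA 2 2 2 5 5  (carry forward)
na_locf(toy, option = "nocb", na_remaining = "keep")  #  2 2 5 5 5 NA (carry backward)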
#Log transformation in the subset with 22 genes
Log.aux <- log1p(genes.data.subset[,2:23])
Log.genes.subset <- cbind(genes.data.subset$PATIENT_ID,
genes.data.subset$microarray.time,
Log.aux)
colnames(Log.genes.subset)[1:2] <- c("patient", "time")
# ====================================================================================
# Carry Forward and Carry Backward Imputation
# ====================================================================================
NamesList <- colnames(Log.genes.subset)
NamesList <- NamesList[-(1:2)]
geneX <- Log.genes.subset[,1:2]
ls_imput<-list()
for (j in 1:length(NamesList)){
geneX <- cbind(Log.genes.subset[,1:2], Log.genes.subset[NamesList[j]])
#columns divided by time
wide_geneX <- geneX %>% spread(time, NamesList[j])
colnames(wide_geneX)[2:23] <- paste("Time", colnames(wide_geneX[,c(2:23)]), sep = "")
#Imputation: Carry near observation (controlled)
#Carry the t+1 observation to t observation
new_imput_geneX = within(wide_geneX, {
Time1 = ifelse(is.na(wide_geneX$Time1), wide_geneX$Time2, wide_geneX$Time1)
Time4 = ifelse(is.na(wide_geneX$Time4), wide_geneX$Time5, wide_geneX$Time4)
Time7 = ifelse(is.na(wide_geneX$Time7), wide_geneX$Time8, wide_geneX$Time7)
Time14 = ifelse(is.na(wide_geneX$Time14), wide_geneX$Time15, wide_geneX$Time14)
Time21 = ifelse(is.na(wide_geneX$Time21), wide_geneX$Time22, wide_geneX$Time21)
Time28 = ifelse(is.na(wide_geneX$Time28), wide_geneX$Time29, wide_geneX$Time28)
} )
#Carry the t-1 observation to t observation
new_imput_geneX = within(new_imput_geneX, {
Time4 = ifelse(is.na(new_imput_geneX$Time4), new_imput_geneX$Time3, new_imput_geneX$Time4)
Time7 = ifelse(is.na(new_imput_geneX$Time7), new_imput_geneX$Time6, new_imput_geneX$Time7)
Time14 = ifelse(is.na(new_imput_geneX$Time14), new_imput_geneX$Time13, new_imput_geneX$Time14)
Time21 = ifelse(is.na(new_imput_geneX$Time21), new_imput_geneX$Time20, new_imput_geneX$Time21)
Time28 = ifelse(is.na(new_imput_geneX$Time28), new_imput_geneX$Time27, new_imput_geneX$Time28)
} )
#Carry the t+2 observation to t observation
new_imput_geneX = within(new_imput_geneX, {
Time7 = ifelse(is.na(new_imput_geneX$Time7), new_imput_geneX$Time9, new_imput_geneX$Time7)
Time14 = ifelse(is.na(new_imput_geneX$Time14), new_imput_geneX$Time16, new_imput_geneX$Time14)
Time21 = ifelse(is.na(new_imput_geneX$Time21), new_imput_geneX$Time23, new_imput_geneX$Time21)
} )
#delete days that will not be used
new_imput_geneX[ ,c('Time2', 'Time3', 'Time5', 'Time6', 'Time8', 'Time9', 'Time11', 'Time13',
'Time15', 'Time16', 'Time20', 'Time22', 'Time23', 'Time27',
'Time29')] <- list(NULL)
#And now, to complete the remaining NA's, linear interpolation
colnames(new_imput_geneX) <- c("patient", "0", "1", "4", "7", "14", "21", "28")
long_geneX_imput1 <- gather(new_imput_geneX, time, geneX,"0":"28")
long_geneX_imput1 <- transform(long_geneX_imput1, time = as.numeric(time))
long_geneX_imput1 <- long_geneX_imput1[order(long_geneX_imput1$patient,
long_geneX_imput1$time),]
#to do linear interpolation for each patient and not the entire column
wide_aux <- long_geneX_imput1 %>% spread(patient, geneX)
wide_geneX_imput2 <- as.data.frame(wide_aux$time)
#vector with gene expression gene data to enter for the imputation
for (i in names(wide_aux)) {
v_aux <- as.vector(wide_aux[[i]])
index <- max(which(!is.na(v_aux))) +1
if (index <= 7) {
v_aux <- v_aux[-c(index:7)]
imput2_aux <- na_interpolation(v_aux, option = "linear")
imput2_aux[c(index:7)] <- NA
}
else{
imput2_aux <- na_interpolation(v_aux, option = "linear")
}
wide_geneX_imput2[,i] <- as.data.frame(imput2_aux)
}
wide_geneX_imput2[,1] <- NULL
long_geneX_imput2 <- gather(wide_geneX_imput2, patient, geneX ,"37134":"34912227")
new_imput2_geneX <- long_geneX_imput2 %>% spread(time, geneX) #wide
new_imput2_geneX <- new_imput2_geneX[ order(match(new_imput2_geneX$patient,
new_imput_geneX$patient)), ]
ls_imput[[NamesList[j]]] <-new_imput2_geneX #list which contains the dataframes with imputation for each gene
}
#how many patients have no missing values in each time point after this imputation
ls_imput_complete <- lapply(ls_imput, na.omit)
#how many patients have no missing values until T7 after this imputation
ls_imput_T7 <- lapply(ls_imput, function(x) { x[6:8] <- NULL; x })
ls_imput_T7 <- lapply(ls_imput_T7, na.omit)
|
73b03cb3e0129e53a3ec2affc49ebd38224ca492
|
33021203bc03720616f604399d3f34bbfd06064b
|
/man/rx_count.Rd
|
17868d2f99ee02a06e7fb86c19a43d640ad9b915
|
[
"MIT"
] |
permissive
|
mervynakash/RVerbalExpressions
|
102d0b21efe68056eb532254d6944b7ae218a5c8
|
5a1da4057e624ac1cefb559a82936f1aa43e7afa
|
refs/heads/master
| 2020-04-28T05:49:15.660582
| 2019-03-11T15:28:49
| 2019-03-11T15:28:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 940
|
rd
|
rx_count.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/count.R
\name{rx_count}
\alias{rx_count}
\title{Match the previous expression an exact number of times.}
\usage{
rx_count(.data = NULL, n = 1)
}
\arguments{
\item{.data}{Expression to append, typically pulled from the pipe \code{ \%>\% }}
\item{n}{Number of times the previous expression shall be repeated.
For an exact number of repetitions, use a single number. Use a sequence \code{min:max} or a vector \code{c(min, max)} to denote a range of repetitions.
To create ranges unbounded on one end, pass a vector whose first or second element is \code{NA}, e.g. \code{c(NA, 3)} for
up to 3 repetitions.}
}
\description{
This function simply adds a \code{{n}} to the end of the expression.
}
\examples{
rx_count()
# create an expression
x <- rx_find(value = "a") \%>\%
rx_count(3)
# create input
input <- "aaa"
# extract match
regmatches(input, regexpr(x, input))
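# range example (illustrative, using the c(min, max) form documented above)
y <- rx_find(value = "a") \%>\%
  rx_count(c(2, 4))
grepl(y, "aaa")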
}
|
a8ee6740f5779960deb5089ff7d05954ba04b533
|
202fb2f3a908b0c002ef6859275a617ffa6a51e8
|
/man/combinedBenchmark.Rd
|
6fe4c7a13faeadde5958641ecb9edb37aef16fb1
|
[] |
no_license
|
jdestefani/MM4Benchmark
|
ad68d68a000dc879be2bcb91acb0bf4aaa48cc9f
|
bb8f185ce984121b084d109912e7df1566d7fb26
|
refs/heads/master
| 2023-09-04T17:58:34.312342
| 2021-11-20T21:30:09
| 2021-11-20T21:30:09
| 364,299,203
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 905
|
rd
|
combinedBenchmark.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/benchmarks.R
\name{combinedBenchmark}
\alias{combinedBenchmark}
\title{combinedBenchmark}
\usage{
combinedBenchmark(input, h, level = c(80, 95))
}
\arguments{
\item{input}{\itemize{
\item Input time series (numeric vector)
}}
\item{h}{\itemize{
\item Forecasting horizon (numeric scalar)
}}
\item{level}{\itemize{
\item Numeric vector (length 2) containing the upper and lower bound for interval forecasting
}}
}
\value{
h-step forecast for the combined forecasting method (numeric vector - length h)
}
\description{
Auxiliary function for the combined forecasting method.
From \url{https://github.com/M4Competition/M4-methods/blob/master/Benchmarks\%20and\%20Evaluation.R}
}
\examples{
x <- AirPassengers
splitting_point <- round(2*length(x)/3)
x_train <- x[1:splitting_point]
h <- 5
x_hat <- combinedBenchmark(x_train,h)
}
|
202de968b52e904494274d31e2d81be3dc9cd8e1
|
3033385be447c8f734884a6b2782028eef7b26f8
|
/teaching/Turkey2018/R_Programming/exercises/tests.R
|
37afdaa42f722533fa8bc9029121f9567f1a43d5
|
[] |
no_license
|
hturner/website
|
dbb1560f382632deddf23b2b54e00f89ca2e3c1c
|
54d177cc7cd370f7f4923bda75a381380ba65b25
|
refs/heads/master
| 2023-01-13T23:56:41.588597
| 2023-01-06T09:14:38
| 2023-01-06T09:14:38
| 134,763,162
| 2
| 1
| null | 2023-01-07T19:26:14
| 2018-05-24T20:03:57
|
HTML
|
UTF-8
|
R
| false
| false
| 86
|
r
|
tests.R
|
context("qq works correctly")
test_that("works correctly for standard normal", {
})
|
4f38d460fd36d489bae540718f105218081c0cf4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rsem/examples/rsem.ssq.Rd.R
|
80b8aaa84a0e34f859d32fd28d984ac41a45eef9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
r
|
rsem.ssq.Rd.R
|
library(rsem)
### Name: rsem.ssq
### Title: Calculate the squared sum of a matrix
### Aliases: rsem.ssq
### ** Examples
x<-array(1:6, c(2,3))
rsem.ssq(x)
|
57489ce12491223b0e36b54add54b0cd06c01c95
|
73f00dea0c368722f1b7e60224dc3d9a03f5a3ab
|
/server/services/analysis/analysis.R
|
3d677398816fa8a6bdb455ed41b10c2973dcce58
|
[] |
no_license
|
CBIIT/nci-webtools-ccr-methylscape
|
9e61a533de65157bea8de87180d16c891b63a134
|
555883ee5f79ceff64f450340b11dd8ea7a3394d
|
refs/heads/master
| 2023-09-04T00:17:17.989412
| 2023-07-13T19:14:14
| 2023-07-13T19:14:14
| 185,189,232
| 3
| 2
| null | 2023-07-20T14:07:01
| 2019-05-06T12:08:16
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,092
|
r
|
analysis.R
|
getSurvivalData <- function(data) {
survivalFormula <- survival::Surv(overallSurvivalMonths, overallSurvivalStatus) ~ group
survivalCurves <- survminer::surv_fit(survivalFormula, data = data)
survivalDataTable <- survminer::surv_summary(survivalCurves, data)
# create survival summary table for n.risk at each time point
survivalSummaryTimes <- survminer:::.get_default_breaks(survivalCurves$time)
survivalSummary <- summary(survivalCurves, times = survivalSummaryTimes, extend = T)
survivalSummaryTable <- tibble::tibble(
time = survivalSummary$time,
n.risk = survivalSummary$n.risk,
strata = survivalSummary$strata
)
# widen summary table across all strata
if (!is.null(survivalSummaryTable$strata)) {
survivalSummaryTable <- tidyr::pivot_wider(
survivalSummaryTable,
names_from = "strata",
values_from="n.risk"
)
}
pValue <- survminer::surv_pvalue(survivalCurves)
list(
data = survivalDataTable,
summary = survivalSummaryTable,
pValue = pValue
)
}
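# Hypothetical usage sketch (not part of the service; the column names follow
# the survival formula above, and survival::lung is stand-in data):
# demo <- data.frame(
#   overallSurvivalMonths = survival::lung$time / 30.44,  # days -> months
#   overallSurvivalStatus = survival::lung$status - 1,    # 0 = censored, 1 = dead
#   group = factor(survival::lung$sex, labels = c("male", "female"))
# )
# getSurvivalData(demo)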
|
e73f21e6ab028666bfef1dd304e992efbade5e4d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CMC/examples/alpha.cronbach.Rd.R
|
66f5f5b29f998bfa2b72fd1f976cfb63b302f4aa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
alpha.cronbach.Rd.R
|
library(CMC)
### Name: alpha.cronbach
### Title: Cronbach reliability coefficient alpha
### Aliases: alpha.cronbach
### Keywords: package
### ** Examples
data(cain)
out = alpha.cronbach(cain)
out
|
1b28e8e3f1cb5cea25f76a5bd861d5533c39b469
|
2b27b35fa70d7faa126ed740589b29af3e1b398f
|
/src/main/R/thesis/plot/number-of-classes-tables.R
|
1764476c2b63e8d861d1e3ecd2ef44b78e6a614b
|
[] |
no_license
|
Bjorn48/cubtg-es-evaluation-processing
|
6457294fac234083912da2a3cc6c6f0966742bef
|
51568606a4fa5303717b214cec76cd8f8e3e0629
|
refs/heads/master
| 2020-12-18T15:03:09.561418
| 2020-05-27T11:38:33
| 2020-05-27T11:38:33
| 235,430,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,846
|
r
|
number-of-classes-tables.R
|
library(tidyverse)
library(effsize)
library(xtable)
confNames <- c()
confNames[1] <- "fit_def_sec_def"
confNames[2] <- "fit_def_sec_max"
confNames[3] <- "fit_def_sec_min"
confNames[4] <- "fit_max_min_sec_def"
confNames[5] <- "fit_max_sec_max"
confNames[6] <- "fit_min_sec_min"
confNames[7] <- "nsgaii_max"
confNames[8] <- "nsgaii_min"
confDispNames <- c()
confDispNames[1] <- "f_def_s_def"
confDispNames[2] <- "f_def_s_max"
confDispNames[3] <- "f_def_s_min"
confDispNames[4] <- "f_max_max_s_def"
confDispNames[5] <- "f_max_s_max"
confDispNames[6] <- "f_min_s_min"
confDispNames[7] <- "nsgaii_max"
confDispNames[8] <- "nsgaii_min"
outFolder <- "r-output/latex-tables/number-of-classes/"
computeMetrics <- function(stats, conf1, conf2) {
compareConf <- function (statsPerClass, conf1, conf2) {
result <- statsPerClass %>% group_modify((function(rows, key) {
onlyConf1<- rows %>% filter(conf == conf1)
onlyConf1Cov <- onlyConf1$val
onlyConf2 <- rows %>% filter(conf == conf2)
onlyConf2Cov <- onlyConf2$val
if (length(onlyConf1Cov) == 0 | length(onlyConf2Cov) == 0) {
testResult <- NA
effectSizeNum <- NA
}
else {
testResultFull <- wilcox.test(onlyConf1Cov, onlyConf2Cov)
testResult <- testResultFull$p.value
effectSizeFull <- cliff.delta(onlyConf1Cov, onlyConf2Cov)
effectSizeNum <- effectSizeFull$estimate
}
result <- tibble(conf1, conf2, testResult, effectSizeNum)
return(result)
}))
return(result %>% ungroup() %>% filter(!is.na(testResult)))
}
statsByClass <- stats %>% filter(val >= 0.0) %>% group_by(class)
return(compareConf(statsByClass, conf1, conf2))
}
countClasses <- function(stats) {
counts <- tribble(~conf1, ~conf2, ~numberOfClasses)
counts <- counts %>% add_row(conf1 = confNames[1], conf2 = confNames[1], numberOfClasses = NA)
for (i in 1:length(confNames)) {
for (j in 1:length(confNames)) {
if (i == j) {
next
}
comparisonData <- computeMetrics(stats, confNames[i], confNames[j])
      counts <- counts %>%
        # nrow(): scalar count of classes with a valid comparison
        add_row(conf1 = confNames[i], conf2 = confNames[j], numberOfClasses = nrow(comparisonData))
}
}
return(counts)
}
infileTcStats <- 'r-input/data/tc-data.csv'
tcStats <- read_csv(infileTcStats) %>% rename_all(make.names)
commonalityStats <- tcStats[,c("class", "conf", "run.id", "exec.weight.cov")] %>% rename(val = exec.weight.cov)
commonalityCounts <- countClasses(commonalityStats) %>%
pivot_wider(names_from = conf2, values_from = numberOfClasses, values_fill = list(numberOfClasses=NA))
commonalityCountsTable <- xtable(commonalityCounts,
digits = c(20, 20, 20, 20, 20, 20, 20, 20, 20, 20),
display = c("s", "s", "d", "d", "d", "d", "d", "d", "d", "d"))
print.xtable(commonalityCountsTable, file = str_c(outFolder, "commonality.tex"))
tcLengthStats <- tcStats[,c("class", "conf", "run.id", "length")] %>% rename(val = length)
tcLengthCounts <- countClasses(tcLengthStats) %>%
pivot_wider(names_from = conf2, values_from = numberOfClasses, values_fill = list(numberOfClasses=NA))
tcLengthCountsTable <- xtable(tcLengthCounts,
digits = c(20, 20, 20, 20, 20, 20, 20, 20, 20, 20),
display = c("s", "s", "d", "d", "d", "d", "d", "d", "d", "d"))
print.xtable(tcLengthCountsTable, file = str_c(outFolder, "tc-length.tex"))
infileSuiteStats <- 'r-input/data/suite-data.csv'
suiteStats <- read_csv(infileSuiteStats) %>% rename_all(make.names)
pitStats <- suiteStats[,c("class", "conf", "run.id", "pit.score")] %>% rename(val = pit.score)
pitCounts <- countClasses(pitStats) %>%
pivot_wider(names_from = conf2, values_from = numberOfClasses, values_fill = list(numberOfClasses=NA))
pitCountsTable <- xtable(pitCounts,
digits = c(20, 20, 20, 20, 20, 20, 20, 20, 20, 20),
display = c("s", "s", "d", "d", "d", "d", "d", "d", "d", "d"))
print.xtable(pitCountsTable, file = str_c(outFolder, "pit.tex"))
branchStats <- suiteStats[,c("class", "conf", "run.id", "branch.coverage")] %>% rename(val = branch.coverage)
branchCounts <- countClasses(branchStats) %>%
pivot_wider(names_from = conf2, values_from = numberOfClasses, values_fill = list(numberOfClasses=NA))
branchCountsTable <- xtable(branchCounts,
digits = c(20, 20, 20, 20, 20, 20, 20, 20, 20, 20),
display = c("s", "s", "d", "d", "d", "d", "d", "d", "d", "d"))
print.xtable(branchCountsTable, file = str_c(outFolder, "branch.tex"))
suiteSizeStats <- suiteStats[,c("class", "conf", "run.id", "suite.size")] %>% rename(val = suite.size)
suiteSizeCounts <- countClasses(suiteSizeStats) %>%
pivot_wider(names_from = conf2, values_from = numberOfClasses, values_fill = list(numberOfClasses=NA))
suiteSizeCountsTable <- xtable(suiteSizeCounts,
digits = c(20, 20, 20, 20, 20, 20, 20, 20, 20, 20),
display = c("s", "s", "d", "d", "d", "d", "d", "d", "d", "d"))
print.xtable(suiteSizeCountsTable, file = str_c(outFolder, "suite-size.tex"))
numGensStats <- suiteStats[,c("class", "conf", "run.id", "num.generations")] %>% rename(val = num.generations)
numGensCounts <- countClasses(numGensStats) %>%
pivot_wider(names_from = conf2, values_from = numberOfClasses, values_fill = list(numberOfClasses=NA))
numGensCountsTable <- xtable(numGensCounts,
digits = c(20, 20, 20, 20, 20, 20, 20, 20, 20, 20),
display = c("s", "s", "d", "d", "d", "d", "d", "d", "d", "d"))
print.xtable(numGensCountsTable, file = str_c(outFolder, "num-generations.tex"))
|
d7dbab797675c7007c79b3f3160c11409eace064
|
b02b20f5fb9ac802df281389a9d963d1d0d63cb9
|
/code/Archive/Section-B_Group-3.R
|
ba2b7efed2972a95e6a83d57fb33b8b991df1589
|
[] |
no_license
|
Dharunbabu/Term2_BusinessAnalytics_Project
|
35c07c93012e91ef7dfc7be34507d22ad571e309
|
3120ddb86cf95a79c1ce92ee4b1389df5645b03c
|
refs/heads/master
| 2023-01-02T04:13:20.395429
| 2020-10-31T18:25:18
| 2020-10-31T18:25:18
| 299,040,933
| 0
| 0
| null | 2020-10-31T18:25:19
| 2020-09-27T13:42:55
|
R
|
UTF-8
|
R
| false
| false
| 34,040
|
r
|
Section-B_Group-3.R
|
#Initialising libraries - Start#
library("ggplot2")
library("car")
library("caret")
library("nortest")
library("class")
library("devtools")
library("e1071")
library("Hmisc")
library("MASS")
library("nnet")
library("plyr")
library("pROC")
library("psych")
library("scatterplot3d")
library("dplyr")
library("rpart")
library("rpart.plot")
library("randomForest")
library("neuralnet")
library("chron")
library("lubridate")
library("readxl")
#Initialising libraries - End#
#Setting up working directory & Picking the datasets - Start#
setwd("D:/GLIM/Terms folder/Term-2/Business Analytics/Final Project/Final") #Setting up the working directory
italy.master <- read_excel("Section-B_Group-3.xlsx", sheet = "Italy") #Code starts from line#37 and ends at line#144
sweden.master <- read_excel("Section-B_Group-3.xlsx", sheet = "Sweden") #Code starts from line#147 and ends at line#252
USA.master <- read_excel("Section-B_Group-3.xlsx", sheet = "USA_California") #Code starts from line#255 and ends at line#361
nz.master <- read_excel("Section-B_Group-3.xlsx", sheet = "New Zealand") #Code starts from line#363 and ends at line#477
Australia.master <- read_excel("Section-B_Group-3.xlsx", sheet = "Australia") #Code starts from line#479 and ends at line#603
india.master <- read_excel("Section-B_Group-3.xlsx", sheet = "India") #Code starts from line#605 and ends at line#710
England.master <- read_excel("Section-B_Group-3.xlsx", sheet = "England") #Code starts from line#712 and ends at line#818
#Setting up working directory & Picking the datasets - End#
###########################################################################
######################### Model for Italy - Start #########################
###########################################################################
#Assigning variables - Start#
italy.SI <- italy.master$StringencyIndex
italy.RI <- italy.master$RateOfInfection
italy.NC <- italy.master$NewCases
italy.LNC <- log(italy.NC)
italy.CC <- italy.master$ConfirmedCases
italy.LCC <- log(italy.CC)
italy.DT <- italy.master$Actual_Date
italy.CD <- italy.master$ConfirmedDeaths
italy.LCD <- log(italy.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(italy.SI)
mean(italy.RI)
sd(italy.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
italy.RI.norm <- rnorm(italy.RI)
hist(italy.RI.norm)
italy.SI.norm <- rnorm(italy.SI)
hist(italy.SI.norm)
italy.NC.norm <- rnorm(italy.NC)
hist(italy.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(italy.DT, italy.NC, colour = italy.SI, data=italy.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(1234)
model.data <- sample(2, nrow(italy.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- italy.master[model.data==1,]
test <- italy.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
italy.C1 <- italy.master$`C1_School closing`
italy.C2 <- italy.master$`C2_Workplace closing`
italy.C3 <- italy.master$`C3_Cancel public events`
italy.C4 <- italy.master$`C4_Restrictions on gatherings`
italy.C5 <- italy.master$`C5_Close public transport`
italy.C6 <- italy.master$`C6_Stay at home requirements`
italy.C7 <- italy.master$`C7_Restrictions on internal movement`
italy.C8 <- italy.master$C8_International_travel_controls
italy.H1 <- italy.master$`H1_Public information campaigns`
italy.C1.flag <- italy.master$C1_Flag
italy.C2.flag <- italy.master$C2_Flag
italy.C3.flag <- italy.master$C3_Flag
italy.C4.flag <- italy.master$C4_Flag
italy.C5.flag <- italy.master$C5_Flag
italy.C6.flag <- italy.master$C6_Flag
italy.C7.flag <- italy.master$C7_Flag
italy.H1.flag <- italy.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
#Reference columns by name so that lm() actually fits on `train` and predict() can score `test`
Linear_1 <- StringencyIndex ~ `C1_School closing` + `C2_Workplace closing` + `C3_Cancel public events` +
  `C4_Restrictions on gatherings` + `C5_Close public transport` + `C6_Stay at home requirements` +
  `C7_Restrictions on internal movement` + C8_International_travel_controls + C1_Flag + C2_Flag +
  C3_Flag + C4_Flag + C5_Flag + C6_Flag + C7_Flag + H1_Flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- StringencyIndex ~ `C2_Workplace closing` + `C4_Restrictions on gatherings` +
  `C7_Restrictions on internal movement` + C8_International_travel_controls +
  C1_Flag + C5_Flag + C6_Flag + H1_Flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
#Checking the fit of the model - End of iteration-2#
#Finding the MSE value - Start#
Pred <- predict(OLS_2,test)
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the effect of lockdown (Stringency Index) on new cases - Start#
qplot(italy.DT, italy.SI, colour = italy.SI, data = italy.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on new cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(italy.DT, italy.LCC, colour = italy.SI, data = italy.master, geom = c("point","line"))
qplot(italy.DT, italy.CC, colour = italy.SI, data = italy.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(italy.DT, italy.LCD, colour = italy.SI, data = italy.master, geom = c("point","line"))
qplot(italy.DT, italy.CD, colour = italy.SI, data = italy.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###########################################################################
######################### Model for Italy - End ###########################
###########################################################################
###########################################################################
######################### Model for Sweden - Start ########################
###########################################################################
#Assigning variables - Start#
sweden.SI <- sweden.master$StringencyIndex
sweden.RI <- sweden.master$RateOfInfection
sweden.NC <- sweden.master$NewCases
sweden.LNC <- log(sweden.NC)
sweden.CC <- sweden.master$ConfirmedCases
sweden.LCC <- log(sweden.CC)
sweden.DT <- sweden.master$Actual_Date
sweden.CD <- sweden.master$ConfirmedDeaths
sweden.LCD <- log(sweden.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(sweden.SI)
mean(sweden.RI)
sd(sweden.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
sweden.RI.norm <- rnorm(sweden.RI)
hist(sweden.RI.norm)
sweden.SI.norm <- rnorm(sweden.SI)
hist(sweden.SI.norm)
sweden.NC.norm <- rnorm(sweden.NC)
hist(sweden.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(sweden.DT, sweden.NC, colour = sweden.SI, data=sweden.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(1234)
model.data <- sample(2, nrow(sweden.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- sweden.master[model.data==1,]
test <- sweden.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
sweden.C1 <- sweden.master$`C1_School closing`
sweden.C2 <- sweden.master$`C2_Workplace closing`
sweden.C3 <- sweden.master$`C3_Cancel public events`
sweden.C4 <- sweden.master$`C4_Restrictions on gatherings`
sweden.C5 <- sweden.master$`C5_Close public transport`
sweden.C6 <- sweden.master$`C6_Stay at home requirements`
sweden.C7 <- sweden.master$`C7_Restrictions on internal movement`
sweden.C8 <- sweden.master$C8_International_travel_controls
sweden.H1 <- sweden.master$`H1_Public information campaigns`
sweden.C1.flag <- sweden.master$C1_Flag
sweden.C2.flag <- sweden.master$C2_Flag
sweden.C3.flag <- sweden.master$C3_Flag
sweden.C4.flag <- sweden.master$C4_Flag
sweden.C5.flag <- sweden.master$C5_Flag
sweden.C6.flag <- sweden.master$C6_Flag
sweden.C7.flag <- sweden.master$C7_Flag
sweden.H1.flag <- sweden.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
Linear_1 <- sweden.master$StringencyIndex~sweden.C1+sweden.C2+sweden.C3+sweden.C4+sweden.C5+sweden.C6+sweden.C7+
sweden.C8+sweden.H1+sweden.C1.flag+sweden.C2.flag+sweden.C3.flag+sweden.C4.flag+sweden.C5.flag+
sweden.C6.flag+sweden.C7.flag+sweden.H1.flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
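#Iteration-2 keeps only the predictors that looked significant in iteration-1#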
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- sweden.master$StringencyIndex~sweden.C1+sweden.C2+sweden.C1.flag+sweden.C4.flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
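#Rule of thumb: VIF values above ~5-10 flag problematic multicollinearity#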
#Checking the fit of the model - End of iteration-2#
#Finding the MSE value - Start#
Pred <- predict(OLS_2,test)
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
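RMSE <- sqrt(MSE) #optional: RMSE is on the original Stringency Index scale
RMSE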
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the lockdown stringency (Stringency Index) over time - Start#
qplot(sweden.DT, sweden.SI, colour = sweden.SI, data = sweden.master, geom = c("point","line"))
#Visualizing the lockdown stringency (Stringency Index) over time - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(sweden.DT, sweden.LCC, colour = sweden.SI, data = sweden.master, geom = c("point","line"))
qplot(sweden.DT, sweden.CC, colour = sweden.SI, data = sweden.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(sweden.DT, sweden.LCD, colour = sweden.SI, data = sweden.master, geom = c("point","line"))
qplot(sweden.DT, sweden.CD, colour = sweden.SI, data = sweden.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###########################################################################
######################### Model for Sweden - End ##########################
###########################################################################
###########################################################################
######################### Model for USA-California - Start ################
###########################################################################
#Assigning variables - Start#
USA.SI <- USA.master$StringencyIndex
USA.RI <- USA.master$RateOfInfection
USA.NC <- USA.master$NewCases
USA.LNC <- log(USA.NC)
USA.CC <- USA.master$ConfirmedCases
USA.LCC <- log(USA.CC)
USA.DT <- USA.master$Actual_Date
USA.CD <- USA.master$ConfirmedDeaths
USA.LCD <- log(USA.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(USA.SI)
mean(USA.RI)
sd(USA.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
USA.RI.norm <- scale(USA.RI)
hist(USA.RI.norm)
USA.SI.norm <- scale(USA.SI)
hist(USA.SI.norm)
USA.NC.norm <- scale(USA.NC)
hist(USA.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(USA.DT, USA.NC, colour = USA.SI, data=USA.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(143)
model.data <- sample(2, nrow(USA.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- USA.master[model.data==1,]
test <- USA.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
USA.C1 <- USA.master$`C1_School closing`
USA.C2 <- USA.master$`C2_Workplace closing`
USA.C3 <- USA.master$`C3_Cancel public events`
USA.C4 <- USA.master$`C4_Restrictions on gatherings`
USA.C5 <- USA.master$`C5_Close public transport`
USA.C6 <- USA.master$`C6_Stay at home requirements`
USA.C7 <- USA.master$`C7_Restrictions on internal movement`
USA.C8 <- USA.master$C8_International_travel_controls
USA.H1 <- USA.master$`H1_Public information campaigns`
USA.C1.flag <- USA.master$C1_Flag
USA.C2.flag <- USA.master$C2_Flag
USA.C3.flag <- USA.master$C3_Flag
USA.C4.flag <- USA.master$C4_Flag
USA.C5.flag <- USA.master$C5_Flag
USA.C6.flag <- USA.master$C6_Flag
USA.C7.flag <- USA.master$C7_Flag
USA.H1.flag <- USA.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
Linear_1 <- USA.master$StringencyIndex~USA.C1+USA.C2+USA.C3+USA.C4+USA.C5+USA.C6+USA.C7+
USA.C8+USA.H1+USA.C1.flag+USA.C2.flag+USA.C3.flag+USA.C4.flag+USA.C5.flag+
USA.C6.flag+USA.C7.flag+USA.H1.flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- USA.master$StringencyIndex~USA.C7+USA.H1+USA.C3.flag+USA.C4.flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
#Checking the fit of the model - End of iteration-2#
#Finding the MSE value - Start#
Pred <- predict(OLS_2,test)
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the lockdown stringency (Stringency Index) over time - Start#
qplot(USA.DT, USA.SI, colour = USA.SI, data = USA.master, geom = c("point","line"))
#Visualizing the lockdown stringency (Stringency Index) over time - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(USA.DT, USA.LCC, colour = USA.SI, data = USA.master, geom = c("point","line"))
qplot(USA.DT, USA.CC, colour = USA.SI, data = USA.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(USA.DT, USA.LCD, colour = USA.SI, data = USA.master, geom = c("point","line"))
qplot(USA.DT, USA.CD, colour = USA.SI, data = USA.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###################################################################################
######################### Model for USA-California - End ##########################
###################################################################################
###################################################################################
######################### Model for New Zealand - Start ###########################
###################################################################################
#Assigning variables - Start#
nz.SI <- nz.master$StringencyIndex
nz.RI <- nz.master$RateOfInfection
nz.NC <- nz.master$NewCases
nz.LNC <- log(nz.NC)
nz.CC <- nz.master$ConfirmedCases
nz.LCC <- log(nz.CC)
nz.DT <- nz.master$Actual_Date
nz.CD <- nz.master$ConfirmedDeaths
nz.LCD <- log(nz.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(nz.SI)
mean(nz.RI)
sd(nz.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
nz.RI.norm <- scale(nz.RI)
hist(nz.RI.norm)
nz.SI.norm <- scale(nz.SI)
hist(nz.SI.norm)
nz.NC.norm <- scale(nz.NC)
hist(nz.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(nz.DT, nz.NC, colour = nz.SI, data=nz.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(1234)
model.data <- sample(2, nrow(nz.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- nz.master[model.data==1,]
test <- nz.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
nz.C1 <- nz.master$`C1_School closing`
nz.C2 <- nz.master$`C2_Workplace closing`
nz.C3 <- nz.master$`C3_Cancel public events`
nz.C4 <- nz.master$`C4_Restrictions on gatherings`
nz.C5 <- nz.master$`C5_Close public transport`
nz.C6 <- nz.master$`C6_Stay at home requirements`
nz.C7 <- nz.master$`C7_Restrictions on internal movement`
nz.C8 <- nz.master$C8_International_travel_controls
nz.H1 <- nz.master$`H1_Public information campaigns`
nz.C1.flag <- nz.master$C1_Flag
nz.C2.flag <- nz.master$C2_Flag
nz.C3.flag <- nz.master$C3_Flag
nz.C4.flag <- nz.master$C4_Flag
nz.C5.flag <- nz.master$C5_Flag
nz.C6.flag <- nz.master$C6_Flag
nz.C7.flag <- nz.master$C7_Flag
nz.H1.flag <- nz.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
Linear_1 <- nz.master$StringencyIndex~nz.C1+nz.C2+nz.C3+nz.C4+nz.C5+nz.C6+nz.C7+
nz.C8+nz.H1+nz.C1.flag+nz.C2.flag+nz.C3.flag+nz.C4.flag+nz.C5.flag+
nz.C6.flag+nz.C7.flag+nz.H1.flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- nz.master$StringencyIndex~nz.C1+nz.C2+nz.C4+nz.C5+nz.C6+
nz.C8+nz.H1+nz.C1.flag+nz.C2.flag+nz.C5.flag+nz.H1.flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
#Checking the fit of the model - End of iteration-2#
#Checking the fit of the model - Start of iteration-3#
Linear_3 <- nz.master$StringencyIndex~nz.C1+nz.C4+
nz.C8+nz.H1+nz.C2.flag
OLS_3 <- lm(Linear_3, data = train)
summary(OLS_3)
vif(OLS_3)
#Checking the fit of the model - End of iteration-3#
#Finding the MSE value - Start#
Pred <- predict(OLS_3,test) #OLS_3 is the final New Zealand model; OLS_4 is not defined here
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the lockdown stringency (Stringency Index) over time - Start#
qplot(nz.DT, nz.SI, colour = nz.SI, data = nz.master, geom = c("point","line"))
#Visualizing the lockdown stringency (Stringency Index) over time - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(nz.DT, nz.LCC, colour = nz.SI, data = nz.master, geom = c("point","line"))
qplot(nz.DT, nz.CC, colour = nz.SI, data = nz.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(nz.DT, nz.LCD, colour = nz.SI, data = nz.master, geom = c("point","line"))
qplot(nz.DT, nz.CD, colour = nz.SI, data = nz.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###################################################################################
######################### Model for New Zealand - End #############################
###################################################################################
###################################################################################
######################### Model for Australia - Start #############################
###################################################################################
#Assigning variables - Start#
Australia.SI <- Australia.master$StringencyIndex
Australia.RI <- Australia.master$RateOfInfection
Australia.NC <- Australia.master$NewCases
Australia.LNC <- log(Australia.NC)
Australia.CC <- Australia.master$ConfirmedCases
Australia.LCC <- log(Australia.CC)
Australia.DT <- Australia.master$Actual_Date
Australia.CD <- Australia.master$ConfirmedDeaths
Australia.LCD <- log(Australia.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(Australia.SI)
mean(Australia.RI)
sd(Australia.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
Australia.RI.norm <- scale(Australia.RI)
hist(Australia.RI.norm)
Australia.SI.norm <- scale(Australia.SI)
hist(Australia.SI.norm)
Australia.NC.norm <- scale(Australia.NC)
hist(Australia.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(Australia.DT, Australia.NC, colour = Australia.SI, data=Australia.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(1234)
model.data <- sample(2, nrow(Australia.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- Australia.master[model.data==1,]
test <- Australia.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
Australia.C1 <- Australia.master$`C1_School closing`
Australia.C2 <- Australia.master$`C2_Workplace closing`
Australia.C3 <- Australia.master$`C3_Cancel public events`
Australia.C4 <- Australia.master$`C4_Restrictions on gatherings`
Australia.C5 <- Australia.master$`C5_Close public transport`
Australia.C6 <- Australia.master$`C6_Stay at home requirements`
Australia.C7 <- Australia.master$`C7_Restrictions on internal movement`
Australia.C8 <- Australia.master$C8_International_travel_controls
Australia.H1 <- Australia.master$`H1_Public information campaigns`
Australia.C1.flag <- Australia.master$C1_Flag
Australia.C2.flag <- Australia.master$C2_Flag
Australia.C3.flag <- Australia.master$C3_Flag
Australia.C4.flag <- Australia.master$C4_Flag
Australia.C5.flag <- Australia.master$C5_Flag
Australia.C6.flag <- Australia.master$C6_Flag
Australia.C7.flag <- Australia.master$C7_Flag
Australia.H1.flag <- Australia.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
Linear_1 <- Australia.master$StringencyIndex~Australia.C1+Australia.C2+Australia.C3+Australia.C4+Australia.C5+Australia.C6+Australia.C7+
Australia.C8+Australia.H1+Australia.C1.flag+Australia.C2.flag+Australia.C3.flag+Australia.C4.flag+Australia.C5.flag+
Australia.C6.flag+Australia.C7.flag+Australia.H1.flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- Australia.master$StringencyIndex~Australia.C1+Australia.C2+Australia.C4+Australia.C5+Australia.C6+Australia.C7+
Australia.C8+Australia.H1+Australia.C2.flag+Australia.C4.flag+
Australia.C6.flag+Australia.H1.flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
#Checking the fit of the model - End of iteration-2#
#Checking the fit of the model - Start of iteration-3#
Linear_3 <- Australia.master$StringencyIndex~Australia.C1+Australia.C5+
Australia.C8+Australia.H1+Australia.C2.flag+Australia.C4.flag+
Australia.C6.flag
OLS_3 <- lm(Linear_3, data = train)
summary(OLS_3)
vif(OLS_3)
#Checking the fit of the model - End of iteration-3#
#Checking the fit of the model - Start of iteration-4#
Linear_4 <- Australia.master$StringencyIndex~Australia.C1+Australia.C5+
Australia.C8+Australia.C2.flag+Australia.C4.flag+Australia.C6.flag
OLS_4 <- lm(Linear_4, data = train)
summary(OLS_4)
vif(OLS_4)
#Checking the fit of the model - End of iteration-4#
#Finding the MSE value - Start#
Pred <- predict(OLS_4,test)
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the lockdown stringency (Stringency Index) over time - Start#
qplot(Australia.DT, Australia.SI, colour = Australia.SI, data = Australia.master, geom = c("point","line"))
#Visualizing the lockdown stringency (Stringency Index) over time - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(Australia.DT, Australia.LCC, colour = Australia.SI, data = Australia.master, geom = c("point","line"))
qplot(Australia.DT, Australia.CC, colour = Australia.SI, data = Australia.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(Australia.DT, Australia.LCD, colour = Australia.SI, data = Australia.master, geom = c("point","line"))
qplot(Australia.DT, Australia.CD, colour = Australia.SI, data = Australia.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###################################################################################
######################### Model for Australia - End ###############################
###################################################################################
###################################################################################
######################### Model for India - Start #################################
###################################################################################
#Assigning variables - Start#
india.SI <- india.master$StringencyIndex
india.RI <- india.master$RateOfInfection
india.NC <- india.master$NewCases
india.LNC <- log(india.NC)
india.CC <- india.master$ConfirmedCases
india.LCC <- log(india.CC)
india.DT <- india.master$Actual_Date
india.CD <- india.master$ConfirmedDeaths
india.LCD <- log(india.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(india.SI)
mean(india.RI)
sd(india.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
india.RI.norm <- scale(india.RI)
hist(india.RI.norm)
india.SI.norm <- scale(india.SI)
hist(india.SI.norm)
india.NC.norm <- scale(india.NC)
hist(india.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(india.DT, india.NC, colour = india.SI, data=india.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(5698547)
model.data <- sample(2, nrow(india.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- india.master[model.data==1,]
test <- india.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
india.C1 <- india.master$`C1_School closing`
india.C2 <- india.master$`C2_Workplace closing`
india.C3 <- india.master$`C3_Cancel public events`
india.C4 <- india.master$`C4_Restrictions on gatherings`
india.C5 <- india.master$`C5_Close public transport`
india.C6 <- india.master$`C6_Stay at home requirements`
india.C7 <- india.master$`C7_Restrictions on internal movement`
india.C8 <- india.master$C8_International_travel_controls
india.H1 <- india.master$`H1_Public information campaigns`
india.C1.flag <- india.master$C1_Flag
india.C2.flag <- india.master$C2_Flag
india.C3.flag <- india.master$C3_Flag
india.C4.flag <- india.master$C4_Flag
india.C5.flag <- india.master$C5_Flag
india.C6.flag <- india.master$C6_Flag
india.C7.flag <- india.master$C7_Flag
india.H1.flag <- india.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
Linear_1 <- india.master$StringencyIndex~india.C1+india.C2+india.C3+india.C4+india.C5+india.C6+india.C7+
india.C8+india.H1+india.C1.flag+india.C2.flag+india.C3.flag+india.C4.flag+india.C5.flag+
india.C6.flag+india.C7.flag+india.H1.flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- india.master$StringencyIndex~india.C1+india.C2+india.C5+india.C6+india.C2.flag+india.C3.flag+india.C4.flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
#Checking the fit of the model - End of iteration-2#
#Finding the MSE value - Start#
Pred <- predict(OLS_2,test)
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the lockdown stringency (Stringency Index) over time - Start#
qplot(india.DT, india.SI, colour = india.SI, data = india.master, geom = c("point","line"))
#Visualizing the lockdown stringency (Stringency Index) over time - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(india.DT, india.LCC, colour = india.SI, data = india.master, geom = c("point","line"))
qplot(india.DT, india.CC, colour = india.SI, data = india.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(india.DT, india.LCD, colour = india.SI, data = india.master, geom = c("point","line"))
qplot(india.DT, india.CD, colour = india.SI, data = india.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###################################################################################
######################### Model for India - End ###################################
###################################################################################
###################################################################################
######################### Model for England - Start ###############################
###################################################################################
#Assigning variables - Start#
England.SI <- England.master$StringencyIndex
England.RI <- England.master$RateOfInfection
England.NC <- England.master$NewCases
England.LNC <- log(England.NC)
England.CC <- England.master$ConfirmedCases
England.LCC <- log(England.CC)
England.DT <- England.master$Actual_Date
England.CD <- England.master$ConfirmedDeaths
England.LCD <- log(England.CD)
#Assigning variables - End#
#Interpreting Mean Value - Start#
mean(England.SI)
mean(England.RI)
sd(England.SI)
#Interpreting Mean Value - End#
#Normalization & Checking for normal distribution - Start#
England.RI.norm <- scale(England.RI)
hist(England.RI.norm)
England.SI.norm <- scale(England.SI)
hist(England.SI.norm)
England.NC.norm <- scale(England.NC)
hist(England.NC.norm)
#Normalization & Checking for normal distribution - End#
#Identifying relation between dates & New cases - Start#
qplot(England.DT, England.NC, colour = England.SI, data=England.master, geom = c("point","line"))
#Identifying relation between dates & New cases - End#
#Train & test model for multi-linear regression - Start#
set.seed(143)
model.data <- sample(2, nrow(England.master), replace=TRUE, prob = c(0.7,0.3))
#Splitting the data into train & test - Start#
train <- England.master[model.data==1,]
test <- England.master[model.data==2,]
#Splitting the data into train & test - End#
#Assigning variables - Start#
England.C1 <- England.master$`C1_School closing`
England.C2 <- England.master$`C2_Workplace closing`
England.C3 <- England.master$`C3_Cancel public events`
England.C4 <- England.master$`C4_Restrictions on gatherings`
England.C5 <- England.master$`C5_Close public transport`
England.C6 <- England.master$`C6_Stay at home requirements`
England.C7 <- England.master$`C7_Restrictions on internal movement`
England.C8 <- England.master$C8_International_travel_controls
England.H1 <- England.master$`H1_Public information campaigns`
England.C1.flag <- England.master$C1_Flag
England.C2.flag <- England.master$C2_Flag
England.C3.flag <- England.master$C3_Flag
England.C4.flag <- England.master$C4_Flag
England.C5.flag <- England.master$C5_Flag
England.C6.flag <- England.master$C6_Flag
England.C7.flag <- England.master$C7_Flag
England.H1.flag <- England.master$H1_Flag
#Assigning variables - End#
#Checking the fit of the model - Start of iteration-1#
Linear_1 <- England.master$StringencyIndex~England.C1+England.C2+England.C3+England.C4+England.C5+England.C6+England.C7+
England.C8+England.H1+England.C1.flag+England.C2.flag+England.C3.flag+England.C4.flag+England.C5.flag+
England.C6.flag+England.C7.flag+England.H1.flag
OLS_1 <- lm(Linear_1, data = train)
summary(OLS_1)
#Checking the fit of the model - End of iteration-1#
#Checking the fit of the model - Start of iteration-2#
Linear_2 <- England.master$StringencyIndex~England.C6+England.C7+
England.C8+England.H1+England.C1.flag+England.C2.flag+England.C4.flag
OLS_2 <- lm(Linear_2, data = train)
summary(OLS_2)
vif(OLS_2)
#Checking the fit of the model - End of iteration-2#
#Finding the MSE value - Start#
Pred <- predict(OLS_2,test)
MSE <- mean((Pred-test$StringencyIndex)^2)
MSE
#Finding the MSE value - End#
#Train & test model for multi-linear regression - End#
#Visualizing the lockdown stringency (Stringency Index) over time - Start#
qplot(England.DT, England.SI, colour = England.SI, data = England.master, geom = c("point","line"))
#Visualizing the lockdown stringency (Stringency Index) over time - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - Start#
qplot(England.DT, England.LCC, colour = England.SI, data = England.master, geom = c("point","line"))
qplot(England.DT, England.CC, colour = England.SI, data = England.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed cases - End#
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - Start#
qplot(England.DT, England.LCD, colour = England.SI, data = England.master, geom = c("point","line"))
qplot(England.DT, England.CD, colour = England.SI, data = England.master, geom = c("point","line"))
#Visualizing the effect of lockdown (Stringency Index) on confirmed deaths - End#
###################################################################################
######################### Model for England - End #################################
###################################################################################
|
04129f6186546fe427419c44348441719b453995
|
dbc2af76893a0b669f2d9a032980c2111bfbc4d5
|
/R/individualize.R
|
5280a9800b4caef6ee08d41d7778fd9b2a74e54b
|
[
"MIT"
] |
permissive
|
thomasblanchet/gpinter
|
e974de36c0efd4c8070fb9b8cc0311bb10c356df
|
0ce91dd088f2e066c7021b297f0ec3cecade2072
|
refs/heads/master
| 2022-11-28T11:18:10.537146
| 2022-11-22T16:22:40
| 2022-11-22T16:22:40
| 72,655,645
| 19
| 5
| null | 2017-04-19T08:25:44
| 2016-11-02T15:51:21
|
R
|
UTF-8
|
R
| false
| false
| 5,371
|
r
|
individualize.R
|
#' @title Individualize a distribution
#'
#' @author Thomas Blanchet, Juliette Fournier, Thomas Piketty
#'
#' @description Individualize the distribution (of income or wealth) under
#' equal splitting among spouses, given the share of couples or singles
#' at different points of the distribution.
#'
#' @param dist An object of class \code{gpinter_dist_orig}.
#' @param p A vector of fractiles in [0, 1].
#' @param singleshare The overall share of singles.
#' @param coupleshare The overall share of couples.
#' @param singletop A vector with the same length as \code{p}: the share
#' of singles in the top 100*(1 - p)\%.
#' @param coupletop A vector with the same length as \code{p}: the share
#' of couples in the top 100*(1 - p)\%.
#' @param singlebracket A vector with the same length as \code{p}: the share
#' of singles in the matching bracket.
#' @param couplebracket A vector with the same length as \code{p}: the share
#' of couples in the matching bracket.
#' @param ratio A vector with the same length as \code{p}: the ratio of
#' singles average income over couples average income in each bracket. Default
#' is 1 for all brackets.
#'
#' @return An object of class \code{gpinter_dist_indiv}.
#'
#' @export
individualize_dist <- function(dist, p, singleshare=NULL, coupleshare=NULL,
singletop=NULL, coupletop=NULL,
singlebracket=NULL, couplebracket=NULL,
ratio=NULL) {
# Check the class of input distribution
if (!is(dist, "gpinter_dist_orig")) {
stop("'dist' objects must be of class 'gpinter_dist_orig'")
}
if (any(p < 0) || any(p >= 1)) {
stop("'p' must be between 0 and 1.")
}
# Order inputs
ord <- order(p)
p <- p[ord]
if (!is.null(singleshare) && !is.na(singleshare)) {
m <- 1 - singleshare
} else if (!is.null(coupleshare) && !is.na(coupleshare)) {
m <- coupleshare
} else if (p[1] != 0) {
stop("You must either specify 'singleshare' or 'coupleshare' if min(p) != 0.")
}
has_zero <- (p[1] == 0)
if (!is.null(singletop)) {
singletop <- singletop[ord]
ck <- 1 - singletop
ck <- (1 - p)*ck
if (p[1] != 0) {
p <- c(0, p)
ck <- c(m, ck)
}
ck <- -diff(c(ck, 0))/diff(c(p, 1))
} else if (!is.null(coupletop)) {
coupletop <- coupletop[ord]
ck <- coupletop
ck <- (1 - p)*ck
if (p[1] != 0) {
p <- c(0, p)
ck <- c(m, ck)
}
ck <- -diff(c(ck, 0))/diff(c(p, 1))
} else if (!is.null(singlebracket)) {
singlebracket <- singlebracket[ord]
ck <- 1 - singlebracket
if (p[1] != 0) {
ck <- c((m - sum(ck*diff(c(p, 1))))/p[1], ck)
p <- c(0, p)
}
} else if (!is.null(couplebracket)) {
couplebracket <- couplebracket[ord]
ck <- couplebracket
if (p[1] != 0) {
ck <- c((m - sum(ck*diff(c(p, 1))))/p[1], ck)
p <- c(0, p)
}
} else {
stop("You must specify one of 'singletop', 'coupletop', 'singlebracket' or 'couplebracket'.")
}
# Re-calculate m in case it was not specified
m <- sum(ck*diff(c(p, 1)))
if (any(ck >= 1) || any(ck < 0)) {
stop("The share of couples must be between 0 and 1.")
}
# Make a tabulation for singles and the couples
p_singles <- c(0, cumsum((1 - ck)*diff(c(p, 1))/(1 - m)))[1:length(p)]
p_couples <- c(0, cumsum(ck*diff(c(p, 1))/m))[1:length(p)]
thresholds <- fitted_quantile(dist, p)
if (is.null(ratio)) {
ratio <- rep(1, length(p))
}
if (length(ratio) != length(p) || any(ratio <= 0)) {
stop("invalid 'ratio'")
}
bracketavg <- bracket_average(dist, p, c(p[-1], 1))
bracketavg_singles <- bracketavg/(1 - ck + ck/ratio)
bracketavg_couples <- bracketavg/(ratio*(1 - ck) + ck)
average_singles <- sum(diff(c(p_singles, 1))*bracketavg_singles)
average_couples <- sum(diff(c(p_couples, 1))*bracketavg_couples)
# Interpolate the distribution of singles and couples
if (has_zero) {
dist_singles <- tabulation_fit(p_singles, thresholds, average_singles, bracketavg=bracketavg_singles)
dist_couples <- tabulation_fit(p_couples, thresholds, average_couples, bracketavg=bracketavg_couples)
} else {
dist_singles <- tabulation_fit(p_singles[-1], thresholds[-1], average_singles, bracketavg=bracketavg_singles[-1])
dist_couples <- tabulation_fit(p_couples[-1], thresholds[-1], average_couples, bracketavg=bracketavg_couples[-1])
}
# Return an object with the parent distribution and the interpolated couple
# share
new_dist <- list()
class(new_dist) <- c("gpinter_dist_indiv", "gpinter_dist")
new_dist$singles <- list(
dist = dist_singles,
average = average_singles,
pk = p_singles,
threshold = thresholds,
bracketavg = bracketavg_singles
)
new_dist$couples <- list(
dist = dist_couples,
average = average_couples,
pk = p_couples,
thresholds = thresholds,
bracketavg = bracketavg_couples
)
new_dist$average <- dist$average/(1 + m)
new_dist$couple_share <- m
new_dist$pk <- p
new_dist$ck <- ck
return(new_dist)
}
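# A minimal usage sketch (hypothetical numbers; 'tabulation_fit' is the
# interpolation entry point defined elsewhere in this package):
# dist <- tabulation_fit(p = c(0.1, 0.5, 0.9),
#                        threshold = c(1e4, 3e4, 1e5),
#                        average = 48500,
#                        bracketavg = c(2e4, 5e4, 2e5))
# indiv <- individualize_dist(dist, p = c(0.1, 0.5, 0.9),
#                             coupleshare = 0.6,
#                             couplebracket = c(0.5, 0.65, 0.7))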
|
9c83d2d546b6c258a261c783dad95d1157033aad
|
680e46f9c5c067aa7e909f50b76642e7015ce458
|
/Multi-Variable Analysis/MVA W2.R
|
df080c67f074a45557b5869d20f6c45bf469d24c
|
[] |
no_license
|
tharindupr/Stats-in-R
|
369aaf62cb944c17550626f94b820bfb38ca8552
|
30a872b2922d6000d54d3d9f1782ff43a4c92f0c
|
refs/heads/master
| 2022-04-21T21:19:20.386659
| 2020-04-26T04:47:53
| 2020-04-26T04:47:53
| 256,406,914
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,029
|
r
|
MVA W2.R
|
#Repeated Measures
library(readxl)
Repeated <- read_excel("C:/Stats in R/Workshop 5/W5 Datasets V2.xlsx", sheet = "Repeated")
View(Repeated)
#1) Check the data properties , missing values etc
#### Append a subject ID.... from Workshop 03
(n<-dim(Repeated)[1]) # sample size (number of rows)
Patient<-seq(1:n)
library(dplyr)
Repeated<-mutate(Repeated,Patient) #attach the Patient ID to the data frame
##### Ordering Variables (if desired): move the Patient ID to the first column
(cn<-dim(Repeated)[2])
Repeated<-Repeated[,c(cn,1:(cn-1))] #note: 1:cn-1 would parse as (1:cn)-1
##########################Note: the paired data contain missing values; handling them properly is important
######Select data of interest###### df,pid,
Rep<-select(Repeated,Patient,Baseline = `Oral condition at the initial stage`,
"Week 02"=`Oral condition at the end of week 02`,
"Week 04"= `Oral condition at the end of week 04`,
"Week 06" = `Oral condition at the end of week 06`)
View(Rep)
######Convert to long format
library(tidyr)
(Long<-gather(Rep,Time,Oral,2:5))
####First Explore the data
###Step 01: Check properties
is.factor(Long$Time)
Long$Time<-factor(Long$Time,levels = c("Baseline","Week 02","Week 04", "Week 06")) #Specify order of levels
is.numeric(Long$Oral)
###Step 02: numerical descriptive statistics
#the next summarise won't work because of missing data (quantile() rejects NAs without na.rm)
(Stats<-Long %>% group_by(Time) %>% summarise("Sample Size"=n(), "Mean"=mean(Oral),
"Standard deviation"=sd(Oral),
"Median"=median(Oral),
"1st quartile"=quantile(Oral,0.25),
"3rd quartile"=quantile(Oral,0.75),
"Min" =min(Oral),
"Max" = max(Oral)))
View(Stats)
###locating missing value patients .... from workshop 03
Long[!complete.cases(Long),]
### Two options
# 1. per protocol (pp) analysis: remove patients from study and complete analysis with 23 patients
#Long23<-na.omit(Long) #remove patients with missing data
#2. Intention to Treat (ITT) analysis: impute data values for patients with
#missing values - e.g., LOCF
Long$Oral[Long$Patient==22 & Long$Time =="Week 06"]<-Long$Oral[Long$Patient==22 & Long$Time == "Week 04"]
Long$Oral[Long$Patient==24 & Long$Time =="Week 06"]<-Long$Oral[Long$Patient==24 & Long$Time == "Week 04"]
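#A more general LOCF alternative (sketch; assumes rows are ordered by Time within Patient):
Long <- Long %>% arrange(Patient, Time) %>% group_by(Patient) %>%
  fill(Oral, .direction = "down") %>% ungroup()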
#now return the numerical descriptive statistics (ITT analysis)
(Stats<-Long %>% group_by(Time) %>% summarise("Sample Size"=n(), "Mean"=mean(Oral),
"Standard deviation"=sd(Oral),
"Median"=median(Oral),
"1st quartile"=quantile(Oral,0.25),
"3rd quartile"=quantile(Oral,0.75),
"Min" =min(Oral),
"Max" = max(Oral)))
t(Stats)
|
7ed4df84f5fc5148b7105ade39715fc2c8f6c08b
|
4afe51bed713d0f181159088e8db5f77248852d3
|
/R/ggplot2_polarChart/ggplot2_polarChart.R
|
d2333f3726923cbd0beff0b2b074b9f179f486f9
|
[] |
no_license
|
davidfombella/RadarCharts
|
70d8a93407691093eacac0de4cbfd8713508ed9e
|
48a845a4804034b7be2e4febfecbf47ad7be88fb
|
refs/heads/master
| 2020-06-12T15:01:29.647657
| 2019-07-31T08:31:41
| 2019-07-31T08:31:41
| 194,338,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,515
|
r
|
ggplot2_polarChart.R
|
# https://www.sportschord.com/post/polar_area_charts_tutorial
# How to make the Polar Area Chart in R
# The second part of this blog will now break down how to build the Polar Area chart in R.
# This assumes a basic knowledge of R, RStudio & how to install packages.
# The full code to create a relatively unformatted Polar Area Chart is below.
# See beneath for a line by line description.
# Replace the bold, coloured metrics with your own data frame.
library(ggplot2)
#########################################
# REPLACE Metric Length Player
#########################################
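# A minimal hypothetical data frame so the code below runs as-is;
# the names Metric/Length/Player mirror the placeholders above
data <- data.frame(
  Player = rep(c("Player A", "Player B"), each = 4),
  Metric = rep(c("Passing", "Shooting", "Dribbling", "Defending"), times = 2),
  Length = c(80, 60, 70, 40, 55, 85, 65, 75)
)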
ggplot(data, aes(x=Metric, y=Length)) +
geom_col(fill = "red", alpha=1, width=1)+
geom_hline(yintercept = seq(0, 100, by = 10),
color = "grey", size = 1) +
geom_vline(xintercept = seq(.5, 16.5, by = 1),
color = "grey", size = 1) +
coord_polar()+
facet_wrap(vars(Player))
# ggplot(data, aes(x=Metric, y=Length)) +
# This line calls the ggplot2 package, binds your data to the plot & allows you to select the 'aesthetics' (aes) that will be used for the visualization.
# In this case, we want our Metric on the x-axis & Value on the y-axis.
# Remember, we are creating a column chart right up until the coord_polar command.
# Use the '+' sign to chain the lines of together.
# geom_col(fill = "red", alpha=1, width=1)+
# This calls the geom_col function, required for making our column/vertical bar chart.
# The fill argument sets the fill colour of the columns. Hex codes/RGB can be used here.
# The alpha sets the transparency (0=transparent, 1= opaque).
# The width sets the gap between the columns (0=no bar, 1= touching side by side).
# geom_hline(yintercept = seq(0, 100, by = 10),
# color = "grey", size = 1) +
# geom_vline(xintercept = seq(.5, 16.5, by = 1),
# color = "grey", size = 1) +
# These lines adds some handy grid lines to our chart.
# geom_hline sets the circular grid lines, geom_vline sets the segment boundary lines.
# seq() creates a sequence between two numbers, the 'by' argument states the gap.
# color sets the line colour.
# size sets the line width.
# coord_polar()+
# The magic happens here. Switch from Cartesian to Polar coordinates.
# facet_wrap(vars(Player))
# Use the 'facet' function to get small multiples per a particular metric. Read more on this here.
#All other formatting to the charts, such as adding titles, subtitles, background colours and boxes for the facets can be achieved in the theme().
# An example of the theme I created for my Arsenal chart can be found below.
theme(axis.text.x = element_text(size=25,colour = "#063672" ,
angle = seq(-20,-340,length.out = 9),vjust = 100),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.ticks.x = element_line(size = 2,colour = "black",linetype = 1),
legend.position = "none",
strip.background = element_rect(colour = "black", fill = "#063672"),
strip.text.x = element_text(colour = "white", face = "plain",size = 45),
panel.border = element_rect(colour = "black",size = 1,fill=NA),
panel.spacing = unit(2, "lines"),
plot.title = element_text(family = "URWGothic",size = 80),
plot.subtitle = element_text(family = "URWGothic",size = 30),
plot.caption = element_text(family = "URWGothic",size = 30),
plot.margin = unit(c(1,1,1,1), "cm"),
panel.grid = element_blank()
)
|
113ac468bfaa3c15c1685283b436e8f9ff821e73
|
6bd031055b899e2b239d6866458dbeef5257d9a5
|
/rcodes_ggplot.r
|
3abff57fba2cd81170f7e5e2075078b6cca73a55
|
[] |
no_license
|
wesenu/python
|
a93aa261d7c1c6f8f55b97b4754293af4b9c3cc4
|
2bfacaa50aeb326ad5bf25f953e72efe73b468be
|
refs/heads/master
| 2022-11-25T12:30:10.346669
| 2020-07-29T06:21:19
| 2020-07-29T06:21:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,884
|
r
|
rcodes_ggplot.r
|
library(ggplot2)
library(gridExtra)
library(data.table)
library(dplyr)
emp1=read.csv("NO.csv",header = T,na.strings = c("na","NA","")) # to read the file ("NA" quoted so it is treated as a missing-value string)
emp1$Calculated.Target.Date=as.character(emp1$Calculated.Target.Date) # to change the type of a column
emp1=as.data.table(emp1) # converting to data table
emp3=emp1[,.(Calculated.Status,IdentityType,Calculated.Target.Date)] # selecting out three columns
emp4=group_by(emp3,Calculated.Target.Date) # Grouping on basis of date
# selecting employee and contractor data sets distinctly
emp_e=filter(emp4,IdentityType=="Employee")
emp_c=filter(emp4,IdentityType=="Contractor")
emp5=summarise(emp_e,identity_type=unique(Calculated.Status),count=n()) # summarising based on calculated status
emp6=summarise(emp_c,identity_type=unique(Calculated.Status),count=n()) # summarising based on calculated status
p=ggplot(emp5, aes(Calculated.Target.Date ,identity_type,label=count,fill=count)) +geom_point()+geom_label(color="white",aes(label=count),hjust=0, vjust=1)+
geom_density(position = "stack")+ggtitle("Employee") +xlab("Date") + ylab("Identity type")+theme(
plot.title = element_text(color="red", size=14, face="bold.italic"),
axis.title.x = element_text(color="blue", size=14, face="bold"),
axis.title.y = element_text(color="#993333", size=14, face="bold")
)
q=ggplot(emp6, aes(Calculated.Target.Date ,identity_type,label=count,fill=count)) +geom_point()+geom_label(color="white",aes(label=count),hjust=0, vjust=1)+
geom_density(position = "stack")+ggtitle("Contractor") +xlab("Date") + ylab("Identity type")+theme(
plot.title = element_text(color="red", size=14, face="bold.italic"),
axis.title.x = element_text(color="blue", size=14, face="bold"),
axis.title.y = element_text(color="#993333", size=14, face="bold")
)
grid.arrange(p,q,ncol=1) # to add both plots together
|
c37ac403451436a2b238df4740911271f0fbd727
|
a96cdea181776fc2edf1cbd174f461a2631c5ae3
|
/tests/testthat/fixtures/create-files-to-attach.R
|
cd1c2fa9cf2ebe2dddcaa328e6a733507128705d
|
[] |
no_license
|
cran/gmailr
|
cf353c562e46f802db428a19ff62f00b117144f0
|
8adb49e4ae222996eb2f134b1adef586f9706c2c
|
refs/heads/master
| 2023-07-14T19:02:31.720630
| 2023-06-30T04:40:02
| 2023-06-30T04:40:02
| 22,280,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
create-files-to-attach.R
|
library(graphics)
png(
filename = testthat::test_path("fixtures", "volcano.png"),
width = 200,
height = 200
)
filled.contour(volcano, color.palette = terrain.colors, asp = 1)
dev.off()
cat("testing", file = testthat::test_path("fixtures", "test.ini"))
|
956070f6ed6056e71da249c568de6b359ed71d0a
|
c10aa9ee48265a35f4a4e03d61b8e15f32c96d5e
|
/HW 2/lab 2 supplemental code.R
|
ab1e80956aa0566c94d4735583ad19eaa18607fb
|
[] |
no_license
|
Key2-Success/Stats-20
|
e0c78f83de5e00419a4ae70b1628da6fa8ef573e
|
95e1e237d9356fd8296980ddf6ca1ae35d86156a
|
refs/heads/master
| 2021-09-01T00:51:17.584760
| 2017-12-23T23:24:25
| 2017-12-23T23:24:25
| 115,219,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 899
|
r
|
lab 2 supplemental code.R
|
library(readr)
library(microbenchmark)
library(dplyr)
flights_base <- read.csv(file = "Kitu/College/Junior Year/Fall Quarter/Stats 20/Homework/Lab 2/flights.csv")
flights_readr <- read_csv(file = "Kitu/College/Junior Year/Fall Quarter/Stats 20/Homework/Lab 2/flights.csv")
microbenchmark(
read.csv(file = "Kitu/College/Junior Year/Fall Quarter/Stats 20/Homework/Lab 2/flights.csv"),
times = 10, unit = "s"
)
microbenchmark(
read_csv(file = "Kitu/College/Junior Year/Fall Quarter/Stats 20/Homework/Lab 2/flights.csv", progress = FALSE),
times = 10, unit = "s"
)
View(flights_base)
View(flights_readr)
class(flights_base$origin)
class(flights_base$time_hour)
class(flights_readr$origin)
class(flights_readr$time_hour)
read_csv(file = "Kitu/college/Junior Year/Fall Quarter/Stats 20/Homework/Lab 2/airports.csv")
names(airports_new) <- c("faa", "lat", "lon")
|
bcbe5c10f31b16da238efa74c9191560e2d6d5a6
|
5022c971354900f3cf0ee4d0be366de3677579ab
|
/man/gen.POSSUM.Rd
|
cde69c5e927846950ed848c60051387e9dbcae62
|
[
"MIT"
] |
permissive
|
dannyjnwong/HSRC
|
0319f55d2f37257461076a5ecfd8a7decd1d5395
|
9253377d72b222739d7f69623c75877dad062205
|
refs/heads/master
| 2020-03-27T22:21:11.542762
| 2019-04-09T12:49:15
| 2019-04-09T12:49:15
| 147,223,653
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,278
|
rd
|
gen.POSSUM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen.POSSUM.R
\name{gen.POSSUM}
\alias{gen.POSSUM}
\title{A function to compute POSSUM scores}
\usage{
gen.POSSUM(x)
}
\arguments{
\item{x}{A dataframe or tbl where each row is a patient observation, and the columns are POSSUM predictor variables.
x must contain the following column names (not necessarily in order):
\describe{
\item{Age}{continuous variable, numeric or integer}
\item{JVP}{binary variable, whether the patient has raised JVP or not}
\item{Cardiomegaly}{binary variable, whether the patient has cardiomegaly on CXR or not}
\item{Oedema}{binary variable, whether the patient has peripheral oedema or not}
\item{Warfarin}{binary variable, whether the patient normally takes warfarin or not}
\item{Diuretics}{binary variable, whether the patient normally takes a diuretic medication or not}
\item{AntiAnginals}{binary variable, whether the patient normally takes anti-anginal medication or not}
\item{Digoxin}{binary variable, whether the patient normally takes digoxin or not}
\item{AntiHypertensives}{binary variable, whether the patient normally takes blood pressure meds or not}
\item{Dyspnoea}{categorical variable, can be: "Non" = None; "OME" = On exertion; "L" = Limiting activities; "AR" = At rest}
\item{Consolidation}{binary variable, whether the patient has consolidation on CXR}
\item{PulmonaryFibrosis}{binary variable, whether the patient has a history of pulmonary fibrosis or imaging findings of fibrosis}
\item{COPD}{binary variable, whether the patient has COPD or not}
\item{SysBP}{continuous variable, pre-op systolic blood pressure (in mmHg)}
\item{HR}{continuous variable, pre-op pulse/heart rate (in beats per min)}
\item{GCS}{continuous variable, pre-op Glasgow Coma Scale (3-15)}
\item{Hb}{continuous variable, pre-op Haemoglobin (in g/L), please note the units!}
\item{WCC}{continuous variable, pre-op White Cell Count (in * 10^9cells/L)}
\item{Ur}{continuous variable, pre-op Urea (in mmol/L)}
\item{Na}{continuous variable, pre-op Sodium concentration (in mmol/L)}
\item{K}{continuous variable, pre-op Potassium concentration (in mmol/L)}
\item{ECG}{categorical variable, can be "ND" = Not done; "NOR" = Normal ECG; "AF6090" = AF 60-90; "AF>90" = AF>90; "QW" = Q-waves; "4E" = >4 ectopics; "ST" = ST or T wave changes; "O" = Any other abnormal rhythm}
\item{OpSeverity}{categorical variable, the surgical severity, can be Min = Minor; Int = Intermediate; Maj = Major; Xma = Xmajor; Com = Complex}
\item{ProcedureCount}{categorical variable, number of procedures patient underwent in the last 30 days including this one, can be "1" = 1; "2" = 2; "GT2" = >2}
\item{EBL}{categorical variable, the estimated blood loss, can be "0" = 0-100ml; "101" = 101-500ml; "501" = 501-999ml; "1000" = >=1000}
\item{PeritonealContamination}{categorical variable, whether there was peritoneal soiling, can be "NA" = Not applicable; "NS" = No soiling; "MS" = Minor soiling; "LP" = Local pus; "FBC" = Free bowel content pus or blood}
\item{Malignancy}{categorical variable, whether the patient has malignant disease, can be "NM" = Not malignant; "PM" = Primary malignancy only; "MNM" = Malignancy + nodal metastases; "MDM" = Malignancy + distal metastases}
\item{OpUrgency}{categorical variable, NCEPOD classifications of urgency, can be "Ele" = Elective; "Exp" = Expedited; "U" = Urgent; "I" = Immediate}
}}
}
\value{
A dataframe (or tbl), which you can assign to an object, with the following variables:
\describe{
\item{PhysScore}{The physiological score for POSSUM}
\item{OpScore}{The operative score for POSSUM}
\item{POSSUMLogit}{The log-odds for morbidity as calculated by POSSUM}
\item{pPOSSUMLogit}{The log-odds for mortality as calculated by pPOSSUM}
}
}
\description{
This function will parse a dataframe and produce POSSUM scores to predict perioperative mortality and morbidity. To use the function, you will need to manipulate your dataframe to have columns with the structure detailed below.
}
\section{Converting to probability scale}{
The function will produce POSSUMLogit and pPOSSUMLogit values which are on the log-odds scale
To convert to probabilities (0 to 1 scale), use \code{arm::invlogit()}. See: \code{\link[arm]{invlogit}}.
}
\section{References}{
\itemize{
\item Copeland GP, Jones D, Walters M. POSSUM: A scoring system for surgical audit. Br J Surg. 1991 Mar 1;78(3):355–60. \url{http://onlinelibrary.wiley.com/doi/10.1002/bjs.1800780327/abstract}.
\item Prytherch DR, Whiteley MS, Higgins B, Weaver PC, Prout WG, Powell SJ. POSSUM and Portsmouth POSSUM for predicting mortality. Br J Surg. 1998 Sep 1;85(9):1217–20. \url{http://onlinelibrary.wiley.com/doi/10.1046/j.1365-2168.1998.00840.x/abstract}
}
}
\examples{
\dontrun{
#Example of pre-processing to rename data variables to match expected column names
library(tidyverse)
test_data <- raw_data \%>\%
select(Age = S01Age,
JVP = S03ElevatedJugularVenousPressureJvp,
Cardiomegaly = S03RadiologicalFindingsCardiomegaly,
Oedema = S03PeripheralOedema,
Warfarin = S03DrugTreatmentWarfarin,
Diuretics = S03DrugTreatmentDiureticTreatment,
AntiAnginals = S03DrugTreatmentAntiAnginal,
Digoxin = S03DrugTreatmentDigoxinTherapy,
AntiHypertensives = S03DrugTreatmentAntiHypertensive,
Dyspnoea = S03Dyspnoea,
Consolidation = S03RadiologicalFindingsConsolidation,
PulmonaryFibrosis = S03PastMedicalHistoryPulmonaryFibrosis,
COPD = S03PastMedicalHistoryCOPD,
SysBP = S03SystolicBloodPressureBpAtPreAssessment,
HR = S03PulseRateAtPreoperativeAssessment,
GCS = S03GlasgowComaScaleGcsPreInductionOfAnaesthesia,
Hb = S03Hb,
WCC = S03WhiteCellCountWcc,
Ur = S03Urea,
Na = S03Na,
K = S03K,
ECG = S03EcgFindings,
OpSeverity = S02PlannedProcSeverity,
ProcedureCount = S04ProcedureCount,
EBL = S04BloodLoss,
PeritonealContamination = S04PeritonealContamination,
Malignancy = S04Malignancy,
OpUrgency = S02OperativeUrgency)
}
test_data <- patients
test_output <- gen.POSSUM(test_data)
head(test_output)
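# Convert the log-odds columns to probabilities (sketch using arm::invlogit):
test_output$POSSUMpred <- arm::invlogit(test_output$POSSUMLogit)
test_output$pPOSSUMpred <- arm::invlogit(test_output$pPOSSUMLogit)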
}
|
75c306518b492bbd19fd99ce1041e297c2cfad00
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CaliCo/examples/CaliCo.Rd.R
|
aa8f54db827233537b9c2fb9de8412b34bdb7b1b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
CaliCo.Rd.R
|
library(CaliCo)
### Name: CaliCo
### Title: Bayesian calibration for computational codes
### Aliases: CaliCo CaliCo-package
### ** Examples
# Introduction to CaliCo
## Not run: vignette("CaliCo-introduction")
|
2f95fd65aa67f2955b4ddf0f0910cd456fb58dcc
|
5c5242760e0a45fef0400f48173fc325c296a80e
|
/man/PWMEnrich.cloverScore.Rd
|
c3505decce86f394a7b0d5c98109d092a61a4a03
|
[] |
no_license
|
jrboyd/peakrefine
|
29f90711497b0b1de56ff9fe5abed1aeb98e705f
|
44a7f42eeefefb52a69bb3066cc5829d79d25f02
|
refs/heads/master
| 2021-07-08T05:11:24.470701
| 2020-07-27T16:36:31
| 2020-07-27T16:36:31
| 148,842,127
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 368
|
rd
|
PWMEnrich.cloverScore.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_PWM_modify.R
\name{PWMEnrich.cloverScore}
\alias{PWMEnrich.cloverScore}
\title{a non-exported method from PWMEnrich, here for stability}
\usage{
PWMEnrich.cloverScore(scores, lr3 = FALSE, verbose = FALSE)
}
\description{
a non-exported method from PWMEnrich, here for stability
}
|
ed7b0d28f8738598eede39781aad34dfd10ee02e
|
ff12ace2203836c526198b1460718caba7993834
|
/R/method-display_.R
|
0cdc5a64bd3ba845ced8fd88662c51f9ddeadbe3
|
[] |
no_license
|
abresler/PivotalR
|
55abbeeeed82e858e15f4abe486bfde562d9108a
|
8a14875159931883b01d223ae24a61764b751001
|
refs/heads/master
| 2021-01-18T19:37:43.497184
| 2013-08-04T19:44:23
| 2013-08-04T19:44:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,117
|
r
|
method-display_.R
|
## ------------------------------------------------------------------------
## How to display the db objects
## ------------------------------------------------------------------------
setGeneric ("print", signature = "x")
setMethod (
"print",
signature (x = "db.data.frame"),
function (x) {
if (x@.table.type == "LOCAL TEMPORARY") {
if (is(x, "db.view"))
temp <- "Temp view"
else
temp <- "Temp table"
} else {
if (is(x, "db.view"))
temp <- "View"
else
temp <- "Table"
}
cat(temp, " : ", x@.content, "\n", sep = "")
cat("Database : ", dbname(x@.conn.id), "\n", sep = "")
cat("Host : ", host(x@.conn.id), "\n", sep = "")
cat("Connection : ", x@.conn.id, "\n", sep = "")
})
## ------------------------------------------------------------------------
## setGeneric ("show", signature = "object")
setMethod (
"show",
signature (object = "db.data.frame"),
function (object) {
print(object)
})
## ------------------------------------------------------------------------
## print method for db.Rquery objects
setMethod (
"print",
signature (x = "db.Rquery"),
function (x) {
if (identical(content(x), character(0))) {
cat("NULL\n")
return (NULL)
}
cat("A temporary object in R derived from ", x@.source, "\n", sep = "")
cat("Database : ", dbname(x@.conn.id), "\n", sep = "")
cat("Host : ", host(x@.conn.id), "\n", sep = "")
cat("Connection : ", x@.conn.id, "\n", sep = "")
cat("--\n")
cat("If you want to make it point to a real object in database,\n")
cat("please use the function as.db.data.frame.\n")
cat("See help(as.db.data.frame) for more.\n")
})
## ------------------------------------------------------------------------
setMethod (
"show",
signature (object = "db.Rquery"),
function (object) {
print(object)
})
|
dcabb46604c539e819de65b80f21c2e5ec77aa5b
|
71bb9b7250c1d3b6842c51ac65d7f9132949863d
|
/tests/testthat/test-01-sample-fs.R
|
e1b6ffdba13cb7ec2a08bbec55730602bcc267be
|
[
"MIT"
] |
permissive
|
diazrenata/feasiblesads
|
2b2433911a4dcab94ef5f39e2688e984fc7562be
|
683a3816cdff25e07e3ed74b024b12209cefe121
|
refs/heads/master
| 2021-06-10T23:08:35.568507
| 2021-04-22T16:59:34
| 2021-04-22T16:59:34
| 177,840,082
| 0
| 1
|
NOASSERTION
| 2021-04-22T16:59:35
| 2019-03-26T17:45:07
|
R
|
UTF-8
|
R
| false
| false
| 1,061
|
r
|
test-01-sample-fs.R
|
context("Sample feasible sets")
test_that("Check that all feasible sets with s = 3, n = 8 are made", {
set.seed(42)
expect_error(output <- sample_fs(3, 8, 20), NA)
expect_equal(dim(output), c(20, 3))
expect_true(all(rowSums(output) == 8))
expect_known_hash(output, "0de117829e")
unique_sads <- dplyr::distinct(as.data.frame(output))
expect_equal(NROW(unique_sads), 5)
})
test_that("Check that all feasible sets with s = 3, n = 8 are made", {
set.seed(42)
sad_freq <- sample_fs(3, 8, 10000) %>%
as.data.frame() %>%
dplyr::group_by_all() %>%
dplyr::tally()
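# 5 distinct compositions and 10000 draws: each is expected ~2000 times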
expect_true(all(sad_freq$n > 1900))
expect_true(all(sad_freq$n < 2100))
})
test_that("Generate some feasible sets", {
set.seed(42)
expect_error(output <- sample_fs(4, 20, 1000), NA)
expect_equal(dim(output), c(1000, 4))
expect_true(all(rowSums(output) == 20))
expect_known_hash(output, "c9253177ed")
unique_sads <- dplyr::distinct(as.data.frame(output))
expect_equal(NROW(unique_sads), 64)
})
|
fb015bfabd38639d9cfc618bc5b732cc11abacbb
|
f10a733181102fd64a437bccbbbb0a81eb2a68f9
|
/R/000.R
|
06860400a4c5e2ec8a99021717e0cdc3be7c4c04
|
[] |
no_license
|
cran/rtf
|
7f2439fc408e57a36b27ffdfe5dbbb06101d1ae6
|
7e2e233fa4763a5a82e72393476c27aaf5d1f853
|
refs/heads/master
| 2020-06-01T05:10:50.738429
| 2020-03-22T08:32:44
| 2020-03-22T08:32:44
| 17,699,397
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 564
|
r
|
000.R
|
############################################################################
# This code has to come first in a library. To do this make sure this file
# is named "000.R" (zeros).
############################################################################
# Is autoload() allowed in R v2.0.0 and higher? According to the help one
# should not use require().
autoload("appendVarArgs", package="R.oo")
autoload("hasVarArgs", package="R.oo")
autoload("setMethodS3", package="R.oo")
autoload("setConstructorS3", package="R.oo")
#autoload("gsubfn", package="gsubfn")
|
4cfc784dbf40acdeeec3af5afcf597c7de8cc8dd
|
bbf1ae079309eca11270422d3f0d259d1515d430
|
/numerical-tours/r/nt_toolbox/toolbox_general/norms.R
|
70fd90592291e1c9b3ed9905971d82973803d712
|
[
"BSD-2-Clause"
] |
permissive
|
ZichaoDi/Di_MATLABTool
|
5e6a67b613c4bcf4d904ddc47c2744b4bcea4885
|
c071291c63685c236f507b2cb893c0316ab6415c
|
refs/heads/master
| 2021-08-11T07:28:34.286526
| 2021-08-04T18:26:46
| 2021-08-04T18:26:46
| 149,222,333
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
norms.R
|
# Defining vector norms (Euclidean and L1)
norm <- function(v){
####
# Euclidean norm of a vector (note: this masks base::norm())
####
return(sqrt(sum(v**2)))
}
l1_norm = function(x){sum(abs(x))} # L1 norm of a vector
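# Quick sanity check (hypothetical vector):
# norm(c(3, -4))    # 5
# l1_norm(c(3, -4)) # 7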
|
802f6b4ce068d78e92d34146d48d8bdd98731e6b
|
88dfe929a29807725a5f645838292c50cce6dfd2
|
/man/socket_detect.Rd
|
949ead5682f07330eb37bea09ecdc532465af10e
|
[] |
no_license
|
flooose/AlpacaforR
|
d1a5cd435fa0e93d5c7cfd06b8a3ac369e097646
|
dd6a3317fd16edfeb094d9c06b9357c2b5704101
|
refs/heads/master
| 2023-08-09T21:00:50.748078
| 2021-09-10T19:16:17
| 2021-09-10T19:16:17
| 405,643,999
| 0
| 0
| null | 2021-09-12T13:35:55
| 2021-09-12T13:05:24
| null |
UTF-8
|
R
| false
| true
| 394
|
rd
|
socket_detect.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Websockets.R
\name{socket_detect}
\alias{socket_detect}
\title{Determine the socket from the channel}
\usage{
socket_detect(channel)
}
\arguments{
\item{channel}{\code{(character)} Name of the channel}
}
\value{
\code{(character)} socket name based on channel
}
\description{
Determine the socket from the channel
}
|
9e24589951e7945918e504f31f8e7610f338a598
|
3ded716079ef6d40204ac4f24cfac64df49d4e9d
|
/man/remove_html.Rd
|
c36a435457f6a0b4f43b0ba3390fd9596f3b8b31
|
[] |
no_license
|
MarkGoble/mishMashr
|
7b38680ea59a66f544fabe507c2fc08a177319db
|
5455570039e864f89d3a43e6f071eb01fae2918f
|
refs/heads/master
| 2021-07-07T13:25:25.045758
| 2020-10-09T10:14:09
| 2020-10-09T10:14:09
| 196,279,650
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 413
|
rd
|
remove_html.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_html.R
\name{remove_html}
\alias{remove_html}
\title{Strip HTML from text}
\usage{
remove_html(string)
}
\arguments{
\item{string}{A string to clean}
}
\value{
A cleaned \code{string}
}
\description{
Remove HTML Tags from a string
}
\details{
Remove all the HTML tags from a string input
}
\examples{
\dontrun{
Add examples
}
}
|
14f746a7b9ac2c47d0f462d2697088b6063aa544
|
dc1c46aa1e67f29c52533371ef2c2cec1d552f6b
|
/R/utils.R
|
c7dfabab75e2dada1df23b6464a03280396f1928
|
[
"MIT"
] |
permissive
|
gshs-ornl/covidmodeldata
|
7eb9c84c6a88a23b606e69fe526bf9616068513f
|
f7a3c3022d73b6cd472f606896afdef914007a6b
|
refs/heads/master
| 2022-08-27T15:10:54.997231
| 2020-05-31T13:36:52
| 2020-05-31T13:36:52
| 255,154,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,027
|
r
|
utils.R
|
#' Harmonize names with other covidmodeldata data sources
#'
#' @noRd
google_translate <- function(df, admin, us_only, summarise_nyc) {
if (us_only) df <- dplyr::filter(df, country_code == "US")
if (admin == "county") df <- dplyr::filter(df, !is.na(county_name) | state_name == "District of Columbia")
if (admin == "state") df <- dplyr::filter(df, is.na(county_name), !is.na(state_name))
if (admin == "country") {
df <- dplyr::filter(df, is.na(county_name), is.na(state_name))
usethis::ui_warn("name translation and formatting have not been implemented yet for `admin = 'country'`")
return(df)
}
if (admin == "all") {
usethis::ui_warn("name translation and formatting have not been implemented yet for `admin = 'all'`")
return(df)
}
# translate county names --------------------------------------------------
if (admin == "county") {
google_names <-
df %>%
dplyr::mutate(
county_name_new = county_name,
county_name_new = gsub(" (County|Parish|Borough)", "", county_name_new),
county_name_new = dplyr::if_else(state_name == "District of Columbia", "District of Columbia", county_name_new),
county_name_new = dplyr::if_else(state_name == "South Dakota" & county_name_new == "Shannon", "Oglala Lakota", county_name_new),
county_name_new = dplyr::if_else(state_name == "Louisiana" & county_name_new == "La Salle", "LaSalle", county_name_new)
) %>%
dplyr::distinct(
state_name,
county_name,
county_name_new
) %>%
dplyr::group_by(
state_name,
county_name_new
) %>%
dplyr::mutate(
n = dplyr::n()
) %>%
dplyr::ungroup() %>%
dplyr::mutate(
county_name_new = dplyr::if_else(n > 1, county_name, county_name_new),
county_name_new = dplyr::if_else(n > 1 & !grepl(" County", county_name_new), paste(county_name_new, "city"), county_name_new)
) %>%
dplyr::select(-n)
google_translate <-
dplyr::left_join(
google_names,
acs_names,
by = c("state_name" = "state_name",
"county_name_new" = "county_name"
)
)
df <- dplyr::left_join(
df, google_translate,
by = c("state_name", "county_name")
) %>%
dplyr::mutate(
county_name = dplyr::if_else(is.na(county_name_new), county_name, county_name_new)
) %>%
dplyr::select(-county_name_new) %>%
dplyr::select(
geoid,
country_code,
country_name,
state_fips,
state_name,
county_fips,
county_name,
date,
tidyselect::starts_with("ggl_")
)
if (summarise_nyc) {
df_not_nyc <- dplyr::filter(df, geoid != "36NYC")
df_nyc <- df %>%
dplyr::filter(
geoid == "36NYC"
) %>%
dplyr::mutate(
county_fips = geoid,
county_name = "New York City",
county_name_long = "New York City, New York"
) %>%
dplyr::group_by(
geoid,
country_code,
country_name,
state_fips,
state_name,
county_fips,
county_name,
date
) %>%
dplyr::summarise_at(
dplyr::vars(tidyselect::starts_with("ggl_")), median, na.rm = TRUE
)
df <- dplyr::bind_rows(df_not_nyc, df_nyc) %>%
dplyr::arrange(geoid, date) %>%
dplyr::select(-county_fips)
}
df <- dplyr::rename(df, county_name_ggl = county_name)
} # end if (county)
# translate state names only ----------------------------------------------
if (admin == "state") {
acs_state_names <- dplyr::distinct(acs_names, state_fips, state_name)
df <- df %>%
dplyr::left_join(acs_state_names, by = "state_name") %>%
dplyr::select(
country_code,
country_name,
state_fips,
state_name,
date,
tidyselect::starts_with("ggl_")
)
}
df
}
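# Usage sketch (hypothetical call; `df` must carry country_code, state_name,
# county_name, date, and ggl_* columns as assumed above):
# clean <- google_translate(df, admin = "county", us_only = TRUE, summarise_nyc = TRUE)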
|
4194beff2b0f8be6d6ff7abbd0077b62f0fe5b3e
|
1d3559eff0d13d2ac763574f86116dabc1d0c8ea
|
/Compiled_PL_Rscripts/Rscript20162017/Read Files & Results.r
|
8ddcd674ec9eef49f4105e0cc66d62b2064cd91c
|
[] |
no_license
|
cpfergus/ProtectedLands
|
e62d0df776181ba0294135bc1b42fe67a0576761
|
05058b4b2c329367094b02d3d13aee632fa10906
|
refs/heads/master
| 2020-03-30T23:55:36.607142
| 2018-10-05T12:58:34
| 2018-10-05T12:58:34
| 151,718,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,866
|
r
|
Read Files & Results.r
|
############################
#PURPOSE:
#INPUT:
#OUTPUT:
#DEVELOPED:
#CONTACT: LacherI@si.edu
#NOTES:
#IMPORTANT:
##### NEXT STEPS #####
############################
# SET WORKING DIRECTORY
# setwd("Y:/Lacher/...") #Harvard CLUSTER
# PACKAGES NEEDED
# rasters
library(raster)
# SET TEMP DIRECTORY
rasterOptions(tmpdir = "Y:/Lacher/rtempCLEARME/")
# ----------------------------------------------
################################################
# ----------------------------------------------
# ----------------------------------------------
# READ FILES:
# ----------------------------------------------
# ----------------------------------------------
# ----------------------------------------------
# FILE LOCATIONS:
# ----------------------------------------------
BR_fileLoc<-"Y:/Lacher/IaraSpatialLayers_HF/PreparedRasters/ProLands/BlueRidge/"
Output_Folder <- "Y:/Lacher/ProtectedLandsProject/PatchesTransitions_BR/Patch_Stats/"
Comb_output <- "Y:/Lacher/ProtectedLandsProject/PatchesTransitions_BR/Combine/Tables/"
CombRas_output <- "Y:/Lacher/ProtectedLandsProject/PatchesTransitions_BR/Combine/Rasters/"
# ----------------------------------------------
# RAW RASTERS:
# ----------------------------------------------
# Protected Lands yes/no '0', '1'
pl <- raster(paste0(BR_fileLoc, "pl.tif" ))
# Protected Lands by YEAR
pl_year <- raster(paste0(BR_fileLoc, "pl_year.tif" )) # nodata, years, 9999
# pl_yr_z <- raster(paste0(BR_fileLoc, "pl_yr_z.tif" )) # 0, years, 9999 # Raster reclassified so background =zero. This is at 360*360 resolution
# Unique Patch ID raster- patches distinguished by YEAR
yrly_patID <- raster(paste0(BR_fileLoc, "IndPatches/brPLiyrRSM.tif")) # paste0() takes no sep argument
pl_nlcd <- raster(paste0(BR_fileLoc, "pl_nlcd.tif" ))
pl_er <- raster(paste0(BR_fileLoc, "pl_er.tif" ))
pl_gap <- raster(paste0(BR_fileLoc, "pl_gap.tif" ))
pl_own <- raster(paste0(BR_fileLoc, "pl_own.tif" ))
pl_pp <- raster(paste0(BR_fileLoc, "pl_pp.tif" ))
pl_state <- raster(paste0(BR_fileLoc, "pl_state.tif"))
pl_type <- raster(paste0(BR_fileLoc, "pl_type.tif" ))
pl_resilR <- raster(paste0(BR_fileLoc, "pl_resilR.tif" ))# reclassified version
# pl_biopri <- raster(paste0(BR_fileLoc, "pl_biopri.tif")) # maybe come back to see note above
# ----------------------------------------------
# GENERATED RASTERS
# ----------------------------------------------
# Original Raster
yrly_patID <- raster(paste0(BR_fileLoc, "IndPatches/brPLiyrRSM.tif"))
# Removed patches with zero core area
NCyrly_patID <- raster(paste0(BR_fileLoc, "IndPatches/NCyrly_patID.tif")) # in NAD UTM 17
sNCyrly_patID <- raster(paste0(BR_fileLoc, "IndPatches/sNCyrly_patID.tif")) # in WGS Albers Equal Area
# yrly_patID2<- raster(paste0(BR_fileLoc, "IndPatches/brPLiyrRSM2.tif", sep="")) # utm? Delete??
# ----------------------------------------------
# TABLES
# ----------------------------------------------
iStats_all<-read.csv("Y:/Lacher/ProtectedLandsProject/PatchesTransitions_BR/Patch_Stats/iStats_all.csv")
maj_categ<-read.table(paste0(Output_Folder,"maj_categ.txt"), sep=",", header=TRUE)
est_yr<-read.table(paste0(Output_Folder,"est_yr.txt"), sep=",", header=TRUE)
nldev_dist<-read.table(paste0(Output_Folder,"nldev_distL.txt"), sep=",", header=TRUE)
pat_dist_min<-read.table(paste0(Output_Folder,"pat_dist_min.txt"), sep=",", header=TRUE)
iStats_Join<-read.table(paste0(Output_Folder,"iStats_Join",".txt"), sep=",", header=TRUE)
gap_all<-read.table(paste0(Output_Folder,"gap_all",".txt"), sep=",", header=TRUE)
nlcd_all<-read.table(paste0(Output_Folder,"nlcd_all",".txt"), sep=",", header=TRUE)
resil_all<-read.table(paste0(Output_Folder,"resil_all",".txt"), sep=",", header=TRUE)
# ----------------------------------------------
# ----------------------------------------------
# RESULTS
# ----------------------------------------------
# ----------------------------------------------
# ----------------------------------------------
# CUMULATIVE AREA
# ----------------------------------------------
cum_TArER<-read.table(paste0(Output_Folder,"cum_TArER.txt"), sep=",", header=TRUE)
cum_ArOwn123<-read.table(paste0(Output_Folder,"cum_ArOwn123.txt"), sep=",", header=TRUE)
cum_NL123<-read.table(paste0(Output_Folder,"cum_NL123.txt"), sep=",", header=TRUE)
cum_GAP123<-read.table(paste0(Output_Folder,"cum_GAP123.txt"), sep=",", header=TRUE)
# Edited for
cum_TArER_EDIT <-read.table(paste0(Output_Folder,"cum_TArER_EDIT.txt"), sep="\t", header=TRUE)
cum_ArOwn123_EDIT <-read.table(paste0(Output_Folder,"cum_ArOwn123_EDIT.txt"), sep="\t", header=TRUE)
cum_NL123_EDIT <-read.table(paste0(Output_Folder,"cum_NL123_EDIT.txt"), sep="\t", header=TRUE)
cum_GAP123_EDIT <-read.table(paste0(Output_Folder,"cum_GAP123_EDIT.txt"), sep="\t", header=TRUE)
# ----------------------------------------------
# COALESCED CORE AREA
# ----------------------------------------------
coal_vals1111 <- raster(paste0(BR_fileLoc, "coal_vals1111.tif"))
coal_vals8595 <- raster(paste0(BR_fileLoc, "coal_vals8595.tif"))
coal_vals9505 <- raster(paste0(BR_fileLoc, "coal_vals9505.tif"))
coal_vals0515 <- raster(paste0(BR_fileLoc, "coal_vals0515.tif"))
# ----------------------------------------------
# Nearest Neighbor Regression Outputs - Version 12-15-16
# ----------------------------------------------
# NN prediction
NNpred123<-read.table(paste0(Output_Folder,"NNpred123.txt"), sep=",", header=TRUE)
# Pasted console output from summary(nnB); summary(nnP), kept for reference
# and commented out so the script still sources:
# > summary(nnB); summary(nnP)
# Call:
# glm(formula = sqrt(min_dist.km) ~ estYr, data = dist_n1)
# Deviance Residuals:
#     Min      1Q  Median      3Q     Max
# -1.0534 -0.7075 -0.2304  0.5754  3.4236
# Coefficients:
#              Estimate Std. Error t value Pr(>|t|)
# (Intercept) 37.202374  13.430689   2.770  0.00588 **
# estYr       -0.018202   0.006705  -2.715  0.00693 **
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# (Dispersion parameter for gaussian family taken to be 0.7057707)
# Null deviance: 275.51 on 384 degrees of freedom
# Residual deviance: 270.31 on 383 degrees of freedom
# AIC: 962.42
# Number of Fisher Scoring iterations: 2
# Call:
# glm(formula = sqrt(min_dist.km) ~ estYr, data = dist_n23)
# Deviance Residuals:
#     Min      1Q  Median      3Q     Max
# -1.0796 -0.7910 -0.1411  0.5091  3.4071
# Coefficients:
#              Estimate Std. Error t value Pr(>|t|)
# (Intercept) 27.116821   7.842051   3.458 0.000564 ***
# estYr       -0.013117   0.003914  -3.351 0.000831 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# (Dispersion parameter for gaussian family taken to be 0.7909091)
# Null deviance: 915.26 on 1147 degrees of freedom
# Residual deviance: 906.38 on 1146 degrees of freedom
# AIC: 2992.6
# Number of Fisher Scoring iterations: 2
|
3e647983e177300810e21ca6feeaa955dc37ff60
|
d43f9cec804ebd794c9016553c7ee6e844bfdb3a
|
/cachematrix.R
|
9903f633614ce08618e2f5a2e6cb85f3877701cf
|
[] |
no_license
|
avadi/ProgrammingAssignment2
|
299c0a0c8c11e5a4a6de30dc4a727f41ef1ca1e7
|
dbe1eff496d1dede75ef01008094205b98fd2823
|
refs/heads/master
| 2020-02-26T13:20:37.764173
| 2015-06-17T21:19:56
| 2015-06-17T21:19:56
| 37,554,758
| 0
| 0
| null | 2015-06-16T20:38:23
| 2015-06-16T20:38:23
| null |
UTF-8
|
R
| false
| false
| 1,396
|
r
|
cachematrix.R
|
## This program caches the inverse of a matrix so that it does not have to be
## recomputed repeatedly, which can be a costly computation.
## It provides two functions: one that wraps a matrix with a cache, and
## one that reuses the cached inverse rather than recalculating it.
## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## It exposes a bundle of 4 functions -
## 1 - get the matrix,
## 2 - set the matrix,
## 3 - set the matrix inverse &
## 4 - get the matrix inverse.
makeCacheMatrix <- function(x = matrix()) {
##inv matrix var
im <- NULL
setmat <- function(y) {
x <<- y
im <<- NULL
}
getmat <- function() x
##setmatinverse <- function(solve) im <<- solve
setmatinverse <- function(z) {
# store the supplied inverse directly; the original called solve(z) here,
# which re-inverted the already-inverted matrix and cached the wrong value
im <<- z
}
getmatinverse <- function() im
list(setmat = setmat, getmat = getmat,setmatinverse = setmatinverse,
getmatinverse = getmatinverse)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed) then cacheSolve
## retrieves the inverse from the cache instead of recomputing it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
im <- x$getmatinverse()
if(!is.null(im)) {
print("getting cached data")
return(im)
}
data <- x$getmat()
im <- solve(data, ...)
x$setmatinverse(im)
im
}
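## Minimal usage sketch (hypothetical values):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)  # computes the inverse and caches it
## cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse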
|
5e1b51198bfb7c8660308d41f0dc43014a194e2c
|
ab97856d258036c85fb41f06444f4b3547f70c99
|
/server.R
|
f644bab26e0aa42f04078f1deb61adc019201b7d
|
[] |
no_license
|
mtbbiker/dp_shiny
|
aa30e4d015cd774caf00a0d712ed4aa6335841b0
|
daad4819895ae1af5092ca3d7f01c4ea5a45da73
|
refs/heads/master
| 2021-01-01T03:40:03.249540
| 2016-05-26T13:01:19
| 2016-05-26T13:01:19
| 59,512,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,474
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(UsingR)
myfiledatamale <- read.csv("data/maledata.csv", colClasses = "character")
myfiledatafemale <- read.csv("data/femaledata.csv", colClasses = "character")
catagories <- names(myfiledatamale)
#Calculate the Estimated VO2max
vo2max <- function(distance) (distance - 504.9) / 44.73
#Test in what catagory did the measured distance fell
test <- function(testdata,dist)
{
result <- ""
for (i in 2:length(catagories)) {
a <- strsplit(testdata[,i],"-")
#print(a)
ll <- length(a[[1]])
if(ll==1)
{
##Single
lower <- sapply(a, function(x){as.numeric(x[1])})
#print(lower)
if(dist > lower)
{
#High value
#print("Match Upper bound")
result <- catagories[i]
break
} else
{
#Test more
#print("Match Lower bound")
result <- catagories[i]
}
} else
{
##Upper and lower val
lower <- sapply(a, function(x){as.numeric(x[1])})
upper <- sapply(a, function(x){as.numeric(x[2])})
if((lower <= dist) & (upper >= dist )) #Test more
{
#print("Match multi")
result <- catagories[i]
break
}
else
{
#print("Test More Multi")
}
}
}
result
}
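# Illustration (hypothetical table cells): a range cell such as "2100-2199"
# matches dist = 2150 via the lower/upper branch; a single-value cell such as
# "2700" matches any dist above 2700 via the upper-bound branch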
compaDistance <- function(agegroup,sex,distance) {
if(sex == 1)
{
#Use Male data
usedata <- myfiledatamale[myfiledatamale$Age==agegroup,,drop =FALSE]
#head(usedata)
t <- test(usedata,distance)
paste(t, " Results for distance", distance, " m walked")
}
else
{
#Use Female Data
usedata <- myfiledatafemale[myfiledatafemale$Age==agegroup,,drop =FALSE]
t <- test(usedata,distance)
paste(t," Results for distance", distance, " m walked")
}
}
shinyServer(
function(input, output) {
output$mytable = renderDataTable({
if(input$radioGender==1)
{
#output$text1 <-renderText("Male")
if(input$varAge=="ALL")
{
myfiledatamale[]
}
else
{
myfiledatamale[myfiledatamale$Age==input$varAge,,drop =FALSE]
}
}
else
{
#output$text1 <-renderText("Female")
if(input$varAge=="ALL")
{
myfiledatafemale[]
}
else
{
myfiledatafemale[myfiledatafemale$Age==input$varAge,,drop =FALSE]
}
}
})
output$vo2max <- renderText({
#if (input$goButton == 0) "You have not calculated anything"
# else paste(vo2max(input$numDist), " mls/kg/min")
input$goButton
isolate(
if(input$numDist>0)
{
paste(round(vo2max(input$numDist), digits = 3) , " mls/kg/min")
}
else
{
"0.00 mls/kg/min"
}
)
})
output$testresult <- renderText({
input$goButton
isolate(
if (input$goButton <= 0) "Not calculated yet!"
else
{
input$goButton
compaDistance( input$varAge,input$radioGender,input$numDist)
}
)
})
}
)
|
6915e754fc79bb91283cdbab87f3677f91a36c7c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mobsim/examples/sample_quadrats.Rd.R
|
0eb7602f496ec35412b8382b863e6b1ad2a0e339
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
sample_quadrats.Rd.R
|
library(mobsim)
### Name: sample_quadrats
### Title: Plot-based samples from a spatially-explicit census
### Aliases: sample_quadrats
### ** Examples
library(vegan)
sim_com1 <- sim_poisson_community(100, 10000)
comm_mat1 <- sample_quadrats(sim_com1, n_quadrats = 100,
quadrat_area = 0.002, method = "grid")
specnumber(comm_mat1$spec_dat)
diversity(comm_mat1$spec_dat, index = "shannon")
|
105dd8d4c4caa04ccf6ca746f1152a5dd99b2f7b
|
65c9d9a616608052c2ae1652c53fca0f07d3dc0b
|
/R/zzz.R
|
6216163168a329bd94e8de2c237a1d04fe66653c
|
[] |
no_license
|
blogsvoices/iSAX
|
de46ec10592c90e5c0a188d95e59a0a071bb0095
|
3c0e5fc3b4e4bba8cf775a8257849011bbcf85fc
|
refs/heads/master
| 2022-10-24T23:43:31.772611
| 2022-10-08T17:01:33
| 2022-10-08T17:01:33
| 57,962,525
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,159
|
r
|
zzz.R
|
#########################################################################################################
# iSAX is an R package which provides access to iSA technology developed by
# VOICES from the Blogs. It is released for academic use only and licensed
# under the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License
# see http://creativecommons.org/licenses/by-nc-nd/4.0/
# Warning: Commercial use of iSA is protected under the U.S. provisional patent application No. 62/215264
#########################################################################################################
.onAttach <- function(libname, pkgname)
{
packageStartupMessage("\n")
packageStartupMessage(rep("#",63))
packageStartupMessage("# iSA: U.S. provisional patent application No. 62/215264 #")
packageStartupMessage("# This package is released under the Creative Commons License #")
packageStartupMessage("# Attribution-NonCommercial-NoDerivatives 4.0 International #")
packageStartupMessage("# For academic use only! #")
packageStartupMessage(rep("#",63))
packageStartupMessage("\n")
}
|
70664f29593f7e3374a0aa46a56bf91ee68c241e
|
68198dc9812e092dd3117c93f0cd307d39042c73
|
/Analyses/0_preprocess_cluster.R
|
4038cbcd238f5d67207c35909698e69fe58853b6
|
[] |
no_license
|
ding-lab/HotPho_Analysis
|
a5c55e56a05cc2b44da8efb9a3c7aeee8bf42f9c
|
9b60da568e170a5eac2bc9fd9f8a99fe5edbf3e4
|
refs/heads/master
| 2020-03-17T07:59:11.232779
| 2018-08-30T15:32:43
| 2018-08-30T15:32:43
| 133,420,023
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,514
|
r
|
0_preprocess_cluster.R
|
##### preprocess_cluster.R #####
# Kuan-lin Huang @ WashU 2017 May; updated 2018 April
### dependencies ###
bdir = "/Users/khuang/Box\ Sync/Ding_Lab/Projects_Current/hotpho_data"
setwd(bdir)
source("Analyses/hotpho_analyses_functions.R")
### common dependencies for plotting ###
# hotspot3d cluster file
cluster_f = "/Users/khuang/Box\ Sync/Ding_Lab/Projects_Current/hotpho_data/HotSpot3D/Data_201807/MC3.maf.3D_Proximity.pairwise.3D_Proximity_cleaned.sites.3D_Proximity_cleaned.musites.recurrence.l0.ad10.r10.clusters"
##### CLUSTERs #####
cluster = read.table(header=T, quote = "", sep="\t", stringsAsFactors = F, fill =T, file = cluster_f, colClasses=c(rep('character',3),rep("numeric",4),rep("character",7)))
colnames(cluster)=gsub("\\.","_",colnames(cluster)) # period cuz troubles for R data frame manipulation
cluster_summary = read.table(header=T, quote = "", sep="\t", stringsAsFactors = F, fill =T, file = paste(cluster_f,"summary",sep="."), colClasses=c(rep('character',2),rep("numeric",13),rep("character",7)))
### clean up clusters ###
# cluster$Alt_Mutation_Gene = gsub(".*:","",cluster$Alternative_Transcripts) # always get the last one as that seems to be the correct one; ex. ENST00000320868:p.T42|ENST00000320868:p.Y42
# cluster$Original_Mutation_Gene = cluster$Mutation_Gene
# cluster$Mutation_Gene[cluster$Alt_Mutation_Gene != ""] = cluster$Alt_Mutation_Gene[cluster$Alt_Mutation_Gene != ""]
# #write.table(cluster, quote=F, sep="\t", file = "HotSpot3D/Data_201807/convertToRef.filtered.0705.pass.fast.MC3.combined.mumu.musite.sitesite.max20.ld0.ad10.r10.net.recur.unspec.strInd.subInd.noSingletons.clusters", row.names = F)
# some REF residues for sites seem to be off during generation of pairwise or cluster file
# thankfully hotspot3d only considers position
# post-hoc fixing of the PTM residue labels here
PTMcosmo_f = "/Users/khuang/Box\ Sync/Ding_Lab/Projects_Current/hotpho_data/input/PTMPortal_ptm_sites_dump.phospho_only.with_enst.tsv"
PTMcosmo = read.table(header=T, quote = "", sep="\t", stringsAsFactors = F, fill =T, file = PTMcosmo_f)
PTMcosmo$Mutation_Gene = paste("p.",PTMcosmo$residue,PTMcosmo$p_coord,sep="")
PTMcosmo_map = PTMcosmo[,c("Ensembl.Transcript","p_coord","Mutation_Gene")]
cptac_site$Position = gsub("p.[A-Z]","",cptac_site$amino_acid_residue)
cptac_site_map = cptac_site[,c("ensembl_transcript_id","Position","amino_acid_residue")]
colnames(PTMcosmo_map) = c("Transcript","Position","originalLabel")
colnames(cptac_site_map) = c("Transcript","Position","originalLabel")
phosphosites_map = rbind(cptac_site_map,PTMcosmo_map)
phosphosites_map = phosphosites_map[!duplicated(paste(phosphosites_map$Transcript,phosphosites_map$Position)),]
cluster$Mutation_Gene = gsub("p. ","p.",cluster$Mutation_Gene) # some residues had a gap...
cluster$Position = gsub("p.[A-Z]*([0-9]*)[A-Z]*","\\1",cluster$Mutation_Gene)
cluster_merge = merge(cluster,phosphosites_map,by=c("Transcript","Position"),all.x=T)
cat("Number of residues with inconsistent residues from the original phosphosite files (True being inconsistent):\n")
table(cluster_merge$Mutation_Gene[cluster_merge$Alternate=="ptm"] != cluster_merge$originalLabel[cluster_merge$Alternate=="ptm"])
# cluster_merge$Mutation_Gene[cluster_merge$Alternate=="ptm"] = cluster_merge$originalLabel[cluster_merge$Alternate=="ptm"]
# table(cluster_merge$Mutation_Gene[cluster_merge$Alternate=="ptm"] != cluster_merge$originalLabel[cluster_merge$Alternate=="ptm"])
# cluster_merge = cluster_merge[,-c(which(colnames(cluster_merge) =="originalLabel"))]
# write.table(cluster_merge, quote=F, sep="\t", file = "HotSpot3D/Data_201807/PTM_MC3_noFs.maf.3D_Proximity.pairwise.3D_Proximity.sites.3D_Proximity.musites.site.l0.ad10.r10.cleaned.clusters", row.names = F)
cat("Unique clusters (unfiltered):",length(unique(cluster$Cluster)),"\n")
cat("Unique genes:",length(unique(cluster$Gene_Drug)),"\n")
annotated_cluster = annotate_cluster(cluster)
cat("Hybrid clusters (unfiltered):",length(unique(annotated_cluster$Cluster[annotated_cluster$Type=="Hybrid"])),"\n")
cat("Mutation-only clusters (unfiltered):",length(unique(annotated_cluster$Cluster[annotated_cluster$Type=="Mut_Only"])),"\n")
cat("Site-only clusters (unfiltered):",length(unique(annotated_cluster$Cluster[annotated_cluster$Type=="Site_Only"])),"\n")
# annotated_clusterPTM$ref_org = gsub("p.([A-Z])[0-9]+","\\1",annotated_clusterPTM$Original_Mutation_Gene)
# table(annotated_clusterPTM$ref_org)
annotated_cluster_centroids = annotated_cluster[annotated_cluster$Geodesic_From_Centroid==0,]
annotated_cluster_centroids_unique = annotated_cluster_centroids[!duplicated(annotated_cluster_centroids$Cluster),]
# Use cluster closeness to find the top 5% clusters
# hybrid_clus = unique(annotated_cluster$Cluster[annotated_cluster$Type == "Hybrid"])
# mut_clus = unique(annotated_cluster$Cluster[annotated_cluster$Type == "Mut_Only"])
# site_clus = unique(annotated_cluster$Cluster[annotated_cluster$Type == "Site_Only"])
#
top_clust = c()
cluster_types = c("Hybrid","Mut_Only","Site_Only")
thres = 0.95
h_thres = 0
cluster_type_thres = list()
cluster_summary$Type = NA
for (type in cluster_types){
cluster_summary$Type[cluster_summary$Cluster_ID %in% unique(annotated_cluster$Cluster[annotated_cluster$Type == type])] = type
clust_type = cluster_summary[cluster_summary$Cluster_ID %in%
unique(annotated_cluster$Cluster[annotated_cluster$Type == type]),]
type_thres = quantile(clust_type$Cluster_Closeness,probs=thres)
cluster_type_thres[type]=type_thres
if (type =="Hybrid"){h_thres= type_thres}
top_clust = c(top_clust,clust_type$Cluster_ID[clust_type$Cluster_Closeness > type_thres])
cat(type, ":\t",type_thres, "\n")
}
annotated_cluster_centroids_unique_pass = annotated_cluster_centroids_unique[annotated_cluster_centroids_unique$Cluster %in% top_clust,]
# # get the 5% most significant clusters for each category for subsequent analysis
# cluster_types = c("Hybrid","Mut_Only","Site_Only")
# thres = 0.95
# h_thres = 0
# cluster_type_thres = list()
# for (type in cluster_types){
# type_thres = quantile(annotated_cluster_centroids_unique$Closeness_Centrality[annotated_cluster_centroids_unique$Type == type],probs=thres)
# cluster_type_thres[type]=type_thres
# if (type =="Hybrid"){h_thres= type_thres}
# }
# annotated_cluster_centroids_unique_pass = annotated_cluster_centroids_unique[
# annotated_cluster_centroids_unique$Type == "Hybrid" & annotated_cluster_centroids_unique$Closeness_Centrality > cluster_type_thres[["Hybrid"]] |
# annotated_cluster_centroids_unique$Type == "Mut_Only" & annotated_cluster_centroids_unique$Closeness_Centrality > cluster_type_thres[["Mut_Only"]] |
# annotated_cluster_centroids_unique$Type == "Site_Only" & annotated_cluster_centroids_unique$Closeness_Centrality > cluster_type_thres[["Site_Only"]],
# ]
# take a look at the density
p = ggplot(cluster_summary,aes(x = log10(Cluster_Closeness), fill=Type))
p = p + geom_density(alpha=0.2,size=0.5)
p = p + theme_bw() #+ xlim(0,5)
p = p + geom_vline(xintercept = log10(h_thres),alpha=0.5)
p
fn = paste("output/Data_201807_cc_dist_by_cluster_type.pdf",sep="_")
ggsave(fn, useDingbat=F)
cat("\n")
cat("Hybrid clusters (filtered):",length(unique(annotated_cluster_centroids_unique_pass$Cluster[annotated_cluster_centroids_unique_pass$Type=="Hybrid"])),"\n")
rank_vectors(annotated_cluster_centroids_unique_pass$Gene_Drug[annotated_cluster_centroids_unique_pass$Type=="Hybrid"])
cat("Mutation-only clusters (filtered):",length(unique(annotated_cluster_centroids_unique_pass$Cluster[annotated_cluster_centroids_unique_pass$Type=="Mut_Only"])),"\n")
rank_vectors(annotated_cluster_centroids_unique_pass$Gene_Drug[annotated_cluster_centroids_unique_pass$Type=="Mut_Only"])
cat("Site-only clusters (filtered):",length(unique(annotated_cluster_centroids_unique_pass$Cluster[annotated_cluster_centroids_unique_pass$Type=="Site_Only"])),"\n")
rank_vectors(annotated_cluster_centroids_unique_pass$Gene_Drug[annotated_cluster_centroids_unique_pass$Type=="Site_Only"])
annotated_cluster_pass = annotated_cluster[annotated_cluster$Cluster %in% annotated_cluster_centroids_unique_pass$Cluster, ]
write.table(annotated_cluster_pass, quote=F, sep="\t", file = "output/Data_201807_cc.p0.05.cluster.tsv", row.names = F)
# sync up transcripts within the same cluster, for the cases where the PTM sites are on a different transcript
transvarIn_f = "HotSpot3D/Data_201807/PTM_Site_transvar.txt.gz"
transvarIn = read.table(header=F, quote = "", sep="\t", stringsAsFactors = F, fill =T, file = gzfile(transvarIn_f))
transvarIn_anno = transvarIn[,c(3,6,12)]
colnames(transvarIn_anno) = c("Transcript","Position","GenomicPosition")
# transvarIn_anno$Start = gsub("chr.*g.([0-9]*)_.*","\\1",transvarIn_anno$GenomicPosition)
# transvarIn_anno$Stop = gsub("chr.*g.([0-9]*)_([0-9]*)/.*","\\2",transvarIn_anno$GenomicPosition)
annotated_cluster_pass = merge(annotated_cluster_pass, transvarIn_anno, by=c("Transcript","Position"), all.x=T)
annotated_cluster_pass$Start[annotated_cluster_pass$Alternate == "ptm"] = gsub("chr.*g.([0-9]*)_.*","\\1",annotated_cluster_pass$GenomicPosition[annotated_cluster_pass$Alternate == "ptm"])
annotated_cluster_pass$Stop[annotated_cluster_pass$Alternate == "ptm"] = gsub("chr.*g.([0-9]*)_([0-9]*)/.*","\\2",annotated_cluster_pass$GenomicPosition[annotated_cluster_pass$Alternate == "ptm"])
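# Regex sketch (assuming transvar-style coordinates like "chr1:g.12345_12347/..."):
# the Start pattern captures "12345" and the Stop pattern captures "12347"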
transvar_f = "/Users/khuang/Box\ Sync/Ding_Lab/Projects_Current/hotpho_data/output/annotated_cluster_h_PTM_transvarOut.txt"
transvar = read.table(header=T, quote = "", sep="\t", stringsAsFactors = F, fill =T, file = transvar_f)
transvar$Transcript = gsub(" .*","",transvar$transcript)
transvar$Mutation_Gene = gsub(".*/(p.[A-Z][0-9]+)","\\1",transvar$coordinates.gDNA.cDNA.protein.)
transvar_anno = transvar[grep("p.",transvar$Mutation_Gene),c("input","Transcript","Mutation_Gene")]
transvar_anno$Start = gsub(".*:g.([0-9]+)_.*","\\1",transvar_anno$input)
cluster = "10394.1"
for (cluster in annotated_cluster_pass$Cluster){
annotated_cluster_pass_c = annotated_cluster_pass[annotated_cluster_pass$Cluster == cluster,]
if (annotated_cluster_pass_c$Type != "Site_Only"){
if (length(unique(annotated_cluster_pass_c$Transcript))>1){
mutTranscript = annotated_cluster_pass_c$Transcript[annotated_cluster_pass_c$Alternate!="ptm"][1]
PTMs = annotated_cluster_pass_c$Mutation_Gene[annotated_cluster_pass_c$Alternate=="ptm"]
for (PTM in PTMs){
if (annotated_cluster_pass_c$Transcript[annotated_cluster_pass_c$Mutation_Gene == PTM] != mutTranscript){
updatedSite = gsub("(p.[A-Z][0-9]+).*","\\1",transvar_anno$Mutation_Gene[transvar_anno$Start == annotated_cluster_pass_c$Start[annotated_cluster_pass_c$Mutation_Gene == PTM] &
transvar_anno$Transcript == mutTranscript])
if (length(updatedSite) > 0){
annotated_cluster_pass$Transcript[annotated_cluster_pass$Cluster == cluster & annotated_cluster_pass$Mutation_Gene == PTM] = mutTranscript
annotated_cluster_pass$Mutation_Gene[annotated_cluster_pass$Cluster == cluster & annotated_cluster_pass$Mutation_Gene == PTM] = updatedSite
}
}
}
}
}
}
annotated_cluster_pass$Position = gsub("p.[A-Z]([0-9]+).*","\\1",annotated_cluster_pass$Mutation_Gene)
write.table(annotated_cluster_pass, quote=F, sep="\t", file = "output/Data_201807_cc.p0.05.cluster_transcriptSynced.tsv", row.names = F)
|
1de839a9bf466032f642137efef43ea9e0f3ced4
|
4232c4b7969be3f92563f224ef675bccd09ad562
|
/cachematrix.R
|
19ce1d8007bd1317e8b23d115aa50146264cc87a
|
[] |
no_license
|
coursera4ashok/ProgrammingAssignment2
|
4bf393af784f26d245baa391fd4cf2d41eb84efb
|
624174c8e2d9f0cf6a331349ef15a54b8ce3ee7f
|
refs/heads/master
| 2021-01-15T14:12:55.086132
| 2014-09-20T05:22:24
| 2014-09-20T05:22:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,372
|
r
|
cachematrix.R
|
## Cache the expensive computation of a matrix inverse and retrieve
## it whenever required.
## Exposes four methods:
## set    -- set the value of the matrix
## get    -- return the matrix
## setinv -- cache the value of the inverse
## getinv -- retrieve the cached inverted matrix;
##           returns NULL if the inverse is not cached
makeCacheMatrix <- function(x = matrix()) {
invMatrix <- NULL
set <- function(y) {
x <<- y
invMatrix <<- NULL
}
get <- function() x
setinv <- function(value) invMatrix <<- value
getinv <- function() invMatrix
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Computes the inverse of the special "matrix" made by makeCacheMatrix.
## This method uses the cached inverse if one is available.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
## test
m<-matrix(c(3,8,9,8,4,8,12,1,3),nrow=3,ncol=3,byrow=TRUE)
# Wrap the matrix with a cache
cm<-makeCacheMatrix(m);
m2<-cacheSolve(cm);
# just for testing solve m
m1<-solve(m)
# print m1
m1
# print m2
m2
# check if the values are the same
m1==m2
# this should print a 3x3 matrix of TRUE (9 values)
|
74295139c2d3bd3c70f425250d3380d0933b3116
|
67666f9118dd403b558cede8e21b53520f7bddbe
|
/man/pop_data.Rd
|
349a2d731783a9f3ba33d4362b70d6dc978a94f1
|
[
"MIT"
] |
permissive
|
beckwang80/Tplyr
|
8b3d64e99ee646118432f96208b7e6b8fd0f76a1
|
d782ba9f0012d6ed66da1b3d949082dfd63cfa75
|
refs/heads/master
| 2023-09-05T01:56:24.220929
| 2021-10-13T13:08:26
| 2021-10-13T13:08:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,195
|
rd
|
pop_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_bindings.R
\name{pop_data}
\alias{pop_data}
\alias{pop_data<-}
\alias{set_pop_data}
\title{Return or set population data bindings}
\usage{
pop_data(table)
pop_data(x) <- value
set_pop_data(table, pop_data)
}
\arguments{
\item{table}{A \code{tplyr_table} object}
\item{x}{A \code{tplyr_table} object}
\item{value}{A data.frame with population level information}
\item{pop_data}{A data.frame with population level information}
}
\value{
For \code{pop_data} the pop_data binding of the
\code{tplyr_table} object. For \code{pop_data<-} nothing is returned;
the pop_data binding is set silently. For \code{set_pop_data} the
modified object.
}
\description{
The population data is used to gather information that may not be available
from the target dataset. For example, missing treatment groups, population N
counts, and proper N counts for denominators will be provided through the
population dataset. The population dataset defaults to the target dataset
unless otherwise specified using \code{set_pop_data}.
}
\examples{
tab <- tplyr_table(iris, Species)
pop_data(tab) <- mtcars
}
|
d93dc10c6e6c44ad5b1f65d0738560fbfa5f5078
|
8633a50727b06e1f096c5c81fb12fafd1fb47232
|
/PREDICTION.R
|
523492fbd2fe0bce574d0e6ba62505c3637c22f4
|
[] |
no_license
|
oldhero5/Walmart-Comp
|
3b9317ca2977af5cf5dea46eace97ff84b65aaca
|
3c03a42ed3db2ea80c81b71ca4675ca6cbec0bd3
|
refs/heads/master
| 2020-04-20T13:46:42.508988
| 2019-02-02T21:11:28
| 2019-02-02T21:11:28
| 168,878,519
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,751
|
r
|
PREDICTION.R
|
##XGB DATA PREDICT
# Set WD
setwd("~/DATA/KAGGLE/Walmart")
#Librarys
library(reshape2)
library(data.table)
library(xgboost)
library(Rtsne)
library(caret)
library(ggplot2)
library(readr)
library(lubridate)
#Read Data
train <- read_csv("train.csv")
test <- read_csv("test.csv")
samsub <- read_csv("sample_submission.csv")
#Clean Data
train1 <- train
test1 <- test
train1[is.na(train1)] <- 0
test1[is.na(test1)] <- 0
train$Weekday<- make.names(train$Weekday)
train$Weekday <- as.factor(train$Weekday)
train$DepartmentDescription<- make.names(train$DepartmentDescription)
train$DepartmentDescription <- as.factor(train$DepartmentDescription)
train1$TripType <-paste0("TripType_",train1$TripType)
train1$TripType <- as.factor(train1$TripType)
train1$Monday<- as.numeric(train$Weekday == "Monday")
train1$Tuesday<- as.numeric(train$Weekday == "Tuesday")
train1$Wednesday<- as.numeric(train$Weekday == "Wednesday")
train1$Thursday<- as.numeric(train$Weekday == "Thursday")
train1$Friday<- as.numeric(train$Weekday == "Friday")
train1$Saturday<- as.numeric(train$Weekday == "Saturday")
train1$Sunday<- as.numeric(train$Weekday == "Sunday")
test1$Monday<- as.numeric(test$Weekday == "Monday")
test1$Tuesday<- as.numeric(test$Weekday == "Tuesday")
test1$Wednesday<- as.numeric(test$Weekday == "Wednesday")
test1$Thursday<- as.numeric(test$Weekday == "Thursday")
test1$Friday<- as.numeric(test$Weekday == "Friday")
test1$Saturday<- as.numeric(test$Weekday == "Saturday")
test1$Sunday<- as.numeric(test$Weekday == "Sunday")
test$Weekday<- make.names(test$Weekday)
test$Weekday <- as.factor(test$Weekday)
test$DepartmentDescription<- make.names(test$DepartmentDescription)
test$DepartmentDescription <- as.factor(test$DepartmentDescription)
train1 <- dcast(train1, VisitNumber + TripType + Monday + Tuesday + Wednesday + Thursday + Friday +Saturday +Sunday ~ DepartmentDescription,fun.aggregate = sum, value.var = "ScanCount")
test1 <- dcast(test1, VisitNumber + Monday + Tuesday + Wednesday + Thursday + Friday +Saturday +Sunday ~ DepartmentDescription,fun.aggregate = sum, value.var = "ScanCount")
# creates total items purchased
train1$TotalItems <- rowSums(train1[,c(5:73)])
test1$TotalItems <- rowSums(test1[,c(4:71)])
Finelinetrain <- dcast(train, VisitNumber + TripType ~ DepartmentDescription, fun.aggregate = function(x) length(unique(x)), value.var = "FinelineNumber")
Finelinetest <- dcast(test, VisitNumber ~ DepartmentDescription, fun.aggregate = function(x) length(unique(x)), value.var = "FinelineNumber")
Skutrain <- dcast(train, VisitNumber + TripType ~ DepartmentDescription, fun.aggregate = function(x) length(unique(x)), value.var = "Upc")
Skutest <- dcast(test, VisitNumber ~ DepartmentDescription, fun.aggregate = function(x) length(unique(x)), value.var = "Upc")
Deptstrain <- dcast(train, VisitNumber + TripType ~ DepartmentDescription, fun.aggregate = function(x) length(unique(x)), value.var = "DepartmentDescription")
Deptstest <- dcast(test, VisitNumber ~ DepartmentDescription, fun.aggregate = function(x) length(unique(x)), value.var = "DepartmentDescription")
train1$TotalFLines <- rowSums(Finelinetrain[,c(3:71)])
test1$TotalFLines <- rowSums(Finelinetest[,c(2:69)])
train1$TotalSku<- rowSums(Skutrain[,c(3:71)])
test1$TotalSku <- rowSums(Skutest[,c(2:69)])
train1$TotalDepts <- rowSums(Deptstrain[,c(3:71)])
test1$TotalDepts <- rowSums(Deptstest[,c(2:69)])
#remove targets
target.org <- train1$TripType
target <- target.org
levels(target)
num.class <- length(levels(target))
levels(target) <- 1:num.class
train1$TripType <- NULL
train1$VisitNumber <- NULL
#convert to matrix
train.mat <- as.matrix(train1)
colnames(train.mat) <- NULL
test.mat <- as.matrix(test1)
mode(train.mat) <- "numeric"
colnames(test.mat)<- NULL
mode(test.mat) <- "numeric"
# VisitNumber and TripType were already dropped from train1 above; the original
# removed the first matrix column twice more here, which deletes real feature
# columns (Monday, Tuesday) and misaligns the train and test feature sets.
# train.mat <- train.mat[,-1]
# train.mat <- train.mat[,-1]
test.mat <- test.mat[,-1] # drop VisitNumber, which is still present in test1
y <- as.matrix(as.integer(target)-1)
#k-fold cross validation with time
param <- list("objective" = "binary:logitraw", "eval_matric" = "merror","num_class" = num.class, "nthread" = 38, "max_depth" = 38, "eta" = 0.3,"gamma" = 0,
"subsample" = 10, "colsample_bytree" = 10, "min_child_weight" = 12)
set.seed(1234)
#kfold cross validation w/ timing
nround.cv = 200
system.time(bst.cv <- xgb.cv(params = param, data = train.mat, label = y, nfold = 4, nrounds = nround.cv, prediction = TRUE, verbose = FALSE))
tail(bst.cv$dt)
#index of minimum merror
min.merror.idx <- which.min(bst.cv$dt[,test.merror.mean])
min.merror.idx
#minimum error
bst.cv$dt[min.merror.idx,]
#get CV's prediction decoding
pred.cv = matrix(bst.cv$pred, nrow=length(bst.cv$pred)/num.class, ncol=num.class)
pred.cv = max.col(pred.cv, "last")
#real model fit training, with full data
system.time(bst<- xgboost(param = param, data = train.mat, label = y, nrounds = min.merror.idx, verbose = 0))
#xgboost predict using test matrix
pred <- predict(bst, test.mat)
# decode: reshape the flat probability vector to (n x num.class) and take the arg-max class
pred<- matrix(pred, nrow = num.class, ncol = length(pred)/num.class)
pred<- t(pred)
pred<- max.col(pred, "last")
#get trained model
model <- xgb.dump(bst, with.stats = TRUE)
#get real names
names <- dimnames(train1)[[2]]
#feature importance
importance_matrix <- xgb.importance(names, model = bst)
#plot feature importance
gp <- xgb.plot.importance(importance_matrix)
print(gp)
#Write file
f <- colnames(samsub)
f <- f[2:39]
f <- gsub("\\TripType_", "", f)
f <- as.integer(f)
G <- outer(pred,f, function(x,y) x==y)
G <- G*1
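# outer() one-hot encodes predictions against the submission's class labels:
# e.g. values c(2, 5) vs labels c(2, 3, 5) give rows (1,0,0) and (0,0,1);
# G*1 converts the logical matrix to 0/1. (This assumes the factor-level order
# of `target` lines up with the submission's column order.)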
G <- as.data.table(G)
a <- samsub$VisitNumber
G <- cbind(a,G)
d <- colnames(samsub)
colnames(G) <- d
write_csv(G,"solution1.csv")
|
e5695ef109d713f8e449a8c8743d0aff8b680696
|
a1ccd4fdc43c395f50d560e475b39177c5bd467f
|
/man/glioGSC.Rd
|
e833ef7689eda3ce84b19c358523c34d84dffc25
|
[] |
no_license
|
vjcitn/ivygapSEOLD
|
67b6cbb66e042c2283be79861b20c3d96f3a7b7d
|
6c0bd004c9e1e14a2837fb35fdf588d9947c1956
|
refs/heads/master
| 2021-07-16T15:30:11.951581
| 2017-10-24T18:41:05
| 2017-10-24T18:41:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 442
|
rd
|
glioGSC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{glioGSC}
\alias{glioGSC}
\title{msigdb: 47 gene sets related to glioblastoma by 'Search Gene Sets' at msigdb}
\format{GeneSetCollection}
\usage{
glioGSC
}
\description{
msigdb: 47 gene sets related to glioblastoma by 'Search Gene Sets' at msigdb
}
\note{
Retrieved 20 Oct 2017, and imported with getGmt of GSEABase
}
\keyword{datasets}
|
f2de7969e3a988b39e05d9fce0742e73db6f3c46
|
33021203bc03720616f604399d3f34bbfd06064b
|
/tests/testthat/test-with_any_case.R
|
d50989e4d3bf20aaa4f931ad18fc45df12345958
|
[
"MIT"
] |
permissive
|
mervynakash/RVerbalExpressions
|
102d0b21efe68056eb532254d6944b7ae218a5c8
|
5a1da4057e624ac1cefb559a82936f1aa43e7afa
|
refs/heads/master
| 2020-04-28T05:49:15.660582
| 2019-03-11T15:28:49
| 2019-03-11T15:28:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 288
|
r
|
test-with_any_case.R
|
context("test-rx_with_any_case")
test_that("with_any_case modifier works", {
# expect match
expect_true(grepl(rx_find(value = "ABC") %>% rx_with_any_case(), "abc"))
# don't expect a match
expect_false(grepl(rx_find(value = "ABC") %>% rx_with_any_case(enable = FALSE), "abc"))
})
|
a14b76a193b80f61bcf66cf89cd851cc0ce86108
|
d5996e1500aa7af65bb67ffad0e5cb618ca964d4
|
/principal.R
|
52f4cc3190bb722d5f6b750b3b971b52942660c9
|
[] |
no_license
|
fcen-amateur/practica4-modelo-lineal
|
1a4ae8f160036dab9985e219edf09eb4babbb233
|
b20e930bc651abe3a6644dabe2139759cda01c07
|
refs/heads/master
| 2022-01-27T03:56:30.673891
| 2019-06-05T23:03:42
| 2019-06-05T23:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,039
|
r
|
principal.R
|
library('tidyverse')
library('stats')
library('future')
library('furrr')
set.seed(42)
#setwd("/mnt/Datos/")
# Coeficientes "platonicos" (i.e., del proceso generador de datos)
beta_pgd <- c(4, 2, -3, 0.5, 0)
# Generating functions for the x_i
generadores_x <- list(
"x1" = function(n) { runif(n, min=-5, max=5) },
"x2" = function(n) { runif(n, min=-5, max=5) },
"x3" = function(n) { runif(n, min=-5, max=5) },
"x4" = function(n) { runif(n, min=-5, max=5) }
)
generadores_eps <- list(
"normal" = function(n) { rnorm(n) },
"exponencial" = function(n) { rexp(n, rate = 1/2) - 2 },
"lognormal" = function(n) { exp(rnorm(n) - exp(0.5)) },
"uniforme" = function(n) { runif(n, -3, 3) },
"chi_cuadrado" = function(n) { rchisq(n, 3) - 3 },
"student1" = function(n) { rt(n, 1) },
"student3" = function(n) { rt(n, 3) }
)
generador_y <- function(x1, x2, x3, x4, beta_pgd, eps, ...) {
c(1, x1, x2, x3, x4) %*% beta_pgd + eps
}
generar_muestra <- function(n, generadores_x, generador_eps, beta_pgd) {
  # Empty tibble
  df <- tibble(.rows = n)
  # Generate the regressors and the errors
  for (nombre in names(generadores_x)) {
    if (nombre != "y") {
      df[nombre] <- generadores_x[[nombre]](n)
    }
    # NB: eps is regenerated on every pass of the loop; only the last draw survives
    df$eps <- generador_eps(n)
  }
  # Generate y
  df["y"] <- pmap_dbl(df, generador_y, beta_pgd = beta_pgd)
  return(df)
}
intervalo_conf <- function(a_vec, llamada_lm, alfa, metodo = "exacto") {
betahat <- llamada_lm$coefficients
# Estimated covariance matrix of the coefficients
Sigmahat <- vcov(llamada_lm)
n_muestra <- nrow(llamada_lm$model)
r <- llamada_lm$rank
# Compute the t or z quantile, as appropriate
if (metodo == "exacto") {
cuantil <- qt(p = 1 - alfa/2, df = n_muestra - r)
} else if (metodo == "asintotico") {
cuantil <- qnorm(p = 1 - alfa/2)
} else {
stop("Los unicos metodos soportados son 'exacto' y 'asintotico'")
}
centro <- t(a_vec)%*%betahat
delta <- cuantil * sqrt(t(a_vec) %*% Sigmahat %*% a_vec)
return(c(centro - delta, centro + delta))
}
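# Usage sketch with the generators defined above:
# m <- generar_muestra(100, generadores_x, generadores_eps[["normal"]], beta_pgd)
# fit <- lm(y ~ x1 + x2 + x3 + x4, data = m)
# intervalo_conf(c(0, 1, 0, 0, 0), fit, alfa = 0.1)  # 90% CI for beta1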
cubre <- function(intervalo, valor) { intervalo[1] <= valor & intervalo[2] >= valor}
ayudante_generar_muestra <- function(distr_eps, generadores_x, beta_pgd, n) {
generar_muestra(n,generadores_x, generadores_eps[[distr_eps]],beta_pgd=beta_pgd)
}
# Switch to multithreaded mode because heavy computations are coming
future.globals.maxSize = '+inf'
#n_muestrales <- c(10, 25, 100)
n_muestrales <- c(10, 25, 100, 250, 500, 1000, 1500, 2000, 3000)
## max_n_muestral <- max(n_muestrales)
## n_sims <- 1000
## muestras_maestras <- crossing(
## n_sim = seq(max_n_muestral),
## distr_eps = names(generadores_eps)) %>%
## mutate(
## muestra = future_map(.progress=TRUE,
## distr_eps,
## ayudante_generar_muestra,
## generadores_x = generadores_x,
## beta_pgd = beta_pgd,
## n = max_n_muestral)
## )
#muestras_maestras %>% write_rds("muestras_maestras.Rds")
muestras_maestras <- read_rds("muestras_maestras.Rds")
# The '-3' is hard to read; look up how to drop a column by name.
muestras_puntuales <- muestras_maestras[-3] %>%
crossing(
n = n_muestrales
)
muestras_puntuales %>% write_rds("muestras_puntuales.Rds")
## ayudante_intervalo_conf <- function(fun_a, llamada_lm, met_int, alfa) {
## intervalo_conf(a_vec = funciones_a[[fun_a]], llamada_lm, metodo = met_int, alfa)
## }
# The helper takes the simulation number, n, and the epsilon distribution; from these it builds an interval of level 1 - alfa using method met_int.
ayudante_intervalo_conf <- function(n_simulacion, distr_epsilon, n, fun_a, met_int, alfa) {
muestra_a_evaluar <- (muestras_maestras %>% filter(n_sim==n_simulacion,distr_eps==distr_epsilon))[[1,'muestra']] %>% head(n)
modelo <- lm(y ~ x1 + x2 + x3 +x4,data=muestra_a_evaluar)
intervalo_conf(a_vec = funciones_a[[fun_a]], llamada_lm=modelo, alfa=alfa, metodo = met_int)
}
# Linear combinations of beta_pgd to estimate (the q*p matrix A of the general theory).
funciones_a <- list(
beta1 = c(0, 1, 0, 0, 0),
beta4 = c(0, 0, 0, 0, 1)
)
metodos_intervalo <- c("asintotico", "exacto")
alfa <- 0.1
## intervalos <- muestras_puntuales %>%
## crossing(
## fun_a = names(funciones_a),
## met_int = metodos_intervalo) %>%
## mutate(
## # atbeta is the value of the parameter under the data-generating process.
## atbeta = map_dbl(fun_a, function(i) funciones_a[[i]] %*% beta_pgd),
## ic = future_pmap( .progress = TRUE,
## list(n_sim, distr_eps, n, fun_a, met_int),
## ayudante_intervalo_conf,
## alfa = alfa),
## cubre = map2_lgl(ic, atbeta, cubre),
## ic_low = map_dbl(ic, 1),
## ic_upp = map_dbl(ic, 2)
## )
# Save the simulation
#intervalos %>% write_rds("simulacion.Rds")
intervalos <- read_rds("simulacion.Rds") # was "simulacion.csv"; the cached file is written as .Rds above
# I did this to check that it returns something. And it does!
sintesis <- intervalos %>%
group_by(distr_eps, n, met_int, fun_a) %>%
summarise(prop_cubre = mean(cubre)) %>% write_rds("sintesis-resultados.Rds") # .Rds extension to match the serialization format
|
a398d0a3b0e6addafad43d94a42e1cd3f6ce3787
|
0fbfb9298e862d65bd4e076c9cc06198763b1a84
|
/Figure 1 Step5.R
|
e0b7e42fae3b608b913cd3e02043e127af004c4f
|
[
"MIT"
] |
permissive
|
kozlama/Sayed-Kodama-Fan-et-al-2021
|
18dd7475523515cd9baf164d27d301da3bcef280
|
332532c147f826760c9fb24cb2b2cf969aef5e20
|
refs/heads/main
| 2023-07-19T15:47:07.310109
| 2021-09-09T21:32:52
| 2021-09-09T21:32:52
| 404,477,614
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,108
|
r
|
Figure 1 Step5.R
|
###############################################################################################
# Pre-processing for data used to generate Figure 1 from Sayed, Kodama, Fan, et al. 2021
# Total of 46 human AD R47H vs CV samples
# This script is: STEP 5 of 5 - Generation of Figure panels as below
# Adapted from https://satijalab.org/seurat/v3.1/pbmc3k_tutorial.html
# by Li Fan
###############################################################################################
library(Seurat)
library(dplyr)
library(ggplot2)
library(cowplot)
library(reshape2)
library(MAST)
setwd("/athena/ganlab/scratch/lif4001/Human_AD_Mayo_UPenn/data_analysis/integration")
R47H_all_integrated <- readRDS("R47H_all_integrated_Annotation.rds")
R47H_all_integrated$TREM2.Sex <- paste(R47H_all_integrated$TREM2, R47H_all_integrated$Sex, sep = "_")
# Define an order of cluster identities
my_levels <- c("WT_F","R47H_F","WT_M","R47H_M")
my_levels_celltype <- c("astrocytes","excitatory neurons","inhibitory neurons","microglia","oligodendrocytes","OPCs","endothelia cells")
# Relevel object@ident
R47H_all_integrated$TREM2.Sex <- factor(x = R47H_all_integrated$TREM2.Sex, levels = my_levels)
R47H_all_integrated$celltype <- factor(x = R47H_all_integrated$celltype, levels = my_levels_celltype)
setwd("/athena/ganlab/scratch/lif4001/Human_AD_Mayo_UPenn/data_analysis/integration/Figures")
DefaultAssay(R47H_all_integrated) <- 'RNA'
### Fig.1B
pdf("R47H_all_integrated_umap_test_1.pdf", width=6.5, height=4)
DimPlot(R47H_all_integrated, reduction = 'umap', label = F, cols = c("#00B6EB","#F8766D","#C49A00","#00C094","#A58AFF","#006838","#FB61D7"))
dev.off()
### Fig.S1 E, F, G
pdf("R47H_all_integrated_QC.pdf", width=12, height=12)
Idents(R47H_all_integrated) <- "orig.ident"
VlnPlot(object = R47H_all_integrated, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 1, pt.size=0, idents=NULL)
dev.off()
### Fig.S1 H, I
pdf("R47H_all_integrated_FeatureScatter.pdf", width=10, height=5)
FeatureScatter(object = R47H_all_integrated, feature1 = "nCount_RNA", feature2 = "percent.mt", group.by = "orig.ident", pt.size=0.1)
FeatureScatter(object = R47H_all_integrated, feature1 = "nCount_RNA", feature2 = "nFeature_RNA", group.by = "orig.ident", pt.size=0.1)
dev.off()
# Fig. 1C: calculate the ratio of each cell type within each genotype
a<-as.data.frame(table(R47H_all_integrated$TREM2.Sex,R47H_all_integrated$celltype))
colnames(a)<-c("clusters","cell.type","cell.no")
agg<-aggregate(cell.no~clusters,a,sum)
a$cluster.total <- agg$cell.no[match(a$clusters,agg$clusters)]
a$ratio<-a$cell.no/a$cluster.total
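# Worked example (hypothetical counts): if genotype WT_F has 400 cells in total,
# of which 120 are astrocytes, its astrocyte ratio is 120/400 = 0.3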
ggplot(a,aes(x=clusters, y=ratio, fill=cell.type))+
geom_bar(stat="identity")+
theme_classic()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
xlab("Genotype")+
ylab("Cell type ratio per genotype") + RotatedAxis()
ggsave("genotype_celltype_distribution_1.pdf",plot=last_plot(),path="/athena/ganlab/scratch/lif4001/Human_AD_Mayo_UPenn/data_analysis/integration/Figures",
width=8,height=8,units="in")
### Fig.S1J
pdf("R47H_all_integrated_Annotation.pdf", width=10, height=6)
FeaturePlot(R47H_all_integrated, features = c("FLT1","CLDN5","EBF1","GAD1","GAD2","PDGFRA","VCAN","CD74","C3","CSF1R","SLC17A7","CAMK2A","NRGN", "AQP4", "GFAP",
"PLP1","MBP","MOBP"))
dev.off()
# Fig. S1K: calculate ratio of each cell type in each sample
a<-as.data.frame(table(R47H_all_integrated$orig.ident,R47H_all_integrated$celltype))
colnames(a)<-c("clusters","cell.type","cell.no")
agg<-aggregate(cell.no~clusters,a,sum)
a$cluster.total <- agg$cell.no[match(a$clusters,agg$clusters)]
a$ratio<-a$cell.no/a$cluster.total
ggplot(a,aes(x=clusters, y=ratio, fill=cell.type))+
geom_bar(stat="identity")+
theme_classic()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
xlab("Sample")+
ylab("Cell type ratio per sample") + RotatedAxis()
ggsave("sample_celltype_distribution_1.pdf",plot=last_plot(),path="/athena/ganlab/scratch/lif4001/Human_AD_Mayo_UPenn/data_analysis/integration/Figures",
width=10,height=4,units="in")
|
006e67c8f01d54b8c7c1b7424e53cc173f33290c
|
d22fff7e355f2ae52033dc40eabc2c0c3b087b6f
|
/MechaCarChallenge.R
|
140385361bb0ff32b5a82ff16840f32f5cdbbcfe
|
[] |
no_license
|
arielzzq/Module15_MechaCar_Statistical_Analysis
|
f141549578be9116ed32eee1ef6ca33847be19c9
|
05d747a3d09ca3c3d9c60b0e9e0b76386f260f53
|
refs/heads/main
| 2023-04-08T21:55:26.590735
| 2021-04-12T01:24:44
| 2021-04-12T01:24:44
| 356,971,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,046
|
r
|
MechaCarChallenge.R
|
#deliverable 1
library(tidyverse)
mpg_table <- read.csv(file = "mechaCar_mpg.csv", check.names = F, stringsAsFactors = F)
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data = mpg_table)
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data = mpg_table))
#deliverable 2
sc_table <- read.csv(file = "Suspension_Coil.csv", check.names = F, stringsAsFactors = F)
total_summary <- summarize(sc_table, mean = mean(PSI), median = median(PSI), variance = var(PSI), SD = sd(PSI))
lot_summary <- sc_table %>% group_by(Manufacturing_Lot) %>% summarize(mean = mean(PSI), median = median(PSI), variance = var(PSI), SD = sd(PSI),.groups = "keep")
#deliverable 3
t.test(sc_table$PSI, mu = 1500)
lot1_table <- subset(sc_table, Manufacturing_Lot == "Lot1")
lot2_table <- subset(sc_table, Manufacturing_Lot == "Lot2")
lot3_table <- subset(sc_table, Manufacturing_Lot == "Lot3")
t.test(lot1_table$PSI, mu = 1500)
t.test(lot2_table$PSI, mu = 1500)
t.test(lot3_table$PSI, mu = 1500)
|
b2b76589c1c87984c5de8d3514a4190627fada53
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/road2stat/MEF/get-ppi-sim.R
|
d8e7dfb994006033fa80425b227927213536c913
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,046
|
r
|
get-ppi-sim.R
|
# Multiple Evidence Fusion
#
# Compute PPI-based Drug-Drug Similarity
#
# The following functions require R packages "igraph", "foreach" and "doMC".
# The R data file pro-geneid.Rdata contains the Entrez Gene IDs for the
# drug targets (retrieved from UniProt).
#
# This task was completed using the high-performance computing server of
# CBDD Group, Central South University. It took about 2 days to complete
# with 64 parallel tasks.
#
# Author: Nan Xiao <me@nanx.me>
#
# Date: Sep 14, 2013
library('igraph')
library('foreach')
library('doMC')
registerDoMC(64)
load('pro-geneid.Rdata') # uniprot id to gene id mapping
biogrid = read.table(gzfile('BIOGRID-ORGANISM-Homo_sapiens-3.2.104.tab2.tar.gz'),
sep = '\t', header = FALSE, fill = TRUE,
stringsAsFactors = FALSE, quote = '')
biogrid = biogrid[, 2:3]
biogrid[, 1] = as.character(biogrid[, 1])
biogrid[, 2] = as.character(biogrid[, 2])
g = graph.data.frame(biogrid, directed = FALSE)
nodes = unique(as.vector(as.matrix(biogrid)))
# Similarity kernel: A * e^-(|P1 - P2|),
# where |P1 - P2| is the shortest-path distance between the proteins in the PPI graph
A = 0.9 * exp(1)
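# With A = 0.9 * e, a shortest path of 1 yields 0.9 * e * e^-1 = 0.9 and a path
# of 2 yields 0.9 * e^-1 ~= 0.33; identical gene IDs are assigned similarity 1 below.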
#' Function to compute PPI-based similarity values between two protein
#' lists (the pairs are parallelised via foreach further below)
#'
#' @param twoid length-2 integer vector specifying the indexes in a list,
#' whose components stored the Gene IDs of the proteins
#'
#' @return The similarity matrix between the two protein lists.
ppiSim = function (twoid) {
id1 = twoid[1]
id2 = twoid[2]
if (all(geneid[[id1]] == '') | all(geneid[[id2]] == '')) {
mat = matrix(0)
} else {
mat = matrix(0L, nrow = length(geneid[[id1]]), ncol = length(geneid[[id2]]))
for ( i in 1:length(geneid[[id1]]) ) {
for ( j in 1:length(geneid[[id2]]) ) {
gid1 = as.character(geneid[[id1]][i])
gid2 = as.character(geneid[[id2]][j])
if (gid1 == gid2) {
mat[i, j] = 1
} else if ( (gid1 %in% nodes) & (gid2 %in% nodes) ) {
spath = length(get.shortest.paths(g, from = gid1, to = gid2,
output = 'epath')[[1]])
mat[i, j] = A * ( exp(1)^(-spath) )
} else {
mat[i, j] = 0
}
}
}
}
return(mat)
}
# generate lower matrix index
idx = combn(1:length(geneid), 2)
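# combn enumerates each unordered pair once, column-wise:
# combn(1:3, 2) has columns (1,2), (1,3), (2,3)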
# then use foreach parallelization
# input is every pair combination; foreach collects each block's value into a list
ppisimlist <- foreach (i = 1:ncol(idx), .errorhandling = 'pass') %dopar% {
  ppiSim(rev(idx[, i]))
}
ppisimAvgtotal = sapply(ppisimlist, mean)
# convert list to matrix
ppisimmatAvgtotal = matrix(0, length(geneid), length(geneid))
for (i in 1:length(ppisimlist)) ppisimmatAvgtotal[idx[2, i], idx[1, i]] = ppisimAvgtotal[i]
ppisimmatAvgtotal[upper.tri(ppisimmatAvgtotal)] = t(ppisimmatAvgtotal)[upper.tri(t(ppisimmatAvgtotal))]
diag(ppisimmatAvgtotal) = 1
ppisimmatAvgtotal6digit = format(round(ppisimmatAvgtotal, 6), nsmall = 6)
write.table(ppisimmatAvgtotal6digit, 'ppisimmat.txt', sep = '\t',
quote = FALSE, row.names = FALSE, col.names = FALSE)
|
1ebfd741e5df2ab1331023017a0fbb06c0b1e18e
|
08fe4d36c08faa0744775b909bfb1ebc385aead6
|
/man/cite_datasource.Rd
|
f27fe731808733a52fb0298a047393301a99db1a
|
[
"MIT"
] |
permissive
|
ManuelPopp/BiomassEST
|
3682820e7f4b0680bed7e89619fce0d4df0df406
|
9019505d7d8fbfb06353606685e6154e07d01a57
|
refs/heads/main
| 2023-06-01T01:54:59.543702
| 2021-06-16T15:55:41
| 2021-06-16T15:55:41
| 373,981,193
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
rd
|
cite_datasource.Rd
|
\name{cite.datasource}
\alias{cite.datasource}
\title{Cite original source of parameter values used for biomass calculations}
\usage{
cite.datasource(Species = NA, Parameter = "RCD", Author = NA, Bibtex = TRUE)
}
\description{
Prints out the citation of the original paper from which parameters for the calculation of biomass values were obtained. Input is either a species name, in which case the database is searched for the citation of the corresponding values, or the family name of an author.
}
\arguments{
\item{Species}{
A species name, e.g, "Abies alba", for which the source of the corresponding database entry is to be printed.
}
\item{Author}{
Last name of the first author of a publication that was used as a data source.
}
\item{Parameter}{
One of the methods that can be chosen in the functions of this package. Used in case various methods published in different publications could be used for estimating the biomass of a specific tree species.
}
\item{Bibtex}{
If TRUE, the output will be generated in form of a BibTeX entry.
}
}
\examples{
cite.datasource(Species = "Abies alba")
cite.datasource(Author = "Anninghoefer")
}
|
342acf0bece3fa954041cf06406ab3e1bbf05e8e
|
8036066874c5ff987566482971c36ad4e3f41551
|
/fmri/keuka_brain_behavior_analyses/dan/dan_decon_rt_prediction_streams.R
|
aa4cc234d5bc5df4f9ed1e7a5eab06615f5212b2
|
[] |
no_license
|
UNCDEPENdLab/clock_analysis
|
171ec5903fc00e8b0f6037f1b0a76b3f77558975
|
5aeb278aaf8278cebefa3f4b11360a9b3d5c364f
|
refs/heads/master
| 2023-06-22T07:18:03.542805
| 2023-06-19T18:31:40
| 2023-06-19T18:31:40
| 22,355,039
| 1
| 2
| null | 2021-04-20T20:07:08
| 2014-07-28T19:29:11
|
HTML
|
UTF-8
|
R
| false
| false
| 14,097
|
r
|
dan_decon_rt_prediction_streams.R
|
# With 'decode = T', makes MEDUSA decoding plots for Fig. 4 E-G.
# Loops over decoding and RT-prediction multi-level models for various regions and time points.
# First, run medusa_event_locked_lmer.R.
library(modelr)
library(tidyverse)
library(lme4)
library(afex)
library(broom)
library(broom.mixed) #plays will with afex p-values in lmer wrapper
library(ggpubr)
library(car)
library(viridis)
library(psych)
library(corrplot)
library(foreach)
library(doParallel)
library(readxl)
repo_directory <- "~/code/clock_analysis"
# data & options ----
# data loading options
reprocess = F # otherwise load data from cache
if (!reprocess) {
wide_only = F # only load wide data (parcels and timepoints as variables)
tall_only = F
}
replicate_compression = F
if(replicate_compression) {reprocess = T}
# what to run
plots = T
# CAN RUN BOTH AT ONCE:
decode = T # main analysis analogous to Fig. 4 E-G in NComm 2020
rt_predict = T # predicts next response based on signal and behavioral variables
# PICK ONE AT A TIME:
online = F # whether to analyze clock-aligned ("online") or RT-aligned ("offline") responses
streams = T # whether models are run on parcels within levels of visuomotor gradient (F) or on parcels within streams (T)
visuomotor = F
exclude_first_run = T
reg_diagnostics = F
# load MEDUSA deconvolved data
source(file.path(repo_directory, "fmri/keuka_brain_behavior_analyses/dan/load_medusa_data_dan.R"))
setwd('~/code/clock_analysis/fmri/keuka_brain_behavior_analyses/')
# read in behavioral data
cache_dir <- "~/Box/SCEPTIC_fMRI/dan_medusa/cache/"
repo_dir <- "~/code/clock_analysis"
load(file.path(repo_dir, '/fmri/keuka_brain_behavior_analyses/trial_df_and_vh_pe_clusters_u.Rdata'))
# select relevant columns for compactness
df <- df %>% select(id, run, run_trial, rewFunc,emotion, last_outcome, rt_csv, score_csv, rt_next, pe_max, rt_vmax, rt_vmax_lag,
rt_vmax_change, v_max_wi, v_entropy_wi, v_entropy_b, v_entropy, v_max_b, u_chosen_quantile, u_chosen_quantile_lag, u_chosen_quantile_change,
rt_vmax_lag_sc, rt_lag_sc,rt_lag2_sc, rt_csv_sc, trial_neg_inv_sc, Age, Female, kld3, kld4) %>% group_by(id, run) %>% arrange(id, run, run_trial) %>%
mutate(rt_next = lead(rt_csv_sc),
rt_change = rt_next - rt_csv_sc,
rt_vmax_lead = lead(rt_vmax),
rt_vmax_change_next = rt_vmax_lead - rt_vmax,
v_entropy_wi_lead = lead(v_entropy_wi),
v_entropy_wi_change = v_entropy_wi_lead-v_entropy_wi,
v_entropy_wi_change_lag = lag(v_entropy_wi_change),
u_chosen_quantile_next = lead(u_chosen_quantile),
u_chosen_quantile_change_next = lead(u_chosen_quantile_change),
kld3_lead = lead(kld3),
kld3_lag = lag(kld3),
outcome = case_when(
score_csv>0 ~ 'Reward',
score_csv==0 ~ "Omission"),
abs_pe = abs(pe_max),
abs_pe_lag = lag(abs_pe)
) %>% ungroup()
if (online) {
if (streams) {
d <- merge(df, clock_streams, by = c("id", "run", "run_trial"))
} else {
d <- merge(df, clock_visuomotor, by = c("id", "run", "run_trial"))
}
} else {if (streams) {
d <- merge(df, rt_streams, by = c("id", "run", "run_trial"))
} else {
d <- merge(df, rt_visuomotor, by = c("id", "run", "run_trial"))
}
}
units <- names(d[grepl("_\\d|-\\d", names(d))])
# summarize by stream (for RT prediction)
message("Summarizing by stream")
dstreams <- d %>%
  group_by(id, run, run_trial) %>%
summarise_at(.vars = units, .funs = mean, na.rm = T) %>% ungroup() %>% merge(df)
# remove first run
if (exclude_first_run) {
d <- d %>% filter(run>1)
}
# diagnose regressor multicollinearity
if (reg_diagnostics) {
regs <- d %>% select(rt_csv, rt_lag_sc, rt_vmax, rt_vmax_lag, rt_vmax_change, v_entropy_wi, v_entropy_wi_change, v_max_wi,
kld3, kld3_lag, abs_pe, score_csv, trial_neg_inv_sc)
cormat <- corr.test(regs)
corrplot(cormat$r, cl.lim=c(-1,1),
method = "circle", tl.cex = 1.5, type = "upper", tl.col = 'black',
order = "hclust", diag = FALSE,
addCoef.col="black", addCoefasPercent = FALSE,
p.mat = cormat$p, sig.level=0.05, insig = "blank")
}
scale2 <- function(x, na.rm = FALSE) (x - mean(x, na.rm = na.rm)) / sd(x, na.rm = na.rm)
# scale decon across subjects as a predictor
# choice uncertainty prediction analyses run on scaled 'ds' dataframe instead of 'd'
# ds <- d %>% mutate_at(vars(starts_with("dan")), scale2, na.rm = TRUE) %>% ungroup()
## "Decoding" ----
# combined right and left hippocampus with side as a predictor
# if model does not converge, update with new starting values (not needed here)
# labels <- names(d[grepl("_R_|_r_|_L_|_l_", names(d))])
# make cluster ----
f <- Sys.getenv('PBS_NODEFILE')
library(parallel)
ncores <- detectCores()
nodelist <- if (nzchar(f)) readLines(f) else rep('localhost', ncores)
cat("Node list allocated to this job\n")
print(nodelist)
cl <- makePSOCKcluster(nodelist, outfile='')
print(cl) ##; print(unclass(cl))
registerDoParallel(cl)
# loop over sensors ----
pb <- txtProgressBar(0, max = length(units), style = 3)
# test
# labels <- labels[1:2]
if(decode) {
message("\nDecoding: analyzing parcel data")
ddf <- foreach(i = 1:length(units), .packages=c("lme4", "tidyverse", "broom.mixed", "car"),
.combine='rbind', .noexport = c("clock_wide", "clock_wide_cens", "rt_wide", "clock_streams", "clock_visuomotor", "rt_streams", "rt_visuomotor")) %dopar% {
# message(paste("Analyzing timepoint", t, sep = " "))
if (i %% 2 == 0) {setTxtProgressBar(pb, i)}
# for (unit in units) {
unit <- as.character(units[i])
d$h <- as.numeric(d[[unit]])
s <- d[!is.na(d$h),]
if (online) {
md <- lmerTest::lmer(h ~ trial_neg_inv_sc + rt_csv_sc + rt_lag_sc + scale(rt_vmax_lag) + scale(rt_vmax_change) +
v_entropy_wi + v_entropy_wi_change +
kld3_lag + v_max_wi + scale(abs_pe_lag) + last_outcome +
(1|id) + (1|label), s, control=lmerControl(optimizer = "nloptwrap"))
} else {
md <- lmerTest::lmer(h ~ trial_neg_inv_sc + rt_csv_sc + rt_lag_sc + scale(rt_vmax_lag) + scale(rt_vmax_change) +
v_entropy_wi + v_entropy_wi_change +
kld3_lag + v_max_wi + scale(abs_pe) + outcome +
(1|id) + (1|label), s, control=lmerControl(optimizer = "nloptwrap")) }
while (any(grepl("failed to converge", md@optinfo$conv$lme4$messages) )) {
print(md@optinfo$conv$lme4$conv)
ss <- getME(md,c("theta","fixef"))
md <- update(md, start=ss)}
dm <- tidy(md)
dm$unit <- unit
                   dm$t <- gsub(".*_", "", unit) # keep only the trailing timepoint
dm}
# FDR correction ----
message("\nFDR correction")
ddf <- ddf %>% mutate(stat_order = as.factor(case_when(abs(statistic) < 2 ~ '1',
abs(statistic) > 2 & abs(statistic) < 3 ~ '2',
abs(statistic) > 3 ~ '3')),
p_value = as.factor(case_when(`p.value` > .05 ~ '1',
`p.value` < .05 & `p.value` > .01 ~ '2',
`p.value` < .01 & `p.value` > .001 ~ '3',
`p.value` <.001 ~ '4')))
ddf$t <- as.numeric(ddf$t)
ddf$unit <- as.factor(sub("_[^_]+$", "", ddf$unit))
ddf$stat_order <- factor(ddf$stat_order, labels = c("NS", "|t| > 2", "|t| > 3"))
ddf$p_value <- factor(ddf$p_value, labels = c("NS", "p < .05", "p < .01", "p < .001"))
terms <- unique(ddf$term[ddf$effect=="fixed"])
ddf <- ddf %>% group_by(term) %>% mutate(p_fdr = p.adjust(p.value, method = 'fdr'),
p_level_fdr = as.factor(case_when(
# p_fdr > .1 ~ '0',
# p_fdr < .1 & p_fdr > .05 ~ '1',
p_fdr > .05 ~ '1',
p_fdr < .05 & p_fdr > .01 ~ '2',
p_fdr < .01 & p_fdr > .001 ~ '3',
p_fdr <.001 ~ '4'))
) %>% ungroup() %>% mutate(side = substr(as.character(unit), nchar(as.character(unit)), nchar(as.character(unit))),
zone = substr(as.character(unit), 1, nchar(as.character(unit))-2))
ddf$p_level_fdr <- factor(ddf$p_level_fdr, levels = c('1', '2', '3', '4'), labels = c("NS","p < .05", "p < .01", "p < .001"))
ddf$`p, FDR-corrected` = ddf$p_level_fdr
# plots ----
message("\nSaving decoding results")
if (online) {
setwd('~/OneDrive/collected_letters/papers/sceptic_fmri/dan/plots/clock_decode')
if(streams) { decode_results_fname = "clock_decode_output_streams.Rdata"
} else {decode_results_fname = "clock_decode_output_visuomotor.Rdata"}
} else {setwd('~/OneDrive/collected_letters/papers/sceptic_fmri/dan/plots/rt_decode')
if(streams) { decode_results_fname = "rt_decode_output_streams.Rdata"
} else {decode_results_fname = "rt_decode_output_visuomotor.Rdata"}
}
save(file = decode_results_fname, ddf)
# gc()
}
## STOPPED HERE
## RT prediction ----
if(rt_predict) {
message("\nRT prediction: analyzing parcel data")
rdf <- foreach(i = 1:length(units), .packages=c("lme4", "tidyverse", "broom.mixed", "car"),
.combine='rbind', .noexport = c("clock_wide", "clock_wide_cens", "rt_wide", "clock_streams", "clock_visuomotor", "rt_streams", "rt_visuomotor", "d")) %dopar% {
# message(paste("Analyzing timepoint", t, sep = " "))
if (i %% 10 == 0) {setTxtProgressBar(pb, i)}
unit <- as.character(units[i])
dstreams$h <- as.numeric(dstreams[[unit]])
s <- dstreams[!is.na(dstreams$h),]
if (online) {
md <- lmerTest::lmer(scale(rt_next) ~ scale(h) * scale(rt_vmax) +
scale(h) * rt_csv_sc * last_outcome + scale(h) * rt_lag_sc +
(1|id), s, control=lmerControl(optimizer = "nloptwrap"))
} else {
md <- lmerTest::lmer(scale(rt_next) ~ scale(h) * rt_csv_sc * outcome + scale(h) * scale(rt_vmax) +
scale(h) * rt_lag_sc +
(1|id), s, control=lmerControl(optimizer = "nloptwrap"))}
while (any(grepl("failed to converge", md@optinfo$conv$lme4$messages) )) {
print(md@optinfo$conv$lme4$conv)
ss <- getME(md,c("theta","fixef"))
md <- update(md, start=ss)}
dm <- tidy(md)
dm$unit <- unit
                   dm$t <- gsub(".*_", "", unit) # keep only the trailing timepoint
dm}
# FDR correction ----
message("\nFDR correction")
rdf <- rdf %>% mutate(stat_order = as.factor(case_when(abs(statistic) < 2 ~ '1',
abs(statistic) > 2 & abs(statistic) < 3 ~ '2',
abs(statistic) > 3 ~ '3')),
p_value = as.factor(case_when(`p.value` > .05 ~ '1',
`p.value` < .05 & `p.value` > .01 ~ '2',
`p.value` < .01 & `p.value` > .001 ~ '3',
`p.value` <.001 ~ '4')))
rdf$t <- as.numeric(rdf$t)
rdf$unit <- as.factor(sub("_[^_]+$", "", rdf$unit))
rdf$stat_order <- factor(rdf$stat_order, labels = c("NS", "|t| > 2", "|t| > 3"))
rdf$p_value <- factor(rdf$p_value, labels = c("NS", "p < .05", "p < .01", "p < .001"))
terms <- unique(rdf$term[rdf$effect=="fixed"])
terms <- terms[grepl("(h)",terms)]
rdf <- rdf %>% group_by(term) %>% mutate(p_fdr = p.adjust(p.value, method = 'fdr'),
p_level_fdr = as.factor(case_when(
# p_fdr > .1 ~ '0',
# p_fdr < .1 & p_fdr > .05 ~ '1',
p_fdr > .05 ~ '1',
p_fdr < .05 & p_fdr > .01 ~ '2',
p_fdr < .01 & p_fdr > .001 ~ '3',
p_fdr <.001 ~ '4'))
) %>% ungroup() %>% mutate(side = substr(as.character(unit), nchar(as.character(unit)), nchar(as.character(unit))),
region = substr(as.character(unit), 1, nchar(as.character(unit))-2))
rdf$p_level_fdr <- factor(rdf$p_level_fdr, levels = c('1', '2', '3', '4'), labels = c("NS","p < .05", "p < .01", "p < .001"))
rdf$`p, FDR-corrected` = rdf$p_level_fdr
# plots ----
message("\nSaving RT prediction results")
if (online) {
setwd('~/OneDrive/collected_letters/papers/sceptic_fmri/dan/plots/clock_rt')
if(streams) { rt_results_fname = "clock_rt_output_streams.Rdata"
} else {rt_results_fname = "clock_rt_output_visuomotor.Rdata"}
  } else {setwd('~/OneDrive/collected_letters/papers/sceptic_fmri/dan/plots/rt_rt')
    if(streams) { rt_results_fname = "rt_rt_output_streams.Rdata"
    } else {rt_results_fname = "rt_rt_output_visuomotor.Rdata"}
  }
  save(file = rt_results_fname, rdf) # save in both the online and offline cases
}
stopCluster(cl)
gc()
|
5d5e7f47939744e134e8fcbe687159841a08bb16
|
e706a7d76a4548173f1e09f957cfa262be330c7a
|
/1_population_dyn_fit/popdyn_det.R
|
afdc1cf788af0f76cd477bcde3d719c339d63769
|
[] |
no_license
|
dieraz/prov-theo
|
4dab0d9ae5b87e6109c6fd84996c832f25499da7
|
381fd281cf8f000b5aee9d9d62f124e9bcc0bcd7
|
refs/heads/main
| 2023-04-13T01:10:22.475014
| 2022-05-23T12:54:31
| 2022-05-23T12:54:31
| 369,817,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,136
|
r
|
popdyn_det.R
|
library(deSolve) # provides lsoda()
library(fitR)    # provides the fitmodel() constructor used below
simulate <- function(theta,init.state,times) {
  ode <-function(t,x,params){
    N <- x[1]
    with(as.list(params),{
      # logistic growth (rate b, carrying capacity K) minus per-capita mortality mu
      dN <- b*N*(K-N)/K - mu*N
      dx <- c(dN)
      list(dx)
    })
  }
  traj <- as.data.frame(lsoda(init.state, times, ode, theta))
  return(traj)
}
rPointObs <- function(model.point, theta){
obs.point <- rpois(n=1, lambda=model.point[["N"]])
return(c(obs=obs.point))
}
dPointObs <- function(data.point, model.point, theta,log = FALSE){
return(dpois(x=data.point[["obs"]],lambda=model.point[["N"]],log=log))
}
dprior <- function(theta, log = FALSE) {
log.prior.b <- dunif(theta[["b"]], min = 0.5, max = 2, log = TRUE)
log.prior.K <- dunif(theta[["K"]], min = 25, max = 100, log = TRUE)
log.prior.mu <- dunif(theta[["mu"]], min = 1/52, max = 1/8, log = TRUE)
log.sum = log.prior.b + log.prior.K + log.prior.mu
return(ifelse(log, log.sum, exp(log.sum)))
}
name <- "Population dynamics model"
state.names <- c("N")
theta.names <- c("b","K","mu")
Popdyn_det <- fitmodel(name, state.names, theta.names,simulate, rPointObs, dprior,dPointObs)
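# Minimal usage sketch (illustrative parameter values, not from the original
# script; assumes deSolve and fitR are installed):
theta <- c(b = 1, K = 50, mu = 1/20)        # birth rate, carrying capacity, mortality
init.state <- c(N = 10)                     # initial population size
times <- 0:52                               # one year of weekly time steps
traj <- simulate(theta, init.state, times)  # deterministic trajectory
obs <- apply(traj, 1, rPointObs, theta = theta) # one Poisson observation per time point
dprior(theta, log = TRUE)                   # log prior density of the parameters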
|
6e49b88f20dc23dbceac93f75ec72f840e21d5c8
|
deca20f404aa14f95dbb266585e59ea264e12691
|
/IterativeAlgo/man/apply.inapplicables.Rd
|
fe9ac0c7d1970392e0c067e1391b84bf14c8906b
|
[] |
no_license
|
TGuillerme/Parsimony_Inapplicable
|
0cea924ffcff59b7cf985260c843553170e3f0c4
|
2710e3c89a9e7d4ee02e8c16b19ca168f99a036c
|
refs/heads/master
| 2021-01-10T15:04:34.638326
| 2016-11-24T16:14:06
| 2016-11-24T16:14:06
| 49,874,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,716
|
rd
|
apply.inapplicables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply.inapplicables.R
\name{apply.inapplicables}
\alias{apply.inapplicables}
\title{Apply inapplicable characters to a matrix.}
\usage{
apply.inapplicables(matrix, inapplicables, tree, invariant = FALSE,
verbose = FALSE, ...)
}
\arguments{
\item{matrix}{A discrete morphological matrix.}
\item{inapplicables}{Optional, a vector of character inapplicability sources (either \code{"character"} or \code{"clade"}; see details). The length of this vector must be at most half the total number of characters.}
\item{tree}{If any inapplicable source is \code{"clade"}, a tree from where to select the clades.}
\item{invariant}{Whether to allow invariant sites among the characters with inapplicable data. If \code{invariant = FALSE} the algorithm will try to remove such characters (if possible).}
\item{verbose}{Whether to be verbose or not.}
\item{...}{Any additional arguments.}
}
\description{
Apply inapplicable characters to discrete morphological matrix.
}
\details{
\itemize{
\item The \code{inapplicables} argument takes a vector of character inapplicability sources, rendering a number of characters inapplicable using the following sources:
\itemize{
\item \code{"character"} draws inapplicable characters directly from the character matrix, ignoring the phylogeny (i.e. for a random character X, another random character Y will have inapplicable states wherever character X has state 0).
\item \code{"clade"} draws inapplicable characters from the phylogeny: it will randomly apply inapplicable character states for some characters by randomly selecting clades from the provided tree. The algorithm randomly assigns an inapplicable token for this character for all taxa in this clade or all taxa outside this clade.
}
For example \code{inapplicables = c(rep("character", 2), rep("clade", 2))} will generate 4 characters with inapplicable data, two using previous characters and two other using random clades.
}
}
\examples{
set.seed(4)
## A random tree with 15 tips
tree <- rcoal(15)
## setting up the parameters
my_rates = c(rgamma, 1, 1) # A gamma rate distribution with shape alpha = 1 and rate = 1
my_substitutions = c(runif, 2, 2) # A fixed substitution rate of 2 (T/T ratio in HKY)
## A Mk matrix (15 taxa * 100 characters)
matrixMk <- make.matrix(tree, characters = 100, model = "ER", states = c(0.85, 0.15), rates = my_rates)
## Setting the number and source of inapplicable characters
my_inapplicables <- c(rep("character", 5), rep("clade", 5))
## Apply some inapplicable characters to the matrix
matrix <- apply.inapplicables(matrixMk, my_inapplicables, tree)
}
\author{
Thomas Guillerme
}
|
f37ba20ef775657927f9f4b70ed821b998405392
|
ad184b82a6d1d74f7ed3110c5bccf10719170bd7
|
/man/chimpanzeesDF.Rd
|
9b54a47ad2d1b4bf9657167767ac20af1727bfb4
|
[
"MIT"
] |
permissive
|
flyaflya/causact
|
5a5f695e92ac79997de2fd291845f8ed51b05805
|
17c374a9c039c0d5726931953d97985d87d7dcaa
|
refs/heads/master
| 2023-08-31T13:00:47.344039
| 2023-08-19T13:27:50
| 2023-08-19T13:27:50
| 130,230,186
| 39
| 15
|
NOASSERTION
| 2022-06-02T14:09:58
| 2018-04-19T14:42:45
|
R
|
UTF-8
|
R
| false
| true
| 1,649
|
rd
|
chimpanzeesDF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{chimpanzeesDF}
\alias{chimpanzeesDF}
\title{Data from behavior trials in a captive group of chimpanzees, housed in Louisiana. From Silk et al. 2005. Nature 437:1357-1359 and further popularized in McElreath, Richard. Statistical rethinking: A Bayesian course with examples in R and Stan. CRC press, 2020. Experiment}
\format{
A data frame with 504 rows and 9 variables:
\describe{
\item{actor}{name of actor}
\item{recipient}{name of recipient (NA for partner absent condition)}
\item{condition}{partner absent (0), partner present (1)}
\item{block}{block of trials (each actor x each recipient 1 time)}
\item{trial}{trial number (by chimp = ordinal sequence of trials for each chimp, ranges from 1-72; partner present trials were interspersed with partner absent trials)}
\item{prosoc_left}{prosocial_left : 1 if prosocial (1/1) option was on left}
\item{chose_prosoc}{choice chimp made (0 = 1/0 option, 1 = 1/1 option)}
\item{pulled_left}{which side did chimp pull (1 = left, 0 = right)}
\item{treatment}{narrative description combining condition and prosoc_left that describes the side the prosocial food option was on and whether a partner was present}
}
}
\source{
Silk et al. 2005. Nature 437:1357-1359.
}
\usage{
chimpanzeesDF
}
\description{
Data from behavior trials in a captive group of chimpanzees, housed in Louisiana. From Silk et al. 2005. Nature 437:1357-1359 and further popularized in McElreath, Richard. Statistical rethinking: A Bayesian course with examples in R and Stan. CRC press, 2020. Experiment
}
\keyword{datasets}
|
e87dc29806e34ef9bfededf1ad724da80f6ed8ac
|
7b1099c65c9b8bdb3947d5bdc76359079f79a398
|
/dviz.supp/man/US_regions.Rd
|
9d63a0f627efe59b9672b67add5f6cd2cfe63b66
|
[
"MIT"
] |
permissive
|
f0nzie/dataviz-wilke-2020
|
8b2a3a622c633efa6a00497142798c94e229b9d6
|
7cb0dde407903682c94cdaf5bab8cc8d0d732f0a
|
refs/heads/master
| 2022-11-17T12:03:40.425309
| 2020-07-12T21:53:42
| 2020-07-12T21:53:42
| 279,151,756
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 487
|
rd
|
US_regions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/US_regions.R
\docType{data}
\name{US_regions}
\alias{US_regions}
\title{US regions and divisions}
\format{An object of class \code{data.frame} with 51 rows and 4 columns.}
\usage{
US_regions
}
\description{
The definitions of US regions and divisions were downloaded from census.gov at:
https://www2.census.gov/geo/pdfs/maps-data/maps/reference/us_regdiv.pdf
}
\seealso{
\link{US_census}
}
\keyword{datasets}
|
1d90e3e2d3f48b0d01da26808742c02bd6fac0ec
|
b0abd65c719c61580505b56cb2cec5ff268f9638
|
/r-updates-JoaT.r
|
9972604272925137633939a5516755f0e5cf0301
|
[] |
no_license
|
theGeneral902/JoaT-Linux
|
164ea26bbc740238075f17aaf8ed5e91afd351c1
|
2cb33f84097947c166bb8cdb16e8afa892cdfbdb
|
refs/heads/master
| 2021-01-09T05:34:53.649587
| 2017-02-03T02:01:56
| 2017-02-03T02:01:56
| 80,760,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,371
|
r
|
r-updates-JoaT.r
|
install.packages("acepack", repos="http://cran.rstudio.com/")
install.packages("assertthat", repos="http://cran.rstudio.com/")
install.packages("backports", repos="http://cran.rstudio.com/")
install.packages("base64enc", repos="http://cran.rstudio.com/")
install.packages("BH", repos="http://cran.rstudio.com/")
install.packages("bitops", repos="http://cran.rstudio.com/")
install.packages("boot", repos="http://cran.rstudio.com/")
install.packages("brew", repos="http://cran.rstudio.com/")
install.packages("car", repos="http://cran.rstudio.com/")
install.packages("caTools", repos="http://cran.rstudio.com/")
install.packages("checkmate", repos="http://cran.rstudio.com/")
install.packages("chron", repos="http://cran.rstudio.com/")
install.packages("codetools", repos="http://cran.rstudio.com/")
install.packages("colorspace", repos="http://cran.rstudio.com/")
install.packages("commonmark", repos="http://cran.rstudio.com/")
install.packages("crayon", repos="http://cran.rstudio.com/")
install.packages("data.table", repos="http://cran.rstudio.com/")
install.packages("DBI", repos="http://cran.rstudio.com/")
install.packages("desc", repos="http://cran.rstudio.com/")
install.packages("dichromat", repos="http://cran.rstudio.com/")
install.packages("digest", repos="http://cran.rstudio.com/")
install.packages("doParallel", repos="http://cran.rstudio.com/")
install.packages("dplyr", repos="http://cran.rstudio.com/")
install.packages("e1071", repos="http://cran.rstudio.com/")
install.packages("evaluate", repos="http://cran.rstudio.com/")
install.packages("foreach", repos="http://cran.rstudio.com/")
install.packages("forecast", repos="http://cran.rstudio.com/")
install.packages("foreign", repos="http://cran.rstudio.com/")
install.packages("formatR", repos="http://cran.rstudio.com/")
install.packages("Formula", repos="http://cran.rstudio.com/")
install.packages("fracdiff", repos="http://cran.rstudio.com/")
install.packages("gdata", repos="http://cran.rstudio.com/")
install.packages("geosphere", repos="http://cran.rstudio.com/")
install.packages("ggmap", repos="http://cran.rstudio.com/")
install.packages("ggplot2", repos="http://cran.rstudio.com/")
install.packages("gplots", repos="http://cran.rstudio.com/")
install.packages("gridBase", repos="http://cran.rstudio.com/")
install.packages("gridExtra", repos="http://cran.rstudio.com/")
install.packages("gtable", repos="http://cran.rstudio.com/")
install.packages("gtools", repos="http://cran.rstudio.com/")
install.packages("highr", repos="http://cran.rstudio.com/")
install.packages("Hmisc", repos="http://cran.rstudio.com/")
install.packages("htmlTable", repos="http://cran.rstudio.com/")
install.packages("htmltools", repos="http://cran.rstudio.com/")
install.packages("htmlwidgets", repos="http://cran.rstudio.com/")
install.packages("httpuv", repos="http://cran.rstudio.com/")
install.packages("igraph", repos="http://cran.rstudio.com/")
install.packages("irlba", repos="http://cran.rstudio.com/")
install.packages("ISLR", repos="http://cran.rstudio.com/")
install.packages("iterators", repos="http://cran.rstudio.com/")
install.packages("jpeg", repos="http://cran.rstudio.com/")
install.packages("jsonlite", repos="http://cran.rstudio.com/")
install.packages("knitr", repos="http://cran.rstudio.com/")
install.packages("labeling", repos="http://cran.rstudio.com/")
install.packages("lattice", repos="http://cran.rstudio.com/")
install.packages("latticeExtra", repos="http://cran.rstudio.com/")
install.packages("lazyeval", repos="http://cran.rstudio.com/")
install.packages("lme4", repos="http://cran.rstudio.com/")
install.packages("magrittr", repos="http://cran.rstudio.com/")
install.packages("manipulate", repos="http://cran.rstudio.com/")
install.packages("mapproj", repos="http://cran.rstudio.com/")
install.packages("maps", repos="http://cran.rstudio.com/")
install.packages("markdown", repos="http://cran.rstudio.com/")
install.packages("MASS", repos="http://cran.rstudio.com/")
install.packages("Matrix", repos="http://cran.rstudio.com/")
install.packages("MatrixModels", repos="http://cran.rstudio.com/")
install.packages("memoise", repos="http://cran.rstudio.com/")
install.packages("mgcv", repos="http://cran.rstudio.com/")
install.packages("mime", repos="http://cran.rstudio.com/")
install.packages("minqa", repos="http://cran.rstudio.com/")
install.packages("mnormt", repos="http://cran.rstudio.com/")
install.packages("munsell", repos="http://cran.rstudio.com/")
install.packages("mvtnorm", repos="http://cran.rstudio.com/")
install.packages("nloptr", repos="http://cran.rstudio.com/")
install.packages("NLP", repos="http://cran.rstudio.com/")
install.packages("NMF", repos="http://cran.rstudio.com/")
install.packages("nnet", repos="http://cran.rstudio.com/")
install.packages("pbkrtest", repos="http://cran.rstudio.com/")
install.packages("pkgmaker", repos="http://cran.rstudio.com/")
install.packages("plyr", repos="http://cran.rstudio.com/")
install.packages("png", repos="http://cran.rstudio.com/")
install.packages("praise", repos="http://cran.rstudio.com/")
install.packages("proto", repos="http://cran.rstudio.com/")
install.packages("quadprog", repos="http://cran.rstudio.com/")
install.packages("quantreg", repos="http://cran.rstudio.com/")
install.packages("R6", repos="http://cran.rstudio.com/")
install.packages("randomForest", repos="http://cran.rstudio.com/")
install.packages("RColorBrewer", repos="http://cran.rstudio.com/")
install.packages("Rcpp", repos="http://cran.rstudio.com/")
install.packages("RcppArmadillo", repos="http://cran.rstudio.com/")
install.packages("RcppEigen", repos="http://cran.rstudio.com/")
install.packages("registry", repos="http://cran.rstudio.com/")
install.packages("reshape", repos="http://cran.rstudio.com/")
install.packages("reshape2", repos="http://cran.rstudio.com/")
install.packages("RgoogleMaps", repos="http://cran.rstudio.com/")
install.packages("rjson", repos="http://cran.rstudio.com/")
install.packages("RJSONIO", repos="http://cran.rstudio.com/")
install.packages("rmarkdown", repos="http://cran.rstudio.com/")
install.packages("rngtools", repos="http://cran.rstudio.com/")
install.packages("rpart", repos="http://cran.rstudio.com/")
install.packages("rprojroot", repos="http://cran.rstudio.com/")
install.packages("rstudioapi", repos="http://cran.rstudio.com/")
install.packages("sandwich", repos="http://cran.rstudio.com/")
install.packages("scales", repos="http://cran.rstudio.com/")
install.packages("shiny", repos="http://cran.rstudio.com/")
install.packages("slam", repos="http://cran.rstudio.com/")
install.packages("sourcetools", repos="http://cran.rstudio.com/")
install.packages("sp", repos="http://cran.rstudio.com/")
install.packages("SparseM", repos="http://cran.rstudio.com/")
install.packages("stringi", repos="http://cran.rstudio.com/")
install.packages("stringr", repos="http://cran.rstudio.com/")
install.packages("survival", repos="http://cran.rstudio.com/")
install.packages("testthat", repos="http://cran.rstudio.com/")
install.packages("tibble", repos="http://cran.rstudio.com/")
install.packages("timeDate", repos="http://cran.rstudio.com/")
install.packages("tm", repos="http://cran.rstudio.com/")
install.packages("tseries", repos="http://cran.rstudio.com/")
install.packages("viridis", repos="http://cran.rstudio.com/")
install.packages("whisker", repos="http://cran.rstudio.com/")
install.packages("withr", repos="http://cran.rstudio.com/")
install.packages("xtable", repos="http://cran.rstudio.com/")
install.packages("yaml", repos="http://cran.rstudio.com/")
install.packages("zoo", repos="http://cran.rstudio.com/")
install.packages("base", repos="http://cran.rstudio.com/")
install.packages("boot", repos="http://cran.rstudio.com/")
install.packages("class", repos="http://cran.rstudio.com/")
install.packages("cluster", repos="http://cran.rstudio.com/")
install.packages("codetools", repos="http://cran.rstudio.com/")
install.packages("compiler", repos="http://cran.rstudio.com/")
install.packages("datasets", repos="http://cran.rstudio.com/")
install.packages("foreign", repos="http://cran.rstudio.com/")
install.packages("graphics", repos="http://cran.rstudio.com/")
install.packages("grDevices", repos="http://cran.rstudio.com/")
install.packages("grid", repos="http://cran.rstudio.com/")
install.packages("KernSmooth", repos="http://cran.rstudio.com/")
install.packages("lattice", repos="http://cran.rstudio.com/")
install.packages("MASS", repos="http://cran.rstudio.com/")
install.packages("Matrix", repos="http://cran.rstudio.com/")
install.packages("methods", repos="http://cran.rstudio.com/")
install.packages("mgcv", repos="http://cran.rstudio.com/")
install.packages("nlme", repos="http://cran.rstudio.com/")
install.packages("nnet", repos="http://cran.rstudio.com/")
install.packages("parallel", repos="http://cran.rstudio.com/")
install.packages("rpart", repos="http://cran.rstudio.com/")
install.packages("spatial", repos="http://cran.rstudio.com/")
install.packages("splines", repos="http://cran.rstudio.com/")
install.packages("stats", repos="http://cran.rstudio.com/")
install.packages("stats4", repos="http://cran.rstudio.com/")
install.packages("survival", repos="http://cran.rstudio.com/")
install.packages("tcltk", repos="http://cran.rstudio.com/")
install.packages("tools", repos="http://cran.rstudio.com/")
install.packages("utils", repos="http://cran.rstudio.com/")
|
001e4ee1126a748c3330d51c1a5079cded0a052b
|
13015d2e2a31f708609d34e939ae1f6a4a40717e
|
/spam detection/spam detection.R
|
ab29fec5cb76e2163b668381a00bdf94d29bccb3
|
[] |
no_license
|
rprajwal/analytics-projects
|
1d9cdafbcacf93b24d346029c74c382773afa28d
|
55a3f8922b76e51a741ecb99d9c0a36934fe8a59
|
refs/heads/master
| 2020-04-12T23:45:52.562785
| 2019-02-07T18:20:29
| 2019-02-07T18:20:29
| 162,829,819
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,561
|
r
|
spam detection.R
|
rm(list = ls())
#loading libraries
library(stringr)
library(tm)
library(wordcloud)
library(ggplot2)
library(e1071)
library(C50)
library(caret)
library(textstem)
library(randomForest)
library(xgboost)
#importing data
d=read.csv('D:/analytics/practice/spam.csv')
#removing unwanted variables
d=d[,1:2]
#checking the data types of variables
str(d)
#Renaming the names of variables
colnames(d)=c('spam','Text')
#Converting spam variable to numerical type
d$spam=gsub('ham',0,d$spam)
d$spam=gsub('spam',1,d$spam)
d$spam=as.factor(d$spam)
#Checking number of texts in each class
ggplot(d,aes(x=spam))+geom_bar()
#Removing empty spaces from start and end of the string
d$Text=str_trim(d$Text)
#Converting texts into corpus
c=Corpus(VectorSource(d$Text))
#case folding
c=tm_map(c,tolower)
#remove punctuation marks
c=tm_map(c,removePunctuation)
#remove numbers
c=tm_map(c,removeNumbers)
#remove stopwords
c=tm_map(c,removeWords,stopwords('english'))
#remove blank spaces
c=tm_map(c,stripWhitespace)
#lemmatization
c=tm_map(c,lemmatize_strings)
#Converting corpus back to dataframe
d$ptext=get('content',c)
#plotting wordcloud to findout most frequent words in spam and ham texts
#words in spam messages
wordcloud(d[d$spam==1,'ptext'],random.order = F,colors = brewer.pal(8,'Dark2'))
#words in non spam messages
wordcloud(d[d$spam==0,'ptext'],random.order = F,colors = brewer.pal(12,'Paired'))
#There are some stopwords which are still remaining.
c=tm_map(c,removeWords,c('ill','will','now','just',stopwords('en')))
c=tm_map(c,stripWhitespace)
#build term document matrix
tdm=TermDocumentMatrix(c)
#removing sparse terms from term document matrix
rst=removeSparseTerms(tdm,0.999)
#Converting term document matrix into a data frame
tdd=as.data.frame(t(as.matrix(rst)))
#Finding most frequent words
freq=findFreqTerms(rst,lowfreq = 5)
#Reducing the size of data frame so that it should have terms which appears more than 10 times
tdd=tdd[,freq]
tdd=cbind.data.frame(tdd,TT=d$spam)
names(tdd)=make.names(names(tdd))
#Splitting data to train test
ti=sample(nrow(d),size = 0.8*nrow(d))
train=tdd[ti,]
test=tdd[-ti,]
#Modeling
#Random Forest
rf=randomForest(TT~.,data = train,ntree=100)
rfp=predict(rf,test[,-1149]) #column 1149 is the class label TT
confusionMatrix(test[,1149],rfp)
#Accuracy=97.94
#False negative rate=12.5
#False positive rate=0.4
#------------------------------------------------------
#Support vector machine
sv=svm(TT~.,train,cost=10)
svp=predict(sv,test[,-1149])
confusionMatrix(test[,1149],svp)
#Accuracy=96.32
#False negative rate=27.3
#False positive rate=0
#---------------------------------------------------
#Logistic regression
lr=glm(TT~.,data=train,family = 'binomial')
summary(lr)
lrp=predict(lr,test[,-1149],type='response') #predicted probabilities, not default link-scale log-odds
lrp=ifelse(lrp<0.5,0,1)
table(test[,1149],lrp)
#Accuracy=93.9
#False negative rate=13.3
#False positive rate=4.9
#---------------------------------------------------------
#Naive Bayes
nb=naiveBayes(TT~.,train)
nbp=predict(nb,test[,-1149])
confusionMatrix(test$TT,nbp)
#Accuracy=13.63
#False negative rate=100
#False positive rate=0
#------------------------------------------------------
#xgboost
xg=xgboost(data = as.matrix(train[,-1149]),label = as.numeric(as.character(train[,1149])),objective='binary:logistic',nrounds = 100) #factor label converted to numeric 0/1, as xgboost requires
xgp=predict(xg,as.matrix(test[,-1149]))
xgp=ifelse(xgp>0.5,1,0)
table(test[,1149],xgp)
#Accuracy=97.84
#False negative rate=14.4
#False positive rate=0.2
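#------------------------------------------------------
#Scoring a new message (a minimal sketch, not part of the original pipeline):
#reuses the preprocessing steps above and restricts the document-term matrix
#to the training vocabulary 'freq' so the random forest sees the same columns.
score_message=function(msg){
  nc=Corpus(VectorSource(str_trim(msg)))
  nc=tm_map(nc,tolower)
  nc=tm_map(nc,removePunctuation)
  nc=tm_map(nc,removeNumbers)
  nc=tm_map(nc,removeWords,stopwords('english'))
  nc=tm_map(nc,stripWhitespace)
  nc=tm_map(nc,lemmatize_strings)
  dtm=DocumentTermMatrix(nc,control=list(dictionary=freq))
  newdata=as.data.frame(as.matrix(dtm))
  names(newdata)=make.names(names(newdata))
  predict(rf,newdata)
}
#example: a spam-like message should be predicted as class 1
score_message('Congratulations! You won a free prize, call now')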
|
f482ffa4eb2c332f5bad709262503926e6bda7dd
|
0068bb3ff70280d0832c21f7f0addbf7d9febb05
|
/R/RenameShinyFileNames.R
|
c3a645bcd173fb0e9a5a2745efd07b0bea786db2
|
[
"Apache-2.0"
] |
permissive
|
ohdsi-studies/EhdenRaDmardsEstimation
|
e5874f684dbad837b84bc4ed6729675e0d048ce5
|
075e85ebd89bd15c539ef5abf206ff0b540fa3d3
|
refs/heads/master
| 2020-12-09T22:24:11.713185
| 2020-04-22T04:07:25
| 2020-04-22T04:07:25
| 233,433,499
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,210
|
r
|
RenameShinyFileNames.R
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of EhdenRaDmardsEstimation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @export
renameShinyFileNames <- function(dataFolder) {
files <- list.files(dataFolder, pattern = ".rds")
splitTables <- c("covariate_balance", "preference_score_dist", "kaplan_meier_dist")
for (splitTable in splitTables) {
splitTableFilesFrom <- file.path(dataFolder, grep(splitTable, files, value = TRUE))
splitTablesFilesTo <- gsub("Amb_EMR", "AmbEMR", splitTableFilesFrom)
splitTablesFilesTo <- gsub("BELGIUM", "DABelgium", splitTablesFilesTo)
splitTablesFilesTo <- gsub("GERMANY", "DAGermany", splitTablesFilesTo)
splitTablesFilesTo <- gsub("IPCI-HI-LARIOUS-RA", "ICPI", splitTablesFilesTo)
splitTablesFilesTo <- gsub("LPDFRANCE", "LPDFrance", splitTablesFilesTo)
splitTablesFilesTo <- gsub("Optum", "ClinFormatics", splitTablesFilesTo)
splitTablesFilesTo <- gsub("PanTher", "OptumEHR", splitTablesFilesTo)
file.rename(splitTableFilesFrom, splitTablesFilesTo)
}
toBlind <- readRDS(file.path(dataFolder, "to_blind.rds"))
toBlind$database_id[toBlind$database_id == "Amb_EMR"] <- "AmbEMR"
toBlind$database_id[toBlind$database_id == "BELGIUM"] <- "DABelgium"
toBlind$database_id[toBlind$database_id == "GERMANY"] <- "DAGermany"
toBlind$database_id[toBlind$database_id == "IPCI-HI-LARIOUS-RA"] <- "ICPI"
toBlind$database_id[toBlind$database_id == "LPDFRANCE"] <- "LPDFrance"
toBlind$database_id[toBlind$database_id == "Optum"] <- "ClinFormatics"
toBlind$database_id[toBlind$database_id == "PanTher"] <- "OptumEHR"
saveRDS(toBlind, file.path(dataFolder, "to_blind.rds"))
}
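# Minimal usage sketch (the folder path is illustrative only): renames the
# per-database covariate_balance/preference_score_dist/kaplan_meier_dist .rds
# files in place and rewrites to_blind.rds with the standardized database ids.
# renameShinyFileNames(dataFolder = "shinyData")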
|
af720955e4dc68916c73d783dfa70edeb130b89b
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.inspector/R/paws.inspector_interfaces.R
|
a0296ce402f7ce60c3508c8dad6330328c697da1
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55,366
|
r
|
paws.inspector_interfaces.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
NULL
add_attributes_to_findings_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(findingArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L)), attributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L))), tags = list(type = "structure"))
return(populate(args, shape))
}
add_attributes_to_findings_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
create_assessment_target_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetName = structure(logical(0),
tags = list(type = "string", max = 140L, min = 1L)),
resourceGroupArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
create_assessment_target_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
create_assessment_template_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
assessmentTemplateName = structure(logical(0), tags = list(type = "string",
max = 140L, min = 1L)), durationInSeconds = structure(logical(0),
tags = list(type = "integer", max = 86400L, min = 180L)),
rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)),
userAttributesForFindings = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
create_assessment_template_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
create_exclusions_preview_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
create_exclusions_preview_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(previewToken = structure(logical(0),
tags = list(type = "string", pattern = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
create_resource_group_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceGroupTags = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
create_resource_group_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceGroupArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
delete_assessment_run_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
delete_assessment_run_output <- function ()
{
return(list())
}
delete_assessment_target_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
delete_assessment_target_output <- function ()
{
return(list())
}
delete_assessment_template_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
delete_assessment_template_output <- function ()
{
return(list())
}
describe_assessment_runs_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_assessment_runs_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRuns = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
name = structure(logical(0), tags = list(type = "string",
max = 140L, min = 1L)), assessmentTemplateArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
state = structure(logical(0), tags = list(type = "string",
enum = c("CREATED", "START_DATA_COLLECTION_PENDING",
"START_DATA_COLLECTION_IN_PROGRESS", "COLLECTING_DATA",
"STOP_DATA_COLLECTION_PENDING", "DATA_COLLECTED",
"START_EVALUATING_RULES_PENDING", "EVALUATING_RULES",
"FAILED", "ERROR", "COMPLETED", "COMPLETED_WITH_ERRORS",
"CANCELED"))), durationInSeconds = structure(logical(0),
tags = list(type = "integer", max = 86400L, min = 180L)),
rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 1L)),
userAttributesForFindings = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L)),
createdAt = structure(logical(0), tags = list(type = "timestamp")),
startedAt = structure(logical(0), tags = list(type = "timestamp")),
completedAt = structure(logical(0), tags = list(type = "timestamp")),
stateChangedAt = structure(logical(0), tags = list(type = "timestamp")),
dataCollected = structure(logical(0), tags = list(type = "boolean")),
stateChanges = structure(list(structure(list(stateChangedAt = structure(logical(0),
tags = list(type = "timestamp")), state = structure(logical(0),
tags = list(type = "string", enum = c("CREATED",
"START_DATA_COLLECTION_PENDING", "START_DATA_COLLECTION_IN_PROGRESS",
"COLLECTING_DATA", "STOP_DATA_COLLECTION_PENDING",
"DATA_COLLECTED", "START_EVALUATING_RULES_PENDING",
"EVALUATING_RULES", "FAILED", "ERROR", "COMPLETED",
"COMPLETED_WITH_ERRORS", "CANCELED")))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 0L)),
notifications = structure(list(structure(list(date = structure(logical(0),
tags = list(type = "timestamp")), event = structure(logical(0),
tags = list(type = "string", enum = c("ASSESSMENT_RUN_STARTED",
"ASSESSMENT_RUN_COMPLETED", "ASSESSMENT_RUN_STATE_CHANGED",
"FINDING_REPORTED", "OTHER"))), message = structure(logical(0),
tags = list(type = "string", max = 1000L, min = 0L)),
error = structure(logical(0), tags = list(type = "boolean")),
snsTopicArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), snsPublishStatusCode = structure(logical(0),
tags = list(type = "string", enum = c("SUCCESS",
"TOPIC_DOES_NOT_EXIST", "ACCESS_DENIED", "INTERNAL_ERROR")))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 50L, min = 0L)), findingCounts = structure(list(structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "map"))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 10L, min = 0L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_assessment_targets_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_assessment_targets_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargets = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
name = structure(logical(0), tags = list(type = "string",
max = 140L, min = 1L)), resourceGroupArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
createdAt = structure(logical(0), tags = list(type = "timestamp")),
updatedAt = structure(logical(0), tags = list(type = "timestamp"))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 10L, min = 0L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_assessment_templates_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_assessment_templates_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplates = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
name = structure(logical(0), tags = list(type = "string",
max = 140L, min = 1L)), assessmentTargetArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
durationInSeconds = structure(logical(0), tags = list(type = "integer",
max = 86400L, min = 180L)), rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)),
userAttributesForFindings = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L)),
lastAssessmentRunArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), assessmentRunCount = structure(logical(0),
tags = list(type = "integer")), createdAt = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_cross_account_access_role_input <- function ()
{
return(list())
}
describe_cross_account_access_role_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(roleArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), valid = structure(logical(0),
tags = list(type = "boolean")), registeredAt = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_exclusions_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(exclusionArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 1L)), locale = structure(logical(0),
tags = list(type = "string", enum = "EN_US"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_exclusions_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(exclusions = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
title = structure(logical(0), tags = list(type = "string",
max = 20000L, min = 0L)), description = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L)),
recommendation = structure(logical(0), tags = list(type = "string",
max = 20000L, min = 0L)), scopes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", enum = c("INSTANCE_ID",
"RULES_PACKAGE_ARN"))), value = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure"))),
tags = list(type = "list", min = 1L)), attributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 0L))),
tags = list(type = "structure"))), tags = list(type = "map",
max = 100L, min = 1L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_findings_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(findingArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L)), locale = structure(logical(0),
tags = list(type = "string", enum = "EN_US"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_findings_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(findings = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
schemaVersion = structure(logical(0), tags = list(type = "integer",
min = 0L)), service = structure(logical(0), tags = list(type = "string",
max = 128L, min = 0L)), serviceAttributes = structure(list(schemaVersion = structure(logical(0),
tags = list(type = "integer", min = 0L)), assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
rulesPackageArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L))), tags = list(type = "structure")),
assetType = structure(logical(0), tags = list(type = "string",
enum = "ec2-instance")), assetAttributes = structure(list(schemaVersion = structure(logical(0),
tags = list(type = "integer", min = 0L)), agentId = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
autoScalingGroup = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L)), amiId = structure(logical(0),
tags = list(type = "string", max = 256L, min = 0L)),
hostname = structure(logical(0), tags = list(type = "string",
max = 256L, min = 0L)), ipv4Addresses = structure(list(structure(logical(0),
tags = list(type = "string", max = 15L, min = 7L))),
tags = list(type = "list", max = 50L, min = 0L)),
tags = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list")), networkInterfaces = structure(list(structure(list(networkInterfaceId = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L)),
subnetId = structure(logical(0), tags = list(type = "string",
max = 20000L, min = 0L)), vpcId = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), privateDnsName = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), privateIpAddress = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), privateIpAddresses = structure(list(structure(list(privateDnsName = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), privateIpAddress = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L))), tags = list(type = "structure"))),
tags = list(type = "list")), publicDnsName = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), publicIp = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), ipv6Addresses = structure(list(structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L))), tags = list(type = "list")),
securityGroups = structure(list(structure(list(groupName = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L)), groupId = structure(logical(0),
tags = list(type = "string", max = 20000L,
min = 0L))), tags = list(type = "structure"))),
tags = list(type = "list"))), tags = list(type = "structure"))),
tags = list(type = "list"))), tags = list(type = "structure")),
id = structure(logical(0), tags = list(type = "string",
max = 128L, min = 0L)), title = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L)),
description = structure(logical(0), tags = list(type = "string",
max = 20000L, min = 0L)), recommendation = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L)),
severity = structure(logical(0), tags = list(type = "string",
enum = c("Low", "Medium", "High", "Informational",
"Undefined"))), numericSeverity = structure(logical(0),
tags = list(type = "double", max = 10L, min = 0L)),
confidence = structure(logical(0), tags = list(type = "integer",
max = 10L, min = 0L)), indicatorOfCompromise = structure(logical(0),
tags = list(type = "boolean")), attributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 0L)),
userAttributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L)),
createdAt = structure(logical(0), tags = list(type = "timestamp")),
updatedAt = structure(logical(0), tags = list(type = "timestamp"))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 100L, min = 0L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_resource_groups_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceGroupArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_resource_groups_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceGroups = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
tags = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 1L)),
createdAt = structure(logical(0), tags = list(type = "timestamp"))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 10L, min = 0L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_rules_packages_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L)), locale = structure(logical(0),
tags = list(type = "string", enum = "EN_US"))), tags = list(type = "structure"))
return(populate(args, shape))
}
describe_rules_packages_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(rulesPackages = structure(list(structure(list(arn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
name = structure(logical(0), tags = list(type = "string",
max = 1000L, min = 0L)), version = structure(logical(0),
tags = list(type = "string", max = 1000L, min = 0L)),
provider = structure(logical(0), tags = list(type = "string",
max = 1000L, min = 0L)), description = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 10L, min = 0L)), failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
get_assessment_report_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
reportFileFormat = structure(logical(0), tags = list(type = "string",
enum = c("HTML", "PDF"))), reportType = structure(logical(0),
tags = list(type = "string", enum = c("FINDING",
"FULL")))), tags = list(type = "structure"))
return(populate(args, shape))
}
get_assessment_report_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(status = structure(logical(0), tags = list(type = "string",
enum = c("WORK_IN_PROGRESS", "FAILED", "COMPLETED"))),
url = structure(logical(0), tags = list(type = "string",
max = 2048L))), tags = list(type = "structure"))
return(populate(args, shape))
}
get_exclusions_preview_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
previewToken = structure(logical(0), tags = list(type = "string",
pattern = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")),
nextToken = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), maxResults = structure(logical(0),
tags = list(type = "integer")), locale = structure(logical(0),
tags = list(type = "string", enum = "EN_US"))), tags = list(type = "structure"))
return(populate(args, shape))
}
get_exclusions_preview_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(previewStatus = structure(logical(0),
tags = list(type = "string", enum = c("WORK_IN_PROGRESS",
"COMPLETED"))), exclusionPreviews = structure(list(structure(list(title = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L)),
description = structure(logical(0), tags = list(type = "string",
max = 20000L, min = 0L)), recommendation = structure(logical(0),
tags = list(type = "string", max = 20000L, min = 0L)),
scopes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", enum = c("INSTANCE_ID",
"RULES_PACKAGE_ARN"))), value = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure"))),
tags = list(type = "list", min = 1L)), attributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 0L))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
get_telemetry_metadata_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
get_telemetry_metadata_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(telemetryMetadata = structure(list(structure(list(messageType = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
count = structure(logical(0), tags = list(type = "long")),
dataSize = structure(logical(0), tags = list(type = "long"))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 5000L, min = 0L))), tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_run_agents_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
filter = structure(list(agentHealths = structure(list(structure(logical(0),
tags = list(type = "string", enum = c("HEALTHY",
"UNHEALTHY", "UNKNOWN")))), tags = list(type = "list",
max = 10L, min = 0L)), agentHealthCodes = structure(list(structure(logical(0),
tags = list(type = "string", enum = c("IDLE", "RUNNING",
"SHUTDOWN", "UNHEALTHY", "THROTTLED", "UNKNOWN")))),
tags = list(type = "list", max = 10L, min = 0L))),
tags = list(type = "structure")), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
maxResults = structure(logical(0), tags = list(type = "integer"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_run_agents_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunAgents = structure(list(structure(list(agentId = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
assessmentRunArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), agentHealth = structure(logical(0),
tags = list(type = "string", enum = c("HEALTHY",
"UNHEALTHY", "UNKNOWN"))), agentHealthCode = structure(logical(0),
tags = list(type = "string", enum = c("IDLE", "RUNNING",
"SHUTDOWN", "UNHEALTHY", "THROTTLED", "UNKNOWN"))),
agentHealthDetails = structure(logical(0), tags = list(type = "string",
max = 1000L, min = 0L)), autoScalingGroup = structure(logical(0),
tags = list(type = "string", max = 256L, min = 1L)),
telemetryMetadata = structure(list(structure(list(messageType = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
count = structure(logical(0), tags = list(type = "long")),
dataSize = structure(logical(0), tags = list(type = "long"))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 5000L, min = 0L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 500L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_runs_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)), filter = structure(list(namePattern = structure(logical(0),
tags = list(type = "string", max = 140L, min = 1L)),
states = structure(list(structure(logical(0), tags = list(type = "string",
enum = c("CREATED", "START_DATA_COLLECTION_PENDING",
"START_DATA_COLLECTION_IN_PROGRESS", "COLLECTING_DATA",
"STOP_DATA_COLLECTION_PENDING", "DATA_COLLECTED",
"START_EVALUATING_RULES_PENDING", "EVALUATING_RULES",
"FAILED", "ERROR", "COMPLETED", "COMPLETED_WITH_ERRORS",
"CANCELED")))), tags = list(type = "list", max = 50L,
min = 0L)), durationRange = structure(list(minSeconds = structure(logical(0),
tags = list(type = "integer", max = 86400L, min = 180L)),
maxSeconds = structure(logical(0), tags = list(type = "integer",
max = 86400L, min = 180L))), tags = list(type = "structure")),
rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)),
startTimeRange = structure(list(beginDate = structure(logical(0),
tags = list(type = "timestamp")), endDate = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure")),
completionTimeRange = structure(list(beginDate = structure(logical(0),
tags = list(type = "timestamp")), endDate = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure")),
stateChangeTimeRange = structure(list(beginDate = structure(logical(0),
tags = list(type = "timestamp")), endDate = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure"))),
tags = list(type = "structure")), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
maxResults = structure(logical(0), tags = list(type = "integer"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_runs_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_targets_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(filter = structure(list(assessmentTargetNamePattern = structure(logical(0),
tags = list(type = "string", max = 140L, min = 1L))),
tags = list(type = "structure")), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
maxResults = structure(logical(0), tags = list(type = "integer"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_targets_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_templates_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)), filter = structure(list(namePattern = structure(logical(0),
tags = list(type = "string", max = 140L, min = 1L)),
durationRange = structure(list(minSeconds = structure(logical(0),
tags = list(type = "integer", max = 86400L, min = 180L)),
maxSeconds = structure(logical(0), tags = list(type = "integer",
max = 86400L, min = 180L))), tags = list(type = "structure")),
rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L))),
tags = list(type = "structure")), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
maxResults = structure(logical(0), tags = list(type = "integer"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_assessment_templates_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_event_subscriptions_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
nextToken = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), maxResults = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
list_event_subscriptions_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(subscriptions = structure(list(structure(list(resourceArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
topicArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), eventSubscriptions = structure(list(structure(list(event = structure(logical(0),
tags = list(type = "string", enum = c("ASSESSMENT_RUN_STARTED",
"ASSESSMENT_RUN_COMPLETED", "ASSESSMENT_RUN_STATE_CHANGED",
"FINDING_REPORTED", "OTHER"))), subscribedAt = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 1L))),
tags = list(type = "structure"))), tags = list(type = "list",
max = 50L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_exclusions_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
nextToken = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), maxResults = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
list_exclusions_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(exclusionArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_findings_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)), filter = structure(list(agentIds = structure(list(structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L))),
tags = list(type = "list", max = 500L, min = 0L)), autoScalingGroups = structure(list(structure(logical(0),
tags = list(type = "string", max = 256L, min = 1L))),
tags = list(type = "list", max = 20L, min = 0L)), ruleNames = structure(list(structure(logical(0),
tags = list(type = "string", max = 1000L))), tags = list(type = "list",
max = 50L, min = 0L)), severities = structure(list(structure(logical(0),
tags = list(type = "string", enum = c("Low", "Medium",
"High", "Informational", "Undefined")))), tags = list(type = "list",
max = 50L, min = 0L)), rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 50L, min = 0L)), attributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 0L)), userAttributes = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 50L, min = 0L)), creationTimeRange = structure(list(beginDate = structure(logical(0),
tags = list(type = "timestamp")), endDate = structure(logical(0),
tags = list(type = "timestamp"))), tags = list(type = "structure"))),
tags = list(type = "structure")), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
maxResults = structure(logical(0), tags = list(type = "integer"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_findings_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(findingArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_rules_packages_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
maxResults = structure(logical(0), tags = list(type = "integer"))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_rules_packages_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(rulesPackageArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_tags_for_resource_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
list_tags_for_resource_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(tags = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L))), tags = list(type = "structure"))
return(populate(args, shape))
}
preview_agents_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(previewAgentsArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
nextToken = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L)), maxResults = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
preview_agents_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(agentPreviews = structure(list(structure(list(hostname = structure(logical(0),
tags = list(type = "string", max = 256L, min = 0L)),
agentId = structure(logical(0), tags = list(type = "string",
max = 128L, min = 1L)), autoScalingGroup = structure(logical(0),
tags = list(type = "string", max = 256L, min = 1L)),
agentHealth = structure(logical(0), tags = list(type = "string",
enum = c("HEALTHY", "UNHEALTHY", "UNKNOWN"))), agentVersion = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
operatingSystem = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L)), kernelVersion = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
ipv4Address = structure(logical(0), tags = list(type = "string",
max = 15L, min = 7L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 100L, min = 0L)), nextToken = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
register_cross_account_access_role_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(roleArn = structure(logical(0), tags = list(type = "string",
max = 300L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
register_cross_account_access_role_output <- function ()
{
return(list())
}
remove_attributes_from_findings_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(findingArns = structure(list(structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "list", max = 10L, min = 1L)), attributeKeys = structure(list(structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L))),
tags = list(type = "list", max = 10L, min = 0L))), tags = list(type = "structure"))
return(populate(args, shape))
}
remove_attributes_from_findings_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(failedItems = structure(list(structure(list(failureCode = structure(logical(0),
tags = list(type = "string", enum = c("INVALID_ARN",
"DUPLICATE_ARN", "ITEM_DOES_NOT_EXIST", "ACCESS_DENIED",
"LIMIT_EXCEEDED", "INTERNAL_ERROR"))), retryable = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "map"))), tags = list(type = "structure"))
return(populate(args, shape))
}
set_tags_for_resource_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
tags = structure(list(structure(list(key = structure(logical(0),
tags = list(type = "string", max = 128L, min = 1L)),
value = structure(logical(0), tags = list(type = "string",
max = 256L, min = 1L))), tags = list(type = "structure"))),
tags = list(type = "list", max = 10L, min = 0L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
set_tags_for_resource_output <- function ()
{
return(list())
}
start_assessment_run_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTemplateArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
assessmentRunName = structure(logical(0), tags = list(type = "string",
max = 140L, min = 1L))), tags = list(type = "structure"))
return(populate(args, shape))
}
start_assessment_run_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
stop_assessment_run_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentRunArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
stopAction = structure(logical(0), tags = list(type = "string",
enum = c("START_EVALUATION", "SKIP_EVALUATION")))),
tags = list(type = "structure"))
return(populate(args, shape))
}
stop_assessment_run_output <- function ()
{
return(list())
}
subscribe_to_event_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
event = structure(logical(0), tags = list(type = "string",
enum = c("ASSESSMENT_RUN_STARTED", "ASSESSMENT_RUN_COMPLETED",
"ASSESSMENT_RUN_STATE_CHANGED", "FINDING_REPORTED",
"OTHER"))), topicArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
subscribe_to_event_output <- function ()
{
return(list())
}
unsubscribe_from_event_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(resourceArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
event = structure(logical(0), tags = list(type = "string",
enum = c("ASSESSMENT_RUN_STARTED", "ASSESSMENT_RUN_COMPLETED",
"ASSESSMENT_RUN_STATE_CHANGED", "FINDING_REPORTED",
"OTHER"))), topicArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
unsubscribe_from_event_output <- function ()
{
return(list())
}
update_assessment_target_input <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(assessmentTargetArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L)),
assessmentTargetName = structure(logical(0), tags = list(type = "string",
max = 140L, min = 1L)), resourceGroupArn = structure(logical(0),
tags = list(type = "string", max = 300L, min = 1L))),
tags = list(type = "structure"))
return(populate(args, shape))
}
update_assessment_target_output <- function ()
{
return(list())
}
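# Usage sketch (the ARN and run name below are hypothetical placeholders;
# populate(), used throughout above, merges the supplied arguments into the
# shape template):
# start_assessment_run_input(
#   assessmentTemplateArn = "arn:aws:inspector:eu-west-1:123456789012:target/0-x/template/0-y",
#   assessmentRunName = "nightly-run"
# )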
|
a4e80166ddeeb35bfe4a68fa8b42cca24d775f37
|
7a866c210bba93fa33e02305e221338541d6ec9b
|
/Direction JOL/Timed JOL/Output/Merged/bin jol data2.R
|
84144d3b9075921d1076fe694b7f648518444839
|
[] |
no_license
|
npm27/Spring-2019-Projects
|
afbb6d3816e097b58f7d5032bc8d7563536a232a
|
52e0c1c4dc3de2e0399e5391dd2c8aff56754c1c
|
refs/heads/master
| 2021-12-20T01:01:11.218779
| 2021-12-08T14:49:18
| 2021-12-08T14:49:18
| 168,214,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
bin jol data2.R
|
dat = read.csv('binnedJOL.csv')
dat = dat[ , -1] ##drop first column
|
e0764731a1679adef83ee62781d3c9b5154e50bb
|
93adc096f7104252dc6d3470cded0fdf9a48f800
|
/varray/pkg/man/update.matrix.Rd
|
1dd6941c2aaa6c5bfcdffd5a4afa4eceedf39cef
|
[
"MIT"
] |
permissive
|
tplate/fuzzy-llama
|
4ce3ff478b626963a3b4135b488e9f8e5cf1ff65
|
ea8ee46d7e669f87ab99a4476a60c8799214e3a0
|
refs/heads/master
| 2020-12-12T09:58:35.508737
| 2018-11-29T06:10:54
| 2018-11-29T06:10:54
| 17,426,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,328
|
rd
|
update.matrix.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/update.Matrix.R
\name{update.Matrix}
\alias{update.Matrix}
\alias{update.matrix}
\title{Add data to an object (obsolete version of add.data)}
\usage{
\method{update}{Matrix}(object, data, need.dimnames = list(NULL, NULL),
keep.ordered = TRUE, ...)
\method{update}{matrix}(object, data, need.dimnames = list(NULL, NULL),
keep.ordered = TRUE, ...)
}
\arguments{
\item{object}{An object to add data to, specified by name (i.e., a character string).
The object is changed in place (i.e., the function will have side effects).}
\item{data}{New data to incorporate in the object.
Should have the same number of dimensions as the object
being updated (i.e.,
\code{length(dim(x))==length(dim(data))}). Must have
dimnames.}
\item{need.dimnames}{Dimension names that should be included in the updated object.}
\item{keep.ordered}{Logical. Specifies which dimensions should be
kept ordered. Can be a single element or a vector with
length equal to the number of dimensions of
\code{object}.}
\item{\dots}{Not used, but needed because \code{add.data()} could be a generic.}
}
\value{
The altered object \code{x}. If \code{x} was the
name of an object, then the altered object is also
updated in place.
}
\description{
Update the contents of a Matrix or matrix object, adding new dimension
indices if necessary.
}
\details{
Can be used in multiple ways, either by calling it as a generic or by calling the method directly (to create the object if it does not already exist):
\itemize{
\item
add.data(x, newdata): adds newdata to existing object x and returns the modified x (no side effects)
\item
add.data.Matrix(x, newdata): adds newdata to existing Matrix object x (no side effects)
\item
add.data('x', newdata): adds newdata to existing object named 'x' and saves the modified x (has side effects)
\item
add.data.Matrix('x', newdata): adds newdata to existing Matrix object named 'x' and saves the modified object in 'x' OR if 'x' doesn't exist, creates new Matrix object with contents newdata and saves it in 'x' (has side effects)
}
}
\note{
Not really closely related to \code{varray} objects, but supplied here
as a useful analogue to \code{\link{add.tsdata.varray}}.
}
\examples{
x <- cbind(A=c(a=1))
update.matrix('x', cbind(B=c(b=2)))
x
}
|
7cb5b573c71a5e1380d1e6d4d62ab41636a53a37
|
0bfcd342f305fde1ccca49a1c547671ce27357a4
|
/man/update_record.Rd
|
15bd4be08bbd97e6f411da422d174552c5c70687
|
[] |
no_license
|
zapier/AirtableR
|
7df507157af72b7e5ce6fef494ae54b269ad331e
|
81c3602ddf9ee9610084a48f26836494c708f847
|
refs/heads/master
| 2021-01-24T22:51:38.588332
| 2016-06-27T16:07:35
| 2016-06-27T16:07:35
| 59,320,016
| 0
| 1
| null | 2016-05-20T19:12:24
| 2016-05-20T19:12:23
| null |
UTF-8
|
R
| false
| false
| 713
|
rd
|
update_record.Rd
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/update_record.R
\name{update_record}
\alias{update_record}
\title{Update a record}
\usage{
update_record(air_options, table, record_id, fields, method = "PATCH")
}
\arguments{
\item{air_options}{A list}
\item{table}{A length-one character vector}
\item{record_id}{A length-one character vector}
\item{fields}{A list for fields}
\item{method}{A length-one character vector. The default is "PATCH"}
}
\value{
A request object
}
\description{
\code{update_record} updates a record by issuing a PATCH or PUT request to a record endpoint.
Note that if you use the PUT method, any fields that are not included will be cleared.
}
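\examples{
\dontrun{
# Sketch only: the option names, table, record id and field values below are
# hypothetical placeholders, not taken from the package documentation.
air_options <- list(airtable_api_key = "keyXXXXXXXXXXXXXX", base = "appXXXXXXXXXXXXXX")
update_record(air_options, table = "Tasks", record_id = "recXXXXXXXXXXXXXX",
              fields = list(Status = "Done"))
}
}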
|
37b52c643caed5d4551134d2eadd2040bfda7b89
|
95ddb283bc126d83c683cecbf9b874521e08da98
|
/R/checkMatrices.R
|
fbc426e924a240352bace393a80289c1142241a9
|
[] |
no_license
|
aciobanusebi/s2fa
|
05770bcc5c23d8524beaa5cf71d343d0ab452b26
|
e3f0dd9770e0d3843948c6d039504531d575d752
|
refs/heads/master
| 2021-09-07T21:38:38.465399
| 2021-08-04T15:25:44
| 2021-08-04T15:25:44
| 192,394,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
checkMatrices.R
|
checkMatrices <- function(X_t,Z_t) {
# X_t - checked
  # Z_t - checked
if(nrow(X_t) != nrow(Z_t)) {
stop("Number of rows must be the same in the two matrices")
}
}
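# Usage sketch (hypothetical matrices):
# X <- matrix(1:6, nrow = 3); Z <- matrix(7:15, nrow = 3)
# checkMatrices(X, Z)  # silent when row counts match; stops otherwise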
|
a4167eefb5f08bd81b4e1b1890215f9dc48a2e10
|
3f8c626d06952ff30a7c8cc1f24e5f852bbca42c
|
/workout3/binomial/man/plot.bincum.Rd
|
9d6067f867b612e07990f84473ed6f3d28db9c17
|
[] |
no_license
|
stat133-sp19/hw-stat133-garyhu9718
|
99008c4619d621a03b703cec01064c02469cadc1
|
a170019cfa92aa220852a33b56984dc5234c0d33
|
refs/heads/master
| 2020-04-28T20:30:41.272517
| 2019-05-03T21:45:58
| 2019-05-03T21:45:58
| 175,546,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 499
|
rd
|
plot.bincum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin-cumulative.R
\name{plot.bincum}
\alias{plot.bincum}
\title{binomial cumulative distribution plot}
\usage{
\method{plot}{bincum}(x, ...)
}
\arguments{
\item{x}{a bincum data frame recording the cumulative distribution information}
\item{...}{arguments passed on to other methods}
}
\value{
the cumulative distribution graph
}
\description{
plot the cumulative distribution graph
}
\examples{
plot(bin_cumulative(10, 0.2))
}
|
a9c5a62195659e00a21bcd96255912c26519d6eb
|
e8cd3ff9e965465c2f8bbdeee3dfafe131073507
|
/R para Data Science/9 - arrumando_estudo_de_caso.R
|
394613a08098ccc053ff6e62e43089a70d635249
|
[] |
no_license
|
victormnalves/estudos_de_ferias
|
d6fd63548dacd0e4a92a97575697d783b8debdc1
|
dfa9d13ee86ec29853c3335a7c07d44933c9809e
|
refs/heads/main
| 2023-02-24T21:20:59.895723
| 2021-02-01T18:12:37
| 2021-02-01T18:12:37
| 324,792,945
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
r
|
9 - arrumando_estudo_de_caso.R
|
library(tidyverse)
library(nycflights13)
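# Tidy the WHO TB data step by step: gather the case-count columns into
# key/value pairs, normalize the 'newrel' keys, split the key into
# new/type/sexage, drop redundant columns, then split sexage into sex and age.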
who1 <- who %>% gather(new_sp_m014:newrel_f65, key = 'key', value = 'cases', na.rm = T)
who2 <- who1 %>% mutate(key = stringr::str_replace(key, 'newrel', 'new_rel'))
who3 <- who2 %>% separate(key,c('new','type','sexage'),sep = '_')
who4 <- who3 %>% select(-c(new, iso2, iso3))
who5 <- who4 %>% separate(sexage, c('sex', 'age'),sep = 1)
view(who5)
|
28d4efb1342a2fa1f6bc364336810924684fde92
|
99d962c3510325fa2cb37eb4b8754c6f5682ee3d
|
/taxa_64_5k_s1/run.all.R
|
b961071fb4a672cddd688dd52ab540cdd592c74c
|
[] |
no_license
|
duchene/adequacy_nonstationarity
|
6360860491d13a5f8bc172011cc2c40285bbf044
|
da8a0e8997c139871cd336001a16e902dd62fb30
|
refs/heads/master
| 2020-04-18T00:00:51.214084
| 2016-12-25T01:02:19
| 2016-12-25T01:02:19
| 66,908,097
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
run.all.R
|
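# For each .phy alignment in the current directory: create a working folder,
# copy in the data file and the run scripts, and submit the job via qsub.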
dats <- grep(".phy", dir(), value = T)
for(i in 1:length(dats)){
system(paste0("mkdir ", dats[i], ".folder"))
setwd(paste0(dats[i], ".folder"))
system(paste0("cp ../", dats[i], " ../../run.mladeq.Rscript ../../run.mladeq.sh ."))
system("qsub run.mladeq.sh")
setwd("..")
}
|
47e8debd7593295013ed2ee051f8d9dde51ea67a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sbrl/examples/tictactoe.Rd.R
|
d836b130b114a795d3453e2db54d6c33aee4b52a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 190
|
r
|
tictactoe.Rd.R
|
library(sbrl)
### Name: tictactoe
### Title: SHUFFLED TIC-TAC-TOE-ENDGAME DATASET
### Aliases: tictactoe
### ** Examples
data(tictactoe)
## maybe str(tictactoe) ; plot(tictactoe) ...
|
b4921a9fab34978b2c6774bfd3a9d32b1cd4eb2e
|
c97d0887e9ab9ddd9b96194db7432916408a6498
|
/Assignments/Independent_Project/Chalifour_Independent_Project_Script.R
|
367cc1124834019121d66cbd5662385a5f167263
|
[] |
no_license
|
brchalifour/CompBioLabsAndHomework
|
37c2daf80c325392df09ec70f97c483f0ec72169
|
d88906df6f5e7f075fc3eee6d3fcfd14e40f2fd9
|
refs/heads/master
| 2020-04-18T14:32:04.784414
| 2019-05-01T23:11:21
| 2019-05-01T23:11:21
| 167,591,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,675
|
r
|
Chalifour_Independent_Project_Script.R
|
### Chalifour Independent Project ###
# Load necessary libraries/packages
# Used for "unfactor" function
install.packages("varhandle")
library(varhandle)
# Used for creating visual plots
library(ggplot2)
library(wesanderson)
library(scales)
# Used for various functions
library(dplyr)
library(plyr)
# Used for combining levels
library(tidyverse)
# Set working directory
setwd("Grad_School/CompBio/CompBioLabsAndHomework/Assignments/Independent_Project/")
# Download Dryad Data from Dahirel et al. 2014, read into R
snailmvmt <- read.table("raw_individual_synthetic_data.txt")
# Check out file, make sure it imported correctly
head(snailmvmt)
# Read in column names
colnames(snailmvmt) <- c("Species", "PDI", "Box", "ID", "is.exploring", "speed_cm_per_min", "Sinuousity", "Body_mass_g", "Shell_size_cm", "Foot_dry_mass_mg")
# Delete redundant row
newsnail <- snailmvmt[-c(1), ]
head(newsnail)
# Change appropriate variables back into numeric vectors (all but Species, Box, ID)
BodyMass <- newsnail$Body_mass_g
NewBodyMass <- unfactor(BodyMass)
PDI <- newsnail$PDI
NewPDI <- unfactor(PDI)
Speed <- newsnail$speed_cm_per_min
NewSpeed <- unfactor(Speed)
Sinuousity <- newsnail$Sinuousity
NewSinuousity <- unfactor(Sinuousity)
shellSize <- newsnail$Shell_size_cm
newShellSize <- unfactor(shellSize)
footMass <- newsnail$Foot_dry_mass_mg
newFootMass <- unfactor(footMass)
explorers <- newsnail$is.exploring
newExplorers <- unfactor(explorers)
newsnail$Foot_dry_mass_mg <- newFootMass
newsnail$Shell_size_cm <- newShellSize
newsnail$Sinuousity <- NewSinuousity
newsnail$speed_cm_per_min <- NewSpeed
newsnail$PDI <- NewPDI
newsnail$Body_mass_g <- NewBodyMass
newsnail$is.exploring <- newExplorers
# Check that these are now numeric
str(newsnail)
# Body mass and footdry mass both need to be log-10 transformed, to follow the procedures of the paper
logBodyMass <- log10(NewBodyMass)
newsnail$Body_mass_g <- logBodyMass
logFootMass <- log10(newFootMass)
newsnail$Foot_dry_mass_mg <- logFootMass
# Check that these transformations worked
head(newsnail)
head(NewBodyMass)
head(logBodyMass)
# Per Dahirel et al's Methods section:
# For each movement variable, our linear regression models contained two explanatory variables:
# habitat specialization (PDI) and the species’ mean fresh body mass (log10‐transformed).
NASpeed <- !is.na(NewSpeed)
NA_in_speed <- which(NASpeed)
NA_in_speed
# Run least squares regression comparing speed to PDI, body mass, and sinuosity
model <- lm(NewSpeed[NA_in_speed]~PDI[NA_in_speed] + logBodyMass[NA_in_speed] + NewSinuousity[NA_in_speed], data = newsnail)
summary(model)
# Create data frame with relevant values from least squares regression
Least_sq_reg_speed_DF <- data.frame(summary(model)$coefficients)
view(Least_sq_reg_speed_DF)
# Interpretation: As snails become more specialized, their speed is significantly reduced, controlling for their log-10 body mass and path sinuosity.
# P-values significant at < 0.001
# Consistent with paper? Yes
# run linear regression for log-transformed foot mass against habitat specialization (PDI)
foot_vs_PDI <- lm(Foot_dry_mass_mg ~ PDI, data = newsnail)
summary(foot_vs_PDI)
# Interpretation: As species become more highly specialized, their foot mass decreases (they have smaller feet)
# Consistent with the paper's conclusions? Yes, significant at p-value < 0.05.
# Plot log-transformed foot mass against PDI, compare trends to paper
ggplot(newsnail, aes(x = PDI, y = Foot_dry_mass_mg)) +
geom_point() +
stat_smooth(method = "lm", col = "#046C9A") +
labs(title="Log-Transformed Foot Size vs. Habitat Specialization", x="Habitat Specialization (PDI)", y = "Foot Dry Mass (mg)") + theme_minimal()
# Compare the proportion of explorers as a function of habitat specialization (PDI)
# Hypothesis: generalists (lower PDI) will be more likely to explore.
# Find proportion of explorers for each of 20 species
# sort data by Species and whether or not they explored to get counts
NewSpecies_explorers <- newsnail %>%
group_by(Species, is.exploring) %>%
tally()
# sort data by Species to get total number of counts per Species
NewSpecies <- newsnail %>%
group_by(Species) %>%
tally()
# Take proportion of explorers per species, divide by total counts of species to get proportion
Prop_A <- NewSpecies_explorers[2, 3]/NewSpecies[1,2]
Prop_B <- NewSpecies_explorers[4, 3]/NewSpecies[2,2]
Prop_C <- NewSpecies_explorers[6, 3]/NewSpecies[3,2]
Prop_D <- NewSpecies_explorers[8, 3]/NewSpecies[4,2]
Prop_E <- NewSpecies_explorers[10, 3]/NewSpecies[5,2]
Prop_F <- NewSpecies_explorers[12, 3]/NewSpecies[6,2]
Prop_G <- NewSpecies_explorers[14, 3]/NewSpecies[7,2]
Prop_H <- NewSpecies_explorers[16, 3]/NewSpecies[8,2]
Prop_I <- NewSpecies_explorers[18, 3]/NewSpecies[9,2]
Prop_J <- NewSpecies_explorers[20, 3]/NewSpecies[10,2]
Prop_K <- NewSpecies_explorers[22, 3]/NewSpecies[11,2]
Prop_L <- NewSpecies_explorers[24, 3]/NewSpecies[12,2]
Prop_M <- NewSpecies_explorers[26, 3]/NewSpecies[13,2]
Prop_N <- NewSpecies_explorers[28, 3]/NewSpecies[14,2]
Prop_O <- NewSpecies_explorers[30, 3]/NewSpecies[15,2]
Prop_P <- NewSpecies_explorers[32, 3]/NewSpecies[16,2]
Prop_Q <- NewSpecies_explorers[34, 3]/NewSpecies[17,2]
Prop_R <- NewSpecies_explorers[36, 3]/NewSpecies[18,2]
Prop_S <- NewSpecies_explorers[38, 3]/NewSpecies[19,2]
Prop_T <- NewSpecies_explorers[40, 3]/NewSpecies[20,2]
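# Compact alternative (sketch, assuming is.exploring is coded 0/1 so the mean
# per species equals the proportion of explorers):
# Explorer_Prop_alt <- tapply(newsnail$is.exploring, newsnail$Species, mean)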
# Create new list
Explorer_Prop <- c(Prop_A, Prop_B, Prop_C, Prop_D, Prop_E, Prop_F, Prop_G, Prop_H, Prop_I, Prop_J, Prop_K, Prop_L, Prop_M, Prop_N, Prop_O, Prop_P, Prop_Q, Prop_R, Prop_S, Prop_T)
# Turn list into vector
Explorer_Vec <- unlist(Explorer_Prop, use.names=FALSE)
# Narrow down PDI values to per species values
NewPDItable <- newsnail %>%
group_by(Species, PDI) %>%
tally()
# Create new data frame to merge PDI and explorer proportion
Prop_DF <- data_frame("Explorers" = Explorer_Vec, "PDI" = NewPDItable$PDI)
# Run linear regression
explorers_vs_PDI <- lm(Explorer_Vec ~ NewPDItable$PDI, data = newsnail)
summary(explorers_vs_PDI)
# Interpretation: As species become more highly specialized, the proportion of members
# who explored outside familiar territory decreased
# Consistent with Dahirel et al. 2014? Yes.
# Generalists are more likely to explore, and do so significantly more than specialists (p-value < 0.05)
# Create plot to show relationship, compare with published plot. Consistent results.
ggplot(Prop_DF, aes(x = PDI, y = Explorers)) +
geom_point() +
stat_smooth(method = "lm", col = "#5BBCD6") + labs(title="Propensity for Exploration vs. Specialization",
x="Habitat Specialization (PDI)", y = "Proportion of Explorers") + theme_minimal()
# Assign species to their families, per Fig. 2 of Dahirel et al. 2014
# Sort Members by Family to elucidate effects of phylogeny
snailid2 <- fct_collapse(newsnail$Species,
Elonidae = "Elona_quimperiana",
Helicidae = c("Helix_pomatia", "Helix_ligata", "Helix_lucorum", "Eobania_vermiculata", "Theba_pisana", "Cepaea_nemoralis", "Cepaea_hortensis", "Cepaea_sylvatica", "Cornu_aspersum", "Arianta_arbustorum"),
Helicodontidae = "Helicodonta_obvoluta",
Cochlicellidae = "Cochlicella_acuta",
Hygromiidae = c("Trochoidea_elegans", "Xeropicta_derbentina", "Cernuella_neglecta", "Trochulus_hispidus", "Monacha_cantiana", "Monacha_cartusiana", "Ciliella_ciliata")
)
# Assign families to previous dataset
newsnail$Species <- snailid2
# Are members of the same family more likely to be specialists or generalists?
# Group data by family and PDI
Family_PDI <- newsnail %>%
group_by(Species, PDI) %>%
tally()
# One-way ANOVA to find statistically significant differences between family mean PDI
summary(aov(lm(PDI~Species, data = Family_PDI)))
# Conclusions: Family is not a predictor of habitat specialization (PDI).
# P-value of 0.313
# Across all five families, mean PDI was relatively the same, and not statistically significant.
# Pairwise comparisons
TukeyHSD(aov(lm(PDI~Species, data = Family_PDI)))
# Get mean PDI, standard deviation per family
# Write function (this is not my original function, definitely had the help of Google on this part)
# see http://www.sthda.com/english/wiki/ggplot2-error-bars-quick-start-guide-r-software-and-data-visualization#barplot-with-error-bars
data_summary <- function(data, varname, groupnames){
require(plyr)
summary_func <- function(x, col){
c(mean = mean(x[[col]], na.rm=TRUE),
sd = sd(x[[col]], na.rm=TRUE))
}
data_sum<-ddply(data, groupnames, .fun=summary_func,
varname)
data_sum <- rename(data_sum, c("mean" = varname))
return(data_sum)
}
# Use function to get means/sds
df2 <- data_summary(Family_PDI, varname="PDI", groupnames="Species")
head(df2)
# Plot family vs. PDI in a bar plot with error bars
ggplot(df2, aes(x = Species, y = PDI, fill = Species)) +
geom_bar(stat = "identity") +
scale_fill_manual(values=wes_palette(n=5, name="Darjeeling1")) +
geom_errorbar(aes(ymin=PDI, ymax=PDI+sd), width=.2,
position=position_dodge(.9)) +
labs(title="Snail Family vs. Specialization",
x="Snail Family", y = "Habitat Specialization (PDI)", fill = "Family") +
guides(fill=FALSE) +
annotate(geom="text", x=1.5, y=0.9, label="P-value = 0.313",
color="black")
# Is being an explorer (propensity to explore unfamiliar areas) a conserved trait within families?
# Create new table of families and explorers/non-explorers
Family_Explorers <- newsnail %>%
group_by(Species, is.exploring) %>%
tally()
# Compute the proportion of explorers (is.exploring == 1) per family
Family_Prop <- sapply(split(Family_Explorers, Family_Explorers$Species),
                      function(d) d$n[d$is.exploring == 1] / sum(d$n))
# Turn the result into a plain vector
Family_Prop_Vec <- unlist(Family_Prop, use.names=FALSE)
# Create new data frame to merge PDI and explorer proportion
Family_Prop_DF <- data_frame("Explorers" = Family_Prop_Vec, "Family" = unique(Family_Explorers$Species))
# Create new table showing Family and exploration propensity
# This will allow me to run a chi-squared test
Fam_Explore_Prop_Tb <- table(newsnail$Species, newsnail$is.exploring)
Snail_sums_by_fam <- rowSums(Fam_Explore_Prop_Tb)
# Delete insignificant row from table
New_fam_explore <- Fam_Explore_Prop_Tb[-c(6), ]
# Chi-square test
chisq.test(New_fam_explore)
# Different families have significantly different proportions of explorers
# P-value of 0.00012
# The tendency to explore may be a conserved trait
# Find proportions of each family that are explorers
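# New_fam_explore is a 5 x 2 table, so linear indices 6-10 address its second
# column: the explorer counts (is.exploring == 1), one entry per family.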
Heli_explorers <- New_fam_explore[6]/Snail_sums_by_fam[1]
Hygr_explorers <- New_fam_explore[7]/Snail_sums_by_fam[2]
Coch_explorers <- New_fam_explore[8]/Snail_sums_by_fam[3]
Hekicodont_explorers <- New_fam_explore[9]/Snail_sums_by_fam[4]
Helicod_explorers <- New_fam_explore[10]/Snail_sums_by_fam[5]
# Create vector of family explorer proportions
Fam_Explore_Prop_Vec <- c(Heli_explorers, Hygr_explorers, Coch_explorers, Hekicodont_explorers, Helicod_explorers)
# Create data frame to use in bar plot
Fam_Explore_DF <- data_frame("Proportion" = Fam_Explore_Prop_Vec, "Family" = unique(Family_Explorers$Species))
# Create bar plot to visually show families vs. their proportion of explorers
ggplot(Fam_Explore_DF, aes(x = Family, y = Proportion, fill = Family)) +
geom_bar(stat = "identity") +
scale_fill_manual(values=wes_palette(n=5, name="Darjeeling2")) +
labs(title="Snail Family vs. Exploration Proportion",
x="Snail Family", y = "Proportion of Explorers", fill = "Family") +
scale_y_continuous(labels=percent) + guides(fill=FALSE) +
theme_minimal() +
annotate(geom="text", x=4.5, y=0.55, label="P-value = 0.00012",
color="black")
|
04f000cd0417a2e1813ec02696ac14fbb378c906
|
45fa01559df5c59da1a00fac4af772e331b08483
|
/man/sitka.Rd
|
36daedc19366b4f8d951940ca9d3f2e49e9dd018
|
[] |
no_license
|
cran/gamair
|
4a5e8664c2a504caa4b10d1a6f7b707e7f77b58b
|
20804247f3efe7d4fd296be1cb5b6aa496ddd897
|
refs/heads/master
| 2020-06-08T19:56:29.969176
| 2019-08-23T11:40:02
| 2019-08-23T11:40:02
| 17,696,246
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
rd
|
sitka.Rd
|
\name{sitka}
\alias{sitka}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{Sitka spruce growth data.}
\description{Tree growth data under enhanced ozone and control conditions.
}
\usage{
data(sitka)
}
%- maybe also `usage' for other objects documented here.
\format{
A data frame with 1027 rows and 5 columns:
\describe{
\item{id.num}{identity of the tree: 1...79.}
\item{order}{time order ranking within each tree.}
\item{days}{since 1st January, 1988.}
\item{log.size}{log of tree `size'.}
\item{ozone}{1 - enhanced ozone treatment; 0 - control.}
}
}
\details{ The data were analysed in Crainiceanu CM, Ruppert D, Wand MP (2005) using WinBUGS, and in Wood (2016) using auto-generated JAGS code.
}
\source{
The \code{SemiPar} package, from:
Diggle, P.J., Heagerty, P., Liang, K.-Y. and Zeger, S.L. (2002) Analysis of Longitudinal Data (2nd ed.) OUP.
}
\references{
Wood SN (2016) "Just Another Gibbs Additive Modeller: Interfacing JAGS and mgcv" Journal of Statistical Software 75
Crainiceanu C.M., Ruppert D. and Wand M.P. (2005). "Bayesian Analysis for Penalized Spline Regression Using WinBUGS." Journal of Statistical Software, 14(14).
}
\examples{
require(gamair); require(lattice)
data(sitka)
xyplot(log.size~days|as.factor(ozone),data=sitka,type="l",groups=id.num)
}
\keyword{data}
|
d589a109cf5f2d3eb5b4b1b6f169824d49e2a57f
|
7af9e429a8c36ff6949ba930abcd56c153d36a7e
|
/Module 1/1/Homework1.1_EIT_DSC_IDA_Wach.R
|
937ec3f77c11e7dc0cd7538306d2fdd0912956d1
|
[] |
no_license
|
jacekwachowiak/Intelligent-Data-Analysis-with-R
|
900fb1fcdc3c3b031694746cc9bfd74ea520fa9b
|
3f67863f51827431895d63457fe45e64e20460b7
|
refs/heads/master
| 2020-04-16T13:01:54.834373
| 2019-01-14T15:59:46
| 2019-01-14T15:59:46
| 165,607,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,388
|
r
|
Homework1.1_EIT_DSC_IDA_Wach.R
|
library('plyr')
library('ggplot2')
library(reshape2)
library(scales)
data <- read.csv('wines-PCA.txt', sep = "\t", header = FALSE)
head(data)
names(data) <- c('fixed_acidity',
'volatile_acidity'
                 ,'citric_acid'
,'residual_sugar'
,'chlorides'
,'free sulfur_dioxide'
,'total_sulfur_dioxide'
,'density'
,'pH'
,'sulphates'
,'alcohol'
,'quality'
,'type')
#Removing outlier
data <- data [data$density < 2,]
data$type <- as.factor(data$type)
data <- unique(data)
nrow(data)
data$quality
class(data$type)
plot1 <- ggplot(data=data, aes(x=density, y=alcohol))
mapNumberToColor <- function (x)
{
if (x == 1)
return ('white')
else
return ('red')
}
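# Applied element-wise via sapply() below; ifelse(type == 1, "white", "red")
# would be an equivalent vectorized form.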
plot1 + geom_point(aes( color = sapply(type, mapNumberToColor), size = quality))+ scale_color_manual(values=c("red", "white")) +
theme(panel.background = element_rect(fill = 'black', colour = 'black')) +
ggtitle("Alcohol content depending on density, quality and type of wine")+
xlab("Density")+
ylab("Alcohol") +
labs(color = 'color')
#--------------------------------------------
#plot2
df<- (as.data.frame(cbind(data$quality,data$sulphates, data$citric_acid, data$chlorides)))
names(df) <- c('Quality', 'Sulphates', 'Citric acid', 'Chlorides')
dfmelt<-melt(df, measure.vars = 2:4)
ggplot(dfmelt, aes(x=factor(round_any(Quality,0.5)), y=value, fill=variable)) +
geom_boxplot() + scale_y_continuous(trans=log2_trans()) +
ggtitle("Sulphates, citic acid and chlorides for different qualities")+
xlab("Quality")+
ylab("Value in logaritmic scale") +
labs(color = 'color')
#----------------------------------
#plot3
table0 <- xtabs(~quality+type, data=data)
mosaicplot(table0,shade=TRUE, type="pearson",main="Contingency table of quality and type")
chisq.test(table0)
#-------------------------------------------
#plot4
mapNumberToBarColor <- function (x)
{
if (x == 1)
return ('blue')
else
return ('red')
}
ggplot(data, aes(x = residual_sugar, y=alcohol)) +
geom_col (width = 0.5, position = "identity", fill = sapply(data$type, mapNumberToBarColor), alpha = 0.15) +
scale_fill_manual(values=c("red", "white")) +
scale_y_continuous(expand = c(0,0), limits = c(0,20)) +
ggtitle("Alcohol strength based on sugar content")+
xlab("Residual sugar")+
ylab("Alcohol strength") +
geom_text(aes(label = "", colour = sapply(data$type, mapNumberToColor))) +
labs(color = 'Colour')
|
b866d1326dba8d51c115e1cac719a70b38bbd107
|
0fe47d1ac76c706cf48d5c386818c649dd9413b5
|
/dynamic DT_Albatross.R
|
5fcf49b42e9c150bf2106952680a4b0bf5931b39
|
[] |
no_license
|
tpgjs66/PredictCharging
|
e97a853422675c87340ec0575d695244d810fdd0
|
20cf6df95080d21f34ffb51e4e55603d6b49e673
|
refs/heads/master
| 2020-04-25T16:48:27.083606
| 2019-02-27T13:55:48
| 2019-02-27T13:55:48
| 172,552,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,504
|
r
|
dynamic DT_Albatross.R
|
library(devtools)
install_github("tpgjs66/pmmlParty")
install.packages("gtools")
install.packages("MCMCglmm")
install.packages("CHAID", repos="http://R-Forge.R-project.org")
library(party)
library(CHAID)
library(MCMCglmm)
library(gtools)
library(pmmlParty)
library(XML)
library(sp)
library(rgdal)
###### Load shape file ######
pc4sp = rgdal::readOGR("~/ActivityPriority/GIS/ppcs_single_cs.shp", layer = "ppcs_single_cs")
pc4sp$krachtstro <- as.numeric(pc4sp$krachtstro)
pc4sp$snellader_ <- as.numeric(pc4sp$snellader_)
pc4sp$krachtstro[is.na(pc4sp$krachtstro)] <- 0
pc4sp$snellader_[is.na(pc4sp$snellader_)] <- 0
# pc6sp = rgdal::readOGR("~/ActivityPriority/GIS/ppcs_single.shp", layer = "ppcs_single")
###### DATA PREPARATION ######
setwd("~/ActivityPriority/dynamicDT")
data = (read.delim("aicharging2.csv",
header=TRUE,
sep=",",
stringsAsFactor = TRUE
)
)
## Convert coordinates to numeric
data$destination.latitude <- as.numeric(as.character(data$destination.latitude))
data$destination.longitude <- as.numeric(as.character(data$destination.longitude))
## Subsetting only charging incidents
## HomeCharging
data <- data[which(data$HomeCharging != "Missing"),]
data <- droplevels(data)
## OutHomeCharging
data <- data[which(data$OutHomeCharging != "Missing"),]
data <- droplevels(data)
## Count charging stations in activity location (PC4)
coords <- cbind(data$destination.longitude,data$destination.latitude)
coords <- SpatialPoints(coords, proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs"))
pc4 <- over(coords,pc4sp)
data$KrachtstroomN <- pc4$krachtstro
data$KrachtstroomN[is.na(data$KrachtstroomN)] <- 0
data$KrachtstroomN <- as.ordered(quantcut(data$KrachtstroomN,seq(0,1,by=1/6),dig.lab=8))
levels(data$KrachtstroomN) <- c(0,1,2,3,4,5)
data$SnelladerN <- pc4$snellader_
data$SnelladerN[is.na(data$SnelladerN)] <- 0
data$SnelladerN <- as.ordered(quantcut(data$SnelladerN,seq(0,1,by=1/6),dig.lab=8))
levels(data$SnelladerN) <- c(0,1,2,3,4,5)
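# quantcut() (gtools) cuts a numeric vector at its sample quantiles; here six
# equal-frequency bins are relabelled 0-5. Illustrative sketch (made-up data):
# quantcut(c(2, 4, 6, 8, 10, 12), seq(0, 1, by = 1/2))  # two bins split at the median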
## Convert data type into categorical variable
## Note NA should not exist in dataset.
data$HHID<-as.factor(data$HHID)
data$Urb <- as.ordered(data$Urb)
data$Day <- as.factor(data$Day)
data$pAge<-as.ordered(data$pAge)
data$Ncar<- as.ordered(data$Ncar)
data$Gend<- as.factor(data$Gend)
data$Driver<- as.factor(data$Driver)
data$wstat<- as.factor(data$wstat)
data$Tdur<-as.ordered(quantcut(data$Tdur, seq(0,1,by=1/6),dig.lab=8))
levels(data$Tdur) <- c(0,1,2,3,4,5)
data$Dist<-as.ordered(quantcut(data$Dist, seq(0,1,by=1/6),dig.lab=8))
levels(data$Dist) <- c(0,1,2,3,4,5)
# Mode
data$Mode <- as.character(data$Mode)
data$Mode[data$Mode %in% c("Lopend","Fiets")] <- as.character("0")
data$Mode[data$Mode %in% c("Auto")] <- as.character("1")
data$Mode[data$Mode %in% c("Taxi","Onbekend","Anders")] <- as.character("2")
data$Mode[data$Mode %in% c("Bus","Trein","Tram","Metro")] <- as.character("3")
data$Mode <- as.factor(data$Mode)
# Activity type
data$Act <- as.character(data$Act)
data$Act[data$Act %in% c("03 Naar huis")] <- as.character("0")
data$Act[data$Act %in% c("02 Naar werk of school") & data$ActDur < 60] <- as.character("2")
data$Act[data$Act %in% c("02 Naar werk of school")] <- as.character("1")
data$Act[data$Act %in% c("06 Ophalen of wegbrengen")] <- as.character("3")
data$Act[data$Act %in% c("04 Dagelijkse boodschappen")] <- as.character("4")
data$Act[data$Act %in% c("05 Winkelen")] <- as.character("5")
data$Act[data$Act %in% c("07 Diensten of prive zaken")] <- as.character("6")
data$Act[data$Act %in% c("08 Sociale activiteiten")] <- as.character("7")
data$Act[data$Act %in% c("09 Vrije tijd")] <- as.character("8")
data$Act[data$Act %in% c("10 Wachten")] <- as.character("9")
data$Act[data$Act %in% c("11 Andere activiteiten")] <- as.character("10")
data$Act[data$Act %in% c("01 Alleen laden")] <- as.character("11")
## Impute unknown ("12 Onbekend") activities by sampling from the observed
## activity distribution
prob <- c()
for (i in unique(data$Act[data$Act != "12 Onbekend"])) {
prob[[i]] <- table(data$Act)[i]
}
data$Act[data$Act %in% c("12 Onbekend")] <- sample(unique(data$Act[data$Act != "12 Onbekend"]),
size = length(data$Act[data$Act == "12 Onbekend"]),
replace = TRUE,
prob = prob)
data$Act <- as.factor(data$Act)
# ModePrev
data$ModePrev <- as.character(data$ModePrev)
data$ModePrev[data$ModePrev %in% c("Lopend","Fiets")] <- as.character("0")
data$ModePrev[data$ModePrev %in% c("Auto")] <- as.character("1")
data$ModePrev[data$ModePrev %in% c("Taxi","Onbekend","Anders")] <- as.character("2")
data$ModePrev[data$ModePrev %in% c("Bus","Trein","Tram","Metro")] <- as.character("3")
data$ModePrev <- as.factor(data$ModePrev)
# ActPrev
data$ActPrev <- as.character(data$ActPrev)
data$ActPrev[data$ActPrev %in% c("03 Naar huis")] <- as.character("0")
data$ActPrev[data$ActPrev %in% c("02 Naar werk of school") & data$ActDur < 60] <- as.character("2")
data$ActPrev[data$ActPrev %in% c("02 Naar werk of school")] <- as.character("1")
data$ActPrev[data$ActPrev %in% c("06 Ophalen of wegbrengen")] <- as.character("3")
data$ActPrev[data$ActPrev %in% c("04 Dagelijkse boodschappen")] <- as.character("4")
data$ActPrev[data$ActPrev %in% c("05 Winkelen")] <- as.character("5")
data$ActPrev[data$ActPrev %in% c("07 Diensten of prive zaken")] <- as.character("6")
data$ActPrev[data$ActPrev %in% c("08 Sociale activiteiten")] <- as.character("7")
data$ActPrev[data$ActPrev %in% c("09 Vrije tijd")] <- as.character("8")
data$ActPrev[data$ActPrev %in% c("10 Wachten")] <- as.character("9")
data$ActPrev[data$ActPrev %in% c("11 Andere activiteiten")] <- as.character("10")
data$ActPrev[data$ActPrev %in% c("01 Alleen laden")] <- as.character("11")
## Same proportional imputation for the unknown previous-activity codes
prob <- c()
for (i in unique(data$ActPrev[data$ActPrev != "12 Onbekend"])) {
prob[[i]] <- table(data$ActPrev)[i]
}
data$ActPrev[data$ActPrev %in% c("12 Onbekend")] <- sample(unique(data$ActPrev[data$ActPrev != "12 Onbekend"]),
size = length(data$ActPrev[data$ActPrev == "12 Onbekend"]),
replace = TRUE,
prob = prob)
data$ActPrev<-as.factor(data$ActPrev)
# TTPrev
data$TTPrev<-as.ordered(quantcut(as.numeric(as.character(data$TTPrev)),seq(0,1,by=1/6),
dig.lab=8))
data$TTPrev<-addNA(data$TTPrev)
levels(data$TTPrev) <- c("FirstEpisode",0,1,2,3,4,5)
# ModeNext
data$ModeNext <- as.character(data$ModeNext)
data$ModeNext[data$ModeNext %in% c("Lopend","Fiets")] <- as.character("0")
data$ModeNext[data$ModeNext %in% c("Auto")] <- as.character("1")
data$ModeNext[data$ModeNext %in% c("Taxi","Onbekend","Anders")] <- as.character("2")
data$ModeNext[data$ModeNext %in% c("Bus","Trein","Tram","Metro")] <- as.character("3")
data$ModeNext <- as.factor(data$ModeNext)
# ActNext
data$ActNext <- as.character(data$ActNext)
data$ActNext[data$ActNext %in% c("03 Naar huis")] <- as.character("0")
data$ActNext[data$ActNext %in% c("02 Naar werk of school") & data$ActDur < 60] <- as.character("2")
data$ActNext[data$ActNext %in% c("02 Naar werk of school")] <- as.character("1")
data$ActNext[data$ActNext %in% c("06 Ophalen of wegbrengen")] <- as.character("3")
data$ActNext[data$ActNext %in% c("04 Dagelijkse boodschappen")] <- as.character("4")
data$ActNext[data$ActNext %in% c("05 Winkelen")] <- as.character("5")
data$ActNext[data$ActNext %in% c("07 Diensten of prive zaken")] <- as.character("6")
data$ActNext[data$ActNext %in% c("08 Sociale activiteiten")] <- as.character("7")
data$ActNext[data$ActNext %in% c("09 Vrije tijd")] <- as.character("8")
data$ActNext[data$ActNext %in% c("10 Wachten")] <- as.character("9")
data$ActNext[data$ActNext %in% c("11 Andere activiteiten")] <- as.character("10")
data$ActNext[data$ActNext %in% c("01 Alleen laden")] <- as.character("11")
## Same proportional imputation for the unknown next-activity codes
prob <- c()
for (i in unique(data$ActNext[data$ActNext != "12 Onbekend"])) {
prob[[i]] <- table(data$ActNext)[i]
}
data$ActNext[data$ActNext %in% c("12 Onbekend")] <- sample(unique(data$ActNext[data$ActNext != "12 Onbekend"]),
size = length(data$ActNext[data$ActNext == "12 Onbekend"]),
replace = TRUE,
prob = prob)
data$ActNext<-as.factor(data$ActNext)
# TTNext
data$TTNext<-as.ordered(quantcut(as.numeric(as.character(data$TTNext)), seq(0,1,by=1/6),
dig.lab=8))
data$TTNext<-addNA(data$TTNext)
levels(data$TTNext) <- c("LastEpisode",0,1,2,3,4,5)
# Categorize continuous variables into 6 equal-frequency bins (the cut points are sextiles, not quintiles)
data$BT<-as.ordered(quantcut(data$BT%%1440, seq(0,1,by=1/6),dig.lab=8))
levels(data$BT) <- c(0,1,2,3,4,5)
data$ActDur<-as.ordered(quantcut(data$ActDur, seq(0,1,by=1/6),dig.lab=8))
levels(data$ActDur) <- c(0,1,2,3,4,5)
data$Evtype<- as.factor(data$Evtype)
data$ElapsedCharging<- as.ordered(quantcut(data$ElapsedCharging, seq(0,1,by=1/6)
,dig.lab=8))
levels(data$ElapsedCharging) <- c(0,1,2,3,4,5)
data$SOC <-as.factor(data$SOC)
data$SOC[data$SOC=="-1"] <- NA        # -1 encodes a missing state of charge
data$SOC <- droplevels(data$SOC)
data$SOC[is.na(data$SOC)] <- 2        # impute missing SOC with level "2"
data$Xdag<-as.ordered(data$Xdag)
data$Xndag<-as.ordered(data$Xndag)
data$Xarb<-as.ordered(data$Xarb)
data$Xpop<-as.ordered(data$Xpop)
data$Ddag<-as.ordered(data$Ddag)
data$Dndag<-as.ordered(data$Dndag)
data$Darb<-as.ordered(data$Darb)
data$Dpop<-as.ordered(data$Dpop)
data$origin.latitude<-as.numeric(as.character(data$origin.latitude))
data$origin.longitude<-as.numeric(as.character(data$origin.longitude))
data$destination.latitude<-as.numeric(as.character(data$destination.latitude))
data$destination.longitude<-as.numeric(as.character(data$destination.longitude))
data$chargingKrachtstroom_X<-
as.numeric(as.character(data$chargingKrachtstroom_X))
data$chargingKrachtstroom_Y<-
as.numeric(as.character(data$chargingKrachtstroom_Y))
data$chargingSnellader_X<-as.numeric(as.character(data$chargingSnellader_X))
data$chargingSnellader_Y<-as.numeric(as.character(data$chargingSnellader_Y))
data$chargingStopcontact_X<-as.numeric(as.character(data$chargingStopcontact_X))
data$chargingStopcontact_Y<-as.numeric(as.character(data$chargingStopcontact_Y))
# Give unique ID
data$SchedID<-do.call(paste, c(data[c("HHID", "MemID","EpID")], sep = "-"))
# Merging SemiPublicCharging with PublicCharging
data$HomeCharging <- as.factor(data$HomeCharging)
data$HomeCharging[data$HomeCharging=="SemiPublicCharging"] <- "PublicCharging"
data$HomeCharging <- droplevels(data$HomeCharging)
data$OutHomeCharging <- as.factor(data$OutHomeCharging)
data$OutHomeCharging[data$OutHomeCharging=="SemiPublicCharging"] <- "PublicCharging"
data$OutHomeCharging <- droplevels(data$OutHomeCharging)
# HomeCharging
homecharging <- data
# OutHomeCharging
outhomecharging <- data
# ## Subsetting only charging incidents
# homecharging <- data[which(data$HomeCharging != "Missing"),]
# homecharging <- droplevels(homecharging)
#
# outhomecharging <- data[which(data$OutHomeCharging != "Missing"),]
# outhomecharging <- droplevels(outhomecharging)
## Function for route information using routino
routing <- function(data, Lat2, Lon2) {
if (strsplit(Lat2,split="_")[[1]][1] != strsplit(Lon2,split="_")[[1]][1]) {
stop("Lat2 and Lon2 is differ!")
}
# Create new column name for route info
cstype <- strsplit(Lat2,split="_")[[1]][1]
dur <- paste0(cstype,"_dur")
dist <- paste0(cstype,"_dist")
# Routino routine
fileloc <- "/Users/KimSeheon/routino/quickest-all.txt"
#This is the default working directory
setwd("/Users/KimSeheon/routino/")
routeresults <- c()
for (i in 1:nrow(data)) {
print(i)
# Coordinates of charging station by type
lat2 <- data[Lat2][i,]
lon2 <- data[Lon2][i,]
# Skip the first episode
if (data$EpID[i] == 0){
routeresults[i] <- list(NULL)
next
}
# Coordinates of Activity episode
lat1 <- data$destination.latitude[i]
lon1 <- data$destination.longitude[i]
# Assign transport mode to route (Always walk)
tmode <- "foot"
if (cstype == "chargingSnellader") {
tmode <- "motorcar"
}
# Command implementation
router <- paste("router --transport=", tmode,
" --prefix=nl",
" --quickest",
" --lat1=", lat1,
" --lon1=", lon1,
" --lat2=",lat2,
" --lon2=",lon2,
# "--translations=/Users/KimSeheon/routino/routino-translations.xml",
# "--profiles=/Users/KimSeheon/routino/xml/routino-profiles.xml",
" --output-text-all",
# "--output-stdout",
" --quiet --dir=/Users/KimSeheon/routino/", sep = "")
system(router, wait = TRUE) # Send the routing command
# Read in the txt instructions to extract the network distance
routeresults[[i]] <- read.delim(fileloc, header = F, sep = "\t", skip = 6)
colnames(routeresults[[i]]) <- c('lat', 'lng', 'node', 'type',
'seg.distance', 'seg.duration', 'distance',
'duration', 'speed', 'bearing', 'highway')
}
# For leaflet visualization
lines <- c()
index <- c()
for (i in 1:nrow(data)) {
if (is.null(routeresults[[i]])) {
next
}
index <- append(index, i)
lines[i] <- (list(sp::Lines(sp::Line(routeresults[[i]][2:1]),
ID = data$SchedID[[i]])))
}
filtered.lines <- Filter(Negate(is.null), lines)
filtered.lines <- SpatialLines(filtered.lines,
proj4string = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
filtered.sched <- data[index,]
routes <- SpatialLinesDataFrame(sl = filtered.lines, data = filtered.sched, match.ID = FALSE)
data[dur] <- NA
data[dist] <- NA
# Collect info from routino results
  # Iterate over all rows; routeresults holds a NULL entry for skipped episodes.
  # (The original looped over 1:nrow(data[index,]), which misses later rows.)
  for (i in 1:nrow(data)) {
if (is.null(routeresults[[i]])) {
next
}
data[,dur][i] <- routeresults[[i]][nrow(routeresults[[i]]),]$duration
data[,dist][i] <- routeresults[[i]][nrow(routeresults[[i]]),]$distance
}
return(list(data,routes))
}
# Execute the routing function (homecharging)
homecharging.Krachtstroom <- routing(homecharging,
Lat2="chargingKrachtstroom_Y",
Lon2="chargingKrachtstroom_X")
homecharging$chargingKrachtstroom_dist <- homecharging.Krachtstroom[[1]]$chargingKrachtstroom_dist
homecharging.Snellader <- routing(homecharging,
Lat2="chargingSnellader_Y",
Lon2="chargingSnellader_X")
homecharging$chargingSnellader_dist <- homecharging.Snellader[[1]]$chargingSnellader_dist
homecharging.Stopcontact <- routing(homecharging,
Lat2="chargingStopcontact_Y",
Lon2="chargingStopcontact_X")
homecharging$chargingStopcontact_dist <- homecharging.Stopcontact[[1]]$chargingStopcontact_dist
# Convert route info to categorical vars.
homecharging$chargingKrachtstroom_dur <- as.ordered(quantcut(homecharging$chargingKrachtstroom_dur,seq(0,1,by=1/6),dig.lab=8))
levels(homecharging$chargingKrachtstroom_dur) <- c(0,1,2,3,4,5)
homecharging$chargingKrachtstroom_dist <- as.ordered(quantcut(homecharging$chargingKrachtstroom_dist,seq(0,1,by=1/6),dig.lab=8))
levels(homecharging$chargingKrachtstroom_dist) <- c(0,1,2,3,4,5)
homecharging$chargingSnellader_dur <- as.ordered(quantcut(homecharging$chargingSnellader_dur,seq(0,1,by=1/6),dig.lab=8))
levels(homecharging$chargingSnellader_dur) <- c(0,1,2,3,4,5)
homecharging$chargingSnellader_dist <- as.ordered(quantcut(homecharging$chargingSnellader_dist,seq(0,1,by=1/6),dig.lab=8))
levels(homecharging$chargingSnellader_dist) <- c(0,1,2,3,4,5)
homecharging$chargingStopcontact_dur <- as.ordered(quantcut(homecharging$chargingStopcontact_dur,seq(0,1,by=1/6),dig.lab=8))
levels(homecharging$chargingStopcontact_dur) <- c(0,1,2,3,4,5)
homecharging$chargingStopcontact_dist <- as.ordered(quantcut(homecharging$chargingStopcontact_dist,seq(0,1,by=1/6),dig.lab=8))
levels(homecharging$chargingStopcontact_dist) <- c(0,1,2,3,4,5)
# Write out homecharging data
write.csv(homecharging,"~/ActivityPriority/dynamicDT/homecharging.csv")
# Execute the routing function (outhomecharging)
outhomecharging.Krachtstroom <- routing(outhomecharging,
Lat2="chargingKrachtstroom_Y",
Lon2="chargingKrachtstroom_X")
outhomecharging$chargingKrachtstroom_dist <- outhomecharging.Krachtstroom[[1]]$chargingKrachtstroom_dist
outhomecharging.Snellader <- routing(outhomecharging,
Lat2="chargingSnellader_Y",
Lon2="chargingSnellader_X")
outhomecharging$chargingSnellader_dist <- outhomecharging.Snellader[[1]]$chargingSnellader_dist
outhomecharging.Stopcontact <- routing(outhomecharging,
Lat2="chargingStopcontact_Y",
Lon2="chargingStopcontact_X")
outhomecharging$chargingStopcontact_dist <- outhomecharging.Stopcontact[[1]]$chargingStopcontact_dist
# Convert route info to categorical vars.
outhomecharging$chargingKrachtstroom_dur <- as.ordered(quantcut(outhomecharging$chargingKrachtstroom_dur,seq(0,1,by=1/6),dig.lab=8))
levels(outhomecharging$chargingKrachtstroom_dur) <- c(0,1,2,3,4,5)
outhomecharging$chargingKrachtstroom_dist <- as.ordered(quantcut(outhomecharging$chargingKrachtstroom_dist,seq(0,1,by=1/6),dig.lab=8))
levels(outhomecharging$chargingKrachtstroom_dist) <- c(0,1,2,3,4,5)
outhomecharging$chargingSnellader_dur <- as.ordered(quantcut(outhomecharging$chargingSnellader_dur,seq(0,1,by=1/6),dig.lab=8))
levels(outhomecharging$chargingSnellader_dur) <- c(0,1,2,3,4,5)
outhomecharging$chargingSnellader_dist <- as.ordered(quantcut(outhomecharging$chargingSnellader_dist,seq(0,1,by=1/6),dig.lab=8))
levels(outhomecharging$chargingSnellader_dist) <- c(0,1,2,3,4,5)
outhomecharging$chargingStopcontact_dur <- as.ordered(quantcut(outhomecharging$chargingStopcontact_dur,seq(0,1,by=1/6),dig.lab=8))
levels(outhomecharging$chargingStopcontact_dur) <- c(0,1,2,3,4,5)
outhomecharging$chargingStopcontact_dist <- as.ordered(quantcut(outhomecharging$chargingStopcontact_dist,seq(0,1,by=1/6),dig.lab=8))
levels(outhomecharging$chargingStopcontact_dist) <- c(0,1,2,3,4,5)
# Write out outhomecharging data
write.csv(outhomecharging,"~/ActivityPriority/dynamicDT/outhomecharging.csv")
# Leaflet visualization
m <- leaflet()
m <- addTiles(map=m)
m <- addPolylines(map=m,data=outhomecharging.Krachtstroom[[2]])
m
### CHAID formula ###
formula.homecharging <- (HomeCharging~Urb+Day+pAge+Ncar+Gend+Driver+wstat+
Tdur+Mode+Act+
ModePrev+ActPrev+TTPrev+ModeNext+ActNext+TTNext+BT+
ActDur+Evtype+ElapsedCharging+SOC+Xdag+Xndag+Xarb+
Xpop+Ddag+Dndag+Darb+Dpop+chargingKrachtstroom_dist+
chargingSnellader_dist+KrachtstroomN+SnelladerN)
formula.outhomecharging <- (OutHomeCharging~Urb+Day+pAge+Ncar+Gend+Driver+wstat+
Tdur+Mode+Act+
ModePrev+ActPrev+TTPrev+ModeNext+ActNext+TTNext+
BT+ActDur+Evtype+ElapsedCharging+SOC+Xdag+
Xndag+Xarb+Xpop+Ddag+Dndag+Darb+Dpop+
chargingKrachtstroom_dist+
chargingSnellader_dist+KrachtstroomN+SnelladerN)
################################################################################
### Define MEtree function ###
################################################################################
MEtree5<-function(data,formula,random) {
ErrorTolerance=10
MaxIterations=5
#parse formula
Predictors<-paste(attr(terms(formula),"term.labels"),collapse="+")
TargetName<-formula[[2]]
Target<-data[,toString(TargetName)]
#set up variables for loop
ContinueCondition<-TRUE
iterations<-0
#set up the initial target
OriginalTarget<-(Target)
oldDIC<- Inf
# Make a new data frame to include all the new variables
newdata <- data
newdata[,"p.1"]<-0
newdata[,"p.2"]<-0
newdata[,"p.3"]<-0
newdata[,"p.4"]<-0
newdata[,"p.5"]<-0
m.list<-list()
tree.list<-list()
while(ContinueCondition){
# Count iterations
iterations <- iterations+1
print(paste("############### Main Iteration ",iterations,"###############"))
# Target response will be updated from the previous result.
if (iterations<2){
newdata[,"OriginalTarget"] <- as.factor(OriginalTarget)
}else {
newdata[,"OriginalTarget"] <- as.factor(MCMCTarget)
}
# Build CHAID tree
ctrl <- chaid_control(alpha2=0.05,alpha4=0.05,
minsplit = 2*floor(nrow(data)/100),
minbucket=floor(nrow(data)/100), minprob=1)
tree <- chaid(formula(paste(c("OriginalTarget", Predictors),collapse = "~"))
,data = newdata, control = ctrl)
tree.list[[iterations]]<-tree
# Get terminal node
newdata[,"nodeInd"] <- 0
newdata["nodeInd"] <-as.factor(predict(tree,newdata=newdata,type="node"))
# Get variables (alternative-specific) that identify the node for
# each observation
newdata["p.1"]<-list(predict(tree,newdata=newdata,type="prob")[,1])
newdata["p.2"]<-list(predict(tree,newdata=newdata,type="prob")[,2])
newdata["p.3"]<-list(predict(tree,newdata=newdata,type="prob")[,3])
newdata["p.4"]<-list(predict(tree,newdata=newdata,type="prob")[,4])
newdata["p.5"]<-list(predict(tree,newdata=newdata,type="prob")[,5])
CHAIDTarget<-c()
# Update adjusted target based on CHAID predicted probs.
repeat{
for(k in 1:length(OriginalTarget)){
t<-levels(OriginalTarget)
# Draw a decision based on probs
CHAIDTarget[k]<-sample(t,1,replace=FALSE,
prob=newdata[k,c("p.1","p.2","p.3","p.4","p.5")])
}
if ((length(table(CHAIDTarget))==5)){break}
}
newdata[,"CHAIDTarget"] <- as.factor(CHAIDTarget)
# Fit MCMCglmm
k <- length(levels(Target))
I <- diag(k-1)
J <- matrix(rep(1, (k-1)^2), c(k-1, k-1))
prior <- list(
G = list(G1 = list(V = diag(k-1), n = k-1)),
R = list(fix=1,V= (1/k) * (I + J), n = k-1))
    m <- MCMCglmm(fixed = OriginalTarget ~ -1 + trait +
                    trait:(nodeInd+CHAIDTarget),
random = ~ idh(trait):HHID,# ~ idh(trait-1+nodeInd):HHID ??
rcov = ~idh(trait):units,
prior = prior, # Add fix=1 if you want fix R-structure
burnin =1000,
nitt = 21000,
thin = 10,
# This option saves the posterior distribution of
# random effects in the Solution mcmc object:
pr = TRUE,
#pl = TRUE,
family = "categorical",
#saveX = TRUE,
#saveZ = TRUE,
#saveXL = TRUE,
data = newdata,
verbose = T
#slice = T
#singular.ok = T
)
m.list[[iterations]]<-m
#p <- predict(m,type="terms",interval="prediction")[,1]
p <- (predict(m,type="terms",interval="none",posterior="all"))
#p <- (predict(m,type="terms",interval="none",posterior="distribution"))
#p <- (predict(m,type="terms",interval="none",posterior="mean"))
#p <- (predict(m,type="terms",interval="none",posterior="mode"))
# Predicted probability with marginalizing the random effect
#p <- predict(m,type="terms", interval="none",posterior="mean",marginal=NULL)
#p <- predict(m,type="terms", interval="none",
# posterior="mean",marginal=m$Random$formula)
pred<-c()
pred$b<-p[1:nrow(newdata)]
pred$c<-p[(nrow(newdata)+1):(2*nrow(newdata))]
pred$d<-p[(2*nrow(newdata)+1):(3*nrow(newdata))]
pred$e<-p[(3*nrow(newdata)+1):(4*nrow(newdata))]
pred<-as.data.frame(pred)
pred$pa<-1/(1+exp(pred$b)+exp(pred$c)+exp(pred$d)+exp(pred$e))
pred$pb<-exp(pred$b)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d)+exp(pred$e))
pred$pc<-exp(pred$c)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d)+exp(pred$e))
pred$pd<-exp(pred$d)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d)+exp(pred$e))
pred$pe<-exp(pred$e)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d)+exp(pred$e))
pred<-pred[5:9]
# Get the DIC to check on convergence
if(!(is.null(m))){
newDIC <- m$DIC
ContinueCondition <- (abs(oldDIC-newDIC)>ErrorTolerance &
iterations < MaxIterations)
oldDIC <- newDIC
print(paste("###### DIC : ", m$DIC, " ######"))
# Update prob.
newdata["p.1"]<-pred[,1]
newdata["p.2"]<-pred[,2]
newdata["p.3"]<-pred[,3]
newdata["p.4"]<-pred[,4]
newdata["p.5"]<-pred[,5]
# # Update adjusted target based on logit prob.
# for(k in 1:length(AdjustedTarget)){
# AdjustedTarget[k]<-sum(cumsum(mlogitfit$probabilities[k,])<runif(1))+1
#
# }
# newdata[,"AdjustedTarget"] <- AdjustedTarget
# Update adjusted target based on MCMCglmm predicted probs.
MCMCTarget<-c()
repeat{
for(k in 1:length(OriginalTarget)){
t<-levels(OriginalTarget)
MCMCTarget[k]<-sample(t,1,replace=FALSE,
prob=newdata[k,c("p.1","p.2","p.3","p.4","p.5")])
}
if ((length(table(MCMCTarget))==5)){break}
}
newdata[,"MCMCTarget"] <- as.factor(MCMCTarget)
}
else{ ContinueCondition<-FALSE }
}
#return final model fits and convergence info.
return(list(
CHAID.tree=tree.list,
MCMCglmm.fit=m.list,
    Conv.info=newDIC-oldDIC, # NB: oldDIC was just set to newDIC, so this is always 0 (same in the variants below)
n.iter=iterations
))
}
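## Side note (illustrative sketch, not part of the original functions): the
## pred$pa..pe block above is the inverse multinomial logit (a softmax with a
## reference category). For a single observation with latent contrasts eta:
demo_eta <- c(b = 0.2, c = -0.5, d = 1.1, e = 0.0)  # contrasts vs. reference "a"
demo_p   <- c(1, exp(demo_eta)) / (1 + sum(exp(demo_eta)))
names(demo_p) <- c("pa", "pb", "pc", "pd", "pe")
demo_p  # category probabilities, summing to 1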
MEtree4<-function(data,formula,random) {
ErrorTolerance=50
MaxIterations=100
#parse formula
Predictors<-paste(attr(terms(formula),"term.labels"),collapse="+")
TargetName<-formula[[2]]
Target<-data[,toString(TargetName)]
#set up variables for loop
ContinueCondition<-TRUE
iterations<-0
#set up the initial target
OriginalTarget<-(Target)
oldDIC<- Inf
# Make a new data frame to include all the new variables
newdata <- data
newdata[,"p.1"]<-0
newdata[,"p.2"]<-0
newdata[,"p.3"]<-0
newdata[,"p.4"]<-0
m.list<-list()
tree.list<-list()
while(ContinueCondition){
# Count iterations
iterations <- iterations+1
print(paste("############### Main Iteration ",iterations,"###############"))
# Target response will be updated from the previous result.
if (iterations<2){
newdata[,"OriginalTarget"] <- as.factor(OriginalTarget)
}else {
newdata[,"OriginalTarget"] <- as.factor(MCMCTarget)
}
# Build CHAID tree
ctrl <- chaid_control(alpha2=0.05,alpha4=0.05,
minsplit = 2*floor(nrow(data)/100),
minbucket=floor(nrow(data)/100), minprob=1)
tree <- chaid(formula(paste(c("OriginalTarget", Predictors),collapse = "~"))
,data = newdata, control = ctrl)
tree.list[[iterations]]<-tree
# Get terminal node
newdata[,"nodeInd"] <- 0
newdata["nodeInd"] <-as.factor(predict(tree,newdata=newdata,type="node"))
# Get variables (alternative-specific) that identify the node for
# each observation
newdata["p.1"]<-list(predict(tree,newdata=newdata,type="prob")[,1])
newdata["p.2"]<-list(predict(tree,newdata=newdata,type="prob")[,2])
newdata["p.3"]<-list(predict(tree,newdata=newdata,type="prob")[,3])
newdata["p.4"]<-list(predict(tree,newdata=newdata,type="prob")[,4])
CHAIDTarget<-c()
# Update adjusted target based on CHAID predicted probs.
repeat{
for(k in 1:length(OriginalTarget)){
t<-levels(OriginalTarget)
# Draw a decision based on probs
CHAIDTarget[k]<-sample(t,1,replace=FALSE,
prob=newdata[k,c("p.1","p.2","p.3","p.4")])
}
if ((length(table(CHAIDTarget))==4)){break}
}
newdata[,"CHAIDTarget"] <- as.factor(CHAIDTarget)
# Fit MCMCglmm
k <- length(levels(Target))
I <- diag(k-1)
J <- matrix(rep(1, (k-1)^2), c(k-1, k-1))
prior <- list(
G = list(G1 = list(V = diag(k-1), n = k-1)),
R = list(fix=1,V= (1/k) * (I + J), n = k-1))
    m <- MCMCglmm(fixed = OriginalTarget ~ -1 + trait +
                    trait:(nodeInd+CHAIDTarget),
random = ~ idh(trait):HHID,# ~ idh(trait-1+nodeInd):HHID ??
rcov = ~idh(trait):units,
prior = prior, # Add fix=1 if you want fix R-structure
burnin =1000,
nitt = 21000,
thin = 10,
# This option saves the posterior distribution of
# random effects in the Solution mcmc object:
pr = TRUE,
#pl = TRUE,
family = "categorical",
#saveX = TRUE,
#saveZ = TRUE,
#saveXL = TRUE,
data = newdata,
verbose = T
#slice = T
#singular.ok = T
)
m.list[[iterations]]<-m
#p <- predict(m,type="terms",interval="prediction")[,1]
p <- (predict(m,type="terms",interval="none",posterior="all"))
#p <- (predict(m,type="terms",interval="none",posterior="distribution"))
#p <- (predict(m,type="terms",interval="none",posterior="mean"))
#p <- (predict(m,type="terms",interval="none",posterior="mode"))
# Predicted probability with marginalizing the random effect
#p <- predict(m,type="terms", interval="none",posterior="mean",marginal=NULL)
#p <- predict(m,type="terms", interval="none",
# posterior="mean",marginal=m$Random$formula)
pred<-c()
pred$b<-p[1:nrow(newdata)]
pred$c<-p[(nrow(newdata)+1):(2*nrow(newdata))]
pred$d<-p[(2*nrow(newdata)+1):(3*nrow(newdata))]
pred<-as.data.frame(pred)
pred$pa<-1/(1+exp(pred$b)+exp(pred$c)+exp(pred$d))
pred$pb<-exp(pred$b)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d))
pred$pc<-exp(pred$c)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d))
pred$pd<-exp(pred$d)/(1+exp(pred$b)+exp(pred$c)+exp(pred$d))
pred<-pred[4:7]
# Get the DIC to check on convergence
if(!(is.null(m))){
newDIC <- m$DIC
ContinueCondition <- (abs(oldDIC-newDIC)>ErrorTolerance &
iterations < MaxIterations)
oldDIC <- newDIC
print(paste("###### DIC : ", m$DIC, " ######"))
# Update prob.
newdata["p.1"]<-pred[,1]
newdata["p.2"]<-pred[,2]
newdata["p.3"]<-pred[,3]
newdata["p.4"]<-pred[,4]
# Update adjusted target based on MCMCglmm predicted probs.
MCMCTarget<-c()
repeat{
for(k in 1:length(OriginalTarget)){
t<-levels(OriginalTarget)
MCMCTarget[k]<-sample(t,1,replace=FALSE,
prob=newdata[k,c("p.1","p.2","p.3","p.4")])
}
if ((length(table(MCMCTarget))==4)){break}
}
newdata[,"MCMCTarget"] <- as.factor(MCMCTarget)
}
else{ ContinueCondition<-FALSE }
}
#return final model fits and convergence info.
return(list(
CHAID.tree=tree.list,
MCMCglmm.fit=m.list,
Conv.info=newDIC-oldDIC,
n.iter=iterations
))
}
MEtree2<-function(data,formula,random) {
ErrorTolerance=50
MaxIterations=100
#parse formula
Predictors<-paste(attr(terms(formula),"term.labels"),collapse="+")
TargetName<-formula[[2]]
Target<-data[,toString(TargetName)]
#set up variables for loop
ContinueCondition<-TRUE
iterations<-0
#set up the initial target
OriginalTarget<-(Target)
oldDIC<- Inf
# Make a new data frame to include all the new variables
newdata <- data
newdata[,"p.1"]<-0
newdata[,"p.2"]<-0
m.list<-list()
tree.list<-list()
while(ContinueCondition){
# Count iterations
iterations <- iterations+1
print(paste("############### Main Iteration ",iterations,"###############"))
# Target response will be updated from the previous result.
if (iterations<2){
newdata[,"OriginalTarget"] <- as.factor(OriginalTarget)
}else {
newdata[,"OriginalTarget"] <- as.factor(MCMCTarget)
}
# Build CHAID tree
ctrl <- chaid_control(alpha2=0.05,alpha4=0.05,
minsplit = 2*floor(nrow(data)/200),
minbucket=floor(nrow(data)/200), minprob=1)
tree <- chaid(formula(paste(c("OriginalTarget", Predictors),collapse = "~"))
,data = newdata, control = ctrl)
tree.list[[iterations]]<-tree
# Get terminal node
newdata[,"nodeInd"] <- 0
newdata["nodeInd"] <-as.factor(predict(tree,newdata=newdata,type="node"))
# Get variables (alternative-specific) that identify the node for
# each observation
newdata["p.1"]<-list(predict(tree,newdata=newdata,type="prob")[,1])
newdata["p.2"]<-list(predict(tree,newdata=newdata,type="prob")[,2])
CHAIDTarget<-c()
# Update adjusted target based on CHAID predicted probs.
repeat{
for(k in 1:length(OriginalTarget)){
t<-levels(OriginalTarget)
# Draw a decision based on probs
CHAIDTarget[k]<-sample(t,1,replace=FALSE,
prob=newdata[k,c("p.1","p.2")])
}
if ((length(table(CHAIDTarget))==2)){break}
}
newdata[,"CHAIDTarget"] <- as.factor(CHAIDTarget)
# Fit MCMCglmm
k <- length(levels(Target))
I <- diag(k-1)
J <- matrix(rep(1, (k-1)^2), c(k-1, k-1))
prior <- list(
G = list(G1 = list(V = diag(k-1), n = k-1)),
R = list(fix=1,V= (1/k) * (I + J), n = k-1))
m <- MCMCglmm(fixed = OriginalTarget ~ (nodeInd+CHAIDTarget),
random = ~ HHID,# ~ idh(trait-1+nodeInd):HHID ??
rcov = ~ units,
prior = prior, # Add fix=1 if you want fix R-structure
burnin =1000,
nitt = 21000,
thin = 10,
# This option saves the posterior distribution of
# random effects in the Solution mcmc object:
pr = TRUE,
#pl = TRUE,
family = "categorical",
#saveX = TRUE,
#saveZ = TRUE,
#saveXL = TRUE,
data = newdata,
verbose = T
#slice = T
#singular.ok = T
)
m.list[[iterations]]<-m
#p <- predict(m,type="terms",interval="prediction")[,1]
p <- (predict(m,type="terms",interval="none",posterior="all"))
#p <- (predict(m,type="terms",interval="none",posterior="distribution"))
#p <- (predict(m,type="terms",interval="none",posterior="mean"))
#p <- (predict(m,type="terms",interval="none",posterior="mode"))
# Predicted probability with marginalizing the random effect
#p <- predict(m,type="terms", interval="none",posterior="mean",marginal=NULL)
#p <- predict(m,type="terms", interval="none",
# posterior="mean",marginal=m$Random$formula)
pred<-c()
pred$b<-p[1:nrow(newdata)]
pred<-as.data.frame(pred)
pred$pa<-1/(1+exp(pred$b))
pred$pb<-exp(pred$b)/(1+exp(pred$b))
pred<-pred[2:3]
# Get the DIC to check on convergence
if(!(is.null(m))){
newDIC <- m$DIC
ContinueCondition <- (abs(oldDIC-newDIC)>ErrorTolerance &
iterations < MaxIterations)
oldDIC <- newDIC
print(paste("###### DIC : ", m$DIC, " ######"))
# Update prob.
newdata["p.1"]<-pred[,1]
newdata["p.2"]<-pred[,2]
# Update adjusted target based on MCMCglmm predicted probs.
MCMCTarget<-c()
repeat{
for(k in 1:length(OriginalTarget)){
t<-levels(OriginalTarget)
MCMCTarget[k]<-sample(t,1,replace=FALSE,
prob=newdata[k,c("p.1","p.2")])
}
if ((length(table(MCMCTarget))==2)){break}
}
newdata[,"MCMCTarget"] <- as.factor(MCMCTarget)
}
else{ ContinueCondition<-FALSE }
}
#return final model fits and convergence info.
return(list(
CHAID.tree=tree.list,
MCMCglmm.fit=m.list,
Conv.info=newDIC-oldDIC,
n.iter=iterations
))
}
library(dplyr)
outhomecharging <- mutate(outhomecharging,OutHomeChargingYN = ifelse(OutHomeCharging %in% c("NoCharging"),"NoCharging","Charging"))
outhomecharging$OutHomeChargingYN <- as.factor(outhomecharging$OutHomeChargingYN)
formula.outhomechargingYN <- (OutHomeChargingYN~Urb+Day+pAge+Ncar+Gend+Driver+wstat+
Tdur+Mode+Act+
ModePrev+ActPrev+TTPrev+ModeNext+ActNext+TTNext+
BT+ActDur+Evtype+SOC+Xdag+
Xndag+Xarb+Xpop+Ddag+Dndag+Darb+Dpop+
chargingKrachtstroom_dist+
KrachtstroomN)
## Call the function
MEtree.homecharging.result<-MEtree4(homecharging,formula.homecharging)
MEtree.outhomecharging.result<-MEtree4(outhomecharging,formula.outhomecharging)
MEtree.outhomechargingYN.result<-MEtree2(outhomecharging,formula.outhomechargingYN)
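## Quick inspection sketch (assumes the fits above completed without error):
## the final CHAID tree of one run and the DIC trajectory across iterations.
res <- MEtree.outhomecharging.result
plot(res$CHAID.tree[[res$n.iter]])           # partykit plot of the final tree
sapply(res$MCMCglmm.fit, function(m) m$DIC)  # DIC per EM-style iteration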
setwd("~/ActivityPriority/dynamicDT")
## Save pmmlParty
library(pmmlParty)
library(XML)
aicharging1 <- pmmlparty(MEtree.homecharging.result$CHAID.tree[[2]],
formula.homecharging,homecharging)
aicharging2 <- pmmlparty(MEtree.outhomecharging.result$CHAID.tree[[2]],
formula.outhomecharging,outhomecharging)
saveXML(aicharging1, "aicharging1_R.xml")
saveXML(aicharging2, "aicharging2_R.xml")
library(devtools)
install_github("JWiley/postMCMCglmm")
library(postMCMCglmm)
##### Predict with random effects for out-of-sample data #####
## NOTE: this helper appears adapted from a *regression* RE-EM tree predict
## routine; with a categorical target predict.party() returns factor levels,
## so the additive random-effect correction below is a placeholder rather
## than a finished method for this model.
predict.MEtree <- function(tree , MCMCglmm, newdata,formula, id=NULL,
                           EstimateRandomEffects=TRUE,...){
  treePrediction <- predict.party(tree,newdata)
# If we aren't estimating random effects, we just use the tree for prediction.
if(!EstimateRandomEffects){
return(treePrediction)
}
# Get the group identifiers if necessary
if(is.null(id)){
id <- newdata[,as.character((MCMCglmm$Random$formula[[2]][[3]]))]
}
# Error-checking: the number of observations in the dataset must match
# the sum of NumObs
  if(length(id) != nrow(newdata)){
    stop("number of observations in newdata does not match the length of the group identifiers")
  }
### Use the formula to get the target name
TargetName <- formula[[2]]
# Remove the name of the data frame if necessary
if(length(TargetName)>1) TargetName <-TargetName[3]
ActualTarget <- newdata[,toString(TargetName)]
completePrediction <- treePrediction
# Get the identities of the groups in the data
# This will be slow - does LME have a faster way?
uniqueID <- unique(id)
# Get the random effects from the estimated MCMCglmm, in case there is overlap
estRE <- ranef(object, use = ("mean"))
for(i in 1:length(uniqueID)){
# Identify the new group in the data
nextID <- uniqueID[i]
thisGroup <- id==nextID
# If this group was in the original estimation, apply its random effect
filter<-grepl(toString(uniqueID[i]),rownames(estRE))
estEffect <- estRE[filter,]
    if(length(estEffect) == 0 || is.na(estEffect)){  # guard against no match for this group
# Check for non-missing target
nonMissing <- !is.na(ActualTarget[thisGroup])
numAvailable <- sum(nonMissing)
# If all the targets are missing, accept the
# tree prediction; otherwise, estimate
if(numAvailable>0) {
        ## NOTE: these slot names follow the lme-based RE-EM tree code this
        ## routine appears adapted from; MCMCglmm fits have no 'ErrorVariance'
        ## or 'BetweenMatrix' component, so MCMC analogues (e.g. posterior
        ## means of the (co)variance matrices) would be needed here.
        R <- MCMCglmm$ErrorVariance * diag(numAvailable)
        D <- MCMCglmm$BetweenMatrix
Z <- matrix(data=1,ncol=1, nrow=numAvailable)
W <- solve(R + Z %*% D %*% t(Z))
effect <- D %*% t(Z) %*% W %*%
subset(ActualTarget[thisGroup] - treePrediction[thisGroup],
subset=nonMissing)
completePrediction[thisGroup] <- treePrediction[thisGroup]+effect
}
} else {
completePrediction[thisGroup] <- treePrediction[thisGroup]+estEffect
}
}
return(completePrediction)
}
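## Hypothetical call of the predictor above (argument values are assumptions;
## see the caveats in the function header -- not production-ready for factors):
# predict.MEtree(tree     = MEtree.outhomecharging.result$CHAID.tree[[2]],
#                MCMCglmm = MEtree.outhomecharging.result$MCMCglmm.fit[[2]],
#                newdata  = testData.obs,
#                formula  = formula.outhomecharging)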
##### Create training and test data set #####
set.seed(20)
## For observation-level validation
#Randomly shuffle the data
yourData<-data[sample(nrow(data)),]
#Create 4 equally sized folds (the original seq(4, ...) left the first
#three rows out of the fold assignment)
folds <- cut(seq_len(nrow(yourData)),breaks=4,labels=FALSE)
#Segment the data by fold using the which() function
testIndexes <- which(folds==1,arr.ind=TRUE)
testData.obs <- yourData[testIndexes, ] ## 25% test set
trainData.obs <- yourData[-testIndexes, ] ## 75% training set
## Call the function
## The original called an undefined MEtree() and passed base R's 'formula'
## function; assuming one of the variants above and one of the formulas
## defined earlier were intended, e.g.:
MEtree.result.obs.Model7<-MEtree4(trainData.obs,formula.outhomecharging)
|
c68abbc04c2235cf010e1b3be9c29d9eb2c4153c
|
eb5a2c7de5610fa94238d8120912bafc214eb0da
|
/cachematrix.R
|
d2ab83449bbb9c72db192d30213a9372f0842050
|
[] |
no_license
|
wvkehoe/ProgrammingAssignment2
|
236500a36e4f4503f7bf77b83a281cfe00044227
|
d8ad0918e4dd041ba8246f243fe402db5fcc8da8
|
refs/heads/master
| 2020-12-30T18:03:40.896633
| 2014-09-15T18:40:01
| 2014-09-15T18:40:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,845
|
r
|
cachematrix.R
|
## Take advantage of R's lexical scoping rules and closure support
## to build a 'memo' function for remembering the result of a potentially
## computationally expensive matrix inverse operation.
##
## This is accomplished below with a pair of functions (see detailed doc below for each function):
## - makeCacheMatrix
## - cacheSolve
## Function: makeCacheMatrix(x)
## Create a special matrix object that "wraps" a square matrix along with its inverse matrix.
## The function takes a single parameter 'x' which must be a square matrix.
## The function returns a list which contains the named members 'get' and 'set' which are
## closures over the encapsulated matrix for getting and setting its value. The returned list also
## contains the named members 'getinverse' and 'setinverse' which are closures over the inverse of the matrix.
makeCacheMatrix <- function(x = matrix()) {
ensure_square <- function(z) {
if(nrow(z) != ncol(z)) {
stop(paste("The passed matrix is not square (nrow(x) != ncol(x) (the passed matrix is ",
nrow(z), "x", ncol(z), ")"))
}
}
## make sure the matrix is square
ensure_square(x)
## Initialize inverted matrix cache to NULL
inv <- NULL
    ## setter function for replacing the matrix and invalidating the cache
    set <- function(y) {
        ensure_square(y) ## again make sure the matrix is square
        x <<- y
        inv <<- NULL ## the original reset 'm', a variable that does not exist here
    }
    ## getter function for accessing the matrix
    get <- function() x
## function for setting inverse
setinverse <- function(inverse) inv <<- inverse
## function for getting inverse
getinverse <- function() inv
## return a list that contains all the methods
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Function: cacheSolve(x)
## This function takes a single parameter 'x', which must be a special matrix object that was
## created using the 'makeCacheMatrix' function (above), and returns the inverse of that special matrix.
## To improve performance, the function tests whether the inverse of the special matrix has already
## been computed and cached and returns that inverse if so; otherwise the inverse is computed using
## R's solve function, cached, and then returned.
cacheSolve <- function(x) {
## First make sure that the passed in object is a special matrix
if(!is.list(x) || !is.function(x$getinverse) || !is.function(x$setinverse) || !is.function(x$get)) {
stop("Passed parameter x is not a special matrix.")
}
## see if the special matrix object is already cached and return it if so
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached inverse")
return(inv)
}
## Inverse is not cached so get the matrix, compute the inverse, cache it and return it
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
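## Usage sketch (illustration, not part of the original assignment file):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  # computes the inverse and caches it
cacheSolve(cm)  # prints "getting cached inverse" and returns the cached copy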
|
f244b0a5fddc33d791fcd9bfbf71d8f8ab1c584a
|
f586cc3599f8685ffed9f10befa8bef0dd761cd4
|
/man/vconf.rd
|
6defb7417a85145c6b21730842a6477abb272a29
|
[] |
no_license
|
cran/mrt
|
87bd3d0b56c73c95146ab1c1d8703f8a303e3c89
|
b2ad5f7db7432499d81f827812b2cfbf068132c1
|
refs/heads/master
| 2020-04-07T15:45:38.872572
| 2009-08-17T00:00:00
| 2009-08-17T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
rd
|
vconf.rd
|
\name{vconf}
\alias{vconf}
\title{Non-bootstrap Cramer's V intervals using non-central chi-square}
\description{Follows Smithson in producing confidence intervals for
Cramer's V using the noncentral chi-square distribution. A bootstrap function
for Cramer's V may be added at some point.}
\usage{vconf(ctab, clevel=.95)}
\arguments{
\item{ctab}{the rxc contingency table}
\item{clevel}{Confidence level}
}
\references{Smithson}
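% Illustrative example added as a sketch (not in the original Rd source):
\examples{
vconf(matrix(c(20, 10, 15, 25), nrow = 2))
}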
|
4c0f6dc7c795c4dd2a715f498e399975e181194b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Jmisc/examples/demean.Rd.R
|
83317f697b51e8cecf02be2e122727726a0044ce
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
demean.Rd.R
|
library(Jmisc)
### Name: demean
### Title: Demean a vector or a matrix (by column)
### Aliases: demean
### ** Examples
x<-matrix(1:20,ncol=2)
demean(x)
|
3bb1b023a285c3bed52b2623572241b70371ad6f
|
c486604d9335890f984a425eb9bab70aabfd8c66
|
/Rfiles/rshiny.R
|
26c8d0e8339464d0e08e3cadd423c1921da59f73
|
[] |
no_license
|
Colin303/An-Analysis-of-the-Dublin-rental-market
|
4c0a0f5365fcd5c2033d02b9cc4d8699e199dd76
|
0ee878cd253010b2736630e2a4c95779007c7104
|
refs/heads/master
| 2021-04-14T02:19:08.251063
| 2020-03-25T19:35:16
| 2020-03-25T19:35:16
| 249,202,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,820
|
r
|
rshiny.R
|
#Rshiny tut
#https://medium.com/@joyplumeri/how-to-make-interactive-maps-in-r-shiny-brief-tutorial-c2e1ef0447da
#load libraries
install.packages("shiny")
install.packages("leaflet")
install.packages("leaflet.extras")
library(shiny)
library(leaflet)
library(dplyr)
library(leaflet.extras)
#import data
data <- daft_city  # assumes the 'daft_city' data frame was loaded beforehand
#ui code -------------------------
ui <- fluidPage(
mainPanel(
#this will create a space for us to display our map
leafletOutput(outputId = "mymap"),
#this allows me to put the checkmarks ontop of the map to allow people to view earthquake depth or overlay a heatmap
absolutePanel(top = 60, left = 20,
checkboxInput("markers", "Depth", FALSE),
checkboxInput("heat", "Heatmap", FALSE)
)
))
#server code ----------------------
server <- function(input, output, session) {
  #define the colour palette for the rental price
pal <- colorNumeric(
palette = c('gold', 'orange', 'dark orange', 'orange red', 'red', 'dark red'),
domain = data$price)
  #define the colour palette for the dwelling type
pal2 <- colorFactor(
palette = c('blue', 'yellow', 'red', 'green'),
domain = data$dwelling
)
#create the map
output$mymap <- renderLeaflet({
leaflet(data) %>%
      setView(lng = -6.094057, lat = 53.37188, zoom = 10) %>% #setting the view over Dublin
addTiles() %>%
addCircles(data = data, lat = ~ latitude, lng = ~ longitude, weight = 1,
radius = 5, popup = ~as.character(price),
label = ~as.character(paste0("Price: ", sep = " ", price)),
color = ~pal(price), fillOpacity = 0.5)
})
#next we use the observe function to make the checkboxes dynamic. If you leave this part out you will see that the checkboxes, when clicked on the first time, display our filters...But if you then uncheck them they stay on. So we need to tell the server to update the map when the checkboxes are unchecked.
observe({
proxy <- leafletProxy("mymap", data = data)
proxy %>% clearMarkers()
if (input$markers) {
      proxy %>% addCircleMarkers(stroke = FALSE, color = ~pal2(dwelling), fillOpacity = 0.2, label = ~as.character(paste0("Price: ", price))) %>%
        addLegend("bottomright", pal = pal2, values = data$dwelling,
                  title = "Dwelling Type",
opacity = 1)}
else {
proxy %>% clearMarkers() %>% clearControls()
}
})
observe({
proxy <- leafletProxy("mymap", data = data)
proxy %>% clearMarkers()
if (input$heat) {
proxy %>% addHeatmap(lng=~longitude, lat=~latitude, intensity = ~price, blur = 1, max = 0.05, radius = 1)
}
else{
proxy %>% clearHeatmap()
}
})
}
shinyApp(ui, server)
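## Note (assumption about how the script is launched; not in the original):
## when run non-interactively (e.g. via Rscript), start the app explicitly:
# shiny::runApp(shinyApp(ui, server))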
|
31314e6693f98bc762ff45744286c24f1b2658bc
|
9810702945ddf4d2db8211637ca2b6330420bff6
|
/cachematrix.R
|
2fb4c2b91b8a0cbdcaf1b1d5b6551175d70aae39
|
[] |
no_license
|
campbell78/ProgrammingAssignment2
|
0a859031e5f51123faad523c5ed0c12f3b100c8d
|
706268a2fd214ad3a876a9cba71a4729edb465a7
|
refs/heads/master
| 2021-01-18T09:02:23.858950
| 2014-08-23T23:50:23
| 2014-08-23T23:50:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 669
|
r
|
cachematrix.R
|
##Create Matrix in order to cache the inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <-function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setmatrix <- function(solve) m <<- solve
getmatrix <- function() m
list(set = set, get = get,
setmatrix = setmatrix,
getmatrix = getmatrix)
}
##Computes, caches and returns the inverse if it is not already cached;
##if already cached (and the matrix unchanged), returns the cached inverse
cacheSolve <- function(x, ...) {
m <- x$getmatrix()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
matrix <- x$get()
m <- solve(matrix, ...)
x$setmatrix(m)
m
}
|
37851b18add75345694f961e5149e1ceba45be4a
|
514d1b43b7e43f34399bbea724412f1053f000ab
|
/R/plot.vads.R
|
7955715d7f819018f2144b5d7ca423306d33b5b5
|
[] |
no_license
|
cran/ads
|
980da3b56bc208a78bf625a1c577604ccedf5589
|
cd3eeda3f4bd35e0742ad1fa54aea0eb7159ed52
|
refs/heads/master
| 2023-01-25T02:39:50.140787
| 2023-01-17T09:20:02
| 2023-01-17T09:20:02
| 17,694,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,977
|
r
|
plot.vads.R
|
plot.vads<-function(x,main,opt,select,chars,cols,maxsize,char0,col0,legend,csize,...) {
UseMethod("plot.vads")
}
plot.vads.dval<-function (x,main,opt=c("dval","cval"),select,chars=c("circles","squares"),cols,maxsize,char0,col0,legend=TRUE,csize=1,...) {
if(!missing(select)) {
d<-c()
for(i in 1:length(select)) {
select.in.r<-c()
for(j in 1:length(x$r)) {
select.in.r<-c(select.in.r,ti<-isTRUE(all.equal(select[i],x$r[j])))
if(ti)
d<-c(d,j)
}
stopifnot(any(select.in.r==TRUE))
}
}
else
d<-rank(x$r)
nd<-length(d)
nf<-ceiling(sqrt(nd))
stopifnot(opt%in%c("dval","cval"))
opt<-opt[1]
stopifnot(chars%in%c("circles","squares"))
chars<-chars[1]
ifelse(opt=="dval",val<-x$dval[,d],val<-x$cval[,d])
v<-val
val<-data.frame(adjust.marks.size(val,x$window,if(!missing(maxsize)) maxsize))
def.par <- par(no.readonly = TRUE)
on.exit(par(def.par))
if (missing(main))
main <- deparse(substitute(x))
mylayout<-layout(matrix(c(rep(1,nf),seq(2,((nf*nf)+1),1)),(nf+1),nf,byrow=TRUE))
s<-summary(x$window)
par(mar=c(0.1,0.1,1,0.1),cex=csize)
plot(s$xrange,s$yrange,type="n",axes=FALSE,asp=1/nf)
legend("center","",cex=1.5,bty="n",horiz=TRUE,title=main,...)
if(legend) {
mid<-(s$xrange[2]-s$xrange[1])/2
xl<-c(mid-0.5*mid,mid,mid+0.5*mid)
yl<-rep(s$xrange[2]*0.25,3)
lm<-range(v[v>0])
lm<-c(lm[1],mean(lm),lm[2])
lms<-range(val[val>0])
lms<-c(lms[1],mean(lms),lms[2])
if(missing(chars)||chars=="circles") {
symbols(xl,yl,circles=sqrt(lms),fg=ifelse(missing(cols),1,cols),bg=ifelse(missing(cols),1,cols),inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl,labels=signif(lm,2),pos=4,cex=1.5)
}
else if(chars=="squares") {
symbols(xl,yl,squares=1.5*sqrt(lms),fg=ifelse(missing(cols),1,cols),bg=ifelse(missing(cols),1,cols),inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl,labels=signif(lm,2),pos=4,cex=1.5)
}
}
ifelse(missing(cols),cols<-1,cols<-cols[1])
if(!missing(char0)||!missing(col0)) {
ifelse(missing(col0),col0<-cols,col0<-col0[1])
if(missing(char0))
char0<-3
}
for(i in 1:nd) {
plot.swin(x$window,main=paste("r =",x$r[d[i]]),scale=FALSE,csize=0.66*csize,...)
nort<-(val[,i]==0)
if(!missing(char0)&&any(nort))
points(x$xy$x[nort],x$xy$y[nort],pch=char0,col=col0,...)
if(any(!nort)) {
if(chars=="circles")
symbols(x$xy$x[!nort],x$xy$y[!nort],circles=nf*sqrt(val[!nort,i]),
fg=cols,bg=cols,inches=FALSE,add=TRUE,...)
else if(chars=="squares")
symbols(x$xy$x[!nort],x$xy$y[!nort],squares=1.5*nf*sqrt(val[!nort,i]),
fg=cols,bg=cols,inches=FALSE,add=TRUE,...)
}
}
}
plot.vads.kval<-function (x,main,opt=c("lval","kval","nval","gval"),select,chars=c("circles","squares"),cols,maxsize,char0,col0,legend=TRUE,csize=1,...) {
if(!missing(select)) {
d<-c()
for(i in 1:length(select)) {
select.in.r<-c()
for(j in 1:length(x$r)) {
select.in.r<-c(select.in.r,ti<-isTRUE(all.equal(select[i],x$r[j])))
if(ti)
d<-c(d,j)
}
stopifnot(any(select.in.r==TRUE))
}
}
else
d<-rank(x$r)
nd<-length(d)
nf<-ceiling(sqrt(nd))
opt<-opt[1]
stopifnot(chars%in%c("circles","squares"))
chars<-chars[1]
if(opt=="lval")
val<-x$lval[,d]
else if(opt=="kval")
val<-x$kval[,d]
else if(opt=="nval")
val<-x$nval[,d]
else if(opt=="gval")
val<-x$gval[,d]
else
stopifnot(opt%in%c("lval","kval","nval","gval"))
v<-val
val<-data.frame(adjust.marks.size(val,x$window))
if(!missing(maxsize))
val<-val*maxsize
def.par <- par(no.readonly = TRUE)
on.exit(par(def.par))
if (missing(main))
main <- deparse(substitute(x))
mylayout<-layout(matrix(c(rep(1,nf),seq(2,((nf*nf)+1),1)),(nf+1),nf,byrow=TRUE))
s<-summary(x$window)
par(mar=c(0.1,0.1,1,0.1),cex=csize)
plot.default(s$xrange,s$yrange,type="n",axes=FALSE,asp=1/nf)
legend("center","",cex=1.5,bty="n",horiz=TRUE,title=main,...)
if(legend) {
mid<-(s$xrange[2]-s$xrange[1])/2
xl<-c(mid-0.5*mid,mid,mid+0.5*mid)
yl<-rep(s$xrange[2]*0.25,3)
lm<-range(abs(v)[abs(v)>0])
lm<-c(lm[1],mean(lm),lm[2])
lms<-range(abs(val)[abs(val)>0])
lms<-c(lms[1],mean(lms),lms[2])
if(missing(chars)||chars=="circles") {
symbols(xl,yl,circles=sqrt(lms),fg=ifelse(missing(cols),1,cols),bg=ifelse(missing(cols),1,cols),inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1]+1,xl[2]+lms[2]+1,xl[3]+lms[3]+1),yl,labels=signif(lm,2),pos=4,cex=1)
symbols(xl,yl*0.5,circles=sqrt(lms),fg=ifelse(missing(cols),1,cols),bg="white",inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl*0.5,labels=signif(-lm,2),pos=4,cex=1)
}
else if(chars=="squares") {
symbols(xl,yl,squares=1.5*sqrt(lms),fg=ifelse(missing(cols),1,cols),bg=ifelse(missing(cols),1,cols),inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1]+1,xl[2]+lms[2]+1,xl[3]+lms[3]+1),yl,labels=signif(lm,2),pos=4,cex=1)
symbols(xl,yl*0.5,squares=1.5*sqrt(lms),fg=ifelse(missing(cols),1,cols),bg="white",inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl*0.5,labels=signif(-lm,2),pos=4,cex=1)
}
}
ifelse(missing(cols),cols<-1,cols<-cols[1])
if(!missing(char0)||!missing(col0)) {
ifelse(missing(col0),col0<-cols,col0<-col0[1])
if(missing(char0))
char0<-3
}
for(i in 1:nd) {
plot.swin(x$window,main=paste("r =",x$r[d[i]]),scale=FALSE,csize=0.66*csize,...)
nort<-(val[,i]==0)
neg<-(val[,i]<0)
if(!missing(char0)&&any(nort))
points(x$xy$x[nort],x$xy$y[nort],pch=char0,col=col0,...)
if(any(!nort)) {
if(chars=="circles") {
if(any(!neg))
symbols(x$xy$x[(!neg&!nort)],x$xy$y[(!neg&!nort)],circles=nf*sqrt(abs(val[(!neg&!nort),i])),
fg=cols,bg=cols,inches=FALSE,add=TRUE,...)
if(any(neg))
symbols(x$xy$x[(neg&!nort)],x$xy$y[(neg&!nort)],circles=nf*sqrt(abs(val[(neg&!nort),i])),
fg=cols,bg="white",inches=FALSE,add=TRUE,...)
}
else if(chars=="squares") {
if(any(!neg))
symbols(x$xy$x[(!neg&!nort)],x$xy$y[(!neg&!nort)],squares=1.5*nf*sqrt(abs(val[(!neg&!nort),i])),
fg=cols,bg=cols,inches=FALSE,add=TRUE,...)
if(any(neg))
symbols(x$xy$x[(neg&!nort)],x$xy$y[(neg&!nort)],squares=1.5*nf*sqrt(abs(val[(neg&!nort),i])),
fg=cols,bg="white",inches=FALSE,add=TRUE,...)
}
}
}
}
plot.vads.k12val<-function (x,main,opt=c("lval","kval","nval","gval"),select,chars=c("circles","squares"),cols,maxsize,char0,col0,legend=TRUE,csize=1,...) {
if(!missing(select)) {
d<-c()
for(i in 1:length(select)) {
select.in.r<-c()
for(j in 1:length(x$r)) {
select.in.r<-c(select.in.r,ti<-isTRUE(all.equal(select[i],x$r[j])))
if(ti)
d<-c(d,j)
}
stopifnot(any(select.in.r==TRUE))
}
}
else
d<-rank(x$r)
nd<-length(d)
nf<-ceiling(sqrt(nd))
opt<-opt[1]
stopifnot(chars%in%c("circles","squares"))
chars<-chars[1]
if(opt=="lval")
val<-x$l12val[,d]
else if(opt=="kval")
val<-x$k12val[,d]
else if(opt=="nval")
val<-x$n12val[,d]
else if(opt=="gval")
val<-x$g12val[,d]
else
stopifnot(opt%in%c("lval","kval","nval","gval"))
v<-val
#val<-data.frame(adjust.marks.size(val,x$window,if(!missing(maxsize)) maxsize))
val<-data.frame(adjust.marks.size(val,x$window))
if(!missing(maxsize))
val<-val*maxsize
def.par <- par(no.readonly = TRUE)
on.exit(par(def.par))
if (missing(main))
main <- deparse(substitute(x))
mylayout<-layout(matrix(c(rep(1,nf),seq(2,((nf*nf)+1),1)),(nf+1),nf,byrow=TRUE))
s<-summary(x$window)
par(mar=c(0.1,0.1,1,0.1),cex=csize)
plot.default(s$xrange,s$yrange,type="n",axes=FALSE,asp=1/nf)
legend("center","",cex=1.5,bty="n",horiz=TRUE,title=main,...)
if(legend) {
mid<-(s$xrange[2]-s$xrange[1])/2
xl<-c(mid-0.5*mid,mid,mid+0.5*mid)
yl<-rep(s$xrange[2]*0.25,3)
lm<-range(abs(v)[abs(v)>0])
lm<-c(lm[1],mean(lm),lm[2])
lms<-range(abs(val)[abs(val)>0])
lms<-c(lms[1],mean(lms),lms[2])
if(missing(chars)||chars=="circles") {
symbols(xl,yl,circles=sqrt(lms),fg=ifelse(missing(cols),1,cols),bg=ifelse(missing(cols),1,cols),inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1]+1,xl[2]+lms[2]+1,xl[3]+lms[3]+1),yl,labels=signif(lm,2),pos=4,cex=1)
symbols(xl,yl*0.5,circles=sqrt(lms),fg=ifelse(missing(cols),1,cols),bg="white",inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl*0.5,labels=signif(-lm,2),pos=4,cex=1)
}
else if(chars=="squares") {
symbols(xl,yl,squares=1.5*sqrt(lms),fg=ifelse(missing(cols),1,cols),bg=ifelse(missing(cols),1,cols),inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl,labels=signif(lm,2),pos=4,cex=1)
symbols(xl,yl*0.5,squares=1.5*sqrt(lms),fg=ifelse(missing(cols),1,cols),bg="white",inches=FALSE,add=TRUE,...)
text(c(xl[1]+lms[1],xl[2]+lms[2],xl[3]+lms[3]),yl*0.5,labels=signif(-lm,2),pos=4,cex=1)
}
}
ifelse(missing(cols),cols<-1,cols<-cols[1])
if(!missing(char0)||!missing(col0)) {
ifelse(missing(col0),col0<-cols,col0<-col0[1])
if(missing(char0))
char0<-3
}
for(i in 1:nd) {
plot.swin(x$window,main=paste("r =",x$r[d[i]]),scale=FALSE,csize=0.66*csize,...)
nort<-(val[,i]==0)
neg<-(val[,i]<0)
if(!missing(char0)&&any(nort))
points(x$xy$x[nort],x$xy$y[nort],pch=char0,col=col0,...)
if(any(!nort)) {
if(chars=="circles") {
if(any(!neg))
symbols(x$xy$x[(!neg&!nort)],x$xy$y[(!neg&!nort)],circles=nf*sqrt(abs(val[(!neg&!nort),i])),
fg=cols,bg=cols,inches=FALSE,add=TRUE,...)
if(any(neg))
symbols(x$xy$x[(neg&!nort)],x$xy$y[(neg&!nort)],circles=nf*sqrt(abs(val[(neg&!nort),i])),
fg=cols,bg="white",inches=FALSE,add=TRUE,...)
}
else if(chars=="squares") {
if(any(!neg))
symbols(x$xy$x[(!neg&!nort)],x$xy$y[(!neg&!nort)],squares=1.5*nf*sqrt(abs(val[(!neg&!nort),i])),
fg=cols,bg=cols,inches=FALSE,add=TRUE,...)
if(any(neg))
symbols(x$xy$x[(neg&!nort)],x$xy$y[(neg&!nort)],squares=1.5*nf*sqrt(abs(val[(neg&!nort),i])),
fg=cols,bg="white",inches=FALSE,add=TRUE,...)
}
}
}
}
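## Usage sketch (hypothetical; assumes the usual 'ads' workflow in which
## dval() returns an object of class c("vads", "dval") drawn by these methods):
# d <- dval(my_spp, upto = 25, by = 5)     # 'my_spp': an assumed spp point pattern
# plot(d, opt = "dval", chars = "circles")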
|
bcf285303523e5f2c4eaf266ec51bbaf1ea9160b
|
a714d228510c539e937ec9d80d7edc98a6c9a6e9
|
/R/Measure_colAUC.R
|
7369af5c63a8d23ed145c4049b4511750dd725d1
|
[] |
no_license
|
Alven8816/mlr
|
9c290208f03620530db57226f05933e1399a1749
|
41e95a9bc02246a21a9298f9658160908ba6c185
|
refs/heads/master
| 2021-01-12T17:50:12.493060
| 2016-10-21T17:38:23
| 2016-10-21T17:38:23
| 71,648,158
| 0
| 1
| null | 2016-10-22T15:33:11
| 2016-10-22T15:33:11
| null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
Measure_colAUC.R
|
# colAUC calculates for a vector with true values the Area Under the ROC Curve (AUC) for a matrix of samples.
# Matrix rows contain samples while the columns contain features/variables.
# The function is used to calculate different multiclass AUC measures AU1P, AU1U, AUNP, AUNU,
# following the definition by Ferri et al.:
# https://www.math.ucdavis.edu/~saito/data/roc/ferri-class-perf-metrics.pdf
colAUC = function(samples, truth) {
y = as.factor(truth)
X = as.matrix(samples)
if (nrow(X) == 1)
X = t(X)
nr = nrow(X)
nc = ncol(X)
ny = table(y)
ul = as.factor(rownames(ny))
nl = length(ny)
if (nl <= 1)
stop("colAUC: List of labels 'y' have to contain at least 2 class labels.")
if (!is.numeric(X))
stop("colAUC: 'X' must be numeric")
if (nr != length(y))
stop("colAUC: length(y) and nrow(X) must be the same")
per = t(utils::combn(1:nl, 2))
np = nrow(per)
auc = matrix(0.5, np, nc)
rownames(auc) = paste(ul[per[, 1]], " vs. ", ul[per[, 2]], sep = "")
colnames(auc) = colnames(X)
  # Wilcoxon/Mann-Whitney rank-sum AUC for every class pair (rows) and feature column
idxl = vector(mode = "list", length = nl)
for (i in 1:nl) idxl[[i]] = which(y == ul[i])
for (j in 1:nc) {
for (i in 1:np) {
c1 = per[i, 1]
c2 = per[i, 2]
n1 = as.numeric(ny[c1])
n2 = as.numeric(ny[c2])
if (n1 > 0 & n2 > 0) {
r = rank(c(X[idxl[[c1]], j], X[idxl[[c2]], j]))
        # Mann-Whitney U statistic rescaled to [0, 1]
        auc[i, j] = (sum(r[1:n1]) - n1 * (n1 + 1) / 2) / (n1 * n2)
}
}
}
  auc = pmax(auc, 1 - auc) # direction-free AUC, always >= 0.5
return(auc)
}
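## Usage sketch (illustration, not part of the original mlr file): pairwise
## AUCs of each iris feature for separating the three species (a 3 x 4 matrix).
colAUC(iris[, 1:4], iris$Species)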
|
517b4e696d8c5f7a8e40913f006ab25230f592a4
|
5646d369e179ed6cf0e8b88cd14b9cc595731974
|
/cachematrix.R
|
155ccecd27eaf069713339f6c7ad0e39e0c245e3
|
[] |
no_license
|
Krieth/ProgrammingAssignment2
|
8c2204f4830444e04a2524e094ac87d34b900f52
|
91f841ee8da653668a1a4e5795bda49dcf82f943
|
refs/heads/master
| 2022-11-16T05:30:53.851150
| 2020-07-16T16:12:47
| 2020-07-16T16:12:47
| 280,197,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
cachematrix.R
|
## The function below creates a special "matrix": a list containing
## functions that:
## 1) Sets the values of the entries in the matrix.
## 2) Gets the matrix.
## 3) Sets the values of each entry in the inverse matrix.
## 4) Gets the inverse matrix.
## This function creates a special "matrix" object that can store its inverse.
makeCacheMatrix <- function(x = matrix()) {
mInv <- NULL
set <- function(y) {
x <<- y
mInv <<- NULL
}
get <- function() x
setInv <- function(inverse) mInv <<- inverse
getInv <- function() mInv
list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## This function calculates the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), then cacheSolve should retrieve the inverse from the
## cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
mInv <- x$getInv()
if (!is.null(mInv)) {
message("Getting chached data")
return(mInv)
}
mtrx <- x$get()
mInv <- solve(mtrx, ...)
x$setInv(mInv)
mInv
}
|