blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6538504ea68dae67b486fcdad7721d9dc384bfd | 76ad7614dfe2a77154d408c559393b6a963ddd73 | /submitted_detect.R | 81ad1ffa5c935e0db15e14ad2d53bf4d6318921b | [] | no_license | guopeng-jiang/ShinyApp_testing | aec3c71548c3b00cdafd011398190c0d0d1a326d | 52e1ef00713b094aea0e4de17f0c2cb7259a9228 | refs/heads/main | 2023-07-08T22:13:24.016776 | 2021-08-12T23:14:28 | 2021-08-12T23:14:28 | 392,844,112 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,610 | r | submitted_detect.R | library(readxl)
#################### FEMP summary #####################
# Detect which farms from the FEMP summary re-appear in the 2021
# resubmission, by matching truncated physical-address strings both
# fuzzily (Jaro-Winkler distance) and exactly ("hard" prefix matching).
# NOTE(review): %>%, group_by() and slice() below come from dplyr, which
# is not attached in this script -- presumably loaded elsewhere; confirm.
long = read_excel("N:/Gorden_Jiang/FEMP_summary/FEMP_summary28052021.xlsx")
ID = long$ID
# Keep only the first 15 characters of the address for matching.
address = substr(long$PhysicalAddress,1, 15)
# median(nchar(long$address), na.rm = T)
long = data.frame(ID, address)
##################### FEMP resubmission ###########################
short = read_excel("N:/Gorden_Jiang/FEMP_2021_resubmission/FEMPresub28052021.xlsx")
ID = short$FEMPID
address = substr(short$FarmPhysicalAddress,1, 15)
short = data.frame(ID, address)
##################### FEMP resubmitted (Fuzzy + Hard matching) ##########################
library(fuzzyjoin)
# Fuzzy left-join on address (method = "jw": Jaro-Winkler), then keep the
# closest resubmission match for each summary record.
fuz_match = stringdist_join(long,
                            short,
                            by = "address", mode = "left", ignore_case = T, method = "jw",max_dist = 3,
                            distance_col = "dist") %>% group_by(ID.x) %>% slice(which.min(dist))
# "Hard" confirmation of each fuzzy match: the first 5 characters of the
# two addresses must literally match (fixed = T disables regex).
fuz_match$match_yes = mapply(function(a, b) grepl(a, b, fixed = T),
                             substr(fuz_match$address.x,1,5),
                             substr(fuz_match$address.y,1,5))
print(paste0(sum(fuz_match$match_yes), " detected by fuzzy + hard matching"))
# Compare to hard matching only
long$resubmitted = substr(long$address, 1, 10) %in% substr(short$address, 1, 10)
print(paste0(sum(long$resubmitted), " detected by hard matching only"))
colnames(fuz_match)[1] = "ID"
compare = merge(x = long, y = fuz_match, by = c("ID"), all.x = TRUE)
# Inspect records caught by fuzzy+hard matching but missed by hard matching alone.
View(subset(compare, compare$resubmitted == FALSE & compare$match_yes == TRUE))
|
e6a11178b333194530d207fc7760a8368f247120 | ac01758e97d1a98fd558fa99467a91e5382c946c | /DDP_Shiny_1.2/server.R | 7f11b1a2bc9985a24c15b537ff82a7365891c090 | [
"Unlicense"
] | permissive | katrinleinweber/datasciencecoursera | d3f6a804add4a964f87a6c0cbdcd2d749797c24a | 73336a9ad6297c2e2e301333601d83c93fc51709 | refs/heads/master | 2021-01-20T16:55:26.004301 | 2017-03-02T20:13:21 | 2017-03-02T20:13:21 | 82,836,947 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,351 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Renders a scatter plot of uniformly distributed random points; the point
# count, x/y ranges and label toggles all come from inputs defined in ui.R.
shinyServer(function(input, output) {
  # data input & presentation defined in ui.R (`sliderInput()` & `plotOutput()`)
  # [x] Are "output" & "input" reserved variable names (orange symbol in RStudio)?
  # ==> no, function parameters defined above
  output$dots <- renderPlot({ # reactivity requires parentheses with inner curly braces
    # NOTE(review): 2017-02-23 is evaluated as arithmetic (2017 - 2 - 23 = 1992),
    # not as a date literal, so the seed is the integer 1992. Harmless, but
    # worth knowing if the seed value ever matters.
    set.seed(2017-02-23)
    # implement effects of inputs
    number_of_points <- input$numeric
    minX <- input$sliderX[1]
    maxX <- input$sliderX[2]
    minY <- input$sliderY[1]
    maxY <- input$sliderY[2]
    # generate random, uniform numbers
    dataX <- runif(n = number_of_points, min = minX, max = maxX)
    dataY <- runif(number_of_points, minY, maxY)
    # implement effects of checkboxes
    xlab <- ifelse(test = input$show_xlab, yes = "x axis", no = "")
    ylab <- ifelse(input$show_ylab, "y axis", "")
    main <- ifelse(input$show_title, "title", "")
    # Fixed axes so the view does not jump when slider ranges change.
    plot(x = dataX, y = dataY, xlab = xlab, ylab = ylab, main = main,
         xlim = c(-100,100), ylim = c(-100,100))
  })
})
|
99b18a63063b39a2d65d133629d4272592028690 | 94c1f2fc69dd3b0cf9b6135fb8a138cd008c0e2b | /series_estacionarias.r | 009324abb49b3438516a5deeeeb7c9e2fb0d7ae6 | [] | no_license | ricardocunh/serie_temporais_com_r | 85093ebd366636ebf3f0f567a95df84abcda03a1 | 375016c80c0a360f9afca4b05cf4f2badd89a99e | refs/heads/main | 2023-03-30T05:46:18.825319 | 2021-03-28T21:28:49 | 2021-03-28T21:28:49 | 350,142,747 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,369 | r | series_estacionarias.r | # Série Estacionária
# A média e a variância se mantêm constante durante o tempo
# Em principio, series com tendências e sazonalidade não são estacionárias
##Por que a Estacionariedade é importante?
# Existem técnicas analíticas de series temporais que dependem da estacionariedade da série para funcionarem
# Se a série não é estacionária, pode-se aplicar algum tipo de transformação
## Como saber se é Estacionária ou não?
# Visualmente
# Testes Estatísticos (Dickey-Fuller, KPSS, Phillips-Perron) **Forma mais correta de se avaliar
# COMPONENTES E PADRÕES
# Tendêcias
# Sazonalidades
# Ciclo
# Erro (restante)
# > Tendências
# Aumento ou redução a longo prazo
# > Sazonalidades
# Padrões que ocorrem em intervalos fixos
# > Ciclo
# Aumento ou redução de frequência sem intervalos fixos
# > Erro (restantes)
# Tendência
# Sazonalidade + Erro
# Ciclo
## CORRELAÇÃO (R)
# Mostra a força e a direção da relação entre variáveis aleatórias
# Pode ser um valor entre -1 e 1
# A correlação de A ~ B é a mesma que B ~ A
# FORÇA E DIREÇÃO
# 1 > PERFEITA (Positiva)
# 0,7 > Forte
# 0,5 > Moderada
# 0,25 > Fraca
# 0 > INEXISTENTE
# -0,25 > Fraca
# -0,5 > Moderada
# -0,7 > Forte
# -1 > PERFEITA (Negativa)
## EXEMPLOS:
# 1 : Positiva perfeita
# -0,8 : Negativa forte
# 0,23 : Positiva fraca
# 0,09 : Positiva fraca
# -0,334 : Negativa Fraca
# 0 : Inexistente
# 0,6 : Positiva moderada
# 1,2 : ERRO
# Em uma correlação forte, dentro de um gráfico de dispersão os dados ficam todos próximos,
# já na correlação fraca os dados ficam dispersos, longe um dos outros
## COEFICIENTE DE DETERMINAÇÃO (R(2) ao quadrado)
# Mostra o quanto o modelo consegue explicar os valores
# Quanto maior, mais explicativo ele é
# O restante da variabilidade está me variáveis não incluídas no modelo
# Varia entre zero até 1 (sempre positivo)
# Calcula-se com o quadrado do coeficiente de correlação (R)
## CORRELAÇÃO
# Podemos fazer previsões usando apenas a serie temporal (sem outra variável para explicar o modelo)
# Podemos usar a própria vaiável e mais uma variável explanatória (independente)
# A relação da vaiável com ela mesma, se chama autocorrelação
|
caadf2c9b5a9859b9d5d8b4a858ec0b5090cdde2 | 43c9baa29238dd34df80f76b147d1ef24e267f6b | /man/eq_download_data.Rd | 9ff323920b7d0e75f58f324ceff276a2676029c8 | [] | no_license | rhkaz/noaamsdr | dae6cc083d70ebf69b3bf36d790c5217e3194195 | 9f392033e678f963ce1b2697600b0ec3e8622891 | refs/heads/master | 2021-01-20T13:22:01.313972 | 2017-05-06T17:20:52 | 2017-05-06T17:20:52 | 90,477,353 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 487 | rd | eq_download_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data.R
\name{eq_download_data}
\alias{eq_download_data}
\title{Download the NOAA dataset}
\usage{
eq_download_data(destdir)
}
\arguments{
\item{destdir}{character. Destination directory}
}
\value{
NULL. Impure function, called for its side effects
}
\description{
Get the NOAA dataset in TXT file format. If the destination directory
does not exist, it will be created.
}
\examples{
\dontrun{download_noaa()}
}
|
f9bf92250251c04235918ed6017556a7e533a74f | b728911ad2f0d8dbd51edd1ce5e4bad427027e75 | /personalGenome/app.R | 144ec5354d570078ffac8c5082add06d7230902a | [] | no_license | manitsalana/bcb420-shiny-app | 52f02a62cc3240ed5f1d2f6cf19c847aa9b2d3cd | 7df38a3ecc3548e5a65e222218ea2cfc62bcd6e1 | refs/heads/master | 2020-03-07T06:10:39.085731 | 2018-03-30T21:55:17 | 2018-03-30T21:55:17 | 127,314,849 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,192 | r | app.R | library(shiny)
library(ggplot2)
library(plotly)
# p16FIXhq_df is the name of the dataset
# datasetName <- load(file = "./p16FIXhq_df.RData")
load(file="./topNucFreqNoCode.RData")
load(file="./topNucFreqCode.RData")
# dataset <- p16FIXhq_df$POS
# UI definition: a slider for the number of observations plus a plot area.
ui <- fluidPage(
  # Application title
  titlePanel("Hello Shiny!"),
  # Sidebar with a slider input for number of observations
  sidebarLayout(
    sidebarPanel(
      sliderInput("obs",
                  "Number of observations:",
                  min = 1,
                  max = 1000,
                  value = 500)
    ),
    # Show a plot of the generated distribution
    # NOTE(review): this output id is "distPlot", but the server function
    # in this file only assigns output$plot (whose renderPlotly body is
    # entirely commented out), so nothing is rendered in this panel.
    # Confirm which output id is actually intended.
    mainPanel(
      plotOutput("distPlot")
    )
  )
  # titlePanel("Chr20 Top Variants"),
  #
  # mainPanel(
  #   plotlyOutput('plot')
  # )
)
# Server logic. NOTE(review): every statement inside renderPlotly() below
# is commented out, so output$plot evaluates to NULL and the app currently
# renders nothing; the ui also expects an output named "distPlot", which is
# never assigned. The commented code (kept verbatim) built ordered
# frequency bar charts of coding vs non-coding variants from the
# topNucFreqCode / topNucFreqNoCode data loaded at the top of the file.
server <- function(input, output) {
  # positionMap <- p16FIXhq_df[,c("POS","Coding")]
  # positionCount <- as.data.frame(table(positionMap))
  # posFreq <- positionCount[positionCount$Freq>0,]
  # posFreq$CodeStatus[posFreq$Coding==TRUE] <- "Coding"
  # posFreq$CodeStatus[posFreq$Coding==FALSE] <- "Non-Coding"
  #
  # nucMapCode <- p16FIXhq_df[p16FIXhq_df$Coding==TRUE, c("ALT")]
  # nucCountCode <- as.data.frame(table(nucMapCode))
  # nucFreqCode <- nucCountCode[nucCountCode$Freq>0,]
  # orderedNucFreqCode <- nucFreqCode[order(-nucFreqCode$Freq),]
  # topNucFreqCode <- orderedNucFreqCode[1:35,]
  #
  # nucMapNoCode <- p16FIXhq_df[p16FIXhq_df$Coding==FALSE, c("ALT")]
  # nucCountNoCode <- as.data.frame(table(nucMapNoCode))
  # nucFreqNoCode <- nucCountNoCode[nucCountNoCode$Freq>0,]
  # orderedNucFreqNoCode <- nucFreqNoCode[order(-nucFreqNoCode$Freq),]
  # topNucFreqNoCode <- orderedNucFreqNoCode[1:35,]
  # dataset <- reactive({
  #   smallPosFreq
  # })
  output$plot <- renderPlotly({
    # topNucFreqNoCode$nucMapNoCode <- factor(topNucFreqNoCode$nucMapNoCode,
    #                                         levels = unique(topNucFreqNoCode$nucMapNoCode)
    #                                         [order(topNucFreqNoCode$Freq, decreasing = TRUE)])
    #
    # topNucFreqCode$nucMapCode <- factor(topNucFreqCode$nucMapCode,
    #                                     levels = unique(topNucFreqCode$nucMapCode)
    #                                     [order(topNucFreqCode$Freq, decreasing = TRUE)])
    #
    # p1 <- plot_ly(data = topNucFreqNoCode, x = ~nucMapNoCode, y = ~Freq,
    #               width = 1000, height = 1000, name = 'Non-Coding')
    #
    # p2 <- plot_ly(data = topNucFreqCode, x = ~nucMapCode,
    #               y = ~Freq, width = 1000, height = 1000, name = 'Coding')
    #
    # p <- subplot(p1, p2, nrows = 2)
    # ggplotly(p)
    # Expression that generates a plot of the distribution. The expression
    # is wrapped in a call to renderPlot to indicate that:
    #
    # 1) It is "reactive" and therefore should be automatically
    #    re-executed when inputs change
    # 2) Its output type is a plot
    #
    # output$distPlot <- renderPlot({
    #
    #   # generate an rnorm distribution and plot it
    #   dist <- rnorm(input$obs)
    #   hist(dist)
    # })
  })
}
shinyApp(ui = ui, server = server) |
b29600bbb6b4a168c181989e570f02d3a9de3a09 | e1fbd987544702b646e0072b1bccd56d7a24cbeb | /R/ovtsub.R | 4617b4e8819d94553d249a907017ca139be5f3eb | [] | no_license | RGLab/flowStats | 4ed715566cd336ebad4aeb1eeae86a0465bf2f6c | fabb13191a2567e41a89691cab31507d4cbddc7a | refs/heads/devel | 2023-07-19T21:22:09.624646 | 2023-07-19T00:39:40 | 2023-07-19T00:39:40 | 3,539,961 | 11 | 9 | null | 2023-07-19T01:09:13 | 2012-02-24T21:35:17 | R | UTF-8 | R | false | false | 2,394 | r | ovtsub.R | ##' This function computes an Overton-like subtraction of two densities. It calculates the proportion of the reference density that is above a reference
##' Overton-like subtraction of two densities: computes the proportion of
##' the test density that lies above the reference density.
##'
##' The test can be one-sided or two-sided. If one sided, it tests the region of
##' the test density that is above the mode of the reference density. If
##' two-sided it will look at the regions on either side of the mode of the
##' reference density.
##' Densities are computed on a grid of 1024, and appropriately normalized.
##' @title Overton-like subtraction of densities.
##' @param ref The reference channel specified as a \code{vector}
##' @param test The test (potentially positive) channel specified as a \code{vector}
##' @param twosided \code{boolean} flag testing whether the area of the density of the test curve above the reference curve will be calculated on both sides of the mode of the test curve (TRUE) or only on the positive side of the mode (FALSE, default).
##' @return \code{numeric} value in [-1, 1] representing the proportion of the area of the test density above the reference density.
##' @author Greg Finak
##' @export
##' @examples
##' A = rnorm(10000,mean=1,sd=0.5)
##' B = rnorm(10000,mean=2,sd=0.5)
##' overton_like(A,B)
##'
overton_like <- function(ref, test, twosided = FALSE) {
  ## Evaluate both densities on a common support so the grids align.
  from <- pmin(range(ref), range(test))[1]
  to <- pmax(range(ref), range(test))[2]
  ref <- density(ref, from = from, to = to, n = 1024)
  test <- density(test, from = from, to = to, n = 1024)
  ## Mean of the reference density, approximated on the (uniform) grid.
  muA <- sum(ref$y * ref$x * diff(ref$x)[1])
  ## Area under the pointwise maximum of the two curves (normalising constant).
  ABnorm <- sum(pmax(ref$y, test$y) * diff(ref$x)[1])
  ## Normalised per-gridpoint mass of the test curve and of the overlap.
  ## (The original also computed the reference mass, `apart`, but never used it.)
  bpart <- test$y * diff(test$x)[1] / ABnorm
  aboverlap <- pmin(ref$y, test$y) * diff(ref$x)[1] / ABnorm
  if (!twosided) {
    ## One-sided: discard test mass where the test curve is below the
    ## reference to the left of muA, or above the reference to the right
    ## of muA, for both the test mass and the overlapping mass.
    drop <- (test$x < muA & test$y / ref$y < 1) |
      (test$x > muA & test$y / ref$y > 1)
    bpospart <- bpart
    aboverlappos <- aboverlap
    bpospart[drop] <- 0
    aboverlappos[drop] <- 0
    res <- (sum(bpart) - sum(aboverlap) - sum(bpospart) + sum(aboverlappos)) *
      ABnorm
  } else {
    ## Two-sided: mass to the left of muA counts negatively.
    neg <- test$x < muA
    bpart[neg] <- -bpart[neg]
    aboverlap[neg] <- -aboverlap[neg]
    res <- (sum(bpart) - sum(aboverlap)) * ABnorm
  }
  ## Clamp to [-1, 1]. Replaces the original if/else chain, which contained
  ## a no-op branch (`res = res`) and a scalar condition written with `&`.
  max(min(res, 1), -1)
}
|
fea499c8c7ebd8a06fc66fa72109f7ef6a6ba4c6 | bb71bd0537a3eb0a2671089e3a0db531ed7a4e72 | /R/setup.R | e50a0a3bb0f5677ad687a636c3394753f5dd050c | [] | no_license | kauebraga/dissertacao | a6c9bce0c5bca2e2dd716dfaead33ef846f397b4 | 20d75e8e5aa50a44f1940ceecdd531f0df5a5f03 | refs/heads/master | 2023-05-06T11:55:57.452394 | 2021-05-11T13:47:14 | 2021-05-11T13:47:14 | 112,378,851 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,290 | r | setup.R | Sys.setenv(TZ='UTC')
options(scipen = 999)
library(zoo)
library(sp)
library(ggplot2)
library(dplyr)
library(sf)
library(fasttime)
# library(mapview)
#library(ggmap) #função geocode() pra extrair as coordenadas dos endereços
library(sf) #pra importar os dados espaciais e tal
library(data.table)
library(knitr)
library(readr)
library(tidyr)
library(hrbrthemes)
library(stringr)
# library(leaflet.minicharts)
library(purrr)
library(lubridate)
library(mapview)
library(RColorBrewer)
library(furrr)
#library(extrafont)
#extrafont::loadfonts(device="win")
#library(htmltools)
#library(htmlwidgets)
#library(tmap)
# Convert a data frame with coordinate columns (default "lon"/"lat") into
# an sf point object in WGS84 (EPSG:4326). The value is returned invisibly,
# matching the original's implicit-assignment return.
to_spatial <- function(df1, coordenada = c("lon", "lat")) {
  invisible(st_as_sf(df1, coords = coordenada, crs = 4326))
}
# from https://github.com/r-spatial/sf/issues/231
# Replace the geometry of an sf POINT object with plain coordinate columns
# (default "lon"/"lat"), returning an ordinary data frame.
#   x     - an sf object whose geometry column is sfc_POINT
#   names - names for the two coordinate columns produced
sfc_as_cols <- function(x, names = c("lon","lat")) {
  stopifnot(inherits(x,"sf") && inherits(sf::st_geometry(x),"sfc_POINT"))
  ret <- sf::st_coordinates(x)
  ret <- tibble::as_tibble(ret)
  stopifnot(length(names) == ncol(ret))
  # Drop any existing columns that would clash with the new coordinate names.
  x <- x[ , !names(x) %in% names]
  ret <- setNames(ret,names)
  ui <- dplyr::bind_cols(x,ret)
  # Strip the geometry column so a plain (non-sf) data frame is returned.
  st_set_geometry(ui, NULL)
}
# Effectively disable scientific notation when printing numbers.
options(scipen = 10000)
# x %nin% y: elementwise TRUE where x is NOT found in y (negation of %in%).
`%nin%` <- Negate(`%in%`)
`%nlike%` = Negate(`%like%`)  # negated pattern match; `%like%` is provided by data.table (attached above)
|
da3d547d88ea3c32d2efc2fc0933e658e263ac37 | e6041ede6a54aad2f78d03dc3bc3e99a0300a947 | /test_data_preparation.r | 71c7052d393a8ba516fcfae765563339cd996f35 | [] | no_license | 2345scy/ascr_project | 9a6df8adf28691251643e2cbaa169de1449b4328 | aa6ebb1d2b6c1709b251827c423e5429356dfaf2 | refs/heads/master | 2023-08-29T14:35:39.912715 | 2021-09-26T03:40:06 | 2021-09-26T03:40:06 | 298,949,785 | 0 | 0 | null | 2021-07-04T22:48:48 | 2020-09-27T04:01:48 | R | UTF-8 | R | false | false | 8,463 | r | test_data_preparation.r | test_data = function(model_type){
  # test_data(): populate, via `<<-`, the global inputs that the downstream
  # model-fitting tests read: captures, traps, mask, par.extend, ss.opts,
  # survey.length, cue.rates, detfn, bounds_input, sv_input, fix_input,
  # local_input and sound.speed. One branch per supported model type; each
  # branch rebuilds the capture data frame from the packaged example data
  # and sets every global explicitly so no value leaks between test cases.
  # NOTE(review): relies on `example.data` / `multi.example.data` and
  # testthat (context()) being available in the calling environment -- confirm.
  context("Testing model fits")
  if(model_type == "simple fitting -- half normal"){
    #recover the example data to original format
    captures <<- data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                            trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- NULL
    fix_input <<- NULL
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "joint bearing/dist fitting"){
    #detfn = "hn"
    # Adds bearing and distance auxiliary data; g0 fixed at 1.
    captures = data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                          trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures$bearing = as.vector(example.data$capt[["bearing"]])
    captures$dist = as.vector(example.data$capt[['dist']])
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- NULL
    fix_input <<- list(g0 = 1)
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "simple fitting -- hazard halfnormal"){
    captures <<- data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                            trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- "hhn"
    bounds_input <<- NULL
    sv_input <<- NULL
    fix_input <<- NULL
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "simple fitting -- hazard rate"){
    captures <<- data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                            trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- "hr"
    bounds_input <<- NULL
    sv_input <<- list(z = 5)
    fix_input <<- NULL
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "bearing fitting"){
    #detfn = "hn"
    captures = data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                          trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures$bearing = as.vector(example.data$capt[["bearing"]])
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- NULL
    fix_input <<- list(g0 = 1)
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "dist fitting"){
    #detfn = "hn"
    captures = data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                          trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures$dist = as.vector(example.data$capt[['dist']])
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- NULL
    fix_input <<- list(g0 = 1)
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "toa fitting"){
    #detfn = "hn"
    captures = data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                          trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures$toa = as.vector(example.data$capt[["toa"]])
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- NULL
    fix_input <<- list(g0 = 1)
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "ss fitting"){
    # Signal-strength model: needs a detection cutoff and start values.
    captures = data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                          trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures$ss = as.vector(example.data$capt[["ss"]])
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- list(cutoff = 60)
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- list(b0.ss = 90, b1.ss = 4, sigma.ss = 10)
    fix_input <<- NULL
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "joint ss/toa fitting"){
    captures = data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                          trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures$ss = as.vector(example.data$capt[["ss"]])
    captures$toa = as.vector(example.data$capt[["toa"]])
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    par.extend <<- NULL
    ss.opts <<- list(cutoff = 60)
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- list(b0.ss = 90, b1.ss = 4, sigma.ss = 10)
    fix_input <<- NULL
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "Inhomogeneous density estimation"){
    # Density modelled as a function of (rescaled) mask coordinates.
    captures <<- data.frame(session = rep(1, 127 * 6), ID = rep(1:127, 6), occasion = rep(1, 127 * 6),
                            trap = rep(1:6, each = 127), stringsAsFactors = FALSE)
    captures <<- captures[as.logical(as.vector(example.data$capt[["bincapt"]])),]
    traps <<- example.data$traps
    mask <<- example.data$mask
    df <- data.frame(x1 = example.data$mask[, 1]/1000, y1 = example.data$mask[, 2]/1000)
    par.extend <<- list(data = list(mask = df), model = list(D = ~x1+y1))
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- list(g0 = 1)
    local_input <<- FALSE
    sound.speed <<- 331
  } else if (model_type == "Multi-session models"){
    # Two sessions with different trap counts, stacked into one captures frame.
    captures1 = data.frame(session = rep(1, 57 * 6), ID = rep(1:57, 6), occasion = rep(1, 57 * 6),
                           trap = rep(1:6, each = 57), stringsAsFactors = FALSE)
    captures1$bearing = as.vector(multi.example.data$capt[[1]][["bearing"]])
    captures1$dist = as.vector(multi.example.data$capt[[1]][["dist"]])
    captures1 = captures1[as.logical(as.vector(multi.example.data$capt[[1]][["bincapt"]])),]
    captures2 = data.frame(session = rep(2, 29 * 3), ID = rep(1:29, 3), occasion = rep(2, 29 * 3),
                           trap = rep(1:3, each = 29), stringsAsFactors = FALSE)
    captures2$bearing = as.vector(multi.example.data$capt[[2]][["bearing"]])
    captures2$dist = as.vector(multi.example.data$capt[[2]][["dist"]])
    captures2 = captures2[as.logical(as.vector(multi.example.data$capt[[2]][["bincapt"]])),]
    captures <<- rbind(captures1, captures2)
    traps <<- multi.example.data$traps
    mask <<- multi.example.data$mask
    par.extend <<- NULL
    ss.opts <<- NULL
    survey.length <<- NULL
    cue.rates <<- NULL
    detfn <<- NULL
    bounds_input <<- NULL
    sv_input <<- list(kappa = 100)
    fix_input <<- NULL
    local_input <<- FALSE
    sound.speed <<- 331
  }
}
|
e5cfea25265785a9c7177f9b337dc52789fb4537 | 5dbe661a6c46a8e29a1b3d4d8836dceccd2b6f95 | /MolecularEvolution/olfactionAnalyses/FunctionalOlfactoryReceptorGene_Analysis/southern_sea_otter_ORGs/elut1/step_7_plotinROnDesktop/step_7_a_plot_clustalX_dendrogram_20170929.R | ca5a7912b487b064737fc949924d1b8b6853b6bc | [] | no_license | LohmuellerLab/OtterGenomeProject | 9aff387293491cc4125601d3caff81dda0b8a7c3 | 9f70998b67c3941afe12b2de43f287818834a1c0 | refs/heads/master | 2020-05-06T20:18:54.624120 | 2020-01-17T01:53:04 | 2020-01-17T01:53:04 | 180,231,144 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,196 | r | step_7_a_plot_clustalX_dendrogram_20170929.R | ############ PACKAGES ###############
# Interactive analysis script: root, group, plot and summarise a ClustalX
# olfactory-receptor gene tree for the sea otter (elut), then export the
# Class I / Class II tip lists and plot their scaffold positions.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(ape)
#install.packages('dendextend')
require(dendextend)
require(ggplot2)
#source("https://bioconductor.org/biocLite.R")
#biocLite("ggtree")
#biocLite("GenomeGraphs")
require(ggtree)
require(geiger)
require(GenomeGraphs)
############ WORKING DIRECTORY ############
setwd("/Users/annabelbeichman/Documents/UCLA/Otters/Olfaction/20170707_MainOlfactionResults")
###### READ IN TREE ####################
# this is from clustalX with distance correction and gaps removed
# and bootstraps as node labels
tree <- read.tree("/Users/annabelbeichman/Documents/UCLA/Otters/Olfaction/20170707_MainOlfactionResults/step_6_result_mafftAligment.wOutgroups.wRepresentatives.noOutlier.20170929.phb")
############ SET UP GROUPS ###########
# outgroup
outgroup <- tree$tip.label[grep("elut",tree$tip.label,invert=T)]
outgroup <- outgroup[outgroup!="Human_OR2J3"]
outgroup <- outgroup[grep("Clade",outgroup,invert=T)]
# sea otter (eventually do this for pbra and mfur?)
elut <- tree$tip.label[grep("elut",tree$tip.label,invert=F)]
# human
human <- tree$tip.label[grep("Human",tree$tip.label,invert=F)]
# other spp:
representatives <- tree$tip.label[grep("Clade",tree$tip.label)]
classI <- representatives[grep("CLASS_I",representatives)]
classII <- representatives[grep("CLASS_I",representatives,invert=T)]
####### ROOT TREE USING OUTGROUP ###########
tree <- root(tree,outgroup = outgroup,resolve.root = T)
####### MAKE GROUPS IN THE TREE ########
# want to make each clade a group somehow?
representatives <- tree$tip.label[grep("Clade",tree$tip.label)]
clades <- sapply(strsplit(as.character(representatives), "_Clade"), "[", 2)
groups <- list(g1=outgroup,g2=elut,g4=classI,g5=c(classII,human)) # create a list of your different groups! nice!
# to color a group
tree <- groupOTU(tree,groups)
################# USEFUL THINGS: MRCA, viewClade, etc. ########
# NOTE(review): the node numbers below are hard-coded from one particular
# tree; as the inline comment says, they change whenever the input changes.
# to get a node #:
MRCA(tree, tip=outgroup) # 579
MRCA(tree,tip=elut) # 489
# node 579 highligh
viewClade(ggtree(tree)+geom_tiplab(), node=579) # aha! this zooms in on a node
viewClade(ggtree(tree)+geom_tiplab(), node=589) # aha! this zooms in on a node
viewClade(ggtree(tree)+geom_tiplab(), node=489) # aha! this zooms in on a node I think this is Class I!
viewClade(ggtree(tree)+geom_tiplab(), node=576)
######################## PLOT #########################
p <- ggtree(tree,layout="circular",aes(color=group))
p <- p + ggtitle("Class I and II Olfactory Receptors, with outgroup")+
  geom_tiplab(size=1,aes(angle=angle))+
  geom_text2(aes(subset=!isTip, label=label), hjust=-.3,size=0.5,color="black")
p
############# COLLAPSE CLADES ############
# these numbers will change!!!
cp <- p %>% collapse(node=577) %>% collapse(node=589)
cp <- cp + geom_point2(aes(subset=(node == 577)), size=5, shape=23, fill="steelblue")+
  geom_point2(aes(subset=(node == 589)), size=5, shape=23, fill="steelblue")
cp
############### BOOTSTRAP VALUES #####################
############### Figure out Class I and Class II nodes ###########
# class I: 1227 node
viewClade(ggtree(tree)+geom_tiplab(size=3), node=1227)
classITips <- tips(tree,node=1227)
classITips_sppOnly <- classITips[grep("elut",classITips)] # 77
# class II; 1225 node
viewClade(ggtree(tree)+geom_tiplab(), node=1225)
classIITips <- tips(tree,node=1225)
classIITips_sppOnly <- classIITips[grep("elut",classIITips)] # 398
length(classIITips_sppOnly)
# these contain the non-elut genes as well.
length(classITips)
length(classIITips)
# write out:
write.table(classITips,"listsOfORs/elut/classI.tips.includingReps.txt",row.names=F,col.names=F,quote=F)
write.table(classITips_sppOnly,"listsOfORs/elut/classI.tips.sppOnly.txt",row.names=F,col.names=F,quote=F)
write.table(classIITips,"listsOfORs/elut/classII.tips.includingReps.txt",row.names=F,col.names=F,quote=F)
write.table(classIITips_sppOnly,"listsOfORs/elut/classII.tips.sppOnly.txt",row.names=F,col.names=F,quote=F)
############ ZOOM makes nice figure ##############
gzoom(tree,classITips)
###### Plot positions #########
# Tip labels encode scaffold (field 3) and start position (field 4),
# separated by "_"; split them out for plotting.
classIlocations <- strsplit(classITips_sppOnly,"_")
classIscaffs <- sapply(classIlocations, "[", 3)
classIstarts <- sapply(classIlocations, "[", 4)
classIscaff_start <- as.data.frame(cbind(classIscaffs,classIstarts))
classIweirdclade <- strsplit(tips(tree,1290),"_")
classIweirdclade_starts <- sapply(classIweirdclade, "[", 4)
classIweirdclade_scaffs <- sapply(classIweirdclade, "[", 3)
classIweirdclade_scaff_start <- as.data.frame(cbind(classIweirdclade_scaffs,classIweirdclade_starts))
ggplot(classIscaff_start,aes(y=as.character(classIscaffs),x=as.numeric(as.character(classIstarts))))+
  geom_point()+
  geom_point(data=classIweirdclade_scaff_start,color="pink",aes(y=as.character(classIweirdclade_scaffs),x=as.numeric(as.character(classIweirdclade_starts))))
classIIlocations <- strsplit(classIITips_sppOnly,"_")
# NOTE(review): as.table() on the list returned by strsplit() looks wrong
# (the sapply() calls below operate on lists directly) -- likely a stray
# line; confirm before relying on this section.
classIIlocations <- as.table(classIIlocations)
classIIscaffs <- sapply(classIIlocations, "[", 3)
classIIstarts <- sapply(classIIlocations, "[", 4)
classIIscaff_start <- as.data.frame(cbind(classIIscaffs,classIIstarts))
ggplot(classIIscaff_start,aes(y=as.character(classIIscaffs),x=as.numeric(as.character(classIIstarts))))+
  geom_point()
|
e0c56f8ac74e9bedd859f10da066ac4d6516d7e4 | 23cfe0809fb697ca2f4353299abb770d19c1bbce | /course_r_programming/programming_assignment_3/rankall.R | 31aefefe7073be3e13bce055eb8d310da98c6ba9 | [] | no_license | JimmyDore/datasciencecoursera | cf3d46cc0a7effae7b688803b4b6fcbd930a2a87 | 736441d470a6f1542bde421568834701d4ec5974 | refs/heads/master | 2020-03-30T07:58:03.820149 | 2018-11-06T02:42:03 | 2018-11-06T02:42:03 | 150,977,896 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,538 | r | rankall.R | ## 4 - Ranking hospitals in all states
## ------------------------------------------
## Write a function called rankall that takes two arguments: an outcome name (outcome) and a hospital ranking
## (num). The function reads the outcome-of-care-measures.csv file and returns a 2-column data frame
## containing the hospital in each state that has the ranking specified in num. For example the function call
## rankall("heart attack", "best") would return a data frame containing the names of the hospitals that
## are the best in their respective states for 30-day heart attack death rates. The function should return a value
## for every state (some may be NA). The first column in the data frame is named hospital, which contains
## the hospital name, and the second column is named state, which contains the 2-character abbreviation for
## the state name. Hospitals that do not have data on a particular outcome should be excluded from the set of
## hospitals when deciding the rankings.
## rankall(outcome, num): for every state, find the hospital at ranking
## `num` ("best", "worst", or a numeric rank) for 30-day mortality on the
## given outcome. Returns a data frame with columns `hospital` and `state`
## (NA hospital where a state has fewer than `num` ranked hospitals).
rankall <- function(outcome, num = "best") {
  ## Read outcome data.
  ## NOTE(review): the hard-coded absolute path is kept for compatibility,
  ## but the working directory is now restored on exit instead of being
  ## permanently changed (a lasting side effect in the original).
  old_wd <- setwd("/home/jimmydore/Documents/Coursera/Repo_Github/datasciencecoursera/course_r_programming/programming_assignment_3")
  on.exit(setwd(old_wd), add = TRUE)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that the requested outcome is valid.
  possible_outcomes <- c("heart attack", "heart failure", "pneumonia")
  possible_columns <- c("Heart.Attack", "Heart.Failure", "Pneumonia")
  if (!is.element(outcome, possible_outcomes)) {
    stop("invalid outcome")
  }
  ## Build the mortality-rate column name for the chosen outcome.
  index_column <- possible_columns[match(outcome, possible_outcomes)]
  column <- paste0("Hospital.30.Day.Death..Mortality..Rates.from.", index_column)
  data <- data[, c(column, "Hospital.Name", "State")]
  ## Non-numeric entries ("Not Available") coerce to NA; the coercion
  ## warning is expected, so silence it.
  data[, column] <- suppressWarnings(as.numeric(data[, column]))
  ## Hospitals without data on this outcome are excluded from the ranking
  ## (the original comment claiming NAs were kept contradicted the code).
  data <- na.omit(data)
  ## Order by rate, ties broken alphabetically by hospital name.
  data <- data[order(data[, column], data[, "Hospital.Name"]), ]
  ## For one state, return c(hospital at the requested rank, state).
  rank_in_state <- function(state) {
    data_temp <- subset(data, State == state)
    if (num == "best") {
      c(data_temp[1, "Hospital.Name"], state)
    } else if (num == "worst") {
      c(data_temp[nrow(data_temp), "Hospital.Name"], state)
    } else {
      ## Positional index; an out-of-range num yields NA, as required.
      c(data_temp[num, "Hospital.Name"], state)
    }
  }
  ## vapply (type-stable, always a 2 x n character matrix) replaces sapply.
  result <- vapply(sort(unique(data$State)), rank_in_state, character(2))
  result <- data.frame(hospital = result[1, ], state = result[2, ])
  result
}
print(head(rankall("heart attack", 20), 10)) |
150d1060648746180380f7ed8839a0fae0152e8e | 1967193ff92cca080d12f06c16e0a4b9f2c2120f | /R/PlanTrail.R | a8b45af632906a098eba53a0e4fd5d403fe07b88 | [] | no_license | nikapepita/HikingTrail | eb76edd290c9d036963d5811131cbd431bd0d885 | 393d993882b364523cc66ce4c8b880f866baec10 | refs/heads/master | 2022-04-22T11:19:50.316009 | 2020-04-14T17:40:28 | 2020-04-14T17:40:28 | 255,669,068 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 157 | r | PlanTrail.R | #'PlanTrail
#'
#'With this function you can plan a hiking route for a predefined area
#'only cost
#'
#'@param
#'
#'@return
#'
#'@examples
#'
#'@export
#'
#'
|
1ab6157df566170f75833b93c7a47b60dc96e9aa | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/8652_0/rinput.R | 29fec6627d8cf9a9ca7d19910cbf475409883bb6 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree for this gene family, strip its root, and write the
# unrooted version back out next to the original file (as required by
# downstream codeml processing).
testtree <- read.tree("8652_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8652_0_unrooted.txt")
dd57d6d9961c081382d3951578410c755728f7be | 265746beb936c10466eda49f1f49eb22e6c5ba53 | /Week 2/pollutantmean.R | c70514cb78687be2ac5724106735384f019ee313 | [] | no_license | jlondal/RPrograming | d7816ce20e471e7735311c8feaeb845811f0ebbd | 6c6b4ce41e35d49388bfd9286c78e04a21ceccd0 | refs/heads/master | 2016-09-06T00:07:35.678849 | 2014-09-27T14:45:52 | 2014-09-27T14:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 464 | r | pollutantmean.R | setwd("/Users/jameslondall/Dropbox/DS Course/RPrograming/Week 2")
# Compute the mean of a pollutant column across the monitor CSV files
# selected by `id`, ignoring missing values.
#
# Args:
#   directory: folder name (under the hard-coded local data root) that
#              holds the monitor CSV files.
#   pollutant: name of the pollutant column to average (e.g. "sulfate").
#   id:        integer vector of monitor file indices to include.
#
# Returns: the mean pollutant value, rounded to 3 decimal places.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # NOTE(review): the argument is prefixed with an absolute local path, so
  # this only resolves on the original author's machine.
  directory <- paste('/Users/jameslondall/Documents/Data/DS Course/', directory, sep = "")
  files <- as.character(list.files(directory))
  files_paths <- paste(directory, files, sep = "/")
  # Collect each monitor's non-missing pollutant values in a list and
  # flatten once at the end, instead of growing a vector with c() inside a
  # loop (which copies the accumulator on every iteration).
  values <- lapply(id, function(i) {
    df <- read.csv(files_paths[i])
    df[!is.na(df[pollutant]), ][[pollutant]]
  })
  round(mean(unlist(values)), 3)
}
3850dae18a532fba078781f330fb807e0a6a3bef | 4249aed63bb1302c65010c492b9d6446e3f71b9e | /reference/data_wrangling.R | 0003fd6bafa814578b7df4a313b1cd8f3f1459f0 | [] | no_license | Emiton/dsp539-assignment3 | b4f2b209b36d2cf36c50b43afbc3da2c964ff18a | 7af1c8cf731a61bc6e66257cde55c38b60e701fc | refs/heads/master | 2020-05-16T01:30:57.444864 | 2019-05-06T21:37:43 | 2019-05-06T21:37:43 | 182,604,464 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,422 | r | data_wrangling.R | # R Data Viz
# One-time setup: install the tidyverse and fetch the Portal surveys data.
install.packages("tidyverse")

download.file(url="https://ndownloader.figshare.com/files/2292169", destfile = "data/portal_data_joined.csv")
surveys <- read.csv("data/portal_data_joined.csv")
library("tidyverse")

# head(surveys)
# View(surveys)

# Mean weight for each sex within each species (NA weights removed first).
surveys %>%
  filter(!is.na(weight)) %>%
  group_by(sex, species_id) %>%
  summarize(mean_weight = mean(weight, na.rm = TRUE))

# Heaviest animal observed each year (ties all kept by the filter).
surveys %>%
  filter(!is.na(weight)) %>%
  group_by(year) %>%
  filter(weight == max(weight)) %>%
  select(year, genus, species, weight) %>%
  arrange(year)

# Mean weight per genus per plot_id; reused by the reshaping examples below.
surveys_gw <- surveys %>%
  filter(!is.na(weight)) %>%
  group_by(genus, plot_id) %>%
  summarize(mean_weight = mean(weight))

# Wide table: one column per genus, 0 where a genus is absent from a plot.
surveys_spread <- surveys_gw %>%
  spread(key = genus, value = mean_weight, fill = 0)
str(surveys_spread)

# Back to long format; everything except plot_id becomes genus/mean_weight.
surveys_gather <- surveys_spread %>%
  gather(key = genus, value = mean_weight, -plot_id)
str(surveys_gather)

# Same gather, but naming the column range to collapse explicitly.
surveys_spread %>%
  gather(key = genus, value = mean_weight, Baiomys:Spermophilus) %>%
  head()

# Genus richness per plot per year, spread so years become columns.
rich_time <- surveys %>%
  group_by(plot_id, year) %>%
  summarize(n_genera = n_distinct(genus)) %>%
  spread(year, n_genera)
head(rich_time)

# Stack the two measurement columns into measurement/value pairs.
surveys_long <- surveys %>%
  gather(measurement, value, hindfoot_length, weight)
|
f1198e213af0d880a885aea8293bcf6718783297 | b6897715823fd16dd37a9627cf2c4fb18abb481c | /Assignment 2 - K Fold.R | 09795ae9aa2343df64a48ff77a5cdd4008a1a564 | [] | no_license | monicachandil/AMMA2017-18 | 39d6e68526e94603be04070da602f176cefbca70 | 36a5aa8a09e309a4042a6f7c6caf6c9816cf3bb5 | refs/heads/master | 2021-01-23T09:12:04.506033 | 2017-09-07T15:32:10 | 2017-09-07T15:32:10 | 102,566,300 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,067 | r | Assignment 2 - K Fold.R | # STEP - 1 START #
install.packages("titanic")
install.packages("rpart.plot")
install.packages("randomForest")
install.packages("DAAG")
library(titanic)
library(rpart.plot)
library(gmodels)
library(Hmisc)
library(pROC)
library(ResourceSelection)
library(car)
library(caret)
library(dplyr)
library(InformationValue)
library(rpart)
library(randomForest)
library("DAAG")
cat("\014") # Clearing the screen
getwd()
setwd("C:/08072017/AMMA 2017/Data/Assignment_2_Monica") #This working directory is the folder where all the bank data is stored
titanic_train_2<-read.csv('train.csv')
titanic_train<-titanic_train_2
titanic_train_3 <- read.csv('train.csv')
#titanic test
titanic_test_const <-read.csv('test-3.csv')
#splitting titanic train into 70,30
set.seed(1234) # for reproducibility
titanic_train$rand <- runif(nrow(titanic_train))
titanic_train_start <- titanic_train[titanic_train$rand <= 0.7,]
titanic_test_start <- titanic_train[titanic_train$rand > 0.7,]
# number of survived vs number of dead
CrossTable(titanic_train$Survived)
# removing NA row entries
#titanic_train <- titanic_train_start
titanic_train <- titanic_train[!apply(titanic_train[,c("Pclass", "Sex", "SibSp", "Parch", "Fare", "Age")], 1, anyNA),]
titanic_train_NA_allcols <- titanic_train_2[!apply(titanic_train_2[,c("Pclass", "Sex", "SibSp", "Parch", "Fare", "Age")], 1, anyNA),]
nrow(titanic_train_2)
# replacing NA by mean
mean_age = mean(titanic_train_2$Age)
titanic_train_mean_monica <- titanic_train_start
titanic_train_mean_monica2 <- titanic_train_start
titanic_train_mean_monica$Age[is.na(titanic_train_mean_monica$Age)] = mean(titanic_train_mean_monica$Age, na.rm = TRUE)
titanic_train_mean_monica2$Age[is.na(titanic_train_mean_monica2$Age)] = mean(titanic_train_mean_monica2$Age, na.rm = TRUE)
# STEP - 1 END #
# STEP - 2 START #
########## Build model from mean imputed into the data set ##########
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Parch + Fare + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
#lm
fit.train.mean <- lm(formula = Survived ~ Pclass + Sex + SibSp + Parch + Fare + Age,
data=titanic_train_mean_monica2) #family = binomial implies that the type of regression is logistic
summary(fit.train.mean)
#vif - remove those variables which have high vif >5
vif(fit.train.mean)
#removing insignificant variables
titanic_train_mean_monica$Parch<-NULL
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Fare + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
summary(full.model.titanic.mean)
titanic_train_mean_monica$Fare<-NULL
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
summary(full.model.titanic.mean)
#Testing performance on Train set
titanic_train_mean_monica$prob = predict(full.model.titanic.mean, type=c("response"))
titanic_train_mean_monica$Survived.pred = ifelse(titanic_train_mean_monica$prob>=.5,'pred_yes','pred_no')
table(titanic_train_mean_monica$Survived.pred,titanic_train_mean_monica$Survived)
#Testing performance on test set
nrow(titanic_test)
nrow(titanic_test2_mean_monica)
titanic_test2_mean_monica <- titanic_test_start
#imputation by replacing NAs by means in the test set
titanic_test2_mean_monica$Age[is.na(titanic_test2_mean_monica$Age)] = mean(titanic_test2_mean_monica$Age, na.rm = TRUE)
titanic_test2_mean_monica$prob = predict(full.model.titanic.mean, newdata=titanic_test2_mean_monica, type=c("response"))
titanic_test2_mean_monica$Survived.pred = ifelse(titanic_test2_mean_monica$prob>=.5,'pred_yes','pred_no')
table(titanic_test2_mean_monica$Survived.pred,titanic_test2_mean_monica$Survived)
########## END - Model with mean included instead of NA #########
# STEP - 2 END #
# STEP - 3 START #
### Testing for Jack n Rose's survival ###
df.jackrose <- read.csv('Book1.csv')
df.jackrose$prob = predict(full.model.titanic.mean, newdata=df.jackrose, type=c("response"))
df.jackrose$Survived.pred = ifelse(df.jackrose$prob>=.5,'pred_yes','pred_no')
head(df.jackrose)
# Jack dies, Rose survives
### END - Testing on Jack n Rose ###
# STEP - 3 END #
# STEP - 4 START #
## START K-fold cross validation ##
# Defining the K Fold CV function here
# Fit a GLM of `formula` on `dataset` with the given `family`, then run
# k-fold cross-validation on it via DAAG::CVbinary, printing per-fold
# details.  Returns the CVbinary result object.
Kfold_func <- function(dataset, formula, family, k) {
  fitted_model <- glm(formula = formula, data = dataset, family = family)
  CVbinary(fitted_model, nfolds = k, print.details = TRUE)
}
#Defining the function to calculate Mean Squared Error here
# Fit a linear model of `formula` on `dataset`, print a "Mean squared
# error" label followed by the MSE of the fitted residuals, and return
# that MSE (visibly, since print() returns its argument).
MeanSquareError_func <- function(dataset, formula) {
  fit <- lm(formula = formula, data = dataset)
  squared_resid <- summary(fit)$residuals^2
  mse <- mean(squared_resid)
  print("Mean squared error")
  print(mse)
}
#Performing KFold CV on Training set by calling the KFOLD CV function here
Kfoldobj <- Kfold_func(titanic_train_mean_monica,Survived ~ Pclass + Sex + SibSp + Age,binomial,10)
#Calling the Mean Squared Error function on the training set here
MSE_Train <-MeanSquareError_func(titanic_train_mean_monica,Survived ~ Pclass + Sex + SibSp + Age)
#confusion matrix on training set
table(titanic_train_mean_monica$Survived,round(Kfoldobj$cvhat))
print("Estimate of Accuracy")
print(Kfoldobj$acc.cv)
#Performing KFold CV on test set by calling the KFOLD CV function here
Kfoldobj.test <- Kfold_func(titanic_test2_mean_monica,Survived ~ Pclass + Sex + SibSp + Age,binomial,10)
#Calling the Mean Squared Error function on the test set here
MSE_Test <-MeanSquareError_func(titanic_test2_mean_monica,Survived ~ Pclass + Sex + SibSp + Age)
#Confusion matrix on test set
table(titanic_test2_mean_monica$Survived,round(Kfoldobj.test$cvhat))
print("Estimate of Accuracy")
print(Kfoldobj.test$acc.cv)
## END K-FOLD CROSS VALIDATION ##
# STEP - 4 END #
|
9dd4de63061e83bbdbbbecbfadae6ed785ddd18a | e8f656ed68a82e391a8fdbec7701545a69f74aa1 | /main.r | 6156ac979f0ee76eee6488d69355dbea051f8f7d | [] | no_license | pknight24/flying-circus | 36fdea22cead87315a5501b2b7db4584c3646d98 | 7f9b0cf7955061d0f3af2afc950f096310294627 | refs/heads/master | 2020-03-23T08:51:40.393130 | 2018-07-30T01:22:20 | 2018-07-30T01:22:20 | 141,352,175 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 649 | r | main.r | library(Rcpp)
library(RSQLite)
source("helpers.r")
sourceCpp("src/markov.cpp")
###########################################################################################
db <- dbConnect(SQLite(), dbname="database.sqlite")
q <- dbSendQuery(db, "SELECT character, detail FROM scripts")
data <- dbFetch(q)
dbClearResult(q)
dbDisconnect(db)
data <- mutate(data, speaker = ifelse(is.na(character), "Background", "Character"))
speakers <- data$speaker
char.words <- filter(data, speaker == "Character")$detail %>% words.cleaner
back.words <- filter(data, speaker == "Background")$detail %>% words.cleaner
runMain(speakers, char.words, back.words);
|
ecd29283f74e7aae1c4e046fea6c012677b94673 | 6d7a191695cf8905d7cdda3d8753540568a8565e | /Data Plotting Code.R | 3feaaf810ab1df82a8c7163021d8d60cb8174798 | [] | no_license | bhagerott/ExData_Plotting1 | 5b747abceaad68d9f347231ee9670197758ce040 | 8c5c8a86f601e7a27381486967d44934404286e6 | refs/heads/master | 2021-01-17T05:27:23.321671 | 2016-02-14T18:23:46 | 2016-02-14T18:23:46 | 51,704,735 | 0 | 0 | null | 2016-02-14T17:28:09 | 2016-02-14T17:28:09 | null | UTF-8 | R | false | false | 2,370 | r | Data Plotting Code.R | #download data
## Exploratory Data Analysis assignment: download the UCI household power
## consumption data (if not already present), subset it to 2007-02-01 and
## 2007-02-02, and write the four required PNG plots to the working
## directory.

file.name<-"./household_power_consumption.txt"
url<-"http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip.file<-"./data.zip"
if (!file.exists("./household_power_consumption.txt")) {
  download.file(url, destfile = zip.file)
  unzip(zip.file)
  file.remove(zip.file)
}

# Open the file and isolate the relevant data.  Every column is read as
# character (the raw file marks missing values with "?"), so numeric
# columns must be coerced explicitly after the "?" -> NA substitution.
library(data.table)
DT<-fread(file.name, sep=";", header=TRUE, colClasses=rep("character",9))
DT[DT=="?"]<-NA
DT$Date<-as.Date(DT$Date, format="%d/%m/%Y")
DT<-DT[DT$Date>=as.Date("2007-02-01")& DT$Date<=as.Date("2007-02-02")]
DT$posix <- as.POSIXct(strptime(paste(DT$Date, DT$Time, sep = " "),format = "%Y-%m-%d %H:%M:%S"))

# Convert every measurement column used by the plots to numeric.  (Bug
# fix: the original converted only Global_active_power, leaving Voltage,
# Global_reactive_power and the Sub_metering columns as character, so
# plots 3 and 4 tried to draw character vectors.)
num.cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
for (cl in num.cols) {
  set(DT, j = cl, value = as.numeric(DT[[cl]]))
}

# Graph 1: histogram of global active power
png(file = "plot1.png", width = 480, height = 480, units = "px")
hist(DT$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()

# Graph 2: global active power over time
png(file = "plot2.png", width = 480, height = 480, units = "px")
with(DT, plot(posix, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()

# Graph 3: the three sub-metering series over time, with a legend
png(file = "plot3.png", width = 480, height = 480, units = "px")
with(DT, plot(posix, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(DT, points(posix, type = "l", Sub_metering_2, col = "red"))
with(DT, points(posix, type = "l", Sub_metering_3, col = "blue"))
legend("topright", col = c("black", "blue", "red"), legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty = 1)
dev.off()

# Graph 4: 2x2 panel combining the series above plus voltage and
# global reactive power
png(file = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
# Graph 4.1
with(DT, plot(posix, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power"))
# Graph 4.2
with(DT, plot(posix, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))
# Graph 4.3
with(DT, plot(posix, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(DT, points(posix, type = "l", Sub_metering_2, col = "red"))
with(DT, points(posix, type = "l", Sub_metering_3, col = "blue"))
legend("topright", col = c("black", "blue", "red"), legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty = 1)
# Graph 4.4
with(DT, plot(posix, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power"))
dev.off()
844dccc92d28df08fc698c5a558b92ae05946855 | 466e2c481363aa446bb0e470325e90fb1f0069bc | /02_Plotting/02_Plot_CA_HST_suitability.R | 3a246e0f652a07f60eaaf062e451808141ac3407 | [] | no_license | naiamh/Lead-trail_project | 9e51bc78d5b4659c2126c2fa7421e2b96ae2747d | cd31593461bb0fa6f89d8ed13d4e4e3cd25b2c97 | refs/heads/master | 2021-01-10T06:55:51.563796 | 2016-02-16T22:10:32 | 2016-02-16T22:10:32 | 51,658,025 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,956 | r | 02_Plot_CA_HST_suitability.R | # Scripts for plotting suitability maps for historic climate
# Updated October 28 2015
# Clear workspace
rm(list=ls())
Computer <- "HP"
#-----------------#
# Set directories #
#-----------------#
if (Computer == "HP") {
wdir <- 'C:/Users/Naia Morueta Holme/Documents/Documents_share/Projects/'
idir <- 'D:/Lead-trail/Projections/V4/'
bgdir <- paste0(wdir,'100_Postdoc/Data/Background_layers/PROCESSED/')
spdir2 <- paste0(wdir,'101_TBC3_modelling/Lead-trail_R-project/Data/Species/')
fdir <- 'D:/Lead-trail/Projections/Figures_V4/CA_HST_suitabilities/'
spdir <- 'D:/Phylo_modelling/Data/Species/Processed2/'
}
#----------------#
# Load libraries #
#----------------#
require(raster)
#-----------------------#
# Parameters for script #
#-----------------------#
# What species?
allSpecies <- sort(unique(read.csv(paste0(spdir2, 'Species_CLN_matching_v2.csv'), as.is=T)[,'Scientific_name']))
# Which model type
mxModelType = "cwd-djf-jja-ppt"
# Background CA map
bg <- readRDS(paste(bgdir, "GADM_California_proj.rdata",sep=""))
orig.project <- '+proj=longlat +ellps=WGS84'
#---------------------#
# Plot continuous map #
#---------------------#
i=1
for(i in 1:length(allSpecies)) {
mySpecies <- allSpecies[i]
writeLines(mySpecies)
#load projection
hfile <- paste0(idir,mxModelType,'/',mySpecies,'/','1951-1980_suitability_CA_HST.tif')
hst <- raster(hfile)
#load occurrences
pres <- readRDS(paste(spdir, mySpecies, ".rdata", sep=""))
coordinates(pres) <- ~longitude + latitude
projection(pres) <- CRS(orig.project)
figfile <- paste0(fdir, mySpecies,'_CA_suitability.png')
png(figfile, width=1000, height=1000, pointsize = 36)
par(mar=c(0.1,0.1,0.1,2))
plot(hst,axes=F,zlim=c(0,1),legend=T,col=grey.colors(20,start=1,end=0))
# add CA outline
plot(bg,add=T)
# add occurrence points
occur <- spTransform(pres, projection(hst))
points(occur,pch=4,col='red',cex=0.1)
dev.off()
}
|
03551384d6d5398ae553299e400c25199b59c2c9 | 558244a780c81c64cf171def2ae6720f0feb5239 | /crime/scrape.R | a1cb44dcd4cbe65b77ee8f8805176ef6f601c730 | [] | no_license | xyangwu/whosesociety-data | 3056ab6a1469c5699a630617f96300c2b37c8b0a | c2d85192da21aa9bb2eeb45b67d4664213cd75c1 | refs/heads/master | 2021-05-21T19:50:55.740625 | 2020-05-17T13:23:59 | 2020-05-17T13:23:59 | 252,776,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,818 | r | scrape.R | library(rvest)
library(RSelenium)
library(tidyverse)
library(XML)
remDr <- remoteDriver(remoteServerAddr = "localhost"
, port = 4444L
, browserName = "chrome")
# java -jar selenium-server-standalone-3.9.1.jar
# java -jar selenium-server-standalone-3.141.59.jar
remDr$open()
# 18767308370 mypasswordwu
'http://www.hshfy.sh.cn/shfy/gweb2017/flws_view.jsp?pa='
url <- 'http://www.hshfy.sh.cn/shfy/gweb2017/flws_list.jsp?'
remDr$navigate(url)
################### test javascripts ##################
for(j in 1:2){
remDr$executeScript(paste("scroll(0,",1*6000,");"))
Sys.sleep(1)
}
remDr$executeScript("javascript:goPage('6')")
remDr$executeScript('return document.getElementById("imageCode");')
remDr$executeScript('mini.hideMessageBox(messageBox);') # close the box
remDr$executeScript('return imageCodeLoad()')
remDr$executeScript('returndocument.getElementsByTagName("script")') # show the input code
remDr$executeScript('return $("#userCode").val()') # show the input code
scripts <- "window.scrollTo(0, document.body.scrollHeight)"
remDr$executeScript(scripts)
remDr$setWindowSize(200,200)
remDr$screenshot_as_file(display = TRUE)
driver.get_screenshot_as_file('.//1.png')
ahElem <- remDr$findElement(using = "css", '#ah\\$value') # 案号
ahElem$sendKeysToElement(list("nmsl", key = "enter"))
jarqksElem <- remDr$findElement(using = "css", '#jarqks')
remDr$executeScript('mini.getbyName("jarqks").setValue("2001-01-02")') # 开始时间
remDr$executeScript('mini.getbyName("jarqjs").setValue("2019-11-25")') # 截至时间
########################## scrape selenium ###############################
judgement <- data.frame(page = seq(1,18342,1), stringsAsFactors = F)
judgement$table <- NA
judgement$link <- NA
# Page through the court-judgement listing with Selenium: for each page i,
# jump there via the site's goPage JavaScript, scrape the results table and
# the per-row onclick links into `judgement`, and restart the browser
# session between pages.  The start index (67 here) is the manual resume
# point after a previous run stopped; adjust it before re-running.
for(i in 67:nrow(judgement)){ ### input page number (manual resume point)
  remDr$executeScript(paste0("javascript:goPage('",i,"')"))
  # Give the page time to render before grabbing its source.
  Sys.sleep(2.5)
  # try() so a failed page (timeout, captcha, layout change) is skipped
  # instead of killing the whole crawl.
  trynext<- try({
    pagesource <- remDr$getPageSource()[[1]]
    html <- pagesource %>% read_html()
    curpage <- html %>% html_nodes("span.current") %>%html_text()
    # Progress report: "current page - total pages".
    print(paste(curpage, nrow(judgement), sep="-"))
    # Restart the browser session each iteration — presumably to dodge the
    # site's anti-scraping measures; TODO confirm this is still needed.
    remDr$close()
    remDr$open();remDr$navigate(url)
    judgement$table[i] <- html %>% html_nodes("#flws_list_content > table") %>%html_table()
    judgement$link[i] <- html %>% html_nodes("#flws_list_content > table:nth-child(1) > tbody:nth-child(1) tr") %>%
      html_attr('onclick') %>% list()
    #next_leng <- html %>% html_nodes(".meneame a") %>% length()
    #next_elemt <- paste0('/html/body/div[3]/form/center/div/a[', next_leng - 1, ']')
    #b <- remDr$findElement('xpath', next_elemt)
    #b$clickElement()
  })
  # On failure, move on to the next page.
  if ('try-error' %in% class(trynext)) next
  print(i)
}
judgement_injury_df <- cbind.data.frame(do.call(rbind.data.frame, judgement_injury$table),
link = unlist(judgement_injury$link))
names(judgement_injury_df) <- c(judgement_injury_df[1,][1:7], "link")
judgement_injury_df <- judgement_injury_df[-which(judgement_injury_df[,1]=='案号'), ]
rownames(judgement_injury_df) <- 1:nrow(judgement_injury_df)
judgement_injury_df$link <- as.character(judgement_injury_df$link) %>% str_extract_all(., '\\([^&].+\\)') %>% gsub("\\(|\\)|'",'',.)
saveRDS(judgement_injury_df, 'data/judgement_injury_df.rds')
## get content info --------------------------------------------------------
library(httr)
judgement_hoju_df <- rbind.data.frame(judgement_homicide_df, judgement_injury_df, stringsAsFactors = F)
judgement_hoju_df$content <- NA
for(i in 317:nrow(judgement_hoju_df)){
Sys.sleep(3)
trynext<- try({
#search <- remDr$findElement('xpath', '//*[@id="ah$value"]')
#search$sendKeysToElement(list(judgement_rape_df$案号[i]))
#search_click <- remDr$findElement('xpath', '/html/body/div[3]/form/table/tbody/tr[2]/td[2]/img[1]')
#search_click$clickElement()
jud_page <- paste0('http://www.hshfy.sh.cn/shfy/gweb2017/flws_view.jsp?pa=', judgement_hoju_df$link[i])
#Sys.sleep(3)
#pagesource <- remDr$getPageSource()[[1]]
#html <- pagesource %>% read_html()
#html <- read_html(jud_page)
html <- html_session(jud_page, timeout(60),(add_headers(`User-Agent`="mozilla/5.0 (Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36")))
judgement_hoju_df$content[i] <- html %>% html_node('#wsTable') %>% html_text()
#b <- remDr$findElement('xpath', next_elemt)
#b$clickElement()
})
if ('try-error' %in% class(trynext)) next
print(i)
}
saveRDS(judgement_hoju_df, 'data/judgement_hoju_df.rds')
# China Judgements Online (中国法律文书网) — legal instrument scraping #####################################
library(rvest);library(tidyverse)
#wenshu_url <- 'http://wenshu.court.gov.cn/website/wenshu/181107ANFZ0BXSK4/index.html?docId=038eb438363a47c48b54aac400e055bc'
#lawsdata_url <- "http://www.lawsdata.com/?q=eyJtIjoiYWR2YW5jZSIsImEiOnsiY2FzZVR5cGUiOlsiMSJdLCJpbnN0cnVtZW50VHlwZUlkIjpbIjEiXSwicmVhc29uSWQiOlsiMDAxMDA0IiwiMDAxMDA1Il0sImZ1enp5TWVhc3VyZSI6IjAiLCJyZWFzb24iOiLkvrXniq/lhazmsJHkurrouqvmnYPliKnjgIHmsJHkuLvmnYPliKks5L6154qv6LSi5LqnIn0sInNtIjp7InRleHRTZWFyY2giOlsic2luZ2xlIl0sImxpdGlnYW50U2VhcmNoIjpbInBhcmFncmFwaCJdfX0=&s="
lawsdata_url <- "http://www.lawsdata.com/?q=eyJtIjoiYWR2YW5jZSIsImEiOnsiY2FzZVR5cGUiOlsiMSJdLCJpbnN0cnVtZW50VHlwZUlkIjpbIjEiXSwicmVhc29uSWQiOlsiMDAxMDA0IiwiMDAxMDA1Il0sInByb3ZpbmNlSWQiOlsiNzA5MiJdLCJjb3VydElkIjpbIjcwOTIiXSwiZnV6enlNZWFzdXJlIjoiMCIsInJlYXNvbiI6IuS+teeKr+WFrOawkeS6uui6q+adg+WIqeOAgeawkeS4u+adg+WIqSzkvrXniq/otKLkuqciLCJjb3VydCI6IuS4iua1t+W4giJ9LCJzbSI6eyJ0ZXh0U2VhcmNoIjpbInNpbmdsZSJdLCJsaXRpZ2FudFNlYXJjaCI6WyJwYXJhZ3JhcGgiXX19&s="
lawsdata_url2 <- "http://www.lawsdata.com/?q=eyJtIjoiYWR2YW5jZSIsImEiOnsiY2FzZVR5cGUiOlsiMSJdLCJpbnN0cnVtZW50VHlwZUlkIjpbIjEiXSwicmVhc29uSWQiOlsiMDAxMDA0IiwiMDAxMDA1Il0sInByb3ZpbmNlSWQiOlsiNzA5MiJdLCJjb3VydElkIjpbIjcwOTIiXSwiZnV6enlNZWFzdXJlIjoiMCIsInJlYXNvbiI6IuS+teeKr+WFrOawkeS6uui6q+adg+WIqeOAgeawkeS4u+adg+WIqSzkvrXniq/otKLkuqciLCJjb3VydCI6IuS4iua1t+W4giJ9LCJzbSI6eyJ0ZXh0U2VhcmNoIjpbInNpbmdsZSJdLCJsaXRpZ2FudFNlYXJjaCI6WyJwYXJhZ3JhcGgiXX19&s="
remDr$navigate(lawsdata_url2) # increasing time
remDr$maxWindowSize()
remDr$screenshot(display = TRUE)
judge_ls <- data.frame(page = seq(1,6000,1), stringsAsFactors = F)
judge_ls$title <- NA
judge_ls$court <- NA
judge_ls$type <- NA
judge_ls$content <- NA
judge_ls$link <- NA
for(i in 51:nrow(judge_ls)){
Sys.sleep(2)
# log in
login <- try(remDr$findElement(using = "css", "#mobile"),silent = T)
if("try-error"%in%class(login)==FALSE){
logtElem <- remDr$findElement("xpath", '//*[@class="mt-checkbox mt-checkbox-outline"]')
logtElem$clickElement()
mobiElem <- remDr$findElement(using = "css", "#mobile")
mobiElem$sendKeysToElement(list("18767308370", key = "enter"))
passElem <- remDr$findElement(using = "css", "#password")
passElem$sendKeysToElement(list("18767308370", key = "enter"))
}
#Sys.sleep(1)
# time decreasing
#down <- try(remDr$findElement("xpath", '//*[@class="fa fa-arrow-down"]'))
#if("try-error"%in%class(down)){
#sortElem <- remDr$findElement("xpath", '//span[@data-sort-field="judgementDate"]')
#sortElem$clickElement()
#}
trynext<- try({
PageSource <- remDr$getPageSource()[[1]]
html <- PageSource %>% read_html()
#resultList <- html %>% html_node("#resultListDiv > div:nth-child(2) > div") %>% html_text()
#result_ls <- c(result_ls, resultList)
judge_ls$title[i] <- html %>% html_nodes(xpath = '//*[@class="caption-subject"]') %>% html_text()%>%list()
judge_ls$court[i] <- html %>% html_nodes(xpath = '//*[@class="row case-footer"]') %>% html_text()%>%str_trim()%>%list()
judge_ls$type[i] <- html %>% html_nodes(xpath = '//*[@class="col-md-8 case-footer-lineSecond"]') %>% html_text()%>%str_trim()%>%list()
judge_ls$content[i] <- html %>% html_nodes(xpath = '//*[@class="tab-pane fade active in font-color"]') %>% html_text()%>%str_trim()%>%list()
judge_ls$link[i] <- html %>% html_nodes(xpath = '//*[@class="detail-instrument-link"]') %>% html_attr("href")%>%list()
#link <- html %>% html_nodes(xpath = '//*[@class="detail-instrument-link"]') %>% html_attr("href")
#link_ls <- c(link_ls, link)
b <- remDr$findElement("link text", "下一页")
b$clickElement()
})
if ('try-error' %in% class(trynext)) next
print(i)
}
judge_ls$title[200]
saveRDS(judge_ls,"data/judge_ls")
result_ls <- readRDS("data/result_ls.rds")
result_txt <- result_ls%>%str_split(.,"收藏")
result_txt[50:52]%>%unlist()
saveRDS(link_ls, "data/link_ls.rds")
format(object.size(link_ls),units = "MB")
result_ls[40000]
remDr$close()resuk
|
b1cd5915cb20e6972c8420294716a827cb4e6168 | 37c76010ded6d1fc4448ce417d9a5648a4e1cdee | /man/scale_fill_jpregion.Rd | 4622908bf67c71c689135e4402d1b26650b81587 | [
"CC-BY-4.0"
] | permissive | uribo/tabularmaps | d51927fea6679aa89d8490df37f351591759fc83 | 7fd7251eec59def15a1853bf18294772452b9989 | refs/heads/master | 2022-12-02T03:28:15.633293 | 2020-08-06T03:19:40 | 2020-08-06T03:19:40 | 251,153,664 | 8 | 1 | NOASSERTION | 2020-04-05T02:18:44 | 2020-03-29T23:11:21 | R | UTF-8 | R | false | true | 512 | rd | scale_fill_jpregion.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tabularmap.R
\name{scale_fill_jpregion}
\alias{scale_fill_jpregion}
\title{Coloring the tabularmaps by region in Japan}
\usage{
scale_fill_jpregion(lang, ...)
}
\arguments{
\item{lang}{Select whether the region variable is Japanese (\code{jp}) or English (\code{en}).}
\item{...}{all other arguments passed on to \code{\link[ggplot2:scale_manual]{ggplot2::scale_fill_manual()}}}
}
\description{
Custom ggplot2 scale for tabulamap.
}
|
8464890929953ba8c277b42f0c2c569d26c39e43 | 6e32987e92e9074939fea0d76f103b6a29df7f1f | /googleautomlv1.auto/man/AnnotationSpec.Rd | 8b1026e3f5c4889713c32d68fdc6df066053d0fb | [] | no_license | justinjm/autoGoogleAPI | a8158acd9d5fa33eeafd9150079f66e7ae5f0668 | 6a26a543271916329606e5dbd42d11d8a1602aca | refs/heads/master | 2023-09-03T02:00:51.433755 | 2023-08-09T21:29:35 | 2023-08-09T21:29:35 | 183,957,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 573 | rd | AnnotationSpec.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/automl_objects.R
\name{AnnotationSpec}
\alias{AnnotationSpec}
\title{AnnotationSpec Object}
\usage{
AnnotationSpec(exampleCount = NULL, name = NULL, displayName = NULL)
}
\arguments{
\item{exampleCount}{Output only}
\item{name}{Output only}
\item{displayName}{Required}
}
\value{
AnnotationSpec object
}
\description{
AnnotationSpec Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A definition of an annotation spec.
}
\concept{AnnotationSpec functions}
|
495c8ca3f35cfa300616338d3e459c72412edfdf | 1b1241229f6386662bef24be67ca94e0ac8f7ca5 | /R/legisListWordPress.R | 0831a4695a9f053f343d24fa8c846a3805ee7969 | [] | no_license | rafaeldjacome/CongressoAberto | c928815bef71008ffadefc7d2ea1d07fd75129a1 | a4785785cb37e8095893dc411f0a030a57fd30f8 | refs/heads/master | 2020-03-18T00:33:50.482718 | 2010-04-14T14:46:51 | 2010-04-14T14:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,011 | r | legisListWordPress.R | rf <- function(x=NULL) {
if (.Platform$OS.type!="unix") {
run.from <- "C:/reps/CongressoAberto"
} else {
run.from <- "~/reps/CongressoAberto"
}
## side effect: load functions
source(paste(run.from,"/R/caFunctions.R",sep=""))
if (is.null(x)) {
run.from
} else {
paste(run.from,"/",x,sep='')
}
}
source(rf("R/wordpress.R"))
connect.db()
connect.wp()
## all deps
##dall <- dbGetQueryU(connect,"SELECT bioid FROM br_bio")[,1]
## ## what are the current deps?
## dnow <- dbGetQueryU(connect,"SELECT b.namelegis as Nome, upper(a.state) as Estado, a.party as Partido, round(c.ausente_prop*100) `% faltas no ultimo ano`,
## postid
## FROM
## br_deputados_current as a,
## br_bio as b,
## br_ausencias as c,
## br_bioidpostid as d
## WHERE a.bioid=b.bioid and a.bioid=c.bioid and a.bioid=d.bioid
## ")
## add parent page (deputados)
pname <- "Deputados"
pdeps <- wpAddByTitle(conwp,post_title=pname,
post_content='<?php include_once("php/legislist.php"); ?>')
|
d398e00bab9407afe0fa7a018a51198c8562d9ba | 2848fe85a2077683826f4206cccdb023830c14ef | /tests/testthat/test-triangles.R | 1f35d25ec977cb35e112a895182004cd0e84697b | [] | no_license | mpadge/silicate | c5f629eb562eea53210a805f05dd4dfe019a9d4c | df0118208df26c282f31fed5fe0e07ef2d04085e | refs/heads/master | 2021-01-24T08:08:12.416339 | 2019-02-22T00:52:47 | 2019-02-22T00:52:47 | 122,969,382 | 0 | 0 | null | 2018-02-26T12:51:18 | 2018-02-26T12:51:17 | null | UTF-8 | R | false | false | 329 | r | test-triangles.R | context("test-triangles")
pts <- structure(c(5L, 3L, 1L, 4L, 4L, 8L, 6L, 9L), .Dim = c(4L, 2L))
tri <- c(2, 1, 3, 2, 4, 1)
a <- tri_area(pts[tri, ])
test_that("triangle tools works", {
expect_equal(a, c(6, 3))
expect_equal(tri_ix(pts[tri, ]), c(3, 1, 2, 6, 4, 5))
expect_equal(tri_jx(pts[tri, ]), c(1, 2, 3, 4, 5, 6))
})
|
e1dd549a6a877100d36a8cc79bbbf628b8b8a6b6 | 0ee50dd399127ebe38bc8f5197114d46d717ccd7 | /R/combatbaaddon.R | 1371413e8bf831f062ec3d48a290ab042cfbb577 | [] | no_license | cran/bapred | 0051d57056f886e6df028255f0c85339c70d66f1 | e24720be3c6f82c2d5422ae97a8f12f5edc15adc | refs/heads/master | 2022-07-07T03:06:58.840921 | 2022-06-22T07:20:13 | 2022-06-22T07:20:13 | 48,076,867 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,466 | r | combatbaaddon.R | combatbaaddon <-
function(params, x, batch) {
if(any(is.na(x)))
stop("Data contains missing values.")
if(!is.factor(batch))
stop("'batch' has to be of class 'factor'.")
if(!is.matrix(x))
stop("'x' has to be of class 'matrix'.")
if(!inherits(params, "combat"))
stop("Input parameter 'params' has to be of class 'combat'.")
if(ncol(params$xadj) != ncol(x))
stop("Number of variables in test data matrix different to that of training data matrix.")
mod = matrix(nrow=length(batch), ncol=1, data=batch)
colnames(mod)[ncol(mod)] = "Batch"
design <- design.mat(mod,numCov = NULL)
batches <- list.batch(mod)
n.batch <- length(batches)
n.batches <- sapply(batches, length)
n.array <- sum(n.batches)
##standardize Data across genes
B.hat <- solve(t(design)%*%design)%*%t(design)%*%as.matrix(x)
meanoverall <- matrix(nrow=ncol(x), ncol=n.array, data=params$meanoverall)
if(!is.null(design)){tmp <- design;tmp[,c(1:n.batch)] <- 0;meanoverall <- meanoverall+t(tmp%*%B.hat)}
s.data <- (t(x)-meanoverall)/(sqrt(params$var.pooled)%*%t(rep(1,n.array)))
##Get regression batch effect parameters
# cat("Fitting L/S model and finding priors\n")
batch.design <- design[,1:n.batch, drop=FALSE]
gamma.hat <- solve(t(batch.design)%*%batch.design)%*%t(batch.design)%*%t(as.matrix(s.data))
delta.hat <- NULL
for (i in batches){
delta.hat <- rbind(delta.hat,apply(s.data[,i], 1, var,na.rm=T))
}
##Find Priors
gamma.bar <- apply(gamma.hat, 1, mean)
t2 <- apply(gamma.hat, 1, var)
a.prior <- apply(delta.hat, 1, aprior)
b.prior <- apply(delta.hat, 1, bprior)
##Find EB batch adjustments
gamma.star <- delta.star <- NULL
# cat("Finding parametric adjustments\n")
for (i in 1:n.batch){
temp <- it.sol(s.data[,batches[[i]]],gamma.hat[i,],
delta.hat[i,],gamma.bar[i],t2[i],a.prior[i],b.prior[i])
gamma.star <- rbind(gamma.star,temp[1,])
delta.star <- rbind(delta.star,temp[2,])
}
### Normalize the Data ###
# cat("Adjusting the Data\n")
bayesdata <- s.data
j <- 1
for (i in 1:length(batches)){
id = batches[[i]]
bayesdata[,id] <- (bayesdata[,id]-t(batch.design[id,]%*%gamma.star))/(sqrt(delta.star[j,])%*%t(rep(1,n.batches[j])))
j <- j+1
}
bayesdata <- (bayesdata*(sqrt(params$var.pooled)%*%t(rep(1,n.array))))+meanoverall
xadj <- t(bayesdata)
return(xadj)
}
|
5ef38c7e0f0df88a8dea5e13d5cb762642259ed0 | 8e1002933c5e79a33bc1aff1d216696aa2f96e68 | /Particle_Tracking_subcode/Making_depth_layers.R | 04405a6c9eb773fdfe8721bdad9b002112d247de | [
"MIT"
] | permissive | Christopher-Blackford/ParticleTracking | 34432a0e58eb453c04c222d6d186200cef01f742 | 5e4917b88e6bdd87753bc8157244c2767c61a5e9 | refs/heads/master | 2020-03-13T02:25:58.543461 | 2020-01-13T20:36:30 | 2020-01-13T20:36:30 | 84,609,475 | 0 | 1 | null | 2017-03-15T18:50:50 | 2017-03-10T23:32:45 | R | UTF-8 | R | false | false | 743 | r | Making_depth_layers.R | #Making_depth_layers
#Making the depth layers from released points
# Tag each release polygon with the habitat/depth class currently being
# processed and export the matching subset of the polygon layer as an
# ESRI shapefile (one shapefile per depth class).
# NOTE(review): this fragment relies on objects created elsewhere in the
# enclosing script -- Released_larvae (a Spatial*DataFrame), the vector
# Habitat_classes_names, the loop index i, and the polygon layer ConPoly --
# confirm they are in scope when this is sourced.

# Attribute table of the released larvae points.
Depth_class_dataframe <- Released_larvae@data
# Label every row with the current depth class name.
Depth_class_dataframe$Depth_class <- Habitat_classes_names[i]
# Keep only the polygon ID / depth class pairing.
Depth_class_dataframe <- Depth_class_dataframe[,c("Poly_ID", "Depth_class")]
# Collapse to one row per polygon (dplyr::distinct keeps the first occurrence).
Depth_class_dataframe <- dplyr::distinct(Depth_class_dataframe, Poly_ID, .keep_all = TRUE)
# all.x = FALSE drops polygons that have no points in this depth class.
Unif_depth_class <- sp::merge(ConPoly, Depth_class_dataframe, by = "Poly_ID", all.x = FALSE)
# Write the per-class shapefile; output directory is hard-coded to a
# network drive -- TODO consider parameterizing the path.
writeOGR(Unif_depth_class, dsn = paste0("K:/Christopher_PhD/CH1_MPA/Displaying_study_region/CH1/Depth_class_maps/Depth_class_10km/", Habitat_classes_names[i]),
         layer = paste0(Habitat_classes_names[i]), driver = "ESRI Shapefile",
         verbose = TRUE, overwrite = TRUE, morphToESRI = TRUE)
|
47ffa1600c95f818410473afa9e7be3743394601 | 8632eab9c0d5c3aaa1d0aef69bef8458f49e1c30 | /R/ggsurvplot.R | 53acba7b7981f5ea43604b2cd6b4b61126c1a5e7 | [] | no_license | AlKavaev/survminer | 79f85f32b4a571a0a14b8dc00e80f8da9accfb03 | 76e822a303b2c008cb2a55e6bf093245fc3c4df7 | refs/heads/master | 2021-01-19T09:03:38.209274 | 2017-04-07T15:16:21 | 2017-04-07T15:19:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 44,428 | r | ggsurvplot.R | #' @include utilities.R theme_classic2.R surv_summary.R
#' @importFrom methods is
#' @importFrom stats pchisq
#' @importFrom survMisc ten comp
#' @importFrom utils capture.output
NULL
#'Drawing Survival Curves Using ggplot2
#'@description Drawing survival curves using ggplot2
#'@param fit an object of class survfit.
#'@param data a dataset used to fit survival curves. If not supplied then data
#' will be extracted from 'fit' object.
#'@param fun an arbitrary function defining a transformation of the survival
#' curve. Often used transformations can be specified with a character
#' argument: "event" plots cumulative events (f(y) = 1-y), "cumhaz" plots the
#' cumulative hazard function (f(y) = -log(y)), and "pct" for survival
#' probability in percentage.
#'@param surv.scale scale transformation of survival curves. Allowed values are
#' "default" or "percent".
#'@param xscale numeric or character value specifying x-axis scale. \itemize{
#' \item If numeric, the value is used to divide the labels on the x axis. For
#' example, a value of 365.25 will give labels in years instead of the original
#' days. \item If character, allowed options include one of c("d_m", "d_y",
#' "m_d", "m_y", "y_d", "y_m"), where d = days, m = months and y = years. For
#' example, xscale = "d_m" will transform labels from days to months; xscale =
#' "m_y", will transform labels from months to years.}
#'@param color color to be used for the survival curves. \itemize{ \item If the
#' number of strata/group (n.strata) = 1, the expected value is the color name.
#' For example color = "blue". \item If n.strata > 1, the expected value is the
#' grouping variable name. By default, survival curves are colored by strata
#' using the argument color = "strata", but you can also color survival curves
#' by any other grouping variables used to fit the survival curves. In this
#' case, it's possible to specify a custom color palette by using the argument
#' palette.}
#'@param palette the color palette to be used. Allowed values include "hue" for
#' the default hue color scale; "grey" for grey color palettes; brewer palettes
#' e.g. "RdBu", "Blues", ...; or custom color palette e.g. c("blue", "red").
#' See details section for more information.
#'@param linetype line types. Allowed values includes i) "strata" for changing
#' linetypes by strata (i.e. groups); ii) a numeric vector (e.g., c(1, 2)) or a
#' character vector c("solid", "dashed").
#'@param break.time.by numeric value controlling time axis breaks. Default value
#' is NULL.
#'@param break.x.by alias of break.time.by. Numeric value controlling x axis
#' breaks. Default value is NULL.
#'@param break.y.by same as break.x.by but for y axis.
#'@param conf.int logical value. If TRUE, plots confidence interval.
#'@param conf.int.fill fill color to be used for confidence interval.
#'@param conf.int.style confidence interval style. Allowed values include
#' c("ribbon", "step").
#'@param censor logical value. If TRUE, censors will be drawn.
#'@param censor.shape character or numeric value specifying the point shape of
#' censors. Default value is "+" (3), a sensible choice is "|" (124).
#'@param censor.size numeric value specifying the point size of censors. Default is 4.5.
#'@param pval logical value. If TRUE, the p-value is added on the plot.
#'@param pval.size numeric value specifying the p-value text size. Default is 5.
#'@param pval.coord numeric vector, of length 2, specifying the x and y
#' coordinates of the p-value. Default values are NULL.
#'@param title,xlab,ylab main title and axis labels
#'@param xlim,ylim x and y axis limits e.g. xlim = c(0, 1000), ylim = c(0, 1).
#'@param legend character specifying legend position. Allowed values are one of
#' c("top", "bottom", "left", "right", "none"). Default is "top" side position.
#' to remove the legend use legend = "none". Legend position can be also
#' specified using a numeric vector c(x, y); see details section.
#'@param legend.title legend title.
#'@param legend.labs character vector specifying legend labels. Used to replace
#' the names of the strata from the fit. Should be given in the same order as
#' those strata.
#'@param risk.table Allowed values include: \itemize{ \item TRUE or FALSE
#' specifying whether to show or not the risk table. Default is FALSE. \item
#' "absolute" or "percentage": to show the \bold{absolute number} and the
#' \bold{percentage} of subjects at risk by time, respectively. Use i)
#' "abs_pct" to show both absolute number and percentage. ii) "nrisk_cumcensor"
#' and "nrisk_cumevents" to show the number at risk and, the cumulative number
#' of censoring and events, respectively. }
#'
#'@param risk.table.title The title to be used for the risk table.
#'@param risk.table.pos character vector specifying the risk table position.
#' Allowed options are one of c("out", "in") indicating 'outside' or 'inside'
#' the main plot, respectively. Default value is "out".
#'@param risk.table.col same as tables.col but for risk table only.
#'@param risk.table.fontsize,fontsize font size to be used for the risk table
#' and the cumulative events table.
#'@param risk.table.y.text logical. Default is TRUE. If FALSE, risk table y axis
#' tick labels will be hidden.
#'@param risk.table.y.text.col logical. Default value is FALSE. If TRUE, risk
#' table tick labels will be colored by strata.
#'@param tables.height numeric value (in [0 - 1]) specifying the general height
#' of all tables under the main survival plot.
#'@param tables.y.text logical. Default is TRUE. If FALSE, the y axis tick
#' labels of tables will be hidden.
#'@param tables.col color to be used for all tables under the main plot. Default
#' value is "black". If you want to color by strata (i.e. groups), use
#' tables.col = "strata".
#'@param tables.theme function, ggplot2 theme name. Default value is
#' \link{theme_survminer}. Allowed values include ggplot2 official themes: see
#' \code{\link[ggplot2]{theme}}.
#'@param risk.table.height the height of the risk table on the grid. Increase
#' the value when you have many strata. Default is 0.25. Ignored when
#' risk.table = FALSE.
#'@param surv.plot.height the height of the survival plot on the grid. Default
#' is 0.75. Ignored when risk.table = FALSE. \code{1-risk.table.height -
#' ncensor.plot.height} when \code{risk.table = TRUE} and \code{ncensor.plot =
#' TRUE}
#'@param ncensor.plot logical value. If TRUE, the number of censored subjects at
#' time t is plotted. Default is FALSE. Ignored when cumcensor = TRUE.
#'@param ncensor.plot.title The title to be used for the censor plot. Used when
#' \code{ncensor.plot = TRUE}.
#'@param ncensor.plot.height The height of the censor plot. Used when
#' \code{ncensor.plot = TRUE}.
#'@param cumevents logical value specifying whether to show or not the table of
#' the cumulative number of events. Default is FALSE.
#'@param cumevents.title The title to be used for the cumulative events table.
#'@param cumevents.col same as tables.col but for the cumulative events table
#' only.
#'@param cumevents.y.text logical. Default is TRUE. If FALSE, the y axis tick
#' labels of the cumulative events table will be hidden.
#'@param cumevents.y.text.col logical. Default value is FALSE. If TRUE, the y
#' tick labels of the cumulative events will be colored by strata.
#'@param cumevents.height the height of the cumulative events table on the grid.
#' Default is 0.25. Ignored when cumevents = FALSE.
#'@param cumcensor logical value specifying whether to show or not the table of
#' the cumulative number of censoring. Default is FALSE.
#'@param cumcensor.title The title to be used for the cumcensor table.
#'@param cumcensor.col same as tables.col but for cumcensor table only.
#'@param cumcensor.y.text logical. Default is TRUE. If FALSE, the y axis tick
#' labels of the cumcensor table will be hidden.
#'@param cumcensor.y.text.col logical. Default value is FALSE. If TRUE, the y
#' tick labels of the cumcensor will be colored by strata.
#'@param cumcensor.height the height of the cumcensor table on the grid. Default
#' is 0.25. Ignored when cumcensor = FALSE.
#'@param surv.median.line character vector for drawing a horizontal/vertical
#' line at median survival. Allowed values include one of c("none", "hv", "h",
#' "v"). v: vertical, h:horizontal.
#'@param ggtheme function, ggplot2 theme name. Default value is
#' \link{theme_survminer}. Allowed values include ggplot2 official themes: see
#' \code{\link[ggplot2]{theme}}.
#'@param ... other arguments to be passed i) to ggplot2 geom_*() functions such
#' as linetype, size, ii) or to the function ggpubr::ggpar() for customizing
#' the plots. See details section.
#'@param log.rank.weights The name for the type of weights to be used in
#' computing the p-value for log-rank test. By default \code{survdiff} is used
#' to calculate regular log-rank test (with weights == 1). A user can specify
#' \code{"1", "n", "sqrtN", "S1", "S2", "FH"} to use weights specified in
#' \link[survMisc]{comp}, so that weight correspond to the test as : 1 -
#' log-rank, n - Gehan-Breslow (generalized Wilcoxon), sqrtN - Tarone-Ware, S1
#' - Peto-Peto's modified survival estimate, S2 - modified Peto-Peto (by
#' Andersen), FH - Fleming-Harrington(p=1, q=1).
#'@param pval.method whether to add a text with the test name used for
#' calculating the pvalue, that corresponds to survival curves' comparison -
#' used only when \code{pval=TRUE}
#'@param pval.method.size the same as \code{pval.size} but for displaying
#' \code{log.rank.weights} name
#'@param pval.method.coord the same as \code{pval.coord} but for displaying
#' \code{log.rank.weights} name
#'@details \itemize{ \item \strong{legend position}: The argument
#' \strong{legend} can be also a numeric vector c(x,y). In this case it is
#' possible to position the legend inside the plotting area. x and y are the
#' coordinates of the legend box. Their values should be between 0 and 1.
#' c(0,0) corresponds to the "bottom left" and c(1,1) corresponds to the "top
#' right" position. For instance use legend = c(0.8, 0.2).\cr \item
#' \strong{Color palettes}: The argument \strong{palette} can be used to
#' specify the color to be used for each group. By default, the first color in
#' the palette is used to color the first level of the factor variable. This
#' default behavior can be changed by assigning correctly a named vector. That
#' is, the names of colors should match the strata names as generated by the
#' \code{ggsurvplot()} function in the legend.\cr \item \strong{Customizing the
#' plots}: The plot can be easily customized using additional arguments to be
#' passed to the function ggpar(). Read ?ggpubr::ggpar. These arguments include
#' \emph{font.title, font.subtitle, font.caption, font.x, font.y, font.tickslab
#' and font.legend}: a vector of length 3 indicating respectively the size
#' (e.g.: 14), the style (e.g.: "plain", "bold", "italic", "bold.italic") and
#' the color (e.g.: "red") of main title, subtitle, caption, xlab and ylab,
#' axis tick labels and legend, respectively. For example \emph{font.x = c(14,
#' "bold", "red")}. Use font.x = 14, to change only font size; or use font.x =
#' "bold", to change only font face.}
#'
#'@return return an object of class ggsurvplot which is list containing the
#' following components: \itemize{ \item plot: the survival plot (ggplot
#' object) \item table: the number of subjects at risk table per time (ggplot
#' object). \item cumevents: the cumulative number of events table (ggplot
#' object). \item ncensor.plot: the number of censoring (ggplot object). \item
#' data.survplot: the data used to plot the survival curves (data.frame). \item
#' data.survtable: the data used to plot the tables under the main survival
#' curves (data.frame). }
#'
#'@author Alboukadel Kassambara, \email{alboukadel.kassambara@@gmail.com}
#' @examples
#'
#'#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#'# Example 1: Survival curves with two groups
#'#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#'
#'# Fit survival curves
#'#++++++++++++++++++++++++++++++++++++
#'require("survival")
#'fit<- survfit(Surv(time, status) ~ sex, data = lung)
#'
#'# Basic survival curves
#'ggsurvplot(fit, data = lung)
#'
#'# Customized survival curves
#'ggsurvplot(fit, data = lung,
#' surv.median.line = "hv", # Add medians survival
#'
#' # Change legends: title & labels
#' legend.title = "Sex",
#' legend.labs = c("Male", "Female"),
#' # Add p-value and confidence intervals
#' pval = TRUE,
#'
#' conf.int = TRUE,
#' # Add risk table
#' risk.table = TRUE,
#' tables.height = 0.2,
#' tables.theme = theme_cleantable(),
#'
#' # Color palettes. Use custom color: c("#E7B800", "#2E9FDF"),
#' # or brewer color (e.g.: "Dark2"), or ggsci color (e.g.: "jco")
#' palette = c("#E7B800", "#2E9FDF"),
#' ggtheme = theme_bw() # Change ggplot2 theme
#')
#'
#'# Change font size, style and color
#'#++++++++++++++++++++++++++++++++++++
#'\dontrun{
#' # Change font size, style and color at the same time
#' ggsurvplot(fit, data = lung, main = "Survival curve",
#' font.main = c(16, "bold", "darkblue"),
#' font.x = c(14, "bold.italic", "red"),
#' font.y = c(14, "bold.italic", "darkred"),
#' font.tickslab = c(12, "plain", "darkgreen"))
#'}
#'
#'
#'
#'#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#'# Example 2: Facet ggsurvplot() output by
#' # a combination of factors
#'#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#'
#'# Fit (complexe) survival curves
#'#++++++++++++++++++++++++++++++++++++
#' \dontrun{
#'require("survival")
#'fit3 <- survfit( Surv(time, status) ~ sex + rx + adhere,
#' data = colon )
#'
#'# Visualize
#'#++++++++++++++++++++++++++++++++++++
#' ggsurv <- ggsurvplot(fit3, data = colon,
#' fun = "cumhaz", conf.int = TRUE,
#' risk.table = TRUE, risk.table.col="strata",
#' ggtheme = theme_bw())
#'
#' # Faceting survival curves
#' curv_facet <- ggsurv$plot + facet_grid(rx ~ adhere)
#' curv_facet
#'
#' # Faceting risk tables:
#' # Generate risk table for each facet plot item
#' ggsurv$table + facet_grid(rx ~ adhere, scales = "free")+
#' theme(legend.position = "none")
#'
#' # Generate risk table for each facet columns
#' tbl_facet <- ggsurv$table + facet_grid(.~ adhere, scales = "free")
#' tbl_facet + theme(legend.position = "none")
#'
#' # Arrange faceted survival curves and risk tables
#' g2 <- ggplotGrob(curv_facet)
#' g3 <- ggplotGrob(tbl_facet)
#' min_ncol <- min(ncol(g2), ncol(g3))
#' g <- gridExtra::rbind.gtable(g2[, 1:min_ncol], g3[, 1:min_ncol], size="last")
#' g$widths <- grid::unit.pmax(g2$widths, g3$widths)
#' grid::grid.newpage()
#' grid::grid.draw(g)
#'
#'}
#'
#'@describeIn ggsurvplot Draws survival curves using ggplot2.
#'@export
ggsurvplot <- function(fit, data = NULL, fun = NULL,
                       color = NULL, palette = NULL, linetype = 1,
                       break.x.by = NULL, break.y.by = NULL, break.time.by = NULL,
                       surv.scale = c("default", "percent"), xscale = 1,
                       conf.int = FALSE, conf.int.fill = "gray", conf.int.style = "ribbon",
                       censor = TRUE, censor.shape = "+", censor.size = 4.5,
                       pval = FALSE, pval.size = 5, pval.coord = c(NULL, NULL),
                       pval.method = FALSE, pval.method.size = pval.size, pval.method.coord = c(NULL, NULL),
                       log.rank.weights = c("survdiff", "1", "n", "sqrtN", "S1", "S2", "FH_p=1_q=1"),
                       title = NULL, xlab = "Time", ylab = "Survival probability",
                       xlim = NULL, ylim = NULL,
                       legend = c("top", "bottom", "left", "right", "none"),
                       legend.title = "Strata", legend.labs = NULL,
                       tables.height = 0.25, tables.y.text = TRUE, tables.col = "black",
                       risk.table = FALSE, risk.table.pos = c("out", "in"), risk.table.title = NULL,
                       risk.table.col = tables.col, risk.table.fontsize = 4.5, fontsize = 4.5,
                       risk.table.y.text = tables.y.text,
                       risk.table.y.text.col = TRUE,
                       risk.table.height = tables.height, surv.plot.height = 0.75,
                       ncensor.plot.height = tables.height, cumevents.height = tables.height,
                       cumcensor.height = tables.height,
                       ncensor.plot = FALSE,
                       ncensor.plot.title = NULL,
                       cumevents = FALSE, cumevents.col = tables.col, cumevents.title = NULL,
                       cumevents.y.text = tables.y.text, cumevents.y.text.col = TRUE,
                       cumcensor = FALSE, cumcensor.col = tables.col, cumcensor.title = NULL,
                       cumcensor.y.text = tables.y.text, cumcensor.y.text.col = TRUE,
                       surv.median.line = c("none", "hv", "h", "v"),
                       ggtheme = theme_survminer(),
                       tables.theme = ggtheme,
                       ...
                       ){
  # Validate inputs and normalize enumerated arguments
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  if(!inherits(fit, "survfit"))
    stop("Can't handle an object of class ", class(fit))
  # Curve line size: default 1 unless supplied through `...`.
  size <- ifelse(is.null(list(...)$size), 1, list(...)$size)
  # `legend` may be a numeric c(x, y) position; only match.arg when it is a keyword.
  if(!is(legend, "numeric")) legend <- match.arg(legend)
  surv.median.line <- match.arg(surv.median.line)
  stopifnot(log.rank.weights %in% c("survdiff", "1", "n", "sqrtN", "S1", "S2","FH_p=1_q=1"))
  log.rank.weights <- match.arg(log.rank.weights)
  if(!is.numeric(xscale) & !(xscale %in% c("d_m", "d_y", "m_d", "m_y", "y_d", "y_m")))
    stop('xscale should be numeric or one of c("d_m", "d_y", "m_d", "m_y", "y_d", "y_m").')
  # Make sure that user can do either ncensor.plot or cumcensor
  # But not both
  if(ncensor.plot & cumcensor){
    warning("Both ncensor.plot and cumsensor are TRUE.",
            "In this case, we consider only cumcensor.", call. = FALSE)
    ncensor.plot <- FALSE
  }
  if(cumcensor) ncensor.plot.height <- cumcensor.height
  # Default titles for the companion tables/plots.
  if(is.null(ncensor.plot.title))
    ncensor.plot.title <- "Number of censoring"
  if(is.null(cumcensor.title))
    cumcensor.title <- "Cumulative number of censoring"
  if(is.null(cumevents.title))
    cumevents.title <- "Cumulative number of events"
  # Adapt ylab value according to the value of the argument fun
  ylab <- .check_ylab(ylab, fun)
  # Check and get linetypes
  lty <- .get_lty(linetype)
  linetype <- lty$lty
  linetype.manual <- lty$lty.manual
  # Check legend
  .check_legend_labs(fit, legend.labs)
  # risk.table argument: may encode both whether to display and which type.
  risk.table.pos <- match.arg(risk.table.pos)
  risktable <- .parse_risk_table_arg(risk.table)
  risk.table <- risktable$display
  risk.table.type <- risktable$type
  # Remaining `...` arguments are forwarded to geom layers and ggpubr::ggpar().
  extra.params <- list(...)

  # Data
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  # data used to compute survfit
  data <- .get_data(fit, data = data)
  # Data for survival plot
  d <- surv_summary(fit, data = data)

  # Number of strata and strata names
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  .strata <- d$strata
  # Multiple groups
  if (!is.null(.strata)){
    strata_names <- levels(.strata)
    n.strata <- length(strata_names)
    if(is.null(legend.labs)) legend.labs <- strata_names
    if(missing(color)) color <- "strata"
  }
  # One group: synthesize a single "All" stratum so downstream code is uniform.
  else{
    n.strata <- 1
    if (is.null(legend.labs)) {
      .strata <- as.factor(rep("All", nrow(d)))
      legend.labs <- strata_names <- "All"
    }
    else {
      .strata <- as.factor(rep(legend.labs, nrow(d)))
      strata_names <- legend.labs
    }
    if(missing(conf.int)) conf.int = TRUE
    if(missing(color)) color <- "black"
  }
  d$strata <- .strata

  # Connect surv data to the origin for plotting
  # time = 0, surv = 1
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  # Skipped for cloglog, where time = 0 cannot be shown on a log x axis.
  if(!.is_cloglog(fun)) d <- .connect2origin(d, fit, data)

  # Transformation of the survival curve
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  d <- .apply_surv_func(d, fun = fun)

  # Scale transformation
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  surv.scale <- match.arg(surv.scale)
  scale_labels <- ggplot2::waiver()
  if (surv.scale == "percent") scale_labels <- scales::percent
  xlog <- .is_cloglog(fun)
  y.breaks <- ggplot2::waiver()
  if(!is.null(break.y.by)) y.breaks <- seq(0, 1, by = break.y.by)

  # Axis limits
  xmin <- ifelse(.is_cloglog(fun), min(c(1, d$time)), 0)
  if(is.null(xlim)) xlim <- c(xmin, max(d$time))
  if(is.null(ylim) & is.null(fun)) ylim <- c(0, 1)

  # Drawing survival curves
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  d$strata <- factor(d$strata, levels = strata_names, labels = legend.labs)
  d <- d[order(d$strata), , drop = FALSE]
  # Color either by an explicit column of d, by strata, or by a fixed color.
  if(color %in% colnames(d)) surv.color <- color
  else surv.color <- ifelse(n.strata > 1, "strata", color)
  #surv.color <- color
  p <- ggplot2::ggplot(d, ggplot2::aes_string("time", "surv")) +
    ggpubr::geom_exec(ggplot2::geom_step, data = d, size = size, color = surv.color, linetype = linetype, ...) +
    ggplot2::scale_y_continuous(breaks = y.breaks, labels = scale_labels, limits = ylim) +
    ggplot2::coord_cartesian(xlim = xlim)+
    ggtheme

  p <- ggpubr::ggpar(p, palette = palette, ...)

  # X-axis breaks and (possibly rescaled) tick labels.
  if(!is.null(break.x.by)) break.time.by <- break.x.by
  times <- .get_default_breaks(d$time, .log = xlog)
  if(!is.null(break.time.by) & !xlog) times <- seq(0, max(c(d$time, xlim)), by = break.time.by)
  xticklabels <- .format_xticklabels(labels = times, xscale = xscale)
  if(!.is_cloglog(fun)) {
    p <- p + ggplot2::scale_x_continuous(breaks = times, labels = xticklabels) +
      ggplot2::expand_limits(x = 0, y = 0)
  }
  else p <- p + ggplot2::scale_x_continuous(breaks = times, trans = "log10", labels = xticklabels)

  # Add confidence interval
  if(conf.int){
    if(missing(conf.int.fill)) conf.int.fill <- surv.color
    if(conf.int.style == "ribbon"){
      p <- p + ggpubr::geom_exec(.geom_confint, data = d,
                                 ymin = "lower", ymax = "upper",
                                 fill = conf.int.fill, alpha = 0.3, na.rm = TRUE)
    }
    else if(conf.int.style == "step"){
      p <- p + ggpubr::geom_exec(ggplot2::geom_step, data = d,
                                 y = "lower", linetype = "dashed",
                                 color = surv.color, na.rm = TRUE)+
        ggpubr::geom_exec(ggplot2::geom_step, data = d,
                          y = "upper", linetype = "dashed",
                          color = surv.color, na.rm = TRUE)
    }
  }

  # Add censored points (rows with at least one censored subject)
  if (censor & any(d$n.censor >= 1)) {
    p <- p + ggpubr::geom_exec(ggplot2::geom_point, data = d[d$n.censor > 0, , drop = FALSE],
                               colour = surv.color, size = censor.size, shape = censor.shape)
  }

  # Add pvalue
  # `pval` may be logical (compute it) or numeric (display as given).
  if(pval){
    if(!is.numeric(pval) & !is.null(fit$strata)) pval <- .get_pvalue(fit, method = log.rank.weights, data = data)
    else if(is.numeric(pval)) pval <- list(val = pval, method = "")
    pvaltxt <- ifelse(pval$val < 1e-04, "p < 0.0001",
                      paste("p =", signif(pval$val, 2)))
    pval.x <- ifelse(is.null(pval.coord[1]), max(fit$time)/50, pval.coord[1])
    pval.y <- ifelse(is.null(pval.coord[2]), 0.2, pval.coord[2])
    p <- p + ggplot2::annotate("text", x = pval.x, y = pval.y,
                               label = pvaltxt, size = pval.size, hjust = 0)
    if(pval.method){
      pvalmethod <- pval$method
      pval.method.x <- ifelse(is.null(pval.method.coord[1]), max(fit$time)/50, pval.method.coord[1])
      pval.method.y <- ifelse(is.null(pval.method.coord[2]), 0.3, pval.method.coord[2])
      p <- p + ggplot2::annotate("text", x = pval.method.x, y = pval.method.y,
                                 label = pvalmethod, size = pval.method.size, hjust = 0)
    }
  }

  # Drawing a horizontal line at 50% survival
  #if(surv.scale == "percent") fun <- "pct"
  if(surv.median.line %in% c("hv", "h", "v"))
    p <- .add_surv_median(p, fit, type = surv.median.line, fun = fun, data = data)

  # Axis label and legend title
  lty.leg.title <- ifelse(linetype == "strata", legend.title, linetype)
  p <- p + ggplot2::labs(x = xlab, y = ylab, title = title,
                         color = legend.title, fill = legend.title,
                         linetype = lty.leg.title
  )
  p <- .set_general_gpar(p, legend = legend, ...) # general graphical parameters
  if(!is.null(linetype.manual)) p <- p + scale_linetype_manual(values = linetype.manual)

  res <- list(plot = p)

  # Extract strata colors used in survival curves
  # Will be used to color the y.text of risk table and cumevents table
  if(risk.table | cumevents | cumcensor | ncensor.plot){
    g <- ggplot_build(p)
    scurve_cols <- unlist(unique(g$data[[1]]["colour"]))
    if(length(scurve_cols)==1) scurve_cols <- rep(scurve_cols, length(legend.labs))
    names(scurve_cols) <- legend.labs # Give every color an appropriate name
  }

  # Add risk table
  if(risk.table){
    if(risk.table.pos == "in") risk.table.col = surv.color
    risktable <- ggrisktable(fit, data = data, type = risk.table.type, color = risk.table.col,
                             palette = palette, break.time.by = break.time.by,
                             xlim = xlim, title = risk.table.title,
                             legend = legend, legend.title = legend.title, legend.labs = legend.labs,
                             y.text = risk.table.y.text, y.text.col = risk.table.y.text.col,
                             fontsize = risk.table.fontsize, ggtheme = ggtheme,
                             xlab = xlab, ylab = legend.title, xlog = xlog, xscale = xscale,
                             ...)
    risktable <- risktable + tables.theme
    if(!risk.table.y.text) risktable <- .set_large_dash_as_ytext(risktable)
    # color risk.table ticks by strata
    if(risk.table.y.text.col)
      risktable <- risktable + theme(axis.text.y = element_text(colour = rev(scurve_cols)))
    res$table <- risktable
  }

  # Add the cumulative number of events
  if(cumevents){
    res$cumevents <- ggcumevents (fit, data = data, color = cumevents.col,
                                  palette = palette, break.time.by = break.time.by,
                                  xlim = xlim, title = cumevents.title,
                                  legend = legend, legend.title = legend.title, legend.labs = legend.labs,
                                  y.text = cumevents.y.text, y.text.col = cumevents.y.text.col,
                                  fontsize = fontsize, ggtheme = ggtheme, xlab = xlab, ylab = legend.title,
                                  xlog = xlog, xscale = xscale, ...)
    res$cumevents <- res$cumevents + tables.theme
    if(!cumevents.y.text) res$cumevents <- .set_large_dash_as_ytext(res$cumevents)
    if(cumevents.y.text.col)
      res$cumevents <- res$cumevents + theme(axis.text.y = element_text(colour = rev(scurve_cols)))
  }

  # Add ncensor.plot or cumcensor plot
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  if(ncensor.plot){
    # Bar plot of the number of censored subjects at each event time.
    ncensor_plot <- ggplot(d, aes_string("time", "n.censor")) +
      ggpubr::geom_exec(geom_bar, d, color = surv.color, fill = surv.color,
                        stat = "identity", position = "dodge")+
      coord_cartesian(xlim = xlim)+
      scale_y_continuous(breaks = sort(unique(d$n.censor))) +
      ggtheme
    ncensor_plot <- ggpubr::ggpar(ncensor_plot, palette = palette)
    ncensor_plot <- ncensor_plot + ggplot2::labs(color = legend.title, fill = legend.title,
                                                 x = xlab, y = "n.censor", title = ncensor.plot.title)
    # For backward compatibility
    ncensor_plot <- .set_general_gpar(ncensor_plot, ...) # general graphical parameters
    ncensor_plot <- .set_ncensorplot_gpar(ncensor_plot, ...) # specific graphical params
    ncensor_plot <- ncensor_plot + tables.theme
    if(!xlog) ncensor_plot <- ncensor_plot + ggplot2::scale_x_continuous(breaks = times, labels = xticklabels)
    else ncensor_plot <- ncensor_plot + ggplot2::scale_x_continuous(breaks = times, trans = "log10", labels = xticklabels)
  }
  else if(cumcensor){
    ncensor_plot <- ggcumcensor (fit, data = data, color = cumcensor.col,
                                 palette = palette, break.time.by = break.time.by,
                                 xlim = xlim, title = cumcensor.title,
                                 legend = legend, legend.title = legend.title, legend.labs = legend.labs,
                                 y.text = cumcensor.y.text, y.text.col = cumcensor.y.text.col,
                                 fontsize = fontsize, ggtheme = ggtheme, xlab = xlab, ylab = legend.title,
                                 xlog = xlog, xscale = xscale, ...)
    ncensor_plot <- ncensor_plot + tables.theme
    if(!cumcensor.y.text) ncensor_plot <- .set_large_dash_as_ytext(ncensor_plot)
    if(cumcensor.y.text.col)
      ncensor_plot <- ncensor_plot + theme(axis.text.y = element_text(colour = rev(scurve_cols)))
  }
  if(ncensor.plot | cumcensor)
    res$ncensor.plot <- ncensor_plot

  # Defining attributes for ggsurvplot; these drive layout in
  # print.ggsurvplot()/.build_ggsurvplot().
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  heights <- list(
    plot = surv.plot.height,
    table = ifelse(risk.table, risk.table.height, 0),
    ncensor.plot = ifelse(ncensor.plot | cumcensor, ncensor.plot.height, 0),
    cumevents = ifelse(cumevents, cumevents.height, 0)
  )
  y.text <- list(
    table = risk.table.y.text,
    cumevents = cumevents.y.text,
    cumcensor = cumcensor.y.text
  )
  y.text.col <- list(
    table = risk.table.y.text.col,
    cumevents = cumevents.y.text.col,
    cumcensor = cumcensor.y.text.col
  )
  # Returning the data used to generate the survival plots
  res$data.survplot <- d
  res$data.survtable <- .get_timepoints_survsummary(fit, data, times)
  class(res) <- c("ggsurvplot", "ggsurv", "list")
  attr(res, "heights") <- heights
  attr(res, "y.text") <- y.text
  attr(res, "y.text.col") <- y.text.col
  attr(res, "legend.position") <- legend
  attr(res, "legend.labs") <- legend.labs
  attr(res, "cumcensor") <- cumcensor
  attr(res, "risk.table.pos") <- risk.table.pos
  res
}
#' @param x an object of class ggsurvplot
#' @method print ggsurvplot
#' @param newpage open a new page. See \code{\link{grid.arrange}}
#' @rdname ggsurvplot
#' @export
print.ggsurvplot <- function(x, surv.plot.height = NULL, risk.table.height = NULL, ncensor.plot.height = NULL, newpage = TRUE, ...){
  # Assemble the survival plot together with its companion tables into a
  # single grob, then draw it on the current (or a fresh) grid page.
  assembled <- .build_ggsurvplot(
    x = x,
    surv.plot.height = surv.plot.height,
    risk.table.height = risk.table.height,
    ncensor.plot.height = ncensor.plot.height
  )
  if (newpage) {
    grid::grid.newpage()
  }
  grid::grid.draw(assembled)
}
# Build ggsurvplot for printing
# Build a single displayable grob from a ggsurvplot object.
#
# Stacks the main survival plot with any companion components (risk table,
# cumulative events table, censoring plot) using the relative heights stored
# as attributes on 'x' (optionally overridden by the *.height arguments),
# aligns their axes, and re-attaches a single shared legend when one was
# extracted from the main plot.
#
# x: an object of class ggsurvplot.
# surv.plot.height, risk.table.height, ncensor.plot.height, cumevents.height:
#   optional numeric overrides for the stored component heights.
# Returns: the bare ggplot when there is nothing to stack, otherwise a grob
#   suitable for grid::grid.draw().
.build_ggsurvplot <- function(x, surv.plot.height = NULL,
                              risk.table.height = NULL, ncensor.plot.height = NULL,
                              cumevents.height = NULL, ...)
{
  if(!inherits(x, "ggsurvplot"))
    stop("An object of class ggsurvplot is required.")
  heights <- attr(x, "heights")
  y.text <- attr(x, "y.text")
  y.text.col <- attr(x, "y.text.col")
  cumcensor <- attr(x, "cumcensor")
  risk.table.pos <- attr(x, "risk.table.pos")
  if(risk.table.pos == "in") x <- .put_risktable_in_survplot(x)
  nplot <- .count_ggplots(x)
  # Removing data components from the list and keep only plot objects
  x$data.survplot <- x$data.survtable <- NULL
  # Extract legend from the survival plot
  legend.position <- attr(x, "legend.position")[1]
  legend.grob <- .get_legend(x$plot)
  # Update heights
  if(!is.null(surv.plot.height)) heights$plot <- surv.plot.height
  if(!is.null(risk.table.height)) heights$table <- risk.table.height
  if(!is.null(ncensor.plot.height)) heights$ncensor.plot <- ncensor.plot.height
  if(!is.null(cumevents.height)) heights$cumevents <- cumevents.height
  # The survival plot takes whatever vertical space the tables do not use.
  heights$plot <- 1 - heights$table - heights$ncensor.plot - heights$cumevents
  # Extract strata colors for survival curves; used to color table y tick labels.
  legend.labs <- attr(x, "legend.labs")
  if(!is.null(x$table) || !is.null(x$ncensor.plot) || !is.null(x$cumevents)){
    g <- ggplot_build(x$plot)
    cols <- unlist(unique(g$data[[1]]["colour"]))
    if(length(cols) == 1) cols <- rep(cols, length(legend.labs))
    names(cols) <- legend.labs # Give every color an appropriate name
  }
  # Hide per-component legends; the shared legend is re-attached below.
  if(nplot > 1 && legend.position %in% c("left", "right", "bottom")) x$plot <- .hide_legend(x$plot)
  if(!is.null(x$table)){
    x$table <- .hide_legend(x$table)
    if(!y.text$table) x$table <- .set_large_dash_as_ytext(x$table)
    # Make sure that risk.table.y.text.col will be the same as the plot legend colors
    if(y.text.col$table)
      x$table <- x$table + ggplot2::theme(axis.text.y = ggplot2::element_text(colour = rev(cols)))
  }
  if(!is.null(x$cumevents)){
    x$cumevents <- .hide_legend(x$cumevents)
    if(!y.text$cumevents) x$cumevents <- .set_large_dash_as_ytext(x$cumevents)
    # Make sure that y.text.col will be the same as the plot legend colors
    if(y.text.col$cumevents)
      x$cumevents <- x$cumevents + ggplot2::theme(axis.text.y = ggplot2::element_text(colour = rev(cols)))
  }
  if(!is.null(x$ncensor.plot)){
    x$ncensor.plot <- x$ncensor.plot + theme(legend.position = "none")
    if(cumcensor){
      if(!y.text$cumcensor) x$ncensor.plot <- .set_large_dash_as_ytext(x$ncensor.plot)
      if(y.text.col$cumcensor)
        x$ncensor.plot <- x$ncensor.plot + theme(axis.text.y = ggplot2::element_text(colour = rev(cols)))
    }
  }
  # Nothing to stack: return the bare survival plot.
  if(is.null(x$table) && is.null(x$ncensor.plot) && is.null(x$cumevents)) return(x$plot)
  heights <- unlist(heights)[names(x)] # get the height of each component in x
  plots <- x
  grobs <- widths <- list()
  for (i in seq_along(plots)) {
    if(is.ggplot(plots[[i]])){
      grobs[[i]] <- ggplotGrob(plots[[i]])
      widths[[i]] <- grobs[[i]]$widths[2:5]
    }
  }
  # Give every component the same axis-area widths so their x axes line up.
  maxwidth <- do.call(grid::unit.pmax, widths)
  for (i in seq_along(grobs)) {
    grobs[[i]]$widths[2:5] <- as.list(maxwidth)
  }
  ggsurv <- gridExtra::arrangeGrob(grobs = grobs, nrow = nplot, heights = unlist(heights))
  # Set legend.
  # BUG FIX: re-attach the shared legend only when one was actually extracted
  # (legend.grob non-NULL). The previous test `is.null(legend.grob)` both
  # dropped the legend in the normal case (it was hidden above and never
  # re-added) and passed a NULL grob to arrangeGrob() when it did fire.
  if(nplot > 1 && legend.position %in% c("left", "right", "bottom") && !is.null(legend.grob)){
    ggsurv <- switch(legend.position,
                     bottom = gridExtra::arrangeGrob(grobs = list(ggsurv, legend.grob), nrow = 2, heights = c(0.9, 0.1)),
                     top = gridExtra::arrangeGrob(grobs = list(legend.grob, ggsurv), nrow = 2, heights = c(0.1, 0.9)),
                     right = gridExtra::arrangeGrob(grobs = list(ggsurv, legend.grob), ncol = 2, widths = c(0.75, 0.25)),
                     left = gridExtra::arrangeGrob(grobs = list(legend.grob, ggsurv), ncol = 2, widths = c(0.25, 0.75)),
                     ggsurv
    )
  }
  return(ggsurv)
}
.hide_legend <- function(p){
  # Remove the legend from a ggplot object.
  # Fix: the original's last expression was an assignment (`p <- p + ...`),
  # which made the function return its result invisibly; returning the
  # expression directly is equivalent for all callers (they assign the
  # result) but makes the return value visible and the intent explicit.
  p + theme(legend.position = "none")
}
# Function defining a transformation of the survival curve
# ++++++++++++++++++++++++++++++++++++++++++++++
# see ?survival::plot.survfit
# Applies a transformation to the surv, upper and lower columns of `d`.
# `fun` may be NULL (no transformation), one of the recognized names
# ("log", "event", "cumhaz", "cloglog", "pct", "logpct", "identity"),
# or an arbitrary function of one numeric vector.
.apply_surv_func <- function(d, fun = NULL){
  if (is.null(fun)) {
    return(d)
  }
  if (is.character(fun)) {
    # Map the name onto the corresponding transformation
    fun <- switch(fun,
                  log      = log,
                  event    = function(y) 1 - y,
                  cumhaz   = function(y) -log(y),
                  cloglog  = function(y) log(-log(y)),
                  pct      = function(y) y * 100,
                  logpct   = function(y) 100 * y,
                  identity = identity,
                  stop("Unrecognized survival function argument"))
  } else if (!is.function(fun)) {
    stop("Invalid 'fun' argument")
  }
  # Transform the curve and its confidence bounds
  for (col in c("surv", "upper", "lower")) {
    d[[col]] <- fun(d[[col]])
  }
  d
}
# Adapt ylab according to the value of the argument fun
#%%%%%%%%%%%%%%%%%%%%%%%%%
# When the user kept the default y label ("Survival probability") and asked
# for a transformed curve via a character `fun`, substitute a label matching
# the transformation. A custom ylab is always returned untouched.
.check_ylab <- function(ylab, fun){
  # Fix: `&&` (scalar, short-circuiting) instead of `&`; both operands are
  # length-1 here and R >= 4.3 errors on longer inputs to `&&`, so this also
  # documents the scalar intent.
  if(!is.null(fun) && is.character(fun)){
    if(ylab == "Survival probability"){
      ylab <- switch(fun, log = "log(Survival probability)",
                     event = "Cumulative event",
                     cumhaz = "Cumulative hazard",
                     pct = "Survival probability (%)",
                     identity = "Survival probability",
                     cloglog = "log(-log(S(t)))",
                     "Survival probability")
    }
  }
  ylab
}
# get survdiff pvalue
# Computes the p-value of a test comparing the survival curves in `fit`.
#   method == "survdiff": log-rank test via survival::survdiff().
#   otherwise: `method` selects a weighting scheme from survMisc::comp()
#              (see ?survMisc::comp for the weight codes in tests$W).
# Returns list(val = p-value, method = display name of the test), or
# list(NULL, NULL) when the fit has a single group (nothing to compare).
.get_pvalue <- function(fit, method, data = NULL){
  data <- .get_data(fit, data)
  # One group
  if(length(levels(summary(fit)$strata)) == 0) return(list(val = NULL, method = NULL))
  if(method == "survdiff") {
    # Re-run the test on the original formula, honouring any subset
    # recorded in the fit's call.
    ssubset <- fit$call$subset
    if(is.null(ssubset))
      sdiff <- survival::survdiff(eval(fit$call$formula), data = data)
    else
      sdiff <- survival::survdiff(eval(fit$call$formula), data = data,
                                  subset = eval(fit$call$subset))
    # Chi-square test with (number of groups - 1) degrees of freedom
    pvalue <- stats::pchisq(sdiff$chisq, length(sdiff$n) - 1, lower.tail = FALSE)
    return(list(val = pvalue, method = "Log-rank"))
  } else {
    tenfit <- ten(eval(fit$call$formula), data = data)
    # comp() prints its results; capture.output() silences that printing.
    capture.output(comp(tenfit)) -> null_dev
    # comp modifies tenfit object (ten class: ?survMisc::ten)
    # and adds attributes with tests
    attributes(tenfit)$lrt -> tests
    # check str(tests) -> W:weights / pNorm:p-values
    pvalue <- round(tests$pNorm[tests$W == method], 4)
    # Display names, positionally matched to the weight codes in tests$W.
    test_name <- c("Log-rank", "Gehan-Breslow",
                   "Tarone-Ware", "Peto-Peto",
                   "modified Peto-Peto", "Fleming-Harrington (p=1, q=1)")
    # taken from ?survMisc::comp
    return(list(val = pvalue, method = test_name[tests$W == method]))
  }
}
# Check user defined legend labels
# Validates that the number of user-supplied legend labels matches the
# number of strata in `fit` (or is exactly 1 for a single-curve fit).
# Stops with an informative error on mismatch; otherwise returns NULL.
# The check is skipped for survfit.cox objects and when no labels are given.
.check_legend_labs <- function(fit, legend.labs = NULL){
  # Fix: `&&` (scalar, short-circuiting) instead of `&`; both operands are
  # length-1 logicals, and short-circuiting skips the inherits() call when
  # no labels were supplied.
  if(!is.null(legend.labs) && !inherits(fit, "survfit.cox")){
    if(!is.null(fit$strata)){
      if(length(fit$strata) != length(legend.labs))
        stop("The length of legend.labs should be ", length(fit$strata) )
    }
    else{
      if(length(legend.labs) != 1)
        stop("The length of legend.labs should be 1")
    }
  }
}
# Connect survival data to the origine
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Prepends a time-zero row (surv = upper = lower = 1, event/censor counts 0)
# for each stratum so the plotted curves start at the origin. For non-Cox
# fits, the grouping variables encoded in the strata labels are rebuilt on
# the new rows via the .get_variables()/.get_variable_value() helpers.
.connect2origin <- function(d, fit, data = NULL){
  origin <- d[1, , drop = FALSE]
  zero.cols <- intersect(c("time", "n.censor", "std.err", "n.event"), colnames(origin))
  origin[zero.cols] <- 0
  origin[c("surv", "upper", "lower")] <- 1.0
  strata.levels <- levels(d$strata)
  n.strata <- length(strata.levels)
  # One origin row per stratum
  if (n.strata > 1) {
    origin <- origin[rep(1, n.strata), , drop = FALSE]
    row.names(origin) <- seq_len(nrow(origin))
    origin$strata <- factor(strata.levels, levels = strata.levels)
    # Rebuild the variable columns encoded in the strata labels
    if (!inherits(fit, "survfit.cox")) {
      variables <- .get_variables(origin$strata, fit, data)
      for (variable in variables)
        origin[[variable]] <- .get_variable_value(variable, origin$strata, fit, data)
    }
  }
  rbind(origin, d)
}
# Adjust linetype manually
#%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Interprets the `linetype` argument: when several line types are supplied
# (numeric codes, or names from the base set), they are treated as a manual
# per-stratum scale and the linetype aesthetic is mapped to "strata".
# Returns list(lty = aesthetic or original value, lty.manual = manual scale
# or NULL).
# Fix: the original read `else (is.character(linetype))` followed by a
# free-standing `{ ... }` block — the parenthesised expression was the whole
# else branch and the block always executed. This is now a proper `else if`,
# matching the evident intent; results are unchanged for numeric and
# character inputs (the stray block was a no-op for those cases).
.get_lty <- function(linetype){
  linetype.manual <- NULL
  nlty <- length(linetype)
  if(is.numeric(linetype)){
    if(nlty > 1) {
      linetype.manual <- linetype
      linetype <- "strata"
    }
  }
  else if(is.character(linetype)){
    base_lty <- c("blank", "solid", "dashed", "dotted", "dotdash", "longdash", "twodash")
    is_base_lty <- all(linetype %in% base_lty)
    # `&&` (scalar) rather than `&`: both operands are length-1 logicals.
    if(is_base_lty && nlty > 1){
      linetype.manual <- linetype
      linetype <- "strata"
    }
  }
  list(lty = linetype, lty.manual = linetype.manual)
}
# Parse risk.table argument
#%%%%%%%%%%%%%%%%%%%%%%%
# risk.table: a logical value (TRUE/FALSE) or a string ("absolute",
# "percentage", "abs_pct", "nrisk_cumcensor", "nrisk_cumevents").
# Returns list(display = logical flag, type = table type string).
.parse_risk_table_arg <- function(risk.table){
  res <- list(display = risk.table, type = "absolute")
  if(inherits(risk.table, "character") ){
    if(risk.table %in% c("absolute", "percentage", "abs_pct", "nrisk_cumcensor", "nrisk_cumevents") )
      res <- list(display = TRUE, type = risk.table)
    # Fix: the error message now lists 'abs_pct', which is accepted above but
    # was missing from the original message.
    else stop("Allowed values for risk.table are: TRUE, FALSE, 'absolute', 'percentage', 'abs_pct', 'nrisk_cumcensor', 'nrisk_cumevents' ")
  }
  res
}
# Drawing horizontal line at 50% median survival
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Adds dashed median-survival reference lines to plot `p`:
#   type = "h"  : horizontal segment at the median probability level,
#   type = "v"  : vertical segment(s) from the time axis up to each median,
#   type = "hv" : both.
# Median lines are refused (with a warning) for the cumhaz/cloglog
# transforms; for fun = "pct" the reference level is 50 instead of 0.5.
.add_surv_median <-function(p, fit, type = "hv", fun = NULL, data = NULL){
  # Dummy bindings for the column names referenced inside aes() below.
  x1 <- x2 <- y1 <- y2 <- NULL
  draw_lines <- TRUE
  med_y = 0.5
  if(is.null(fun)) draw_lines <- TRUE
  else if(fun %in% c("cumhaz", "cloglog")){
    warning("Adding survival median lines is not allowed when fun is: ", fun)
    draw_lines <- FALSE
  }
  else if(fun == "pct") med_y <- 50
  if(draw_lines){
    # Multi-group fits expose a table with one row per stratum; single-group
    # fits return a plain vector, which is reshaped into a one-row table.
    if(!is.null(fit$strata) | is.matrix(fit$surv)) .table <- as.data.frame(summary(fit)$table)
    else{
      .table <- t(as.data.frame(summary(fit)$table))
      rownames(.table) <- "All"
    }
    surv_median <- as.vector(.table[,"median"])
    # Segment endpoints: one vertical segment per stratum, from (median, 0)
    # up to (median, med_y).
    df <- data.frame(x1 = surv_median, x2 = surv_median,
                     y1 = rep(0, length(surv_median)),
                     y2 = rep(med_y, length(surv_median)),
                     strata = .clean_strata(rownames(.table)))
    if(!is.null(fit$strata)){
      variables <- .get_variables(df$strata, fit, data)
      for(variable in variables) df[[variable]] <- .get_variable_value(variable, df$strata, fit, data)
    }
    # Strata whose median was never reached have NA medians; drop them.
    df <- stats::na.omit(df)
    if(nrow(df)>0){
      if(type %in% c("hv", "h"))
        p <- p +
          geom_segment(aes(x = 0, y = max(y2), xend = max(x1), yend = max(y2)),
                       data = df, linetype = "dashed", size = 0.5) # horizontal segment
      if(type %in% c("hv", "v"))
        p <- p + geom_segment(aes(x = x1, y = y1, xend = x2, yend = y2), data = df,
                              linetype = "dashed", size = 0.5) # vertical segments
    }
    else warning("Median survival not reached.")
  }
  p
}
# Graphical parameters
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# general graphical parameters to be applied to
# survival curves, risk.table, ncensor.plot
# Forwards the shared font options found in `...` to ggpubr::ggpar(),
# together with the requested legend position. Options absent from `...`
# are passed as NULL, which ggpar() treats as "leave unchanged".
.set_general_gpar <- function(p, legend = "top", ...){
  dots <- list(...)
  ggpubr::ggpar(p = p,
                legend = legend,
                font.main = dots$font.main,
                font.submain = dots$font.submain,
                font.x = dots$font.x,
                font.y = dots$font.y,
                font.caption = dots$font.caption,
                font.tickslab = dots$font.tickslab,
                font.legend = dots$font.legend)
}
# Specific graphical params to ncensor_plot
# Forwards the ncensor-plot-specific title/caption/font options found in
# `...` to ggpubr::ggpar(), with the legend hidden by default. Options
# absent from `...` are passed as NULL ("leave unchanged").
.set_ncensorplot_gpar <- function(p, legend = "none", ...){
  dots <- list(...)
  ggpubr::ggpar(p,
                legend = legend,
                subtitle = dots$ncensor.plot.subtitle,
                caption = dots$ncensor.plot.caption,
                font.main = dots$font.ncensor.plot.title,
                font.submain = dots$font.ncensor.plot.subtitle,
                font.caption = dots$font.ncensor.plot.caption,
                font.tickslab = dots$font.ncensor.plot.tickslab,
                font.x = dots$font.ncensor.plot.x,
                font.y = dots$font.ncensor.plot.y)
}
# Put risk table inside main plot
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Embeds the risk-table ggplot as a transparent annotation in the lower-left
# corner of the main survival plot, then drops the standalone table
# component. Returns `ggsurv` unchanged when no risk table was generated.
.put_risktable_in_survplot <- function(ggsurv){
  # Nothing to embed when risk.table = FALSE.
  # Fix: the original also contained a second is.null(ggsurv$table) check
  # that called stop(); it was unreachable because this return always fires
  # first, so the dead branch has been removed.
  if(is.null(ggsurv$table)) return(ggsurv)
  # A fully transparent theme so only the table text shows over the plot.
  theme_transparent <- function() {
    theme(
      title = element_blank(),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      axis.text.x = element_blank(),
      axis.text.y = element_blank(),
      axis.ticks = element_blank(),
      panel.grid = element_blank(),
      axis.line = element_blank(),
      panel.background = element_rect(fill = "transparent", colour = NA),
      plot.background = element_rect(fill = "transparent", colour = NA),
      plot.margin = unit(c(0, 0, 0, 0), "mm"),
      panel.border = element_blank(),
      legend.position = "none")
  }
  survplot <- ggsurv$plot
  risktable <- ggsurv$table + theme_transparent()
  # The annotation height grows with the number of strata rows.
  nstrata <- length(levels(survplot$data$strata))
  .time <- survplot$data$time
  ymax <- nstrata * 0.05
  risktable_grob <- ggplotGrob(risktable)
  # Anchor slightly left of the time-axis origin and just below y = 0.
  survplot <- survplot + annotation_custom(grob = risktable_grob,
                                           xmin = -max(.time)/20,
                                           ymin = -0.05, ymax = ymax)
  ggsurv$plot <- survplot
  ggsurv$table <- NULL
  ggsurv
}
# Check if fun is cloglog
# Returns TRUE where `fun` equals the character value "cloglog"
# (vectorised over character input, as in the original); FALSE for NULL,
# functions and any other type.
.is_cloglog <- function(fun){
  if (is.character(fun)) {
    fun == "cloglog"
  } else {
    FALSE
  }
}
|
86184d5b89bd7548f26ff3825bb6a9adc8188151 | 3325717d8ca9d832a7e391bd77e5a763e3401472 | /transactions.R | 3c9125e3297a43728fc2eb21d6b8c540011017b1 | [] | no_license | huynh-vu/1---The-Vu-Foundation | 206172c1504caddb2a85e429515f18863c864c68 | 23665b6978178cec393f2167a025e70b2bbe0f2d | refs/heads/master | 2020-03-28T18:21:07.266791 | 2018-09-26T20:41:15 | 2018-09-26T20:41:15 | 148,873,893 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 970 | r | transactions.R | library(tidyr)
# Exploratory script summarising debit spending per account from a
# transactions.csv export, then appending a timestamped value to a log file.
# NOTE(review): several statements below look broken or dead (see inline
# notes) -- confirm intent before reusing this script.
library(dplyr)
library(ggplot2)
library(sqldf)
# read file
imported_data <- read.csv("data/transactions.csv",header=TRUE, skip = 0)
# exclude variables: keep columns 1, 2, 4, 5, 6, 7 by position in the export
newdata <- imported_data[c(1,2,4,5,6,7)]
# keep only outgoing ("debit") transactions
newdata <- subset(newdata, Transaction.Type=='debit')
# keep transactions after April 2017
# NOTE(review): the right-hand side '4/1/2017' is a character string compared
# against Date values, so the comparison is done after coercion -- verify it
# filters the intended rows.
newdata <- subset(newdata, as.Date(newdata$Date, format = "%m/%d/%Y") > '4/1/2017')
# get first 1000 observations
# newdata <- newdata[1:1000,]
# total Amount per account name
newdata <- aggregate(newdata$Amount, by=list(Account.Name=newdata$Account.Name), FUN=sum)
newdata
# NOTE(review): `dates` is not defined anywhere in this file; this line will
# error when sourced -- confirm whether newdata$Date was meant, or remove it.
as.Date(dates, format = "%m/%d/%y")
# NOTE(review): sapply()'s second argument must be a function; passing `1`
# here will error -- possibly apply(head(imported_data, 1), 1, paste) was meant.
imported_data <- sapply(head(imported_data, 1), 1, paste)
# trim variable obtain just the percentage
imported_data <- as.numeric(gsub("([0-9]+).*$", "\\1", substring(imported_data, 19, 22)))
# writes the table of best rate info (appends one timestamped line per run)
write.table(paste(toString(Sys.time()), imported_data, sep = " "), "data/transactions.txt",
            sep = "", row.names = FALSE, quote = FALSE, append = TRUE, col.names = FALSE)
d7967dcf0b786af37be6daac8c7bd81243e87c8b | 53c7dee5598e34fcec7f6843fda3b938735d70b7 | /GP.R | 418912176e511fabe98371b9ecad08a1c38420e9 | [] | no_license | tinazi/geomod3D | d49051473581e84d74dc9f3f15a588fd5de49f8a | 9cc3eb400f5c4ad598de018a8f0136ef5b625280 | refs/heads/master | 2021-01-23T02:30:07.998491 | 2016-12-27T18:33:24 | 2016-12-27T18:33:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,415 | r | GP.R | #### Gaussian Process class ####
# S4 class for a Gaussian Process regression model over 3D spatial data.
# Slots hold the training data, optional directional (tangent) constraints,
# the covariance model, trend/mean information and pre-computed factors
# reused by predict() and fit().
GP <- setClass(
  "GP",
  slots = c(data = "spatial3DDataFrame",    # training points (value/nugget/weights columns)
            tangents = "points3DDataFrame", # tangent (derivative) constraint points
            model = "list",                 # list of covarianceStructure3D objects
            mean = "numeric",               # global mean (used when trend is empty)
            trend = "character",            # trend terms; empty means constant mean
            beta = "matrix",                # fitted trend coefficients
            likelihood = "numeric",         # log marginal likelihood of the fit
            pre_comp = "list"),             # cached factors: w_value, w_var, w_trend
  validity = function(object) {
    # Every element of the covariance model must be a covarianceStructure3D.
    if(!all(rapply(object@model,class) == "covarianceStructure3D"))
      stop("Invalid covariance object")
  }
)
#### initialization ####
setMethod(
  f = "initialize",
  signature = "GP",
  # Builds a GP object: normalises the value/nugget/weights inputs, assembles
  # the (optionally tangent-augmented) covariance matrix, Cholesky-factorises
  # it, solves for the prediction weights and trend coefficients, and stores
  # the log marginal likelihood.
  # Arguments:
  #   data     - spatial3DDataFrame of training points
  #   model    - covarianceStructure3D or list thereof
  #   value    - column name in `data`, or a vector of observed values
  #   nugget   - column name, single value, or vector of nugget variances
  #   mean     - global mean; defaults to mean(value); forced to 0 with a trend
  #   trend    - character trend terms (NULL/empty => constant-mean model)
  #   weights  - per-point weights (default 1); scale the nugget inversely
  #   tangents - optional points3DDataFrame of derivative constraints
  #   reg.t    - regularization for the tangent block (column name or values)
  definition = function(.Object, data, model, value, nugget,
                        mean = NULL, trend = NULL, weights = NULL,
                        tangents = NULL, reg.t = "reg"){
    require(Matrix)
    # covariance model: wrap a single structure in a list
    # NOTE(review): `class(model) != "list"` and the similar checks below
    # would be more robust as !is.list()/is.character(); left unchanged here.
    if(length(model) == 1 & class(model) != "list")
      .Object@model <- list(model)
    else
      .Object@model <- model
    # value: either a column name in `data` or a vector of values
    if(length(value) == 1 & class(value) == "character")
      data["value"] <- data[value]
    else
      data["value"] <- value
    yval <- data[["value"]]
    # nugget: column name, constant, or per-point vector
    if(length(nugget) == 1){
      if(class(nugget) == "character"){
        data["nugget"] <- data[nugget]
      }else{
        data["nugget"] <- rep(nugget, nrow(data))
      }
    }
    else{
      data["nugget"] <- nugget
    }
    nugget <- data[["nugget"]]
    # weights: default to 1 for every point
    if(is.null(weights)) weights <- rep(1, nrow(data))
    data["weights"] <- weights
    # regularization for tangents: column name, constant, or vector
    if(!is.null(tangents) && nrow(tangents) > 0){
      if(length(reg.t) == 1){
        if(class(reg.t) == "character"){
          tangents["reg"] <- tangents[reg.t]
        }else{
          tangents["reg"] <- rep(reg.t, nrow(tangents))
        }
      }
      else{
        tangents["reg"] <- reg.t
      }
      reg.t <- tangents[["reg"]]
    }
    # mean: default to the sample mean of the observed values
    if(is.null(mean)) mean <- mean(yval)
    # data: keep only the three working columns
    .Object@data <- data[c("value", "nugget", "weights")]
    .Object@tangents <- as(tangents, "points3DDataFrame")
    # trend: build the trend design matrix (empty matrix for constant mean)
    if(is.null(trend) | length(trend) == 0){
      TR <- matrix(0,nrow(data),0)
    }else{
      TR <- trend_matrix(data, trend)
      if(!is.null(tangents) && nrow(tangents) > 0){
        TR <- rbind(
          TR,
          trend_matrix_d1(tangents, trend)
        )
      }
      # with an explicit trend the global mean is absorbed by the trend
      mean <- 0
    }
    Ntrend <- dim(TR)[2]
    # covariances: data-data block plus weighted nugget on the diagonal
    Ntang <- 0
    K <- covariance_matrix(data, data, model, T) +
      diag(nugget / (weights + 1e-6),
           length(nugget), length(nugget))
    if(!is.null(tangents) && nrow(tangents) > 0){
      # cross-covariance (data vs tangents) and tangent-tangent blocks
      K1 <- covariance_matrix_d1(data, tangents, model, T)
      K2 <- covariance_matrix_d2(tangents, model, T)
      Ntang <- nrow(K2)
      # regularization
      K2 <- K2 + diag(reg.t, Ntang, Ntang)
      # final matrix
      K <- rbind(
        cbind(K, K1),
        cbind(t(K1), K2)
      )
    }
    # pre-computations: Cholesky factor and K^{-1} (y - mean)
    .Object@pre_comp <- list()
    L <- t(chol(Matrix(K))) # makes L lower triangular
    .Object@mean <- mean
    # tangent observations are zeros appended to the residual vector
    yval <- c(yval - mean, rep(0, Ntang))
    LiY <- solve(L, Matrix(yval, length(yval), 1))
    w_value <- solve(t(L), LiY)
    .Object@pre_comp$w_value <- as.numeric(w_value)
    .Object@pre_comp$w_var <- L
    if(Ntrend > 0){
      # generalized least squares for the trend coefficients beta
      HLi <- solve(L, TR)
      A <- t(HLi) %*% HLi
      b1 <- t(TR) %*% w_value
      beta <- solve(A, b1)
      .Object@beta <- as.matrix(beta)
      .Object@trend <- trend
      .Object@pre_comp$w_trend <- HLi
    }
    # likelihood (Gaussian log marginal likelihood)
    # NOTE(review): the log-determinant of K from a Cholesky factor is usually
    # 2*sum(log(diag(L))); here the diagonal entries are squared and summed
    # without a log -- verify this term against the intended formula.
    dt <- 2*sum(diag(L)^2)
    .Object@likelihood <- -0.5 * dt -
      0.5 * sum(yval * w_value) -
      0.5 * length(yval) * log(2*pi)
    if(Ntrend > 0){
      # trend correction terms for the marginal likelihood
      LiYH <- t(LiY) %*% HLi
      tmp <- LiYH %*% solve(A, t(LiYH))
      .Object@likelihood <- .Object@likelihood +
        0.5 * as.numeric(tmp) +
        Ntrend * log(2*pi) -
        0.5 * determinant(A)$modulus
    }
    # end
    validObject(.Object)
    return(.Object)
  }
)
#### show ####
setMethod(
  f = "show",
  signature = "GP",
  # Prints a concise summary of the GP: data sizes, mean or trend
  # information, and the log-likelihood stored at construction time.
  definition = function(object){
    cat("Object of class ", class(object), "\n", sep = "")
    cat("Data points:", nrow(object@data), "\n")
    cat("Tangent points:", nrow(object@tangents), "\n")
    if (length(object@trend) > 0) {
      cat("Trend:", object@trend, "\n")
      print(object@beta)
    } else {
      cat("Global mean:", object@mean, "\n")
    }
    cat("Log-likelihood:", object@likelihood, "\n")
  }
)
#### predict ####
setMethod(
  f = "predict",
  signature = "GP",
  # Kriging-style prediction on the `target` object. Writes the predicted
  # values into column `to` of `target` (and, when output.var = TRUE, the
  # prediction variance into column paste0(to, ".var")), then returns the
  # modified `target`. Targets are processed in slices to bound the size of
  # the covariance matrices held in memory.
  definition = function(object, target, to = "value", output.var = F){
    require(Matrix)
    # pre processing: cached Cholesky factor and prediction weights
    w_var <- object@pre_comp$w_var
    w_value <- object@pre_comp$w_value
    # trend
    if(length(object@trend) > 0){
      TRtarget <- trend_matrix(target, object@trend)
      # NOTE(review): TRdata is computed but never used below -- verify
      # whether it can be dropped.
      TRdata <- trend_matrix(object@data, object@trend)
      w_tr <- object@pre_comp$w_trend
      beta <- object@beta
    }
    # slicing target to save memory
    maxgrid <- 1000 # optimize this
    Ngrid <- nrow(target)
    Nslice <- ceiling(Ngrid / maxgrid)
    t2 <- pointify(target)
    for(i in seq(Nslice)){
      # slice ID: row indices of the current slice
      slid <- seq((i - 1) * maxgrid + 1,
                  min(Ngrid, i * maxgrid))
      ttemp <- t2[slid,]
      # covariances between the slice and the training data (plus tangents)
      Ntang <- nrow(object@tangents)
      Ktarget <- covariance_matrix(ttemp, object@data,
                                   object@model, T)
      if(Ntang > 0){
        K1 <- covariance_matrix_d1(ttemp,
                                   object@tangents,
                                   object@model, T)
        Ktarget <- cbind(Ktarget, K1)
      }
      # prediction
      # residuals: K_* %*% K^{-1} (y - mean), plus the global mean
      pred <- apply(Ktarget, 1, function(rw){
        sum(rw * w_value)
      }) + object@mean
      # trend: universal-kriging correction with the fitted beta
      if(length(object@trend) > 0){
        LinvK <- solve(w_var, t(Ktarget))
        R <- t(TRtarget[slid,]) - t(w_tr) %*% LinvK
        pred <- pred + t(R) %*% beta
      }
      target[slid, to] <- pred
      # variance
      if(output.var){
        # prior (total) variance is the sum of the structures' contributions
        tot_var <- sum(sapply(object@model,
                              function(m) m@contribution))
        if(length(object@trend) > 0){
          pred_var <- colSums(LinvK^2)
          # clamp: the explained part cannot exceed the prior variance
          pred_var[pred_var > tot_var] <- tot_var
          pred_var <- tot_var - pred_var
          # extra uncertainty from estimating the trend coefficients
          tr_var <- colSums(
            R * (solve(t(w_tr) %*% w_tr, R))
          )
          pred_var <- pred_var + tr_var
        }
        else{
          pred_var <- colSums(solve(w_var, t(Ktarget))^2)
          pred_var[pred_var > tot_var] <- tot_var
          pred_var <- tot_var - pred_var
        }
        target[slid, paste0(to, ".var")] <- pred_var
      }
    }
    # output
    return(target)
  }
)
#### log-likelihood ####
setMethod(
  f = "logLik",
  signature = "GP",
  # Accessor for the log marginal likelihood stored at construction time.
  definition = function(object){
    object@likelihood
  }
)
#### fit ####
setMethod(
  f = "fit",
  signature = "GP",
  # Fits the covariance-model hyperparameters by maximizing the GP's log
  # likelihood with a genetic algorithm (GA::ga). Each covariance structure
  # contributes 8 optimization variables (contribution, maxrange, midrange
  # and minrange ratios, azimuth, dip, rake, power) plus one shared nugget
  # variable at the end. The logical arguments (midrange, minrange, azimuth,
  # dip, rake, power, nugget) toggle whether each parameter is free or fixed.
  # nugget.fix gives indices of points whose nugget must keep its data value.
  # Returns a new, fitted GP object.
  definition = function(object,
                        midrange = F, minrange = F,
                        azimuth = F, dip = F, rake = F,
                        power = F, nugget = F,
                        nugget.fix = numeric(),
                        seed = runif(1, 1, 10000)){
    require(GA)
    # setup: data statistics used to bound the search space
    structures <- sapply(object@model, function(x) x@type)
    Nstruct <- length(structures)
    Ndata <- nrow(object@data)
    data_var <- var(object@data[["value"]])
    data_box <- boundingBox(object@data)
    # diagonal of the bounding box: upper bound for the ranges
    data_rbase <- sqrt(sum(data_box[1,] - data_box[2,])^2)
    data_nugget <- object@data[["nugget"]]
    # optimization limits and starting point
    # layout: 8 slots per structure, plus one trailing nugget slot
    opt_min <- opt_max <- numeric(Nstruct * 8 + 1)
    xstart <- matrix(0, 1, Nstruct * 8 + 1)
    for(i in 1:Nstruct){
      # contribution
      opt_min[(i-1)*8+1] <- data_var/1000
      opt_max[(i-1)*8+1] <- data_var*2
      xstart[(i-1)*8+1] <- object@model[[i]]@contribution
      # maxrange
      opt_min[(i-1)*8+2] <- data_rbase/1000
      opt_max[(i-1)*8+2] <- data_rbase
      xstart[(i-1)*8+2] <- object@model[[i]]@maxrange
      # midrange (multiple of maxrange)
      if(midrange)
        opt_min[(i-1)*8+3] <- 0.01
      else
        opt_min[(i-1)*8+3] <- 1
      opt_max[(i-1)*8+3] <- 1
      xstart[(i-1)*8+3] <- object@model[[i]]@midrange /
        object@model[[i]]@maxrange
      # minrange(multiple of midrange)
      # NOTE(review): these two indices use (i-1)*Nstruct+4 while every other
      # slot uses the 8-wide stride (i-1)*8+4; they only coincide when
      # Nstruct == 8 or i == 1 -- verify against the intended layout.
      if(minrange)
        opt_min[(i-1)*Nstruct+4] <- 0.01
      else
        opt_min[(i-1)*Nstruct+4] <- 1
      opt_max[(i-1)*8+4] <- 1
      xstart[(i-1)*8+4] <- object@model[[i]]@minrange /
        object@model[[i]]@midrange
      # azimuth
      opt_min[(i-1)*8+5] <- 0
      if(azimuth)
        opt_max[(i-1)*8+5] <- 360
      else
        opt_max[(i-1)*8+5] <- 0
      xstart[(i-1)*8+5] <- object@model[[i]]@azimuth
      # dip
      opt_min[(i-1)*8+6] <- 0
      if(dip)
        opt_max[(i-1)*8+6] <- 90
      else
        opt_max[(i-1)*8+6] <- 0
      xstart[(i-1)*8+6] <- object@model[[i]]@dip
      # rake
      opt_min[(i-1)*8+7] <- 0
      if(rake)
        opt_max[(i-1)*8+7] <- 90
      else
        opt_max[(i-1)*8+7] <- 0
      xstart[(i-1)*8+7] <- object@model[[i]]@rake
      # power
      if(power){
        opt_min[(i-1)*8+8] <- 0.1
        opt_max[(i-1)*8+8] <- 3
      }
      else{
        opt_min[(i-1)*8+8] <- 1
        opt_max[(i-1)*8+8] <- 1
      }
      xstart[(i-1)*8+8] <- object@model[[i]]@power
    }
    # nugget: last slot of the search vector
    if(nugget){
      opt_min[Nstruct * 8 + 1] <- data_var/1000
      opt_max[Nstruct * 8 + 1] <- data_var*2
    }
    else{
      opt_min[Nstruct * 8 + 1] <- 0 # not used
      opt_max[Nstruct * 8 + 1] <- 0 # not used
    }
    xstart[Nstruct * 8 + 1] <- mean(data_nugget)
    # conforming starting point to limits (clamp into [opt_min, opt_max])
    xstart[xstart < opt_min] <- opt_min[xstart < opt_min]
    xstart[xstart > opt_max] <- opt_max[xstart > opt_max]
    # fitness function: decodes a parameter vector x into a covariance model,
    # builds a temporary GP, and returns either its likelihood (during the
    # search) or the GP itself (finished = TRUE).
    makeGP <- function(x, finished = F){
      # covariance model
      m <- vector("list", Nstruct)
      for(i in 1:Nstruct){
        m[[i]] <- covarianceStructure3D(
          type = structures[i],
          contribution = x[(i-1)*8+1],
          maxrange = x[(i-1)*8+2],
          midrange = x[(i-1)*8+2] *
            x[(i-1)*8+3],
          minrange = x[(i-1)*8+2] *
            x[(i-1)*8+3] *
            x[(i-1)*8+4],
          azimuth = x[(i-1)*8+5],
          dip = x[(i-1)*8+6],
          rake = x[(i-1)*8+7],
          power = x[(i-1)*8+8]
        )
      }
      # temporary GP
      if(nugget){
        # fit a constant nugget model
        tmpnug <- x[Nstruct * 8 + 1]
      }
      else{
        # use values as given
        tmpnug <- data_nugget
      }
      # points with fixed nugget
      tmpnug[nugget.fix] <- data_nugget[nugget.fix]
      # GP
      tmpgp <- GP(
        data = object@data,
        model = m,
        value = "value",
        nugget = tmpnug,
        mean = object@mean,
        trend = object@trend,
        tangents = object@tangents,
        weights = object@data[["weights"]]
      )
      # output
      if(finished)
        return(tmpgp)
      else
        return(tmpgp@likelihood)
    }
    # optimization: GA maximizes the fitness (log likelihood)
    opt <- ga(
      type = "real-valued",
      fitness = function(x) makeGP(x, F),
      min = opt_min,
      max = opt_max,
      pmutation = 0.5,
      popSize = 20,
      run = 20,
      seed = seed,
      monitor = F,
      suggestions = xstart
    )
    # update: rebuild the GP from the best solution found
    # NOTE(review): ga() can return multiple tied solutions as a multi-row
    # matrix in opt@solution -- verify that a single row is intended here.
    sol <- opt@solution
    return(makeGP(sol, T))
  }
)
e00ac576fdf3de3c315778f0009d83199371c32c | 0ae1661c6939d7924612c08e12725ff9139da98b | /R/lw.R | eedf19f9bedce40f8f9751e8ab2ef6d1bc3ae21c | [
"MIT"
] | permissive | kbario/concentr8r | 1d24a080588661f02f55cbc27049d890caf4f8f2 | ecde75b76f8b6a9d12c333795a6bde8833b9250e | refs/heads/main | 2023-04-13T12:55:58.704961 | 2022-11-01T22:57:34 | 2022-11-01T22:57:34 | 404,204,813 | 1 | 1 | NOASSERTION | 2022-11-01T22:57:35 | 2021-09-08T03:58:10 | R | UTF-8 | R | false | false | 1,814 | r | lw.R | #' Full Width at Half-Maximum
#' @description Calculating full width at half maximum (FWHM, aka line width). This function returns the ppm difference where peak line crosses half of the peak height. It requires one signal across all spectra within ppm ranges specified in shift.
#'
#' @param X num matrix, NMR data with rows representing spectra.
#' @param ppm num array describing chemical shift positions, its length equals to nrow(X).
#' @param sh num array(2), chemical shift range of a singlet for which fwhm is calculated
#' @param sf num, operating frequency of the spectrometer (meta$SFO1)
#'
#' @return Array of line widths in ppm. To convert from ppm to Hertz (Hz), multiply values with the spectrometer frequency (column a_SF01 in meta data frame).
#' @export
#' @author Torben Kimhofer \email{torben.kimhofer@@murdoch.edu.au}
#' @family {estimation}
#'
#' @examples
#' read_in(path = system.file('extdata',package='concentr8r'),
#' exp_type = list(exp=c("PROF_URINE_NOESY")),
#' n_spec='multiple')
#' lnw <- lw(X, ppm, sh = c(-0.1, 0.1), meta$a_SF01)
#' bad <- which(lnw>1)
#' if (length(bad)!=0){cat("The spectra", bad, "have line widths over 1")
#' } else {cat("All Spectra have line widths under 1")}
lw <- function (X, ppm, sh = c(-0.1, 0.1), sf){
  # Full width at half maximum (line width) of the singlet located in the
  # chemical-shift window `sh`, computed for each spectrum (row of X) and
  # converted to Hz by multiplying with the spectrometer frequency `sf`.
  idx <- get_idx(sh, ppm)
  # Direction of the ppm axis (ascending or descending)
  step.dir <- sign(diff(ppm[seq_len(2)]))
  widths <- apply(X[, idx], 1, function(spec, pp = ppm[idx], as = step.dir) {
    # Shift spectra with negative intensities up so the baseline is >= 0
    if (min(spec) < 0) {
      spec <- spec + abs(min(spec))
      baseline <- 0
    } else {
      baseline <- min(spec)
    }
    half.height <- baseline + 0.5 * (max(spec) - baseline)
    # Interpolate onto a fine grid and locate the half-height crossings
    interp <- approxfun(pp, spec)
    grid <- seq(pp[1], pp[length(pp)], by = 1e-05 * as)
    above <- which(interp(grid) > half.height)
    # ppm distance between the outermost points above half height
    diff(sort(abs(grid[range(above)])))
  })
  widths * sf
}
|
3f673f4848c3c29418d6da2b27f4249e2c5896ff | 1e45d64203edd6d5125980bf23db3daedc9da89d | /sources/framework/VEModel/inst/advanced/02-running.R | 3ab461dad19df6f05854f3c3c2b93c8ffc9fe2c5 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"Unlicense",
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] | permissive | VisionEval/VisionEval-Dev | 5c1600032307c729b96470355c40ef6cbbb9f05b | 701bf7f68d94bf1b4b73a0dfd622672a93d4af5f | refs/heads/development | 2023-08-19T17:53:55.037761 | 2023-08-15T12:33:50 | 2023-08-15T12:33:50 | 144,179,471 | 6 | 34 | Apache-2.0 | 2023-09-07T20:39:13 | 2018-08-09T16:44:22 | R | UTF-8 | R | false | false | 1,093 | r | 02-running.R | ### running.R
# Walk through running models
# Load VEModel package (in effect, the visioneval environment)
require(VEModel)
# Assuming you've done install.R and have some models around
mod <- openModel("VERSPM-run")
print(mod) # Should say "Run complete"
mod$run() # basic instruction to run the model
# but it does nothing here since "mod" has already run
mod$run("reset") # Throws away the results and runs it all again
# Moves the results into an archive folder then reruns everything
mod$run("save")
# explicit version of the default action:
# try re-running everything that is not already "Run Complete"
# Use this if you've added a stage or scenario just to run the new stuff
mod$run("continue")
# let's look at a multi-stage model
# Install if need be:
# mod.pop <- installModel("VERSPM",var="pop")
mod.pop <- openModel("VERSPM-pop")
mod.pop$run() # just run it - one stage at a time
# Continue with 03-structure.R to learn about the parts of a VisionEval model
# See model-stages.R for more information on model stages
# See scenarios.R for more information on scenarios |
bffffb46916710eab534510a001a726af61bf15a | f43ff1e09138649558c2e90a75bd2d4f3cbbdbb6 | /source/Windows/R-Portable-Win/library/zeallot/doc/unpacking-assignment.R | 9d6e73dd5477c8ded840ee13434992e6e57d54f2 | [
"MIT",
"CC-BY-3.0",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | romanhaa/Cerebro | 5b2d9371403c52f60341894f84cd0f6a006cc930 | 946ed178c986027d60af6013e63d1fc51ae8b371 | refs/heads/master | 2022-12-02T15:49:57.705873 | 2021-11-20T11:47:12 | 2021-11-21T17:09:37 | 164,686,297 | 87 | 23 | MIT | 2022-11-10T18:21:44 | 2019-01-08T16:09:59 | HTML | UTF-8 | R | false | false | 5,050 | r | unpacking-assignment.R | ## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
library(zeallot)
## ------------------------------------------------------------------------
c(lat, lng) %<-% list(38.061944, -122.643889)
## ------------------------------------------------------------------------
lat
lng
## ------------------------------------------------------------------------
c(lat, lng) %<-% c(38.061944, -122.643889)
lat
lng
## ------------------------------------------------------------------------
c(min_wt, q1_wt, med_wt, mean_wt, q3_wt, max_wt) %<-% summary(mtcars$wt)
min_wt
q1_wt
med_wt
mean_wt
q3_wt
max_wt
## ---- error=TRUE---------------------------------------------------------
c(stg1, stg2, stg3) %<-% list("Moe", "Donald")
## ---- error=TRUE---------------------------------------------------------
c(stg1, stg2, stg3) %<-% list("Moe", "Larry", "Curley", "Donald")
## ------------------------------------------------------------------------
#
# A function which returns a list of 2 numeric values.
#
coords_list <- function() {
list(38.061944, -122.643889)
}
c(lat, lng) %<-% coords_list()
lat
lng
## ------------------------------------------------------------------------
#
# Convert cartesian coordinates to polar
#
to_polar = function(x, y) {
c(sqrt(x^2 + y^2), atan(y / x))
}
c(radius, angle) %<-% to_polar(12, 5)
radius
angle
## ------------------------------------------------------------------------
c(inter, slope) %<-% coef(lm(mpg ~ cyl, data = mtcars))
inter
slope
## ---- eval = require("purrr")--------------------------------------------
safe_log <- purrr::safely(log)
## ---- eval = require("purrr")--------------------------------------------
pair <- safe_log(10)
pair$result
pair$error
## ---- eval = require("purrr")--------------------------------------------
pair <- safe_log("donald")
pair$result
pair$error
## ---- eval = require("purrr")--------------------------------------------
c(res, err) %<-% safe_log(10)
res
err
## ------------------------------------------------------------------------
c(mpg, cyl, disp, hp) %<-% mtcars[, 1:4]
head(mpg)
head(cyl)
head(disp)
head(hp)
## ------------------------------------------------------------------------
quartet <- lapply(1:4, function(i) anscombe[, c(i, i + 4)])
c(an1, an2, an3, an4) %<-% lapply(quartet, head, n = 3)
an1
an2
an3
an4
## ------------------------------------------------------------------------
c(a, c(b, d), e) %<-% list("begin", list("middle1", "middle2"), "end")
a
b
d
e
## ---- error=TRUE---------------------------------------------------------
c(a, c(b, d, e), f) %<-% list("begin", list("middle1", "middle2"), "end")
## ------------------------------------------------------------------------
c(ch1, ch2, ch3) %<-% "abc"
ch1
ch2
ch3
## ------------------------------------------------------------------------
c(y, m, d) %<-% Sys.Date()
y
m
d
## ------------------------------------------------------------------------
f <- lm(mpg ~ cyl, data = mtcars)
c(fcall, fterms, resids, ...rest) %<-% summary(f)
fcall
fterms
head(resids)
## ------------------------------------------------------------------------
str(rest)
## ---- error = TRUE-------------------------------------------------------
c(fcall, fterms, resids, rest) %<-% summary(f)
## ------------------------------------------------------------------------
c(...skip, e, f) %<-% list(1, 2, 3, 4, 5)
skip
e
f
## ------------------------------------------------------------------------
c(begin, ...middle, end) %<-% list(1, 2, 3, 4, 5)
begin
middle
end
## ------------------------------------------------------------------------
c(min_wt, ., ., mean_wt, ., max_wt) %<-% summary(mtcars$wt)
min_wt
mean_wt
max_wt
## ------------------------------------------------------------------------
c(begin, ..., end) %<-% list("hello", "blah", list("blah"), "blah", "world!")
begin
end
## ------------------------------------------------------------------------
c(begin, ., ...middle, end) %<-% as.list(1:5)
begin
middle
end
## ------------------------------------------------------------------------
nums <- 1:2
c(x, y) %<-% tail(nums, 2)
x
y
## ---- error = TRUE-------------------------------------------------------
c(x, y, z) %<-% tail(nums, 3)
## ------------------------------------------------------------------------
c(x, y, z = NULL) %<-% tail(nums, 3)
x
y
z
## ------------------------------------------------------------------------
c(first, last) %<-% c("Ai", "Genly")
first
last
c(first, last) %<-% c(last, first)
first
last
## ------------------------------------------------------------------------
cat <- "meow"
dog <- "bark"
c(cat, dog, fish) %<-% c(dog, cat, dog)
cat
dog
fish
## ---- eval = require("magrittr")-----------------------------------------
library(magrittr)
mtcars %>%
subset(hp > 100) %>%
aggregate(. ~ cyl, data = ., FUN = . %>% mean() %>% round(2)) %>%
transform(kpl = mpg %>% multiply_by(0.4251)) %->%
c(cyl, mpg, ...rest)
cyl
mpg
rest
|
b134f67304cac1ec899c49a85efcd2d99692de2e | 0f5fc517c7beb08b4a11fd85749d0d1a50c28f5b | /man/sum_A_mat.Rd | 5cede3fab72b40e67a988794012ac5e1981813ea | [] | no_license | sqyu/ZiDAG | 544de482c6e7a3e35968408826c6136e57d2cb25 | d893be61690031b13ced18b18a7e7c98d4b78804 | refs/heads/master | 2023-02-13T19:04:25.840259 | 2021-01-13T08:16:53 | 2021-01-13T08:16:53 | 239,381,238 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,252 | rd | sum_A_mat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zero_dist.R
\name{sum_A_mat}
\alias{sum_A_mat}
\title{Calculate the a parameter in the conditional Hurdle density of the child node given the parents for multiple samples, assuming a is linear in the parents and their indicators.}
\usage{
sum_A_mat(aa, Vo, Yo)
}
\arguments{
\item{aa}{A numerical vector of length \code{2*ncol(Yo)+1}. See details.}
\item{Vo}{A numerical matrix of the same dimension as \code{Yo} indicating if each entry in \code{Yo} is non-zero, i.e. \code{Vo = (Yo != 0)}.}

\item{Yo}{A numerical matrix of samples for the parent nodes (regressors), with one row per sample and one column per parent node.}
}
\value{
A number, the mean log probability.
}
\description{
Calculate the \code{a} parameter in the conditional Hurdle density of the child node given the parents for multiple samples, assuming \code{a} is linear in the parents and their indicators.
}
\details{
Matrix version of \code{sum_a()}. See examples.
}
\examples{
set.seed(1)
n <- 100; p_others <- 10
Vo <- matrix(stats::rbinom(n*p_others, 1, 0.8), nrow=n, ncol=p_others)
Yo <- matrix(stats::rnorm(n*p_others) * Vo, nrow=n, ncol=p_others)
aa <- rnorm(2*p_others+1)
sum_A_mat(aa, Vo, Yo) - sapply(1:n, function(i){sum_a(aa, Vo[i,], Yo[i,])})
}
|
0b965c968b73dee92db38084aba7eb7830417792 | 9b6539835ebff3eed1f23930aced8c5de69506d0 | /Semana 5.R | bab0c5500e4fae05989559f9ffb30c4f54fbadfc | [] | no_license | tcastrop/Practica-1-Tatiana-Castro | 6ef0629927df0c1521f850cc2177e5d1f7af28d3 | a55a7fbf88f1b3e4b937aca69ebb57001c1e9548 | refs/heads/main | 2023-06-19T16:12:05.886196 | 2021-07-19T04:06:03 | 2021-07-19T04:06:03 | 377,016,683 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,276 | r | Semana 5.R | library("DBI")
library("odbc")
library("dbplyr")
DB_BikeStores <- DBI::dbConnect(odbc::odbc(),
Driver = "SQL Server",
Server = "dbtedamssql.uh.ac.cr",
Database = "BikeStores",
UID = rstudioapi::askForPassword("Database user"),
PWD = rstudioapi::askForPassword("Database password"),
Port = 1433)
Stores<-dbGetQuery(DB_BikeStores,"select * from [sales].[order_items]")
View(Stores)
#PRACTICA 4
#Conexiones SQL
DB_Northwind <- DBI::dbConnect(odbc::odbc(),
Driver = "SQL Server",
Server = "dbtedamssql.uh.ac.cr",
Database = "Northwind",
UID = rstudioapi::askForPassword("Database user"),
PWD = rstudioapi::askForPassword("Database password"),
Port = 1433)
DB_Northwind=dbGetQuery(DB_Northwind,"select * from [dbo].[customers]")
View(DB_Northwind)
#Dplyr
Agrupacion = DB_Northwind%>%group_by(state,City)%>%
summarise(Cantidad_Ciudad=n())
View(Ciudad)
|
0680addbf54d64f93eaa4bc92b230078e09109ce | 3516abb9471d95326c21c83ae1c6dbaf29a90700 | /time_series/WaterDemandAnalysis.R | 6d63ae173b18d5827e734c5b11c34baa72dde3a4 | [] | no_license | rgrosskopf/R_scripts | f06200375f72342fc7883baa879ea1c97532b929 | 2a29785d8565003fb5bd8617eab718b8cdf384dc | refs/heads/master | 2020-04-16T09:22:15.369721 | 2019-01-13T04:00:06 | 2019-01-13T04:00:06 | 165,461,717 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,523 | r | WaterDemandAnalysis.R | #-------------------------------------------------------------------------------------#
#Author: Ryan Grosskopf
#Date: July 20, 2016
#Description: Perform multi-seasonal decomposition of water demand time series to determine base demand
#pattern and to evaluate the distribution of the model residuals. This distribution will be used
#in a Monte Carlo model to perturb the base demand pattern. DMA = Demand Management Area.
#-------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------#
####Fit time series model####
#-------------------------------------------------------------------------------------#
#install.packages("forecast", "fitdistrplus", "tseries", "actuar")
#load required packages
library(forecast)
library(fitdistrplus)
library(tseries)
library(actuar)
#read in data file (hourly flow observations; the "Units" column is used below)
DMA.df<-read.csv(file="TSExt_TWS-PMA05_10120_02-hourly.csv", header=T, sep=",", stringsAsFactors=F)
#setup time series objects: daily cycle (frequency = 24 hours)
DMA.ts<-ts(data=DMA.df$Units, frequency=24)
boxplot(DMA.ts~cycle(DMA.ts)) #examine daily seasonality
#training window = first 137 days; the test window below is expressed in weeks
DMA.ts.train <- window(DMA.ts,1,137)
DMA.msts <- msts(DMA.ts.train, seasonal.periods=c(24, 168)) #time step in weeks instead of days.
DMA.ts.test <- window(ts(data=DMA.df$Units, frequency=168),20.5,22) #time step in weeks to match msts output
#fit trend-less TBATS model (daily and weekly seasonality, no trend term)
DMAtrain.fit.tbats <- tbats(DMA.msts, use.trend=FALSE)
summary(DMAtrain.fit.tbats)
plot(DMAtrain.fit.tbats)
#plot forecast and withheld test set
plot(forecast(DMAtrain.fit.tbats, 192, level=c(97.5)), main="TBATS model fit", sub="16 Sep 2012 through 22 Sept 2012", ylab="Flow", xlab="Weeks", xlim=c(19,22))
lines(DMA.ts.test)
legend("bottomright",inset=.01, c("Model", "Actual"), lty=1, col=c("blue","black"))
#calculate numeric fit statistics and accuracy summary metrics
accuracy(forecast(DMAtrain.fit.tbats, h=192),DMA.ts.test)
# NOTE(review): DMAtrain80.fit.tbats is never created in this script --
# presumably a TBATS fit on an 80% training split from an earlier session;
# confirm before running.
accuracy(forecast(DMAtrain80.fit.tbats, h=(145*.2*24)),DMA.ts.test)
#-------------------------------------------------------------------------------------#
####Determine distribution and parameters for stochastic component of demand model####
#-------------------------------------------------------------------------------------#
#calculate normalized and raw errors
# NOTE(review): DMAfull.fit.tbats is not defined above -- presumably a TBATS
# model fit on the full series (not just the training window); confirm.
res.raw<-as.numeric(DMAfull.fit.tbats$errors)
res.norm<-as.numeric(DMAfull.fit.tbats$errors) / mean(DMA.msts)
#check error distribution
hist(res.norm, 100, col="black", main="TWS-PM A05_10120_02, TBATS Residuals")
#examine fit plots
descdist(res.norm, boot=1000)
plotdist(DMA.df$Units)
qqnorm(res.norm)
qqline(res.norm)
# NOTE(review): DMA.fit.tbats is also undefined in this script; confirm which
# fitted model the Shapiro-Wilk test and the export further below should use.
shapiro.test(residuals(DMA.fit.tbats))
#fit candidate residual distributions, review results and plot
dist.norm<-fitdist(res.norm, "norm")
dist.logistic<-fitdist(res.norm, "logis")
plot(dist.norm)
plot(dist.logistic)
summary(dist.norm)
summary(dist.logistic)
#generate goodness of fit statistics
gofstat(list(dist.norm, dist.logistic), fitnames = c("norm","logistic"))
#generate comparison plots (density, Q-Q and P-P overlays of both fits)
denscomp(list(dist.norm,dist.logistic),addlegend=TRUE, xlab="Normalized Residual Magnitude", legendtext=c("Normal","Logistic"),xlegend = "topright", breaks=40)
qqcomp(list(dist.norm,dist.logistic),legendtext=c("Normal","Logistic"), main="Residuals Distribution Fitting",xlegend = "bottomright",line01 = TRUE, fitpch=16)
ppcomp(list(dist.norm,dist.logistic),legendtext=c("Normal","Logistic"), main="Residuals Distribution Fitting",xlegend = "bottomright",line01 = TRUE, fitpch=16)
#Export TBATS model component output if desired.
write.table(as.data.frame(tbats.components(DMA.fit.tbats)), file = "C:\\Users\\USER\\FILEPATH\\TBATS_components.csv", sep = ",", col.names = NA, qmethod = "double")
#-------------------------------------------------------------------------------------#
####FIT EVENT PARAMETERS####
#-------------------------------------------------------------------------------------#
##water required for fires, uses log-logistic distribution
FireWaterReq.dist<-fitdist(DMA.df$Units, "llogis", start = list(shape = 1, scale = 500))
summary(FireWaterReq.dist)
##break duration distribution, discrete distributions
# NOTE(review): BreakDuration is not loaded or created in this script; it must
# already exist in the workspace before this section is run.
#fit Poisson distribution
BreakDuration.pois <- fitdist(BreakDuration,"pois")
summary(BreakDuration.pois)
plot(BreakDuration.pois)
#fit Geometric distribution
BreakDuration.geom <- fitdist(BreakDuration,"geom")
summary(BreakDuration.geom)
plot(BreakDuration.geom)
#compare fit statistics
gofstat(list(BreakDuration.pois, BreakDuration.geom),fitnames = c("Poisson","Geometric"))
|
e931de35f54f96521f64c68fa8334d4da2454688 | 7479ff31fafab560cc195ee58fed193da611799c | /cachematrix.R | 20e784f514fa7ec0b7884239db920986ad8a7ec2 | [] | no_license | cpmontoya/ProgrammingAssignment2 | 12ca93153558f0ad32df4a7d2e148159cc788a54 | 0402552d177c39c6085ce00b40e0c7297a901585 | refs/heads/master | 2021-01-18T10:26:21.042811 | 2014-05-25T05:40:36 | 2014-05-25T05:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,268 | r | cachematrix.R | ## The functions below can be used to create a matrix and solve for its inverse
## the inverse matrix is cached for later retrieval. Caching the matrix makes
## use of R's Lexical Scoping
## makeCacheMatrix builds a cache-aware wrapper around a matrix. It returns a
## list of four closures sharing one environment: set/get replace and read the
## stored matrix, while setinverse/getinverse store and retrieve a cached
## inverse. The cache is reset whenever the matrix changes.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set <- function(temp) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- temp
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached_inv <<- solve
  }
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve returns the inverse of a cache-aware matrix produced by
## makeCacheMatrix. A previously cached inverse is reused (announced via a
## message); otherwise the inverse is computed with solve(), stored in the
## cache and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, remember it, then return it.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cache data")
  }
  cached
}
|
f2c604b81977172ea75e6c9ca83bc2129cb9b5c4 | 88311cfdacc0ada10cfb6e05c35411d3965dc582 | /solution/2-similarity/part2b/visualise.R | c30740da80dc9f829e790d7e7d4b7fd526f1bc92 | [] | no_license | g-eorge/CCPDS-02 | 7aab360f3c77c7c4a77bc047a7d18ee8c6dc1b95 | 6e3095395723f7d679349595d6ed8f098504a1b8 | refs/heads/master | 2021-05-27T10:18:52.830892 | 2014-07-01T01:47:09 | 2014-07-01T01:47:09 | 19,005,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,833 | r | visualise.R | #! /usr/bin/env Rscript
# Dependencies
# install.packages("ggplot2")
# install.packages("reshape")
# Load packages
library(ggplot2)
library(reshape)
# This script compares three California regions against the distribution of all
# regions for several groups of columns in the vectorised region data, writing
# one z-score plot per group under exploring/plots/.
# The regions that are least like the others
provider_ids <- c('CA - San Jose', 'CA - Contra Costa County', 'CA - San Mateo County')
# Plot colours for the regions
# NOTE(review): assigning into c() by name turns scale_colours/scale_shapes
# into named lists rather than named vectors; scale_*_manual appears to accept
# this, but a named vector would be the conventional form -- confirm.
scale_colours <- c()
scale_colours[[provider_ids[1]]] <- '#1AA794'
scale_colours[[provider_ids[2]]] <- '#F5435D'
scale_colours[[provider_ids[3]]] <- '#A532FF'
# Plot shapes for the regions
scale_shapes <- c()
scale_shapes[[provider_ids[1]]] <- 15
scale_shapes[[provider_ids[2]]] <- 16
scale_shapes[[provider_ids[3]]] <- 17
numcols = 523 # The number of columns the vectorizer produced - 1
cls <- c("character", rep("numeric",numcols))
# Read in the data file (tab separated; the first column holds the region names
# and becomes the row names, so provider_ids can index rows directly below)
df <- read.csv("vector_regions.txt", header=F, stringsAsFactors=F, colClasses=cls, row.names=1, sep="\t", na.strings="NA")
## Plot number of providers and procedures types each region carries out (Providers, DRG, APC, Total)
counts <- df[,1:3] # Subset the procedure type count columns
colnames(counts) <- c("provider_count", "drg_count", "apc_count")
counts$total_count <- counts$drg_count + counts$apc_count # Compute a total column
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot with a box plot for comparison
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_boxplot(alpha=0.4, size=0.5, color="grey") +
  geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
  scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
  xlab("region counts") + ylab("z-score")
# Output the plot to a file (ggsave() with no plot argument saves the most
# recently displayed plot, i.e. the one auto-printed just above)
ggsave(file = "exploring/plots/region_counts.png", width = 11, height = 8, dpi = 300)
## Plot the number of procedures each region carries out for each procedure
counts <- df[,seq(4,ncol(df),4)] # Subset the procedure count column for each procedure
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
  scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
  xlab("region procedure counts") + ylab("z-score") +
  theme(axis.text.x = element_blank())  # too many columns to label the x axis
# Output the plot to a file
ggsave(file = "exploring/plots/procedure_counts.png", width = 11, height = 8, dpi = 300)
## Plot the number of services of each procedures each region carries out
counts <- df[,seq(5,ncol(df),4)] # Subset the service count column for each procedure
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
  scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
  xlab("region service counts") + ylab("z-score") +
  theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/service_counts.png", width = 11, height = 8, dpi = 300)
## Plot the average charges for procedures region providers carried out
counts <- df[,c(seq(6,ncol(df),4))] # Subset the charges columns for each region
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
  scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
  xlab("region procedure avg charges") + ylab("z-score") +
  theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/charges.png", width = 11, height = 8, dpi = 300)
## Plot the average payments for procedures region providers carried out
counts <- df[,c(seq(7,ncol(df),4))] # Subset the payments columns for each region
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
  scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
  xlab("region procedure avg payments") + ylab("z-score") +
  theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/payments.png", width = 11, height = 8, dpi = 300)
## Plot everything in one plot
scaled_all <- data.frame(scale(df, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_all[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_all), aes(x=variable, y=value))
p + geom_point(color='#202020', size=1, alpha=0.2) +  # all regions as a faint background
  geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
  geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
  scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
  xlab("region procedure counts, service counts, avg charges and avg payments") + ylab("z-score") +
  theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/all.png", width = 11, height = 8, dpi = 300)
|
1c47c4de6b12200145e2bdaa9fc2cf6b41bc063a | e3259d8f489b093b246fe2fd0c4fb6999d6466bf | /R/talbox.camp.R | 9db9a7c0d5b25f9cd9eb0dc3293b9207ab989ac3 | [] | no_license | Franvgls/CampR | 7baf0e8213993db85004b95d009bec33570c0407 | 48987b9f49ea492c043a5c5ec3b85eb76605b137 | refs/heads/master | 2023-09-04T00:13:54.220440 | 2023-08-22T14:20:40 | 2023-08-22T14:20:40 | 93,841,088 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,030 | r | talbox.camp.R | #' Abundancia estratificada para un rango de talla
#'
#' Extracts the abundance data for a species, or set of species, within a given
#' length range from the length distributions, and plots the length
#' distribution per survey/year as a box plot or violin plot.
#' @param gr Species group: 1 fish, 2 crustaceans, 3 molluscs, 4 echinoderms, 5 invertebrates
#' @param esp Species code, numeric or a three-character string. 999 for all species in the group
#' @param camps Surveys (years) to plot: Demersales "NXX", Porcupine "PXX", Arsa spring "1XX" and Arsa autumn "2XX"
#' @param dns Source database: Porcupine "Porc" or "Pnew", Cantabrian Sea "Cant", Gulf of Cadiz "Arsa" (the latter only to extract data for IBTS, no graphics)
#' @param notch Passed to \code{graphics::boxplot}: if TRUE notched boxes are drawn
#' @param outline Passed to \code{graphics::boxplot}: if TRUE outliers are drawn
#' @param varwidth Passed to \code{graphics::boxplot}: if TRUE box width reflects sample size
#' @param cor.time If TRUE corrects abundances for haul duration
#' @param boxplot If TRUE draws a box plot, if FALSE a violin plot via \code{vioplot::vioplot}
#' @param excl.sect Sectors to exclude, as character; both sectors and strata can be chosen
#' @param years If TRUE shows data by year, if FALSE by survey (following the format of the camps parameter)
#' @param mult Default 100; scales up decimal abundances so no length class is lost when lengths are replicated for plotting
#' @param ti If TRUE the title shows the species name; may also be a character title or a list with label/font/cex components
#' @param las Orientation of the axis labels: 2 perpendicular to the axis, better for year labels
#' @param es If TRUE graph in Spanish, if FALSE in English
#' @param bw If TRUE boxes are white (black-and-white plot), if FALSE light blue
#' @param idi Language of the species name in the title: "l" scientific name (italics)
#' @param cexleg Character expansion for the title text
#' @return Draws a box plot (or violin plot) of the length distribution per
#'   survey/year. (NOTE(review): earlier documentation described a returned
#'   named vector of stratified abundance, which this code does not appear to
#'   produce -- confirm.)
#' @examples talbox.camps(2,19,Psh,"Porc",varwidth=T,notch=T)
#' @seealso {\link{dattal.camp}}
#' @export
talbox.camps<- function(gr,esp,camps,dns="Cant",notch=TRUE,outline=FALSE,varwidth=T,cor.time=TRUE,boxplot=T,excl.sect=NA,years=TRUE,mult=100,ti=TRUE,las=2,es=FALSE,bw=TRUE,idi="l",cexleg=1) {
  options(scipen=2)  # avoid scientific notation in axis annotation
  esp<-format(esp,width=3,justify="r")
  if (length(esp)>1) warning("Seguro que tiene sentido mezclar más de una especie para sacar el rango de talla")
  # Stack the length distributions of all requested surveys, tagging each row
  # with its survey code.
  dumb<-data.frame(dattal.camp(gr,esp,camps[1],dns,cor.time=cor.time,excl.sect=excl.sect,sex=FALSE),camp=camps[1])
  if (length(camps)>1) {
    for (i in camps[2:length(camps)]) {
      dumb<-rbind(dumb,data.frame(dattal.camp(gr,esp,i,dns,cor.time=cor.time,excl.sect=excl.sect,sex=FALSE),camp=i))
    }
  }
  if (years) dumb$camp<-camptoyear(dumb$camp)
  # Measurement unit for the length axis, looked up in the species master table.
  increm<-unid.camp(gr,esp)["INCREM"]
  medida<-ifelse(unid.camp(gr,esp)["MED"]==1,"cm",ifelse(increm==5,"x5 mm","mm"))
  # NOTE(review): ax holds both the length label and an ind/haul expression,
  # but the whole vector is passed as ylab below; confirm only the length
  # label is meant to be rendered.
  if (es) {ax<-c(paste0("Talla (",medida,")"),expression("Ind"%*%"lan"^-1))}
  else {ax<-c(paste0("Length (",medida,")"),expression("Ind"%*%"haul"^-1))}
  # Title descriptor: logical TRUE -> species name from buscaesp(), a list is
  # taken as given, anything else is used directly as the label text.
  if (is.logical(ti)) {
    if (ti) {tit<-list(label=buscaesp(gr,esp,id=idi),font=ifelse(idi=="l",4,2),cex=1*cexleg)}
    else {tit<-NULL}
  }
  else {
    if(is.list(ti)) tit<-ti
    else tit<-list(label=ti)
  }
  # Each length class is replicated numero*mult times so boxplot/vioplot see
  # the full length-frequency; +.5 centres each length class.
  # NOTE(review): tit$font.main is always NULL (the list stores 'font'), and
  # 'if (ti)' below errors when ti is a character or a list -- confirm intended.
  if (boxplot) {boxplot(rep(dumb$talla+.5,dumb$numero*mult)~rep(dumb$camp,dumb$numero*mult),na.rm=T,
                        main=tit$label,font.main=tit$font.main,xlab=ifelse(years,ifelse(es,"Año","Year"),ifelse(es,"Campaña","Survey")),
                        ylab=ax,notch=notch,outline=outline,varwidth=varwidth,col=ifelse(bw,"white","lightblue"),
                        las=las)
  if (ti) title(main=tit$label,font=tit$font,cex=tit$cex)
  }
  else vioplot::vioplot(rep(dumb$talla+.5,dumb$numero*mult)~rep(dumb$camp,dumb$numero*mult),main=tit$label,
                        xlab=ifelse(years,ifelse(es,"Año","Year"),ifelse(es,"Campaña","Survey")),
                        ylab=ifelse(es,"Talla (cm)","Length (cm)"),las=las)
}
|
1d73dfe49bf4b3f1e228e1ab0113701b30e0fc05 | 850406eebc34d582fe8603a9ed79b6bcf613f132 | /h2o.saveModel.Rd | 6f3262797cfc9e67d2f65f0056661ce0f5bed1e7 | [
"MIT"
] | permissive | sanjaybasu/sdh_t2dm | 7652a7452cc271f345a9bf9303d77f1bf3efa73c | 5b1d5752a056898397bff7e51c6e1aa180feccc2 | refs/heads/master | 2020-03-29T04:00:35.481658 | 2019-04-19T23:11:19 | 2019-04-19T23:11:19 | 149,511,195 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,217 | rd | h2o.saveModel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export.R
\name{h2o.saveModel}
\alias{h2o.saveModel}
\title{Save an H2O Model Object to Disk}
\usage{
h2o.saveModel(object, path = "", force = FALSE)
}
\arguments{
\item{object}{an \linkS4class{H2OModel} object.}
\item{path}{string indicating the directory the model will be written to.}
\item{force}{logical; if \code{TRUE}, overwrite a file that already exists at the target path, otherwise the save fails.}
}
\description{
Save an \linkS4class{H2OModel} to disk. (Note that ensemble binary models
can be saved.)
}
\details{
In the case of existing files \code{force = TRUE} will overwrite the file.
Otherwise, the operation will fail.
}
\examples{
\dontrun{
# library(h2o)
# h2o.init()
# prostate.hex <- h2o.importFile(path = paste("https://raw.github.com",
# "h2oai/h2o-2/master/smalldata/logreg/prostate.csv", sep = "/"),
# destination_frame = "prostate.hex")
# prostate.glm <- h2o.glm(y = "CAPSULE", x = c("AGE","RACE","PSA","DCAPS"),
# training_frame = prostate.hex, family = "binomial", alpha = 0.5)
# h2o.saveModel(object = prostate.glm, path = "/Users/UserName/Desktop", force=TRUE)
}
}
\seealso{
\code{\link{h2o.loadModel}} for loading a model to H2O from disk
}
|
7d6a0819c0d79242cd5be7466046cb00d2d3d965 | 38098d293d364f8df734bc187b701ed6df160a8d | /refuge_central.R | 7ab9fc67f70243da61745bbda563384e705798a4 | [] | no_license | RyanXJu/Using-CDmetaPOP-simulate-MPB-population | dbd53d016371cf6f3fc799b9116f0c236927c8a9 | 25c24aaebe7d63611aa1074067b1a6a5861ada3d | refs/heads/master | 2021-10-23T16:08:43.608771 | 2019-03-18T17:36:22 | 2019-03-18T17:36:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | refuge_central.R | #####**********function return central refuge***********####
# Return the linear cell indices of a centred xcen-by-ycen rectangle within a
# col-by-row grid. Cells are numbered (x - 1) * col + y, matching the
# numbering used by the original nested-loop implementation.
#
# Args:
#   col, row   grid dimensions (number of columns / rows)
#   xcen, ycen dimensions of the centred refuge rectangle
#
# NOTE(review): col/2 - xcen/2 + 1 is only a whole number when col and xcen
# have the same parity (likewise row/ycen) -- confirm callers always pass
# matching even/odd sizes.
centre <- function(col, row, xcen, ycen) {
  xcentre <- (col / 2 - xcen / 2 + 1):(col / 2 + xcen / 2)
  ycentre <- (row / 2 - ycen / 2 + 1):(row / 2 + ycen / 2)
  # Vectorised replacement for the original grow-in-a-loop version: outer()
  # builds every (x, y) combination at once, and t() preserves the original
  # ordering (all y for the first x, then the next x, ...).
  as.vector(t(outer(xcentre - 1, ycentre, function(x, y) x * col + y)))
}
####*********end of the function**********#########
# Build the central refuge for this run; col, row, xcen and ycen are not set
# in this file and must already exist in the calling environment.
refuge_centre <- centre(col, row, xcen, ycen)
|
f24ad1c109d96400488bc64ad37863d96237cda7 | 0450df55f943cb5ea0f1484c71bc5e9b37238cd3 | /Lab03/global_copy.R | aac9a274f233946366c5f24e7b316542a8f332b3 | [] | no_license | gjw13/data-visualization | 95ec647ca39b1a279266bcb331329e878d0d07c0 | d695b1c38e5ac849f15863fec80daeff0fd96573 | refs/heads/master | 2020-05-14T09:12:41.131227 | 2020-04-15T13:59:34 | 2020-04-15T13:59:34 | 181,732,829 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,610 | r | global_copy.R | library(tidyverse)
library(lubridate)
library(maps)
# Read in data and clean up.
# The raw file is a stack of monthly blocks: one header row holding the month
# (column 1) and year (column 2), followed by 37 rows of gridded temperature
# anomalies (one row per latitude band).
gridded_data <- read_csv("gridded_data.csv",col_names =F)
number_of_months <- nrow(gridded_data)/37
monthly_data <- vector("list",number_of_months)
# Split the stacked file into one 37-row data frame per month, naming each
# element like "Apr 1975".
for(i in 1:number_of_months){
  monthly_data[[i]] <- gridded_data[((i-1)*37+2):(i*37),] #extract and store temperature anomalies
  # NOTE(review): indexing a tibble cell like this yields a 1x1 data frame
  # rather than a bare number -- ymd() appears to cope, but confirm.
  integer_date <- gridded_data[((i-1)*37+1),2]*100+gridded_data[((i-1)*37+1),1] #get date for current month in yyyymm format
  current_date <- ymd(integer_date ,truncated=1) #convert to Date class
  current_month <- month(current_date, label=T) #extract named month
  current_year <- year(current_date) #extract year
  names(monthly_data)[i] <- paste(current_month, current_year) #paste together named month and year to name data
}
# Grid coordinates for the 37 x 72 five-degree cells; latitudes run north to
# south to match the row order of the raw file.
latitudes <- rev(seq(from=-87.5, to=87.5, by=5))
longitudes <- seq(from=-177.5, to=177.5, by=5)
# Tidy each month in place: attach latitudes, name the 72 anomaly columns by
# longitude, reshape wide -> long, convert the -9999 sentinel to NA and
# rescale the stored values by 1/100.
# (Fixed: removed a redundant single-month pass over "Apr 1975" whose result
# was discarded and immediately recomputed by this loop.)
for(i in seq_len(number_of_months)){
  current_data <- monthly_data[[i]]
  current_data$latitude <- latitudes
  colnames(current_data)[1:72] <- longitudes
  current_data <- gather(current_data, key="longitude",
                         value="TempAnom", -latitude)
  current_data$longitude <- as.numeric(current_data$longitude)
  current_data <- mutate(current_data,
                         TempAnom = ifelse(TempAnom==-9999, NA, TempAnom/100))
  monthly_data[[i]] <- current_data
}
remove(current_data)
|
39320cf3a7523d44d2d37ccfcfefabe9cbbe471d | f8b02de18a21aa42d2035d8d66bebcc6a669473a | /animate_recursive.R | 3df07f5075dd6b8225aa9ce2574735db9d741332 | [] | no_license | fabio-a-oliveira/stock-data-reduction | 925c25e2c9a1201bbabe5bc977f0390c9b6fbd32 | 818511380e935bcd6136b4cbcbbbdda89920888a | refs/heads/master | 2022-12-19T00:03:31.319749 | 2020-09-17T15:58:21 | 2020-09-17T15:58:21 | 283,913,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,132 | r | animate_recursive.R | # Housekeeping -----------------------------------------------------------------
# load libraries
library(tidyverse)
library(ggplot2)
library(gifski)
# runs script with the definition of the pivot points functions and the function
# that creates random stock data (provides rstock() and find.pivot())
source("declaration_of_functions.R")
# Image parameters -------------------------------------------------------------
num_stocks <- 20                  # number of random stock series to animate
max_depth <- 10                   # deepest recursion level shown
num_days <- 4 * 240               # four 240-day "trading years"
frames_per_stock <- max_depth + 2 # raw series + depths 0..max_depth
filenames <- rep("" , frames_per_stock * num_stocks)  # preallocated png names
plot_title <- "Recursive algorithm"
plot_subtitle <- "For each segment, define pivot at point further from reference line, then repeat \nrecursively for each new segment, until all points are within tolerance"
plot_x_label <- "timestamp \n should be any unit of time that captures high frequency behavior"
plot_y_label <- "value \n stock price or any continuous variable"
# Create individual frames -----------------------------------------------------
# For every stock: frame 1 shows the raw series; each later frame overlays the
# pivot points found with one more recursion level plus a +/-10% tolerance band.
for (current_stock in 1:num_stocks){
  # Create mock stock data
  stock <- rstock(duration = num_days,
                  exp_yield = 1 ^ (1/240),
                  se_yield = 1.5 / 100)
  for (current_frame in 1:frames_per_stock){
    if (current_frame == 1) {
      # define object to be plotted
      tidy_stock <- stock
      # create plot (raw series only, same axes as the later frames)
      p <-
        tidy_stock %>%
        ggplot(aes(x = day, y = value),
               color = 'black',
               size = 1) +
        geom_line(size = 1) +
        scale_x_continuous(breaks = seq(-240,num_days+240,60),
                           minor_breaks = seq(-240,num_days+240,20),
                           limits = c(0,num_days)) +
        scale_y_continuous(breaks = seq(0,10,.25),
                           minor_breaks = seq(0,10,.05),
                           limits = c(min(stock$value) * .85,
                                      max(stock$value) * 1.15)) +
        labs(title = plot_title,
             subtitle = plot_subtitle,
             x = plot_x_label,
             y = plot_y_label)
    } else {
      # include is.pivot column
      stock <-
        stock %>%
        mutate(is.pivot = FALSE)
      # calculates the pivot points for the current depth level
      # (frame 2 corresponds to depth 0, frame 3 to depth 1, and so on)
      pivot_points <-
        find.pivot(stock$day,
                   stock$value,
                   tolerance = .1,
                   mode = "Recursive",
                   max.depth = current_frame-2) %>%
        mutate(day = pivot,
               is.pivot = TRUE) %>%
        select(-inclination, - pivot)
      # defines object to be plotted (joins stock data and pivot points in tidy format)
      tidy_stock <-
        bind_rows(stock,pivot_points) %>%
        arrange(day)
      # create plot
      p <-
        tidy_stock %>%
        ggplot(aes(x = day, y = value,
                   color = is.pivot,
                   size = is.pivot,
                   alpha = is.pivot)) +
        geom_line(show.legend = FALSE) +
        scale_color_manual(values = c('black','red')) +
        scale_alpha_manual(values = c(1,.7)) +
        scale_size_manual(values = c(1,1)) +
        # draws points at each pivot point
        geom_point(data = filter(tidy_stock,is.pivot==TRUE),
                   inherit.aes = FALSE,
                   mapping = aes(x = day,
                                 y = value),
                   show.legend = FALSE,
                   size = 4,
                   color = 'red') +
        # draws green band with fixed width around line connecting pivot points
        geom_ribbon(data = filter(tidy_stock,is.pivot==TRUE),
                    inherit.aes = FALSE,
                    mapping = aes(x = day,
                                  ymin = value * (1-.1),
                                  ymax = value * (1+.1)),
                    alpha = .05,
                    fill = 'green',
                    outline.type = 'both',
                    color = 'green',
                    linetype = 3) +
        # defines limits and breaks to scales
        scale_x_continuous(breaks = seq(-240,num_days+240,60),
                           minor_breaks = seq(-240,num_days+240,20),
                           limits = c(0,num_days)) +
        scale_y_continuous(breaks = seq(0,10,.25),
                           minor_breaks = seq(0,10,.05),
                           limits = c(min(stock$value) * .85,
                                      max(stock$value) * 1.15)) +
        # defines labels
        labs(title = plot_title,
             subtitle = plot_subtitle,
             x = plot_x_label,
             y = plot_y_label)
    } # if (current_frame == 1)
    # add current frame to list of file names
    filenames[(current_stock-1)*frames_per_stock + current_frame] <-
      paste("stock",
            formatC(current_stock,width=3,format="d",flag="0"),
            "frame",
            formatC(current_frame,width=4,format="d",flag="0"),
            ".png",
            sep="")
    # print progress status
    print(paste("Creating frame #",
                current_frame," of ", frames_per_stock,
                " for stock #", current_stock,
                " of ", num_stocks,
                sep=""))
    # saves file with current frame
    # ("unit" relies on R's partial matching of ggsave's "units" argument)
    ggsave(plot = p,
           filename =
             filenames[(current_stock-1)*frames_per_stock + current_frame],
           device = "png",
           path = "images",
           dpi = "screen",
           width = 20,
           height = 20,
           unit = "cm")
  } # for (current_frame in 1:frames_per_stock)
} # for (current_stock in 1:num_stocks)
# Render gif animation ---------------------------------------------------------
# renders gif from list of created files (one second per frame, looping)
gifski(png_files = file.path("images",filenames),
       gif_file = tempfile(pattern = "animation_recursive_",
                           fileext = ".gif",
                           tmpdir = "images"),
       delay = 1,
       loop = TRUE,
       progress = TRUE)
# removes files with individual images
file.remove(file.path("images",filenames)) |
50d3c712582c0646d337a35c73a59b258b3c10ae | 30427a7015befa1c2923311622f9a60c0b500050 | /man/frescalo.Rd | 015dafd32146e328fbe3b82c6f43ba55ca26580d | [] | no_license | ealarsen/sparta | 35c5fe7d3b84077db871e2788f768c63ba233945 | 3f80893fca52de551f7674e3912e60e03df7f6a5 | refs/heads/master | 2021-01-23T01:51:11.148474 | 2014-11-19T10:17:59 | 2014-11-19T10:17:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,480 | rd | frescalo.Rd | \name{frescalo}
\alias{frescalo}
\title{Frescalo trend analysis}
\usage{
frescalo(Data = NULL, species_to_include = NULL,
ignore.ireland = F, ignore.channelislands = F,
sinkdir = NULL, time_periods = NULL, plot_fres = FALSE,
Fres_weights = "LC", non_benchmark_sp = NULL,
fres_site_filter = NULL, phi = NULL, alpha = NULL,
trend_option = "arithmetic", NYears = 10,
year_col = NA, site_col = NA, sp_col = NA,
start_col = NA, end_col = NA)
}
\arguments{
\item{Data}{A dataframe object or string giving the file
path to the location of data (either .rdata or .csv).
This should consist of rows of observations and columns
indicating the species and location as well as either the
year of the observation or columns specifying the start
and end dates of the observation. If \code{NULL}
(default) the user is prompted to select a .csv or .rdata
file. If using a dataframe it is important that date
columns are in a date format. If using a .csv dates are
assumed to be in the format dd/mm/yyyy.}
\item{species_to_include}{Optionally a character vector
listing the names of species to be used. Species not in
your list are ignored. This is useful if you are only
interested in a subset of species.}
\item{ignore.ireland}{Logical, if \code{TRUE} Irish
hectads are removed. Default is \code{FALSE}}
\item{ignore.channelislands}{Logical, if \code{TRUE}
channel island hectads are removed. Default is
\code{FALSE}}
\item{sinkdir}{String giving the output directory for
results}
\item{time_periods}{A dataframe object with two columns.
The first column contains the start year of each time
period and the second column contains the end year of
each time period. Time periods should not overlap.}
\item{plot_fres}{Logical, if \code{TRUE} maps are
produced by Frescalo. Default is \code{FALSE}. CURRENTLY
ONLY WORKS FOR UK GRID-REFERENCE DATA}
\item{Fres_weights}{'LC' specifies a weights file based
on landcover data for the UK and 'VP' uses a weights file
based on vascular plant data for the UK; both are
included in the package. Alternatively, a custom weights
file can be given as a data.frame. This must have three
columns: target cell, neighbour cell, weight.}
\item{non_benchmark_sp}{a character vector or data.frame
with one column, giving the names of species not to be
used as benchmarks in Frescalo. Default is \code{NULL}
and all species are used. See Hill, 2011 for reasons why
some species may not be suitable benchmarks.}
\item{fres_site_filter}{Optionally a character vector or
data.frame with one column, giving the names of sites to
be used in the trend analysis. Sites not included in
this list are not used for estimating TFactors. Default
is \code{NULL} and all sites are used.}
\item{phi}{Target frequency of frequency-weighted mean
frequency. Default is 0.74 as in Hill (2011). If this
value is smaller than the 98.5 percentile of input phi it
is automatically increased and a warning message is
generated. This is limited to 0.50 to 0.95.}
\item{alpha}{the proportion of the expected number of
species in a cell to be treated as benchmarks. Default is
0.27 as in Hill (2011). This is limited to 0.08 to 0.50.}
\item{trend_option}{Set the method by which you wish to
calculate percentage change. This can currently be set to
either \code{'arithmetic'} (default) or
\code{'geometric'}. Arithmetic calculates percentage
change in a linear fashion such that a decline of 50\%
over 50 years is equal to 10\% in 10 years. Using the
same example a Geometric trend would be 8.44\% every 10
years as this works on a compound rate.}
\item{NYears}{The number of years over which you want the
percentage change to be calculated (i.e. 10 gives a
decadal change). Default = 10}
\item{year_col}{The name of the year column in
\code{Data}}
\item{site_col}{The name of the site column in
\code{Data}}
\item{sp_col}{The name of the species column in
\code{Data}}
\item{start_col}{The name of the start date column in
\code{Data}}
\item{end_col}{The name of the end date column in
\code{Data}}
}
\value{
Results are saved to file and most are returned in a list
to R.
The list object returned is comprised of the following:
\item{\bold{$paths}}{This list of file paths provides the
locations of the raw data files for $log, $stat, $freq
and $trend, in that order}
\item{\bold{$trend}}{This dataframe provides the list of
time factors for each species}
\tabular{rll}{ - \tab \code{Species} \tab Name of
species\cr - \tab \code{Time} \tab Time period, specified
as a class (e.g. 1970); times need not be numeric and are
indexed as character strings\cr - \tab \code{TFactor}
\tab Time factor, the estimated relative frequency of
species at the time\cr - \tab \code{St_Dev} \tab Standard
deviation of the time factor, given that spt (defined
below) is a weighted sum of binomial variates\cr - \tab
\code{X} \tab Number of occurrences of species at the
time period\cr - \tab \code{Xspt} \tab Number of
occurrences, given reduced weight of locations having
very low sampling effort\cr - \tab \code{Xest} \tab
Estimated number of occurrences; this should be equal to
spt if the algorithm has converged\cr - \tab
\code{N>0.00} \tab Number of locations with non-zero
probability of the species occurring\cr - \tab
\code{N>0.98} \tab Number of locations for which the
probability of occurrence was estimated as greater than
0.98\cr }
\item{\bold{$stat}}{Location report}
\tabular{rll}{ - \tab \code{Location} \tab Name of
location; in this case locations are hectads of the GB
National Grid \cr - \tab \code{Loc_no} \tab Numbering
(added) of locations in alphanumeric order \cr - \tab
\code{No_spp} \tab Number of species at that location;
the actual number which may be zero \cr - \tab
\code{Phi_in} \tab Initial value of phi, the
frequency-weighted mean frequency \cr - \tab \code{Alpha}
\tab Sampling effort multiplier (to achieve standard
value of phi) \cr - \tab \code{Wgt_n2} \tab effective
number N2 for the neighbourhood weights; this is small if
there are few floristically similar hectads close to the
target hectad. It is (sum weights)^2 / (sum weights^2)
\cr - \tab \code{Phi_out} \tab Value of phi after
rescaling; constant, if the algorithm has converged\cr -
\tab \code{Spnum_in} \tab Sum of neighbourhood
frequencies before rescaling\cr - \tab \code{Spnum_out}
\tab Estimated species richness, i.e. sum of
neighbourhood frequencies after rescaling\cr - \tab
\code{Iter} \tab Number of iterations for algorithm to
converge\cr }
\item{\bold{$freq}}{Listing of rescaled species
frequencies}
\tabular{rll}{ - \tab \code{Location} \tab Name of
location\cr - \tab \code{Species} \tab Name of species\cr
- \tab \code{Pres} \tab Record of species in location (1
= recorded, 0 = not recorded)\cr - \tab \code{Freq} \tab
Frequency of species in neighbourhood of location\cr -
\tab \code{Freq_1} \tab Estimated probability of
occurrence, i.e. frequency of species after rescaling\cr
- \tab \code{SD_Frq1} \tab Standard error of Freq_1,
calculated on the assumption that Freq is a binomial
variate with standard error sqrt(Freq*(1-Freq)/ Wgt_n2),
where Wgt_n2 is as defined for samples.txt in section
(b)\cr - \tab \code{Rank} \tab Rank of frequency in
neighbourhood of location\cr - \tab \code{Rank_1} \tab
Rescaled rank, defined as Rank/Estimated species
richness\cr }
\item{\bold{$log}}{This records all the output sent to
the console when running frescalo}
\item{\bold{$lm_stats}}{The results of linear modelling
of TFactors}
\tabular{rll}{ - \tab \code{SPECIES} \tab Name of species
used internally by frescalo\cr - \tab \code{NAME} \tab
Name of species as appears in raw data\cr - \tab \code{b}
\tab The slope of the model\cr - \tab \code{a} \tab The
intercept\cr - \tab \code{b_std_err} \tab Standard error
of the slope\cr - \tab \code{b_tval} \tab t-value for a
test of significance of the slope\cr - \tab \code{b_pval}
\tab p-value for a test of significance of the slope\cr -
\tab \code{a_std_err} \tab Standard error of the
intercept\cr - \tab \code{a_tval} \tab t-value for a test
of significance of the intercept\cr - \tab \code{a_pval}
\tab p-value for a test of significance of the
intercept\cr - \tab \code{adj_r2} \tab Adjusted
R-squared of the model\cr - \tab
\code{r2} \tab R-squared of the
model\cr - \tab \code{F_val} \tab F-value of the
model\cr - \tab \code{F_num_df} \tab Degrees of freedom
of the model\cr - \tab \code{F_den_df} \tab Denominator
degrees of freedom from the F-statistic\cr - \tab
\code{Ymin} \tab The earliest year in the dataset\cr -
\tab \code{Ymax} \tab The latest year in the dataset\cr -
\tab \code{change_...} \tab The percentage change
dependent on the values given to \code{trend_option} and
\code{NYears}.\cr } \bold{The following columns are only
produced when there are only two time periods}
\tabular{rll}{ - \tab \code{Z_VAL} \tab Z-value for the
significance test of the trend\cr - \tab \code{SIG_95}
\tab A logical statement indicating if the trend is
significant (TRUE) or non-significant (FALSE)\cr }
}
\description{
A function for using Frescalo (Hill, 2011), a tool for
analysing occurrence data when recording effort is not
known. This function returns the output from Frescalo to
the R session and saves it to the path specified by
\code{sinkdir}. By setting \code{plot_fres} to
\code{TRUE} maps of the results will also be saved.
Plotting the returned object gives a useful summary.
}
\examples{
\dontrun{
# Load the library
library(sparta)
# Load data
data(ex_dat)
# Run frescalo (data is save to the working directory as sinkdir is not given)
fres_out<-frescalo(Data=ex_dat,
time_periods=data.frame(start=c(1980,1990),end=c(1989,1999)),
site_col='hectad',
sp_col='CONCEPT',
start_col='TO_STARTDATE',
end_col='Date')
}
}
\references{
Hill, Mark. Local frequency as a key to interpreting
species occurrence data when recording effort is not
known. 2011. \emph{Methods in Ecology and Evolution}, 3
(1), 195-205.
}
\keyword{frescalo}
\keyword{trends,}
|
8abdc9f98a642804b8e0f866806925a79db39797 | 520c0e6917626743f86d9e86be81461030231c7b | /man/pdFilterSingle.Rd | ae69ee01808205e676b1acff81bdb2fad92bcd1e | [] | no_license | olssol/pfDesign | be4e8cd5161304d991ffe1ef71258dfe28788e6f | e8bd03bef21c4210645a16bd5d5e5651754a2616 | refs/heads/master | 2022-03-02T06:53:51.087223 | 2022-02-20T04:41:31 | 2022-02-20T04:41:31 | 206,724,967 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 661 | rd | pdFilterSingle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pfDesign_filter.R
\name{pdFilterSingle}
\alias{pdFilterSingle}
\title{Particle filter for a given interval}
\usage{
pdFilterSingle(
y,
last_smps,
last_weights = NULL,
f_ll = pdBinPmf,
thresh_ess = NULL,
...
)
}
\arguments{
\item{y}{current Y}
\item{last_smps}{col 1: prior samples of theta; col 2: mixture indicator}
\item{last_weights}{normalized weights corresponding to last_smps. If NULL,
all weights are 1/N.}
\item{f_ll}{likelihood function}
}
\value{
col 1: theta; col 2: mixture indicator; col 3: weights
}
\description{
Particle filter for a given interval
}
|
0f3f5ebf879ab5108284a6e43a5e6e1ce125304e | 24220d86828456d501b8c1b96e3d3839f68aa8d3 | /man/review.Rd | 58abd2420758557b526d6ca0df0897c6bca62a9f | [] | no_license | AndreaCapozio/TMDb | 025a1e90870f520a78091c5b9c113c764da4f24b | 5498eae1b37b4858185328cf0555158cbe1566e3 | refs/heads/master | 2021-01-01T04:39:11.419294 | 2016-05-01T11:20:05 | 2016-05-01T11:20:05 | 57,594,176 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,181 | rd | review.Rd | \name{review}
\alias{review}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Retrieve basic information about a review.
}
\description{
Get the full details of a review by ID.
}
\usage{
review(api_key, id)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{api_key}{
Your TMDb Consumer Key.
}
\item{id}{
The review ID.
}
}
\value{
A list with the following fields:
\item{id}{The review ID.}
\item{author}{The review's author.}
\item{content}{The review's text.}
\item{iso_639_1}{The review's language.}
\item{media_id}{The ID of the reviewed object.}
\item{media_title}{The object's title.}
\item{media_type}{The type of the reviewed object.}
\item{url}{The url of the review.}
}
\references{
http://docs.themoviedb.apiary.io/#reference
}
\author{
Andrea Capozio
}
\examples{
\dontrun{
## An example of an authenticated request,
## where api_key is fictitious.
## You can obtain your own at https://www.themoviedb.org/documentation/api
api_key <- "key"
review(api_key = api_key, id = "5013bc76760ee372cb00253e")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{review}
|
c8b73849fdf535c3294f8f56732031f8745a9ef8 | 2e6f8e6eaf11f6e3fe622428dd3d4ce9b9185278 | /ctsmr/ctsmr-package/man/PackData.Rd | 282dd7dd28ec29c3ed8b35b6eb90dc265ca285bf | [
"MIT"
] | permissive | perNyfelt/renjinSamplesAndTests | c9498a3eebf35f668bc1061a4c1f74a6bf8e2417 | 5140850aff742dbff02cd4a12a4f92b32a59aa25 | refs/heads/master | 2021-07-23T23:58:59.668537 | 2021-07-23T10:21:39 | 2021-07-23T10:21:39 | 202,578,093 | 1 | 1 | MIT | 2020-10-15T18:13:49 | 2019-08-15T16:45:33 | Fortran | UTF-8 | R | false | false | 354 | rd | PackData.Rd | \name{PackData}
\alias{PackData}
\title{Packs (validated) user given data for CTSM}
\usage{
PackData(data, inputs, outputs)
}
\arguments{
\item{data}{list of data.frames.}
\item{inputs}{vector of names of the input variables.}
\item{outputs}{vector of names of the output variables.}
}
\description{
Packs (validated) user given data for CTSM
}
|
d40ebf34a1d2566ed916fc42ba30c245cb46efed | d58151abb81dba81e8ac2804d89a0e66de26af0d | /Snippets/differential expression analsysis formalized.R | 749edd75be0bcd489939de441b908db4145d2c5e | [] | no_license | vanrooij-lab/scRNAseq-HCM-human--old | c7fa3ac20a85aa77c26bde9a4b9731d236206df2 | 8f24236128e9cc89225e0e6d9c70b6c6eb0e88f1 | refs/heads/master | 2022-12-06T03:21:56.520140 | 2020-08-18T15:04:58 | 2020-08-18T15:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,603 | r | differential expression analsysis formalized.R | # functions for permutation-based statistics to deterimene
# cell population size dynamics across different conditions
# strategy of Farbehi et al (2019)* was adapted for this script
# this script requires a data.frame with the first column being cell names which include the condition they are derived from
# the second column should depict the cluster number of each cell.
# Script by: Bas Molenaar 15-07-2019
# * Nona Farbehi, et al. (2019). Single-cell expression profiling reveals dynamic flux of cardiac stromal, vascular
# and immune cells in health and injury. E-life,8
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
##### function to get contigency table of raw number of cells #####
##### per condition in all clusters #####
##### all_clus = data frame showing the cluster number for each cell that #####
##### was included in the RaceID clustering (after filtering) first column should be cell names, #####
##### second should column should be cluster number #####
##### ... = depict here which condition the cells from a group originates (e.g. here "sham_1d","Sham_14d","IR_1d) #####
##### output is contigency table with every row being different clusters #####
##### and every column being different conditions #####
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
create_contigence_table <- function(all_clus,...){
for(condition in list(...)){
if (any(grepl(condition,all_clus[,1]))==FALSE){
stop("Warning, one or more conditions are not in the dataset")
}
}
output <- data.frame()
clusters_nr <- NULL
for(i in 1:max(all_clus[,2])){
clusters_nr <- c(clusters_nr,i)
num_cell <- NULL
for(y in seq(1,length(list(...)))){
cells <- nrow(subset(all_clus,all_clus[,2]==i & grepl(unlist(list(...)[y]),all_clus[,1])))
num_cell <- c(num_cell,cells)
}
output<- rbind(output,num_cell)
}
output <- cbind(clusters_nr,output)
colnames_data1 <- "cluster number"
for(colna in list(...)){
name1 <- paste("nr of cells in cluster from condition",colna)
colnames_data1<- c(colnames_data1,name1)
}
colnames(output)<- colnames_data1
rownames(output) <- output[,"cluster number"]
output[,"cluster number"] <- NULL
return(output)
}
#############################################################################################################
#############################################################################################################
#############################################################################################################
#############################################################################################################
### function to create the condition and cluster information list for each cell. ###
### Creates list with 2 vectors from a contigency table containing nuber of cells from each ###
### condition per cluster ,with colnamen being different conditions and rownames being different cluster. ###
### Numbers of each element in each vector represents a seperate cell, with information about ###
### the condition from that cell in the first vector (condition vector) and information about the cluster ###
### in the second row (cluster vector). Used for downstream bootstrapping permutation procedure ###
### input = contigency table with every row being different clusters ###
### and every column being different conditions ### ###
#############################################################################################################
#############################################################################################################
#############################################################################################################
#############################################################################################################
makevariablesforcells<-function(input){
cond_names<-colnames(input)
clus_names<-rownames(input)
# create condition vector with each condition repeated x times, with x being the total number of cells
# in a specific condition. Follows the order of the rownames of the contigency table
cond<-rep(cond_names, apply(input,2,sum))
# creates cluster vector for each each condition, per condition each cluster is repeated x times, with x being the total
# number of cells in a a cluster of the specific condition.
clus_list<-apply(input,2,function(x){
rep(clus_names, x )
})
# catonate the cluster vectors of each condtion, follows the order of the rownames of the contigency table
clus <- NULL
for(i in seq(1:length(clus_list))){
clus <- c(clus,clus_list[[i]])
}
#table(cond,clus);
return(list(condition=cond, cluster=clus));
}
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
### function to perform permutation based null distribution for each condition across all clusters ###
### Creates multi-way array with 3 dimensions. 1 dimension (rownames) depict each cluster ###
### 1 dimension (colnames) depict each condition clusters. last dimension depict number of iterations for ###
###    permutation based null distribution.                                                                   ###
### IMPORTANT, MAKE SURE TO HAVE THE FUNCTION makevariablesforcells IN THE GENERAL ENVIROMENT ###
### input = contigency table with every row being different clusters ###
### and every column being different conditions ### ###
### n = iteration number for perfoming permutations in background model ###
### p = number of cells for which clusters will be permuted by SRSWOR from the observed cluster destribution ###
### (equation --> number_cells_SRSWOR = total cells*p) ###
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
generatebackground_model <-function(input, n=10^5, p=0.1){
# making sleketal frame for background distribution, with dimnames same as the input contigency table
background_mod <- array(NA,dim=c(nrow(input),ncol(input),n))
dimnames(background_mod)[[3]] <- 1:n
# grenerating lists showing for each cell the condition/cluster combination
observed_sample <- makevariablesforcells(input)
# permuting the clusters in a random group of cells by simple random sampling without replacement from the observed
# cluster distribution
for(i in 1:n)
{
if(i %% 5000 == 0){
print(paste(i, "of ", n, "permutations"))
}
permuted_sample <- observed_sample
sampled_cells_index <-sample(1:length(permuted_sample$cluster),round(length(permuted_sample$cluster)*p))
permuted_sample$cluster[sampled_cells_index]<-sample(observed_sample$cluster,length(sampled_cells_index),replace=F)
permuted_sample_cont_table <- table(permuted_sample$cluster,permuted_sample$condition)
background_mod[,,i]<-permuted_sample_cont_table
}
# manipulation the colnames (the conditions) of the background model in teh array in the correct orde
dimnames(background_mod)[[1]] <- rownames(permuted_sample_cont_table)
dimnames(background_mod)[[2]] <- colnames(permuted_sample_cont_table)
return(background_mod)
}
#############################################################################################################
#############################################################################################################
#############################################################################################################
#############################################################################################################
###              perform significance test between all conditions                                         ###
### returns a 3 dimensional array, with the x and y dimension being all the conditions in from the ###
### contigency table, and the z dimension being the different clusters ###
### input = contigency table with every row being different clusters ###
### and every column being different conditions ###
### background_mod = the background model array from the generatebackground_model function ###
### output is an array with 3 dimensions, first and second dimension from the P-value matrix in a cluster ###
### between all possible conditions. Third dimension represent the different clusters ###
#############################################################################################################
#############################################################################################################
#############################################################################################################
#############################################################################################################
DPA_test <- function(input,background_mod){
p_value_array <- array(-1,dim=c(length(colnames(input)),length(colnames(input)),length(rownames(input))))
dimnames(p_value_array)[1] <- list(colnames(input))
dimnames(p_value_array)[2] <- list(colnames(input))
dimnames(p_value_array)[3] <- list(rownames(input))
counter <- 0
for(i in colnames(input)){
for(y in colnames(input)){
for(z in rownames(input)){
observed_difference <- input[z,y]/sum(input[,y])-
input[z,i]/sum(input[,i])
background_differences <- background_mod[z,y,]/apply(background_mod[,y,],2,sum)-
background_mod[z,i,]/apply(background_mod[,i,],2,sum)
increase_observed <- length(observed_difference[observed_difference>background_differences])/length(background_differences)
decrease_observed <- length(observed_difference[observed_difference<background_differences])/length(background_differences)
if(increase_observed==0&decrease_observed==0){
p_value_array[i,y,z] <- 1
} else{
p_value_array[i,y,z] <- min(increase_observed,decrease_observed)
}
}
}
counter=counter+1
perc_done <- counter/length(colnames(input))*100
print(paste("currently",perc_done,"% done"))
}
return(p_value_array)
}
|
02e5d4747dd1b78b12ab8d135db494ad90e8afbc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lattice/examples/qqmath.Rd.R | 3d879e05d61b06c96105db0b7af39b4be17a6903 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,012 | r | qqmath.Rd.R | library(lattice)
### Name: B_04_qqmath
### Title: Q-Q Plot with Theoretical Distribution
### Aliases: qqmath qqmath.formula qqmath.numeric
### Keywords: dplot
### ** Examples
qqmath(~ rnorm(100), distribution = function(p) qt(p, df = 10))
qqmath(~ height | voice.part, aspect = "xy", data = singer,
prepanel = prepanel.qqmathline,
panel = function(x, ...) {
panel.qqmathline(x, ...)
panel.qqmath(x, ...)
})
vp.comb <-
factor(sapply(strsplit(as.character(singer$voice.part), split = " "),
"[", 1),
levels = c("Bass", "Tenor", "Alto", "Soprano"))
vp.group <-
factor(sapply(strsplit(as.character(singer$voice.part), split = " "),
"[", 2))
qqmath(~ height | vp.comb, data = singer,
groups = vp.group, auto.key = list(space = "right"),
aspect = "xy",
prepanel = prepanel.qqmathline,
panel = function(x, ...) {
panel.qqmathline(x, ...)
panel.qqmath(x, ...)
})
|
531b137de95e27576ea0f41d718f61083df2bdb6 | cd6b2f30ae748a9adc5c6f4a52c775645ac64497 | /data analysis/SNR subjective measures.R | 61a2ddf3a971cd735ef33d4c2a13af3f10707537 | [] | no_license | mkeomec/SNR-ERP | 88663b6cf51ebd0223dcf256e559794c111e4f66 | 8b037f13508a008c9caaf134412205eafac7d587 | refs/heads/master | 2020-05-23T08:14:18.672831 | 2018-06-21T04:23:52 | 2018-06-21T04:23:52 | 70,273,801 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 830 | r | SNR subjective measures.R | ## EXPLORE SNR subjective measures
redcap_data <- read.csv('C:/Users/cwbishop/Documents/SNR-ERP/data analysis/HASNR_DATA_2018-01-08_1630.csv')
# Look at IOI #7
redcap_data['ioiha_response_007']
table(redcap_data['ioiha_response_007'])
barplot(table(ioi_data['ioiha_response_007']))
# ALDQ
aldq_table <- table(redcap_data['aldq_demand'])
plot(table(redcap_data['aldq_demand']))
plot(redcap_data['aldq_demand'])
# APHAB
# Combine subjective measures
plot((redcap_data[,'ioiha_response_007']),redcap_data[,'aldq_demand'])
'aphab_unaided_001','aphab_aided_001','aphab_unaided_005','aphab_aided_005','aphab_unaided_006','aphab_aided_006','aphab_unaided_007','aphab_aided_007','aphab_unaided_011','aphab_aided_011','aphab_unaided_016','aphab_aided_016','aphab_unaided_019','aphab_aided_019','aphab_unaided_024','aphab_aided_024'
|
ca10a2955680eca12b417a7e1ca28775c441f1ce | ec39440f33e9652cd7705b1d11f31bcb417ae806 | /man/get_whitelisted_domains.Rd | 278238aab15159dd55cd5765bbef4e5c6a6a339f | [
"MIT"
] | permissive | koad7/anahita | 61ae9870df69becc6bf395fa63c87ab818eb6f33 | 480be5f5ec8e3f65de790f11825bfa8c63bc62c2 | refs/heads/main | 2023-05-06T17:29:45.348845 | 2021-05-28T22:38:19 | 2021-05-28T22:38:19 | 371,832,792 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 615 | rd | get_whitelisted_domains.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-whitelisting.R
\name{get_whitelisted_domains}
\alias{get_whitelisted_domains}
\title{Get whitelisted domains}
\usage{
get_whitelisted_domains(conn, org_ids = NULL)
}
\arguments{
\item{conn}{connection to the data warehouse}
\item{org_ids}{list of organisation ids}
}
\value{
a tibble
}
\description{
Get a list of all whitelisted domains in Salesforce
or domains whitelisted for a selected organisation.
}
\examples{
\dontrun{
conn <- connect_dw()
get_whitelisted_domains(conn)
get_whitelisted_domains(conn, "001b0000005gxmlAAA")
}
}
|
c158ceac5feb5a2a9910b03292f782ea21df469c | 22087c2df17907e3d7c6c502b68779ba968848ce | /R/kuenm_rpca.R | a7a628191508f184e52fefc14476336fb95b3c9a | [] | no_license | marlonecobos/kuenm | 07c16d20445ccbd6e07d7349fa54ea03900cf0a9 | 2012fadee1ceffec008f50495ec598c94b41ffc5 | refs/heads/master | 2023-07-07T09:13:57.863875 | 2023-06-26T07:03:22 | 2023-06-26T07:03:22 | 130,799,797 | 46 | 19 | null | 2022-01-21T11:30:20 | 2018-04-24T05:17:49 | TeX | UTF-8 | R | false | false | 8,254 | r | kuenm_rpca.R | #' Principal componens for raster layers and projections
#'
#' @description kuenm_rpca performs a principal component analysis with a set of variables and
#' produces raster layers of them. If needed, the principal components are projected to other
#' scenarios.
#'
#' @param variables (character or RasterStack) if character, name of the folder where raster layers are located.
#' If RasterStack, stack of raster layers to be used in principal component analyses.
#' @param in.format (character) valid only if \code{variables} is character. Format of variables in the directory.
#' Options are "ascii", "GTiff", and "EHdr" = bil.
#' @param var.scale (logical) whether or not to scale variables before performing principal component
#' analyses. Default = TRUE.
#' @param write.result (logical) whether or not to write PCA results and raster layers (PCs) in \code{out.dir}.
#' @param out.dir (character) valid if \code{write.result} = TRUE. Name of the folder to be created to save the
#' results of the analyses. Default = "PCA_results".
#' @param out.format (character) if \code{write.result} = TRUE, format of variables to be written in distinct
#' sets inside \code{out.dir}. Options are "ascii", "GTiff", and "EHdr" = bil. Default = "GTiff".
#' @param project (logical) whether or not to project the species niche to other scenario(s).
#' If TRUE, argument \code{proj.variables} needs to be defined. Default = FALSE.
#' @param proj.vars (character or RasterStack) if character, name of the folder where subfolders with environmental
#' variables of scenarios for projections are (useful if multiple projections are needed). If RasterStack, object
#' containing stacked variables of only one projection scenario. Variables must correspond with variables in \code{vars.folder}
#' (i.e., their names must correspond but they should represent conditions in other scenario).
#' @param n.pcs (numeric) number of principal components to be returned as rasters. By default all principal
#' components are returned as RasterLayers.
#'
#' @return
#' A list containing PCA loadings and PCA summary as matrices, as well as one or multiple (if projected) RasterStacks
#' of principal components.
#'
#' If \code{write.result} = TRUE, all results are written in \code{out.dir}.
#'
#' @details
#' If \code{var.scale} = TRUE, variables are centered to cero and scaled using \code{\link[base]{scale}}.
#'
#' @usage
#' kuenm_rpca(variables, in.format, var.scale = TRUE, write.result = TRUE,
#' out.format = "GTiff", out.dir = "PCA_results", project = FALSE,
#' proj.vars, n.pcs)
#'
#' @export
#'
#' @examples
#' # Data
#' variab <- raster::stack(list.files(system.file("extdata", package = "kuenm"),
#' pattern = "Mbio_", full.names = TRUE))
#' names(variab) <- paste0("bio_", c(1, 12, 15, 17))
#'
#' proj_var <- raster::stack(list.files(system.file("extdata", package = "kuenm"),
#' pattern = "Gbio_", full.names = TRUE))
#' names(proj_var) <- paste0("bio_", c(1, 12, 15, 17))
#'
#' # Example with no projection
#' npcs <- 3
#'
#' rpca <- kuenm_rpca(variables = variab, var.scale = TRUE, write.result = FALSE,
#' n.pcs = npcs)
#'
#' # Example with projection
#' project <- TRUE
#'
#' rpca1 <- kuenm_rpca(variables = variab, var.scale = TRUE, write.result = FALSE,
#' project = project, proj.vars = proj_var, n.pcs = npcs)
kuenm_rpca <- function(variables, in.format, var.scale = TRUE, write.result = TRUE, out.format = "GTiff",
                       out.dir = "PCA_results", project = FALSE, proj.vars, n.pcs) {
  # ---- argument checks ------------------------------------------------------
  if (missing(variables)) {
    stop("Argument variables must be defined. See functions help.")
  }
  if (isTRUE(project) && missing(proj.vars)) {
    stop("If projections are needed, argument proj.vars must be defined. See functions help.")
  }
  # ---- file-name patterns ---------------------------------------------------
  # rformat_type() (package helper) maps a format keyword to a file extension.
  if (is.character(variables)) {
    if (missing(in.format)) {
      stop("Argument variables is a character, in.format needs to be defined.")
    }
    patt <- paste0(rformat_type(in.format), "$")
  }
  # BUG FIX: the output extension was previously computed only when
  # write.result was passed explicitly (missing() test), so with the default
  # write.result = TRUE 'patt1' was undefined and writing file names failed.
  if (isTRUE(write.result)) {
    patt1 <- rformat_type(out.format)
  }
  # ---- read variables -------------------------------------------------------
  if (is.character(variables)) {
    var_files <- list.files(variables, pattern = patt, full.names = TRUE)
    variables <- raster::stack(var_files)
  }
  # Matrix of cell values (rows = non-NA cells, columns = variables).
  var_points <- na.omit(raster::values(variables))
  # ---- PCA ------------------------------------------------------------------
  pca <- prcomp(var_points, center = TRUE, scale = isTRUE(var.scale))
  scores <- pca$x
  # BUG FIX: the fallback used length(var), which resolved to base::var
  # (length 1) when a RasterStack was supplied directly; use the number of
  # layers of the stack instead.
  if (missing(n.pcs)) {
    n.pcs <- raster::nlayers(variables)
  }
  # ---- rasterize principal components ---------------------------------------
  pcras <- list()
  if (isTRUE(write.result)) {
    cat("\nWriting raster PCs in Output folder, please wait...\n")
    dir.create(out.dir)
    pca_fol <- paste(out.dir, "Initial", sep = "/")
    dir.create(pca_fol)
  }
  for (i in seq_len(n.pcs)) {
    # Copy the grid of the first layer and fill its non-NA cells with PC i.
    pcra <- variables[[1]]
    pcra[!is.na(raster::values(pcra))] <- scores[, i]
    if (isTRUE(write.result)) {
      filenam <- paste(pca_fol, "/pc_", i, patt1, sep = "")
      raster::writeRaster(pcra, filenam, format = out.format)
    }
    pcras[[i]] <- pcra
  }
  pcras <- do.call(raster::stack, pcras)
  names(pcras) <- paste0("pc_", 1:dim(pcras)[3])
  # ---- PCA summary table ----------------------------------------------------
  StdDev <- pca$sdev
  VarExp <- pca$sdev^2 / sum(pca$sdev^2)
  CumVar <- cumsum(VarExp)
  SumPCAMat <- rbind(StdDev, VarExp, CumVar)
  colnames(SumPCAMat) <- paste("PC", seq(1, length(StdDev)), sep = "")
  row.names(SumPCAMat) <- c("Standard deviation", "Proportion of Variance",
                            "Cumulative Proportion")
  if (isTRUE(write.result)) {
    sink(paste(pca_fol, "pca_results.txt", sep = "/"))
    cat("Principal component analysis results\n")
    cat("\nPCA loadings\n")
    print(pca$rotation)
    cat("\n\nPCA summary\n")
    print(SumPCAMat)
    sink()
  }
  # PCA results to be returned.
  loadings <- pca$rotation
  respca <- SumPCAMat
  # ---- project PCs onto other variable sets ---------------------------------
  if (isTRUE(project)) {
    ppcrass <- list()
    if (isTRUE(write.result)) {
      cat("\nProjecting and writing projected raster PCs in Output folder, please wait...\n")
    } else {
      cat("\nProjecting raster PCs\n")
    }
    if (is.character(proj.vars)) {
      proj_dirs <- list.dirs(proj.vars, recursive = FALSE)
      proj_names <- list.dirs(proj.vars, recursive = FALSE, full.names = FALSE)
      fol_names <- paste(out.dir, proj_names, sep = "/")
    }
    if (inherits(proj.vars, c("RasterStack", "RasterBrick"))) {
      proj_dirs <- "projection"
      proj_names <- "Projected_PCs"
      fol_names <- paste(out.dir, proj_names, sep = "/")
    }
    for (h in seq_along(proj_dirs)) {
      if (is.character(proj.vars)) {
        pvar <- list.files(proj_dirs[h], pattern = patt, full.names = TRUE)
        p_stack <- raster::stack(pvar)
      }
      if (inherits(proj.vars, c("RasterStack", "RasterBrick"))) {
        p_stack <- proj.vars
      }
      if (isTRUE(write.result)) {
        dir.create(fol_names[h])
      }
      ppcras <- list()
      p_stackp <- na.omit(raster::values(p_stack))
      # BUG FIX: variable names were taken from names(pca[[4]]) (the 'scale'
      # element), which is empty when var.scale = FALSE; the rotation matrix
      # row names always carry the original variable names.
      colnames(p_stackp) <- rownames(pca$rotation)
      p_pcs <- predict(pca, newdata = p_stackp)
      for (i in seq_len(n.pcs)) {
        pcra <- p_stack[[1]]
        pcra[!is.na(raster::values(pcra))] <- p_pcs[, i]
        if (isTRUE(write.result)) {
          filenam <- paste(fol_names[h], "/pc_", i, patt1, sep = "")
          raster::writeRaster(pcra, filenam, format = out.format)
        }
        ppcras[[i]] <- pcra
      }
      ppcrass[[h]] <- do.call(raster::stack, ppcras)
      names(ppcrass[[h]]) <- paste0("pc_", 1:dim(ppcrass[[h]])[3])
    }
    names(ppcrass) <- paste("PCRasters", proj_names, sep = "_")
  }
  # ---- assemble return value ------------------------------------------------
  if (isTRUE(project)) {
    results <- c(list(loadings, respca, pcras), ppcrass)
    names(results)[1:3] <- c("PCA_loadings", "PCA_results", "PCRasters_initial")
  } else {
    results <- list(loadings, respca, pcras)
    names(results) <- c("PCA_loadings", "PCA_results", "PCRasters_initial")
  }
  if (isTRUE(write.result)) {
    cat("\nRaster PCA finished. Check your output directory", paste(getwd(), out.dir, sep = "/"), "\n")
  }
  return(results)
}
|
fc0dea54c4e4bd0c48972930cbcdbc3d1841621d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/currentSurvival/examples/cci.nostrat.Rd.R | 8b145c3d2448597d9ad744a26cdf755fa816852d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 319 | r | cci.nostrat.Rd.R | library(currentSurvival)
### Name: cci.nostrat
### Title: Estimates Current Cumulative Incidence (CCI) and Common
### Cumulative Incidence (comCI) Functions Without Stratification
### Aliases: cci.nostrat
### Keywords: survival
### ** Examples
# This is an internal function and is not usually called by user.
|
bcefaf68c3ff9cb566e8b2d13c1c0b24628215a9 | 52586df6b1df22e19750306185ee69a7b09abf42 | /ISAEM/oldISAEM/GMM/SAEM_2gauss.R | 11f85b21ab567f378b6bbc5fdfc5a817413e1c2f | [] | no_license | BelhalK/AccelerationTrainingAlgorithms | 5d1390f5a5cb6f24f59f2c06073040056014aa64 | 0cc5f4405ad103f704cd7c6259762a66fb6bf37f | refs/heads/master | 2023-07-25T02:28:38.095277 | 2020-10-30T09:14:28 | 2020-10-30T09:14:28 | 94,530,148 | 0 | 0 | null | 2023-07-06T21:20:14 | 2017-06-16T09:46:26 | Jupyter Notebook | UTF-8 | R | false | false | 1,355 | r | SAEM_2gauss.R | source("mixtureAlgos.R")
source("mixtureFunctions.R")
theme_set(theme_bw())
# ---- experiment settings: two-component Gaussian mixture ----
n <- 100
weight<-c(0.7, 0.3)
mu<-c(0,4)
sigma<-c(1,1)*1
# Alternative starting values (currently unused: theta0 is set to theta below).
weight0<-c(.5,.5)
mu0<-c(1,2)
sigma0<-c(.5,2)
nb_r <- 10
K1 <-50
K <- 100
alpha1 <- 0.7
alpha2 <- 0.4
seed0=44444
# ylim <- c(0.15, 0.5, 0.4)
ylim <- c(0.1, 0.3, 0.3)
nsim <- 5
#
G<-length(mu)
col.names <- c("iteration", paste0("p",1:G), paste0("mu",1:G), paste0("sigma",1:G))
theta<-list(p=weight,mu=mu,sigma=sigma)
# theta0<-list(p=weight0,mu=mu0,sigma=sigma0)
theta0<-theta
## Simulation
# One replicate per column; each replicate gets its own deterministic seed
# so EM and SAEM below run on identical data.
x <- matrix(0,nrow=n,ncol=nsim)
for (j in (1:nsim))
{
seed <- j*seed0
set.seed(seed)
xj<-mixt.simulate(n,weight,mu,sigma)
x[,j] <- xj
}
## EM
# Run EM on every replicate; per-replicate results go in df.em, and the
# stacked results (with a 'rep' column) in dem for the convergence plot.
dem <- NULL
df.em <- vector("list", length=nsim)
for (j in (1:nsim))
{
df <- mixt.em(x[,j], theta, K)
df <- mixt.ident(df)
df$rep <- j
dem <- rbind(dem,df)
df$rep <- NULL
df.em[[j]] <- df
}
graphConvMC(dem, title="EM")
# Density check on the first replicate only.
pdf <- NULL
param <- df.em[[1]]
data<- x[,1]
pdf <- mixt.pdf(param,data,K,n)
pdftest <- mixt.pdftest(param,data,K,n)
plot(pdf)
plot(pdftest)
## SAEM2
# Same replicates (same seeds) with SAEM; 'diff' stores the SAEM-minus-EM
# parameter paths, so the plot shows the discrepancy between the methods.
diff <- NULL
for (j in (1:nsim))
{
seed <- j*seed0
set.seed(seed)
df <- mixt.saem1(x[,j], theta0, K, K1, M=1, alpha=0.6)
df <- mixt.ident(df)
df <- df - df.em[[j]]
df$iteration <- 0:K
df$rep <- j
diff <- rbind(diff,df)
}
graphConvMC(diff, title="SAEM - EM", ylim=ylim)
|
ec84826b1ec6e9cc051c5d0211fc04a7cab3034e | 3d3d03188e534039ecb68f057f159f58591bd03f | /man/estimate.mfARI.CBFV.parameters.type.2.Rd | 7e29284da94acea42921db14335f93332eb07163 | [] | no_license | adbelazim/mfARI | c9ce3d2966f6b7596ac744e7a7d177f77a8d770e | 4c89ac08cf95e3a423197baa0291fda24f02ef21 | refs/heads/master | 2021-03-27T13:08:15.263063 | 2016-10-18T17:09:33 | 2016-10-18T17:09:33 | 71,271,237 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,139 | rd | estimate.mfARI.CBFV.parameters.type.2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfARI-v15.9.10.R
\name{estimate.mfARI.CBFV.parameters.type.2}
\alias{estimate.mfARI.CBFV.parameters.type.2}
\title{Function description:
Estimates the MSE for specified duration of the transient CBFV signal
In this version, Ks is the value of the normalised CBFV at the end
of the transient period.
Would be better to limit the CBFV angle here?
This would affect the slope and straight line,
affecting the transient MSE...}
\usage{
estimate.mfARI.CBFV.parameters.type.2(time.instants, normalised.CBFV.signal,
min.CBFV.sample, min.CBFV.time.instant, transient.CBFV.duration,
steady.CBFV.duration, time.tol = min(diff(time.instants))/100, ...)
}
\value{
Returns a plot with the CBFV parameters for a specific Delta-Tau.
}
\description{
Function description:
Estimates the MSE for specified duration of the transient CBFV signal
In this version, Ks is the value of the normalised CBFV at the end
of the transient period.
Would be better to limit the CBFV angle here?
This would affect the slope and straight line,
affecting the transient MSE...
}
|
8fbee4cec969a399649a8f301bd09de2ef7c0d88 | ab47dae2fa5a108e6c48b2013ae2ac09a1511856 | /R/TaxaSplit.R | 34ddd958f2788f511d2799b7cc60f8ef7ccba291 | [] | no_license | Nermin-Ghith/phylofactor | bee02d7676411e3d37b758ae42828128708da62c | 666b55cf634067392850ea5035c46eb1a473e969 | refs/heads/master | 2023-04-08T13:16:40.626542 | 2021-04-01T15:57:33 | 2021-04-01T15:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 954 | r | TaxaSplit.R | #' Describes the taxa split at a particular factor summarized by summary.phylofactor
#' @export
#' @param Summary summary.phylofactor object
#' @return list of two data frames ('Group 1' and 'Group 2') pairing IDs with
#'   taxonomy strings trimmed to the first level unique to each side of the split
TaxaSplit <- function(Summary){
  # Coerce the per-group ID tables (possibly factors) to character columns.
  taxG <- as.data.frame(lapply(Summary$group1$IDs, as.character), stringsAsFactors = FALSE)
  taxC <- as.data.frame(lapply(Summary$group2$IDs, as.character), stringsAsFactors = FALSE)
  # Rebuild a two-column data frame if coercion collapsed the dimensions
  # (single-entry case). The unused 'nms' locals of the original were removed.
  if (is.null(dim(taxG))){
    taxG <- data.frame('otuIDs' = taxG[1], 'TaxaIDs' = taxG[2])
  }
  if (is.null(dim(taxC))){
    taxC <- data.frame('otuIDs' = taxC[1], 'TaxaIDs' = taxC[2])
  }
  #### Grab the first unique taxonomic category for the Group and Complement ###
  # NOTE(review): the second call compares against the already-trimmed taxG
  # column; comparing against the original taxonomy may have been intended.
  taxG[,2] <- uniqueTaxa(taxG[,2], taxC[,2])
  taxC[,2] <- uniqueTaxa(taxC[,2], taxG[,2])
  output <- list(taxG, taxC)
  names(output) <- c('Group 1', 'Group 2')
  return(output)
}
|
2a079c7b93424cdc7b36e403a03378ad26aa867f | 5c57739590f374e136e95252250c06be3c300275 | /run_analysis.R | e00744147485e7dbf771409cc13b0ddc58b98796 | [] | no_license | leeyeelien/CleaningDataProject | 3380a566463a0e6bf40aef00cf706f5a2057ea6f | b67f702aaf329c6b6074ab8183ca198f0f86e6f9 | refs/heads/master | 2022-11-29T23:15:05.087822 | 2020-08-04T14:23:03 | 2020-08-04T14:23:03 | 284,751,845 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,496 | r | run_analysis.R | #Load dyplr library
# Getting & cleaning data: build tidy datasets from the UCI HAR measurements.
# BUG FIX: the original called library(dyplr) -- a typo for "dplyr" -- which
# errors immediately since no such package exists.
library(dplyr)
# Load activity label and feature lookup tables.
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
features <- read.table("UCI HAR Dataset/features.txt")
# Feature descriptions are the second column of the features table.
feature_names <- as.vector(features[,2])
# Load the training data: measurements, activity codes, subject ids.
train_dataset <- read.table("UCI HAR Dataset/train/X_train.txt")
train_y <- read.table("UCI HAR Dataset/train/y_train.txt")
train_subj <- read.table("UCI HAR Dataset/train/subject_train.txt")
# Merge to: subject | activity | measurements.
train_dataset <- bind_cols(train_y, train_dataset)
train_dataset <- bind_cols(train_subj, train_dataset)
# Same for the test data.
test_dataset <- read.table("UCI HAR Dataset/test/X_test.txt")
test_y <- read.table("UCI HAR Dataset/test/y_test.txt")
test_subj <- read.table("UCI HAR Dataset/test/subject_test.txt")
test_dataset <- bind_cols(test_y, test_dataset)
test_dataset <- bind_cols(test_subj, test_dataset)
# Stack train and test into one data set.
complete_data <- bind_rows(train_dataset, test_dataset)
# Name the columns: Subject, Activity, then the feature names.
feature_names <- c("Subject", "Activity", feature_names)
colnames(complete_data) <- feature_names
# Keep only mean() and std() features (plus Subject and Activity).
m_s <- feature_names[grepl(".*mean\\(\\)|.*std\\(\\)", feature_names)]
mean_std_data <- select(complete_data, c(Subject, Activity, all_of(m_s)))
# Replace numeric activity codes with descriptive labels.
mean_std_data$Activity <- activity_labels[mean_std_data$Activity, 2]
# Make the variable names more readable.
variable_names <- names(mean_std_data)
variable_names <- sub("\\(\\)", "", variable_names) # remove the parentheses
variable_names <- gsub("-", "", variable_names)     # remove the dashes
variable_names <- sub("mean", "Mean", variable_names)
variable_names <- sub("std", "StdDev", variable_names)
colnames(mean_std_data) <- variable_names
# Treat subject and activity as factors.
mean_std_data$Subject <- factor(mean_std_data$Subject)
mean_std_data$Activity <- factor(mean_std_data$Activity)
# Average of every variable per subject/activity pair.
summary_data <- mean_std_data %>% group_by(Subject, Activity) %>% summarise_all(mean)
# Write the tidy datasets to disk.
write.table(mean_std_data, file="UCI HAR Dataset/tidy_dataset.txt", row.names=FALSE)
write.table(summary_data, file="UCI HAR Dataset/tidy_summary.txt", row.names=FALSE)
|
9d6f5d79cc5e5d81be8e181351ec4c1c08b77c95 | bca63e5a36852745b285c801f0f1d66d79b63933 | /R Scripts/Tsay - High-frequency intraday number of transactions.R | e57a741b7cdd4914c45b1d64847833c2b604ae63 | [] | no_license | arkagogoldey/Finance_R_Files | 17201641c1ef05715bca8889dbfe7ff64cafe357 | 3b8b1fc5dd32448c7db637fc7306a7be50453710 | refs/heads/master | 2020-04-18T02:53:16.520896 | 2018-09-25T16:43:05 | 2018-09-25T16:43:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,182 | r | Tsay - High-frequency intraday number of transactions.R | "hfntra" <- function(da,int){
# Compute number of trades in a given interval (intraday)
#
# int: time intervals in minutes
# da: data in the format: date, hour, minute, second, price, volume
#
# Returns a list with component 'ntrad': the per-interval trade counts,
# concatenated over all trading days in 'da'. Side effects: draws a time
# plot and an ACF of the counts (par settings are restored afterwards).
if(!is.matrix(da))da=as.matrix(da)
# Interval length in seconds.
intsec=int*60
# Regular trading hours: 09:30 to 16:00, expressed in seconds from midnight.
istart=9*60*60+30*60
iend=16*60*60
# compute the number of time intervals within a trading day
tradetime=6.5*60*60
nintval=floor(tradetime/intsec)
T=dim(da)[1]
# compute time in seconds from midnight.
caltime=da[,2]*60*60+da[,3]*60+da[,4]
ntrade=NULL
date=da[1,1]
iday=1
#
icnt=0
cnt=rep(0,nintval)
for (i in 1:T) {
#
# New calendar day: flush the finished day's counts and reset the bins.
if(da[i,1] > date){
date=da[i,1]
ntrade=c(ntrade,cnt)
cnt=rep(0,nintval)
}
# Trades exactly at the open/close go to the first/last bin; trades strictly
# inside the session are binned by elapsed seconds since the open.
if(caltime[i]==istart)cnt[1]=cnt[1]+1
if(caltime[i]==iend)cnt[nintval]=cnt[nintval]+1
if((caltime[i] > istart) && (caltime[i] < iend)){
ii=caltime[i]-istart
ij=floor(ii/intsec)
cnt[ij+1]=cnt[ij+1]+1
}
# end of i-loop
}
# Flush the last day's counts.
ntrade=c(ntrade,cnt)
par(mfcol=c(2,1))
plot(ntrade,type='l')
title(main="Time plot of number of transactions")
acf(ntrade,lag=3*nintval)
par(mfcol=c(1,1))
# NOTE(review): the returned element is named 'ntrad' (not 'ntrade');
# callers access $ntrad -- confirm before renaming.
hfntra <- list(ntrad=ntrade)
} |
04e41318ac05ce4d472aff17c705f76a86d5900d | 2c1805e79d915c88faa0f6c258fc41e95937dba5 | /R/HelperFunctions/distance_between_points.R | 46cf0b3e0ad40c462829b7c772b6d6166826b616 | [] | no_license | hejtmy/VR_City_Analysis | b85c14ddc7aad5db8aeeb353ae02462986b20e59 | b149d3f52d76fc8fb0104fa42ec7b38ae7470ba0 | refs/heads/master | 2021-01-18T16:16:53.962471 | 2017-05-21T22:01:26 | 2017-05-21T22:01:34 | 49,779,651 | 0 | 0 | null | 2017-02-18T17:35:16 | 2016-01-16T15:48:50 | R | UTF-8 | R | false | false | 214 | r | distance_between_points.R | distance_between_points = function(point_x, point_y){
# Euclidean distance between two equal-length numeric points.
# Returns NA for invalid input (non-numeric or NULL) instead of erroring.
# BUG FIX: the original guard only assigned x/y when both inputs were
# numeric, so for non-numeric input (including NULL) the later is.null(x)
# check raised "object not found" instead of returning NA as intended.
distance_between_points = function(point_x, point_y){
  if (!is.numeric(point_x) || !is.numeric(point_y)) {
    return(NA)
  }
  return(sqrt(sum((point_x - point_y)^2)))
}
66d2961c0cd4178ee5060365b337e0d41e51943b | b40ec26f17286c2159a72f666e1531a041be2f51 | /datatest.R | 087d19e87afc8b377cf42dff78735f13b13dbf95 | [] | no_license | Nicey80/CampaignPlanner | bb744b5a12eb0f25fab57a680cf44164e09e1542 | d3b6f9f8d9be28c7d0a81e8ef0d06077a6d9dcbe | refs/heads/master | 2021-09-04T15:31:48.998266 | 2018-01-19T22:41:23 | 2018-01-19T22:41:23 | 111,463,778 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,651 | r | datatest.R | library(tidyverse)
library(readxl)
Camps <- read_excel("./data/InitCampDB.xlsx",sheet=1)
CampBatch <- read_excel("./data/InitCampDB.xlsx",sheet=2)
RCID <- read_excel("./data/InitCampDB.xlsx",sheet=4)
RCurves <- read_excel("./data/InitCampDB.xlsx",sheet=3)
DatePeriods <- read_excel("./data/AdminData.xlsx",sheet=1)
OrgSetUp<- read_excel("./data/AdminData.xlsx",sheet=2)
#### testing
##SUmmary table
FullData <- Camps%>%
left_join(CampBatch)
FullData %>%
mutate(BRespV=`Batch Vol`*VoiceRR,
BRespW=`Batch Vol`*WebRR,
BBVoice=BRespV*VConvBB,
BBWeb=BRespW*WConvBB,
TotBB=BBVoice + BBWeb
) %>%
select(CampaignID,`Campaign Name`,Format,BRespV,BRespW,
BBVoice,BBWeb,TotBB,`Batch Send Date`) %>%
group_by(CampaignID) %>%
summarise(VoiceCalls=sum(BRespV),
WebVisits=sum(BRespW),
VoiceSalesBB=sum(BBVoice),
WebSalesBB=sum(BBWeb),
TotalBBSales=sum(TotBB),
FirstMailDate=min(`Batch Send Date`),
LastMailDate=min(`Batch Send Date`))
### Forecast
FullData <- Camps%>%
left_join(CampBatch)
FD <- FullData %>%
mutate(BRespV=`Batch Vol`*VoiceRR,
BRespW=`Batch Vol`*WebRR,
BBVoice=BRespV*VConvBB,
BBWeb=BRespW*WConvBB,
TotBB=BBVoice + BBWeb
) %>%
#select(CampaignID,`Campaign Name`,Format,BRespV,BRespW,
# BBVoice,BBWeb,TotBB,`Batch Send Date`) %>%
group_by(CampaignID, Batch)
FD_nest <- FD %>%
nest()
FD_nest
# Daily response/sales forecast for one campaign batch (one nested row).
# Relies on globals: RCID (maps Func/SubFunc to a curve id) and RCurves
# (daily response curves, one column per curve id, offset by 1).
batch_Forecast <- function(inpdata){
  library(lubridate)
  # BUG FIX: '&&' only inspects the first elements (and errors for length > 1
  # inputs in R >= 4.3); element-wise '&' is required to select the row
  # matching both Func AND SubFunc.
  CurveID <- RCID$CurveID[RCID$Func == inpdata$Func & RCID$SubFunc == inpdata$SubFunc]
  Curve <- RCurves[, CurveID + 1]
  # Voice channel: daily response volumes, then conversions by product.
  B.Forecast.Voice.Calls <- inpdata$`Batch Vol` * inpdata$VoiceRR * Curve
  B.Forecast.Voice.BB <- B.Forecast.Voice.Calls * inpdata$VConvBB
  B.Forecast.Voice.TV <- B.Forecast.Voice.Calls * inpdata$VConvTV
  B.Forecast.Voice.Mob <- B.Forecast.Voice.Calls * inpdata$VConvMob
  # Web channel.
  B.Forecast.Web.Calls <- inpdata$`Batch Vol` * inpdata$WebRR * Curve
  B.Forecast.Web.BB <- B.Forecast.Web.Calls * inpdata$WConvBB
  B.Forecast.Web.TV <- B.Forecast.Web.Calls * inpdata$WConvTV
  B.Forecast.Web.Mob <- B.Forecast.Web.Calls * inpdata$WConvMob
  # One forecast row per day of the response curve, dated from the send date.
  F.Days <- nrow(B.Forecast.Voice.Calls)
  datelist <- seq.Date(from = as_date(inpdata$`Batch Send Date`), length.out = F.Days, by = "day")
  BF <- cbind(datelist, B.Forecast.Voice.Calls, B.Forecast.Voice.BB, B.Forecast.Voice.TV, B.Forecast.Voice.Mob,
              B.Forecast.Web.Calls, B.Forecast.Web.BB, B.Forecast.Web.TV, B.Forecast.Web.Mob)
  colnames(BF) <- c("datelist", "Voice Calls", "Voice BB Sales",
                    "Voice TV Sales", "Voice Mob Sales",
                    "Web Visits", "Web BB Sales", "Web TV Sales",
                    "Web Mob Sales")
  BF
}
# Apply the per-batch forecast to every nested campaign/batch group.
FD_nest <- FD_nest %>%
  mutate(forecast_Resp = map(data,batch_Forecast))
FD_nest
#FD_nest$forecast_voiceCalls[[1]]
library(broom)
# Flatten the nested per-batch forecasts into one tidy data frame.
FD_tidy <- FD_nest %>%
  mutate(td=map(forecast_Resp,bind_rows)) %>%
  select(CampaignID,Batch,td) %>%
  unnest()
FD_tidy
#idy(FD_nest)
# NOTE(review): FD_nest has no 'forecast_voiceCalls' column (only
# 'forecast_Resp'), so TD is empty here -- confirm intent.
TD <- bind_rows(FD_nest$forecast_voiceCalls)
FD_nest$forecast_Resp[[1]]
# NOTE(review): batch_Forecast renames its output columns (e.g. "Voice
# Calls"), so the B.Forecast.* names below may not exist -- confirm.
ggplot(FD_tidy)+stat_summary(aes(datelist,B.Forecast.Voice.Calls), geom = 'line', fun.y = 'sum')
FD_tidy %>% filter(datelist>='2017-10-01',datelist<='2018-03-31') %>%
  gather(Type,Vol, B.Forecast.Voice.Calls:B.Forecast.Web.Mob)
6119e24d6ab2a400a908128b18e8455cb24e2728 | 40c0b708b174b6860015a9e99acd3f90b51fb4a7 | /Beta/server.R | a0b7bc430ad387afe2b62db940ddb4c350f4f695 | [] | no_license | stu-keil/ComputationalStatistics | 0a49579adb0e9f9d71d4dbb7806d58eee13ddb96 | 3a83d9494a0a8b351d46f84247a3f9e6f68809f8 | refs/heads/master | 2021-01-10T03:30:50.615068 | 2016-03-11T21:44:27 | 2016-03-11T21:44:27 | 53,698,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,177 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Shiny server: interactive visualisation of the Beta(alpha, beta)
# distribution. UI inputs used: input$alpha, input$beta.
shinyServer(function(input, output) {
  # Sorted uniform grid on (0, 1) used as x-axis for both plots.
  unif <- reactive(sort(runif(1000)))
  # Mean of Beta(alpha, beta): alpha / (alpha + beta). (User-facing Spanish.)
  output$text1 <- renderText({
    paste0("La media o Esperanza Matematica se expresa como alpha / (alpha + beta) = ",input$alpha/(input$alpha+input$beta))
  })
  # Variance and standard deviation of Beta(alpha, beta).
  # NOTE(review): "\\n\\n\\n" renders as literal backslash-n characters in
  # the output, not as line breaks -- confirm intent.
  output$text2 <- renderText({
    var <- (input$alpha*input$beta)/((input$alpha+input$beta)^2*(input$alpha+input$beta+1))
    stddev <- sqrt(var)
    paste0("La varianza o segundo momento se expresa como alpha*beta / (alpha + beta)^2(alpha+beta+1) = ",
           var," y su desviacion estandar es ",stddev,"\\n\\n\\n")
  })
  # Density (pdf) plot with vertical lines at the mean (solid) and at
  # mean +/- one standard deviation (dashed red).
  output$distPlot <- renderPlot({
    plot(unif(),dbeta(unif(),input$alpha,input$beta),col = 'darkgray', xlab="Rango de 0 a 1", ylab= "fx",
         main = paste0("Esta es la funcion de densidad de probabilidad de Beta(",input$alpha,",",input$beta,")"),type="l")
    abline(v=input$alpha/(input$alpha+input$beta),lty = 1, col = 21, lwd =4)
    abline(v=(input$alpha/(input$alpha+input$beta))+sqrt((input$alpha*input$beta)/((input$alpha+input$beta)^2*(input$alpha+input$beta+1))),lty = 2, col = "red", lwd =2)
    abline(v=(input$alpha/(input$alpha+input$beta))-sqrt((input$alpha*input$beta)/((input$alpha+input$beta)^2*(input$alpha+input$beta+1))),lty = 2, col = "red", lwd =2)
  })
  # Cumulative distribution (cdf) plot with the same reference lines.
  output$distPlot1 <- renderPlot({
    plot(unif(),pbeta(unif(),input$alpha,input$beta),col = 'skyblue', xlab="Rango de 0 a 1", ylab= "Fx",
         main = paste0("Esta es la funcion cumulativa de probabilidad de Beta(",input$alpha,",",input$beta,")"),type="l")
    abline(v=input$alpha/(input$alpha+input$beta),lty = 1, col = 21, lwd =4)
    abline(v=(input$alpha/(input$alpha+input$beta))+sqrt((input$alpha*input$beta)/((input$alpha+input$beta)^2*(input$alpha+input$beta+1))),lty = 2, col = "red", lwd =2)
    abline(v=(input$alpha/(input$alpha+input$beta))-sqrt((input$alpha*input$beta)/((input$alpha+input$beta)^2*(input$alpha+input$beta+1))),lty = 2, col = "red", lwd =2)
  })
})
|
67617dd9c14c189ff69ab01cf4e69c8b29bd3b17 | c400f710ad66102a0216b1b2df088838938b8bf9 | /logistic_machine_learning.R | da610a9ca29f9ae0202d64b97bc386f3b26938d7 | [] | no_license | dorissuzukiesmerio/R_machine_learning_palmetto | 35ee3f2f3470bd35f9c0d19f18daa65880447a6e | fda1e88233b545b5b1a385192fb4e314a1152229 | refs/heads/main | 2023-09-01T09:01:03.116142 | 2021-10-26T21:07:10 | 2021-10-26T21:07:10 | 381,422,108 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,129 | r | logistic_machine_learning.R | # Logistic Regression : binary output
# Logistic regression on the kernlab spam data ('type' is the binary label).
#install.packages("kernlab")
library(kernlab)
data(spam)
names(spam)
# BUG FIX: table(spam) cross-tabulated the entire 58-column data frame;
# the intended output is the class balance of the label column.
table(spam$type)
library(caret)
set.seed(123)
# Split into training (60%) and test (40%) sets, stratified on the label.
indTrain <- createDataPartition(y=spam$type,p=0.6,list = FALSE)
training <- spam[indTrain,]
testing <- spam[-indTrain,]
# Fit a logistic regression (binomial GLM) on all predictors.
ModFit_glm <- train(type~.,data=training,method="glm")
summary(ModFit_glm$finalModel)
prediction_glm <- predict(ModFit_glm, testing)
head(prediction_glm)
# ROC-AUC evaluation.
# NOTE: install.packages("ROCR") previously ran unconditionally on every
# execution of the script; install once outside the script instead.
#install.packages("ROCR")
library(ROCR)
# Class probabilities for the positive class ('spam'). The original computed
# this twice; the duplicate predict/head pair was removed.
pred_prob <- predict(ModFit_glm, testing, type="prob")
head(pred_prob)
data_roc <- data.frame(pred_prob = pred_prob[,'spam'],
                       actual_label = ifelse(testing$type == 'spam', 1, 0))
roc <- prediction(predictions = data_roc$pred_prob,
                  labels = data_roc$actual_label)
plot(performance(roc, "tpr", "fpr"))
abline(0, 1, lty = 2) # diagonal reference line: performance of random guessing
auc <- performance(roc, measure = "auc")
auc@y.values
|
5c17d702b4b8053621f03622e615563ae7a5dc39 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MapGAM/examples/toformula.Rd.R | 8c94c8a93033f56a3a94a868360f4df1f1f596a6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 247 | r | toformula.Rd.R | library(MapGAM)
### Name: toformula
### Title: Build a Formula Based on Data for 'modgam' Function
### Aliases: toformula
### Keywords: misc smooth
### ** Examples
## No test:
data(CAdata)
toformula(data=CAdata, surv=TRUE)
## End(No test)
|
eb02f6bd34091e10af218df74bc926cd21e86265 | 6d8572fb50a9ba39e6372ff0de70aac877d50ec7 | /man/f_val_calc_total_mol_fraction_CO2.Rd | 935ffbce1033fa2e07373fd7fb98b67b7b55c283 | [] | no_license | erikerhardt/isogasex | aed346bf689f28dce3d8500dc799e80b7354c037 | 2e3fc9c21c1d3d8e2348b7bff28954b5a169b0e8 | refs/heads/master | 2020-05-22T00:32:30.670300 | 2019-07-16T04:43:20 | 2019-07-16T04:43:20 | 186,173,267 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 526 | rd | f_val_calc_total_mol_fraction_CO2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_val_calc_total_mol_fraction_CO2.R
\name{f_val_calc_total_mol_fraction_CO2}
\alias{f_val_calc_total_mol_fraction_CO2}
\title{Total mol fraction CO2}
\usage{
f_val_calc_total_mol_fraction_CO2(obs_12C, obs_13C, fo_13C)
}
\arguments{
\item{obs_12C}{xxxPARAMxxx}
\item{obs_13C}{xxxPARAMxxx}
\item{fo_13C}{xxxPARAMxxx}
}
\value{
total_mol_fraction_CO2 xxxRETURNxxx
}
\description{
\deqn{total_mol_fraction_CO2 = (obs_12C + obs_13C) / (1 - fo_13C)}
}
|
d37d238a80d27430c5e48767fc2cdbbdc6f97147 | f5f142e469ba0526a2768a509630c8b5156b1fcb | /man/get_leaves_branches_col.Rd | 81209f149ec33d2536e39f88f1aa9878c1ff1205 | [] | no_license | JohnMCMa/dendextend | 350ca633b439b8964eec739ba9247c9527ae37f4 | 1e25e5bf786d943b3aa651f4257336462187d43c | refs/heads/master | 2021-01-18T16:05:01.686085 | 2017-03-30T14:15:29 | 2017-03-30T14:15:29 | 86,709,713 | 0 | 0 | null | 2017-03-30T14:06:03 | 2017-03-30T14:06:03 | null | UTF-8 | R | false | true | 1,642 | rd | get_leaves_branches_col.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attr_access.R
\name{get_leaves_branches_col}
\alias{get_leaves_branches_col}
\title{Get the colors of the branches of a dendrogram's leaves}
\usage{
get_leaves_branches_col(dend, ...)
}
\arguments{
\item{dend}{a dendrogram object}
\item{...}{not used}
}
\value{
A vector with the dendrogram's leaves' branches' colors
}
\description{
It is useful to get the colors of branches of the leaves,
after we use \link{color_branches}, so to then match the colors of the labels
to that of the branches (since getting the colors of branches to match
those of the labels can be tricky).
This is based on \link{get_leaves_branches_attr} which is based on
\link{get_leaves_edgePar}.
TODO: The function get_leaves_branches_col may behave oddly when extracting
colors with missing col attributes when the lwd attribute is available.
This may result in a vector with the wrong length (with omitted NA values).
This might need to be fixed in the future, and attention should be given to this case.
}
\examples{
# define dendrogram object to play with:
hc <- hclust(dist(USArrests[1:5,]), "ave")
dend <- as.dendrogram(hc)
par(mfrow = c(1,2), mar = c(5,2,1,0))
dend <- dend \%>\%
color_branches(k = 3) \%>\%
set("branches_lwd", c(2,1,2)) \%>\%
set("branches_lty", c(1,2,1))
plot(dend)
labels_colors(dend) <- get_leaves_branches_col(dend)
plot(dend)
}
\seealso{
\link{get_nodes_attr}, \link{assign_values_to_leaves_nodePar}, \link{labels_colors}
\link{get_leaves_nodePar}, \link{get_leaves_edgePar}, \link{get_leaves_branches_attr}
}
|
85b5c5b952e658605d08c3c3165ceff095f79a37 | 0969cd25b2806877d1aa239e1cb8d20f9d2b5bfe | /R/callback.R | 693845e3ea9b9c033b7b85a83f43ead45add80ff | [
"MIT"
] | permissive | refik/busboyr | 8f5f7544826a82827abca3ff79d24108279f6299 | 8233094162a20466d61387ea8e161ef9bde95da6 | refs/heads/master | 2018-12-20T08:51:54.131784 | 2018-09-17T12:38:24 | 2018-09-17T12:38:24 | 112,404,856 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,132 | r | callback.R | #' Consumes transfer complete tasks from putio
#'
#' Receives one message from the SQS callback queue (queue name taken from
#' the CALLBACK_QUEUE environment variable); the message body is the uuid of
#' a pending download. When a matching download row exists, a "create_file"
#' task is created for it. The message is always deleted afterwards.
#'
#' @param wait seconds to wait for a message (forwarded to aws.sqs::receive_msg)
#' @export
consume_callback <- function(wait = NULL) {
    logger <- get_logger()
    sqs_message <- aws.sqs::receive_msg(
        Sys.getenv("CALLBACK_QUEUE"),
        wait = wait
    )
    # receive_msg returns a data frame; zero rows means the queue was empty.
    if (nrow(sqs_message) == 0) {
        logger("No message in callback queue.")
        return()
    }
    uuid <- sqs_message$Body
    # Get intended download
    download_id <- get_table("download") %>%
        dplyr::filter(uuid == !!uuid) %>%
        dplyr::pull(id)
    if (length(download_id) == 0) {
        logger(glue("download_id for uuid:{uuid} not found, skipping callback."))
    } else {
        create_task("create_file", list(download_id = download_id))
    }
    # Delete the message even when no matching download was found, so a
    # stale uuid cannot block the queue.
    logger("Deleting callback message.")
    aws.sqs::delete_msg(Sys.getenv("CALLBACK_QUEUE"), sqs_message$ReceiptHandle)
}
#' Notify user about updates
#'
#' Pushes a refresh key for a title (optionally a specific season) to the
#' user's open Shiny session via refresh_shiny().
#'
#' @export
refresh_title <- function(user_id, title_id, season = NULL) {
    title_key <- glue("title:{title_id}")
    # BUG FIX: glue("-{season}") returns character(0) when season is NULL,
    # which made refresh_key character(0) instead of "title:<id>". The unused
    # 'logger' local of the original was removed.
    season_key <- if (is.null(season)) "" else glue("-{season}")
    refresh_key <- paste0(title_key, season_key) # title:51453-3
    refresh_shiny(user_id, refresh_key)
}
#' Send a message to users session
#'
#' Looks up the user's most recent session that has not ended and pushes a
#' message onto its per-session SQS queue ("session-<uuid>").
#'
#' @param user_id id of the user whose session should be notified
#' @param message character payload placed on the session queue
#' @export
refresh_shiny <- function(user_id, message) {
    logger <- get_logger()
    # Most recent open session row for this user.
    # (filter_last is a package helper; presumably it keeps the latest row.)
    session_uuid <- get_table("session") %>%
        dplyr::filter(user_id == !!user_id, is.na(ended_at)) %>%
        filter_last() %>%
        dplyr::pull(uuid)
    if (length(session_uuid) == 1) {
        queue_name <- paste("session", session_uuid, sep = "-")
        logger("Sending refresh signal to users shiny session.")
        # Not being able to send refresh is not a show stopper
        try(aws.sqs::send_msg(queue_name, message))
    }
}
#' Transfer complete callback url for put.io
#'
#' Builds the URL put.io calls when a transfer completes: an SQS SendMessage
#' request whose message body is the download's uuid, later consumed by
#' consume_callback().
#'
#' @param uuid uuid identifying the pending download
#' @export
callback_url <- function(uuid) {
    httr::modify_url(
        "https://sqs.us-east-1.amazonaws.com",
        # NOTE(review): AWS account id is hard-coded to one account.
        path = glue("575677476286/{Sys.getenv('CALLBACK_QUEUE')}"),
        query = list(
            Action = "SendMessage",
            MessageBody = uuid
        )
    )
}
fecb7d9551e70709f6e006d8182e20a277d4e6f6 | 1ce674b9b77d1c11c36ab8263cdf002ed5515f33 | /Src/annotate_tissue_abbundance.r | 9f38fe0942b424655741c6a0f18bad0520b4c9d9 | [] | no_license | RaikOtto/Generic_mRNA_Expression_Pipeline | a129dd208dbd7073446f3a3109e384eb53b9d83b | 4a50c18e2c3ca261812da63e4f8fa360ffc49f8c | refs/heads/master | 2021-01-17T08:16:42.887852 | 2016-05-04T14:27:51 | 2016-05-04T14:27:51 | 41,857,180 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,862 | r | annotate_tissue_abbundance.r | #print("Comparison to normal tissue expression")
# Annotate differentially expressed probes (`topall`) with normal-tissue and
# whole-blood expression levels, then export a CSV table and a heatmap.
# Expects in the calling environment: topall, eset, chip_type,
# body_exprs_maps_path, tissue_abbundance_res_file, cel_files_path.
library("hgu133a.db")
# probe -> HGNC symbol map, including probes with multiple mappings
mapWithMultiProbes_symbols = toggleProbes( hgu133aSYMBOL, "all")
# disabled block: load and log2-transform the normal-tissue expression matrix
if(F){
tissue_of_interest = c("kidney","kidney.1")
data_tmp = read.table(tissue_norm_exprs_file_path, sep ="\t",header=T)
tissue_norm_exprs = data_tmp[ , 2 : length( data_tmp[ 1 , ] ) ] ; remove( data_tmp )
tissue_norm_exprs = log2(tissue_norm_exprs)
print(c("Position of samples of interest:", which( colnames(tissue_norm_exprs) %in% tissue_of_interest ) ))
probe_ids = read.table(tissue_norm_exprs_file_path, sep ="\t",header=T)[,1]
tissue_hgnc_symbs = mget( as.character( probe_ids ), mapWithMultiProbes_symbols )
print(c("Amount probes contained in both topall and tissue expression data:",sum(rownames(topall) %in% probe_ids)))
}
#tmp = as.matrix( tissue_norm_exprs, ncol = 158 )
#colnames( tmp ) = colnames(tissue_norm_exprs)
#tissue_norm_exprs = tmp; remove(tmp)
# keep only the up-regulated probes (positive log fold change)
topall_high = topall[topall$logFC > 0,]
# map probe ids to HGNC symbols; the lookup depends on the chip platform
if ( chip_type %in% c("hgu133plus2" ) ){
hgnc_high_topall = mget( rownames(topall_high), hgu133plus2SYMBOL )
} else if ( chip_type %in% c("hgu133a", "pd.hg.u133a" ) ){
library("hgu133a.db")
hgnc_high_topall = mget( rownames(topall_high), hgu133aSYMBOL )
} else if ( chip_type %in% c( "pd.hugene.2.0.st", "pd.huex.1.0.st.v2" ) ){
featureData( eset ) = getNetAffx( eset, type = "transcript" )
# the second " // "-separated field of a netaffx gene assignment is the symbol
split_fun = function( entry ){ res = unlist( str_split( entry, " // " ) ); if (length(res) > 1){ return( res[2] ) } else{ return( "" ) } }
mapping = match( rownames(topall_high), rownames(eset ) )
sub_set = eset[ mapping ,]
hgnc_high_topall = str_trim( unlist( lapply( featureData( sub_set )$geneassignment, FUN=split_fun ) ) )
hgnc_genes = str_trim( unlist( lapply( featureData( eset )$geneassignment, FUN=split_fun ) ) )
remove(sub_set)
}
# NOTE(review): hgnc_genes is only assigned in the last branch above but is
# used unconditionally further down -- confirm chip_type for the other branches.
#mapping = match( hgnc_topall, tissue_hgnc_symbs, nomatch = 0 )
#subset = tissue_norm_exprs[ mapping ,]
#res = data.frame(
# "hgnc_topall" = as.character( tissue_hgnc_symbs[ mapping ] ),
# "row_means_value" = rowMeans(subset)
#)
#res = res[ res$hgnc_symbol != "NA" ,]
#res = res[order( as.double( res$row_means_value), decreasing = F ),]
### run blood
# load the GSE2888 whole-blood CEL files once and RMA-normalize them
print("Calculating blood genes")
if ( !exists("eset_blood") ){
library("pd.hg.u133a")
celFiles = list.celfiles( "~/Dropbox/PhD/Kidney_Cancer_Biomarker/GSE2888_RAW_GPL96", full =T, listGzipped = T )
raw_data_blood= read.celfiles( filenames = celFiles) # type = pd.hg.u133a
eset_blood = rma( raw_data_blood, normalize = T)
blood_genes = mget( rownames( eset_blood ), hgu133aSYMBOL )
}
### proteome map
# body-wide expression map (first column = gene symbols); fetal tissues dropped
expr_map = read.table( body_exprs_maps_path, sep =",",header=T)
expr_map = expr_map[ , - which( startsWith( colnames(expr_map), "Fetal" ) ) ]
expr_gene = expr_map[, 1]
expr_map = expr_map[,-1]
### merge results
# restrict the up-regulated probes to those with a symbol present in the
# expression map and with a measurement in the blood data set
eset_mapping = match( rownames( topall_high ), rownames( eset ), nomatch = 0)
mapping_eset = which( rownames( topall_high ) %in% rownames( eset ) )
hgnc_high_topall = hgnc_high_topall[ !is.na(hgnc_high_topall) ]
topall_high_filt = topall_high[ which( as.character( hgnc_high_topall ) %in% as.character( expr_gene) ) ,]
mapping_blood = match( rownames( topall_high_filt ), rownames( eset_blood ), nomatch = 0 )
topall_high_filt = topall_high_filt[ which( rownames(topall_high_filt) %in% rownames( eset_blood ) ),]
eset_tmp = eset[ which( rownames(eset) %in% rownames( eset_blood ) ) ,]
hgnc_genes_filt = hgnc_genes[ match( rownames(topall_high_filt), rownames( eset_tmp ), nomatch = 0) ]
topall_high_filtmap = match( rownames(topall_high_filt), rownames(eset_tmp), nomatch = 0 )
# one row per probe: symbol, logFC, mean expression in eset, mean blood expression
res = cbind(
hgnc_genes_filt,
round( topall_high_filt$logFC ,2 ),
round( rowMeans(
exprs( eset_tmp[ topall_high_filtmap, ] )
), 2),
round( rowMeans(
exprs( eset_blood[ mapping_blood, ] )
), 2)
)
remove(eset_tmp)
expr_map_mapping = match( hgnc_genes_filt , as.character( expr_gene ), nomatch = 0 )
expr_map_present = which( hgnc_genes_filt %in% as.character( expr_gene ) )
res = res[ expr_map_present ,]
# floor tissue values at 1.0 so log2 stays non-negative, then log-transform
tmp = apply( expr_map[ expr_map_mapping, ], MARGIN = 2, FUN = function(x){ x[ x < 1.0] = 1.0; return( x ) } )
tmp = round( log2( tmp ), 1)
# per gene: in how many tissues does log2 expression reach 4
counter = apply( tmp, MARGIN= 1, FUN = function(x){ return( sum( x >= 4 ) ) } )
res = cbind( res, counter, tmp )
colnames(res)[1:5] = c("HGNC_symbol","LogFC","Exprs_Strength", "Exprs_Blood","NMBR_Exprs_Tissue_Greater_4")
# genes expressed in the fewest tissues come first
res = res[ order( unlist( res[,5]), decreasing = F ) , ]
write.table( res, tissue_abbundance_res_file , row.names=F, quote = F, sep =",", col.names =T )
l = length( res[1,] )
my_palette <- colorRampPalette(c("white", "yellow", "red"))(n = l)
rownames(tmp) = res[,1]
library("gplots")
# heatmap of per-tissue log2 expression for the retained genes
png(paste(cel_files_path,"tissue_abbundace.png", sep ="/"))
heatmap.2(t(tmp), col=my_palette, trace="none")
dev.off()
|
2c96b1c4d01fbce42d3ba4e3751eb5ea83400c1c | 4418250666483cdfd0247130613265517c6cc702 | /machine-learning-fundamentals/supervised-learning in-r-classification/4. Classification Trees.R | 0048c0a0ce9cc6da3e3bfb6c60cb6306d8fa452d | [] | no_license | andrepvieira/courses | fb3b36bd0887f1be4846454b2bf5731bfea1a3be | 03ce293996cb524b39f6fdc1345cf2f25a194d54 | refs/heads/master | 2020-04-27T14:03:32.044320 | 2019-03-07T20:08:29 | 2019-03-07T20:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,164 | r | 4. Classification Trees.R | #' Classification trees use flowchart-like structures to make decisions.
#' Because humans can readily understand these tree structures,
#' classification trees are useful when transparency is needed, such as in
#' loan approval. We'll use the Lending Club dataset to simulate this
#' scenario.
#
# NOTE(review): this is a course-exercise script; objects such as
# `test_data`, `good_credit`, `bad_credit` and `p` are provided by the
# course environment and are not defined in this file.
# building a simple rpart classification tree
library(rpart)
library(tidyverse)
m <- rpart(outcome ~ loan_amount + credit_score, data = loans,
           method = "class")
# making predictions from an rpart tree
p <- predict(m, test_data, type = "class")
# Making decisions with trees -----------------------------------------
loans <- read_csv("./cursos/machine-learning-fundamentals/supervised-learning in-r-classification/loans.csv")
# Load the rpart package
library(rpart)
# Build a lending model predicting loan outcome versus loan amount and credit score
loan_model <- rpart(outcome ~ loan_amount + credit_score, data = loans, method = "class", control = rpart.control(cp = 0))
# Make a prediction for someone with good credit
predict(loan_model, good_credit, type = "class")
# Make a prediction for someone with bad credit
predict(loan_model, bad_credit, type = "class")
library(rpart.plot)
# Examine the loan_model object
loan_model
# Load the rpart.plot package
library(rpart.plot)
# Plot the loan_model with default settings
rpart.plot(loan_model)
# Plot the loan_model with customized settings
rpart.plot(loan_model, type = 3, box.palette = c("red", "green"),
           fallen.leaves = TRUE)
# Growing larger classification trees --------------------------------
# Determine the number of rows for training (75% train / 25% test split)
0.75*nrow(loans)
# Create a random sample of row IDs
sample_rows <- sample(11312, 8484)
# Create the training dataset
loans_train <- loans[sample_rows, ]
# Create the test dataset
loans_test <- loans[-sample_rows, ]
# Grow a tree using all of the available applicant data
loan_model <- rpart(outcome ~ ., data = loans_train, method = "class", control = rpart.control(cp = 0))
# Make predictions on the test dataset
loans_test$pred <- predict(loan_model, loans_test, type = "class")
# Examine the confusion matrix
table(loans_test$pred, loans_test$outcome)
# Compute the accuracy on the test dataset
mean(loans_test$pred == loans_test$outcome)
# Tending to classification trees ------------------------------------
#' Decision trees have the tendency to grow very large and complex very
#' quickly. One method of preventing a tree from becoming too large
#' involves stopping the growing process early. This is known as pre-pruning.
#'
#' One of the simplest approaches to pre-pruning stops divide-and-conquer
#' once the tree reaches a predefined size. Another pre-pruning method
#' requires a minimum number of observations at a node in order for a split
#' to occur.
#'
#' However, a tree stopped too early may fail to discover subtle or
#' important patterns it might have discovered later. To address this
#' concern, it is also possible to grow a very large tree, knowing that it
#' will be overly complex, but then prune it back to reduce the size.
#' This is known as post-pruning.
#'
#'
# Grow a tree using all of the available applicant data
loan_model <- rpart(outcome ~ ., data = loans_train, method = "class",
                    control = rpart.control(cp = 0))
# Make predictions on the test dataset
loans_test$pred <- predict(loan_model, loans_test, type = "class")
# Examine the confusion matrix
table(loans_test$pred, loans_test$outcome)
# Compute the accuracy on the test dataset
mean(loans_test$pred == loans_test$outcome)
# post-pruning with rpart: grow, inspect the complexity curve, then prune
m <- rpart(repaid ~ credit_score + request_amt,
           data = loans,
           method = "class")
plotcp(m)
m_pruned <- prune(m, cp = 0.20)
# Practice!
# Grow a tree with maxdepth of 6
loan_model <- rpart(outcome ~ ., data = loans_train, method = "class",
                    control = rpart.control(cp = 0, maxdepth = 6))
# Make a class prediction on the test set
loans_test$pred <- predict(loan_model, loans_test, type = "class")
# Compute the accuracy of the simpler tree
mean(loans_test$pred == loans_test$outcome) # 59.2%
# Swap maxdepth for a minimum split of 500
loan_model <- rpart(outcome ~ ., data = loans_train, method = "class", control = rpart.control(cp = 0, minsplit = 500))
# Run this. How does the accuracy change?
loans_test$pred <- predict(loan_model, loans_test, type = "class")
mean(loans_test$pred == loans_test$outcome) # 59.2%
#' It may seem surprising, but creating a simpler decision tree may
#' actually result in greater performance on the test dataset.
# Grow an overly complex tree
loan_model <- rpart(outcome ~ ., data = loans_train, method = "class", control = rpart.control(cp = 0))
# Examine the complexity plot
plotcp(loan_model)
# Prune the tree
loan_model_pruned <- prune(loan_model, cp = 0.0014)
# Compute the accuracy of the pruned tree
loans_test$pred <- predict(loan_model_pruned, loans_test, type = "class")
mean(loans_test$pred == loans_test$outcome) # 60.1%
#' As with pre-pruning, creating a simpler tree actually improved the
#' performance of the tree on the test dataset.
# Seeing the forest from the trees -----------------------------------
#' Classification trees can be combined into a collection known as a
#' decision tree forest. These forests are among the most powerful
#' machine learning classifiers.
#'
#' The power of decision tree forests comes from a collection of smaller,
#' simpler trees that together reflect the data's complexity. Each of the
#' forest's trees is diverse, and may reflect some subtle pattern in the
#' outcome to be modeled.
#'
#' Generating this diversity is the key to building powerful decision
#' tree forests. This is done by allocating each tree a random subset of
#' data. One may receive a vastly different training set than another.
#'
#' The term random forests refers to the specific growth algorithm in
#' which both the features and examples may differ from tree to tree.
#'
#' Machine learning methods like random forests that apply this principle
#' are called ensemble methods. They are based on the principle that
#' weaker learners become stronger with teamwork.
#'
#' In a random forest, each tree is asked to make a prediction, and
#' the group's overall prediction is determined by a majority vote.
#'
#' The R package randomForest implements the random forest algorithm.
# building a simple random forest
library(randomForest)
m <- randomForest(repaid ~ credit_score + request_amt, data = loans,
                  ntree = 500,   # number of trees in the forest
                  mtry = sqrt(p)) # number of predictors (p) per tree
# making predictions from a random forest
p <- predict(m, test_data)
# Practice!
# Load the randomForest package
library(randomForest)
# Build a random forest model
loan_model <- randomForest(outcome ~ ., data = loans)
# Compute the accuracy of the random forest
loans_test$pred <- predict(loan_model, loans_test)
mean(loans_test$pred == loans_test$outcome) # 60.2%
#' Classification is only one of the problems you'll have to tackle as a
#' data scientist.
|
eb3c6a5d39a18f8a9cdedb62a278d7bbe1d85a85 | fbe71ac8022c9a05ed461ce4a65f80d6e46ac55a | /pkg/mutoss/R/localfdr.R | ffe228a9954361e4ea2689afa7c1f966adc12d44 | [] | no_license | kornl/mutoss | 63474aceaac4a4bc9373ea1ae8bdb1d65ea7222b | 21d3d8b397632d54138510f514102bf8cb5f6686 | refs/heads/master | 2020-04-14T19:30:39.280884 | 2017-12-04T16:39:36 | 2017-12-04T16:39:36 | 10,869,131 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,104 | r | localfdr.R | #
# Author: JonathanRosenblatt
###############################################################################
# Convert raw p-values into Storey q-values via the fdrtool package.
#
# Args:
#   pValues: numeric vector of raw p-values.
#   cutoff: optional q-value (pFDR) threshold; when supplied, a logical
#           `rejected` vector (qValues <= cutoff) is added to the result.
# Returns: list(qValues = ...) or list(qValues = ..., rejected = ...).
pval2qval <- function(pValues, cutoff) {
	requireLibrary('fdrtool')
	# fetch fdrtool() explicitly from its namespace
	fdrtool <- get("fdrtool", envir = asNamespace("fdrtool"))
	fit <- fdrtool(pValues, statistic = 'pvalue', plot = FALSE, verbose = FALSE)
	res <- list(qValues = fit$qval)
	if (!missing(cutoff)) {
		res$rejected <- fit$qval <= cutoff
	}
	res
}
# Convert raw p-values into local false discovery rates via fdrtool.
#
# Args:
#   pValues: numeric vector of raw p-values.
#   cutoff: optional local-fdr threshold; when supplied, a logical
#           `rejected` vector (locFDR <= cutoff) is added to the result.
# Returns: list(locFDR = ...) or list(locFDR = ..., rejected = ...).
pval2locfdr <- function(pValues, cutoff) {
	requireLibrary('fdrtool')
	# fetch fdrtool() explicitly from its namespace
	fdrtool <- get("fdrtool", envir = asNamespace("fdrtool"))
	fit <- fdrtool(pValues, statistic = 'pvalue', plot = FALSE, verbose = FALSE)
	res <- list(locFDR = fit$lfdr)
	if (!missing(cutoff)) {
		res$rejected <- fit$lfdr <= cutoff
	}
	res
}
# GUI metadata for the local fdr procedure (pval2locfdr above): returns a
# MutossMethod descriptor whose `info` HTML is displayed to the user and
# whose `parameters` list drives the MuToss input dialog.
mutoss.locfdr <- function() {
return(new(Class="MutossMethod",
label="Local FDR (fdr)",
callFunction="pval2locfdr",
# the two named elements returned by pval2locfdr
output=c("locFDR", "rejected"),
info=
"<h2> Name: </h2> Local fdr.\n
<h3> Also known as: </h3> fdr, empirical posterior probability of the null. \n
<h3> Error Type: </h3> Motivated by Bayesian considerations. Does not guarantee control of frequentist error types like FWER or FDR.\n
<h3> Recommended Usage: </h3> Typically used when a massive amount of hypotheses is being tested as in microarray analyses.\n
<h3> Related procedures: </h3> See FDR methods for similar procedures for frequentist error control.\n
<h3> References: </h3> \n
<ul>
<li> Efron B., Tibshirani R., Storey J. D. and Tusher, V. (2001).<i> Empirical Bayes Analysis of a Microarray Experiment. </i>\n
Journal of the American Statistical Association 96(456):1151-1160. </li>
</ul>",
# `cutoff` is optional, matching pval2locfdr's missing(cutoff) handling
parameters=list(
pValues=list(type="numeric"),
cutoff=list(type="numeric", label="Local fdr cutoff for rejection", optional=TRUE))))
}
# GUI metadata for the q-value procedure (pval2qval above): returns a
# MutossMethod descriptor whose `info` HTML is displayed to the user and
# whose `parameters` list drives the MuToss input dialog.
# NOTE(review): the "Also known as" <ul> inside `info` is never closed with
# </ul>; most HTML renderers recover, but it is worth fixing at source.
mutoss.qvalues <- function() {
return(new(Class="MutossMethod",
label="q Values (Fdr)",
callFunction="pval2qval",
# the two named elements returned by pval2qval
output=c("qValues", "rejected"),
info=
"<h2> Name: </h2> q-Values.\n
<h3> Also known as: </h3> \n
<ul>
<li> Estimated pFDR</li>\n
<li> Estimated Positive FDR </li>\n
<li> Empirical tail-area posterior probability of the null</li> \n
<h3> Error Type: </h3> Motivated by Bayesian considerations. Guarantees FDR control only when masses of hypotheses are being tested.\n
<h3> Recommended Usage: </h3> Typically used when a massive amount of hypotheses is being tested as in microarray analyses.\n
<h3> Related procedures: </h3> See FDR methods for similar procedures with frequentist error control.\n
<h3> References: </h3> \n
<ul>
<li> Storey, J. D. (2003)<i>The Positive False Discovery Rate: A Bayesian Interpretation and the q-Value.</i>
The Annals of Statistics 31(6): 2013-2035. </li>
</ul>",
# `cutoff` is optional, matching pval2qval's missing(cutoff) handling
parameters=list(
pValues=list(type="numeric"),
cutoff=list(type="numeric", label="q-value (pFDR) cutoff for rejection", optional=TRUE))))
}
|
1117200125940e4f880df8744a6cedcf4d6c981a | 1d3505b16f17a0c473fbc0e4bbe456976c5830ce | /scripts/timeline.R | f8623ec8b84aaa459332382d5b398ff1dc664c9b | [] | no_license | anuto/info-201-final | 82ec83a2de573ca5e71bec0ee5c5a19e2e0023c1 | 07eb452bcb69e737baaf30216593d2f6be87787d | refs/heads/master | 2020-12-30T15:54:58.601440 | 2017-10-06T18:18:18 | 2017-10-06T18:18:18 | 91,186,114 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,553 | r | timeline.R | library(plotly)
library(gdata)
library(dplyr)
# Timeline: draw a plotly dot timeline of cases, either the still-open
# ones or the closed ones (with their duration shown as hover text).
#
# Args:
#   oc: "Open" to plot open cases; any other value plots closed cases.
#   this.data: data frame with columns is.closed, opened, closed, case_id.
# Returns: the plotly object built in the chosen branch.
Timeline <- function(oc, this.data) {
  # Work on a local name for the input data
  data <- this.data
  # Keep the still-open cases
  # NOTE(review): is.closed is compared against the *string* "FALSE"; this
  # works for character and (via coercion) logical columns -- confirm type.
  open <- data %>% filter(is.closed == "FALSE")
  # Parse the opening date into Date objects
  open.case <- as.Date(open$opened, format = "%m/%d/%y")
  # Keep the closed cases with their open/close dates
  close <- data %>% filter(is.closed == "TRUE") %>% select(case_id, opened, closed)
  # Parse the open date into Date objects
  closed.case.open.date <- as.Date(close$opened, format = "%m/%d/%y")
  # Parse the close date into Date objects
  closed.case.close.date <- as.Date(close$closed, format = "%m/%d/%y")
  # Number of days each closed case stayed open
  duration <- closed.case.close.date - closed.case.open.date
  # Axis settings that hide the (meaningless) y axis
  ax <- list(
    title = "",
    zeroline = FALSE,
    showline = FALSE,
    showticklabels = FALSE,
    showgrid = FALSE
  )
  # Draw the timeline plot for open cases
  if(oc == "Open") {
    p <- plot_ly(x = ~open.case, y = 0, color = 'blue') %>% layout(yaxis = ax,
                                title = "Open Cases",
                                xaxis = list(title = "year"))
  } else{
    # Draw the timeline plot for closed cases (hover text shows the duration)
    g <- plot_ly(x = ~closed.case.open.date, y = 0, text = paste(duration, "days"), color = 'orange') %>%
      layout(yaxis = ax,
             xaxis = list(title = "year"),
             title = "Closed Cases")
  }
} |
b71eb3b23405eec98e611d9915b3a0600e900cb7 | 05c6c5192018fa59f713e894dc3c3cf0d588036f | /man/gwsca.vioplot.Rd | cb80fe325d9a9532e84816b49013091cf63c1eab | [] | no_license | dyerlab/gwscaR | 029dd73185098375d6f136757cd4cf4855c8f605 | 226fef34caeceff472af8894ff5062a58ab77e5e | refs/heads/master | 2023-03-22T08:30:42.405029 | 2019-11-13T02:47:17 | 2019-11-13T02:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,022 | rd | gwsca.vioplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gwscaR_plot.R
\name{gwsca.vioplot}
\alias{gwsca.vioplot}
\title{Create violin plots with}
\usage{
gwsca.vioplot(x, ..., range = 1.5, h = NULL, ylim = NULL,
names = NULL, horizontal = FALSE, col = "magenta",
border = "black", lty = 1, lwd = 1, rectCol = "black",
colMed = "white", pchMed = 19, at, add = FALSE, wex = 1,
drawRect = TRUE, plot.axes = TRUE, axis.box = FALSE,
plot.ann = TRUE, axis.bty = "l")
}
\arguments{
\item{x}{the data to plot}
\item{...}{legend parameters}
\item{range}{The range for plotting}
\item{h}{The smoothing bandwidth of the density estimator (passed to sm.density); if omitted, h will be set to an optimum}
\item{ylim}{The y-axis limits}
\item{names}{Names to plot (default is NULL)}
\item{horizontal}{Whether the plot should be horizontal (defaults to FALSE)}
\item{col}{Color for violin plots}
\item{border}{Border color}
\item{lty}{Line type}
\item{lwd}{line width}
\item{rectCol}{rectangle color}
\item{colMed}{median point color}
\item{pchMed}{median point size}
\item{at}{The positions on the axis at which the violins are drawn}
\item{add}{Whether the plot gets added to others (defaults to FALSE)}
\item{wex}{Relative expansion factor controlling the width of each violin}
\item{drawRect}{Whether to add rectangles to violin plots (defaults to TRUE)}
\item{plot.axes}{A logical value that determines whether or not to add axes (defaults to TRUE)}
\item{axis.box}{A logical value that determines whether the axes should be a box (defaults to FALSE)}
\item{plot.ann}{A logical value that determines whether the annotations should be plotted (defaults to TRUE)}
\item{axis.bty}{The axis box type (default is L)}
}
\value{
Optionally returns the summary statistics of the data.
}
\description{
Create violin plots with
}
\note{
Modified from vioplot() in library(vioplot)
}
\examples{
library(sm)
mu<-2
si<-0.6
bimodal<-c(rnorm(1000,-mu,si),rnorm(1000,mu,si))
uniform<-runif(2000,-4,4)
normal<-rnorm(2000,0,3)
gwsca.vioplot(bimodal,uniform,normal,col=c("red","blue","green"))
}
|
8171723c8e554fe154a1b48fb61b75ce7a792b52 | 8f951800419bbd7e752db183d61891fe3ab239c0 | /normalized_alignment.R | be5d44d82fdba1b243fa5685ff19feaf7af9cdcc | [
"MIT"
] | permissive | innerfirexy/cognition2017 | 28e09234203ed0cb112ca5a7cdefe41ad1114770 | 98ab93e520d991991dbb24d527c742bb768b559a | refs/heads/master | 2021-01-20T16:58:43.329825 | 2017-09-26T18:04:19 | 2017-09-26T18:04:19 | 82,845,793 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 15,762 | r | normalized_alignment.R | # Analyze how the normalized LLA measure (from length effect) changes within topic episode and dialogue
# Yang Xu
# 2/28/2017
library(data.table)
library(tidyverse)
library(stringr)
library(lme4)
library(lmerTest)
library(ggplot2)
library(ggalt)
# LLA function
# Local linguistic alignment (LLA) between a prime and a target utterance.
#
# Args:
#   prime: character vector of word tokens in the prime (earlier) turn.
#   target: character vector of word tokens in the target (later) turn.
#   normalizer: how the raw repetition count is normalized; one of
#     'sum' (len(prime) + len(target)), 'prod' (len(prime) * len(target)),
#     or 'sqrtprod' (sqrt of the product).
# Returns:
#   A single numeric LLA score, or NaN when either turn is empty.
LLA = function(prime, target, normalizer='sum') {
    # examine arguments
    stopifnot(normalizer %in% c('sum', 'prod', 'sqrtprod'))
    # an empty prime or target carries no alignment information
    # (scalar condition, so use the short-circuit || rather than |)
    if (length(prime) == 0 || length(target) == 0) {
        return(NaN)
    }
    # number of target tokens that also occur in the prime
    # (vectorized form of the original element-by-element counting loop)
    repeatcount = sum(target %in% prime)
    switch(normalizer,
        sum = repeatcount / (length(prime) + length(target)),
        prod = repeatcount / (length(prime) * length(target)),
        sqrtprod = repeatcount / sqrt(length(prime) * length(target))
    )
}
# the function that computes LLA (local linguistic alignment) between speaking turns
# This is a modified function from the one in alignment_withintopic.R
# It outputes the sum of prime and target length as well, in addition to LLA
# Compute turn-by-turn LLA (local linguistic alignment) between speaking
# turns. Modified from the version in alignment_withintopic.R: besides the
# three LLA variants it also outputs the summed token length of prime and
# target (used downstream to normalize LLA for turn length).
#
# Args:
#   data: data.table with columns convId, turnId, speaker, rawWord
#         (one row per utterance).
# Returns:
#   data.table with one row per (convId, turnId) and columns lla_sum,
#   lla_prod, lla_sqrtprod, sumLen; all NaN for the first turn of a
#   conversation or when the same speaker holds consecutive turns.
compute_LLA_sumLen = function(data) {
    # copy so the caller's data.table is not modified by reference
    d1 = copy(data)
    setkey(d1, convId, turnId)
    # join text within the same turn
    d2 = d1[, {
        # words = sapply(rawWord, function(text) { str_split(text, ' ')[[1]] })
        .(turnText = str_c(rawWord, collapse = ' '), speaker = speaker[1])
    }, by = .(convId, turnId)]
    # previous turn's info, aligned to the current row by shifting one down
    prevConvId = shift(d2[, last(convId), by = .(convId, turnId)]$V1)
    prevSpeaker = shift(d2[, last(speaker), by = .(convId, turnId)]$V1)
    prevTurnText = shift(d2[, last(turnText), by = .(convId, turnId)]$V1)
    # LLA: the previous turn is the prime, the current turn is the target
    d.align = d2[, {
        currWords = str_split(turnText, ' ')[[1]]
        if (is.na(prevConvId[.GRP])) {
            # very first turn of the corpus: no prime available
            lla_sum = NaN
            lla_prod = NaN
            lla_sqrtprod = NaN
            sumLen = NaN
        } else if (convId != prevConvId[.GRP]) {
            # first turn of a conversation: prime belongs to another dialogue
            lla_sum = NaN
            lla_prod = NaN
            lla_sqrtprod = NaN
            sumLen = NaN
        } else if (speaker == prevSpeaker[.GRP]) {
            # same speaker continues: not a prime/target pair
            lla_sum = NaN
            lla_prod = NaN
            lla_sqrtprod = NaN
            sumLen = NaN
        } else {
            prime = str_split(prevTurnText[.GRP], ' ')[[1]]
            target = str_split(turnText, ' ')[[1]]
            lla_sum = LLA(prime, target, normalizer='sum')
            lla_prod = LLA(prime, target, normalizer='prod')
            lla_sqrtprod = LLA(prime, target, normalizer='sqrtprod')
            sumLen = as.numeric(length(prime) + length(target))
        }
        .(lla_sum = lla_sum, lla_prod = lla_prod, lla_sqrtprod = lla_sqrtprod, sumLen = sumLen)
    }, by = .(convId, turnId)]
    d.align
}
###
# Part 1: Switchboard (SWBD).
# Compute the normalized alignment: raw LLA is divided by the mean LLA of all
# turn pairs having the same summed length (sumLen), which removes the
# turn-length confound from the alignment measure.
dt.swbd = fread('data/SWBD_text_db.csv')
setkey(dt.swbd, convId)
system.time(dt.swbd.align <- compute_LLA_sumLen(dt.swbd)) # elapsed 27.734 sec
# compute the mean lla for each sumLen level
setkey(dt.swbd.align, sumLen)
dt.swbd.align.mean = dt.swbd.align[, {
    .(lla_sum_mean = mean(lla_sum[!is.nan(lla_sum)]),
      lla_prod_mean = mean(lla_prod[!is.nan(lla_prod)]),
      lla_sqrtprod_mean = mean(lla_sqrtprod[!is.nan(lla_sqrtprod)]))
}, by = sumLen]
# join `lla_*_mean` columns back to dt.swbd.align
dt.swbd.align = dt.swbd.align[dt.swbd.align.mean, nomatch = 0]
# compute the normalized lla
dt.swbd.align[, lla_sum_norm := lla_sum / lla_sum_mean][, lla_prod_norm := lla_prod / lla_prod_mean][, lla_sqrtprod_norm := lla_sqrtprod / lla_sqrtprod_mean]
##
# Use models to check how lla_*_norm changes within dialogue and topic episode
# (mixed-effects models with a random intercept per conversation; the inline
# comments below record the fitted coefficient rows from summary())
m = lmer(lla_sum_norm ~ turnId + (1|convId), dt.swbd.align)
summary(m)
# turnId      8.265e-04  1.555e-04 4.929e+04   5.316 1.07e-07 ***
# Yes! Alignment actually increases along dialogue
m = lmer(lla_prod_norm ~ turnId + (1|convId), dt.swbd.align)
summary(m)
# turnId      8.048e-04  1.517e-04 4.299e+04   5.305 1.13e-07 ***
m = lmer(lla_sqrtprod_norm ~ turnId + (1|convId), dt.swbd.align)
summary(m)
# turnId      8.183e-04  1.507e-04 4.814e+04   5.429 5.68e-08 ***
##
# check how lla_* (w/o normalizing) change within dialogue
m = lmer(lla_sum ~ turnId + (1|convId), dt.swbd.align)
summary(m)
# turnId      2.547e-06  4.162e-06 5.647e+04   0.612    0.541 n.s.
m = lmer(lla_prod ~ turnId + (1|convId), dt.swbd.align)
summary(m)
# turnId      3.002e-05  3.561e-06 1.599e+04    8.43   <2e-16 ***
m = lmer(lla_sqrtprod ~ turnId + (1|convId), dt.swbd.align)
summary(m)
# turnId      -4.433e-06  9.433e-06  6.011e+04   -0.47    0.638 n.s.
# Read topic information data and join with alignment data
dt.swbd.topic = fread('data/SWBD_entropy_db.csv')
setkey(dt.swbd.topic, convId, turnId)
setkey(dt.swbd.align, convId, turnId)
dt.swbd.comb = dt.swbd.topic[dt.swbd.align, nomatch=0]
# shrink inTopicId column by computing the mean (collapse to one row per turn)
dt.swbd.comb = dt.swbd.comb[, {
    .(topicId = topicId[1], inTopicId = mean(inTopicId),
      lla_sum = lla_sum[1], lla_sum_norm = lla_sum_norm[1],
      lla_prod = lla_prod[1], lla_prod_norm = lla_prod_norm[1],
      lla_sqrtprod = lla_sqrtprod[1], lla_sqrtprod_norm = lla_sqrtprod_norm[1],
      ent = mean(ent))
}, by = .(convId, turnId)]
# add uniqueTopicId (corpus-wide id for each (conversation, topic) episode)
dt.swbd.comb[, uniqueTopicId := .GRP, by = .(convId, topicId)]
# models
# lla_*_norm ~ inTopicId
m = lmer(lla_sum_norm ~ inTopicId + (1|uniqueTopicId), dt.swbd.comb)
summary(m)
# inTopicId   9.899e-03  1.563e-03 4.167e+04   6.333 2.43e-10 ***
# lla_sum_norm increases within topic episode!
m = lmer(lla_prod_norm ~ inTopicId + (1|uniqueTopicId), dt.swbd.comb)
summary(m)
# inTopicId   7.342e-03  1.532e-03 3.891e+04   4.793 1.65e-06 ***
# lla_prod_norm increases
m = lmer(lla_sqrtprod_norm ~ inTopicId + (1|uniqueTopicId), dt.swbd.comb)
summary(m)
# inTopicId   8.815e-03  1.516e-03 4.134e+04   5.816 6.07e-09 ***
# lla_sqrtprod_norm increases
# lla_* ~ inTopicId
m = lmer(lla_prod ~ inTopicId + (1|uniqueTopicId), dt.swbd.comb)
summary(m)
# inTopicId   2.182e-04  3.360e-05 2.761e+04   6.493 8.54e-11 ***
m = lmer(lla_sqrtprod ~ inTopicId + (1|uniqueTopicId), dt.swbd.comb)
summary(m)
# inTopicId  -3.575e-04  9.770e-05  4.087e+04   -3.66 0.000253 ***
# Haha, opposite direction when we use square root as normalizer
# NOTE(review): `llaNorm` below is not a column created anywhere in this
# script (only lla_*_norm are); presumably left over from an earlier
# version -- confirm before re-running the next two models.
m = lmer(llaNorm ~ ent + (1|convId), dt.swbd.comb)
summary(m)
# ent         4.326e-02  1.827e-03 7.636e+04   23.67   <2e-16 ***
# llaNorm is also sensitive to entropy
# add shifted entropy column (entropy of the previous utterance)
shiftedEnt = shift(dt.swbd.comb$ent)
dt.swbd.comb$shiftedEnt = shiftedEnt
dt.swbd.tmp = dt.swbd.comb[, .SD[2:.N,], by=convId]
m = lmer(llaNorm ~ shiftedEnt + (1|convId), dt.swbd.tmp)
summary(m)
# shiftedEnt  4.935e-02  1.828e-03 7.636e+04   27.00   <2e-16 ***
# llaNorm is correlated with the entropy of previous utterance
##
# How does llaNorm change across topic boundaries
dt.swbd.bound = dt.swbd.comb[, {
    # find the positions where topic shift happens
    beforeInd = which(diff(topicId)==1)
    atInd = which(c(0, diff(topicId))==1)
    afterInd1 = atInd + 1
    afterInd2 = atInd + 2
    .(lla_prod_norm_before = lla_prod_norm[beforeInd],
      lla_prod_norm_at = lla_prod_norm[atInd],
      lla_prod_norm_after1 = lla_prod_norm[afterInd1],
      lla_prod_norm_after2 = lla_prod_norm[afterInd2])
}, by = .(convId)]
# melt to long format: one row per (conversation, relative position)
dt.swbd.bound.melt = melt(dt.swbd.bound, id=1, measures=2:4, variable.name='position', value.name='llaNorm')
# plot
p = ggplot(dt.swbd.bound.melt, aes(x=position, y=llaNorm)) +
    stat_summary(fun.data = mean_cl_boot, geom='errorbar', width=.2) +
    stat_summary(fun.y = mean, geom='point', size=3) +
    stat_summary(fun.y = mean, geom='line', lty=2, group=1) +
    annotate('text', x=2, y=1.01, label='Topic shift', color='#B22222', size=5) +
    labs(x = 'Relative utterance position from topic boundary', y = 'Normalized LLA') +
    scale_x_discrete(labels = c('-1', '0', '1', '2')) +
    theme_light() + theme(axis.text.x = element_text(size=12, color='#B22222', face='bold'))
pdf('figs/llaNorm_acrossBound_SWBD.pdf', 5, 5)
plot(p)
dev.off()
#
# It shows that llaNorm decreases across topic boundary
##
# Plot llaNorm against inTopicId, with facet_wrap by topicId
mean(dt.swbd.comb[, max(inTopicId), by=uniqueTopicId]$V1) # 9
# create the `topicId_text` for facet_wrap
dt.swbd.comb[, topicId_text := paste0('Episode ', topicId)]
p = ggplot(dt.swbd.comb[topicId<=6 & inTopicId>=2 & inTopicId<=9,], aes(x=floor(inTopicId-1), y=lla_prod_norm)) +
    stat_summary(fun.data = mean_cl_boot, geom = 'ribbon', alpha = .5) +
    stat_summary(fun.y = mean, geom = 'line') +
    facet_wrap(~topicId_text, nrow = 1) +
    xlab('Utterance position within topic episode') + ylab('Normalized LLA')
pdf('figs/llaNorm_vs_inTopicId_SWBD.pdf', 9, 2.5)
plot(p)
dev.off()
######
# Part 2: The following analysis applies to BNC (same pipeline as SWBD above)
dt.bnc = fread('data/BNC_text_db100.csv')
setkey(dt.bnc, convId)
system.time(dt.bnc.align <- compute_LLA_sumLen(dt.bnc)) # elapsed 11 sec
# compute the mean lla for each sumLen level
setkey(dt.bnc.align, sumLen)
dt.bnc.align.mean = dt.bnc.align[, {
    .(lla_sum_mean = mean(lla_sum[!is.nan(lla_sum)]),
      lla_prod_mean = mean(lla_prod[!is.nan(lla_prod)]),
      lla_sqrtprod_mean = mean(lla_sqrtprod[!is.nan(lla_sqrtprod)]))
}, by = sumLen]
# join `llaMean` column back to dt.bnc.align
dt.bnc.align = dt.bnc.align[dt.bnc.align.mean, nomatch = 0]
# compute the normalized lla
dt.bnc.align[, lla_sum_norm := lla_sum / lla_sum_mean][, lla_prod_norm := lla_prod / lla_prod_mean][, lla_sqrtprod_norm := lla_sqrtprod / lla_sqrtprod_mean]
# Use models to check how llaNorm changes within dialogue and topic episode
# lla_*_norm ~ turnId
m = lmer(lla_sum_norm ~ turnId + (1|convId), dt.bnc.align)
summary(m)
# turnId      -5.764e-04  5.217e-04  1.375e+04  -1.105    0.269 n.s., decrease
m = lmer(lla_prod_norm ~ turnId + (1|convId), dt.bnc.align)
summary(m)
# turnId      -7.061e-04  5.168e-04  1.300e+04  -1.366    0.172 n.s., decrease
m = lmer(lla_sqrtprod_norm ~ turnId + (1|convId), dt.bnc.align)
summary(m)
# turnId      -6.338e-04  5.121e-04  1.358e+04  -1.238    0.216 n.s., decrease
# lla_* ~ turnId
m = lmer(lla_sum ~ turnId + (1|convId), dt.bnc.align)
summary(m)
# turnId      -3.221e-05  2.212e-05  1.951e+04  -1.456    0.145 n.s., decrease
m = lmer(lla_prod ~ turnId + (1|convId), dt.bnc.align)
summary(m)
# turnId      1.199e-06  1.974e-05 1.344e+04   0.061    0.952 n.s.
m = lmer(lla_sqrtprod ~ turnId + (1|convId), dt.bnc.align)
summary(m)
# turnId      -6.926e-05  5.053e-05  2.423e+04  -1.371     0.17 n.s., decrease
# Read topic information data and join with alignment data
dt.bnc.topic = fread('data/BNC_entropy_db.csv')
setkey(dt.bnc.topic, convId, turnId)
setkey(dt.bnc.align, convId, turnId)
dt.bnc.comb = dt.bnc.topic[dt.bnc.align, nomatch=0]
# shrink inTopicId column by computing the mean (collapse to one row per turn)
dt.bnc.comb = dt.bnc.comb[, {
    .(topicId = topicId[1], inTopicId = mean(inTopicId),
      lla_sum = lla_sum[1], lla_sum_norm = lla_sum_norm[1],
      lla_prod = lla_prod[1], lla_prod_norm = lla_prod_norm[1],
      lla_sqrtprod = lla_sqrtprod[1], lla_sqrtprod_norm = lla_sqrtprod_norm[1],
      ent = mean(ent))
}, by = .(convId, turnId)]
# add uniqueTopicId (corpus-wide id for each (conversation, topic) episode)
dt.bnc.comb[, uniqueTopicId := .GRP, by = .(convId, topicId)]
# models
# lla_*_norm ~ inTopicId
m = lmer(lla_sum_norm ~ inTopicId + (1|uniqueTopicId), dt.bnc.comb)
summary(m)
# inTopicId   7.106e-03  1.754e-03 1.976e+04   4.051 5.12e-05 ***
# Yes, llaNorm increases within topic episode
m = lmer(lla_prod_norm ~ inTopicId + (1|uniqueTopicId), dt.bnc.comb)
summary(m)
# inTopicId   7.569e-03  1.735e-03 1.907e+04   4.362  1.3e-05 ***
m = lmer(lla_sqrtprod_norm ~ inTopicId + (1|uniqueTopicId), dt.bnc.comb)
summary(m)
# inTopicId   7.305e-03  1.719e-03 1.962e+04   4.249 2.16e-05 ***
# lla_* ~ inTopicId
m = lmer(lla_sum ~ inTopicId + (1|uniqueTopicId), dt.bnc.comb)
summary(m)
# inTopicId   -1.200e-04  7.394e-05  2.035e+04  -1.623    0.105 n.s.
m = lmer(lla_prod~ inTopicId + (1|uniqueTopicId), dt.bnc.comb)
summary(m)
# inTopicId   4.592e-04  6.502e-05 1.813e+04   7.062 1.71e-12 ***
m = lmer(lla_sqrtprod~ inTopicId + (1|uniqueTopicId), dt.bnc.comb)
summary(m)
# inTopicId   -6.773e-04  1.688e-04  2.093e+04  -4.013 6.01e-05 *** decrease
# again, inconsistent with lla_prod
# NOTE(review): `llaNorm` is not a column created anywhere in this script
# (only lla_*_norm are) -- confirm before re-running the next two models.
m = lmer(llaNorm ~ ent + (1|convId), dt.bnc.comb)
summary(m)
# ent         2.033e-02  2.156e-03 3.144e+04   9.428   <2e-16 ***
# add shifted entropy column (entropy of the previous utterance)
shiftedEnt = shift(dt.bnc.comb$ent)
dt.bnc.comb$shiftedEnt = shiftedEnt
dt.bnc.tmp = dt.bnc.comb[, .SD[2:.N,], by=convId]
m = lmer(llaNorm ~ shiftedEnt + (1|convId), dt.bnc.tmp)
summary(m)
# shiftedEnt  2.442e-02  2.174e-03 3.145e+04   11.23   <2e-16 ***
# llaNorm correlates with entropy of previous utterance
##
# How does llaNorm change across topic boundaries
dt.bnc.bound = dt.bnc.comb[, {
    # find the positions where topic shift happens
    beforeInd = which(diff(topicId)==1)
    atInd = which(c(0, diff(topicId))==1)
    afterInd1 = atInd + 1
    afterInd2 = atInd + 2
    .(lla_prod_norm_before = lla_prod_norm[beforeInd],
      lla_prod_norm_at = lla_prod_norm[atInd],
      lla_prod_norm_after1 = lla_prod_norm[afterInd1],
      lla_prod_norm_after2 = lla_prod_norm[afterInd2])
}, by = .(convId)]
# melt to long format: one row per (conversation, relative position)
dt.bnc.bound.melt = melt(dt.bnc.bound, id=1, measures=2:4, variable.name='position', value.name='llaNorm')
# dt.bnc.bound.melt$position = as.numeric(dt.bnc.bound.melt$position)
# plot
p = ggplot(dt.bnc.bound.melt, aes(x=position, y=llaNorm)) +
    stat_summary(fun.data = mean_cl_boot, geom='errorbar', width=.2) +
    stat_summary(fun.y = mean, geom='point', size=3) +
    stat_summary(fun.y = mean, geom='line', lty=2, group=1) +
    labs(x = 'Relative utterance position from topic boundary', y = 'Normalized LLA') +
    annotate('text', x=2, y=.98, label='Topic shift', color='#B22222', size=5) +
    scale_x_discrete(labels = c('-1', '0', '1', '2')) +
    theme_light() + theme(axis.text.x = element_text(size=12, color='#B22222', face='bold'))
pdf('figs/llaNorm_acrossBound_BNC.pdf', 5, 5)
plot(p)
dev.off()
##
# Plot llaNorm against inTopicId, with facet_wrap by topicId
mean(dt.bnc.comb[, max(inTopicId), by=uniqueTopicId]$V1) # 9
# create column for facet_wrap
dt.bnc.comb[, topicId_text := paste0('Episode ', topicId)]
# NOTE(review): unlike the SWBD plot, this facets on `topicId` rather than
# the `topicId_text` created just above -- confirm which was intended.
p = ggplot(dt.bnc.comb[topicId<=6 & inTopicId>=2 & inTopicId<=9,], aes(x=floor(inTopicId-1), y=lla_prod_norm)) +
    stat_summary(fun.data = mean_cl_boot, geom = 'ribbon', alpha = .5) +
    stat_summary(fun.y = mean, geom = 'line') +
    facet_wrap(~topicId, nrow = 1) +
    xlab('Utterance position within topic episode') + ylab('Normalized LLA')
pdf('figs/llaNorm_vs_inTopicId_BNC.pdf', 9, 2.5)
plot(p)
dev.off()
##
# Plot lla_prod_norm ~ inTopicId for SWBD and BNC together
dt.swbd.tmp = dt.swbd.comb[, .(topicId, inTopicId, lla_prod_norm, lla_prod, topicId_text)]
dt.swbd.tmp[, Corpus := 'SWBD']
dt.bnc.tmp = dt.bnc.comb[, .(topicId, inTopicId, lla_prod_norm, lla_prod, topicId_text)]
dt.bnc.tmp[, Corpus := 'BNC']
dt.comb = rbindlist(list(dt.swbd.tmp, dt.bnc.tmp))
p = ggplot(dt.comb[topicId<=6 & inTopicId>=2 & inTopicId<=9,], aes(x=floor(inTopicId-1), y=lla_prod_norm)) +
    stat_summary(fun.data = mean_cl_boot, geom = 'ribbon', alpha = .5, aes(fill=Corpus)) +
    stat_summary(fun.y = mean, geom = 'line', aes(lty=Corpus)) +
    facet_wrap(~topicId_text, nrow = 1) +
    xlab('Utterance position within topic episode') + ylab('Normalized LLA') +
    theme_light() + theme(legend.position=c(.9, .7))
pdf('figs/nLLA_vs_inTopicId_together.pdf', 9, 2.5)
plot(p)
dev.off()
|
3d78d3feb64681b0f51c8c71075eb3d64e4cd3b7 | 52a2ee72de740ee910724180f9f84afff6db704c | /garch11.R | 35862b9e2227c111834faff6063146b5579a3409 | [] | no_license | pedropolvora/Research | 27c36bba0ca9a9a27d8ab5ab38302bc1fd4f4d61 | 6469e52b6fed45e993c0d700847dd47d99c9da44 | refs/heads/master | 2021-01-20T00:35:24.962387 | 2017-04-21T00:49:35 | 2017-04-21T00:49:35 | 89,155,729 | 1 | 0 | null | 2017-04-23T16:23:49 | 2017-04-23T16:23:49 | null | UTF-8 | R | false | false | 2,240 | r | garch11.R | library(tseries)
# GARCH(1,1) analysis of daily log-returns read from dataa.csv: fit the
# model, compute one-step-ahead variance forecasts, and inspect the
# conditional-variance series over several sub-periods.
library(MASS)
library(stats)
library(fGarch)
# load the price series (column 2 of dataa.csv) and compute log-returns
date_price <- read.csv("dataa.csv", sep=",")
price <- date_price[1:2373,2]
plot(price,type='l')
log_r <- diff(log(price))
plot(log_r, type='l')
acf(log_r)
# empirical distribution of log-returns vs. a fitted normal density
hist(log_r, breaks = 20)
osx<-seq(min(log_r),max(log_r), 0.01)
lines(osx, dnorm(osx, mean(log_r),sd(log_r)),col='red')
###test 1 - garch(1,1)
garch11 <- garch(log_r)
summary(garch11) # ARCH effects are filtered. However,
plot(garch11, type='l') # conditional normality seems to be violated
#### prediction: refit with fGarch to get the coefficient matrix and h.t
g = garchFit(~garch(1,1), log_r, cond.dist= "norm", include.mean=FALSE, trace=FALSE)
omega = g@fit$matcoef[1,1]
alpha = g@fit$matcoef[2,1]
beta = g@fit$matcoef[3,1]
alpha+beta ### just barely above 1 !!!! (high persistence; borderline non-stationary)
sigma = omega + alpha * log_r[2372]^2 + beta*g@h.t[2372] #compute sigma^2 for t+1
print(sigma)
plot(g)
##### testing the prediction using only the first 2000 observations
g2 = garchFit(~garch(1,1), log_r[1:2000], cond.dist= "norm", include.mean=FALSE, trace=FALSE)
omega2 = g2@fit$matcoef[1,1]
alpha2 = g2@fit$matcoef[2,1]
beta2 = g2@fit$matcoef[3,1]
alpha2+beta2 ### just barely above 1 !!!!
# NOTE(review): the recursion below mixes g2's coefficients with g@h.t (the
# full-sample fit's conditional variances); presumably g2@h.t was intended
# here and in the loop -- confirm.
sigma2 = omega2 + alpha2 * log_r[2000]^2 + beta2*g@h.t[2000] #compute sigma^2 for t+1
print(sigma2)
plot(g2)
# one-step-ahead variance forecasts for observations 2001..2372
sigma2 = c()
### 2000:2372
for(i in 1:372){
sigma2[i] = omega2 + alpha2 * log_r[1999+i]^2 + beta2*g@h.t[1999+i]
}
plot(sqrt(sigma2), type='l')
# overlay the out-of-sample forecasts on the full-sample conditional variance
plot(g@h.t, type='l')
lines(seq(2001,2372,1), sigma2, col='red')
### same thing....
###
###2016-01/2017
plot(g@h.t[1994:2372], type='l')
###07/2010-12/2011
plot(g@h.t[1:532], type='l')
lines(g@h.t[1994:2372], col='red')
### variance with the Least Squares Fit
### 01/01/2011
variance <- ts(g@h.t[168:length(g@h.t)], start = c(2011), frequency = 365)
#plot(g@h.t, type='l')
plot(variance)
#abline(lsfit(201:(2011+2205), variance), col='red')
# linear trend of the conditional variance over the whole sample
plot(g@h.t, type='l')
abline(lsfit(1:length(g@h.t), g@h.t), col='red')
a1 <- lsfit(1:length(g@h.t), g@h.t)
ls.print(a1) ## Intercept 0.0093,
## two blocks of data: fit a separate linear trend to each half
par(mfrow=c(2,1))
#1 first block: observations 1..1150
plot(g@h.t[1:1150], type='l')
abline(lsfit((1:1150), g@h.t[1:1150]), col='red')
#2 second block: observations 1151..2372
plot(g@h.t[1151:2372], type='l')
abline(lsfit((1151:2372), g@h.t[1151:2372]), col='red')
|
b3d18d4d8a56aa7da2ee568c24d2fe2ede72e886 | 858a0c6bee8b6785d8e043028a74d82a76b3333a | /#3.5_Script_BOGA.r | 29b077f05a6e9644977bb7e26079799775037caf | [] | no_license | BellaBoga/Quantitative_Phonetics_with_R | e91cfd0e25b93b8bda48c4d9248f218ffa58cd2f | 12f2004d6bdb6815faa2ff538c8b78e5fb5ceb57 | refs/heads/main | 2023-06-05T16:09:43.769448 | 2021-06-17T21:43:08 | 2021-06-17T21:43:08 | 377,966,790 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,601 | r | #3.5_Script_BOGA.r | #Create a data frame from the second file in the corpus
#folder: rec_001_AS_id_002_CUT_20MIN_2.TextGrid.
#with these columns : word, starts, ends, and word duration.
#get rid of all the pauses then calculate the mean of all
#words' duration then get words with the least and the most
#duration.
#get the mean of wordduration
# BUGFIX: the DIR assignment had been swallowed into the comment line above,
# so DIR was never defined and setwd(DIR) failed; restored as its own statement.
DIR = '/home/bella/Dropbox/Master/1st Semester/Quantitative Phonetics with R/Material Corpus etc/KEC/'
setwd(DIR)
filenames = list.files(pattern = 'Textgrid')
head(filenames)

# Parse the TextGrid: each 'intervals [k]:' line is followed by the xmin,
# xmax and text lines, whose values land in column V3 of read.table().
data = read.table('rec_001_AS_id_002_CUT_20MIN_2.TextGrid', fill = NA)
whereInterval = data$V1=='intervals'
whereIntervals.numeric=which(whereInterval)
where.start=whereIntervals.numeric+1
where.end = whereIntervals.numeric+2
where.word = whereIntervals.numeric+3
w.start=as.numeric(as.character(data$V3[where.start]))
w.end=as.numeric(as.character(data$V3[where.end]))
w.word=as.character(data$V3[where.word])
DF.textgrid=data.frame(word=w.word,begins=w.start,ends=w.end,wordduration=w.end-w.start,stringsAsFactors = FALSE)

#get rid of all the Pauses
DF.words=DF.textgrid[!DF.textgrid$word=='<P>',]
#get rid of rows without a usable duration
# BUGFIX: wordduration is numeric, so comparing it against the string " "
# never matched anything -- and NA durations even produced all-NA rows under
# that filter. Test for NA explicitly instead.
DF.words1=DF.words[!is.na(DF.words$wordduration),]

# Mean duration, computed on the cleaned table so NAs cannot poison it
meanOfWords=mean(DF.words1$wordduration)
meanOfWords
sort(DF.words1$wordduration)

#get word with longest duration
# BUGFIX: the original indexed rows with the word *string* (DF.words1[max,]),
# which returns an NA row; locate the row by position with which.max().
longest.row=which.max(DF.words1$wordduration)
max=DF.words1[longest.row,1]
max
maxLengthOfWord = max(DF.words1$wordduration)
maxLengthOfWord
DF.words1[longest.row,]

#get word with shortest duration (same fix, using which.min())
shortest.row=which.min(DF.words1$wordduration)
min=DF.words1[shortest.row,1]
min
minLengthOf=min(DF.words1$wordduration)
minLengthOf
DF.words1[shortest.row,]
|
ec8001ecee27e062c5a97ea58d8b99c0290cd41f | c66690df66cd458ca1bd9352bd0c4e04ff70089a | /Code/loadDataOberijeEtAl.R | 2c8631456ee850015d6e793eb8e91a96da3343f3 | [
"Apache-2.0"
] | permissive | biobibibi/classifier_selection_code | 6ef8a706c808de1a8b911df61e7fbd10e571551b | 040a2e1c8504e5f0e3d2877fdbaa326cc9c0708d | refs/heads/master | 2021-10-01T00:52:17.720895 | 2018-11-26T12:34:22 | 2018-11-26T12:34:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,600 | r | loadDataOberijeEtAl.R | loadDataOberijeEtAl = function(pathFromDataFolderToCsv,pathToDataFolder)
{
  # Load the Oberije et al. dataset and derive a binary two-year survival
  # outcome for classification.
  #   pathFromDataFolderToCsv : path of the .csv relative to the data folder
  #   pathToDataFolder        : path of the data folder itself
  # Returns list(data, data_class): the predictor data frame and the outcome
  # factor with levels "nonEvent" (dead before 2 years) / "event" (alive).
  pathToFile = file.path(pathToDataFolder, pathFromDataFolderToCsv) # construct full path to csv
  data = read.csv(pathToFile, sep = ";", dec = ",") # European-style csv: ';' separator, ',' decimals

  # derive outcome: two-year survival (the original comment said "one-year",
  # but yearCutOff below is 2)
  yearCutOff = 2
  data[,'survival_yearCutOff'] = NA # add column for the survival at year-cutoff
  nonSurvivors = data$survyear < yearCutOff & data$deadstat
  survivors = data$survyear >= yearCutOff
  data$survival_yearCutOff[nonSurvivors] = 0 # died before the cut-off
  data$survival_yearCutOff[survivors] = 1    # alive at the cut-off
  # remove patients without outcome: rows censored before the cut-off (alive
  # at last contact but followed < yearCutOff years) stayed NA above
  data = data[complete.cases(data[,'survival_yearCutOff']),]

  # drop identifier and raw survival columns so they cannot leak into a model
  drop = c('study_id','survmonth','survyear','deadstat')
  data = data[ , !(names(data) %in% drop)]

  # convert categorical columns (and the outcome) to factors
  factorCols = c('gender', 'who3g', 'dumsmok2', 't_ct_loc', 'hist4g', 'countpet_all6g', 'countpet_mediast6g', 'tstage', 'nstage', 'stage', 'timing', 'group', 'yearrt', 'survival_yearCutOff') # list all columns that should be factors
  data[factorCols] = lapply(data[factorCols], factor)

  # split the outcome off and relabel 0/1:
  # 0 (dead before 2yr) -> nonEvent, 1 (alive at 2yr) -> event
  data_class = data$survival_yearCutOff
  data$survival_yearCutOff = NULL
  # FIX: call revalue through its namespace instead of library(plyr) --
  # loading a package inside a function attaches it to the whole session
  # as a side effect.
  data_class = plyr::revalue(data_class, c('0' = 'nonEvent','1' = 'event'))
  return(list(data, data_class))
}
733f2b3141a6143231379d2a4eed2702fea9f654 | 1c6c7e838ad70677c317e76152f4b9b425d1f91e | /Prediction using superised ML.r | 1698b2c056684d4e6219fe4448351c0f796db027 | [] | no_license | borle2909/Prediction-using-Supervised-ML--Student-data | b345ef48e04109e613762e1511555bff80544cd2 | 5997047a219e12f15730b0b6ae998572e1ba3ff2 | refs/heads/main | 2023-08-06T06:23:09.807470 | 2021-09-17T13:58:53 | 2021-09-17T13:58:53 | 405,916,235 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,269 | r | Prediction using superised ML.r | #Step-1 Reading the Given Data Set.**
# Importing The CSV Data
student_data <- read.csv("https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv", header = TRUE)
head(student_data)
summary(student_data)

#Step-2 Plotting the Given Data set.**
# Plotting the given data (hours studied vs. score obtained)
plot(x = student_data$Hours, y = student_data$Scores, xlab = "Hours", ylab = "Scores", main = "Score of student by study Hours", col = "Red")

#Step-3 Running Linear Regression on the data as there are only Two variables.**
# Linear regression
student_data_regression <- lm(formula = Scores~Hours , data = student_data)
plot(x = student_data$Hours, y = student_data$Scores, xlab = "Hours", ylab = "Scores", main = "Score of student by study Hours", col = "red")
abline(student_data_regression, col= "Blue")
summary(student_data_regression)

#Step-4 Splitting Data into Test and Training Data**
library(caTools) # package "caTools" is used for splitting the data
# FIX: pin the RNG so the random split (and every number derived from it
# below) is reproducible between runs.
set.seed(123)
split = sample.split(Y = student_data$Scores, SplitRatio = 0.75)
training_set = subset(student_data, split == TRUE)
test_set = subset(student_data, split==FALSE)
training_set # checking the training data
test_set # checking the test data

#Step-5 Training the dataset**
result <- lm(formula = Scores~Hours, data = training_set)
summary(result)
result$coefficients

#Step-6 Using the Test Data to predict the outcome**
pred <- predict(result, test_set)
head(pred) #printing the predicted result
head(test_set) # printing the head of test set to compare with predicted values

#Step-7 Calculating the Mean Absolute Error**
# BUGFIX: the MAE must compare the *predicted scores* with the *actual test
# scores*. The original compared the 'Hours' columns of the (unrelated) heads
# of the training and test sets, which says nothing about model accuracy.
# Computed directly in base R, so the ie2misc package is no longer needed.
mae_value <- mean(abs(test_set$Scores - pred))
mae_value

#Step-8 What will be predicted score if a student studies for 9.25 hrs/ day?**
predicted_result <- predict(result, data.frame(Hours = 9.25))
predicted_result
|
3e2b4bf935a66b296b38d9106f27b0817b30ae3c | 38642304f0afbc664388685ff195727872207335 | /iUI/util_corr_viewer.R | ccfb374490884730de2165d62c5c55ab60f02e5b | [] | no_license | DrRoad/ShinyMLHome | 929681a900bd4b05b35a337c36563a72e2acc2cd | 590c44133b157b96f06193c3ae911b957b4548cb | refs/heads/master | 2020-07-22T11:24:03.278521 | 2018-09-23T12:02:22 | 2018-09-23T12:02:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,325 | r | util_corr_viewer.R | tp_util_corr_viewer <- tabPanel(
"Correlation viewer",
  # Row 1: data-source controls -- pick the predictors table and its fields.
  fluidRow(
    column(
      width = tp_wid_hlf,
      tags$div(class = "title_wrapper", tags$h6(class = "title_content_sm", "Predictors")),
      # Table picker, populated by ListTblsFromSSviaCS(proj_db_name)
      # (presumably the tables of the project database -- confirm) and
      # defaulting to the configured predictors source table.
      selectInput("ucv_upload", NULL, choices = ListTblsFromSSviaCS(proj_db_name), selected = db_predictors_src,
                  selectize = TRUE)
    ),
    column(
      width = 12 - tp_wid_hlf,
      tags$div(class = "title_wrapper", tags$h6(class = "title_content_sm", "fields")),
      # Field selector rendered server-side (output$ucv_features).
      uiOutput("ucv_features")
    )
  ),
  # Row 2: Pearson correlation matrix -- table and plot.
  fluidRow(
    column(
      width = tp_wid_hlf,
      tags$div(class = "title_wrapper", tags$h6(class = "title_content_sm", "Pearson Correlation Matrix")),
      DT::dataTableOutput("corr_mtx")
    ),
    column(
      width = 12 - tp_wid_hlf,
      tags$div(class = "title_wrapper", tags$h6(class = "title_content_sm", "Pearson Correlation Matrix Plot")),
      plotOutput("corr_plot")
    )
  ),
  # Row 3: GK (presumably Goodman-Kruskal -- confirm) correlation matrix --
  # table and plot.
  fluidRow(
    column(
      width = tp_wid_hlf,
      tags$div(class = "title_wrapper", tags$h6(class = "title_content_sm", "GK Correlation Matrix")),
      DT::dataTableOutput("gk_corr_mtx")
    ),
    column(
      width = 12 - tp_wid_hlf,
      tags$div(class = "title_wrapper", tags$h6(class = "title_content_sm", "GK Correlation Matrix Plot")),
      plotOutput("gk_corr_plot")
    )
  )
)
b89e7a465e2729ea98c4a9ab90e18e48d3143242 | e103b12751676afcc73fb321a41725a74012124c | /R/FeatureImp.R | c35183b7a2a90eb7c2eed40d5b7ecb5dbf7fda9d | [
"MIT"
] | permissive | ShunanGao/iml | ed5ae788182e4dab6cd4ebe25a988cd869b769c3 | a892b520ebc9084ff0a4d8168fbb0d3ef483886e | refs/heads/master | 2020-04-07T01:32:28.606515 | 2018-10-30T08:28:01 | 2018-10-30T08:28:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,963 | r | FeatureImp.R | #' Feature importance
#'
#' @description
#' \code{FeatureImp} computes feature importances for prediction models.
#' The importance is measured as the factor by which the model's prediction error increases when the feature is shuffled.
#'
#' @format \code{\link{R6Class}} object.
#' @name FeatureImp
#'
#' @section Usage:
#' \preformatted{
#' imp = FeatureImp$new(predictor, loss, method = "shuffle", n.repetitions = 3, run = TRUE)
#'
#' plot(imp)
#' imp$results
#' print(imp)
#' }
#'
#' @section Arguments:
#'
#' For FeatureImp$new():
#' \describe{
#' \item{predictor: }{(Predictor)\cr
#' The object (created with Predictor$new()) holding the machine learning model and the data.}
#' \item{loss: }{(`character(1)` | function)\cr The loss function. Either the name of a loss (e.g. "ce" for classification or "mse") or a loss function. See Details for allowed losses.}
#' \item{method: }{(`character(1)`\cr Either "shuffle" or "cartesian". See Details.}
#' \item{n.repetitions: }{`numeric(1)`\cr How often should the shuffling of the feature be repeated? Ignored if method is set to "cartesian".
#' The higher the number of repetitions the more stable the results will become.}
#' \item{parallel: }{`logical(1)`\cr Should the method be executed in parallel? If TRUE, requires a cluster to be registered, see ?foreach::foreach.}
#' \item{run: }{(`logical(1)`)\cr Should the Interpretation method be run?}
#' }
#'
#' @section Details:
#' Read the Interpretable Machine Learning book to learn in detail about feature importance:
#' \url{https://christophm.github.io/interpretable-ml-book/feature-importance.html}
#'
#' Two permutation schemes are implemented:
#' \itemize{
#' \item shuffle: A simple shuffling of the feature values, yielding n perturbed instances per feature (fast)
#' \item cartesian: Matching every instance with the feature value of all other instances, yielding n x (n-1) perturbed instances per feature (very slow)
#' }
#'
#' The loss function can be either specified via a string, or by handing a function to \code{FeatureImp()}.
#' If you want to use your own loss function it should have this signature: function(actual, predicted).
#' Using the string is a shortcut to using loss functions from the \code{Metrics} package.
#' Only use functions that return a single performance value, not a vector.
#' Allowed losses are: "ce", "f1", "logLoss", "mae", "mse", "rmse", "mape", "mdae",
#' "msle", "percent_bias", "rae", "rmse", "rmsle", "rse", "rrse", "smape"
#' See \code{library(help = "Metrics")} to get a list of functions.
#'
#' @section Fields:
#' \describe{
#' \item{original.error: }{(`numeric(1)`)\cr The loss of the model before perturbing features.}
#' \item{predictor: }{(Predictor)\cr The prediction model that was analysed.}
#' \item{results: }{(data.frame)\cr data.frame with the results of the feature importance computation.}
#' }
#'
#' @section Methods:
#' \describe{
#' \item{loss(actual,predicted)}{The loss function. Can also be applied to data: \code{object$loss(actual, predicted)}}
#' \item{plot()}{method to plot the feature importances. See \link{plot.FeatureImp}}
#' \item{\code{run()}}{[internal] method to run the interpretability method. Use \code{obj$run(force = TRUE)} to force a rerun.}
#' \item{\code{clone()}}{[internal] method to clone the R6 object.}
#' \item{\code{initialize()}}{[internal] method to initialize the R6 object.}
#' }
#'
#' @references
#' Fisher, A., Rudin, C., and Dominici, F. (2018). Model Class Reliance: Variable Importance Measures for any Machine Learning Model Class, from the "Rashomon" Perspective. Retrieved from http://arxiv.org/abs/1801.01489
#'
#' @import Metrics
#' @importFrom foreach %dopar% foreach %do%
#' @importFrom data.table copy rbindlist
#' @examples
#' if (require("rpart")) {
#' # We train a tree on the Boston dataset:
#' data("Boston", package = "MASS")
#' tree = rpart(medv ~ ., data = Boston)
#' y = Boston$medv
#' X = Boston[-which(names(Boston) == "medv")]
#' mod = Predictor$new(tree, data = X, y = y)
#'
#'
#' # Compute feature importances as the performance drop in mean absolute error
#' imp = FeatureImp$new(mod, loss = "mae")
#'
#' # Plot the results directly
#' plot(imp)
#'
#'
#' # Since the result is a ggplot object, you can extend it:
#' if (require("ggplot2")) {
#' plot(imp) + theme_bw()
#' # If you want to do your own thing, just extract the data:
#' imp.dat = imp$results
#' head(imp.dat)
#' ggplot(imp.dat, aes(x = feature, y = importance)) + geom_point() +
#' theme_bw()
#' }
#'
#' # FeatureImp also works with multiclass classification.
#' # In this case, the importance measurement regards all classes
#' tree = rpart(Species ~ ., data= iris)
#' X = iris[-which(names(iris) == "Species")]
#' y = iris$Species
#' mod = Predictor$new(tree, data = X, y = y, type = "prob")
#'
#' # For some models we have to specify additional arguments for the predict function
#' imp = FeatureImp$new(mod, loss = "ce")
#' plot(imp)
#'
#' # For multiclass classification models, you can choose to only compute performance for one class.
#' # Make sure to adapt y
#' mod = Predictor$new(tree, data = X, y = y == "virginica",
#' type = "prob", class = "virginica")
#' imp = FeatureImp$new(mod, loss = "ce")
#' plot(imp)
#' }
NULL
#' @export
FeatureImp = R6::R6Class("FeatureImp",
  inherit = InterpretationMethod,
  public = list(
    # Loss function used to measure the model error (set in initialize()).
    loss = NULL,
    # Model error on the unperturbed data; baseline for the importance ratio.
    original.error = NULL,
    # Number of shuffling repetitions per feature (ignored for "cartesian").
    n.repetitions = NULL,
    # Validate arguments, resolve the loss (string -> Metrics function),
    # compute the baseline error and -- unless run = FALSE -- run the method.
    initialize = function(predictor, loss, method = "shuffle", n.repetitions = 3, run = TRUE, parallel = FALSE) {
      assert_choice(method, c("shuffle", "cartesian"))
      assert_number(n.repetitions)
      assert_logical(parallel)
      private$parallel = parallel
      # More repetitions than rows cannot yield new permutations, so fall
      # back to the exhaustive "cartesian" scheme.
      if (n.repetitions > predictor$data$n.rows) {
        message('Number of repetitions larger than number of unique permutations per row.
                Switching to method = "cartesian"')
        method = "cartesian"
      }
      if (!inherits(loss, "function")) {
        ## Only allow metrics from Metrics package
        allowedLosses = c("ce", "f1", "logLoss", "mae", "mse", "rmse", "mape", "mdae",
          "msle", "percent_bias", "rae", "rmse", "rmsle", "rse", "rrse", "smape")
        checkmate::assert_choice(loss, allowedLosses)
        private$loss.string = loss
        loss = getFromNamespace(loss, "Metrics")
      } else {
        # User supplied their own function; keep an abbreviated form for printing.
        private$loss.string = head(loss)
      }
      if (is.null(predictor$data$y)) {
        stop("Please call Predictor$new() with the y target vector.")
      }
      super$initialize(predictor = predictor)
      self$loss = private$set.loss(loss)
      private$method = method
      private$getData = private$sampler$get.xy
      self$n.repetitions = n.repetitions
      actual = private$sampler$y[[1]]
      predicted = private$run.prediction(private$sampler$X)[[1]]
      # Assuring that levels are the same
      self$original.error = loss(actual, predicted)
      if(run) private$run(self$predictor$batch.size)
    }
  ),
  private = list(
    method = NULL,
    # for printing
    loss.string = NULL,
    # Collapse predicted class probabilities to hard labels.
    q = function(pred) probs.to.labels(pred),
    # NOTE(review): the else branch is empty, so a non-NULL `agg` makes this
    # return NULL invisibly -- looks unfinished; confirm whether it is used.
    combine.aggregations = function(agg, dat){
      if(is.null(agg)) {
        return(dat)
      } else {
      }
    },
    # Core computation: for every feature, perturb it, re-predict in batches
    # of n rows, and record the resulting loss ("permutation error").
    run = function(n){
      private$dataSample = private$getData()
      result = NULL
      # Per-feature worker: builds the perturbed design via MarginalGenerator,
      # streams predictions batch-wise, then evaluates the loss on the pooled
      # actual/predicted pairs.
      estimate.feature.imp = function(feature, data.sample, y, n.repetitions, cartesian, y.names, pred, loss) {
        # The `cartesian` parameter is immediately overwritten from
        # private$method, so whatever was passed in is never used.
        cartesian = ifelse(private$method == "cartesian", TRUE, FALSE)
        cnames = setdiff(colnames(data.sample), y.names)
        mg = iml:::MarginalGenerator$new(data.sample, data.sample,
          features = feature, n.sample.dist = n.repetitions, y = y, cartesian = cartesian, id.dist = TRUE)
        qResults = data.table::data.table()
        y.vec = data.table::data.table()
        while(!mg$finished) {
          data.design = mg$next.batch(n, y = TRUE)
          y.vec = rbind(y.vec, data.design[, y.names , with = FALSE])
          qResults = rbind(qResults, pred(data.design[,cnames, with = FALSE]))
        }
        # AGGREGATE measurements
        results = data.table::data.table(feature = feature, actual = y.vec[[1]], predicted = qResults[[1]])
        results = results[, list("permutation.error" = loss(actual, predicted)), by = feature]
        results
      }
      # Copy everything the (possibly parallel) workers need into locals.
      loss = self$loss
      n.repetitions = self$n.repetitions
      data.sample = private$dataSample
      y = private$sampler$y
      y.names = private$sampler$y.names
      pred = private$run.prediction
      # NOTE(review): duplicate of the `loss = self$loss` assignment above.
      loss = self$loss
      `%mypar%` = private$get.parallel.fct(private$parallel)
      # NOTE(review): `cartesian` is not defined in this scope; the call only
      # works because R evaluates arguments lazily and the worker overwrites
      # the parameter before ever forcing it.
      result = foreach(feature = private$sampler$feature.names, .combine = rbind, .export = "self",
        .packages = devtools::loaded_packages()$package, .inorder = FALSE) %mypar%
        estimate.feature.imp(feature, data.sample = data.sample, y = y, cartesian = cartesian,
          n.repetitions = n.repetitions, y.names = y.names, pred = pred, loss = loss)
      result$original.error = self$original.error
      # Importance = permutation error relative to the unperturbed error.
      result[, importance := permutation.error / self$original.error]
      result = result[order(result$importance, decreasing = TRUE),]
      # Removes the n column
      result = result[,list(feature, original.error, permutation.error, importance)]
      private$finished = TRUE
      self$results = data.frame(result)
    },
    # Lollipop chart of importances; features sorted ascending when sort = TRUE.
    generatePlot = function(sort = TRUE, ...) {
      res = self$results
      if (sort) {
        res$feature = factor(res$feature, levels = res$feature[order(res$importance)])
      }
      ggplot(res, aes(y = feature, x = importance)) + geom_point()+
        geom_segment(aes(y = feature, yend = feature, x=1, xend = importance)) +
        scale_x_continuous("Feature Importance") +
        scale_y_discrete("Feature")
    },
    # Stores the loss on self; the assignment's value also makes
    # initialize()'s re-assignment of self$loss redundant but harmless.
    set.loss = function(loss) {
      self$loss = loss
    },
    printParameters = function() {
      cat("error function:", private$loss.string)
    }
  )
)
#' Plot Feature Importance
#'
#' plot.FeatureImp() plots the feature importance results of a FeatureImp object.
#'
#' @param x A FeatureImp R6 object
#' @param sort logical. Should the features be sorted in descending order? Defaults to TRUE.
#' @param ... Further arguments for the objects plot function
#' @return ggplot2 plot object
#' @export
#' @seealso
#' \link{FeatureImp}
#' @examples
#' if (require("rpart")) {
#' # We train a tree on the Boston dataset:
#' data("Boston", package = "MASS")
#' tree = rpart(medv ~ ., data = Boston)
#' y = Boston$medv
#' X = Boston[-which(names(Boston) == "medv")]
#' mod = Predictor$new(tree, data = X, y = y)
#'
#' # Compute feature importances as the performance drop in mean absolute error
#' imp = FeatureImp$new(mod, loss = "mae")
#'
#' # Plot the results directly
#' plot(imp)
#' }
plot.FeatureImp <- function(x, sort = TRUE, ...) {
  # Delegate to the R6 object's own plot() method, forwarding the sort flag
  # and any additional plotting arguments unchanged.
  feature_imp_object <- x
  feature_imp_object$plot(sort = sort, ...)
}
|
6b67cb9cb96053e2f07959ee5f663776033b7cfa | f458e3e38c4e8eff6cbe8988488e31aa916ab38b | /cachematrix.R | 4d7ac85a71f908ff4d7fd37ae188c2c779ef3765 | [] | no_license | rculpi/ProgrammingAssignment2 | d65da6a607a973048929a2e0e6a7846d88720e87 | f8d1612f7bea2eb4622f26eb2d4695a631257e7d | refs/heads/master | 2020-04-01T15:40:53.325253 | 2018-10-16T20:16:11 | 2018-10-16T20:16:11 | 153,346,830 | 0 | 0 | null | 2018-10-16T20:02:59 | 2018-10-16T20:02:58 | null | UTF-8 | R | false | false | 2,359 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do:
## makeCacheMatrix: This function creates a special "matrix" object that can cache
## its inverse.
## cacheSolve: This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the matrix
## has not changed), then cacheSolve retrieves the inverse from the cache instead of
## recomputing it.
## Write a short comment describing this function
## makeCacheMatrix create a matrix that can have its inverse stored on cache by the next function.
## makeCacheMatrix: build a "matrix" wrapper able to cache its inverse.
## Returns a list of four accessor closures sharing one environment:
##   set(y)          replace the stored matrix and drop any cached inverse
##   get()           return the stored matrix
##   setInverse(m)   store a computed inverse
##   getInverse()    return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # A new matrix invalidates whatever inverse was cached before.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(solved) {
    cached_inverse <<- solved
  }
  getInverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Write a short comment describing this function
## cacheSolve stores the inverse of the indicated matrix in the cache. If the function has already been applied to the matrix,
## then it retrieves the inverse matrix from the cached data.
## cacheSolve: return the inverse of the special "matrix" built by
## makeCacheMatrix(), computing it at most once per stored matrix.
## On a repeat call the cached copy is returned and a message is emitted.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get())
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Testing the functions
## NOTE(review): there is no set.seed() call, so my_matrix is a fresh random
## draw on every run; the output transcribed in the comments below is from
## one particular run and will not reproduce exactly.
my_matrix <- matrix(rnorm(25),5,5)
my_matrix_cached <- makeCacheMatrix(my_matrix)
# First call: the inverse is computed with solve() and stored in the cache.
cacheSolve(my_matrix_cached)
## [,1] [,2] [,3] [,4] [,5]
## [1,] 0.243659392 -0.4429717 -0.4985223 -1.65642020 0.4645730
## [2,] 0.033708219 0.6971282 1.0428226 1.14785339 -1.4920296
## [3,] -0.604838774 0.6280577 0.2140196 1.47902504 0.7852962
## [4,] -0.260374914 -0.1351449 0.3420485 -0.04331824 -1.4472900
## [5,] -0.004439894 -0.1412987 1.0594317 0.28799940 -1.0416161
# Second call: served from the cache, hence the message below.
cacheSolve(my_matrix_cached)
## getting cached data
## [,1] [,2] [,3] [,4] [,5]
## [1,] 0.243659392 -0.4429717 -0.4985223 -1.65642020 0.4645730
## [2,] 0.033708219 0.6971282 1.0428226 1.14785339 -1.4920296
## [3,] -0.604838774 0.6280577 0.2140196 1.47902504 0.7852962
## [4,] -0.260374914 -0.1351449 0.3420485 -0.04331824 -1.4472900
## [5,] -0.004439894 -0.1412987 1.0594317 0.28799940 -1.0416161
|
a65860fcfa3e7c17972b63851364c204d86a565a | 7f6cfb830ff9b1658c8b943a72d6bb8b8a3a6168 | /man/geom_cobweb.Rd | 9f95c269c36c2f42e950417a18209da9fd9abfcf | [
"Apache-2.0"
] | permissive | Beirnaert/MetaboMeeseeks | aad9dd2a1fa956464478ee5a6703c3bfd44dba6b | 7c9926b24050679f1efbec2da81b8fec4b5207a8 | refs/heads/master | 2021-01-25T08:13:22.690656 | 2019-03-14T12:18:11 | 2019-03-14T12:18:11 | 93,733,340 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,898 | rd | geom_cobweb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_cobweb.R
\name{geom_cobweb}
\alias{geom_cobweb}
\title{Cobweb plot}
\usage{
geom_cobweb(mapping = NULL, data = NULL, stat = "identity",
position = "identity", na.rm = FALSE, show.legend = TRUE,
theme = list(panel.background = element_rect(fill = "white", colour =
"white")), grid_lty = "dashed", grid_lwd = 1, grid_alpha = 0.75,
grid_fontsize = 12, grid_Nticks = 4, grid_ticks = NULL,
grid_labels = NULL, grid_label_position = 1,
grid_label_hjust = 0.5, grid_label_vjust = -0.5, ...)
}
\arguments{
\item{mapping}{(from ggplot call) A mapping of ggplot variables. geom_cobweb
expects an AUCs variable.}
\item{data}{(from ggplot call) The data frame supplied to the plot.}
\item{stat}{(from ggplot call) ggplot identity stat.}
\item{position}{(from ggplot call) ggplot identity position.}
\item{na.rm}{(from ggplot call) remove NAs, default is FALSE.}
\item{show.legend}{Whether to show the legend. Default is TRUE.}
\item{theme}{Default theme settings for correct visualization.}
\item{grid_lty}{The linetype of the background grid. Default is 'dashed'.}

\item{grid_lwd}{The linewidth of the background grid. Default is 1.}

\item{grid_alpha}{The alpha of the background grid. Default is 0.75.}

\item{grid_fontsize}{The fontsize of the grid labels. Default is 12.}
\item{grid_Nticks}{The number of gridlines (makes use of
\code{\link[base]{pretty}} so number is not a hard threshold). Default is
4.}
\item{grid_ticks}{vector with values of gridlines to be drawn (unlike
grid_Nticks, this is exact).}
\item{grid_labels}{The labels to use for the gridlines.}
\item{grid_label_position}{The position along the cobweb where to plot the
grid labels (numeric value).}
\item{grid_label_hjust}{The hjust of grid_labels}
\item{grid_label_vjust}{The vjust of grid_labels}
\item{...}{Extra aesthetics parameters, see Aesthetics section.}
}
\description{
The cobweb geom is an addition to the \pkg{ggplot2} package for plotting
cobweb or spider plots.
}
\section{Aesthetics}{
\code{geom_cobweb} understands the following aesthetics
(required aesthetics are in bold):
\itemize{
\item \strong{\code{AUCs}}
\item \code{alpha}
\item \code{colour}
\item \code{group}
\item \code{linetype}
\item \code{size} }
}
\examples{
library(ggplot2)
n_comparison <- 3
AUC.df <- data.frame(type = c(rep("ROC",6),rep("Random",6)),
AUCs = c(0.34, 1.00, 0.56, 0.40, 0.37, 0.45, rep(1/n_comparison, 6)))
ggplot(AUC.df, aes(AUCs = AUCs, colour = type)) +
geom_cobweb() +
theme_cobweb() +
ggtitle("Testen") +
coord_equal()
ggplot(AUC.df, aes(AUCs = AUCs, colour = type)) +
geom_cobweb(grid_Nticks = 2,
show.legend = TRUE,
grid_label_position = 2) +
theme_cobweb() +
ggtitle("Testen") +
coord_equal()
}
|
7e61c26ce29b1059c1d59846aa3d7e7c00aac38a | 1318b29d7b0f212ebe1a87145a13ee563ea094d8 | /R/AB.withDescalation.R | 715c1c2fc03a3b4253f4148ffb80bf3bb375a688 | [] | no_license | cran/TrialSize | 73c3ff9086760334fa83d4608c111bb0ea32765b | 314e951e9d33786b6a2883f7cd483984cb611243 | refs/heads/master | 2021-06-04T14:24:45.564491 | 2020-07-06T20:40:03 | 2020-07-06T20:40:03 | 17,693,970 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,016 | r | AB.withDescalation.R | AB.withDescalation <-function(A,B,C,D,E,DLT){
# Body of AB.withDescalation(A, B, C, D, E, DLT): appears to compute the
# expected number of patients per dose level for a traditional A+B
# dose-escalation design with dose de-escalation.
#   A, B : sizes of the first and the added patient cohort at a dose
#   C, D : DLT bounds on the first A patients (k < C vs. k in C..D below)
#   E    : DLT bound involving the combined A+B patients (m sums below)
#   DLT  : vector pj of dose-limiting-toxicity probabilities per dose level
# Returns N_j, one value per element of DLT.
#
# NOTE(review): several statements below look defective and are flagged
# inline; verify against the published A+B design formulas before relying
# on the numbers.
pj=DLT
# NOTE(review): rep() has no 'ncol' argument, so each of these evaluates to
# a single 0; the vectors only reach full length because the [j]=
# assignments below grow them on the fly.
P0=rep(0,ncol=length(pj))
P1=rep(0,ncol=length(pj))
Q0=rep(0,ncol=length(pj))
Q1=rep(0,ncol=length(pj))
Q2=rep(0,ncol=length(pj))
n=rep(0,ncol=length(pj))
# P0[j]: binomial sum over k for the first A patients at dose j.
for (j in 1:length(pj))
{
# NOTE(review): ifelse() with a scalar condition returns only the FIRST
# element of the chosen branch, so k is always the scalar 0 here -- for
# C > 1 the sum is missing the k = 1..C-1 terms (plain 0:(C-1) would give
# the full range). TODO confirm intended formula.
k=ifelse(C>1,c(0:(C-1)),0)
P0[j]=sum(choose(A,k)*pj[j]^k*(1-pj[j])^(A-k))
}
# P1[j]: binomial sum intended over k = C..D.
for (j in 1:length(pj))
{
# NOTE(review): same ifelse() truncation -- k collapses to the scalar C.
k=ifelse(D>C,c(C:D),C)
P1[j]=sum(choose(A,k)*pj[j]^k*(1-pj[j])^(A-k))
}
# Q0[j]: joint binomial term over the first A and the added B patients.
for (j in 1:length(pj))
{
for (k in C:D)
{
# NOTE(review): ifelse() truncation again -- m is always the scalar 0.
m=ifelse((E-k)>0,c(0:(E-k)),0)
# NOTE(review): Q0[j] is overwritten on every k iteration instead of being
# accumulated, so only the k = D term survives the loop.
Q0[j]=sum(choose(A,k)*pj[j]^k*(1-pj[j])^(A-k)*choose(B,m)*pj[j]^m*(1-pj[j])^(B-m))
}
}
# Q1[j]: analogous quantity for k = 0..C-1 (same overwrite/truncation issues).
for (j in 1:length(pj))
{
for (k in 0:(C-1))
{
m=ifelse((E-k)>0,c(0:(E-k)),0)
Q1[j]=sum(choose(A,k)*pj[j]^k*(1-pj[j])^(A-k)*choose(B,m)*pj[j]^m*(1-pj[j])^(B-m))
}
}
# Q2[j]: analogous quantity with m intended to range over D+1-k .. E-k.
for (j in 1:length(pj))
{
for (k in 0:(C-1))
{
m=ifelse((E-k)>(D+1-k),c((D+1-k):(E-k)),0)
Q2[j]=sum(choose(A,k)*pj[j]^k*(1-pj[j])^(A-k)*choose(B,m)*pj[j]^m*(1-pj[j])^(B-m))
}
}
# Expected cohort size at a dose the trial passes through (A or A+B patients).
Njn=(A*P0+(A+B)*Q0)/(P0+Q0)
# Pstar[i]: apparently the probability that the trial stops at dose i.
Pstar=rep(0,ncol=length(pj))
Pstar[1]=1-P0[1]-Q0[1]
for(k in 2:length(pj))
{
temp=prod(Q2[1:(k-1)])*(1-P0[k]-Q0[k])
Pstar[1]=Pstar[1]+temp
}
Pstar[length(pj)]=prod((P0+Q0))
# Pik(i, k): probability term for a stopping pattern involving doses i and k.
# NOTE(review): when k <= i + 1 the range (i+1):(k-1) counts DOWNWARDS, so
# prod() multiplies unintended Q2 terms instead of yielding an empty product.
Pik<-function(i,k)
{
P=(P0[i]+Q0[i])*(1-P0[k]-Q0[k])*(prod((P0[1:(i-1)]+Q0[1:(i-1)])))*(prod(Q2[(i+1):(k-1)]))
return(P)
}
for (i in 2:(length(pj)-1))
{
Pstar[i]=0
for (k in (i+1):length(pj))
{
Pstar[i]=Pstar[i]+Pik(i,k)
}
}
# Njik(j, i, k): expected number of patients at dose j given the stopping
# pattern (i, k): doses below i get the conditional A/A+B mix, doses between
# i and k get A+B, dose k gets the boundary expression, doses above k get 0.
Njik<-function(j,i,k)
{
if (j < i){N=(A*P0[j]+(A+B)*Q0[j])/(P0[j]+Q0[j])}
else if (i<=j & j<k){N=A+B}
else if (j==k){N=(A*(1-P0[j]-P1[j])+(A+B)*(P1[j]-Q0[j]))/(1-P0[j]-Q0[j])}
else N=0
return(N)
}
N_ij=rep(0,ncol=length(pj))
N_j=rep(0,ncol=length(pj))
# Nj(j): expected patients at dose j, summed over stopping patterns (i, k).
Nj<-function(j)
{
for (i in 1:(length(pj)-1))
{
N_ij[i]=0
for(k in i:(length(pj)))
{
N_ij[i]=N_ij[i]+Njik(j,i,k)*Pik(i,k)
}
}
N_ij[length(pj)]=Njik(j,length(pj),length(pj))*Pstar[length(pj)]
n=sum(N_ij)
return(n)
}
# Expected sample size per dose level.
for (i in 1:length(pj))
{
N_j[i]=Njn[i]*Pstar[length(pj)]+Nj(i)
}
return(N_j)
}
|
d6034a55eeee691264f83157161e2cc2e20056b6 | da627ef8abba2774be316152556d871160e1ac15 | /man/get_HPDI.Rd | d2abbb46c4aede2bf9ccff03afbb146e0b5c36d6 | [] | no_license | fditraglia/binivdoctr | fae27a133659a0378f229aa5f1b34a7d226a33f4 | 907482963e82a95fff5a7672a422610f61a3db9c | refs/heads/master | 2020-04-12T08:24:40.322701 | 2016-08-23T17:16:04 | 2016-08-23T17:16:04 | 52,390,880 | 0 | 2 | null | 2017-04-29T19:57:49 | 2016-02-23T20:53:29 | R | UTF-8 | R | false | true | 226 | rd | get_HPDI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{get_HPDI}
\alias{get_HPDI}
\title{Highest posterior density interval}
\usage{
get_HPDI(draws, level = 0.9)
}
\arguments{
\item{draws}{Draws to summarise; presumably posterior samples.}

\item{level}{Probability mass the interval should contain. Defaults to 0.9.}
}
\description{
Compute the highest posterior density interval (HPDI) of a set of posterior
draws at the given probability level.
}
|
5a9497bcb22be400201536ea685acc1cfc0a6be4 | 54a599da54a7083329e805c0d8eda2987c6a94a0 | /mkare_reference_5_10_2019.R | 6ce5c310f5789126259e7f670fc3185535bdca56 | [] | no_license | xiaodongy86/Tomato_methylome | ac2ca92e4e0974f8352554ba6003d89ffe42ba3c | f227ed50bf8482eae16adbd94f1f16393ac0cc39 | refs/heads/master | 2021-06-21T21:02:46.620743 | 2021-01-20T14:48:04 | 2021-01-20T14:48:04 | 178,910,187 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,033 | r | mkare_reference_5_10_2019.R | #####################################################################################
# all four wt as reference
#####################################################################################
# Pool the counts of the four wild-type samples (two plants x two replicates)
# into one virtual reference per methylation context (CG / CHG / CHH).
# WT_CG / WT_CHG / WT_CHH are not created in this script -- presumably loaded
# earlier in the session; confirm before running stand-alone.
ref_all_four_wt_CG <- poolFromGRlist(list(WT_CG$WT1_P1, WT_CG$WT1_P2, WT_CG$WT2_P1,WT_CG$WT2_P2), stat = "sum", num.cores = 12L)
ref_all_four_wt_CHG <- poolFromGRlist(list(WT_CHG$WT1_P1, WT_CHG$WT1_P2, WT_CHG$WT2_P1,WT_CHG$WT2_P2), stat = "sum", num.cores = 12L)
ref_all_four_wt_CHH <- poolFromGRlist(list(WT_CHH$WT1_P1, WT_CHH$WT1_P2, WT_CHH$WT2_P1,WT_CHH$WT2_P2), stat = "sum", num.cores = 12L)
# NOTE(review): this load() of an unrelated "mild_minus_CHH" .RData in the
# middle of reference building looks misplaced -- confirm it is needed here.
load("/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/mild_minus_CHH_5_9_2019.RData")
# Checkpoint the pooled references to disk.
save(ref_all_four_wt_CG, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/ref_all_four_wt_CG_5_10_2019.RData")
save(ref_all_four_wt_CHG, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/ref_all_four_wt_CHG_5_10_2019.RData")
save(ref_all_four_wt_CHH, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/ref_all_four_wt_CHH_5_10_2019.RData")
# CG context: divergence of each WT sample vs. the pooled reference, then a
# nonlinear fit of the divergence distribution (column 9) and selection of
# potential DIMPs (alpha = 0.05, total-variation cut-off 0.2).
HD_WT_CG = estimateDivergence(ref = ref_all_four_wt_CG,
                              indiv = WT_CG,
                              Bayesian = TRUE,
                              min.coverage = 4,
                              high.coverage = 300,
                              percentile = 0.999,
                              num.cores = 4L, tasks = 0L, verbose = FALSE )
save(HD_WT_CG, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/HD_ref_all_four_wt_CG_5_10_2019.RData")
nlms_WT_CG = nonlinearFitDist(HD_WT_CG, column = 9, num.cores = 10L, verbose = TRUE)
PS_WT_CG = getPotentialDIMP(LR = HD_WT_CG, nlms = nlms_WT_CG, div.col = 9, alpha = 0.05,
                            tv.col = 7, tv.cut = 0.2)
save(nlms_WT_CG, PS_WT_CG, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/PS_nlms_WT_CG_5_10_2019.RData")
# Free the large CG objects before starting the CHG context.
rm(HD_WT_CG)
rm(ref_all_four_wt_CG)
rm(WT_CG)
# CHG context: same pipeline as for CG above.
HD_WT_CHG = estimateDivergence(ref = ref_all_four_wt_CHG,
                              indiv = WT_CHG,
                              Bayesian = TRUE,
                              min.coverage = 4,
                              high.coverage = 300,
                              percentile = 0.999,
                              num.cores = 4L, tasks = 0L, verbose = FALSE )
save(HD_WT_CHG, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/HD_ref_all_four_wt_CHG_5_10_2019.RData")
nlms_WT_CHG = nonlinearFitDist(HD_WT_CHG, column = 9, num.cores = 10L, verbose = TRUE)
PS_WT_CHG = getPotentialDIMP(LR = HD_WT_CHG, nlms = nlms_WT_CHG, div.col = 9, alpha = 0.05,
                            tv.col = 7, tv.cut = 0.2)
save(nlms_WT_CHG, PS_WT_CHG, file = "/data5/F15FTSUSAT0747_TOMrcwM/YXD_Bismark_pipeline/methyl_extractor/PS_nlms_WT_CHG_5_10_2019.RData")
# NOTE(review): the CHH reference is built and saved above, but no
# divergence / fit / DIMP step for CHH appears in this file -- presumably
# run elsewhere; confirm.
rm(HD_WT_CHG)
rm(ref_all_four_wt_CHG)
rm(WT_CHG)
|
c4d2cfd51ed7eac1ff6147c453ecaa99392e8bae | 1176e185df07a19c00d0baf112fa11f2f8a8f5f2 | /R/RV_Cor.R | 93750a921bee90d57c7ea549c25cbaa0faa97491 | [] | no_license | Pintademijote/multipack | 142cba0c0376a5779e06a8dd66762bf6e497bb9e | e9ff3c6695ac794cfc7bf681a109e03740d77f0f | refs/heads/master | 2020-05-02T14:44:20.301357 | 2019-09-23T11:54:47 | 2019-09-23T11:54:47 | 178,019,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,683 | r | RV_Cor.R | #' Computed the RV coefficient between the metrics.
#'
#' `RV_COR()` Return matrix of correlation between all metrics.
#'
#' This function is meant to be used with the tab generated by formultivariate()
#' The RV coefficient is meant to compute the correlation between two tabs.
#' In this case a metric's values at each scale are treated as a table, so an RV coefficient is computed
#' for each pair of metrics, with an associated p-value.
#' @param Varia_paysage_multi Tab generated by formodel()
#'
#' @param dist Vector of scales you chose during your analysis in Chloe
#' @param metrics Vector of metrics you chose during your analysis in Chloe
#' @return Return matrix of correlation between all metrics.
#' @export
RV_COR <- function(Varia_paysage_multi, metrics, dist) {
  # Pairwise RV coefficients (and p-values) between metrics.
  #
  # For each metric, its values at all scales (columns 4 .. 3 + length(dist)
  # of `Varia_paysage_multi`) are treated as one table, and coeffRV() --
  # presumably FactoMineR::coeffRV; confirm the package is attached by the
  # caller -- measures the correlation between two such tables.
  #
  # Args:
  #   Varia_paysage_multi: long-format data frame produced by formodel(); one
  #     row per (window, metric), with a `Metric` column and the per-scale
  #     values in columns 4 onward.
  #   metrics: character vector of metric names to compare.
  #   dist: vector of scales used in the analysis; only its length is used, to
  #     know how many value columns to extract.
  #
  # Returns: list(coefRV, pvalue) -- two symmetric metrics-by-metrics
  #   matrices; diag(coefRV) is 1.
  #
  # BUG FIX: the original read the global variable `scales` instead of the
  # `dist` argument when slicing the value columns.
  n_scales <- length(dist)
  coefRV <- matrix(ncol = length(metrics), nrow = length(metrics))
  # Pre-fill the lower triangle and diagonal with a sentinel so the loop below
  # computes each unordered pair only once; the (NA) upper triangle is
  # mirrored afterwards.
  coefRV[lower.tri(coefRV)] <- 2
  diag(coefRV) <- 2
  pvalue <- matrix(ncol = length(metrics), nrow = length(metrics))
  colnames(coefRV) <- row.names(coefRV) <- metrics
  colnames(pvalue) <- row.names(pvalue) <- metrics
  pb <- txtProgressBar(min = 0, max = length(metrics), style = 3)
  on.exit(close(pb), add = TRUE)  # release the progress-bar connection
  rep <- 0
  for (i in metrics) {
    # (The original's cosmetic Sys.sleep(0.1) per metric has been dropped.)
    rep <- rep + 1
    setTxtProgressBar(pb, rep)
    for (j in metrics) {
      if (is.na(coefRV[i, j])) { next }  # upper triangle: filled by symmetry
      temp <- coeffRV(
        Varia_paysage_multi[Varia_paysage_multi$Metric == i, 4:(3 + n_scales)],
        Varia_paysage_multi[Varia_paysage_multi$Metric == j, 4:(3 + n_scales)]
      )
      coefRV[i, j] <- temp$rv
      pvalue[i, j] <- temp$p.value
    }
  }
  # Mirror the computed lower triangle into the upper one.
  coefRV[upper.tri(coefRV)] <- t(coefRV)[upper.tri(coefRV)]
  diag(coefRV) <- 1
  pvalue[upper.tri(pvalue)] <- t(pvalue)[upper.tri(pvalue)]
  return(list(coefRV, pvalue))
}
|
48f4d5de4583a7fc1c5988f213d873a4ad3525c7 | fff46d4e1384d111c8aab667e2cba107d9212769 | /plot2.R | d9b97ef1d8a63fdbe0089aeb19a5fc86405da1da | [] | no_license | iioanamar/ExData_Plotting1 | f488c82f36f3ecbe2566f15e053d885a66c926f7 | 502a3cb78cdac5721d0fa744e158eb4b5fc98549 | refs/heads/master | 2020-04-09T12:01:06.188383 | 2018-12-04T12:39:45 | 2018-12-04T12:39:45 | 160,332,630 | 0 | 0 | null | 2018-12-04T09:31:29 | 2018-12-04T09:31:28 | null | UTF-8 | R | false | false | 642 | r | plot2.R | ## Exploratory Data Analysis
## Course Project 1
## Code for Plot 2
# Read the full household power-consumption data set; fields are separated by
# ';' and missing measurements are encoded as "?" in the raw file.
data <- read.delim("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?")
# Keep only the two days of interest (1-2 Feb 2007) and build a single
# POSIX date-time column from the separate Date and Time fields.
power <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007", ]
power$datetime <- strptime(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
# Line plot of global active power over time (x-axis label suppressed).
plot(power$datetime, power$Global_active_power, type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
# Copy the on-screen plot to a 480x480 PNG and close the device.
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
|
12c7bd073f7f623b869a3904f197bc94e7db6b75 | 75f792fc99722753a76b6e428ba30a591f2d5368 | /man/small_meta.Rd | 4472a8c88a34bf2c78b782eda7fcdf3bd964fd59 | [
"MIT"
] | permissive | jtmccr1/HIVEr | 9739c7883ec6e8f1071a537517a62b7288f553cd | 61601afa9f173fe2ca033b0a0028b109f7b90de6 | refs/heads/master | 2021-09-20T03:25:25.257483 | 2018-08-02T15:15:31 | 2018-08-02T15:15:31 | 109,002,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,264 | rd | small_meta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{small_meta}
\alias{small_meta}
\title{A small meta data file}
\format{A data frame with 20 rows and 17 variables:
\describe{
\item{HOUSE_ID}{House identifier of sample}
\item{ENROLLID}{personal identifier}
\item{SPECID}{sample identifier}
\item{onset}{Date of symptom onset}
\item{collect}{date of sample collection}
\item{vaccination_status}{vaccine status}
\item{pcr_result}{strain H1N1 H3N2}
\item{LAURING_ID}{Old sample id}
\item{DPI}{Days post infection}
\item{season}{Season of illness}
\item{log_copy_num}{Log copy number of titer in qpcr well}
\item{gc_ul}{genomes per ul in sample isolate}
\item{HIGHSD}{Y or N for high standard deviation in qpcr}
\item{sequenced}{T/F was the sample sequenced}
\item{home_collected}{T/F was the sample taken at home}
\item{snv_qualified}{T/F does the sample qualify for snv calling}
\item{iSNV}{Number of iSNV in sample - not real data here}
}}
\source{
data-raw/small_meta.R
}
\usage{
small_meta
}
\description{
A small metadata file containing a few samples that
exhibit what we want to do in the code, plus a few interesting
cases. Each row is a sample.
}
\keyword{datasets}
|
7e665a40d62c299bffeef63e8167c0466aab9602 | 9818480ac9ade5cf6a7fbe037e2c9cb6d4b87bb8 | /huge_matrix.R | 1f5018a9053f58bdbf539540b93d4ce159dde1f0 | [] | no_license | sseemayer/R-hacks | 0531d79c37559c3dad9eb44dbd938960c366360d | 80ae420f888c3c5953455f1fdedf27b3b8f557a9 | refs/heads/master | 2020-03-29T23:19:10.319560 | 2012-09-06T08:45:28 | 2012-09-06T08:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,309 | r | huge_matrix.R |
# Data structure HugeMatrix for storing a matrix with more elements than
# .Machine$integer.max by splitting the matrix into multiple matrices by columns.
setClass("HugeMatrix",
         representation(data = "list", matrix.cols = "integer",
                        ncol = "integer", nrow = "integer"))

# Build the column-partitioned storage for a HugeMatrix.
#
# Args:
#   fill: value used to fill every cell of the new matrix.
#   ncol, nrow: logical dimensions of the huge matrix.
#   max.elements.per.matrix: upper bound on the number of cells held by any
#     one underlying matrix (defaults to the platform integer limit). It is
#     raised to at least `nrow` so each sub-matrix can hold a full column.
setMethod("initialize", "HugeMatrix", function(.Object, fill, ncol, nrow,
                                               max.elements.per.matrix = .Machine$integer.max) {
  max.elements.per.matrix <- max(nrow, max.elements.per.matrix)
  num.matrices <- ceiling(ncol * nrow / max.elements.per.matrix)
  matrix.cols <- ceiling(ncol / num.matrices)
  # seq_len() instead of seq(): safe when num.matrices is 0 (seq(0) yields 1:0).
  .Object@data <- lapply(seq_len(num.matrices), function(i) {
    # Every sub-matrix gets `matrix.cols` columns except the last, which
    # takes whatever remains.
    cols <- if (i < num.matrices) matrix.cols else ncol - ((num.matrices - 1) * matrix.cols)
    matrix(rep(fill, nrow * cols), nrow, cols)
  })
  .Object@ncol <- as.integer(ncol)
  .Object@nrow <- as.integer(nrow)
  .Object@matrix.cols <- as.integer(matrix.cols)
  .Object
})

# Compact textual summary of a HugeMatrix.
setMethod("show", "HugeMatrix", function(object) {
  # BUG FIX: the original printed object@nrow twice; the second printed
  # dimension must be ncol.
  cat(sprintf("HugeMatrix of dimensions [%d x %d], split into %d columns each (%d matrices) \n",
              object@nrow, object@ncol, object@matrix.cols, length(object@data)))
})

# Convenience constructor mirroring matrix(); returns a new HugeMatrix.
huge.matrix <- function(fill, nrow, ncol, max.elements.per.matrix = .Machine$integer.max) {
  new("HugeMatrix", fill = fill, nrow = nrow, ncol = ncol,
      max.elements.per.matrix = max.elements.per.matrix)
}
|
18339ed0203f9754d4b62c6134b174af8f046735 | 38f657868816ded08bfe661bf27a529c59f8e6cc | /q2/g.R | 1e5fdddae1fb2b7c0451ecb0d4eefb7a5daf35cb | [] | no_license | AntonioDelleCanne/stats_r_coursework | d75ab9351136d7be71519f491df2b47f399b2c4a | 094f81455c8e3feceeb0f5cadf0efcc30595a8ec | refs/heads/master | 2023-01-23T03:44:52.632394 | 2020-12-12T15:08:59 | 2020-12-12T15:08:59 | 318,884,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,191 | r | g.R | # Read the data
data <- read.csv("q2/DartPoints.csv", header = TRUE, row.names = 'X')
# Keep only complete rows (listwise deletion of NAs).
data <- data[complete.cases(data),]
colnames(data)[c(-8,-1)]
# Define the dependent variable.
Y <- "Weight"
# Define the covariates: every column except 1 and 8.
# NOTE(review): positional indices -- confirm column 8 is Weight and column 1
# is an identifier in DartPoints.csv.
X <- colnames(data)[c(-8,-1)]
# Build the model formula programmatically: Weight ~ x1 + x2 + ...
# (the covariate list comes from elsewhere, so the model is fully
# parameterized).
f <- as.formula(
  paste(Y,
        paste(X, collapse = " + "),
        sep = " ~ "))
print(f)
# Fit the linear model.
lin.mod <- lm(f, data = data)
# Coefficient table (Estimate, Std. Error, t value, Pr(>|t|)).
summ <- summary(lin.mod)
coeff.summ <- summ[["coefficients"]]
# Here we show the above-mentioned correlations, applying the described
# methodology: a term is "significant" when |t| exceeds its p-value.
# NOTE(review): comparing a t statistic with a p-value is not a standard
# significance criterion (usually p < 0.05); kept as the stated methodology.
corr.vars.idx <- abs(coeff.summ[, "t value"]) > coeff.summ[, "Pr(>|t|)"]
uncorr.vars <- row.names(coeff.summ)[corr.vars.idx == F]
uncorr.vars
# Directly (positively) correlated variables.
# NOTE(review): corr.dir.idx is computed on the SUBSET of significant rows,
# but is then used below to index the FULL row-name vector; the shorter
# logical index recycles and can mislabel variables -- verify.
corr.dir.idx <- coeff.summ[corr.vars.idx, "Estimate"] > 0
corr.dir <- row.names(coeff.summ)[corr.dir.idx]
corr.dir
# Inversely (negatively) correlated variables.
corr.inv <- row.names(coeff.summ)[corr.dir.idx == F]
corr.inv
|
582c290f7e50e35ea8e44e4af855a433581a4d5a | db8a43ce4e4d58a57a0a2bb29b63acf6c30b5092 | /R/ML_FDAModel.R | 1d31910b7570f637bef411a57b8d68738ba40b95 | [] | no_license | zhaoxiaohe/MachineShop | ca6fa7d6e7f00ac7d6f8522d50faeec2f4735b2d | 85b1ff6a9d7df425d041289856861e75ce596621 | refs/heads/master | 2020-04-11T06:30:08.059577 | 2018-12-13T00:45:43 | 2018-12-13T00:45:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,915 | r | ML_FDAModel.R | #' Flexible and Penalized Discriminant Analysis Models
#'
#' Performs flexible discriminant analysis.
#'
#' @rdname FDAModel
#'
#' @param theta optional matrix of class scores, typically with number of
#' columns less than one minus the number of classes.
#' @param dimension dimension of the discriminant subspace, less than the number
#' of classes, to use for prediction.
#' @param eps numeric threshold for small singular values for excluding
#' discriminant variables.
#' @param method regression function used in optimal scaling. The default of
#' linear regression is provided by \code{\link[mda]{polyreg}} from the
#' \pkg{mda} package. For penalized discriminant analysis,
#' \code{\link[mda]{gen.ridge}} is appropriate. Other possibilities are
#' \code{\link[mda]{mars}} for multivariate adaptive regression splines and
#' \code{\link[mda]{bruto}} for adaptive backfitting of additive splines. Use
#' the \code{\link[MachineShop:dot-]{.}} operator to quote specified functions.
#' @param ... additional arguments to \code{method} for \code{FDAModel} and to
#' \code{FDAModel} for \code{PDAModel}.
#'
#' @details
#' \describe{
#' \item{Response Types:}{\code{factor}}
#' }
#'
#' The \code{\link{predict}} function for this model additionally accepts the
#' following argument.
#' \describe{
#' \item{\code{prior}}{prior class membership probabilities for prediction data
#' if different from the training set.}
#' }
#'
#' Default values for the \code{NULL} arguments and further model details can be
#' found in the source links below.
#'
#' @return \code{MLModel} class object.
#'
#' @seealso \code{\link[mda]{fda}}, \code{\link[mda]{predict.fda}},
#' \code{\link{fit}}, \code{\link{resample}}, \code{\link{tune}}
#'
#' @examples
#' fit(Species ~ ., data = iris, model = FDAModel())
#'
FDAModel <- function(theta = NULL, dimension = NULL, eps = .Machine$double.eps,
                     method = .(mda::polyreg), ...) {
  # Construct a MachineShop MLModel wrapper around mda::fda (flexible
  # discriminant analysis). See the roxygen block above for parameters.
  # NOTE(review): MLModel(), params(), nvars() and the quoting operator .()
  # are MachineShop internals not visible in this chunk; comments below are
  # inferred from their use here.
  MLModel(
    name = "FDAModel",
    packages = "mda",
    types = "factor",                  # classification (factor) responses only
    params = params(environment()),    # capture this call's arguments as model parameters
    nvars = function(data) nvars(data, design = "model.matrix"),
    fit = function(formula, data, weights, ...) {
      # Re-home the formula so mda::fda resolves `data`/`weights` in this
      # fitting environment.
      environment(formula) <- environment()
      mda::fda(formula, data = data, weights = weights, ...)
    },
    predict = function(object, newdata, prior = object$prior, ...) {
      # Posterior class probabilities; `prior` may be overridden at predict
      # time (defaults to the training priors stored on the fit).
      predict(object, newdata = newdata, type = "posterior", prior = prior)
    }
  )
}
#' @rdname FDAModel
#'
#' @param lambda shrinkage penalty coefficient.
#' @param df alternative specification of \code{lambda} in terms of equivalent
#' degrees of freedom.
#'
#' @examples
#' fit(Species ~ ., data = iris, model = PDAModel())
#'
PDAModel <- function(lambda = 1, df = NULL, ...) {
  # Penalized discriminant analysis: FDA with a ridge-penalized optimal
  # scaling step. Collects lambda/df/... and forces the scaling method to
  # mda::gen.ridge before delegating to FDAModel().
  args <- c(as.list(environment()), list(...))
  args$method <- .(mda::gen.ridge)
  # quote = TRUE keeps the quoted .(mda::gen.ridge) call unevaluated here so
  # FDAModel() receives it as written.
  model <- do.call(FDAModel, args, quote = TRUE)
  model@name <- "PDAModel"  # rebrand the returned MLModel
  model
}
|
6aeec00d410b6139de806e618ddd9c635d5f9679 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/distrMod/examples/SelfNorm.Rd.R | 7d6c0b54e270306d118f8f0ef19a63f8e9123afc | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | SelfNorm.Rd.R | library(distrMod)
### Name: SelfNorm
### Title: Generating function for SelfNorm-class
### Aliases: SelfNorm
### Keywords: robust
### ** Examples
# Construct a SelfNorm object via its distrMod generating function.
SelfNorm()
## The function is currently defined as
function(){ new("SelfNorm") }
|
caacbf373a90bb3b58b956bc25e05a3878afed54 | d835aeaf4cacac3ccf8b9f76da1ae89f4eabc469 | /RandomForest_Radar_Regression_EBC.r | 14480c3b4e67861c8ce33bd6af059bc82886ea6f | [] | no_license | NewForestsUS/R-Code | ce3cca43a979c4638649c548d5eb5a68765e6acf | b06340f073c44d9921409dc28033f1d7f393b8c3 | refs/heads/master | 2020-04-04T20:45:47.780074 | 2020-02-18T18:17:39 | 2020-02-18T18:17:39 | 156,260,112 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,645 | r | RandomForest_Radar_Regression_EBC.r | ###THis script will create a .tif file of aboveground Biomass after using a random forest approach
## and principal components (PCs) that have been predetermined.
library(sp)
library(raster)
library(maptools)
library(rgdal)
library(fields)
library(dplyr)
library(odbc)
library(DBI)
library(RSQLite)
library(caret)
library(corrplot)
library(gdalUtils)
library(ggplot2)
library(reshape2)
library(rgeos)
library(randomForest)
library(randomForestExplainer)
library(classInt)

### Bring in plots with spatial data and carbon quantification.
EBCPlots <- readOGR(dsn = "./EBC_Plots_CarbonQuant_ELCS_GCSWGS84.shp")
## Bring in the shapefile to clip everything by.
EBCArea <- readOGR(dsn = "../../Virtual_Machine/shapefiles/EBC_Project_Area_Boundary_WGS84_Buf.shp")

## Create our raster stack: every predictor layer cropped and masked to the
## project area.
path.Rando <- "../../Virtual_Machine/resampled17mInput/"
file.names.rando <- dir(path.Rando, pattern = ".TIF", ignore.case = TRUE)
RandFor.file <- stack()
start <- Sys.time()
for (i in seq_along(file.names.rando)) {
  Rast_i <- raster(paste0(path.Rando, file.names.rando[i]))
  crop_i <- crop(Rast_i, EBCArea)
  mask_i <- mask(crop_i, EBCArea)
  RandFor.file <- stack(RandFor.file, mask_i)
}
end <- Sys.time()
tot.t <- end - start
tot.t  # Time difference should be about 7.8 min

### Make sure all the rasters made it through the stack.
nlayers(RandFor.file)

## Extract predictor values at each plot and assemble the model data frame.
Rando_Forest_PCA_Ras <- extract(RandFor.file, EBCPlots, small = TRUE, sp = TRUE)
df.RF <- as.data.frame(Rando_Forest_PCA_Ras)
df.RF.cut <- subset(df.RF, df.RF$ID != "2268")  # drop known bad plot
df.RF.cut$New_Strata <- as.factor(df.RF.cut$New_Strata)
# Drop ID/strata/coordinate columns, keeping LIVE_AG and the predictors.
# NOTE(review): positional drop -- confirm the layout if inputs change.
df.RF.short <- df.RF.cut[-c(1, 2, 29:30)]

## Fit the regression random forest on all plots.
# BUG FIX: the original spelled the argument "importantance"; randomForest()
# silently swallowed it via `...`, so variable importance was never computed.
model_rf_new <- randomForest(LIVE_AG ~ ., data = df.RF.short,
                             importance = TRUE, nodesize = 5)
model_rf_new
importance(model_rf_new)
varImpPlot(model_rf_new)
plot(model_rf_new)
# BUG FIX: tuneRF() has no formula interface; it takes a predictor matrix x
# and a response y.
tuneRF(x = df.RF.short[, setdiff(names(df.RF.short), "LIVE_AG")],
       y = df.RF.short$LIVE_AG)

## Predict aboveground biomass over the whole study area and write a GeoTIFF.
Ras.Strata <- predict(RandFor.file, model_rf_new)
plot(Ras.Strata)
writeRaster(Ras.Strata, "../EBC_Regression_RF/EBC_AGBiomass_Map_RF_Radar.tif",
            format = "GTiff", overwrite = TRUE)

## 75/25 hold-out evaluation via caret.
inTrain <- createDataPartition(df.RF.short$LIVE_AG, p = 0.75, list = FALSE)
trainDF <- df.RF.short[inTrain, ]
testDF <- df.RF.short[-inTrain, ]
mtry_def <- floor(sqrt(ncol(trainDF)) * .75)
t_grid <- expand.grid(mtry = c(mtry_def))
# BUG FIX: t_grid was built but never passed to train(); supply it so the
# intended mtry is actually used.
model_rf <- train(LIVE_AG ~ ., data = trainDF, method = "rf", ntree = 500,
                  tuneGrid = t_grid)
print(model_rf)
# NOTE(review): columns 2:26 are assumed to be the predictors (LIVE_AG in
# column 1) -- confirm against df.RF.short's layout.
predictions <- predict(model_rf, testDF[, 2:26])
# Root-mean-square error and its ratio to the mean observed biomass.
RMSE <- sqrt(sum((predictions - testDF$LIVE_AG)^2) / length(predictions))
print(RMSE)
print(RMSE / mean(testDF$LIVE_AG))
|
95a76e545ffb26dbf8fda490f92ca6566f6b5dec | 9576704cf2bac865ba7ae90ae0ca66a6bab13ffd | /CancerPredictionModel.R | d92eade1c6007d4532d25a22e0817189c24eeee3 | [] | no_license | 23ArmaanT/Machine-Learning-Projects | 5142e7caf711be5b3d531733c18a37951b06480a | f1b4c35ffb2564fd9918544ea0a4c6f4831e207e | refs/heads/master | 2023-03-07T15:52:21.386887 | 2021-02-22T16:59:27 | 2021-02-22T16:59:27 | 281,810,769 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,359 | r | CancerPredictionModel.R | library(caret)
library(corrplot)
library(ggplot2)
library(rpart)
library(kknn)
library(hydroGOF)
library(cowplot)
library(caret)  # NOTE(review): caret is already attached at the top of the file.
library(randomForest)
library(e1071)
# Load the raw breast-cancer data and drop columns 1, 2 and 34.
originalCancerData = read.csv("cData.csv")
originalCancerData= originalCancerData[,c(-1,-2,-34)] # many NA's in columns 2 and 34; id (col 1) correlates poorly with diagnosis, so drop all three
# NOTE(review): `data1` is never defined in this script, so this line errors
# as written -- presumably originalCancerData was intended.
ggplot(data1, aes(diagnosis)) + geom_bar(fill = "red")
# We observe data points with no diagnosis and several NA's; keep complete
# rows, then drop rows whose diagnosis is the empty string.
cleanedUpCancerData = originalCancerData[complete.cases(originalCancerData),]
# Checking for NA's (should select nothing after the cleanup above).
cleanedUpCancerData[is.na(cleanedUpCancerData)]
cleanedUpCancerData = cleanedUpCancerData[-which(cleanedUpCancerData$diagnosis==""),]
#data[1,1] = "myValue"
str(originalCancerData)
# Plot one-variable graph.
# NOTE(review): broken leftover -- `as.factor[]` subsets a function (errors
# with "object of type 'closure' is not subsettable") and `data` is not a
# data frame here; probably meant
#   cleanedUpCancerData$diagnosis <- as.factor(cleanedUpCancerData$diagnosis)
data$diagnosis = as.factor[]
# Class balance, as counts and as proportions.
table(cleanedUpCancerData$diagnosis)
prop.table(table(cleanedUpCancerData$diagnosis))
# Convert the diagnosis column to numeric (M -> 1, B -> 0) so cor() can use it.
cleanedUpCancerData$diagnosis=ifelse(cleanedUpCancerData$diagnosis == "M",1,0)
# Full correlation matrix across all remaining columns.
cancerCorrelationPlot = cor(cleanedUpCancerData)
cancerCorrelationPlot
corrplot(cancerCorrelationPlot, method = "color", type = "lower")
# Correlations within the "mean", "SE" and "worst" feature groups separately.
# NOTE(review): the ranges 1:11 / 12:21 / 22:31 are positional -- confirm they
# still line up with those groups after the column drops above.
correlationMeanCancerData = cor(cleanedUpCancerData[,c(1:11)])
correlationSECancerData = cor(cleanedUpCancerData[,c(12:21)])
correlationWorstCancerData = cor(cleanedUpCancerData[,c(22:31)])
corrplot(correlationMeanCancerData, method = "color", type = "lower" )
corrplot(correlationSECancerData, method = "color", type = "lower" )
corrplot(correlationWorstCancerData, method = "color", type = "lower" )
# Don't use the same variables if they have the same correlation.
# NOTE(review): heatmap() requires a numeric matrix and `col` is undefined
# here, so this call errors as written.
heatmap(cleanedUpCancerData,col=col,symm=F)
# Rebuild a cleaned copy with diagnosis kept as character so ggplot can map it
# to the boxplot fill.
# NOTE(review): originalCancerData already had columns 1, 2 and 34 removed
# above, so c(-2,-34) here no longer refers to the raw file's columns.
cleanedUpCancerDataForVisualization = originalCancerData[,c(-2,-34)]
cleanedUpCancerDataForVisualization = cleanedUpCancerDataForVisualization[complete.cases(cleanedUpCancerDataForVisualization),]
cleanedUpCancerDataForVisualization[is.na(cleanedUpCancerDataForVisualization)]
cleanedUpCancerDataForVisualization = cleanedUpCancerDataForVisualization[-which(cleanedUpCancerDataForVisualization$diagnosis==""),]
# One boxplot per "mean" feature, split by diagnosis, arranged in a grid.
p1=ggplot(cleanedUpCancerDataForVisualization, aes(y=radius_mean, fill=diagnosis)) + geom_boxplot()
p2=ggplot(cleanedUpCancerDataForVisualization, aes(y=texture_mean, fill=diagnosis)) + geom_boxplot()
p3=ggplot(cleanedUpCancerDataForVisualization, aes(y=perimeter_mean, fill=diagnosis)) + geom_boxplot()
p4=ggplot(cleanedUpCancerDataForVisualization, aes(y=area_mean, fill=diagnosis)) + geom_boxplot()
p5=ggplot(cleanedUpCancerDataForVisualization, aes(y=smoothness_mean, fill=diagnosis)) + geom_boxplot()
p6=ggplot(cleanedUpCancerDataForVisualization, aes(y=compactness_mean, fill=diagnosis)) + geom_boxplot()
p7=ggplot(cleanedUpCancerDataForVisualization, aes(y=concavity_mean, fill=diagnosis)) + geom_boxplot()
p8=ggplot(cleanedUpCancerDataForVisualization, aes(y=concave.points_mean, fill=diagnosis)) + geom_boxplot()
p9=ggplot(cleanedUpCancerDataForVisualization, aes(y=symmetry_mean, fill=diagnosis)) + geom_boxplot()
p10=ggplot(cleanedUpCancerDataForVisualization, aes(y=fractal_dimension_mean, fill=diagnosis)) + geom_boxplot()
plot_grid(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10, nrow=3)
########### Visualisation complete. ##########
##### Split into test and train data, then fit and evaluate five classifiers.
# 70/30 stratified split (seeded for reproducibility).
set.seed(17)
holderSplit = createDataPartition(cleanedUpCancerData$diagnosis,p=0.7,list = F)
trainingData = cleanedUpCancerData[holderSplit,]
testingData = cleanedUpCancerData[-holderSplit,]
# Reduce collinearity by hand-picking a small feature subset.
specificCancerFeatures = c('diagnosis', 'texture_mean','perimeter_mean','smoothness_mean','compactness_mean','symmetry_mean')
trainingData.SelectedFeatures = cleanedUpCancerData[holderSplit,specificCancerFeatures]
# Check class imbalance in the training data.
table(trainingData$diagnosis)
# --- Logistic regression ----------------------------------------------------
cancerModel.lg = glm(diagnosis ~. , data= trainingData.SelectedFeatures, family = binomial(link="logit"))
# Predicted probabilities on the held-out set, thresholded at 0.5.
cancerPredictions = predict(cancerModel.lg, testingData, type="response")
testingData$cancerPredictions=ifelse(cancerPredictions > 0.5, 1, 0)
table(testingData$diagnosis)
# Hold-out accuracy for the logistic regression model.
accuracyCancerPredictions = mean(testingData$cancerPredictions == testingData$diagnosis)
accuracyCancerPredictions
summary(cancerModel.lg)
cancerPredictions
# Confusion matrix for the logistic regression model.
cm_lg = confusionMatrix(as.factor(testingData$cancerPredictions), as.factor(testingData$diagnosis))
cm_lg
# --- Weighted k-nearest neighbours (kknn via caret) -------------------------
table(trainingData$diagnosis)
class(trainingData$diagnosis)
table(testingData$diagnosis)
trainingData$diagnosis = as.factor(trainingData$diagnosis)
cancerModel.kknn = train(diagnosis~., trainingData, method="kknn")
# NOTE(review): predictedCancerModelKKNN is used here (and below) but is never
# created -- a line such as
#   predictedCancerModelKKNN = predict(cancerModel.kknn, testingData)
# is missing, so this errors as written.
accuracyKKNN = mean (predictedCancerModelKKNN == testingData$diagnosis)
accuracyKKNN # accuracy = 97.05882 %
# Confusion matrix for the KKNN model.
cm_KKNN = confusionMatrix(as.factor(predictedCancerModelKKNN), as.factor(testingData$diagnosis))
cm_KKNN
# --- Random forest ----------------------------------------------------------
cancerModel.rf = train(diagnosis~., trainingData, method="rf")
predictedCancerModelRF = predict(cancerModel.rf, testingData, method = "rf")
accuracyRF = mean (predictedCancerModelRF == testingData$diagnosis)
accuracyRF # Accuracy is 94.70588 %.
# Confusion matrix for the random forest.
cm_rf = confusionMatrix(as.factor(predictedCancerModelRF), as.factor(testingData$diagnosis))
cm_rf
# --- Support vector machine (e1071) -----------------------------------------
cancerModel.svm = svm(diagnosis~., data=trainingData)
cancerPredictions.svm=predict(cancerModel.svm, testingData)
accuracy.svm = mean(cancerPredictions.svm==testingData$diagnosis)
accuracy.svm # 0.9823529
# Confusion matrix for the SVM model.
cm_svm=confusionMatrix(as.factor(cancerPredictions.svm), as.factor(testingData$diagnosis))
cm_svm
# --- Decision tree (rpart via caret) ----------------------------------------
cancerModel.rpart = train(diagnosis~., trainingData, method="rpart")
cancerPredictions.rpart = predict(cancerModel.rpart, testingData)
accuracy.rpart = mean(cancerPredictions.rpart==testingData$diagnosis)
accuracy.rpart # 0.9470588
# Confusion matrix for the rpart model.
confusionMatrix_rpart = confusionMatrix(as.factor(cancerPredictions.rpart), as.factor(testingData$diagnosis))
confusionMatrix_rpart
# Validation table of per-model predictions.
# NOTE(review): c() flattens everything into one long vector, so this data
# frame is NOT one column per model -- the six labels recycle against the
# concatenated predictions. A per-model data.frame/cbind was probably intended.
modelsUsedColoumnNameVTable = c("Diagnosis","Logistic Regression", "Random Forest", "KKNN", "SVM", "R-Part")
modelsUsedVTable = c(testingData$diagnosis,cancerPredictions,predictedCancerModelKKNN,predictedCancerModelRF,cancerPredictions.svm,cancerPredictions.rpart)
cancerPredictionsVTable = data.frame(modelsUsedVTable, modelsUsedColoumnNameVTable)
# Observation table: one row per model with its hold-out accuracy.
modelsUsed = c("Logistic Regression", "Random Forest", "KKNN", "SVM", "R-Part")
accuracy = c(accuracyCancerPredictions, accuracyRF, accuracyKKNN, accuracy.svm, accuracy.rpart)
observationTable = data.frame(modelsUsed, accuracy)
# --- Feature engineering ----------------------------------------------------
# Refit the logistic model on the full training feature set.
cancerModel.lg = glm(diagnosis ~. , data= trainingData, family = binomial(link="logit"))
cancerPredictions = predict(cancerModel.lg, testingData, type="response")
# NOTE(review): cancerPredictions holds continuous probabilities, so comparing
# it directly to the 0/1 labels gives a near-zero "accuracy"; it should be
# thresholded (e.g. > 0.5) first. The confusionMatrix() call below fails for
# the same reason (one factor level per distinct probability).
accuracy1 = mean(cancerPredictions == testingData$diagnosis)
accuracy1
cm_lg = confusionMatrix(as.factor(cancerPredictions), as.factor(testingData$diagnosis))
cm_lg
summary(cancerModel.lg)
# Greedy forward selection over the "mean" features for the KKNN model, using
# an odd/even row split of the cleaned data.
library(hydroGOF)
cancerMeanSelectedFeatures = c("diagnosis","radius_mean")
cancerMeanPossibleFeatures = c("texture_mean","perimeter_mean","area_mean","smoothness_mean","compactness_mean","concavity_mean","concave.points_mean", "symmetry_mean","fractal_dimension_mean")
training = cleanedUpCancerData[seq(1,nrow(cleanedUpCancerData),2), cancerMeanSelectedFeatures]
testing = cleanedUpCancerData[seq(2,nrow(cleanedUpCancerData),2),cancerMeanSelectedFeatures]
featureEngineeringModel = train(diagnosis~., training, method="kknn")
predictedFeatureEngineering = predict(featureEngineeringModel, testing)
# NOTE(review): baseAccuracy reuses predictedCancerModelKKNN and testingData
# from the earlier split rather than predictedFeatureEngineering/testing, so
# it does not measure this baseline model.
baseAccuracy = mean(predictedCancerModelKKNN == testingData$diagnosis)
baseAccuracy
cleanedUpCancerData$diagnosis = as.factor(cleanedUpCancerData$diagnosis)
for (i in 1: length(cancerMeanPossibleFeatures)){
  print(i)
  training = cleanedUpCancerData[seq(1,nrow(cleanedUpCancerData),2), c(cancerMeanSelectedFeatures,cancerMeanPossibleFeatures[i])]
  testing = cleanedUpCancerData[seq(2,nrow(cleanedUpCancerData),2), c(cancerMeanSelectedFeatures,cancerMeanPossibleFeatures[i])]
  featureEngineeringModel= train(diagnosis~., training, method = "kknn")
  predictedFeatureEngineering = predict(featureEngineeringModel, testing)
  # NOTE(review): this recomputes the OLD model's accuracy every iteration
  # (predictedFeatureEngineering/testing are ignored), so the candidate
  # feature is never actually evaluated.
  newAccuracyKKNN = mean(predictedCancerModelKKNN == testingData$diagnosis)
  newAccuracyKKNN
  if (newAccuracyKKNN>accuracyKKNN){
    print(cancerMeanSelectedFeatures)
    cancerMeanSelectedFeatures = c(cancerMeanSelectedFeatures, cancerMeanPossibleFeatures[i])
    # NOTE(review): assignment looks reversed -- the running best accuracyKKNN
    # is never updated; this overwrites the candidate score instead.
    newAccuracyKKNN = accuracyKKNN
    print(i)
    # NOTE(review): paste() result is discarded; wrap in print()/message().
    paste("Features Selected are", cancerMeanSelectedFeatures)
  }
}
|
09a430e331a2f01bfc46ba8cf46eaddc3310a115 | 4c29b2e8cd6d5d12889958a408ecde2498a25eba | /R/best_binomial_bandit.R | 5ae5a930609f9b4cfb7beaab03b8736664ad961a | [] | no_license | ssimeonov/bandit-1 | f5fc38cc2c4e97dca8039b65fba89dddc8a76422 | 485deccebf06b77d68d00df45b877dc28da45e33 | refs/heads/master | 2021-01-18T00:22:34.032009 | 2012-08-23T22:07:22 | 2012-08-23T22:07:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 968 | r | best_binomial_bandit.R | # estimate the Bayesian posterior probability of each alternative being the best binomial bandit
# Estimate, for each arm of a binomial bandit, the Bayesian posterior
# probability that it is the best arm.
#
# Each arm i gets an independent Beta(x[i] + 1, n[i] - x[i] + 1) posterior
# (uniform prior). P(arm i is best) is the integral over z in [0, 1] of arm
# i's posterior density times the probability that every other arm lies
# below z.
#
# Args:
#   x: vector of success counts, one per arm.
#   n: vector of trial counts, one per arm (same length as x).
# Returns: numeric vector of win probabilities, one per arm (sums to ~1).
best_binomial_bandit <-
function(x, n) {
  k <- length(x)
  ans <- numeric(k)
  # seq_len() is safe for k == 0 (the original 1:k would yield c(1, 0)).
  for (i in seq_len(k)) {
    others <- seq_len(k)[-i]
    integrand <- function(z) {
      # Posterior density of arm i at z ...
      r <- dbeta(z, x[i] + 1, n[i] - x[i] + 1)
      # ... times P(arm j's rate <= z) for every competing arm.
      for (j in others) {
        r <- r * pbeta(z, x[j] + 1, n[j] - x[j] + 1)
      }
      r
    }
    ans[i] <- integrate(integrand, 0, 1)$value
  }
  ans
}
# Shorthand alias for best_binomial_bandit(); see that function for details.
bbb <-
function(x, n) {
  best_binomial_bandit(x,n)
}
# functions for computing resulting optimal probabilities via simulation
# sim.post <-
# function(y, n, ndraws) {
# k <- length(y)
# ans <- matrix(nrow=ndraws, ncol=k)
# no <- n - y
# for (i in 1:k) {
# ans[,i] <- rbeta(ndraws, y[i]+1, no[i]+1)
# }
# return(ans)
# }
# prob.winner <-
# function(post) {
# k <- ncol(post)
# w <- table(factor(max.col(post), levels=1:k))
# return(w/sum(w))
# }
# compute.win.prob <-
# function(y, n, ndraws) {
# return(prob.winner(sim.post(y, n, ndraws)))
# }
|
c65598557ecb241c8351ed0043554f2cbd612870 | 9ddd0a7857cfbf406c1508d91934a9269b0aacae | /1.characteristics/PG_HC_group_comparisons.R | a6fb4563f30df302144d0bb7bda99770c2ef0063 | [
"MIT"
] | permissive | CGR-UBC/gambling-sibling-study | 2b13a922a9be76af0a5dea46e532359d0100bee7 | 076cfc037e0ff75d35d033e012964e08a1155c4b | refs/heads/master | 2020-06-05T02:56:37.702135 | 2019-06-26T20:44:13 | 2019-06-26T20:44:13 | 192,289,746 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,652 | r | PG_HC_group_comparisons.R | library(pastecs)
# Effect size r for a Wilcoxon/Mann-Whitney test, from its two-sided p-value.
#
# Recovers the (signed-negative) z statistic as the normal quantile of p/2 and
# scales it by sqrt(N). Prints "<data name> Effect size, r = <r>" exactly as
# before, and now ALSO returns r invisibly so callers can store it (backward
# compatible: the original returned cat()'s NULL, which no caller could use).
#
# Args:
#   wilcoxModel: an htest object from wilcox.test() (uses $p.value and
#     $data.name only).
#   N: total number of observations contributing to the test.
rfromwilcox <- function(wilcoxModel, N) {
  z <- qnorm(wilcoxModel$p.value / 2)
  r <- z / sqrt(N)
  cat(wilcoxModel$data.name, "Effect size, r = ", r)
  invisible(r)
}
# Compare the two groups (Group 1 = participant IDs starting with "2") on each
# questionnaire measure: descriptives by group (pastecs::stat.desc), a
# Wilcoxon rank-sum test, then the effect size r via rfromwilcox() above.
# NOTE(review): the hard-coded setwd() makes this script machine-specific.
setwd("~/Data/github/SIBSTUDY_slots/1.characteristics")
data_all=read.csv("data_for_R_new.csv")
# Keep only the gambling-disorder / healthy-control subsample.
data<-subset(data_all, GDHC==1)
# First character of the participant ID encodes the group. (substr() returns a
# character that is compared with numeric 2; R coerces 2 to "2", so this works.)
data$Group <- ifelse(substr(data$participant, 1, 1) == 2, 1,0)
data_smokers<-subset(data,Smoke==1)
data_drug_users<-subset(data,DAST>0)
################ NON-PARAMETRIC #######################
# NOTE(review): the N passed to rfromwilcox() is nrow() of the two-column
# frame, which includes rows with NA values -- confirm this matches the N the
# test actually used.
BDI<-data.frame(data$Group,data$BDI)
by(BDI$data.BDI,BDI$data.Group,stat.desc,basic=TRUE,norm=TRUE)
BDImodel<-wilcox.test(BDI$data.BDI~BDI$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
BDImodel
rfromwilcox(BDImodel,nrow(BDI))
Age<-data.frame(data$Group,data$Age)
by(Age$data.Age,Age$data.Group,stat.desc,basic=TRUE,norm=TRUE)
Agemodel<-wilcox.test(Age$data.Age~Age$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
Agemodel
rfromwilcox(Agemodel,nrow(Age))
BAI<-data.frame(data$Group,data$BAI)
by(BAI$data.BAI,BAI$data.Group,stat.desc,basic=TRUE,norm=TRUE)
BAImodel<-wilcox.test(BAI$data.BAI~BAI$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
BAImodel
rfromwilcox(BAImodel,nrow(BAI))
Alcohol<-data.frame(data$Group,data$Alcohol)
by(Alcohol$data.Alcohol,Alcohol$data.Group,stat.desc,basic=TRUE,norm=TRUE)
Alcoholmodel<-wilcox.test(Alcohol$data.Alcohol~Alcohol$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
Alcoholmodel
rfromwilcox(Alcoholmodel,nrow(Alcohol))
IQ<-data.frame(data$Group,data$IQ)
by(IQ$data.IQ,IQ$data.Group,stat.desc,basic=TRUE,norm=TRUE)
IQmodel<-wilcox.test(IQ$data.IQ~IQ$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
IQmodel
rfromwilcox(IQmodel,nrow(IQ))
# Childhood trauma subscales.
Emotional.abuse<-data.frame(data$Group,data$Emotional.abuse)
by(Emotional.abuse$data.Emotional.abuse,Emotional.abuse$data.Group,stat.desc,basic=TRUE,norm=TRUE)
Emotional.abusemodel<-wilcox.test(Emotional.abuse$data.Emotional.abuse~Emotional.abuse$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
Emotional.abusemodel
rfromwilcox(Emotional.abusemodel,nrow(Emotional.abuse))
Physical.Abuse<-data.frame(data$Group,data$Physical.Abuse)
by(Physical.Abuse$data.Physical.Abuse,Physical.Abuse$data.Group,stat.desc,basic=TRUE,norm=TRUE)
Physical.Abusemodel<-wilcox.test(Physical.Abuse$data.Physical.Abuse~Physical.Abuse$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
Physical.Abusemodel
rfromwilcox(Physical.Abusemodel,nrow(Physical.Abuse))
Sexual.Abuse<-data.frame(data$Group,data$Sexual.Abuse)
by(Sexual.Abuse$data.Sexual.Abuse,Sexual.Abuse$data.Group,stat.desc,basic=TRUE,norm=TRUE)
Sexual.Abusemodel<-wilcox.test(Sexual.Abuse$data.Sexual.Abuse~Sexual.Abuse$data.Group,paired=FALSE,exact=FALSE)# turn off exact as get error that cant do it
Sexual.Abusemodel
rfromwilcox(Sexual.Abusemodel,nrow(Sexual.Abuse))
# Drug use, restricted to participants with any DAST score.
by(data_drug_users$DAST,data_drug_users$Group,stat.desc,basic=TRUE,norm=TRUE)
DAST_model<-wilcox.test(data_drug_users$DAST~data_drug_users$Group,paired=FALSE)
DAST_model
rfromwilcox(DAST_model,nrow(data_drug_users))
# Nicotine dependence, restricted to smokers.
Fagerstrom<-data.frame(data_smokers$Group,data_smokers$Fagerstrom)
by(Fagerstrom$data_smokers.Fagerstrom,Fagerstrom$data_smokers.Group,stat.desc,basic=TRUE,norm=TRUE)
Fagerstrom_model<-wilcox.test(data_smokers$Fagerstrom~data_smokers$Group,paired=FALSE)
Fagerstrom_model
rfromwilcox(Fagerstrom_model,nrow(Fagerstrom))
# Problem Gambling Severity Index.
PGSI<-data.frame(data$Group,data$PGSI)
by(PGSI$data.PGSI,PGSI$data.Group,stat.desc,basic=TRUE,norm=TRUE)
|
4527581c90a81054fdcd0048f51bb2a06981a7a2 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.management/R/appregistry_service.R | b2d5174e1a1adca17bf29ffe71766646d5417c9b | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 8,371 | r | appregistry_service.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' AWS Service Catalog App Registry
#'
#' @description
#' Amazon Web Services Service Catalog AppRegistry enables organizations to
#' understand the application context of their Amazon Web Services
#' resources. AppRegistry provides a repository of your applications, their
#' resources, and the application metadata that you use within your
#' enterprise.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- appregistry(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- appregistry()
#' svc$associate_attribute_group(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=appregistry_associate_attribute_group]{associate_attribute_group} \tab Associates an attribute group with an application to augment the application's metadata with the group's attributes\cr
#' \link[=appregistry_associate_resource]{associate_resource} \tab Associates a resource with an application\cr
#' \link[=appregistry_create_application]{create_application} \tab Creates a new application that is the top-level node in a hierarchy of related cloud resource abstractions\cr
#' \link[=appregistry_create_attribute_group]{create_attribute_group} \tab Creates a new attribute group as a container for user-defined attributes\cr
#' \link[=appregistry_delete_application]{delete_application} \tab Deletes an application that is specified either by its application ID, name, or ARN\cr
#' \link[=appregistry_delete_attribute_group]{delete_attribute_group} \tab Deletes an attribute group, specified either by its attribute group ID, name, or ARN\cr
#' \link[=appregistry_disassociate_attribute_group]{disassociate_attribute_group} \tab Disassociates an attribute group from an application to remove the extra attributes contained in the attribute group from the application's metadata\cr
#' \link[=appregistry_disassociate_resource]{disassociate_resource} \tab Disassociates a resource from application\cr
#' \link[=appregistry_get_application]{get_application} \tab Retrieves metadata information about one of your applications\cr
#' \link[=appregistry_get_associated_resource]{get_associated_resource} \tab Gets the resource associated with the application\cr
#' \link[=appregistry_get_attribute_group]{get_attribute_group} \tab Retrieves an attribute group by its ARN, ID, or name\cr
#' \link[=appregistry_get_configuration]{get_configuration} \tab Retrieves a TagKey configuration from an account\cr
#' \link[=appregistry_list_applications]{list_applications} \tab Retrieves a list of all of your applications\cr
#' \link[=appregistry_list_associated_attribute_groups]{list_associated_attribute_groups} \tab Lists all attribute groups that are associated with specified application\cr
#' \link[=appregistry_list_associated_resources]{list_associated_resources} \tab Lists all of the resources that are associated with the specified application\cr
#' \link[=appregistry_list_attribute_groups]{list_attribute_groups} \tab Lists all attribute groups which you have access to\cr
#' \link[=appregistry_list_attribute_groups_for_application]{list_attribute_groups_for_application} \tab Lists the details of all attribute groups associated with a specific application\cr
#' \link[=appregistry_list_tags_for_resource]{list_tags_for_resource} \tab Lists all of the tags on the resource\cr
#' \link[=appregistry_put_configuration]{put_configuration} \tab Associates a TagKey configuration to an account\cr
#' \link[=appregistry_sync_resource]{sync_resource} \tab Syncs the resource with current AppRegistry records\cr
#' \link[=appregistry_tag_resource]{tag_resource} \tab Assigns one or more tags (key-value pairs) to the specified resource\cr
#' \link[=appregistry_untag_resource]{untag_resource} \tab Removes tags from a resource\cr
#' \link[=appregistry_update_application]{update_application} \tab Updates an existing application with new attributes\cr
#' \link[=appregistry_update_attribute_group]{update_attribute_group} \tab Updates an existing attribute group with new details
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname appregistry
#' @export
appregistry <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the credentials/endpoint/region shorthand arguments into the
  # main config list before resolving the client.
  merged <- merge_config(
    config,
    list(credentials = credentials, endpoint = endpoint, region = region)
  )
  # The client object is the service's operations table with the
  # resolved configuration attached.
  set_config(.appregistry$operations, merged)
}
# Private API objects: metadata, handlers, interfaces, etc.
# Container for the service's private objects.
.appregistry <- list()
# Operation functions are registered here (populated elsewhere in the package).
.appregistry$operations <- list()
# Static service metadata used for endpoint resolution and request signing.
.appregistry$metadata <- list(
service_name = "appregistry",
# Endpoint templates keyed by region glob pattern; "{region}" is substituted at request time.
endpoints = list("*" = list(endpoint = "servicecatalog-appregistry.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "servicecatalog-appregistry.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "servicecatalog-appregistry.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "servicecatalog-appregistry.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "Service Catalog AppRegistry",
api_version = "2020-06-24",
# Requests are signed with the "servicecatalog" signing name, not "appregistry".
signing_name = "servicecatalog",
json_version = "1.1",
target_prefix = ""
)
.appregistry$service <- function(config = list()) {
  # Build a client from the service metadata, REST-JSON/SigV4 handlers,
  # and the supplied configuration.
  new_service(.appregistry$metadata, new_handlers("restjson", "v4"), config)
}
|
577cdf6e4c684e06ade8f3d7981fe30f0e990aad | af84f4fb4bd7c41432482cce6170da3e6af3a130 | /epi/SIR_cyclical_mitigation.R | f6aaa35152cdb342cffc6c413e356ab7a54948c9 | [] | no_license | tkmckenzie/pan | a65fc375eea8171c9b64f8360c5cf7b152830b7d | 5337e4c1d09f06f2043551e1dd1ec734aab75b49 | refs/heads/master | 2023-01-09T11:54:29.636007 | 2022-12-21T22:26:58 | 2022-12-21T22:26:58 | 156,240,022 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 977 | r | SIR_cyclical_mitigation.R | library(deSolve)
library(ggplot2)
# NOTE(review): setwd() and rm(list = ls()) in scripts are anti-patterns;
# consider an RStudio project / here::here() and a fresh session instead.
setwd("~/git/pan/epi")
rm(list = ls())
# SIRS model (SIR with waning immunity: the gamma * R term returns
# recovered individuals to the susceptible pool, allowing cycles):
# dS / dt = gamma * R - alpha * S * I
# dI / dt = alpha * S * I - beta * I
# dR / dt = beta * I - gamma * R
# Parameters
# alpha: transmission rate; beta: recovery rate; gamma: rate of immunity loss;
# I.0: initial infected fraction (population is normalised to 1).
alpha = 0.05
beta = 0.01
gamma = 0.001
I.0 = 0.01
# Setting up model
parameters = c(alpha = alpha, beta = beta, gamma = gamma)
state = c(S = 1 - I.0, I = I.0, R = 0)
# Derivative function in the form expected by deSolve::ode:
# takes (time, state, parameters) and returns list(derivatives).
model = function(t, state, parameters){
  with(as.list(c(state, parameters)),{
    dS = gamma * R - alpha * S * I
    dI = alpha * S * I - beta * I
    dR = beta * I - gamma * R
    return(list(c(dS, dI, dR)))
  })
}
times = seq(0, 2500, length.out = 500)
# Solving model
out = ode(y = state, times = times, func = model, parms = parameters)
# Plot results
# Reshape the ode output (columns: time, S, I, R) into long format for ggplot.
df = data.frame(t = rep(out[,1], times = 3),
                y = c(out[,2:4]),
                variable = rep(c("S", "I", "R"), each = nrow(out)))
ggplot(df, aes(t, y)) +
  geom_line(aes(color = variable)) +
  theme_bw() +
  theme(legend.position = "top")
|
a76ac341b4067581b8a80ab5f97ed37df9d78398 | be5d16a8bfbd1f721d20bfc1596ab6ebca674f48 | /ecopackage/R/firmbeta.R | a405c8f518ba6f74b27a803682977195822c679c | [] | no_license | gabriellecy/ecopackage | 24e162535517d1a2004d6a808a772e2f7ee3713f | a7b0273dc7a17d4ab7b88ad8a393244b1f9b2f8d | refs/heads/main | 2023-03-18T17:18:18.482087 | 2021-03-13T23:52:50 | 2021-03-13T23:52:50 | 347,504,248 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 177 | r | firmbeta.R | #Financial functions - firmbeta
# Compute the firm (asset) beta as the value-weighted average of the
# equity and debt betas:
#   beta_firm = (E / V) * beta_E + (D / V) * beta_D,  where V = E + D.
#
# equity     market value of equity (E)
# debt       market value of debt (D)
# equitybeta beta of the firm's equity (beta_E)
# debtbeta   beta of the firm's debt (beta_D)
#
# Returns the value-weighted firm beta.
# Fix: original read `fuction`, which is a parse error and made the whole
# script fail on source().
firmbeta <- function(equity, debt, equitybeta, debtbeta) {
  value <- equity + debt  # total firm value V = E + D
  (equity / value) * equitybeta + (debt / value) * debtbeta
}
|
e6db40d5c32afffbb2f332627f8fcea35d6e4cb7 | 87df222e6e4b2280c6de03895be912d95e45cd52 | /scripts/plot-lr04.R | b25fdfc1e7e6ac9ae5d992b0dd612923f91d7434 | [] | no_license | robynfsj/lr04 | a392736cefe882461cdc9ced38480ed743ba9fae | 6677aeaa5967d717250183a067751550c5e87d5a | refs/heads/master | 2023-01-11T01:52:21.211768 | 2020-11-13T20:36:53 | 2020-11-13T20:36:53 | 297,066,536 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,120 | r | plot-lr04.R | # Set up ------------------------------------------------------------------
library(ggplot2)
# Read and check data -----------------------------------------------------
# Tab-delimited LR04 benthic stack; expects columns `time` (ka) and `d18o` (per mille).
lr04 <- read.delim("./data/lr04.txt")
str(lr04)  # confirm both columns were read in as numeric
head(lr04) # eyeball the first few rows
# Basic plot --------------------------------------------------------------

# plotmath `delta` is used instead of a literal Unicode δ so the axis label
# renders correctly on all graphics devices, including pdf() (see To Do note
# at the bottom of this file about special characters in exports).
ggplot(data = lr04, aes(x = time, y = d18o)) + # set up plot
  geom_line(colour = "#00AFBB", size = 0.5) + # plot line
  labs(title = "LR04 Benthic Stack", # plot labels
       x = "Time (ka)",
       y = expression(delta^{18}*O~"(‰)"),
       caption = "Data from Lisiecki & Raymo (2005)") +
  scale_y_reverse() + # reverse scale on y-axis
  theme_classic() # set ggplot theme

# NOTES
# —————
# - Time is plotted in thousands of years but I would prefer millions of years!
# - Lower d18O represent warmer interglacials so I reversed the y axis as it is
#   more intuitive to see warmer temperatures towards the top.
# - It was difficult to plot the y axis label with symbols and superscript but I
#   managed to get it to work.
# - There are several different ggplot themes:
#   https://ggplot2.tidyverse.org/reference/ggtheme.html
# - These can be tweaked:
#   https://ggplot2.tidyverse.org/reference/theme.html)
# Time in millions of years -----------------------------------------------

lr04$time <- lr04$time / 1000 # convert time from ka to Ma

ggplot(data = lr04, aes(x = time, y = d18o)) +
  geom_line(colour = "#00AFBB", size = 0.5) +
  labs(title = "LR04 Benthic Stack",
       x = "Time (Ma)",
       y = expression(delta^{18}*O~"(‰)"), # plotmath delta renders on all devices
       caption = "Data from Lisiecki & Raymo (2005)") +
  scale_y_reverse() +
  scale_x_continuous(breaks = seq(0, 6, by = 1)) + # set tick frequency
  theme_classic(base_size = 15)

# NOTES
# —————
# I had originally reversed the x-axis using scale_x_reverse() as this makes
# more sense (going from older on the left to younger on the right) but this
# doesn't seem to be the done thing for this type of data. All plots I have seen
# show it going from younger to older, left to right.
# Line colour as gradient -------------------------------------------------

# Map d18o onto the line colour: low d18o (warm interglacials, per the note
# above) plots orange, high d18o (cold glacials) plots blue.
ggplot(data = lr04, aes(x = time, y = d18o, colour = d18o)) +
  geom_line(size = 0.5) +
  scale_colour_gradient(low = "#DB5824", high = "#1A5A95") +
  labs(title = "LR04 Benthic Stack",
       x = "Time (Ma)",
       y = expression(delta^{18}*O~"(‰)"), # plotmath delta renders on all devices
       caption = "Data from Lisiecki & Raymo (2005)") +
  scale_y_reverse() +
  scale_x_continuous(breaks = seq(0, 6, by = 1)) +
  theme_classic(base_size = 15) +
  theme(legend.position = "none") # colour duplicates the y-axis; no legend needed
# Last 500 ka -------------------------------------------------------------

lr04_500 <- subset(lr04, time < 0.5) # subset last 500 ka
lr04_500$time <- lr04_500$time * 1000 # convert time back from Ma to ka

ggplot(data = lr04_500,
       aes(x = time, y = d18o, colour = d18o)) +
  geom_line(size = 0.8) +
  scale_colour_gradient(low = "#DB5824", high = "#1A5A95") +
  labs(title = "LR04 Benthic Stack (Last 500 ka)",
       x = "Age (ka)",
       y = expression(delta^{18}*O~"(‰)"), # plotmath delta renders on all devices
       caption = "Data from Lisiecki & Raymo (2005)") +
  scale_y_reverse() +
  # label the major interglacial marine isotope stages (MIS)
  annotate("text",
           x = c(5, 125, 205, 237, 327, 410),
           y = c(3, 3, 3.3, 3.3, 3, 3),
           label = c("1", "5", "7 a-c", "7e", "9", "11")) +
  theme_classic(base_size = 15) +
  theme(legend.position = "none")
# MIS 7–9 (191–337 ka) ----------------------------------------------------

# NOTE(review): the subset window (130–374 ka) is wider than MIS 7–9 proper
# (191–337 ka per the section header) — presumably to show the flanking
# glacials MIS 6 and 10, which are also labelled below; confirm.
lr04_7to9 <- subset(lr04_500, time > 130 & time < 374) # subset MIS 7-9
ggplot(data = lr04_7to9,
       aes(x = time, y = d18o, colour = d18o)) +
  geom_line(size = 0.8) +
  scale_colour_gradient(low = "#DB5824", high = "#1A5A95") +
  labs(title = "LR04 Benthic Stack (MIS 7–9)",
       x = "Age (ka)",
       y = expression(delta^{18}*O~"(‰)"), # plotmath delta renders on all devices
       caption = "Data from Lisiecki & Raymo (2005)") +
  # label the stages and substages visible in this window
  annotate("text",
           x = c(160, 200, 225, 237, 260, 283, 297, 310, 320, 327, 360),
           y = c(4.9, 3.4, 4.5, 3.3, 4.7, 3.75, 4.4, 3.6, 3.95, 3.1, 4.9),
           label = c("6", "7 a-c", "7d", "7e", "8", "9a", "9b", "9c?", "9d?",
                     "9e", "10")) +
  scale_y_reverse() +
  scale_x_continuous(breaks = seq(120, 380, by = 20)) +
  theme_classic(base_size = 15) +
  theme(legend.position = "none")
# Plot vertically ---------------------------------------------------------

# These types of plots are often displayed vertically so I'll have a go at
# flipping them around. The following block of code needs to be added to the
# plots:
# scale_y_reverse(position = "right") +
# scale_x_reverse() +
# coord_flip() +

# All data
ggplot(data = lr04, aes(x = time, y = d18o)) +
  geom_line(colour = "#00AFBB", size = 0.5) +
  labs(title = "LR04 Benthic Stack",
       x = "Time (Ma)",
       y = expression(delta^{18}*O~"(‰)"), # plotmath delta renders on all devices
       caption = "Data from Lisiecki & Raymo (2005)") +
  scale_y_reverse(position = "right") +
  scale_x_reverse(breaks = seq(0, 6, by = 1)) +
  coord_flip() +
  theme_classic(base_size = 15)

# Last 500 ka
ggplot(data = lr04_500,
       aes(x = time, y = d18o, colour = d18o)) +
  geom_line(size = 0.8) +
  scale_colour_gradient(low = "#DB5824", high = "#1A5A95") +
  labs(title = "LR04 Benthic Stack (Last 500 ka)",
       x = "Age (ka)",
       y = expression(delta^{18}*O~"(‰)"),
       caption = "Data from Lisiecki & Raymo (2005)") +
  annotate("text",
           x = c(5, 125, 205, 237, 327, 410),
           y = c(3, 3, 3.3, 3.3, 3, 3),
           label = c("1", "5", "7 a-c", "7e", "9", "11")) +
  scale_y_reverse(position = "right") +
  scale_x_reverse() +
  coord_flip() +
  theme_classic(base_size = 15) +
  theme(legend.position = "none")

# MIS 7-9
ggplot(data = lr04_7to9, aes(x = time, y = d18o, colour = d18o)) +
  geom_line(size = 0.8) +
  scale_colour_gradient(low = "#DB5824", high = "#1A5A95") +
  labs(title = "LR04 Benthic Stack (MIS 7–9)",
       x = "Age (ka)",
       y = expression(delta^{18}*O~"(‰)"),
       caption = "Data from Lisiecki & Raymo (2005)") +
  annotate("text",
           x = c(160, 200, 225, 237, 260, 283, 297, 310, 320, 327, 360),
           y = c(4.9, 3.4, 4.5, 3.3, 4.7, 3.75, 4.4, 3.6, 3.95, 3.1, 4.9),
           label = c("6", "7 a-c", "7d", "7e", "8", "9a", "9b", "9c?", "9d?",
                     "9e", "10")) +
  scale_y_reverse(position = "right") +
  scale_x_reverse(breaks = seq(120, 380, by = 20)) +
  coord_flip() +
  theme_classic(base_size = 15) +
  theme(legend.position = "none")
# To Do -------------------------------------------------------------------
# - see if it is possible to alter where the blue changes to red on the gradient
# - move plot title to centre
# - investigate how to export plots to pdf with special characters preserved |
67c5294bae01f20f0c790d5a9438e45bbde0ebdb | ac4c1e700063a3cdc2d96047724c4a14d0014c04 | /man/optimisation_f.Rd | d217bf42d3ccb13492fe48a0213ffc44a76b84ef | [] | no_license | dmkant/outilsechant | 9b5557ceaeaa84a0c9d6a765c106dff6980f6c5b | 77c07f1cb876ae4bb71461b5c33b3e9a2fe98fda | refs/heads/master | 2021-08-07T14:22:02.320094 | 2020-06-25T15:01:17 | 2020-06-25T15:01:17 | 194,061,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 909 | rd | optimisation_f.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimisation_f.R
\name{optimisation_f}
\alias{optimisation_f}
\title{Optimisation de faisable}
\usage{
optimisation_f(varselect, data, constr, taille)
}
\arguments{
\item{varselect}{variable utilisée pour diviser le tableau de quotas}
\item{data}{base de sondage}
\item{constr}{tableau de quota}
\item{taille}{taille de l'echantillon}
}
\value{
une liste avec la faisabilite, la liste des bases de sondages, la liste des sous-tableau
}
\description{
Cette fonction permet de déterminer la faisabilité plus rapidement que la fonction faisable
}
\examples{
iris$couleur=sample(c("rose","blanc"),150,replace = TRUE)
constr=data.frame(variables=c("Species","Species","Species","couleur","couleur"),
modalites=c("setosa","versicolor","virginica","rose","blanc"),objectifs=c(25,25,25,35,40))
optimisation_f("couleur",iris,constr,75)
}
|
8c2f1cdb8b8c7ceddb764aeea11ce5012d70c25a | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /understandBPMN/man/cross_connectivity.Rd | cc49e6ea1d9abffc7494409b989aefa18eeb689b | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,622 | rd | cross_connectivity.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metrics.R
\name{cross_connectivity}
\alias{cross_connectivity}
\title{Cross Connectivity}
\usage{
cross_connectivity(file_path, signavio = FALSE,
path_log_already_created = FALSE, generate_new_path_log = FALSE,
time_to_generate_path_log = 1500)
}
\arguments{
\item{file_path}{document object created using the create_internal_document function}
\item{signavio}{boolean which indicates whether the file stems from signavio}
\item{path_log_already_created}{boolean which indicates whether the path log has already been created before or not. When you are not sure, it is best to use the standard which is false}
\item{generate_new_path_log}{used when it is not possible to save the path log, such as with RapidMiner or in unit tests and examples}
\item{time_to_generate_path_log}{time which is the maximum time to generate a new path log in seconds. The standard setting is 1500 seconds.}
}
\value{
an integer indicating the cross connectivity of a model
}
\description{
The cross-connectivity metric that measures the strength of the links between process model elements.
The definition of this new metric builds on the hypothesis that process models are easier understood and contain less errors if they have a high cross-connectivity.
The metric is calculated based on the creation of a data frame containing the values of all connections
}
\examples{
\dontshow{file_path <- system.file("extdata", "doc.txt", package="understandBPMN")}
cross_connectivity(file_path, generate_new_path_log = TRUE)
}
|
1851ea621dbf83dd3c9b8cebdccc3b1f9ce3a931 | e2f1fd23f8209eefa27c8392b29e2a8b1d704128 | /man/m.test.Rd | 1b5c4d7f806ab242a83bf22a564924b3551593c5 | [] | no_license | KirtOnthank/MTest | 7dda55bd3373529882cae4636cd34e9e73d78fb9 | 7de708c8b8f4a7466d144c3875aebfe4aa5d478e | refs/heads/master | 2022-05-22T04:15:19.978580 | 2020-05-01T23:44:29 | 2020-05-01T23:44:29 | 260,345,418 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 409 | rd | m.test.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Mtest.R
\name{m.test}
\alias{m.test}
\title{M-test}
\usage{
m.test(X1, X2, b = 100)
}
\arguments{
\item{X1}{A point pattern object (ppp)}
\item{X2}{Another point pattern object (ppp)}
\item{b}{number of bootstrap replicates to generate randomization envelope}
}
\value{
A list containing the results of the M-test. (TODO: document each component of the returned list.)
}
\description{
Performs the M-test on the two point patterns \code{X1} and \code{X2},
using a randomization envelope generated from \code{b} bootstrap replicates.
}
|
a47e23fb49db0823e2eb719657cc0660d3a570f8 | 8dc4df93d88fe8b504006eefc4e35ee66e29e96e | /learn-R/input-output/step2-expr.R | dbb4e58d646fe996eeba1ed99ee05df7610f147e | [] | no_license | NailouZhang/5years | dd574e54aa5d23b124a48c8aa4890e1f46fc7669 | 099e77bc244ff6b3c080b734956fb125d2f872a5 | refs/heads/master | 2022-03-01T18:17:42.818340 | 2019-10-28T03:18:56 | 2019-10-28T03:18:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,233 | r | step2-expr.R | rm(list=ls())
b=read.table('GSE17215_series_matrix.txt.gz',## read the GEO series-matrix file
             comment.char = '!',header =T,## lines starting with "!" are metadata and are skipped; first row is the header
             sep = '\t')## tab-separated values
rownames(b)=b[,1]## use the first column (probe IDs) as row names
b=b[,-1]## drop the first column now that it is stored as row names
b=log2(b)## log2-transform the expression matrix
mean(as.numeric(b[1,]))## mean of row 1 (coerce the data-frame row to a numeric vector first)
mean(as.numeric(b[2,]))## mean of row 2
head(rowMeans(b))## row means for every row; show the first 6
for(i in 1:nrow(b)){ ## for loop: i runs from the first to the last row of b
  print(mean(as.numeric(b[i,])))## print the mean of row i
}
apply(b,1,function(x){## apply over rows (MARGIN = 1; use 2 for columns) of a matrix/data frame
  mean(x)## the per-row function: the mean
})
for(i in 1:nrow(b)){
  x=as.numeric(b[i,])## x is row i of b as a numeric vector
  y=x[1]+x[2]-x[3]+x[4]-x[5]+x[6]## demo arithmetic on the first six values; assumes b has >= 6 columns
  print(y)## display y
}
apply(b,1,max)## per-row maximum; shorthand for apply(b,1,function(x){max(x)})
rowMax=function(x){## define rowMax as a reusable function
  apply(x,1,max)## its body: the per-row maximum
}
rowMax(b)## run the rowMax function on b
jimmy <- function(b){## wrap the demo loop above in a function named jimmy
  for(i in 1:nrow(b)){
    x=as.numeric(b[i,])
    y=x[1]+x[2]-x[3]+x[4]-x[5]+x[6]
    print(y)
  }
}
jimmy(b)## run jimmy on b
cg=names(sort(apply(b,1,sd),decreasing = T)[1:50])## names of the 50 rows with the largest standard deviation (note: sd, not variance)
sample(1:nrow(b),50)## sample() draws at random: 50 row indices out of 1..nrow(b)
pheatmap::pheatmap(b[1:50,])## heatmap of the first 50 rows
pheatmap::pheatmap(b[sample(1:nrow(b),50),])## heatmap of 50 randomly chosen rows
pheatmap::pheatmap(b[cg,])## heatmap of the 50 most variable genes (cg)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.