blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03bc02a2ba21287608c9798eee0d6aaf9e0ca765 | eeff270817c8e739230bfd668635dbbdd10f6fb5 | /PaperCode_PTE_circlePlotR.R | da58617697aeb49aad74b788d5eff0a5be2b310a | [] | no_license | AnnaCastelnovo/sleep-EOC | 6d3fee1d7705190c03b4d54042bf747fe925e1e2 | 9e8934a10f62f87d8f1838322bfc6391148e5a28 | refs/heads/master | 2022-07-03T15:44:59.715786 | 2022-06-14T12:39:41 | 2022-06-14T12:39:41 | 219,853,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,930 | r | PaperCode_PTE_circlePlotR.R | library(circlize)
# Chord-diagram ("circle plot") generation: for every .mat connectivity
# matrix in the working directory, render one chord diagram to a PNG.
library(R.matlab)
library(stringr)
library("readxl")
#add path
setwd("/.../CirclePlots/")
set.seed(14)
# Reordered transfer matrix plus the matching reordered region labels
TFM <- readMat("TFM_reord.mat")
labelsh <- readMat("reord_labelHigh.mat")
labels <- readMat("reord_scouts2.mat")
label = labels$test
labelh = labelsh$reord.label
# Every .mat file in the folder is treated as one matrix to plot
# NOTE(review): 1:length(temp) misbehaves when no .mat files are found
# (it yields c(1, 0)); seq_along(temp) would be safer.
temp = list.files(pattern="*.mat")
for (i in 1:length(temp)) {
# Load the current file; TFM is overwritten on each iteration
assign('TFM', readMat(temp[i]))
name = temp[i]
tfm = TFM$reord.M
dim(tfm)
# Use the reordered scout labels as row/column names
rownames(tfm) <- sapply( label, paste0)
colnames(tfm) <- sapply( label, paste0)
# Long (from, to, value) representation of the matrix
# NOTE(review): df/mat are built but never used below — confirm dead code.
df <- data.frame(from = rep(rownames(tfm), ncol(tfm)),
to = rep(colnames(tfm), each = nrow(tfm)),
value = as.vector(tfm))
mat = df
# Output file name: input file name without its ".mat" extension
nami = str_trim(substr(name[1], 1, (nchar(name)-4)))
#replace with correct path
s_name = paste("path/to/save/imgs",nami,".png")
s_name = gsub(" ", "", s_name, fixed = TRUE)
png(s_name, width = 2300, height = 2300, res=150)
# Group sectors by the last character of each label
# (presumably a side/hemisphere code — confirm)
nm = unique(unlist(rownames(tfm)))
group = structure(substr(nm, nchar(nm), nchar(nm)), names = nm)
circos.par(start.degree = 250)
circos.par("canvas.xlim" = c(-1.3, 1.3), "canvas.ylim" = c(-1.3, 1.3))
par(cex = 1.8)
# Two colour ramps, one per half of the sectors
rainb = c(rainbow(length(nm)/2, start = 0.1, end =0.4), rainbow(length(nm)/2, start = 0.5, end =0.9))
grid.col <- setNames(rainb, nm)
chordDiagram(tfm, group = group, big.gap = 20, annotationTrack = "grid", preAllocateTracks = 1, directional = 1, grid.col = grid.col, direction.type = c("diffHeight", "arrows"), link.arr.type = "big.arrow",scale = TRUE)
# Add sector labels on the outer (pre-allocated) track
circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
xlim = get.cell.meta.data("xlim")
ylim = get.cell.meta.data("ylim")
sector.name = get.cell.meta.data("sector.index")
circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise", niceFacing = TRUE, adj = c(0, 0.25), cex = 0.5)
}, bg.border = NA)
dev.off()
circos.clear()
}
|
c2353812b5fa9d944a5266c1b11da8edb69ab60e | c023ef48ba59adc6c7c23162971086518910f914 | /cachematrix.R | 291a5320728ca2d3824229cfc51eb3c708114854 | [] | no_license | surya0389/ProgrammingAssignment2 | fbb91190eb4968e74bb5411492953a4db2cd0dbd | 0eeb290fd2f6866fed4b1d1c7748e7534147ae06 | refs/heads/master | 2021-01-18T02:45:59.649224 | 2015-05-24T20:11:04 | 2015-05-24T20:11:04 | 36,190,539 | 0 | 0 | null | 2015-05-24T20:03:26 | 2015-05-24T20:03:26 | null | UTF-8 | R | false | false | 2,033 | r | cachematrix.R | ## Pair of functions that cache the inverse of a matrix.
## makeCacheMatrix: build a special "matrix" object that caches its inverse.
## The returned list exposes four closures sharing one environment:
##   set()/get()               -- replace / retrieve the underlying matrix
##   setInverse()/getInverse() -- store / retrieve the cached inverse
makeCacheMatrix <- function( m = matrix() ) {
  # Cached inverse; NULL means "not computed yet"
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any previously cached inverse
  set <- function( matrix ) {
    m <<- matrix
    cached_inverse <<- NULL
  }
  # Retrieve the stored matrix
  get <- function() m
  # Store a freshly computed inverse in the cache
  setInverse <- function(inverse) cached_inverse <<- inverse
  # Retrieve the cached inverse (NULL if not yet computed)
  getInverse <- function() cached_inverse
  # Expose the four accessors by name
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.
##
## x   : object created by makeCacheMatrix (matrix assumed invertible)
## ... : additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  # Fast path: a cached inverse already exists
  if ( !is.null(cached) ) {
    message("getting data from cache")
    return(cached)
  }
  # Cache miss: pull the matrix, invert it, and remember the result
  fresh <- solve(x$get(), ...)
  x$setInverse(fresh)
  fresh
}
|
a9a1746815171a3b58940e9ae4d664a126e9cb1d | 3456a09a3b1727f273c2ed92a0b2841a420f105b | /Maker.R | d05fdd2507e4e9893e7634d1177c22ca6dc6bb12 | [] | no_license | xxxjvila/exampleMoodle | 48398ad8c531572705f8c8ad3d42a136f67e8010 | edb0ef3dd306ee1bc339082d93c74b09fb277ec9 | refs/heads/master | 2020-06-18T08:34:57.575187 | 2019-07-30T17:22:36 | 2019-07-30T17:22:36 | 196,234,781 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,373 | r | Maker.R | rm(list=ls())
# Build exam exercises as HTML previews and export a randomized exam
# to Moodle XML with the `exams` package.
library(exams)
setwd("/Users/jvila/Dropbox/UseR2019/ExampleMoodle")
## create exams skeleton with:
## - demo-*.R scripts
## - exercises/ folder with all .Rmd/.Rnw exercises
## - templates/ folder with various customizable templates
## - nops/ folder (empty) for exams2nops output
# dir.create(mydir <- getwd())
# exams_skeleton(dir = mydir, absolute = TRUE)
# Exercise file naming scheme:
# T: mc = multiple choice; di = different options
# V: number of valid answers; 01 = only one; se= several
# P: include plot
# D: include data
# swisscapital.Rmd"
exams2html("./exercises/TmcV01PnoDno1.Rmd", converter = "pandoc-mathjax")
# anova.Rmd
exams2html("./exercises/TmcVsePyesDno1.Rmd", converter = "pandoc-mathjax")
# automaton.Rmd: needs 'magick' package
exams2html("./exercises/TmcVsePyesDno2.Rmd", converter = "pandoc-mathjax")
# boxhist.Rmd:
exams2html("./exercises/TdiVsePyesDyes1.Rmd", converter = "pandoc-mathjax")
## Moodle: export 5 random replications of the four exercises above
elearn_exam <- c("TmcV01PnoDno1.Rmd",
"TmcVsePyesDno1.Rmd",
"TmcVsePyesDno2.Rmd",
"TdiVsePyesDyes1.Rmd")
# Seed fixes the random draws so the generated exam is reproducible
set.seed(2019-07-09)
exams2moodle(elearn_exam, n = 5, name = "toMoodle",
dir = "output", edir = "exercises")
# Question 1:
exams2html("./exercises/TipVar01.Rmd", converter = "pandoc-mathjax")
# Question 2:
exams2html("./exercises/TipVar02.Rmd", converter = "pandoc-mathjax")
|
1d2b794883fd3fc58f82bb06e4f99065b6979d6c | 022fc53b9f32151c34edf0f8476122c4707f2314 | /Functions.R | 6da85c52e7d82290f31e3de8556ec7a9e1503f54 | [] | no_license | githubmao/MawanDrivingSimulatorDataAnalysis | b5c7fae383aae6d3fa83e27570acef2e489232ac | f321623aabb2666001d3743bb3f5b5bf13c601fd | refs/heads/master | 2020-03-11T18:49:54.961600 | 2018-04-19T09:11:15 | 2018-04-19T09:11:15 | 130,189,378 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,804 | r | Functions.R | #-----------------------Code Description---------------------------------------#
# Notes:
# ver1.0, date: 20171217, by MaoY
#
# Description: 存放各类分析函数
#------------------------------------------------------------------------------#
# Road-geometry base-layer helper ----
# Draw one road line segment for the driving-trajectory base map.
#
# Args:
#   kXStart, kXEnd: x coordinates of the segment start/end
#                   (annotate() x / xend).
#   kYStart, kYEnd: y coordinates of the segment start/end
#                   (annotate() y / yend).
#   kLineType: line type forwarded to annotate() (default "solid").
#   kSize: line width forwarded to annotate() (default 1).
#
# Returns:
#   A ggplot2 annotation layer drawing the black segment.
PlotRoadLine <- function(kXStart, kXEnd, kYStart, kYEnd, kLineType = "solid", kSize = 1){
  segment_layer <- annotate(
    "segment",
    x = kXStart,
    xend = kXEnd,
    y = kYStart,
    yend = kYEnd,
    colour = "black",
    size = kSize,
    linetype = kLineType
  )
  segment_layer
}
# Main road <-> ramp: compute the lateral driving trajectory ----
# Joins the two road segments in `data` so that the distance-to-left-border
# series of one segment is shifted to continue the other, producing a single
# continuous `drivingTrajectory` column for plotting.
#
# Args:
#   data: renamed data frame with columns roadName, disToLeftBorder and
#         disTravelled; expected to contain exactly two road names.
#   is.main2ramp: TRUE when driving from the main road onto the ramp
#                 (the first road keeps its raw offsets); FALSE shifts
#                 the first road to meet the second instead.
#
# Returns:
#   The input data frame with an added drivingTrajectory column,
#   re-ordered by disTravelled.
CalcDrivingTrajectory <- function(data, is.main2ramp = TRUE){
  road_names <- unique(data$roadName)
  seg_a <- subset(data, data$roadName == road_names[1])
  seg_b <- subset(data, data$roadName == road_names[2])
  n_a <- length(seg_a$disToLeftBorder)
  if (is.main2ramp) {
    # First segment keeps its raw lateral offsets; shift the second so the
    # trajectory is continuous at the segment boundary.
    seg_a$drivingTrajectory <- seg_a$disToLeftBorder
    offset <- seg_a$drivingTrajectory[n_a] - seg_b$disToLeftBorder[1]
    seg_b$drivingTrajectory <- seg_b$disToLeftBorder + offset
  } else {
    # Second segment keeps its raw offsets; shift the first to meet it.
    seg_b$drivingTrajectory <- seg_b$disToLeftBorder
    offset <- seg_b$drivingTrajectory[1] - seg_a$disToLeftBorder[n_a]
    seg_a$drivingTrajectory <- seg_a$disToLeftBorder + offset
  }
  joined <- rbind(seg_a, seg_b)
  joined[order(joined$disTravelled), ]
}
# Build a continuous chainage (newDisFromRoadStart) across two roads ----
# The two road segments in `data` use independent station/chainage systems;
# this re-bases one of them so a single increasing x-axis can be used when
# plotting the trajectory.
#
# Args:
#   data: renamed data frame with columns roadName, disFromRoadStart and
#         disTravelled; expected to contain exactly two road names.
#   is.main2ramp: TRUE when driving from the main line onto the ramp
#                 (the ramp chainage is re-based onto the main line).
#   is.maindisraising: TRUE when the main-line chainage increases in the
#                 driving direction; FALSE mirrors the re-based chainage.
#
# Returns:
#   The input data frame with an added newDisFromRoadStart column,
#   re-ordered by disTravelled.
CalcNewDis <- function(data, is.main2ramp = TRUE, is.maindisraising = TRUE){
  road_names <- unique(data$roadName)
  seg_a <- subset(data, data$roadName == road_names[1])
  seg_b <- subset(data, data$roadName == road_names[2])
  n_a <- length(seg_a$disFromRoadStart)
  if (is.main2ramp) {
    # First road keeps its own chainage; re-base the second onto its end.
    seg_a$newDisFromRoadStart <- seg_a$disFromRoadStart
    if (is.maindisraising) {
      seg_b$newDisFromRoadStart <- seg_b$disFromRoadStart +
        seg_a$disFromRoadStart[n_a]
    } else {
      # Chainage decreases while driving: mirror the second road's values.
      seg_b$newDisFromRoadStart <- seg_a$disFromRoadStart[n_a] -
        seg_b$disFromRoadStart
    }
  } else {
    # Second road keeps its chainage; re-base the first onto its start.
    if (is.maindisraising) {
      seg_a$newDisFromRoadStart <- seg_a$disFromRoadStart -
        seg_a$disFromRoadStart[n_a] +
        seg_b$disFromRoadStart[1]
    } else {
      seg_a$newDisFromRoadStart <- seg_a$disFromRoadStart[n_a] -
        seg_a$disFromRoadStart +
        seg_b$disFromRoadStart[1]
    }
    seg_b$newDisFromRoadStart <- seg_b$disFromRoadStart
  }
  joined <- rbind(seg_a, seg_b)
  joined[order(joined$disTravelled), ]
}
# Convenience wrapper: add newDisFromRoadStart, then drivingTrajectory ----
# Runs CalcNewDis() followed by CalcDrivingTrajectory() on the same data,
# so the result carries both plotting variables.
CalcNewDisDrivingTrajectory <- function(data, is.Main2Ramp = TRUE, is.MainDisRaising = TRUE){
  with_new_dis <- CalcNewDis(data,
                             is.main2ramp = is.Main2Ramp,
                             is.maindisraising = is.MainDisRaising)
  CalcDrivingTrajectory(with_new_dis,
                        is.main2ramp = is.Main2Ramp)
}
|
168419c43a0fde8deebd728c77faeb37e8e270f5 | f85c3a502acc3e1252b28ca1af2f728a6ca573f0 | /R/are_valid_protein_sequences.R | 7a5c0484208dbb74ffc85e5392084bf552c01f9f | [] | no_license | cran/pureseqtmr | 9f3d7f4010adfdc6c5b9a8446a13b3cc904dd406 | 9b53165e7dd3cdd0ded2fdf163f5c73b0c5a7fae | refs/heads/master | 2023-04-15T04:44:21.088795 | 2023-04-06T12:40:02 | 2023-04-06T12:40:02 | 284,769,850 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 545 | r | are_valid_protein_sequences.R | #' Determine if these are all valid protein sequences
#'
#' Checks whether all supplied protein sequences are valid, i.e. usable
#' in topology prediction. Validation is delegated to
#' \code{pureseqtmr::check_protein_sequences}, which errors on invalid input.
#' @inheritParams default_params_doc
#' @return TRUE if all protein sequences are valid, FALSE otherwise
#' @export
are_valid_protein_sequences <- function(
  protein_sequences,
  verbose = FALSE
) {
  tryCatch({
    # check_protein_sequences() throws an error for invalid sequences
    pureseqtmr::check_protein_sequences(protein_sequences)
    TRUE
  }, error = function(e) {
    # Optionally surface the validation error, then report failure
    if (verbose) {
      message(e$message)
    }
    FALSE
  })
}
|
79e592ff878129ef9d59e7a6a03ead471ee193dd | 433a68e1105316e046a4a61e786d214dc877301e | /Analysis/Binary Exchange/Revising answers.R | 902c50e482e768db3aa99603e9d86b7272deb661 | [] | no_license | joshua-a-becker/crowd-classification-problem | b039048802976f9ca5c68bbecd318c55f6fa7fdb | e6c541d7543ff365f9f58ca7b573b5b23dd918f8 | refs/heads/master | 2021-08-29T13:27:22.351212 | 2021-08-16T14:05:36 | 2021-08-16T14:05:36 | 172,978,414 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,490 | r | Revising answers.R | ################################################################################
# This is the script for the main results about revising answers for
# proposition 1: Binary Exchange
################################################################################
###############
# Preparation #
###############
# Cleaning the environment
# NOTE(review): rm(list=ls()) wipes the caller's global environment; fine for
# a standalone analysis script, but do not source() this from other code.
rm(list=ls());gc()
# Loading dependencies
library(tidyverse)
################
# Loading data #
################
# The sourced script is expected to define the data frame `d` used below.
source("Analysis/Prep main experiment data.R")
#######################
# Preparing variables #
#######################
# initially_accurate is if the first judgment was correct
# switch is whether participants changed answers between the first and the
# third judgment
d <- d %>%
mutate(
initially_accurate = correct_1
, switch = ifelse(response_1==response_3, "stay","switch")
, switch12 = ifelse(response_1==response_2, "stay","switch")
)
########################
# Analyses and outputs #
########################
# Table of the number of switches according to being initially correct
# Linear indexing below is column-major: [1]=stay/FALSE, [2]=switch/FALSE,
# [3]=stay/TRUE, [4]=switch/TRUE (table() sorts FALSE before TRUE).
myTable <- table(d$switch, d$initially_accurate)
myTable
# % of participants initially inaccurate who revised their answer
myTable[2] / (myTable[1] + myTable[2])
# % of participants initially accurate who revised their answer
myTable[4] / (myTable[3] + myTable[4])
# Comparison of levels of switching between the initially accurate and the
# initially inaccurate
prop.test(table(d$switch, d$initially_accurate))$p.value
|
43d25936aa8a81e43554fc148c9a390bea778212 | 434ef872d538c03eee9cc47563ea88c737ffb88a | /exp1/scripts/run_nsga3.R | 6fe74d3320a680dc72fda0433b9816320de0f1bf | [] | no_license | minghao2016/MOFS-in-CS | 6f360862d7f80e236b950b415ea70e4032f74eaf | d8da8d168ff393935e5b60c9dcde31d46ab656ab | refs/heads/master | 2020-09-03T00:04:51.377930 | 2019-03-23T08:55:27 | 2019-03-23T08:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,659 | r | run_nsga3.R | library(nsga3)
# For every dataset file: tune an xgboost learner, then run NSGA-III
# multi-objective feature selection and save the result to disk.
# `datasets`, `data.folder` and `train_model` are expected to be defined
# before this loop runs (not visible in this file).
for(f in datasets){
df <- readRDS(file.path(data.folder, f))
# Convert values in target column into binary
levels(df$BAD)[levels(df$BAD) == "GOOD"] <- "0"
levels(df$BAD)[levels(df$BAD) == "BAD"] <- "1"
print(paste(f, "rows: ", nrow(df), "features: ", ncol(df)))
# Tune model parameters
params <- train_model(df)
# Create Classifier
xgb_learner <- makeLearner(
"classif.xgboost",
predict.type = "prob",
par.vals = list(
objective = "binary:logistic",
eval_metric = "error",
early_stopping_rounds = 100,
nrounds = 10000,
max_depth = params$max_depth,
lambda = params$lambda,
alpha = params$alpha,
eta = params$eta,
subsample = params$subsample,
min_child_weight = params$min_child_weight,
colsample_bytree = params$colsample_bytree
)
)
#Cross Validation
resampling <- makeResampleDesc("CV", iters = 5)
#Objective functions
obj_list <- c(mshare, emp) #get_spec) #list of objective functions
obj_names <- c("mshare", "emp", "nf")#names of objective fns will be used as column names
#specify pareto criteria
# NOTE(review): all objectives use low() (minimization) while the trailing
# comment mentions "high = maximize" — confirm minimization is intended.
pareto <- low(mshare)*low(emp)*low(nf)#*low(fcost) # high = maximize
#Activate parallelisation (24 socket workers)
parallelStartSocket(24, show.info = FALSE)
#start NSGA III
ans <- nsga3fs(df = df, target = "BAD", obj_list, obj_names, pareto,
n = 100, max_gen = 100,
model = xgb_learner,
resampling = resampling,
num_features = TRUE,
mutation_rate = 0.01)
parallelStop()
# Persist the selection result next to the working directory
save(ans, file = paste0(f, ".nsga3.RData"))
} |
e21efdaa1499b9139fb5e29995f3f5b28a601c65 | 948dc17401eb812276c36df79c695cf14d91ae06 | /docs/book/sppa-026.R | 2b280679e633d7ffe39e8e2309f392ed3b1515a9 | [
"CC0-1.0",
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | r-spatial/asdar-book.org | bc5d177add0128affb61b8c2ea84dd7ed47869c3 | 366af3b26c2083e585daf2faab6b261e3c622a9b | refs/heads/master | 2023-05-24T19:33:13.224914 | 2023-05-23T10:52:17 | 2023-05-23T10:52:17 | 153,242,511 | 50 | 35 | null | 2020-10-15T08:24:57 | 2018-10-16T07:35:34 | PHP | UTF-8 | R | false | false | 210 | r | sppa-026.R | plot(k, ylab="Intensity", main="")
# Mark the event locations along the x-axis
points(x, rep(0, nx), pch=20)
# Overlay one dashed biweight kernel curve centred on each event;
# x, nx and bw are defined earlier in the script (not visible here).
for(i in 1:length(x))
lines(density(x[i], bw=bw, kernel="biweight"), lty=2)
legend(x=14, y=0.6, legend=c("Intensity", "Kernel"), lty=c(1,2))
|
06b83916f4c6c873ce14dfc65e4ff7b266d4e15c | c43ae097b3e0a7821be41f9c060716442f4e9382 | /assets/shinyapps/app5/ui.R | a6ab3bc451c669d330a942b2fa1f4ec9f2dc531f | [] | no_license | seankross/developing-data-products | 7d4eb15b173c70562cd70570826ea4b639d86d9f | 6a9ebde26547e6300145ee052a555d6c4ae46cd4 | refs/heads/master | 2021-01-13T13:14:49.323266 | 2017-03-29T18:01:19 | 2017-03-29T18:01:19 | 72,706,347 | 5 | 11 | null | 2017-03-29T17:53:17 | 2016-11-03T03:43:40 | R | UTF-8 | R | false | false | 317 | r | ui.R | # ---- app5-ui ----
library(shiny)
# Shiny UI: a static country selector plus a server-rendered region control.
fluidPage(
titlePanel("Dynamic UI"),
sidebarLayout(
sidebarPanel(
selectInput("country", "Which Country do you live in?",
choices = c("USA", "Canada")),
# Placeholder the server fills with a country-specific region input
uiOutput("region")
),
mainPanel(
textOutput("message")
)
)
) |
ed16923a408c39a08bc989e2d32063899c830d91 | 80dd264661fa2d22dd89e765a76ea245b0379433 | /man/mx.concave.cond.Rd | c2662b08c6e69df3d2f83ee2064ae2c3434b05ef | [] | no_license | skranz/RMaxima | 6bd3ae2bc63b237b3c20b9ad36af2f457c257e25 | 4abcf82e9e54971e44af32519d78d2fa316df13b | refs/heads/master | 2021-01-01T05:47:31.037235 | 2015-06-13T03:54:01 | 2015-06-13T03:54:22 | 12,140,704 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 555 | rd | mx.concave.cond.Rd | \name{mx.concave.cond}
\alias{mx.concave.cond}
\title{Returns the conditions from the principal minor test
that the function f is jointly quasi-concave in all variables in var}
\usage{
mx.concave.cond(f, var, with.descr = FALSE,
hessian = NULL)
}
\arguments{
\item{f}{a string that specifies the Maxima formula of
the function body}
\item{var}{a character vector specifying the variables}
}
\description{
Returns the conditions from the principal minor test that
the function f is jointly quasi-concave in all
variables in var
}
|
f234106c0044aefde940875d5b06628c18c3160c | 878aa28161ed778da05902113a9a18fbb2738319 | /R stan run/Keogh Stan run v2.R | c870081e43fffaed8cd54b9b823218a5b625c7f2 | [] | no_license | klwilson23/Keogh | a6f9f3ccb24d10ce08d694eaa8cdecae8dd06dbf | e499c087c267d3e3a89c8edfe4b088248f6338ec | refs/heads/master | 2023-04-11T02:09:36.009864 | 2022-01-21T21:39:32 | 2022-01-21T21:39:32 | 152,797,463 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,780 | r | Keogh Stan run v2.R | source("some functions.R")
# Libraries: data wrangling, plotting, time-series models, Stan and loo
library(reshape2)
library(gridExtra)
library(ggpubr)
library(ggplot2)
library(MARSS)
library(broom)
library(wesanderson)
library(rstan)
library(loo)
library(rethinking)
rstan_options(auto_write = TRUE)
#options(mc.cores = parallel::detectCores(logical=FALSE))
#Sys.setenv(LOCAL_CPPFLAGS = '-march=native')
# Load Keogh River juvenile/environment data and steelhead run timing
keogh <- readRDS("Keogh_newJuv_enviro.rds")
run_time <- readRDS("Data/steelhead_run.rds")
sh_annual <- readRDS("Data/steelhead_run_annual.rds")
sh_annual$time <- 1:length(sh_annual$year)
# Restrict to 1976-2015 and drop chum salmon
keogh_long <- subset(keogh,Year<=2015 & Year>=1976)
keogh_long <- subset(keogh_long,Species!="Chum")
keogh_long$Species <- factor(keogh_long$Species,levels=unique(keogh_long$Species))
# Marine survival (adults per juvenile), its logit, and log productivity
keogh_long$marSurv <- keogh_long$Stock/keogh_long$juvCohort
keogh_long$logitSurv <- log(keogh_long$marSurv/(1-keogh_long$marSurv))
keogh_long$prod <- log(keogh_long$Recruits/keogh_long$Stock)
# Model 1: survival ~ seals + NPGO; run timing ~ rain + temperature.
# Covariates are standardized; the scale/center attributes are kept so
# effects can be back-transformed later.
Xvars <- c("seals","npgo")
sdSurv_sh <- attr(scale(sh_annual[,Xvars],center=TRUE,scale=TRUE),"scaled:scale")
mnSurv_sh <- attr(scale(sh_annual[,Xvars],center=TRUE,scale=TRUE),"scaled:center")
enviro <- scale(sh_annual[,Xvars],center=TRUE,scale=TRUE)
enviro <- data.frame(Xvars=enviro)
colnames(enviro) <- Xvars
# Design matrix with intercept for the survival submodel
sh_trends <- model.matrix(~seals+npgo,data=enviro)
XXvars <- c("total_rain_run","mean_temp_run")
sdSurv_run <- attr(scale(sh_annual[,XXvars],center=TRUE,scale=TRUE),"scaled:scale")
mnSurv_run <- attr(scale(sh_annual[,XXvars],center=TRUE,scale=TRUE),"scaled:center")
enviro_run <- scale(sh_annual[,XXvars],center=TRUE,scale=TRUE)
enviro_run <- data.frame(enviro_run)
colnames(enviro_run) <- XXvars
# Design matrix with intercept for the run-timing submodel
run_trends <- model.matrix(~total_rain_run+mean_temp_run,data=enviro_run)
# Data list matching the Stan program's declared inputs
dat <- list("N"=nrow(sh_trends),
"K"=ncol(sh_trends),
"X"=sh_trends,
"lSurv"=sh_annual$logit_surv,
"J"=ncol(run_trends),
"XX"=run_trends,
"run_time"=sh_annual$run)
fit <- stan(file = "Stan code/Keogh Surv.stan", data=dat, iter=5000,chains=4,cores=4,control=list("adapt_delta"=0.8))
summary(fit, pars=c("beta_surv","beta_run","sigma_surv","sigma_run","phi_surv","phi_run"),probs=c(0.025,0.975))$summary
# Model 2: dynamic linear model (DLM) variant with intercept-free design
# matrices (the Stan program estimates time-varying levels pS0/run0).
Xvars <- c("seals","npgo")
# NOTE(review): "scaled:center" on the next two sd* lines looks like a
# copy-paste of the mn* lines; the first model block uses "scaled:scale"
# for sdSurv_* — confirm (these variables are unused below in this file).
sdSurv_sh <- attr(scale(sh_annual[,Xvars],center=TRUE,scale=TRUE),"scaled:center")
mnSurv_sh <- attr(scale(sh_annual[,Xvars],center=TRUE,scale=TRUE),"scaled:center")
enviro <- scale(sh_annual[,Xvars],center=TRUE,scale=TRUE)
enviro <- data.frame(enviro)
sh_trends <- model.matrix(~-1+seals+npgo,data=enviro)
XXvars <- c("total_rain_run","mean_temp_run")
sdSurv_run <- attr(scale(sh_annual[,XXvars],center=TRUE,scale=TRUE),"scaled:center")
mnSurv_run <- attr(scale(sh_annual[,XXvars],center=TRUE,scale=TRUE),"scaled:center")
enviro_run <- scale(sh_annual[,XXvars],center=TRUE,scale=TRUE)
enviro_run <- data.frame(enviro_run)
run_trends <- model.matrix(~-1+total_rain_run+mean_temp_run,data=enviro_run)
datNew <- list("N"=nrow(sh_trends),
"K"=ncol(sh_trends),
"X"=sh_trends,
"lSurv"=sh_annual$logit_surv,
"J"=ncol(run_trends),
"XX"=run_trends,
"run_time"=sh_annual$run)
# Only these parameters/quantities are kept in the posterior draws
trackPars <- c("beta_surv","beta_run","bSurv","pS0","run0","obs_sigma_surv","obs_sigma_run","pro_sigma_surv","pro_sigma_run","pro_devS","pro_devR","surv_new","run_new","mnSurv","mnRun","log_lik1","log_lik2")
fit2 <- stan(file = "Stan code/Keogh Surv DLM.stan", data=datNew,pars=trackPars, iter=5000,chains=4,cores=1,control=list("adapt_delta"=0.9))
summary(fit2, pars=c("beta_surv","beta_run","bSurv","pS0","run0","obs_sigma_surv","obs_sigma_run","pro_sigma_surv","pro_sigma_run","pro_devS","pro_devR"),probs=c(0.1,0.9))$summary
# Posterior predictive checks for model 1 (`fit`), covariate-effect plots,
# and model comparison between `fit` and `fit2` via PSIS-LOO.
mypost <- as.data.frame(fit)
# Observed vs posterior-predictive survival, with 89% HPD intervals
surv_ppd <- extract(fit)$surv_new
mn_ppd <- colMeans(surv_ppd)
ci_ppd <- apply(surv_ppd,2,HPDI,prob=0.89)
plot(1/(1+exp(-dat$lSurv)),mn_ppd,pch=21,bg="grey50",ylim=range(ci_ppd),xlim=range(1/(1+exp(-dat$lSurv))), main = "Survival",xlab="Observed survival",ylab="Posterior predictive")
segments(x0=1/(1+exp(-dat$lSurv)),y0=ci_ppd[1,],y1=ci_ppd[2,],lwd=1)
abline(b=1,a=0,lwd=2,lty=1,col="red")
# Observed vs posterior-predictive run timing
run_ppd <- extract(fit)$run_new
mn_ppd <- colMeans(run_ppd)
ci_ppd <- apply(run_ppd,2,HPDI,prob=0.89)
plot(dat$run_time,mn_ppd,pch=21,bg="grey50",ylim=range(ci_ppd),xlim=range(dat$run_time), main = "Run time",xlab="Observed run time",ylab="Posterior predictive")
segments(x0=dat$run_time,y0=ci_ppd[1,],y1=ci_ppd[2,],lwd=1)
abline(b=1,a=0,lwd=2,lty=1,col="red")
# Time series of predicted run timing with 95% HPD ribbon + observations
run_ci <- apply(extract(fit)$run_new,2,HPDI,prob=0.95)
plot(colMeans(run_ppd),ylim=range(run_ci),lwd=2,type="l")
polygon(x=c(1:nrow(dat$X),rev(1:nrow(dat$X))),y=c(run_ci[1,],rev(run_ci[2,])),col=adjustcolor("grey",0.5))
lines(colMeans(run_ppd),lwd=2)
points(dat$run_time,pch=21,bg="red")
# Time series of predicted survival with 95% HPD ribbon + observations
surv_ci <- apply(extract(fit)$surv_new,2,HPDI,prob=0.95)
plot(colMeans(extract(fit)$surv_new),ylim=range(surv_ci),lwd=2,type="l")
polygon(x=c(1:nrow(dat$X),rev(1:nrow(dat$X))),y=c(surv_ci[1,],rev(surv_ci[2,])),col=adjustcolor("grey",0.5))
lines(colMeans(extract(fit)$surv_new),lwd=2)
points(sh_annual$Stock/sh_annual$juvCohort,pch=21,bg="red")
# seals:
# Predicted survival re-ordered by seal abundance
surv_ppd <- extract(fit)$surv_new[,order(sh_annual$seals)]
surv_ci <- apply(surv_ppd,2,HPDI,prob=0.95)
plot(sort(sh_annual$seals),colMeans(surv_ppd),ylim=range(surv_ci),lwd=2,type="l")
polygon(x=c(sort(sh_annual$seals),rev(sort(sh_annual$seals))),y=c(surv_ci[1,],rev(surv_ci[2,])),col=adjustcolor("grey",0.5))
lines(sort(sh_annual$seals),colMeans(surv_ppd),lwd=2)
# Quick covariate-vs-prediction scatter plots
plot(sh_annual$seals,colMeans(extract(fit)$surv_new))
plot(sh_annual$total_rain_run,colMeans(extract(fit)$run_new))
# Compare the two models with PSIS-LOO on the pointwise log-likelihoods
loo_1 <- loo(extract_log_lik(fit,parameter_name = c("log_lik1","log_lik2")),cores=1)
loo_2 <- loo(extract_log_lik(fit2,parameter_name = c("log_lik1","log_lik2")),cores=1)
loo_compare(loo_1,loo_2)
|
2958bd216ad806fb8978840504fa31c698f72764 | 6cb50a95d62f1318f867d6b695bf3c31bb894d97 | /R/tsJumpStats.R | 14282c2e7abd1ff37fdb6b9555169ebec79d2c43 | [] | no_license | TankMermaid/seqtime | c15364705f265538e826004f8100189a6885ea4b | 960903d37e2ba95763ef58a1eaf78d94375fe829 | refs/heads/master | 2020-03-29T21:34:12.267387 | 2018-06-13T06:46:21 | 2018-06-13T06:46:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,551 | r | tsJumpStats.R | #' @title Compute statistics on jumps through community space
#'
#' @description Consecutive samples representing time points in an ordination plot can be interpreted
#' as vectors, which have a length and an angle. If ordinate is true, these lengths and angles
#' are computed from the selection dimensions of the ordination. If ordinate is false, dissimilarities between
#' consecutive time points are computed. If ordinate is true, the Euclidean distance to the centroid is computed as well.
#' If a perturbation object is provided and plot.type hist or box is true, the histogram or box plot of
#' the perturbed and non-perturbed jump lengths is plotted and the significance of the difference between
#' perturbed and non-perturbed jump lengths is assessed with a Wilcoxon test.
#' Note that jump lengths plots are a visualization of beta-diversity, which is however not computed between all
#' pair-wise but only between consecutive samples.
#'
#' @param x a community time series, with rows as taxa and columns as time points
#' @param time.given sample names provide time steps, only needed for plotting and plot.type jumps
#' @param time.unit unit of time, only needed for plotting and plot.type jumps
#' @param ordinate if TRUE, compute jumps in ordination plot
#' @param distance the distance (or dissimilarity) used to compute jumps directly or to compute the ordination
#' @param dimensions if ordinate is TRUE, the principal components considered
#' @param groups a vector with group assignments and as many entries as there are samples
#' @param min.jump.length minimum length of jumps to be considered for the statistic
#' @param max.jump.length maximum length of jumps to be considered for the statistic
#' @param plot if TRUE, a plot of the jumps is made
#' @param plot.type bar: plot jumps as bars in the order of occurrence; box: a box plot of jump lengths; hist: a histogram of jump lengths, power: power law of jumps of a certain length, msd: mean square displacement vs time lag
#' @param header text to be used as plot title instead of default text
#' @param subsample for plot.type hist or box: subsample larger jump vector randomly down to smaller one, so that there are as many perturbed as non-perturbed jumps
#' @param perturb a perturbation object, if provided, the plot is colored accordingly
#' @return jump lengths (i.e. dissimilarities of community composition at consecutive time points) and, in case ordinate is true, angles are returned
#' @examples
#' \dontrun{
#' data(david_stoolA_otus)
#' data(david_stoolA_metadata)
#' rarefaction=rarefyFilter(david_stoolA_otus,min = 10000)
#' rarefiedA=rarefaction$rar
#' days=david_stoolA_metadata[1,rarefaction$colindices]
#' interpA=interpolate(rarefiedA,time.vector=days,interval=1,method="stineman")
#' interpA[interpA<0]=0
#' perturbA=perturbation(times=c(80,105), durations=c(5,9))
#' res=tsJumpStats(interpA, plot=TRUE, perturb=perturbA, header="in stool A")
#' out=powerspec(res$lengths,plot=TRUE)
#' }
#' @export
tsJumpStats<-function(x, time.given=FALSE, time.unit="days", ordinate=TRUE, distance="bray", dimensions=c(1,2), groups=c(), min.jump.length=0, max.jump.length=Inf, plot=FALSE, plot.type="bar", header="", subsample=FALSE, perturb=NULL){
centroid=c()
if(ordinate==TRUE){
ordinate.res=vegan::capscale(data.frame(t(x))~1,distance=distance)
centroid=computeCentroid(x=x,ordinate.res = ordinate.res,dimensions=dimensions,groups=groups,min.jump.length = min.jump.length, max.jump.length = max.jump.length)
print("Location of centroid:")
print(centroid)
}
jumps=c()
angles=c()
distancesToOrigin=c() # centroid is used as the origin
if(time.given==TRUE){
time=as.numeric(colnames(x))
}else{
time=1:ncol(x)
}
proceed=TRUE
# loop over samples
for(sample.index in 1:(ncol(x)-1)){
# avoid computation of jumps and angles across groups
if(length(groups)>0){
group1=groups[sample.index]
group2=groups[sample.index+1]
if(group1!=group2){
proceed=FALSE
}
}
if(proceed){
# vector defined by the two consecutive points in multidimensional space
betweenvector=c()
# vector defined by the null point and the first point
firstvector=c()
# vector defined by the null point and the second point
secondvector=c()
# compute the euclidean distance between points in multi-dimensional space
if(ordinate==TRUE){
# loop over dimensions of PCoA
for(dim.index in 1:length(dimensions)){
# vector between origin and sample
firstvector=c(firstvector,ordinate.res$CA$u[sample.index,dimensions[dim.index]])
# vector between origin and second sample
secondvector=c(secondvector,ordinate.res$CA$u[(sample.index+1),dimensions[dim.index]])
# subtracting value of current dimension
pointdim=ordinate.res$CA$u[(sample.index+1),dimensions[dim.index]] - ordinate.res$CA$u[sample.index,dimensions[dim.index]]
# needed to compute length of vector between samples
betweenvector=c(betweenvector,pointdim)
}
# compute length of vector between two points using Pythagoras (Euclidean distance)
betweenvector=betweenvector^2
length=sqrt(sum(betweenvector))
if(length>=min.jump.length){
if(is.infinite(max.jump.length) || length<=max.jump.length){
jumps=c(jumps,length)
firstlength=sqrt(sum(firstvector^2))
secondlength=sqrt(sum(secondvector^2))
dotproduct=sum(firstvector*secondvector)
angle=acos(dotproduct/(firstlength*secondlength))
angles=c(angles,angle)
# compute the Euclidean distance between the centroid and the first sample
distToOrigin=vegdist(rbind(centroid,firstvector),method="euclidean")[1]
#print(dim(as.matrix(vegdist(rbind(centroid,firstvector),method="euclidean"))))
#print(distToOrigin)
distancesToOrigin=c(distancesToOrigin,distToOrigin)
# if this is the pre-last sample, also compute distance of the second sample, since the loop does not consider the last sample
if(sample.index==(ncol(x)-1)){
distToOrigin=vegdist(rbind(centroid,secondvector),method="euclidean")[1]
distancesToOrigin=c(distancesToOrigin,distToOrigin)
}
} # jump is short enough
} # jump is long enough to be considered
}else{
mat=rbind(x[,sample.index],x[,(sample.index+1)])
jump=vegdist(mat, distance=distance)[1]
if(jump>=min.jump.length){
if(is.infinite(max.jump.length) || length<=max.jump.length){
jumps=c(jumps,jump)
}
}
}
}
proceed=TRUE
}
if(ordinate==TRUE){
low.quantile=0.3
high.quantile=0.7
res.trans=getTransitionProbs(distancesToOrigin,low.quantile = low.quantile,high.quantile = high.quantile, groups=groups)
print(paste("Probability of transitions from high to high distance to centroid:",res.trans$high/length(distancesToOrigin)))
print(paste("Probability of transitions from low to low distance to centroid:",res.trans$low/length(distancesToOrigin)))
print(paste("Probability of transitions from low to high distance to centroid:",res.trans$lowhigh/length(distancesToOrigin)))
print(paste("Probability of transitions from high to low distance to centroid:",res.trans$highlow/length(distancesToOrigin)))
}
# compute MSD
half.time=round(length(time)/2)
lag.values=c()
msd.values=c()
diffusion.coeffi=c()
for(lag in 1:half.time){
if(ordinate==TRUE){
msd=computeMSD(x=x,ordinate.res = ordinate.res,lag=lag,groups=groups,min.jump.length = min.jump.length, max.jump.length = max.jump.length, dimensions=dimensions, distance=distance)
}else{
msd=computeMSD(x=x,ordinate.res = NULL,lag=lag,groups=groups,min.jump.length = min.jump.length, max.jump.length = max.jump.length, dimensions=dimensions, distance=distance)
}
msd.values=c(msd.values,msd$dist)
lag.values=c(lag.values,lag)
# msd(lag)=2*d*D*lag, where d is the number of dimensions (=2) and D is the diffusion coefficient
D=msd$dist/(4*lag)
diffusion.coeffi=c(diffusion.coeffi,D)
}
if(plot==TRUE){
# histogram or box plot
if((plot.type=="hist" || plot.type=="box")){
if(!is.null(perturb)){
perturb.indicator=getPerturbedIndices(1:ncol(x),perturb)
perturb.indices=which(perturb.indicator==TRUE)
normal.indices=which(perturb.indicator==FALSE)
jump.perturb=jumps[perturb.indices]
jump.normal=jumps[normal.indices]
if(subsample==TRUE){
if(length(jump.normal)>length(jump.perturb)){
jump.normal=sample(jump.normal)[1:length(jump.perturb)]
}else if(length(jump.perturb)>length(jump.normal)){
jump.perturb=sample(jump.perturb)[1:length(jump.normal)]
}
}
print(paste("Number of non-perturbed jumps:",length(jump.normal)))
print(paste("Minimum of non-perturbed jumps:",min(jump.normal,na.rm=TRUE)))
print(paste("Maximum of non-perturbed jumps:",max(jump.normal,na.rm=TRUE)))
print(paste("Standard deviation of non-perturbed jumps:",sd(jump.normal,na.rm=TRUE)))
print(paste("Number of perturbed jumps:",length(jump.perturb)))
print(paste("Minimum of perturbed jumps:",min(jump.perturb,na.rm=TRUE)))
print(paste("Maximum of perturbed jumps:",max(jump.perturb,na.rm=TRUE)))
print(paste("Standard deviation of perturbed jumps:",sd(jump.perturb,na.rm=TRUE)))
wilcox.out=wilcox.test(jump.normal,jump.perturb)
print(wilcox.out)
# limits
xmax=max(jump.perturb, na.rm=TRUE)
xmin=min(jump.perturb,na.rm=TRUE)
ymax=max(jump.normal,na.rm=TRUE)
ymin=min(jump.normal,na.rm=TRUE)
max=max(xmax,ymax)
min=min(ymin,xmin)
if(header==""){
title="Jump lengths in perturbed and non-peturbed periods"
}else{
title=header
}
title=paste(header,", Wilcoxon p-value=",round(wilcox.out$p.value,2),sep="")
if(plot.type=="box"){
# add missing values to have the same lengths
if(length(jump.normal)>length(jump.perturb)){
while(length(jump.normal)>length(jump.perturb)){
jump.perturb=c(jump.perturb,NA)
}
}else if(length(jump.normal)<length(jump.perturb)){
while(length(jump.normal)<length(jump.perturb)){
jump.normal=c(jump.normal,NA)
}
}
jump.mat=cbind(jump.normal,jump.perturb)
colnames(jump.mat)=c("Normal","Perturbed")
boxplot(jump.mat, main=title, ylim=c(0,max+0.05), ylab="Jump length")
for(i in 1:ncol(jump.mat)){
points(rep(i,length(jump.mat[,i])),jump.mat[,i])
}
}else{
col2=rgb(0,1,0,0.5)
col1=rgb(1,0,0,0.5)
out.h.normal=hist(jump.normal,breaks="FD",plot=FALSE)
out.h.perturb=hist(jump.perturb,breaks="FD",plot=FALSE)
xmaxD=max(out.h.perturb$density)
ymaxD=max(out.h.normal$density)
# check that the density sums to one (it can be greater than one at some points)
print(paste("Total density normal jump length:",sum(out.h.normal$density*diff(out.h.normal$breaks))))
print(paste("Total density perturbed jump length:",sum(out.h.perturb$density*diff(out.h.perturb$breaks))))
maxD=max(xmaxD,ymaxD)
max=max+0.05 # add a margin
maxD=maxD+2.5 # add a margin
hist(jump.perturb,breaks="FD",xlim=c(min,max), ylim=c(0,maxD), prob=TRUE,col=col1, border=col1,xlab="Jump lengths", main=title)
hist(jump.normal,breaks="FD",prob=TRUE,col=col2, border=col2,add=TRUE)
legend("topright",legend=c("Perturbed","Normal"), lty = rep(1,2), col = c(col1,col2), merge = TRUE, bg = "white", text.col="black")
}
}
else{
if(plot.type=="box"){
boxplot(jumps, col="green", main="Jump lengths distribution", ylab="Jump length")
}else{
hist(jumps,main="Histogram", xlab="Jump lengths")
}
}
# bar plot
}else if(plot.type=="bar"){
defaultColor="green"
perturbColor="red"
colors=rep(defaultColor,length(time))
if(!is.null(perturb)){
perturb.indicator=getPerturbedIndices(1:ncol(x),perturb)
perturb.indices=which(perturb.indicator==TRUE)
normal.indices=which(perturb.indicator==FALSE)
colors[perturb.indices]=perturbColor
}
colors=colors[2:(length(colors))]
#print(length(colors))
time=time[2:(length(time))]
#print(length(time))
# above 100 time points labels become unreadable
if(length(time)<100){
names(jumps)=time
}
if(header==""){
title="Jumps in community composition space"
}else{
title=header
}
par(las=2, cex=0.9)
barplot(jumps, col=colors, xlab=paste("Time",time.unit,sep=" in "), ylab="Dissimilarity", main=title)
}else if(plot.type=="power"){
# bin jumps
interval.num=round(sqrt(length(jumps)))
print(paste("Number of intervals:",interval.num))
bins=cut(jumps,breaks=interval.num,labels=FALSE)
instances=table(bins)
values=c()
for(index in 1:length(instances)){
indices=which(bins==index)
values=c(values,median(jumps[indices]))
}
values=log(values)
instances=log(instances)
reg.data=data.frame(values,instances)
linreg = lm(formula = instances~values)
slope=linreg$coefficients[2]
sum=summary(linreg)
pval=1-pf(sum$fstatistic[1], sum$fstatistic[2], sum$fstatistic[3])
print(paste("slope of power law:",slope))
print(paste("p-value of power law:",pval))
plot(values,instances,main=paste("Power law, bins=",interval.num,", slope=",round(slope,2),", p-value=",round(pval,2),sep=""), xlab="log(jump length)", ylab="log(jump number)")
abline(linreg,bty="n",col="red")
}else if(plot.type=="msd"){
# diffusion coefficient is not reported, since this is not a true diffusion process
# for Brownian motion, log(msd) varies linearly with log(lag)
plot(log(lag.values),log(msd.values),type="b",xlab="log(lag)",ylab="log(MSD)",main="Mean squared displacement")
}else{
stop("Plot type is not supported. Supported plot types are: bar, box, hist and power.")
}
}
res=list(jumps,angles,msd.values,lag.values)
names(res)=c("lengths","angles","msds","lags")
if(ordinate==TRUE){
res[["distori"]]=distancesToOrigin
res[["probtranslow"]]=res.trans$low
res[["probtranshigh"]]=res.trans$high
res[["probtranslowhigh"]]=res.trans$lowhigh
res[["probtranshighlow"]]=res.trans$highlow
}
res
}
# Given a time series and a perturbation object, return a logical vector with
# FALSE for non-perturbed and TRUE for perturbed samples.
#
# time: vector of time points to classify
# perturb: perturbation object understood by applyPerturbation()
#
# Returns a logical vector of the same length as time.
getPerturbedIndices<-function(time,perturb){
  perturbCounter=1
  durationCounter=1
  perturbationOn=FALSE
  # preallocate the result instead of growing it with c() inside the loop
  indicator.timeseries=logical(length(time))
  for(i in seq_along(time)){
    # applyPerturbation() carries the perturbation state from one time point
    # to the next via the three counters/flags
    applied=applyPerturbation(perturb=perturb,t=time[i], perturbCounter=perturbCounter, durationCounter=durationCounter, perturbationOn=perturbationOn, ori.growthrates = c(), abundances=c())
    durationCounter=applied$durationCounter
    perturbCounter=applied$perturbCounter
    perturbationOn=applied$perturbationOn
    # perturbationOn is already TRUE/FALSE, so store it directly rather than
    # branching on it
    indicator.timeseries[i]=perturbationOn
  }
  return(indicator.timeseries)
}
# Compute the transition counts between bins of distances to the centroid.
#
# distori: vector of distances to the centroid, one per sample, in time order
# low.quantile/high.quantile: quantiles defining the "low" and "high" bins
# groups: optional group membership per sample; transitions are not counted
#         across group boundaries
#
# Returns a list with the counts of low->low, high->high, low->high and
# high->low transitions (named low, high, lowhigh and highlow).
getTransitionProbs<-function(distori=c(),low.quantile=0.25,high.quantile=0.75, groups=c()){
  t.def=c(low.quantile,high.quantile)
  thresholds=quantile(distori,t.def)
  print(paste("Threshold low distance to centroid for quantile",low.quantile,":",thresholds[1]))
  print(paste("Threshold high distance to centroid for quantile",high.quantile,":",thresholds[2]))
  indices.low=which(distori<thresholds[1]) # low quantile group
  indices.high=which(distori>thresholds[2]) # high quantile group
  print(paste("Number of small distances from origin:",length(indices.low)))
  print(paste("Number of large distances from origin:",length(indices.high)))
  # helper: classify a sample index as "low", "high" or "medium" distance
  classify=function(i){
    if(i %in% indices.low){
      return("low")
    }else if(i %in% indices.high){
      return("high")
    }
    return("medium")
  }
  lowToHigh=0
  highToLow=0
  lowToLow=0
  highToHigh=0
  # guard: 2:length(distori) would count downwards for fewer than 2 samples
  if(length(distori)>=2){
    prevVal=classify(1)
    if(length(groups)>0){
      prevGroup=groups[1]
    }
    for(i in 2:length(distori)){
      # do not count transitions across group boundaries
      crossing=FALSE
      if(length(groups)>0){
        if(groups[i]!=prevGroup){
          crossing=TRUE
        }
        # bug fix: track the current group; previously prevGroup was never
        # updated, so every sample was compared against the first group and
        # all samples after the first boundary were skipped
        prevGroup=groups[i]
      }
      val=classify(i)
      if(!crossing){
        if(prevVal=="high" && val=="low"){
          highToLow=highToLow+1
        }else if(prevVal=="low" && val=="low"){
          lowToLow=lowToLow+1
        }else if(prevVal=="high" && val=="high"){
          highToHigh=highToHigh+1
        }else if(prevVal=="low" && val=="high"){
          lowToHigh=lowToHigh+1
        }
      }
      # always advance, so the first sample of a new group becomes the
      # reference point for the next comparison
      prevVal=val
    }
  }
  res=list(lowToLow,highToHigh,lowToHigh,highToLow)
  names(res)=c("low","high","lowhigh","highlow")
  return(res)
}
# Compute the mean squared displacement (MSD) for a given lag.
# The mean square distance is the squared distance traveled for a given lag,
# averaged over all possible time steps.
# http://web.mit.edu/savin/Public/.Tutorial_v1.2/Introduction.html
#
# x: community matrix with samples in columns
# ordinate.res: ordination result with scores in ordinate.res$CA$u, or NULL to
#               work on dissimilarities between the raw columns of x
# lag: time lag (in samples) over which displacement is measured
# groups: optional group membership per sample; displacements are not computed
#         across group boundaries
# dimensions: ordination dimensions to use
# min.jump.length/max.jump.length: only displacements in this range are kept
# distance: dissimilarity measure used when ordinate.res is NULL
#
# Returns a list with the mean squared x displacement, mean squared y
# displacement and mean distance (named x, y and dist).
computeMSD<-function(x,ordinate.res,lag=1,groups=c(),dimensions=c(1,2),min.jump.length=0, max.jump.length=Inf, distance="bray"){
  proceed=TRUE
  msd.x.values=c()
  msd.y.values=c()
  msd.dist.values=c()
  for(sample.index in 1:(ncol(x)-lag)){
    # avoid computation of jumps and angles across groups
    if(length(groups)>0){
      group1=groups[sample.index]
      group2=groups[sample.index+lag]
      if(group1!=group2){
        proceed=FALSE
      }
    }
    if(proceed){
      if(is.null(ordinate.res)){
        # assess dissimilarity between raw samples, which is more reliable as
        # a distance measure than Euclidean distance in taxon count data
        # NOTE(review): vegdist() takes the measure via its 'method' argument;
        # 'distance=distance' goes through '...' so vegdist falls back to its
        # default method -- confirm intended usage
        mat=rbind(x[,sample.index],x[,(sample.index+lag)])
        jump=vegdist(mat, distance=distance)[1]
        if(jump>=min.jump.length){
          # bug fix: compare the jump itself; previously the base function
          # 'length' was compared here, which errors for any finite
          # max.jump.length
          if(is.infinite(max.jump.length) || jump<=max.jump.length){
            msd.dist.values=c(msd.dist.values,jump)
          }
        }
      }else{
        # vector defined by the two points separated by the lag
        betweenvector=c()
        # vector defined by the null point and the first point
        firstvector=c()
        # vector defined by the null point and the second point
        secondvector=c()
        # loop over the requested ordination dimensions
        for(dim.index in 1:length(dimensions)){
          # vector between origin and sample
          firstvector=c(firstvector,ordinate.res$CA$u[sample.index,dimensions[dim.index]])
          # vector between origin and second sample
          secondvector=c(secondvector,ordinate.res$CA$u[(sample.index+lag),dimensions[dim.index]])
          # per-dimension displacement between the two samples
          pointdim=ordinate.res$CA$u[(sample.index+lag),dimensions[dim.index]] - ordinate.res$CA$u[sample.index,dimensions[dim.index]]
          betweenvector=c(betweenvector,pointdim)
        }
        # squared per-dimension displacements; Euclidean length via Pythagoras
        betweenvector=betweenvector^2
        jump.length=sqrt(sum(betweenvector))
        if(jump.length>=min.jump.length){
          if(is.infinite(max.jump.length) || jump.length<=max.jump.length){
            msd.x.values=c(msd.x.values,betweenvector[1])
            # bug fix: append to msd.y.values; previously this appended to
            # msd.x.values, corrupting the y component of the MSD
            msd.y.values=c(msd.y.values,betweenvector[2])
            msd.dist.values=c(msd.dist.values,jump.length)
          }
        } # jump is long enough to be considered
      }
    } # proceed
    proceed=TRUE
  } # end loop samples
  msd=list(mean(msd.x.values),mean(msd.y.values),mean(msd.dist.values))
  names(msd)=c("x","y","dist")
  return(msd)
}
# Compute the location of the centroid of the samples in ordination space.
#
# x: community matrix with samples in columns (only ncol(x) is used here)
# ordinate.res: ordination result with scores in ordinate.res$CA$u (required)
# dimensions: ordination dimensions to use
# groups: optional group membership per sample; sample pairs that span a group
#         boundary are skipped
# min.jump.length/max.jump.length: a sample only contributes when the jump to
#         the following sample lies within this range
#
# Returns a numeric vector with one coordinate per requested dimension.
computeCentroid<-function(x, ordinate.res=NULL, dimensions=c(1,2), groups=c(), min.jump.length=0, max.jump.length=Inf){
  proceed=TRUE
  # bug fix / generalization: previously hard-coded to c(0,0), which produced
  # NA coordinates whenever more than two dimensions were requested
  centroid=rep(0,length(dimensions))
  for(sample.index in 1:(ncol(x)-1)){
    # avoid computation of jumps and angles across groups
    if(length(groups)>0){
      group1=groups[sample.index]
      group2=groups[sample.index+1]
      if(group1!=group2){
        proceed=FALSE
      }
    }
    if(proceed){
      if(is.null(ordinate.res)){
        stop("Please provide an ordination object.")
      }else{
        # vector defined by the two consecutive points in multidimensional space
        betweenvector=c()
        # vector defined by the null point and the first point
        firstvector=c()
        # vector defined by the null point and the second point
        secondvector=c()
        # loop over the requested ordination dimensions
        for(dim.index in 1:length(dimensions)){
          # vector between origin and sample
          firstvector=c(firstvector,ordinate.res$CA$u[sample.index,dimensions[dim.index]])
          # vector between origin and second sample
          secondvector=c(secondvector,ordinate.res$CA$u[(sample.index+1),dimensions[dim.index]])
          # per-dimension displacement between the two samples
          pointdim=ordinate.res$CA$u[(sample.index+1),dimensions[dim.index]] - ordinate.res$CA$u[sample.index,dimensions[dim.index]]
          betweenvector=c(betweenvector,pointdim)
        }
        # compute length of vector between two points using Pythagoras
        betweenvector=betweenvector^2
        jump.length=sqrt(sum(betweenvector))
        if(jump.length>=min.jump.length){
          if(is.infinite(max.jump.length) || jump.length<=max.jump.length){
            # centroid: sum across coordinates
            for(dim.index in 1:length(firstvector)){
              centroid[dim.index]=centroid[dim.index]+firstvector[dim.index]
              # pre-last sample: also add coordinates of the last sample,
              # since the loop stops one sample short of the end
              if(sample.index==(ncol(x)-1)){
                centroid[dim.index]=centroid[dim.index]+secondvector[dim.index]
              }
            } # loop dimensions
          }
        } # jump is long enough to be considered
      }
    } # proceed
    proceed=TRUE
  } # end loop samples
  # divide sums of coordinates across samples by number of samples
  # NOTE(review): samples filtered out by min/max.jump.length still count in
  # the denominator ncol(x) -- confirm this is intended
  centroid=centroid/ncol(x)
  return(centroid)
}
|
851cf3bf631edb70e5894c45435a0e9b8bc9b61e | c13ce1d62b066f4180b0a4b5c4db6a068eae079f | /R/nifti_2_hdr.R | 4b296922456a43242cefa5c25aa0903366d373bf | [] | no_license | muschellij2/cifti | 9aa5c0ef0edeafd1a2688166dfc99a8b0e9f661e | 84b7947310dd5657dd22b809ca838e876f03673b | refs/heads/master | 2020-12-24T11:53:10.701313 | 2020-08-10T16:06:53 | 2020-08-10T16:06:53 | 73,105,792 | 4 | 7 | null | 2020-07-20T12:35:01 | 2016-11-07T17:57:04 | R | UTF-8 | R | false | false | 4,947 | r | nifti_2_hdr.R | #' @title Read NIfTI-2 Header
#' @description Reads a NIfTI-2 header from a filename
#' @param fname Filename
#' @param verbose Print diagnostic messages
#' @param warn Should warnings be printed? Passed to \code{\link{options}}
#'
#' @return Object of class \code{nifti}
#' @export
#' @note The \code{unused_str} part of the header is not returned, but is an
#' empty string of 15 characters. This code was adapted by
#' the \code{oro.nifti} package
#' @importFrom oro.nifti nifti
nifti_2_hdr = function(fname, verbose = FALSE, warn = -1) {
  ## Open the file in binary mode
  fid <- file(fname, "rb")
  if (verbose) {
    cat(" hdr =", fname, fill = TRUE)
  }
  ## Warnings?
  oldwarn <- getOption("warn")
  options(warn = warn)
  ## the connection is closed and the warning option restored on every exit
  ## path, including the header-size error below
  on.exit({
    close(fid)
    options(warn = oldwarn)
  })
  ## Test for endian properties
  endian <- .Platform$endian
  sizeof.hdr <- readBin(fid, integer(), size = 4, endian = endian)
  if (sizeof.hdr != 540) {
    ## bug fix: no explicit close(fid) here -- the on.exit() handler above
    ## already closes the connection; closing it twice made the handler fail
    ## on an already-closed connection during error unwinding
    stop("Header size is not 540 - likely not a NIfTI-2 or CIFTI file!")
  }
  ## Construct the S4 object and fill it field by field following the NIfTI-2
  ## header layout (int16 = 2 bytes, int32 = 4 bytes, int64/double = 8 bytes)
  nim <- oro.nifti::nifti()
  nim@"sizeof_hdr" <- sizeof.hdr
  nim@"magic" <- cifti_read_char(fid, n=8)
  nim@"datatype" <- readBin(fid, integer(), size=2, endian=endian)
  nim@"bitpix" <- readBin(fid, integer(), size=2, endian=endian)
  # int64 fields are read with size = 8
  nim@"dim_" <- readBin(fid, integer(), n = 8, size = 8, endian=endian)
  nim@"intent_p1" <- readBin(fid, double(), size=8, endian=endian)
  nim@"intent_p2" <- readBin(fid, double(), size=8, endian=endian)
  nim@"intent_p3" <- readBin(fid, double(), size=8, endian=endian)
  nim@"pixdim" <- readBin(fid, double(), 8, size=8, endian=endian)
  nim@"vox_offset" <- readBin(fid, integer(), size = 8, endian=endian)
  # data must start after the 540-byte header plus the 4-byte extension flag
  if (nim@"vox_offset" < 544) {
    warning("vox_offset seems off!")
  }
  if (verbose) {
    cat(" vox_offset =", nim@"vox_offset", fill=TRUE)
  }
  nim@"scl_slope" <- readBin(fid, double(), size=8, endian=endian)
  nim@"scl_inter" <- readBin(fid, double(), size=8, endian=endian)
  nim@"cal_max" <- readBin(fid, double(), size=8, endian=endian)
  nim@"cal_min" <- readBin(fid, double(), size=8, endian=endian)
  nim@"slice_duration" <- readBin(fid, double(), size=8, endian=endian)
  nim@"toffset" <- readBin(fid, double(), size=8, endian=endian)
  nim@"slice_start" <- readBin(fid, integer(), size=8, endian=endian)
  nim@"slice_end" <- readBin(fid, integer(), size=8, endian=endian)
  nim@"descrip" <- cifti_read_char(fid, n=80)
  nim@"aux_file" <- cifti_read_char(fid, n=24)
  nim@"qform_code" <- readBin(fid, integer(), size=4, endian=endian)
  nim@"sform_code" <- readBin(fid, integer(), size=4, endian=endian)
  nim@"quatern_b" <- readBin(fid, double(), size=8, endian=endian)
  nim@"quatern_c" <- readBin(fid, double(), size=8, endian=endian)
  nim@"quatern_d" <- readBin(fid, double(), size=8, endian=endian)
  nim@"qoffset_x" <- readBin(fid, double(), size=8, endian=endian)
  nim@"qoffset_y" <- readBin(fid, double(), size=8, endian=endian)
  nim@"qoffset_z" <- readBin(fid, double(), size=8, endian=endian)
  nim@"srow_x" <- readBin(fid, double(), 4, size=8, endian=endian)
  nim@"srow_y" <- readBin(fid, double(), 4, size=8, endian=endian)
  nim@"srow_z" <- readBin(fid, double(), 4, size=8, endian=endian)
  # these int32 fields were once read with signed=FALSE; they are now read as
  # plain signed 4-byte integers
  nim@"slice_code" <- readBin(fid, integer(), size = 4, endian = endian)
  nim@"xyzt_units" <- readBin(fid, integer(), size = 4, endian = endian)
  nim@"intent_code" = readBin(fid, integer(), size = 4, endian = endian)
  nim@"intent_name" <- cifti_read_char(fid, n=16)
  nim@"dim_info" <- cifti_read_char(fid, n=1)
  # the 15-byte unused_str padding is consumed but not stored in the object
  unused_str = cifti_read_char(fid, n=15)
  ## sanity check: exactly sizeof.hdr (540) bytes must have been consumed
  nhdr = seek(fid)
  stopifnot(nhdr == sizeof.hdr)
  return(nim)
}
4e51229fe0623c0f91f9ddab7c1a1c6353cfe157 | 90c59b8d40e17af30b065103aff8c039855881d3 | /leaflet/sample.R | 3e9a2738449f842a78e146d4272d53af5bc7dfbc | [] | no_license | xaltin/spacial_temporal | 3b587f7db47bbb1b1314c352ac562abe9a89fa60 | 561e4e34f2eabdb92635d97d1fc389529b4007f0 | refs/heads/master | 2021-01-19T00:47:00.974910 | 2015-03-10T13:03:04 | 2015-03-10T13:03:04 | 31,841,994 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 3,875 | r | sample.R | library(leaflet)
# Demo: build a base leaflet map, set the view, add a popup, and draw circles
# from a data frame.
m = leaflet() %>% addTiles()
# printing the widget renders the map
m
# center on the ISU Department of Statistics and zoom in
m = m %>% setView(-93.65, 42.0285, zoom = 17)
m
m %>% addPopups(-93.65, 42.0285, 'Here is the <b>Department of Statistics</b>, ISU')
# add some circles to a map
df = data.frame(Lat = 1:10, Long = rnorm(10))
# columns named Lat/Long are picked up automatically
leaflet(df) %>% addCircles()
# you can also explicitly use Lat and Long
leaflet(df) %>% addCircles(lat = ~ Lat, lng = ~ Long)
# the data can also be given to the layer rather than to the map
leaflet() %>% addCircles(data = df)
# or use df in addCircles() only
leaflet() %>% addCircles(data = df, lat = ~ Lat, lng = ~ Long)
# Demo: build sp::SpatialPolygons by hand (including one with a hole) and
# draw them, then draw US state polygons from the maps package.
library(sp)
Sr1 = Polygon(cbind(c(2, 4, 4, 1, 2), c(2, 3, 5, 4, 2)))
Sr2 = Polygon(cbind(c(5, 4, 2, 5), c(2, 3, 2, 2)))
Sr3 = Polygon(cbind(c(4, 4, 5, 10, 4), c(5, 3, 2, 5, 5)))
# hole = TRUE punches this ring out of the polygon that contains it
Sr4 = Polygon(cbind(c(5, 6, 6, 5, 5), c(4, 4, 3, 3, 4)), hole = TRUE)
Srs1 = Polygons(list(Sr1), "s1")
Srs2 = Polygons(list(Sr2), "s2")
Srs3 = Polygons(list(Sr4, Sr3), "s3/4")
SpP = SpatialPolygons(list(Srs1, Srs2, Srs3), 1:3)
leaflet(height = "300px") %>% addPolygons(data = SpP)
# US state outlines drawn as filled polygons
library(maps)
mapStates = map("state", fill = TRUE, plot = FALSE)
leaflet(data = mapStates) %>% addTiles() %>% addPolygons(fillColor = topo.colors(10, alpha = NULL), stroke = FALSE)
# Demo: markers and vector layers on randomly placed points.
m = leaflet() %>% addTiles()
df = data.frame(
  lat = rnorm(100),
  lng = rnorm(100),
  size = runif(100, 5, 20),
  color = sample(colors(), 100)
)
m = leaflet(df) %>% addTiles()
# circle markers sized and colored per row of df (formula interface)
m %>% addCircleMarkers(radius = ~size, color = ~color, fill = FALSE)
m %>% addCircleMarkers(radius = runif(100, 4, 10), color = c('red'))
# custom marker icon built from raw Leaflet JavaScript via JS()
leaflet() %>% addTiles() %>%
  addMarkers(174.7690922, -36.8523071, icon = JS("L.icon({iconUrl: 'http://cran.rstudio.com/Rlogo.jpg',iconSize: [40, 40]})")) %>%
  addPopups(174.7690922, -36.8523071, 'R was born here!')
# use a third-party (Esri) tile server instead of the default OpenStreetMap
leaflet() %>%
  addTiles(
    'http://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}',
    attribution = 'Tiles © Esri — Esri, DeLorme, NAVTEQ, TomTom, Intermap, iPC, USGS, FAO, NPS, NRCAN, GeoBase, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), and the GIS User Community') %>%
  setView(-93.65, 42.0285, zoom = 17)
set.seed(123)
m = leaflet() %>% addTiles()
# helpers generating random coordinates near Ames, IA
rand_lng = function(n = 10) rnorm(n, -93.65, .01)
rand_lat = function(n = 10) rnorm(n, 42.0285, .01)
# circles (units in metres)
m %>% addCircles(rand_lng(50), rand_lat(50), radius = runif(50, 10, 200))
# circle markers (units in pixels)
m %>% addCircleMarkers(rand_lng(50), rand_lat(50), color = '#ff0000')
m %>% addCircleMarkers(rand_lng(100), rand_lat(100), radius = runif(100, 5, 15))
# rectangles
m %>% addRectangles(
  rand_lng(), rand_lat(), rand_lng(), rand_lat(),
  color = 'red', fill = FALSE, dashArray = '5,5', weight = 3
)
# polylines
m %>% addPolylines(rand_lng(50), rand_lat(50), fill = FALSE)
# polygons (NA separates individual polygons within a single layer call)
m %>% addPolygons(
  c(rand_lng(3), NA, rand_lng(4), NA, rand_lng(5)),
  c(rand_lat(3), NA, rand_lat(4), NA, rand_lat(5)),
  color = c('red', 'green', 'blue')
)
# 6 Other Layers
# ######## GeoJSON format
# var MPoint = {
#   "type": "MultiPoint",
#   "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]
# };
# ######## represented as a list in R
# MPoint = list(
#   type = 'MultiPoint',
#   coordinates = rbind(c(100.0, 0.0), c(101.0, 1.0))
# )
# draw circles labelled A-Z, then overlay GeoJSON features on top of them
m = leaflet() %>% addCircles(lat = 1:26, lng = 1:26, popup = LETTERS)
# GeoJSON features expressed as nested R lists
shapes = list(
  list(
    type = 'Feature',
    properties = list(
      popup = 'Here are some markers!'
    ),
    geometry = list(
      type = 'MultiPoint',
      coordinates = cbind(10:1, 1:10)
    )
  ),
  list(
    type = 'Feature',
    properties = list(
      style = list(color = 'red', fillColor = 'yellow'),
      popup = 'Here is a polygon, or perhaps a flower...'
    ),
    geometry = list(
      type = 'Polygon',
      # circle of radius 10 around (26, 26), sampled at 10 angles
      coordinates = list(26 + 10 * t(sapply(seq(0, 2 * pi, length = 10), function(x) {
        c(cos(x), sin(x))
      })))
    )
  )
)
m %>% addGeoJSON(shapes)
9419ca19fb33d7f6ac84a3d705f3fa679185d312 | fd823404007a3616481ac53efb0822f55b8e4956 | /R/carb.R | d667a253a6c68991dbb1946469e634538a719405 | [] | no_license | jamesorr/seacarb | ca5dde55c5cf2cc61d5da579fa4e6b62bd1c73f2 | 6eaa18524501bc299bbacd901a71a7e6ff7c50be | refs/heads/master | 2021-01-18T05:15:59.351380 | 2014-09-09T07:38:59 | 2014-09-09T07:38:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,832 | r | carb.R | # Copyright (C) 2008 Jean-Pierre Gattuso and Heloise Lavigne and Aurelien Proye
# with a most valuable contribution of Bernard Gentili <gentili@obs-vlfr.fr>
# and valuable suggestions from Jean-Marie Epitalon <epitalon@lsce.saclay.cea.fr>
#
# This file is part of seacarb.
#
# Seacarb is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or any later version.
#
# Seacarb is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with seacarb; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
#
carb<-
function(flag, var1, var2, S=35, T=25, P=0, Pt=0, Sit=0, k1k2='x', kf='x', ks="d", pHscale="T", b="l10"){
RES <- data.frame()
n <- max(length(var1), length(var2), length(S), length(T), length(P), length(Pt), length(Sit), length(k1k2), length(kf), length(pHscale), length(ks), length(b))
if(length(flag)!=n){ flag <- rep(flag[1],n)}
if(length(var1)!=n){ var1 <- rep(var1[1],n)}
if(length(var2)!=n){ var2 <- rep(var2[1],n)}
if(length(S)!=n){ S <- rep(S[1],n)}
if(length(T)!=n){ T <- rep(T[1],n)}
if(length(P)!=n){ P <- rep(P[1],n)}
if(length(Pt)!=n){ Pt <- rep(Pt[1],n)}
if(length(Sit)!=n){ Sit <- rep(Sit[1],n)}
if(length(k1k2)!=n){ k1k2 <- rep(k1k2[1],n)}
if(length(kf)!=n){ kf <- rep(kf[1],n)}
if(length(ks)!=n){ ks <- rep(ks[1],n)}
if(length(pHscale)!=n){pHscale <- rep(pHscale[1],n)}
if(length(b)!=n){ b <- rep(b[1],n)}
df <- data.frame(flag, var1, var2, S, T, P, Pt, Sit, pHscale, b)
##BOUCLE
for(i in (1:nrow(df))) {
flag <- as.numeric(df[i,1])
var1 <- as.numeric(df[i,2])
var2 <- as.numeric(df[i,3])
S <- as.numeric(df[i,4])
T <- as.numeric(df[i,5])
P <- as.numeric(df[i,6])
Pt <- as.numeric(df[i,7])
Sit <- as.numeric(df[i,8])
pHscale <- as.character(df[i,9])
b <- as.character(df[i,10])
res <- rep(NA, 14)
if((is.na(var1)==FALSE)&(is.na(var2)==FALSE)){
#-------Constantes----------------
tk = 273.15; # [K] (for conversion [deg C] <-> [K])
# JME: moved following code block here, after reading imput file
TK = T + tk; # TK [K]; T[C]
#---- issues de equic----
Cl = S / 1.80655; # Cl = chlorinity; S = salinity (per mille)
cl3 = Cl^(1/3);
ION = 0.00147 + 0.03592 * Cl + 0.000068 * Cl * Cl; # ionic strength
iom0 = 19.924*S/(1000-1.005*S);
ST = 0.14/96.062/1.80655*S; # (mol/kg soln) total sulfate
bor = bor(S=S , b=b); # (mol/kg), DOE94 boron total
fluo = (7*(S/35))*1e-5 # (mol/kg), DOE94 fluoride total
#---------------------------------------------------------------------
#--------------------- calcul des K ----------------------------------
#---------------------------------------------------------------------
K1 <- K1(S=S, T=T, P=P, pHscale=pHscale, k1k2=k1k2[i])
K2 <- K2(S=S, T=T, P=P, pHscale=pHscale, k1k2=k1k2[i])
Kf <- Kf(S=S, T=T, P=P, pHscale=pHscale, kf=kf[i])
Ks <- Ks(S=S, T=T, P=P, ks=ks[i])
Kw <- Kw(S=S, T=T, P=P, pHscale=pHscale)
K0 <- K0(S=S, T=T, P=P)
Kb <- Kb(S=S, T=T, P=P, pHscale=pHscale)
K1p <- K1p(S=S, T=T, P=P, pHscale=pHscale)
K2p <- K2p(S=S, T=T, P=P, pHscale=pHscale)
K3p <- K3p(S=S, T=T, P=P, pHscale=pHscale)
Ksi <- Ksi(S=S, T=T, P=P, pHscale=pHscale)
Kspa <- Kspa(S=S, T=T, P=P)
Kspc <- Kspc(S=S, T=T, P=P)
rho <- rho(S=S,T=T,P=P)
#------------------------------------------------------------------#
#------------------------------------------------------------------#
# VARIABLES #
#------------------------------------------------------------------#
#------------------------------------------------------------------#
# flag = 1 pH-CO2 given
# flag = 2 CO2-HCO3 given
# flag = 3 CO2-CO3 given
# flag = 4 CO2-ALK given
# flag = 5 CO2-DIC given
# flag = 6 pH and HCO3 given
# flag = 7 pH and CO3 given
# flag = 8 pH and ALK given
# flag = 9 pH and DIC given
# flag = 10 HCO3 and CO3 given
# flag = 11 HCO3 and ALK given
# flag = 12 HCO3 and DIC given
# flag = 13 CO3 and ALK given
# flag = 14 CO3 and DIC given
# flag = 15 ALK and DIC given
# flag = 21 pCO2-pH given
# flag = 22 pCO2-HCO3 given
# flag = 23 pCO2-CO3 given
# flag = 24 pCO2-ALK given
# flag = 25 pCO2-DIC given
# ------------ case 1.) PH and CO2 given
if (flag==1)
{
PH <- var1
CO2 <- var2
h <- 10^(-PH)
fCO2 <- CO2/K0
HCO3 <- (K1*CO2)/h
CO3 <- (K2*HCO3)/h
DIC <- CO2 + HCO3 + CO3
}
# ------------ case 2.) CO2 and HCO3 given
if (flag==2)
{
CO2 <- var1
HCO3 <- var2
fCO2 <- CO2/K0
h <- K0*K1*fCO2/HCO3
CO3 <- K0*K1*K2*fCO2/(h*h)
DIC <- CO2 + HCO3 + CO3
PH <- -log10(h)
}
# ------------ case 3.) CO2 and CO3 given
if (flag==3)
{
CO2 <- var1
CO3 <- var2
fCO2 <- CO2/K0
h <- sqrt((K0*K1*K2*fCO2)/CO3)
HCO3 <- (K0*K1*fCO2)/h
DIC <- CO2 + HCO3 + CO3
PH <- -log10(h)
}
# ------------ case 4.) CO2 and ALK given
Ks <- Ks(S=S, T=T, P=P)
if (flag==4)
{
CO2 <- var1
ALK <- var2
fALK <- function(x)# K1=K1, K2=K2, CO2=CO2, bor=bor, Kb=Kb, Kw=Kw, Pt=Pt, K1p=K1p, K2p=K2p, K3p=K3p, Sit=Sit, Ksi=Ksi, NH3t=NH3t, KNH3=KNH3, H2St=H2St, KH2S=KH2S, ST=ST, Ks=Ks, fluo=fluo, Kf=Kf, ALK=ALK) {
# composants for ALK
# x is the H+concentration
{DIC <- CO2*(1+K1/x+K1*K2/(x*x))
hco3 <- DIC*x*K1/(x*x + K1*x + K1*K2)
co3 <- DIC*K1*K2/(x*x + K1*x + K1*K2)
boh4 <- bor/(1+x/Kb)
oh <- Kw/x
h3po4 <- Pt*x^3/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*x/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
siooh3 <- Sit/(1+x/Ksi)
## calculate Hfree and Htot
if(pHscale=="F"){hfree <- x ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(x)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(x)), S=S, T=T, P=P))
htot <- x}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(x)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(x)), S=S, T=T, P=P))}
hso4 <- ST/(1+Ks/hfree)
hf <- fluo/(1+Kf/htot)
############
OUT <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4-ALK
OUT}
h <- uniroot(fALK,c(10^(-9.5),10^(-3.5)), tol=1e-20)$root
DIC <- CO2*(1+K1/h+K1*K2/(h*h))
HCO3 <- (DIC*K1*h)/(h*h+K1*h+K1*K2)
CO3 <- (DIC*K1*K2)/(h*h+K1*h+K1*K2)
fCO2 <- CO2/K0
PH <- -log10(h)
}
# ------------ case 5.) CO2 and DIC given
if (flag==5)
{
CO2 <- var1
DIC <- var2
fCO2 <- CO2/K0
a <- K1*K2*CO2
b <- K1*CO2
c <- CO2 - DIC
D <- b*b - 4*a*c
X <- (sqrt(D)-b)/(2*a) # X = 1/h
h <- 2*K1*K2*CO2/(sqrt(K1*CO2*K1*CO2 - 4*K1*K2*CO2*(CO2 - DIC))-K1*CO2)
HCO3 <- K0*K1*fCO2/h
CO3 <- DIC - CO2 - HCO3
PH <- -log10(h)
}
# ------------ case 6.) PH and HCO3 given
if (flag==6)
{
PH <- var1
HCO3 <- var2
h <- 10^(-PH)
CO2 <- (HCO3*h)/K1
CO3 <- K2*HCO3/h
DIC <- CO2 + HCO3 + CO3
fCO2 <- CO2/K0
}
# ------------ case 7.) PH and CO3 given
if (flag==7)
{
PH <- var1
CO3 <- var2
h <- 10^(-PH)
HCO3 <- CO3*h/K2
CO2 <- HCO3*h/K1
fCO2 <- CO2/K0
DIC <- CO2 + HCO3 + CO3
}
# ------------ case 8.) PH and ALK given
if (flag==8)
{
PH <- var1
ALK <- var2
h <- 10^(-PH)
# Residual of the total-alkalinity balance as a function of DIC (x).
# h and all equilibrium constants come from the enclosing scope; the root
# of fALK in x is the DIC consistent with the given PH and ALK.
fALK <- function(x) # x = trial DIC; returns (computed ALK) - (target ALK)
# components of total alkalinity
{hco3 <- x*h*K1/(h*h + K1*h + K1*K2)
co3 <- x*K1*K2/(h*h + K1*h + K1*K2)
boh4 <- bor/(1+h/Kb)
oh <- Kw/h
h3po4 <- Pt*h^3/(h^3+K1p*h^2+K1p*K2p*h+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*h/(h^3+K1p*h^2+K1p*K2p*h+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(h^3+K1p*h^2+K1p*K2p*h+K1p*K2p*K3p)
siooh3 <- Sit/(1+h/Ksi)
## calculate Hfree and Htot (H+ on the free and total pH scales)
if(pHscale=="F"){hfree <- h ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(h)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(h)), S=S, T=T, P=P))
htot <- h}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(h)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(h)), S=S, T=T, P=P))}
hso4 <- ST/(1+Ks/hfree)   # uses free-scale H+
hf <- fluo/(1+Kf/htot)    # uses total-scale H+
############
OUT <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4-ALK
OUT}
# search interval c(5e-4, 0.8) mol/kg brackets plausible DIC; uniroot
# errors if the root lies outside it
DIC <- uniroot(fALK,c(5e-4,0.8), tol=1e-20)$root
CO2 <- DIC/(1+K1/h+K1*K2/(h^2))
HCO3 <- CO2*K1/h
CO3 <- HCO3*K2/h
fCO2 <- CO2/K0
}
# ------------ case 9.) PH and DIC given
# Closed form: HCO3 and CO3 are the equilibrium fractions of DIC at h.
if (flag==9)
{
PH <- var1
DIC <- var2
h <- 10^(-PH)
HCO3 <- (DIC*K1*h)/(h*h+K1*h+K1*K2)
CO3 <- (DIC*K1*K2)/(h*h+K1*h+K1*K2)
CO2 <- h*HCO3/K1
fCO2 <- CO2/K0
}
# ------------ case 10.) HCO3 and CO3 given
# Closed form: h follows directly from the K2 equilibrium ratio HCO3/CO3.
if (flag==10)
{
HCO3 <- var1
CO3 <- var2
h <- K2*HCO3/CO3
CO2 <- h*HCO3/K1
DIC <- CO2 + HCO3 + CO3
fCO2 <- CO2/K0
PH <- -log10(h)
}
# ------------ case 11.) HCO3 and ALK given
if (flag==11)
{
HCO3 <- var1
ALK <- var2
# Alkalinity residual as a function of h (x); DIC is re-derived from the
# fixed HCO3 at each trial h. Constants come from the enclosing scope.
fALK <- function(x)# x = trial [H+]; returns (computed ALK) - (target ALK)
# components of total alkalinity
{DIC <- HCO3*(x^2+K1*x+K1*K2)/(K1*x)
hco3 <- HCO3
co3 <- DIC*K1*K2/(x*x + K1*x + K1*K2)
boh4 <- bor/(1+x/Kb)
oh <- Kw/x
h3po4 <- Pt*x^3/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*x/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
siooh3 <- Sit/(1+x/Ksi)
## calculate Hfree and Htot (H+ on the free and total pH scales)
if(pHscale=="F"){hfree <- x ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(x)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(x)), S=S, T=T, P=P))
htot <- x}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(x)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(x)), S=S, T=T, P=P))}
hso4 <- ST/(1+Ks/hfree)
hf <- fluo/(1+Kf/htot)
############
OUT <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4-ALK
OUT}
# root bracketed between pH 3 and pH 9.5
h <- uniroot(fALK,c(10^(-9.5),10^(-3)),tol=1e-20)$root
CO2 <- h*HCO3/K1
CO3 <- K2*HCO3/h
DIC <- CO2 + HCO3 + CO3
PH <- -log10(h)
fCO2 <- CO2/K0
}
# ------------ case 12.) HCO3 and DIC given
# Quadratic in h obtained from DIC = HCO3*(h^2 + K1*h + K1*K2)/(K1*h):
#   HCO3*h^2 + K1*(HCO3-DIC)*h + K1*K2*HCO3 = 0
if (flag==12)
{
HCO3 <- var1
DIC <- var2
a <- HCO3
b <- K1*(HCO3-DIC)
c <- K1*K2*HCO3
D <- b*b - 4*a*c
# NOTE(review): both roots of this quadratic are positive when DIC > HCO3;
# (-b - sqrt(D))/(2a) selects the smaller one -- confirm it is the
# chemically relevant root.
h <- (-b-sqrt(D))/(2*a)
CO2 <- h*HCO3/K1
CO3 <- K2*HCO3/h
fCO2 <- CO2/K0
PH <- -log10(h)
}
# ------------ case 13.) CO3 and ALK given
if (flag==13)
{
CO3 <- var1
ALK <- var2
# Alkalinity residual as a function of h (x); DIC is re-derived from the
# fixed CO3 at each trial h. Constants come from the enclosing scope.
fALK <- function(x)# x = trial [H+]; returns (computed ALK) - (target ALK)
# components of total alkalinity
{DIC <- CO3*(x^2+K1*x+K1*K2)/(K1*K2)
hco3 <- DIC*K1*x/(x*x + K1*x + K1*K2)
co3 <- CO3
boh4 <- bor/(1+x/Kb)
oh <- Kw/x
h3po4 <- Pt*x^3/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*x/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
siooh3 <- Sit/(1+x/Ksi)
## calculate Hfree and Htot (H+ on the free and total pH scales)
if(pHscale=="F"){hfree <- x ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(x)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(x)), S=S, T=T, P=P))
htot <- x}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(x)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(x)), S=S, T=T, P=P))}
hso4 <- ST/(1+Ks/hfree)
hf <- fluo/(1+Kf/htot)
############
OUT <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4-ALK
OUT}
# root bracketed between pH 3.5 and pH 9.5
h <- uniroot(fALK,c(10^(-9.5),10^(-3.5)),tol=1e-20)$root
HCO3 <- h*CO3/K2
CO2 <- h*HCO3/K1
fCO2 <- CO2/K0
DIC <- HCO3+CO2+CO3
PH <- -log10(h)
}
# ------------ case 14.) CO3 and DIC given
# Positive root of CO3*h^2 + K1*CO3*h + K1*K2*(CO3-DIC) = 0, derived from
# DIC = CO3*(h^2 + K1*h + K1*K2)/(K1*K2).
if (flag==14)
{
CO3 <- var1
DIC <- var2
h <- (-K1*CO3 + sqrt(((K1*CO3)^2)-4*CO3*K1*K2*(CO3-DIC)))/(2*CO3)
HCO3 <- h*CO3/K2
CO2 <- h*HCO3/K1
fCO2 <- CO2/K0
PH <- -log10(h)
}
# ------------ case 15.) ALK and DIC given
if (flag==15)
{
ALK <- var1
DIC <- var2
# Alkalinity residual as a function of h (x), with DIC fixed.
# Constants come from the enclosing scope.
fALK <- function(x) # x = trial [H+]; returns (computed ALK) - (target ALK)
# components of total alkalinity
{hco3 <- DIC*x*K1/(x*x + K1*x + K1*K2)
co3 <- DIC*K1*K2/(x*x + K1*x + K1*K2)
boh4 <- bor/(1+x/Kb)
oh <- Kw/x
h3po4 <- Pt*x^3/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*x/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
siooh3 <- Sit/(1+x/Ksi)
## calculate Hfree and Htot (H+ on the free and total pH scales)
if(pHscale=="F"){hfree <- x ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(x)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(x)), S=S, T=T, P=P))
htot <- x}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(x)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(x)), S=S, T=T, P=P))}
hso4 <- ST/(1+Ks/hfree)
hf <- fluo/(1+Kf/htot)
############
OUT <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4-ALK
OUT}
# wider bracket (pH 3.5 to 10) and a tighter tolerance than the other cases
h <- uniroot(fALK,c(1e-10,10^(-3.5)),tol=1e-30)$root
HCO3 <- (DIC*K1*h)/(h*h+K1*h+K1*K2)
CO3 <- (DIC*K1*K2)/(h*h+K1*h+K1*K2)
CO2 <- h*HCO3/K1
fCO2 <- CO2/K0
PH <- -log10(h)
}
# ------------ calculation of pCO2 for cases 1 to 15
# JME: corrected fugacity calculation
# here P = Patm = 1 bar
# B is presumably the first virial coefficient of CO2 (a Weiss-type cubic
# fit in TK, scaled by 1e-6) -- TODO confirm against the original reference.
if ((flag>=1)&(flag<=15))
{
B=(-1636.75+12.0408*TK-0.0327957*(TK*TK)+0.0000316528*(TK*TK*TK))*1e-6;
pCO2= fCO2*(1/exp((1*100000)*(B+2*(57.7-0.118*TK)*1e-6)/(8.314*TK)))
}
# ------------ calculation of fCO2 for cases 21 to 25
# JME: corrected fugacity calculation
# here P = Patm = 1 bar
# Inverse of the conversion above; for flags 21-25, var1 carries pCO2 in
# microatmospheres (hence the 1e-6 scaling to atm).
if ((flag>=21)&(flag<=25))
{
pCO2 <- var1*1e-6
B=(-1636.75+12.0408*TK-0.0327957*(TK*TK)+0.0000316528*(TK*TK*TK))*1e-6;
fCO2= pCO2*(exp((1*100000)*(B+2*(57.7-0.118*TK)*1e-6)/(8.314*TK)))
}
# For flags 21-25, fCO2 was already derived from var1 (pCO2) in the fugacity
# block above; only var2 is read in the case blocks below.
# ------------ case 21.) PH and pCO2 given
if (flag==21)
{
PH <- var2
h <- 10^(-PH)
CO2 <- K0*fCO2
HCO3 <- K1*CO2/h
CO3 <- K2*HCO3/h
DIC <- CO2 + HCO3 + CO3
}
# ------------ case 22.) HCO3 and pCO2 given
if (flag==22)
{
HCO3 <- var2
CO2 <- fCO2*K0
h <- CO2*K1/HCO3
CO3 <- HCO3*K2/h
DIC <- CO2 + HCO3 + CO3
PH <- -log10(h)
}
# ------------ case 23.) CO3 and pCO2 given
# h^2 = K0*K1*K2*fCO2/CO3 follows from combining the K1 and K2 equilibria.
if (flag==23)
{
CO3 <- var2
h <- sqrt(K0*K1*K2*fCO2/CO3)
HCO3 <- h*CO3/K2
CO2 <- h*HCO3/K1
DIC <- CO2 + HCO3 + CO3
PH <- -log10(h)
}
# ------------ case 24.) ALK and pCO2 given
if (flag==24)
{
ALK <- var2
CO2 <- fCO2*K0
# Alkalinity residual as a function of h (x); DIC is re-derived from the
# fixed CO2 at each trial h. Constants come from the enclosing scope.
fALK <- function(x)# x = trial [H+]; returns (computed ALK) - (target ALK)
# components of total alkalinity
{DIC <- CO2*(1+K1/x+K1*K2/(x*x))
hco3 <- DIC*x*K1/(x*x + K1*x + K1*K2)
co3 <- DIC*K1*K2/(x*x + K1*x + K1*K2)
boh4 <- bor/(1+x/Kb)
oh <- Kw/x
h3po4 <- Pt*x^3/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*x/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
siooh3 <- Sit/(1+x/Ksi)
## calculate Hfree and Htot (H+ on the free and total pH scales)
if(pHscale=="F"){hfree <- x ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(x)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(x)), S=S, T=T, P=P))
htot <- x}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(x)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(x)), S=S, T=T, P=P))}
hso4 <- ST/(1+Ks/hfree)
hf <- fluo/(1+Kf/htot)
############
OUT <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4-ALK
OUT}
# root bracketed between pH 3.5 and pH 10
h <- uniroot(fALK,c(1e-10,10^(-3.5)),tol=1e-20)$root
HCO3 <- K1*CO2/h
CO3 <- K2*HCO3/h
PH <- -log10(h)
DIC <- CO2 + HCO3 + CO3
}
# ------------ case 25.) DIC and pCO2 given
# Positive root of the quadratic in HCO3 obtained from the DIC balance,
# written with the ratio K = K1/K2.
if (flag==25)
{
DIC <- var2
CO2 <- K0*fCO2
K <- K1/K2
HCO3 <- (1/2)*(-K*K0*fCO2+sqrt((K*K0*fCO2)^2 - 4*(K*K0*fCO2)*(K0*fCO2-DIC)))
CO3 <- DIC - CO2 - HCO3
h <- K1*CO2/HCO3
PH <- -log10(h)
}
# ------------ CALCULATION OF ALK for the cases where it was not an input
# NOTE(review): this assignment shadows the function Ks() with its numeric
# value; it works because Ks is only used as a number below, but the
# shadowing is fragile.
Ks <- Ks(S=S, T=T, P=P)
cases <- c(1, 2, 3, 5, 6, 7, 9, 10, 12, 14, 21, 22, 23, 24, 25)
if (flag %in% cases){
x <- h
hco3 <- DIC*x*K1/(x*x + K1*x + K1*K2)
co3 <- DIC*K1*K2/(x*x + K1*x + K1*K2)
boh4 <- bor/(1+x/Kb)
oh <- Kw/x
h3po4 <- Pt*(x^3)/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
hpo4 <- Pt*K1p*K2p*x/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
po4 <- Pt*K1p*K2p*K3p/(x^3+K1p*x^2+K1p*K2p*x+K1p*K2p*K3p)
siooh3 <- Sit/(1+x/Ksi)
if(pHscale=="F"){hfree <- x ## if pHscale = free scale
htot <- 10^(-pHconv(flag=2, pH=(-log10(x)), S=S, T=T, P=P))}
if(pHscale=="T"){hfree <- 10^(-pHconv(flag=4, pH=(-log10(x)), S=S, T=T, P=P))
htot <- x}
if(pHscale=="SWS"){hfree <- 10^(-pHconv(flag=5, pH=(-log10(x)), S=S, T=T, P=P))
htot <- 10^(-pHconv(flag=1, pH=(-log10(x)), S=S, T=T, P=P))}
# hfree/htot are the free- and total-scale H+ used for hso4 and hf below
hso4 <- ST/(1+Ks/hfree)
hf <- fluo/(1+Kf/htot)
ALK <- hco3+2*co3+boh4+oh+hpo4+2*po4+siooh3-hfree-hso4-hf-h3po4
}
##########################################################
# CALCULATION OF ARAGONITE AND CALCITE SATURATION STATE  #
##########################################################
# Omega = [Ca2+][CO3]/Ksp; 0.01028*(S/35) is presumably the total calcium
# concentration (mol/kg) assumed proportional to salinity -- TODO confirm
# the reference for this constant.
Oa <- ((0.01028*(S/35))*CO3)/Kspa
Oc <- ((0.01028*(S/35))*CO3)/Kspc
# pCO2 and fCO2 converted to microatmospheres for the output table
pCO2 <- pCO2*1e6
fCO2 <- fCO2*1e6
# one output row per input; accumulated into RES by the enclosing loop
res <- data.frame(flag,S,T,P,PH,CO2,pCO2,fCO2,HCO3,CO3,DIC,ALK,Oa,Oc)
}
RES<- rbind(RES, res)
}
names(RES) <- c("flag", "S", "T", "P", "pH", "CO2", "pCO2", "fCO2", "HCO3", "CO3", "DIC", "ALK", "OmegaAragonite", "OmegaCalcite")
return(RES)
}
|
17e66b6b58917e493b448effab8ebeed58b26593 | 5483f96944be1d5c164f1478337bbf0ef2040cb9 | /R/adj_file_nos.R | cb5ece74eea46a909259b3a4f0b977c4bcaed8bf | [] | no_license | cran/ptspotter | da898cc45d0d2d621ed543ac41a78a179959c645 | b87d6088ba511e9516773c1c28e9dd1a3c316a23 | refs/heads/master | 2023-08-30T23:40:51.686523 | 2023-08-13T14:00:02 | 2023-08-13T15:30:43 | 360,262,383 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,901 | r | adj_file_nos.R | #' Adjust file numbers.
#'
#' This function is used to increment / decrease sequential scripts within the
#' specified directory, allowing efficient adjustment of script sequence for
#' additional or removed files.
#'
#' @param target Required. The number in the sequential scripts to begin the
#' adjustment. Use single digits only. The adjustment will affect script with
#' that leading digit and greater.
#' @param directory The directory holding the sequential
#' scripts.
#' @param action Defaults to "up". Whether to adjust file numbers up or down.
#'
#' @param step Defaults to 1. The step by which to increment or decrement the
#' file numbering.
#'
#' @return Renumbers filenames in the specified directory, according to the
#' specified action. Only affects the target file and above.
#'
#'@importFrom stringr str_count str_extract str_remove
#'
#' @examples
#' \dontshow{.old_wd <- setwd(tempdir())}
#'
#' seq_file_ops(n = 10, target_dir = "munge")
#'
#' # Increase files numbered 6 and above by 1
#' adj_file_nos(target = 6, directory = "munge")
#'
#' # Increase above target files by a further 2
#' adj_file_nos(target = 6, directory = "munge", step = 2)
#'
#' # Use step = "down" to restore original sequence
#' adj_file_nos(target = 6, directory = "munge", action = "down", step = 3)
#'
#' # writing books or websites:
#' seq_file_ops(n = 5, target_dir = "images", filetype = "png")
#' # adjust by decimals
#' adj_file_nos(target = 1, directory = "images", step = 0.1)
#'
#' # tidying up environment
#' unlink(c("munge", "images"), recursive = TRUE)
#'
#' \dontshow{setwd(.old_wd)}
#'
#' @export
adj_file_nos <- function(target, directory = NULL, action = "up", step = 1) {
# Renumbers sequentially-prefixed files on disk via file.rename() (side
# effect). Vector targets are rejected for now; scalar-only keeps the
# renumbering logic simple (future compatibility).
if(length(target) > 1) {
stop("Please use single digits for `target` only.")
}
# list all files in specified directory
files_found <- list.files(directory)
# keep only filenames that start with a digit (the numbered scripts)
num_filenms <- files_found[grepl("^[0-9].", files_found)]
# if action == up, reverse the vector so the highest numbers are renamed
# first -- otherwise renaming e.g. 06 -> 07 would clobber the existing 07
if(action == "up"){
num_filenms <- rev(num_filenms)
}
# extract the leading number; "^[0-9]." matches the first digit plus ONE
# following character, so this relies on the 2-character zero-padded
# prefixes produced by seq_file_ops() -- NOTE(review): prefixes of 3+
# characters (e.g. files numbered >= 100) would be truncated here; confirm
# this is out of scope.
nums_only <- as.numeric(str_extract(num_filenms, "^[0-9]."))
# the remainder of each filename with the numeric prefix stripped
alpha_only <- str_remove(num_filenms, "^[0-9].")
# working copy of the numbers, shifted below
nums_new <- nums_only
# shift every number >= target up or down by `step`; files numbered below
# the target keep their original numbers
if (action == "up") {
# any file numbers greater than the specified target, increase by step
nums_new[nums_new >= target] <- nums_new[nums_new >= target] + step
# if action == down, decrease numbers from target and larger down by step
} else if (action == "down") {
# any file numbers greater than specified target, decrease by step
nums_new[nums_new >= target] <- nums_new[nums_new >= target] - step
}
# re-pad single-character numbers with a leading zero ("7" -> "07")
nums_new[str_count(nums_new) == 1] <- paste0(
"0", nums_new[str_count(nums_new) == 1]
)
# rebuild the full relative paths for the new names...
adj_filenames <- paste(directory, paste0(nums_new, alpha_only),
sep = "/")
# ...and for the old names
old_nums <- paste(directory, num_filenms, sep = "/")
# perform the renames on disk (only the adjusted files)
file.rename(from = old_nums, to = adj_filenames)
# undo the earlier reversal so the confirmation message reads in order
if(action == "up"){
old_nums <- rev(old_nums)
adj_filenames <- rev(adj_filenames)
}
# report what was renamed
message(paste(
length(old_nums), "Filenames adjusted.\n",
"From:", paste(basename(old_nums), collapse = ", "), "\n",
"To:",
paste(basename(adj_filenames), collapse = ", ")
))
}
|
d37cae39ba19e54e33e1f35f3b5ec833a5e11675 | e9aec749b5e84915816eaf435c90d740945c297e | /Projects/URLLinkFinder/man/traverseLinks.Rd | 69f7157a61909db8488bbf47feadd98e26978299 | [] | no_license | omegahat/XDynDocs | 399fe9eb6de25ec1423cc4275d0971f7945b9caf | a7c850f4e893640983bd2c68cdcb9af5d3bc99d7 | refs/heads/master | 2021-01-19T05:01:47.981346 | 2012-04-12T15:46:05 | 2012-04-12T15:46:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,700 | rd | traverseLinks.Rd | \name{traverseLinks}
\alias{traverseLinks}
\title{Collect hyperlinks in HTML pages}
\description{
This function is used to collect a catalog of
links within HTML pages and to recursively process these
links to find their links and so on.
}
\usage{
traverseLinks(urls, max = -1, filter = dropLink()$dropLink, processURL = processURLLinks)
}
\arguments{
\item{urls}{a list or character vector giving the URIs from which to
start the traversal.}
\item{max}{the total number of URIs to process.
This is used to limit the number of URIs processed when
debugging or handling very large sites.}
\item{filter}{a function that can be used to process the
URI names that are returned from processURL.
This can be used, for example, to discard
links not within a particular domain or
within a particular sub-directory not of interest.
}
\item{processURL}{a function that is called to parse the specified
URI. This allows us to easily change how we process a document,
e.g. following images, sound files, etc.
}
}
\details{
This is a dynamically iterative method that maintains a queue
of pages that need to be processed. As we encounter
links, we resolve their names and store these for processing
later.
}
\value{
A list with two elements: links and errors.
\item{links}{
The names of the elements of the list are the fully-qualified
URIs that were traversed.
Each element is a character vector corresponding to a URI
and the vector contains the
forward links forthat URI. In other words, it is
a character vector containing the
fully-qualified names of the URIs
found in the \code{<a href=...>} elements in the
HTML.
Currently, we don't include images, sound, etc.
that are "included" in the original URI, but limit
ourselves to the link structure.
Also, we don't handle JavaScript.
}
\item{errors}{
a character vector giving the names of the URIs which were
not parsed because of errors.
}
}
\author{Duncan Temple Lang <duncan@wald.ucdavis.edu>}
\seealso{
\code{\link{processURLLinks}}
\code{\link[XML]{htmlTreeParse}}
\code{\link{dropLink}}
\code{\link{URI}}
\code{\link{mergeURI}}
}
\examples{
traverseLinks("http://www.stat.berkeley.edu/users/nolan/test0.html")
traverseLinks("http://www.geocities.com/vi0linbear")
\dontrun{
d = dropLink()
gg = traverseLinks("http://www.ggobi.org/index.html", filter = d$dropLink, max = 50)
numLinks = sapply(gg, function(x) length(unique(x)))
hist(numLinks)
summary(numLinks)
}
\dontrun{
# Fail
traverseLinks("http://www.omegahat.org", max = 10)
traverseLinks("http://www.omegahat.org/index.html", max = 10)
}
}
\keyword{IO}
|
5d9ee9f663c18903fe2b9959bbc51234f55f0883 | ea7af3b4112333ba3b39d0018c72ccab285361b2 | /man/cohere.Rd | 60af12ceb8445781d61f9c8b928eb0b325439f60 | [] | no_license | cran/dfcrm | 8c412a456595a4e6b676f679791c43a3866d857a | 18891ccb969e3e4f87e4489a04b48df227bb9273 | refs/heads/master | 2020-04-06T04:09:06.222620 | 2019-01-26T15:38:38 | 2019-01-26T15:38:38 | 17,695,480 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,085 | rd | cohere.Rd | \name{cohere}
\alias{cohere}
\title{Coherence of two-stage CRM}
\description{
Returns a message on the coherence status of a two-stage CRM
design.}
\usage{
cohere(prior, target, x0, method = "bayes", model = "empiric",
intcpt = 3, scale = sqrt(1.34), detail = TRUE)
}
\arguments{
\item{prior}{A vector of initial guesses of toxicity probabilities
    associated with the doses.}
\item{target}{The target DLT rate.}
\item{x0}{The initial design containing a non-decreasing sequence of
dose levels. The length of the initial design is the sample size.}
\item{method}{A character string to specify the method for parameter
estimation. The default method ``bayes'' estimates the model
parameter by the posterior mean. Maximum likelihood estimation is
specified by ``mle''.}
\item{model}{A character string to specify the working model used in
the method. The default model is ``empiric''. A one-parameter
logistic model is specified by ``logistic''.}
\item{intcpt}{The intercept of the working logistic model. The
default is 3. If \code{model}=``empiric'', this argument will be
ignored.}
\item{scale}{Standard deviation of the normal prior of the model
parameter. Default is sqrt(1.34).}
\item{detail}{If TRUE, details about incoherent escalations
will be displayed.}
}
\value{
\item{message}{A string character giving a message regarding the
coherence status of a two-stage CRM design.}
}
\seealso{
\code{\link{crm}}
}
\references{
Cheung, Y. K. (2005). Coherence principles in dose-finding studies.
Biometrika 92:863-873.
Cheung, Y. K. (2011). Dose Finding by the Continual Reassessment Method. New York: Chapman & Hall/CRC Press.
}
\examples{
prior <- c(0.05, 0.10, 0.20, 0.35, 0.50, 0.70)
x0 <- c(rep(1,3), rep(2,3), rep(3,3), rep(4,3), rep(5,3), rep(6,9))
# The above design is coherent when target rate = 0.20
foo <- cohere(prior, target=0.2, x0)
foo
# The design is incoherent if a larger target DLT rate is used.
foo2 <- cohere(prior, target=0.3, x0)
}
\keyword{datasets}
|
eba90301d7b0c0858577f0429c1993c66e882a53 | bed9f5b2c923aa4f9d9e94296530a7b82f923585 | /Rpiday2.R | 74b24fb0da8f2a7a061bf23c1f312e43ef1eb0f6 | [
"MIT"
] | permissive | hack-r/Rpiday2 | 509f22e9f13a3976da562ec3985ad5c71eab1d6a | eb8267d3fa6c3379e385de256f8e19c6866558e3 | refs/heads/master | 2021-01-10T11:06:06.083247 | 2016-03-14T23:28:21 | 2016-03-14T23:28:21 | 53,893,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,799 | r | Rpiday2.R | ## File: Rpiday2.R
## Desc: Just a little Pi Day fun
## Copyright: (c) 2016, Jason D. Miller, http://hack-r.com
# Load Libraries ----------------------------------------------------------
source("functions.R")
# Source Pi ---------------------------------------------------------------
source(pi.R)
# Greet the user, print some stuff about Pi -------------------------------
cat("
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Greetings!
Today is March 14, 2016 -- Pi Day!
Pi (π) is the ratio of a circle’s circumference to its diameter.
Pi is a constant, irrational, transcendental number, which makes it really cool.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# Print Pi ----------------------------------------------------------------
paste("In short, the number Pi is equal to ", sprintf("%.15f", pi),
" but that's only the first few digits.")
# Pi Fibonacci Sequence ---------------------------------------------------
cat("This year, we'll look at the relationship between Pi and Fibonacci sequences. \n")
cat("Until very recently there were just two methods used to compute pi (π),
one invented by the Greek mathematician Archimedes,
and the other by the Scottish mathematician James Gregory. \n")
cat("If we use Sir Gregory's arc tangent method, you'll notice a pattern...")
pi/4
atan(1)
pi/4 == atan(1)
atan(1/3)
atan(1/5) + atan(1/8)
atan(1/8)
atan(1/13) + atan(1/21)
cat("We can combine what we saw above")
pi/4
atan(1/2) + atan(1/3)
atan(1/2) + atan(1/5) + atan(1/8)
atan(1/21)
atan(1/34) + atan(1/55)
cat("You'll notice that the pattern is a Fibonacci sequence! \n")
cat(" We have just seen that there are infinitely many formulae for π using the Fibonacci numbers!") |
b38c3f577fde7f667705dbcffa6c5d5ca354fe70 | 10c97b033b7d93d500a4dd563234eef128dc43ab | /tests/testthat/api.sleeper.app/v1/league/521379020332068864.R | 42684c4cf9361c731354c58b0653dab0405802d9 | [
"MIT"
] | permissive | tonyelhabr/ffscrapr | f38e7c87bb65ddbf6e1c9736c16e56944760af46 | 4e0944da56d8890c441c4abe9c25bc2477a1e388 | refs/heads/main | 2023-03-10T08:48:01.840281 | 2020-12-16T06:19:07 | 2020-12-16T06:19:07 | 328,791,006 | 0 | 0 | NOASSERTION | 2021-01-11T23:59:24 | 2021-01-11T21:03:44 | null | UTF-8 | R | false | false | 5,140 | r | 521379020332068864.R | structure(list(
url = "https://api.sleeper.app/v1/league/521379020332068864/",
status_code = 200L, headers = structure(list(
date = "Tue, 13 Oct 2020 01:45:17 GMT",
`content-type` = "application/json; charset=utf-8", vary = "Accept-Encoding",
`cache-control` = "max-age=0, private, must-revalidate",
`x-request-id` = "3a8ec8f235282915bb19ab12c0a72c22",
`access-control-allow-origin` = "*", `access-control-expose-headers` = "etag",
`access-control-allow-credentials` = "true", `strict-transport-security` = "max-age=15724800; includeSubDomains",
`content-encoding` = "gzip", `cf-cache-status` = "MISS",
`cf-request-id` = "05c13b2af60000ca6f41b69200000001",
`expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
server = "cloudflare", `cf-ray` = "5e156157fe62ca6f-YUL"
), class = c(
"insensitive",
"list"
)), all_headers = list(list(
status = 200L, version = "HTTP/2",
headers = structure(list(
date = "Tue, 13 Oct 2020 01:45:17 GMT",
`content-type` = "application/json; charset=utf-8",
vary = "Accept-Encoding", `cache-control` = "max-age=0, private, must-revalidate",
`x-request-id` = "3a8ec8f235282915bb19ab12c0a72c22",
`access-control-allow-origin` = "*", `access-control-expose-headers` = "etag",
`access-control-allow-credentials` = "true", `strict-transport-security` = "max-age=15724800; includeSubDomains",
`content-encoding` = "gzip", `cf-cache-status` = "MISS",
`cf-request-id` = "05c13b2af60000ca6f41b69200000001",
`expect-ct` = "max-age=604800, report-uri=\"https://report-uri.cloudflare.com/cdn-cgi/beacon/expect-ct\"",
server = "cloudflare", `cf-ray` = "5e156157fe62ca6f-YUL"
), class = c(
"insensitive",
"list"
))
)), cookies = structure(list(
domain = "#HttpOnly_.sleeper.app",
flag = TRUE, path = "/", secure = TRUE, expiration = structure(1605145349, class = c(
"POSIXct",
"POSIXt"
)), name = "__cfduid", value = "REDACTED"
), row.names = c(
NA,
-1L
), class = "data.frame"), content = charToRaw("{\"total_rosters\":12,\"status\":\"in_season\",\"sport\":\"nfl\",\"shard\":332,\"settings\":{\"max_keepers\":1,\"draft_rounds\":4,\"trade_review_days\":0,\"reserve_allow_dnr\":1,\"capacity_override\":1,\"pick_trading\":1,\"taxi_years\":1,\"taxi_allow_vets\":0,\"last_report\":4,\"disable_adds\":0,\"waiver_type\":2,\"bench_lock\":0,\"reserve_allow_sus\":1,\"type\":2,\"reserve_allow_cov\":1,\"waiver_clear_days\":2,\"daily_waivers_last_ran\":12,\"waiver_day_of_week\":2,\"start_week\":1,\"playoff_teams\":4,\"num_teams\":12,\"reserve_slots\":10,\"playoff_round_type\":2,\"daily_waivers_hour\":0,\"waiver_budget\":200,\"reserve_allow_out\":1,\"offseason_adds\":1,\"last_scored_leg\":4,\"playoff_seed_type\":0,\"daily_waivers\":1,\"playoff_week_start\":14,\"daily_waivers_days\":567,\"league_average_match\":0,\"leg\":5,\"trade_deadline\":13,\"reserve_allow_doubtful\":0,\"taxi_deadline\":4,\"reserve_allow_na\":0,\"taxi_slots\":5,\"playoff_type\":0},\"season_type\":\"regular\",\"season\":\"2020\",\"scoring_settings\":{\"pass_2pt\":2.0,\"pass_int\":-1.0,\"fgmiss\":-1.0,\"rec_yd\":0.10000000149011612,\"xpmiss\":-1.0,\"fgm_30_39\":3.0,\"blk_kick\":2.0,\"pts_allow_7_13\":4.0,\"ff\":1.0,\"fgm_20_29\":3.0,\"fgm_40_49\":4.0,\"pts_allow_1_6\":7.0,\"st_fum_rec\":1.0,\"def_st_ff\":1.0,\"st_ff\":1.0,\"pts_allow_28_34\":-1.0,\"fgm_50p\":5.0,\"fum_rec\":2.0,\"def_td\":6.0,\"fgm_0_19\":3.0,\"int\":2.0,\"pts_allow_0\":10.0,\"pts_allow_21_27\":0.0,\"rec_2pt\":2.0,\"rec\":0.5,\"xpm\":1.0,\"st_td\":6.0,\"def_st_fum_rec\":1.0,\"def_st_td\":6.0,\"sack\":1.0,\"rush_2pt\":2.0,\"rec_td\":6.0,\"pts_allow_35p\":-4.0,\"pts_allow_14_20\":1.0,\"rush_yd\":0.10000000149011612,\"pass_yd\":0.03999999910593033,\"pass_td\":4.0,\"rush_td\":6.0,\"fum_lost\":-2.0,\"fum\":-1.0,\"safe\":2.0},\"roster_positions\":[\"QB\",\"RB\",\"RB\",\"WR\",\"WR\",\"WR\",\"TE\",\"FLEX\",\"FLEX\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN
\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\",\"BN\"],\"previous_league_id\":\"464109311174373376\",\"name\":\"DLP Dynasty League\",\"metadata\":null,\"loser_bracket_id\":null,\"league_id\":\"521379020332068864\",\"last_read_id\":null,\"last_pinned_message_id\":null,\"last_message_time\":1602511717292,\"last_message_text_map\":{},\"last_message_text\":\"well there's always next season guys rip the fantasy\",\"last_message_id\":\"621382815673585664\",\"last_message_attachment\":null,\"last_author_is_bot\":false,\"last_author_id\":\"589594433561858048\",\"last_author_display_name\":\"Conquistador00\",\"last_author_avatar\":null,\"group_id\":null,\"draft_id\":\"521379020332068865\",\"company_id\":null,\"bracket_id\":null,\"avatar\":null}"),
date = structure(1602553517, class = c("POSIXct", "POSIXt"), tzone = "GMT"), times = c(
redirect = 0, namelookup = 3.7e-05,
connect = 4.1e-05, pretransfer = 0.000146, starttransfer = 0.322427,
total = 0.32252
)
), class = "response")
|
868581a4b191586a3cc552d35a5308cf506a8102 | 5eb7493308e326f7bcee6820d1125198137cd08b | /Supplemental_Script_2_Calculate_effect_sizes.R | 629d6240a0f7ab1729e305addd07e4c968d77b0d | [] | no_license | jmwhitha/Trimming_and_decon | 34fe8449cf5e4e8bf70616dde87c4866ee566ca8 | 35bc81ab942b05054bd348c730b28bc8da62bb27 | refs/heads/main | 2023-04-16T09:19:01.254031 | 2021-04-22T17:32:48 | 2021-04-22T17:32:48 | 339,747,877 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,972 | r | Supplemental_Script_2_Calculate_effect_sizes.R | #Compute effect size of statistically significant differences
#Reference: Trimming and decontamination of metagenomic data can significantly impact assembly and binning metrics, phylogenomic and functional analysis
#Jason M. Whitham 2021
#Data for raw (assembly metrics)
raw_reads <-c('9117.5.MEGAHIT.assembly', '10158.8.MEGAHIT.assembly', '11263.1.MEGAHIT.assembly', '11306.3.MEGAHIT.assembly', '11306.1.MEGAHIT.assembly', '11260.6.MEGAHIT.assembly', '11260.5.MEGAHIT.assembly', '9053.2.MEGAHIT.assembly', '9672.8.MEGAHIT.assembly', '9053.4.MEGAHIT.assembly', '9053.3.MEGAHIT.assembly', '9053.5.MEGAHIT.assembly', '9108.2.MEGAHIT.assembly', '10186.3.MEGAHIT.assembly', '7333.1.MEGAHIT.assembly', '9117.8.MEGAHIT.assembly', '10158.6.MEGAHIT.assembly', '9117.7.MEGAHIT.assembly', '9117.6.MEGAHIT.assembly', '10186.4.MEGAHIT.assembly', '9108.1.MEGAHIT.assembly', '9117.4.MEGAHIT.assembly', '9041.8.MEGAHIT.assembly')
raw_total_contigs <-c(1183038.0, 362044.0, 1331533.0, 1042261.0, 506569.0, 1061986.0, 1015988.0, 1215308.0, 1096293.0, 1058433.0, 1184448.0, 721833.0, 1209055.0, 1044708.0, 763500.0, 795124.0, 1021022.0, 1134727.0, 1111363.0, 1138489.0, 518629.0, 1067385.0, 718988.0)
raw_10k_contigs <-c(7559.0, 2026.0, 10913.0, 7077.0, 2490.0, 9668.0, 9498.0, 7610.0, 5822.0, 6798.0, 8794.0, 3215.0, 5807.0, 6714.0, 2571.0, 4479.0, 6864.0, 9220.0, 5534.0, 9160.0, 1654.0, 7524.0, 2652.0)
raw_100k_contigs <-c(46.0, 12.0, 139.0, 77.0, 48.0, 119.0, 139.0, 55.0, 40.0, 46.0, 52.0, 11.0, 13.0, 114.0, 8.0, 21.0, 36.0, 70.0, 31.0, 79.0, 15.0, 41.0, 16.0)
raw_largest_contig <-c(238480.0, 214425.0, 981732.0, 1235329.0, 450787.0, 449860.0, 1831404.0, 303994.0, 229075.0, 225822.0, 213821.0, 223153.0, 162369.0, 1052999.0, 143149.0, 384383.0, 533581.0, 477964.0, 213302.0, 939775.0, 246044.0, 334225.0, 189163.0)
raw_total_length <-c(2191896544.0, 624530805.0, 2580725406.0, 1944130774.0, 893918652.0, 2024397572.0, 2011571274.0, 2188276796.0, 1952563350.0, 1931161472.0, 2172094081.0, 1237466876.0, 2150531073.0, 1912053702.0, 1294604033.0, 1402423204.0, 1856329992.0, 2171464829.0, 1956565172.0, 2148003718.0, 865456175.0, 1993953446.0, 1216967811.0)
raw_GC_percent <-c(62.76, 62.14, 62.11, 61.77, 63.06, 59.81, 59.06, 62.79, 63.6, 62.67, 63.27, 63.55, 64.27, 64.26, 65.07, 63.13, 61.74, 63.3, 64.36, 64.72, 63.48, 63.19, 63.44)
#Data for qc (assembly metrics)
# --- Assembly statistics for QC-filtered reads (23 samples) -------------------
# All qc_* vectors below are parallel: element j of each vector describes the
# sample named in element j of qc_reads.
# NOTE(review): qc_reads contains sample '7333.1' while Raw_Reads/QC_Reads
# below contain '7331.1', and the sample ordering differs between the two
# groups -- confirm the intended IDs and alignment before pairing vectors
# across groups.
qc_reads <-c('9117.5.QC.MEGAHIT.assembly', '10158.8.QC.MEGAHIT.assembly', '11263.1.QC.MEGAHIT.assembly', '11306.3.QC.MEGAHIT.assembly', '11306.1.QC.MEGAHIT.assembly', '11260.6.QC.MEGAHIT.assembly', '11260.5.QC.MEGAHIT.assembly', '9053.2.QC.MEGAHIT.assembly', '9672.8.QC.MEGAHIT.assembly', '9053.4.QC.MEGAHIT.assembly', '9053.3.QC.MEGAHIT.assembly', '9053.5.QC.MEGAHIT.assembly', '9108.2.QC.MEGAHIT.assembly', '10186.3.QC.MEGAHIT.assembly', '7333.1.QC.MEGAHIT.assembly', '9117.8.QC.MEGAHIT.assembly', '10158.6.QC.MEGAHIT.assembly', '9117.7.QC.MEGAHIT.assembly', '9117.6.QC.MEGAHIT.assembly', '10186.4.QC.MEGAHIT.assembly', '9108.1.QC.MEGAHIT.assembly', '9117.4.QC.MEGAHIT.assembly', '9041.8.QC.MEGAHIT.assembly')
# Total contig count per assembly.
qc_total_contigs <-c(1177673.0, 351143.0, 1290706.0, 1020207.0, 492752.0, 1031110.0, 992335.0, 1206733.0, 1081516.0, 1047224.0, 1175033.0, 714009.0, 1199339.0, 1026124.0, 748848.0, 788426.0, 1000991.0, 1127638.0, 1104718.0, 1118259.0, 514301.0, 1060948.0, 701661.0)
# Contigs of at least 10 kb / 100 kb, per assembly.
qc_10k_contigs <-c(7566.0, 2011.0, 10589.0, 6974.0, 2501.0, 9310.0, 9431.0, 7594.0, 5850.0, 6738.0, 8710.0, 3172.0, 5691.0, 6688.0, 2509.0, 4428.0, 6851.0, 9186.0, 5432.0, 9182.0, 1646.0, 7553.0, 2594.0)
qc_100k_contigs <-c(37.0, 14.0, 147.0, 84.0, 46.0, 128.0, 139.0, 56.0, 35.0, 41.0, 47.0, 14.0, 15.0, 116.0, 8.0, 23.0, 33.0, 66.0, 29.0, 83.0, 11.0, 37.0, 16.0)
# Longest contig (bp) and total assembly length (bp), per assembly.
qc_largest_contig <-c(238481.0, 213266.0, 969148.0, 1195236.0, 536441.0, 449999.0, 1831404.0, 316258.0, 266395.0, 225864.0, 310180.0, 223153.0, 162369.0, 1172225.0, 136759.0, 384383.0, 628222.0, 477964.0, 251356.0, 909164.0, 186562.0, 404583.0, 201602.0)
qc_total_length <-c(2182276600.0, 607791927.0, 2504659553.0, 1905685212.0, 874519186.0, 1971770469.0, 1972700920.0, 2172230088.0, 1929639607.0, 1909891884.0, 2154726090.0, 1222799627.0, 2132518680.0, 1881685843.0, 1267885014.0, 1390291730.0, 1824542955.0, 2157438614.0, 1944342577.0, 2113742330.0, 857783461.0, 1982261895.0, 1186079132.0)
# GC content (%) per assembly.
qc_GC_percent <-c(62.76, 62.12, 62.1, 61.76, 63.04, 59.78, 59.03, 62.79, 63.61, 62.67, 63.27, 63.56, 64.27, 64.27, 65.08, 63.12, 61.76, 63.3, 64.36, 64.73, 63.48, 63.19, 63.44)
# --- Bin quality statistics for assemblies built from RAW reads ---------------
# The Raw_* vectors are parallel to Raw_Reads (per-sample mean/median
# completeness, contamination, and single/multi-copy marker counts).
Raw_Reads <-c('9117.5_raw', '10158.8_raw', '11263.1_raw', '11306.3_raw', '11306.1_raw', '11260.6_raw', '11260.5_raw', '9108.1_raw', '9053.2_raw', '9672.8_raw', '9108.2_raw', '9053.4_raw', '9053.3_raw', '9117.4_raw', '9117.6_raw', '9117.7_raw', '9117.8_raw', '10158.6_raw', '10186.3_raw', '10186.4_raw', '7331.1_raw', '9053.5_raw', '9041.8_raw')
Raw_Bin_Counts <-c(65, 47, 139, 99, 55, 90, 115, 38, 86, 87, 69, 71, 95, 70, 62, 95, 65, 78, 85, 109, 45, 49, 52)
Raw_Mean_Completeness <-c(45.96, 58.83, 51.28, 56.54, 56.55, 63.26, 52.23, 58.47, 54.69, 53.7, 58.18, 60.32, 62.41, 65.52, 52.14, 50.97, 56.26, 57.0, 65.39, 54.89, 53.78, 53.56, 52.81)
Raw_Mean_Contamination <-c(24.03, 69.63, 67.13, 60.4, 53.21, 81.43, 55.06, 35.14, 54.71, 74.62, 54.7, 76.81, 66.86, 78.94, 67.85, 46.5, 92.78, 97.57, 121.14, 103.47, 75.71, 57.06, 65.71)
Raw_Mean_Single_Copy <-c(64.74, 103.91, 93.11, 76.5, 99.52, 112.61, 86.86, 98.89, 93.73, 76.82, 107.73, 91.71, 87.35, 81.28, 79.12, 85.37, 95.93, 83.01, 100.92, 80.27, 93.02, 89.42, 88.78)
Raw_Mean_Multi_Copy <-c(27.66, 17.26, 15.24, 20.58, 15.06, 21.89, 22.63, 19.62, 20.36, 18.31, 17.33, 15.53, 20.47, 27.25, 16.86, 9.08, 12.54, 19.53, 26.08, 17.34, 16.47, 16.8, 17.32)
Raw_Median_Completeness <-c(45.61, 65.41, 53.6, 60.38, 62.54, 74.79, 53.46, 58.27, 59.88, 59.48, 61.08, 69.28, 75.5, 77.49, 57.92, 48.54, 57.68, 60.58, 70.25, 58.65, 62.06, 55.89, 56.47)
Raw_Median_Contamination <-c(3.45, 4.12, 1.72, 3.73, 2.49, 3.44, 2.9, 2.76, 2.63, 1.94, 5.07, 2.45, 4.08, 5.22, 2.76, 1.38, 2.2, 2.22, 4.11, 2.5, 2.41, 2.16, 3.45)
Raw_Median_Single_Copy <-c(25.0, 67.5, 31.0, 36.0, 39.0, 59.0, 33.0, 65.0, 39.0, 40.0, 66.5, 39.0, 33.0, 41.0, 46.0, 38.5, 38.0, 37.5, 70.0, 31.0, 38.0, 30.0, 39.0)
Raw_Median_Multi_Copy <-c(2.0, 9.0, 2.0, 4.0, 5.0, 8.0, 4.0, 6.0, 3.0, 4.0, 10.5, 4.5, 8.0, 10.0, 6.0, 2.0, 3.0, 4.0, 10.0, 4.0, 5.0, 4.0, 5.0)
# --- Bin quality statistics for assemblies built from QC-filtered reads -------
# The QC_* vectors are parallel to QC_Reads (same sample order as Raw_Reads).
QC_Reads <-c('9117.5_qc', '10158.8_qc', '11263.1_qc', '11306.3_qc', '11306.1_qc', '11260.6_qc', '11260.5_qc', '9108.1_qc', '9053.2_qc', '9672.8_qc', '9108.2_qc', '9053.4_qc', '9053.3_qc', '9117.4_qc', '9117.6_qc', '9117.7_qc', '9117.8_qc', '10158.6_qc', '10186.3_qc', '10186.4_qc', '7331.1_qc', '9053.5_qc', '9041.8_qc')
QC_Bin_Counts <-c(67, 49, 132, 100, 53, 87, 121, 37, 95, 84, 76, 68, 94, 68, 65, 103, 62, 85, 93, 106, 48, 50, 51)
QC_Mean_Completeness <-c(43.78, 57.52, 48.0, 57.83, 56.04, 61.34, 52.56, 58.38, 51.41, 50.47, 60.85, 59.81, 63.0, 65.41, 52.03, 56.04, 58.18, 60.34, 61.34, 50.62, 54.41, 54.39, 54.49)
QC_Mean_Contamination <-c(21.53, 59.82, 58.14, 60.23, 50.14, 84.7, 59.04, 39.3, 54.22, 69.66, 54.38, 67.25, 65.32, 80.07, 61.0, 47.16, 82.49, 102.6, 119.25, 100.18, 72.7, 58.44, 64.68)
QC_Mean_Single_Copy <-c(69.0, 108.55, 77.43, 80.48, 96.57, 99.11, 80.71, 101.17, 88.28, 64.81, 111.78, 94.92, 99.15, 94.04, 74.4, 101.43, 95.18, 85.24, 90.4, 81.43, 77.26, 91.34, 94.68)
QC_Mean_Multi_Copy <-c(22.82, 16.88, 15.01, 18.69, 15.69, 22.28, 23.36, 16.21, 17.32, 15.42, 20.59, 22.48, 21.1, 29.04, 15.52, 13.0, 16.99, 22.41, 22.93, 19.65, 21.53, 13.94, 15.6)
QC_Median_Completeness <-c(35.71, 60.92, 51.15, 62.19, 64.85, 74.41, 55.16, 57.41, 53.02, 53.1, 67.68, 68.56, 74.65, 79.59, 58.2, 52.42, 61.69, 72.1, 66.38, 48.38, 61.21, 58.51, 60.49)
QC_Median_Contamination <-c(1.75, 4.17, 0.93, 4.12, 2.56, 3.27, 2.59, 3.04, 2.72, 1.94, 3.3, 3.64, 3.68, 5.15, 3.4, 1.72, 3.2, 3.32, 3.51, 1.72, 3.25, 1.94, 2.21)
QC_Median_Single_Copy <-c(23.0, 77.0, 26.0, 38.5, 48.0, 37.0, 30.5, 77.0, 29.0, 32.0, 84.0, 44.0, 46.0, 51.5, 35.5, 61.0, 41.5, 36.5, 61.0, 36.0, 35.0, 34.0, 39.0)
QC_Median_Multi_Copy <-c(2.0, 9.0, 1.0, 5.5, 5.0, 6.0, 3.0, 7.0, 4.0, 2.0, 7.0, 5.0, 7.0, 9.5, 6.0, 4.0, 6.0, 4.5, 7.0, 2.0, 4.0, 3.0, 4.5)
# Effect-size metrics ----------------------------------------------------------
# Cliff's Delta: non-parametric effect size for paired data.
# Hedges's g:    parametric effect size; similar to Cohen's d but with a
#                small-sample bias correction.
# Install effsize (and devtools, which is needed to fetch it from GitHub) only
# when it is not already available -- an unconditional install.packages() call
# re-installs on every run of the script.
if (!requireNamespace("effsize", quietly = TRUE)) {
  if (!requireNamespace("devtools", quietly = TRUE)) {
    install.packages("devtools")
  }
  devtools::install_github("mtorchiano/effsize")
}
library(effsize)

# Cliff's Delta: total contig counts, raw vs QC assemblies.
# (raw_total_contigs is defined earlier in this script.)
res <- cliff.delta(raw_total_contigs, qc_total_contigs, return.dm = TRUE)
print(res)
# Recorded result:
#   delta estimate: 0.08506616 (negligible)
#   95% CI: [-0.2508704, 0.4027155]

# Cliff's Delta: total assembly length, raw vs QC.
res <- cliff.delta(raw_total_length, qc_total_length, return.dm = TRUE)
print(res)
# Recorded result:
#   delta estimate: 0.08884688 (negligible)
#   95% CI: [-0.2476729, 0.4062365]

# Hedges's g: contigs >= 10 kb, raw vs QC.
qc <- qc_10k_contigs
raw <- raw_10k_contigs
d <- c(qc, raw)
# One group label per paired sample; length(qc) replaces the previous
# hard-coded 23 and stays correct if the number of samples changes.
f <- rep(c("qc", "raw"), each = length(qc))
cohen.d(d, f, hedges.correction = TRUE)
# Recorded result:
#   g estimate: -0.02252096 (negligible)
#   95% CI: [-0.606651, 0.561609]

# Hedges's g: mean bin completeness, raw vs QC.
qc <- QC_Mean_Completeness
raw <- Raw_Mean_Completeness
d <- c(qc, raw)
f <- rep(c("qc", "raw"), each = length(qc))
cohen.d(d, f, hedges.correction = TRUE)
# NOTE(review): the numbers recorded below are identical to the 10k-contig
# result above -- likely a copy-paste; re-run and update before citing.
#   g estimate: -0.02252096 (negligible)
#   95% CI: [-0.606651, 0.561609]
b21c0fa3f101a17a0d077d8eba75a765beda3b8b | 0958d382d115a89410129f940bcdf6660f9dab7d | /class-notes/data-transformation/manipulating-strings.r | 96ab253feea56e48579490a32fee3944ff707fb7 | [] | no_license | nyrkuredla/learning-r | 3ba9e007b4783c40d9d14a3ce89f86d0b84bedf6 | cfdc5d8784dc80bbc32078f570305b4e41dd6a75 | refs/heads/main | 2023-06-10T23:38:59.363112 | 2021-07-08T00:49:50 | 2021-07-08T00:49:50 | 358,353,261 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,597 | r | manipulating-strings.r | ## string manipulation
# load libraries
library (stringr)
library (tidyverse)
# for string functions, can pass in either a string literal (standalone) or a variable w/string data
## first let's practice storing string literals in variables
# string literal
"abc"
# assigning it to a variable
str_three <- "abc"
# can use either single or double quotes
# assign to variable and print in one line
(str_print_ex <- "Assigning and printing using parentheses")
# multi-line:
(str_multi_line <- "whoa, it's one line
and another
one more")
# use cat() to display actual newline
cat (str_multi_line)
# storing multiple strings in one variable - "character vector
# can store literals, variables, and NA
str_chr_vector <- c ("string 1", str_print_ex, NA, str_three)
cat (str_chr_vector)
## str_length gives the length of a string, usually 1-to-1 per character but there are exceptions
str_length (str_three)
str_length (str_chr_vector)
# so when you pass it a vector, it will count each value separately
## str_c - join multiple strings into a single string
? str_c()
str_combine_literals <- str_c ("one", "two") %>% print ()
# onetwo
str_length (str_combine_literals)
# 6
str_chr_vector <- c ("five", "six", NA)
str_combine_mixed <- str_c (str_combine_literals, "three", str_chr_vector)
print (str_combine_mixed)
str_length (str_combine_mixed)
## recycling: replication of data in one vector to match the length of data in another
print (letters)
str_combine_letters <- str_c ("Letter: ", letters)
print (str_combine_letters)
str_length (str_combine_letters)
# when the length of both character vectors is the same, they just match up
# when one vector is smaller than the other, but the larger one is a multiple of the second, it will multiply
# when one is smaller than the other and there is no whole multiple, it will recycle anyway but throw a warning
str_123 <- c ("1", "2", "3")
str_c (str_123, letters) %>% print()
## collapsing and separating strings - sep and collapse arguments on str_c
str_c (letters, collapse = "")
str_c (letters, collapse = ",")
# str_sub: extract and replace substrings from a character vector
str_fox_dog <- "The crazy brown fox jumped over the lazy dog."
# first 9 characters, using positional arguments
str_fox_dog %>% str_sub (1, 9)
# last 9
str_fox_dog %>% str_sub (start = -9)
# just 'dog'
str_fox_dog %>% str_sub (start = -4, end = -2)
# replace "dog" with "cats"
# note: piping doesn't work for this
str_sub (str_fox_dog, start = -4, end = -2) <- "cats"
print (str_fox_dog)
## str_detect - use regex to search strings
# Define strings
str_apple <- c(" apple pie", "apple", "Apple pie cake",
"banana apple pie", "blueberry pie", "apple apple", "apricot applesause cake")
# Return true false vector for strings containing 'apple'
# Assign to match_index
print("strings containing 'apple'")
match_index <- str_detect(str_apple, "apple")
# print match_index
print(match_index)
# Print strings associated with
# TRUE in match_index
# Hint: Use the index inside [] for the string variable
str_apple[match_index] %>%
print()
# print strings containing "pie"
match_index <- str_detect (str_apple, "pie")
str_apple[match_index] %>%
print()
# print strings ENDING in "-pie"
match_index <- str_detect (str_apple, "pie$")
str_apple[match_index] %>%
print()
# print strings STARTING with "apple"
match_index <- str_detect (str_apple, "^apple")
str_apple [match_index] %>%
print ()
# print strings starting with EITHER Apple OR apple
match_index <- str_detect (str_apple, "^[Aa]pple")
str_apple [match_index] %>%
print ()
|
1910c9313679ae1f3de32f9c52c0ddf2678c6cb9 | c199c4414f7a080016460c74016d365a27c11bba | /stray/stray.r | f694e67b62131067881be41d6fa6d539b3e1c1e3 | [] | no_license | knazir/smac-resources | dc0c9adc3cfe08bbaa118f95af8298a067ec19b1 | 34e94ac0b84b17c4c8db6f1ae2e62c28533d0e2f | refs/heads/master | 2022-12-24T10:07:10.818906 | 2019-12-14T02:16:37 | 2019-12-14T02:16:37 | 227,952,347 | 0 | 0 | null | 2022-12-10T12:05:56 | 2019-12-14T02:02:20 | Python | UTF-8 | R | false | false | 277 | r | stray.r | raw_data = read.csv("./tibble_data.csv")
# Prepare the CSV data for stray's high-dimensional outlier detection.
data_matrix <- as.matrix(raw_data)
# as.tibble() is deprecated; as_tibble() is the current tibble API.
data_tibble <- tibble::as_tibble(data_matrix)
# BUG FIX: the original called find_HDoutliers(data, ...) but no object named
# `data` was ever created -- the prepared object is data_tibble.
outliers <- find_HDoutliers(data_tibble, k = 1, knnsearchtype = "brute", alpha = 0.25)
display_HDoutliers(data_tibble, outliers) + ggplot2::ggtitle("Aimbot Anomaly Detection")
3e865f5c55ee774cfee9183dbe66c0c91fab6162 | a8792a4288284653418fd560ef07a90a53a4db4e | /ShinyTutorial/lesson-5/app.R | c8884a1b11a0a99fd7d1d5d1dd98991e5e30c151 | [] | no_license | CristianAbrante/ShinyVisualizationAssignments | c5a3d189843ef694ab6a57a28a26eaf34078dea3 | a0beea8c92a8afbedaea1999e781870ca42c4755 | refs/heads/master | 2023-02-17T11:54:22.915005 | 2021-01-21T11:51:41 | 2021-01-21T11:51:41 | 224,014,764 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,219 | r | app.R | # App for the lessons 3, 4 and 5
library(shiny)
library(maps)     # map drawing, used by percent_map() in helpers.R
library(mapproj)  # map projections for the county plot
# helpers.R defines percent_map(), called from the server below.
source("helpers.R")
# 2010 US Census county data; the server reads counties$white/black/hispanic/asian.
counties <- readRDS("data/counties.rds")
# UI for the censusVis app: a sidebar with a census-variable selector and a
# two-ended percentage range slider, plus a main panel showing the county map.
# (The original template comments referred to a histogram app and were wrong.)
ui <- fluidPage(
  # Application title
  titlePanel("censusVis"),
  # Sidebar: choose the census variable and the percentage range of interest.
  sidebarLayout(
    sidebarPanel(
      p("Create demographic maps with information from the 2010 US Census."),
      selectInput("var",
                  label = "Choose a variable to display",
                  choices = c(
                    "Percent White",
                    "Percent Black",
                    "Percent Hispanic",
                    "Percent Asian"
                  ),selected = "Percent White"
      ),
      # Two-ended slider: input$bins arrives as c(min, max).
      sliderInput("bins",
                  "Range of Interest:",
                  min = 0,
                  max = 100,
                  value = c(0,100))
    ),
    # Main panel: the rendered county map (output$map in the server).
    mainPanel(
      #textOutput("selected_var"),
      #textOutput("selected_var_2")
      plotOutput("map")
    )
  )
)
# Server: render the demographic map for the selected variable and range.
server <- function(input, output) {
  output$map <- renderPlot({
    # One lookup keyed by the UI choice replaces the original three parallel
    # switch() calls (data column, map colour, legend label kept in sync).
    var_info <- switch(input$var,
                       "Percent White"    = list(data = counties$white,
                                                 color = "red",
                                                 label = "white"),
                       "Percent Black"    = list(data = counties$black,
                                                 color = "black",
                                                 label = "black"),
                       "Percent Hispanic" = list(data = counties$hispanic,
                                                 color = "blue",
                                                 label = "hispanic"),
                       "Percent Asian"    = list(data = counties$asian,
                                                 color = "darkgreen",
                                                 label = "asian"))
    # percent_map() (defined in helpers.R) draws the county choropleth;
    # input$bins is the two-ended range slider, i.e. c(min, max).
    percent_map(var_info$data, var_info$color, paste("%", var_info$label),
                input$bins[1], input$bins[2])
  })
}

# Run the application.
shinyApp(ui = ui, server = server)
|
042c109f8edfbe5fe919ec5f5c7fef80dcc362a5 | 47d321f1c911444704812e2c7aa403553163fe2f | /gbmTestBatch.R | b15b8022faaeb8cf198031afb6f1495c854a5abe | [] | no_license | jaredpark/cleanNLF | e57502fe5f15d6676f64fdb7a0c50266d4351f95 | f7e129fa9f1a16215d69a99b756cf48db17333b7 | refs/heads/master | 2020-03-30T21:32:32.212472 | 2013-07-09T23:52:35 | 2013-07-09T23:52:35 | 11,297,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 736 | r | gbmTestBatch.R |
# Batch driver for GBM validation runs.
# The configuration variables below are read by the sourced validation scripts.
decisionRule <- c('abs')
chooseMin <- FALSE
chooseParsimonious <- FALSE
chooseParsOnlyByTrees <- FALSE  # ignored if chooseParsimonious == TRUE
runName <- ''
rootDir <- '~/GitHub/VM_share'

# Load run settings (defines `settings`) and shared helper functions.
load(paste0(rootDir, '/', runName, '/settings.Rout'))
library(gbm)  # library() errors early if gbm is missing; require() only warns
source(paste0(rootDir, '/funcs.R'))
configurations <- read.table(paste0(rootDir, '/', runName, '/configMatrix.txt'),
                             header = TRUE)
betDelta <- settings$betDeltas
fpWeeks <- 181:345
lastWeek <- max(fpWeeks)  # loop-invariant; hoisted out of the inner loop

# One validation pass per (validation window size, final prediction week).
for (validateWindowSize in settings$validateWindowSizes) {
  print(paste(validateWindowSize, 'validation weeks'))
  for (finalPredWeek in fpWeeks) {
    print(paste('week', finalPredWeek, 'of', lastWeek))
    source(paste0(rootDir, '/validation1.R'))
  }
}
# NOTE(review): unlike every other source() call above, this path is relative
# to the current working directory rather than rootDir -- confirm intended.
source('validation2.R')
c1515aad219a14539f0353fc855effbee81f8a52 | cb41d91d9e15a8f5b5fe3d7941b4a23c821665f3 | /Loaddata.R | 972900c678a193057a758fd4c84824818bbda9b5 | [] | no_license | samir72/Project-CollegeMajor | 0e73da689668e7944236c602693ca7778fc760c5 | 41d219bd04e67c71ea7fbfea5f0f05d9b06eba4c | refs/heads/master | 2020-05-22T07:09:50.927309 | 2017-03-19T03:23:37 | 2017-03-19T03:23:37 | 84,679,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 493 | r | Loaddata.R | Loaddata <- function(file)
# Loaddata: read a CSV file and drop rows containing missing values.
#
# Args:
#   file: Path to a CSV file with a header row.
#
# Returns:
#   A data frame containing only the rows of `file` with no NA values.
Loaddata <- function(file) {
  # Read the CSV; keep strings as plain character vectors, not factors.
  Dataload <- read.csv(file, header = TRUE, stringsAsFactors = FALSE)
  # BUG FIX: the original computed Dataload[complete.cases(Dataload), ] but
  # never assigned or returned the result, so the function returned the
  # UNFILTERED data frame with the NA rows still present. Returning the
  # filtered subset as the last expression fixes that.
  Dataload[complete.cases(Dataload), ]
}
442980b1fee1c7ceb27d3c1d898359162595e19a | 96da706d22c938b2912b4254eb44e5a4b49a000b | /02_script/s016TZA/02a_merge_recode_s016TZA.R | 41ee39b5862152b4b02edd1d79b3efbf651845cb | [] | no_license | jmroemer/CATI_Panel_Attrition | 32c1c327078c49feed0aaf56100e3230a9bf3592 | 5c4472bc6c672a1cc7598f82007c8d6f2521033e | refs/heads/main | 2023-05-02T17:52:06.189238 | 2021-05-26T23:16:10 | 2021-05-26T23:16:10 | 360,322,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,127 | r | 02a_merge_recode_s016TZA.R | #------------------------------------------------------------------------------#
# TITLE: 02a_merge_recode_s016TZA.R #
# PURPOSE: Merge the relevant variables of the F2F-Baseline survey with #
# the columns of the CATI waves containing the call status, #
# remove ambiguous observations and convert the demographic #
# variables to the universal format used in this analysis. #
# INPUTS: <dfs>: named list of dataframes (one per survey round). #
# OUTPUTS: <data_wide>: dataframe containing the relevant columns of the #
# baseline and call status of all waves. #
# <data_wide_clean>: Identical to <data_wide> with the exception #
# that all ambiguous observations have been #
# removed. #
# <data_wide_formatted>: Identical to <data_wide_clean> with the #
# exception that the demographic variables #
# have been converted to the universal #
# format used in this analysis and all #
# reserve household observations have been #
# removed. #
# PUBLISHED: 2021-04-21 #
# AUTHOR: Janina Roemer #
#------------------------------------------------------------------------------#
## Section 1: Select relevant variables ########################################
# Create a dataframe data_wide containing only the baseline (F2F) columns
# considered relevant for the attrition analysis.
data_wide <- dplyr::select(dfs[[1]], uhn, # Unique household number (join key)
                           Sttng, # Setting: 1 = rural, 2 = urban
                           Qn10201, # Gender identity
                           age, # Age in years
                           Qn10601, # Main occupation
                           edlevel, # "Highest Level of schooling (grade) completed by respondent"
                           s1, s2, s3, s4, # Screening items; may help verify that the baseline demographics belong to the same person contacted in later waves
                           s5, # Interviewer observation: "Is ... the 1st, 2nd, 3rd or 4th randomly selected respondent?"
                           Qn1010, # "Is (Name) the selected respondent?"
                           reshh, # Reserve-household flag (dropped later)
                           num_responses) # Number of responses per observation
## Section 2: Format the dataframe #############################################
# Create demographic columns with standardized "de_" names for further
# analysis. Note: de_employed and de_agriculture both start as copies of
# Qn10601 (main occupation); de_agriculture is recoded in adapt.coding().
data_wide["de_urban"] <- data_wide$Sttng
data_wide["de_gender"] <- data_wide$Qn10201
data_wide["de_age"] <- data_wide$age
data_wide["de_employed"] <- data_wide$Qn10601
data_wide["de_agriculture"] <- data_wide$Qn10601
data_wide["de_education"] <- data_wide$edlevel
# Append the "_baseline" suffix to every column of <data_wide>, then restore
# "uhn" (the join key, assumed to be the first column) to its unsuffixed name.
colnames(data_wide) <- paste(colnames(data_wide), "_baseline", sep = "")
colnames(data_wide)[1] <- "uhn"
# For every CATI round, append a suffix identifying the wave (e.g. "_r02")
# to all columns but "uhn", which is kept unsuffixed for joining.
for(i in seq_along(dfs)){
  if (i != 1){
    colnames(dfs[[i]])[colnames(dfs[[i]]) != "uhn"] <-
      paste(colnames(dfs[[i]])[colnames(dfs[[i]]) != "uhn"],
            names(dfs)[[i]], sep = "_")
  }
}
## Section 3: Merging the call status information to data_wide #################
# For every CATI round, keep only the identifier/call-status columns and
# left-join them onto data_wide by the unique household number (uhn).
dfs_short <- dfs
# names(dfs)[-1] skips baseline; unlike 2:length(dfs) it is also safe (empty)
# when dfs contains only the baseline.
for (i in names(dfs)[-1]) {
  # Round-specific column selection:
  #  - r02 additionally keeps q1 (education level) for crosschecking.
  #  - r08 additionally keeps its explicit Status column.
  #  - r14 additionally keeps its call-status columns type1/type2.
  #  - every other round keeps only num_responses and the respondent dummy.
  if (i == "r02") {
    dfs_short[[i]] <- dplyr::select(dfs_short[[i]], uhn,
                                    paste0("q1_", i),
                                    paste0("num_responses_", i),
                                    paste0("respondent_", i))
  } else if (i == "r08") {
    dfs_short[[i]] <- dplyr::select(dfs_short[[i]], uhn,
                                    paste0("Status_", i),
                                    paste0("num_responses_", i),
                                    paste0("respondent_", i))
  } else if (i == "r14") {
    dfs_short[[i]] <- dplyr::select(dfs_short[[i]], uhn,
                                    paste0("type1_", i),
                                    paste0("type2_", i),
                                    paste0("num_responses_", i),
                                    paste0("respondent_", i))
  } else {
    dfs_short[[i]] <- dplyr::select(dfs_short[[i]], uhn,
                                    paste0("num_responses_", i),
                                    paste0("respondent_", i))
  }
  # Merge the round's columns onto the wide baseline data.
  data_wide <- left_join(data_wide, dfs_short[[i]], by = "uhn")
  # Consistency check: the NAs in the freshly merged (last) column must equal
  # the number of baseline households missing from round i.
  stopifnot(sum(is.na(data_wide[, ncol(data_wide)]))
            == nrow(dfs[[1]]) - nrow(dfs[[i]]))
  # Progress report for the user: overlap of uhn between baseline and round i.
  n_in_both <- sum(dfs_short[[i]]$uhn %in% data_wide$uhn)
  n_baseline_only <- sum(!(data_wide$uhn %in% dfs_short[[i]]$uhn))
  n_round_only <- sum(!(dfs_short[[i]]$uhn %in% data_wide$uhn))
  print(paste0(n_in_both, " values of uhn appear in baseline and ", i, ", ",
               n_baseline_only, " appear in baseline only."))
  # Warn the user when observations occurring only in round i are dropped by
  # the left join.
  if (n_round_only != 0) {
    print(paste0("Attention! ", n_round_only,
                 " observations only occurring in ", i, " were omitted!"))
  }
}
rm("dfs_short")

# Treat "not merged" (NA) in every respondent column as a non-response.
resp_cols <- grep("respondent", colnames(data_wide))
data_wide[, resp_cols][is.na(data_wide[, resp_cols])] <- FALSE

## Section 4: Consistency checks that the respondent/non-respondent
## classification agrees with the explicit response variables where they
## exist (Status_r08 == 1 and type2_r14 == 1 mark respondents).
stopifnot(sum(data_wide$Status_r08 == 1 &
                data_wide$respondent_r08 != TRUE, na.rm = TRUE) == 0)
stopifnot(sum(data_wide$Status_r08 != 1 &
                data_wide$respondent_r08 == TRUE, na.rm = TRUE) == 0)
stopifnot(sum(data_wide$type2_r14 == 1 &
                data_wide$respondent_r14 != TRUE, na.rm = TRUE) == 0)
stopifnot(sum(data_wide$type2_r14 != 1 &
                data_wide$respondent_r14 == TRUE, na.rm = TRUE) == 0)
## Section 5: Clearing the data from ambiguous observations. ###################
# Remove respondents for whom answers were recorded that should have led to
# replacement of the respondent according to the questionnaire.
# Keep iff: did not answer "no" (= 2) to s2 (Tanzanian) AND did not answer
#           "no" (= 2) to s4 (mobile network at the place they are planning
#           to move to -- only asked if planning to relocate).
# The is.na() branch explicitly keeps rows where either answer is missing:
# without it, NA comparison results would silently drop those rows.
data_wide_clean <- data_wide[(data_wide$s2_baseline == 2 |
                                data_wide$s4_baseline == 2) == F |
                               (is.na((data_wide$s2_baseline == 2 |
                                         data_wide$s4_baseline == 2))), ]
## Section 6: Transfer the data to match the coding used in the analysis: ######
## New format:
## de_age: age in years, NA= no answer or not a respondent in this wave
## (no change).
## de_female: 1 = female, 0 = male, NA = no answer.
## de_urban: 1 = urban, 0 = rural, NA = no answer.
## de_agriculture: 1 = main occupation in agriculture (including livestock),
## 0 = every other main occupation.
## User can manually include fishing to agriculture.
## de_completed_secondary: 1 for Form 4 or higher completed, 1 for < Form 4,
## NA for "Other" and "Has not attained school going
## age" (as all respondents should be 18 or older).
adapt.coding <- function(data, newnames){
  # RETURNS: A new dataframe in which all demographic columns are coded in
  #          the universal format used in this analysis:
  #            de_age:                 age in years (NA = no answer)
  #            de_female:              1 = female, 0 = male, NA = no answer
  #            de_urban:               1 = urban, 0 = rural, NA = no answer
  #            de_agriculture:         1 = farming/livestock, 0 = otherwise
  #            de_completed_secondary: 1 = Form 4 or higher, 0 = below
  # INPUTS:  <data>: A dataframe in wide format containing the relevant
  #                  baseline columns and respondent dummies for all CATI
  #                  rounds.
  #          <newnames>: currently unused; kept for interface stability.
  # Start from "uhn" plus the call-status dummies of every CATI round.
  new <- dplyr::select(data, uhn, contains("respondent_"))
  # Set every "no response" code (998) to NA, without touching "uhn".
  data[, colnames(data) != "uhn"][data[, colnames(data) != "uhn"] == 998] <- NA
  # Age is already measured in years; copy unchanged.
  new["de_age"] <- data["de_age_baseline"]
  # Gender: recode "male" = 1 / "female" = 2 into a de_female dummy
  # (1 = female, 0 = male). Guard against unexpected codes first.
  stopifnot(sum(!(data$de_gender_baseline %in% 1:2 |
                    is.na(data$de_gender_baseline))) == 0)
  new["de_female"] <- data["de_gender_baseline"] - 1
  # Setting: recode "rural" = 1 / "urban" = 2 into a de_urban dummy
  # (1 = urban, 0 = rural).
  stopifnot(sum(!(data$de_urban_baseline %in% 1:2 |
                    is.na(data$de_urban_baseline))) == 0)
  new["de_urban"] <- data["de_urban_baseline"] - 1
  # Main occupation codes: "farming/livestock" = 1, "fishing" = 2, all other
  # occupations / no occupation = 3..18. de_agriculture is 1 for
  # farming/livestock and 0 otherwise (NA propagates through ifelse).
  # Change the first cutoff in the ifelse() below to 3 if fishing should be
  # counted as agriculture.
  stopifnot(sum(!(data$de_agriculture_baseline %in% 1:18 |
                    is.na(data$de_agriculture_baseline))) == 0)
  new["de_agriculture"] <- ifelse((data["de_agriculture_baseline"] >= 2 &
                                     data["de_agriculture_baseline"] <= 18), 0, 1)
  # Completed secondary education: 1 for Form 4 (code 14) or higher, 0 below.
  # "Other" (23) is set to NA, and "Has not attained school going age" (1) is
  # set to NA as well, since all respondents are at least 18 years old.
  new["de_completed_secondary"] <- data["de_education_baseline"]
  new["de_completed_secondary"][data["de_education_baseline"] == 1 |
                                  data["de_education_baseline"] == 23] <- NA
  new["de_completed_secondary"][data["de_education_baseline"] < 14 &
                                  data["de_education_baseline"] > 1] <- 0
  new["de_completed_secondary"][data["de_education_baseline"] >= 14 &
                                  data["de_education_baseline"] < 23] <- 1
  # Sanity checks: recoded dummies must reproduce the raw category counts.
  # (na.rm = TRUE replaces the non-idiomatic na.rm = T of the original.)
  stopifnot(sum(new["de_urban"] == 1, na.rm = TRUE) ==
              sum(data["de_urban_baseline"] == 2, na.rm = TRUE))
  stopifnot(sum(new["de_urban"] == 0, na.rm = TRUE) ==
              sum(data["de_urban_baseline"] == 1, na.rm = TRUE))
  stopifnot(sum(new["de_female"] == 1, na.rm = TRUE) ==
              sum(data["de_gender_baseline"] == 2, na.rm = TRUE))
  stopifnot(sum(new["de_female"] == 0, na.rm = TRUE) ==
              sum(data["de_gender_baseline"] == 1, na.rm = TRUE))
  return(new)
}
# Apply the universal coding to non-reserve households only
# (reshh_baseline == 0 filters out the reserve households).
# NOTE(review): rows where reshh_baseline is NA would produce NA-indexed rows
# here -- confirm reshh_baseline is never missing.
data_wide_formatted <-
  data_wide_clean[data_wide_clean$reshh_baseline == 0 , ] %>% adapt.coding()
## Section 7: Save the standardized data as .rds and .csv files.
saveRDS(data_wide_formatted, paste0(data_clean, "/data_formatted_", study_id, ".rds"))
write_csv(data_wide_formatted, paste0(data_clean, "/data_formatted_", study_id, ".csv"))
dbb42e710c01aa68fe6fcd40a57262e1e84378df | 302d026524486f0ad386599fac8dd4f57278ba38 | /man/cpmSetSize.Rd | 3a84410bcf455aa5bf614212dedd4fba9cb74eaa | [
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | cwhitman/GenEst | 96d72e50eafe5e71c25a230c8046f80e152b1963 | 7c84c887b3f671fa8786eee8077512b8d80b7883 | refs/heads/master | 2020-03-30T18:03:28.168191 | 2018-10-11T07:04:03 | 2018-10-11T07:04:03 | 151,481,672 | 0 | 0 | NOASSERTION | 2018-10-03T21:17:44 | 2018-10-03T21:17:44 | null | UTF-8 | R | false | true | 2,516 | rd | cpmSetSize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/carcass_persistence_functions.R
\name{cpmSetSize}
\alias{cpmSetSize}
\title{Fit all possible carcass persistence models across all size classes}
\usage{
cpmSetSize(formula_l, formula_s = NULL, data, left = NULL,
right = NULL, dists = c("exponential", "weibull", "lognormal",
"loglogistic"), sizeclassCol = NULL, CL = 0.9, quiet = FALSE)
}
\arguments{
\item{formula_l}{Formula for location; an object of class
"\code{\link{formula}}" (or one that can be coerced to that class):
a symbolic description of the model to be fitted. Details of model
specification are given under 'Details'.}
\item{formula_s}{Formula for scale; an object of class
"\code{\link{formula}}" (or one that can be coerced to that class):
a symbolic description of the model to be fitted. Details of model
specification are given under 'Details'.}
\item{data}{Dataframe with results from carcass persistence trials and any
covariates included in \code{formula_l} or {formula_s} (required).}
\item{left}{Name of columns in \code{data} where the time of last present
observation is stored.}
\item{right}{Name of columns in \code{data} where the time of first absent
observation is stored.}
\item{dists}{Names of the distributions (from "exponential", "weibull",
"loglogistic", and "lognormal") that are to be included}
\item{sizeclassCol}{Name of column in \code{data} where the size classes
are recorded}
\item{CL}{confidence level}
\item{quiet}{Logical indicator of whether or not to print messages}
}
\value{
\code{cpmSetSize} returns a class-\code{cpmSetSize} list of
objects, each of which is a class-\code{cpmSet} list of \code{cpm}
outputs (each corresponding to the fit of a specific model
within the set of \code{cpm} models fit for the given size class), that
is of length equal to the total number of size classes
}
\description{
Run a set of \code{\link{cpmSet}} model set runs based on all
possible models for a suite of size classes. \code{cpmSetSize}'s inputs
generally follow \code{\link{cpmSet}} and \code{\link{cpm}} but with an
additional size column input and calculation of the set of cpm models for
each of the size classes.
}
\examples{
data(wind_RP)
mod <- cpmSetSize(formula_l = l ~ Season, formula_s = s ~ Season,
data = wind_RP$CP, left = "LastPresent", right = "FirstAbsent",
sizeclassCol = "Size"
)
}
|
8a072b255b32f5b1902350f69fb1e8c71a794b87 | cc7e47fdebabbdb367e2a06b64db0ebebb201f02 | /man/args_csem_dotdotdot.Rd | e67ed88795246e8f8f2b4945f1e87a98c597f6bb | [] | no_license | RicoDiel/cSEM | c54cde9bb313c74ffa089b52c8142088c5578352 | 44a19cfadb238e93a6ec8628a99c4f2a4ee8ee7e | refs/heads/master | 2021-01-06T14:00:20.427330 | 2020-02-18T12:34:29 | 2020-02-18T12:34:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,384 | rd | args_csem_dotdotdot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zz_arguments.R
\name{args_csem_dotdotdot}
\alias{args_csem_dotdotdot}
\title{Internal: Complete list of csem()'s ... arguments}
\arguments{
\item{.approach_cor_robust}{Character string. Approach used to obtain a robust
indicator correlation matrix. One of: "\emph{none}" in which case the standard
Bravais-Pearson correlation is used,
"\emph{spearman}" for the Spearman rank correlation, or
"\emph{mcd}" via \code{\link[MASS:cov.rob]{MASS::cov.rob()}} for a robust correlation matrix.
Defaults to "\emph{none}". Note that many postestimation procedures (such as
\code{\link[=testOMF]{testOMF()}} or \code{\link[=fit]{fit()}} implicitly assume a continuous
indicator correlation matrix (e.g. Bravais-Pearson correlation matrix).
Only use if you know what you are doing.}
\item{.conv_criterion}{Character string. The criterion to use for the convergence check.
One of: "\emph{diff_absolute}", "\emph{diff_squared}", or "\emph{diff_relative}". Defaults
to "\emph{diff_absolute}".}
\item{.dominant_indicators}{A character vector of \code{"construct_name" = "indicator_name"} pairs,
where \code{"indicator_name"} is a character string giving the name of the dominant indicator
and \code{"construct_name"} a character string of the corresponding construct name.
Dominant indicators may be specified for a subset of the constructs.
Default to \code{NULL}.}
\item{.estimate_structural}{Logical. Should the structural coefficients
be estimated? Defaults to \code{TRUE}.}
\item{.iter_max}{Integer. The maximum number of iterations allowed.
If \code{iter_max = 1} and \code{.approach_weights = "PLS-PM"} one-step weights are returned.
If the algorithm exceeds the specified number, weights of iteration step
\code{.iter_max - 1} will be returned with a warning. Defaults to \code{100}.}
\item{.PLS_modes}{Either a named list specifying the mode that should be used for
each construct in the form \code{"construct_name" = mode}, a single character
string giving the mode that should be used for all constructs, or \code{NULL}.
Possible choices for \code{mode} are: "\emph{modeA}", "\emph{modeB}", "\emph{modeBNNLS}",
"\emph{unit}", "\emph{PCA}", a single integer or
a vector of fixed weights of the same length as there are indicators for the
construct given by \code{"construct_name"}. If only a single number is provided this is identical to
using unit weights, as weights are rescaled such that the related composite
has unit variance. Defaults to \code{NULL}.
If \code{NULL} the appropriate mode according to the type
of construct used is chosen. Ignored if \code{.approach_weight} is not PLS-PM.}
\item{.PLS_ignore_structural_model}{Logical. Should the structural model be ignored
when calculating the inner weights of the PLS-PM algorithm? Defaults to \code{FALSE}.
Ignored if \code{.approach_weights} is not PLS-PM.}
\item{.PLS_weight_scheme_inner}{Character string. The inner weighting scheme
used by PLS-PM. One of: "\emph{centroid}", "\emph{factorial}", or "\emph{path}".
Defaults to "\emph{path}". Ignored if \code{.approach_weight} is not PLS-PM.}
\item{.PLS_approach_cf}{Character string. Approach used to obtain the correction
factors for PLSc. One of: "\emph{dist_squared_euclid}", "\emph{dist_euclid_weighted}",
"\emph{fisher_transformed}", "\emph{mean_arithmetic}", "\emph{mean_geometric}", "\emph{mean_harmonic}",
"\emph{geo_of_harmonic}". Defaults to "\emph{dist_squared_euclid}".
Ignored if \code{.disattenuate = FALSE} or if \code{.approach_weights} is not PLS-PM.}
\item{.tolerance}{Double. The tolerance criterion for convergence.
Defaults to \code{1e-05}.}
}
\description{
A complete alphabetical list of all possible arguments accepted by \code{csem()}'s \code{...}
(dotdotdot) argument.
}
\details{
Most arguments supplied to the \code{...} argument of \code{csem()} are only
accepted by a subset of the functions called by \code{csem()}. The following
list shows which argument is passed to which (internal) function:
\describe{
\item{.approach_cor_robust}{Accepted by/Passed down to: \code{\link[=calculateIndicatorCor]{calculateIndicatorCor()}}}
\item{.conv_criterion}{Accepted by/Passed down to: \code{\link[=calculateWeightsPLS]{calculateWeightsPLS()}},
\code{\link[=calculateWeightsGSCA]{calculateWeightsGSCA()}}, \code{\link[=calculateWeightsGSCAm]{calculateWeightsGSCAm()}} and subsequently
\code{\link[=checkConvergence]{checkConvergence()}}.}
\item{.dominant_indicators}{Accepted by/Passed down to: \code{\link[=setDominantIndicator]{setDominantIndicator()}}}
\item{.estimate_structural}{Accepted by/Passed down to: \code{\link[=foreman]{foreman()}}}
\item{.iter_max}{Accepted by/Passed down to: \code{\link[=calculateWeightsPLS]{calculateWeightsPLS()}},
\code{\link[=calculateWeightsGSCA]{calculateWeightsGSCA()}}, \code{\link[=calculateWeightsGSCAm]{calculateWeightsGSCAm()}}}
\item{.PLS_modes, .PLS_ignore_structural_model, .PLS_weight_scheme_inner, .PLS_approach_cf}{
Accepted by/Passed down to: \code{\link[=calculateWeightsPLS]{calculateWeightsPLS()}}}
\item{.tolerance}{Accepted by/Passed down to: \code{\link[=calculateWeightsPLS]{calculateWeightsPLS()}},
\code{\link[=calculateWeightsGSCA]{calculateWeightsGSCA()}}, \code{\link[=calculateWeightsGSCAm]{calculateWeightsGSCAm()}}, \code{\link[=calculateWeightsUnit]{calculateWeightsUnit()}}}
}
}
\keyword{internal}
|
b776d7c3d73b5e67b5bc6feb1da42deab03568cb | ee54a85e446285fcc568caffe2dab934089e12e2 | /xp04/05b_tables_dscore_xp04.R | e6914c67f9d3d7f4b9c04dd5ed6d53063b9d4f27 | [
"CC0-1.0"
] | permissive | bricebeffara/rwa_evaluative_conditioning_data_analysis | e8697aee9cb7097c19ec9207a092d547658246e2 | 81c35c84148f9579b68624e32c0efc0dbad43e3c | refs/heads/main | 2023-06-03T01:15:34.778559 | 2021-06-17T09:55:12 | 2021-06-17T09:55:12 | 375,408,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,856 | r | 05b_tables_dscore_xp04.R | p_load(plyr,
knitr,
kableExtra,
formattable,
dplyr,
webshot,
stringr,
install = TRUE,
update = getOption("pac_update"),
character.only = FALSE)
tgen_lme4 <- as.data.frame(tab_modD)
tgen_brms <- as.data.frame(tab_iat_d[c(1:4),])
tgen_lme4$term <- tgen_brms$term
# general regression ------------------------------------------------------
tgen <- join (tgen_brms, tgen_lme4, by = "term")
tgen$"b [95% HDI]" <- paste ( format( tgen$estimate, nsmall = 2), " [", format( tgen$hdi.low, nsmall = 2),
", ", format(tgen$hdi.high, nsmall = 2), "]", sep = "")
tgen$"b [95% HDI]" <- gsub("\\s+", " ", str_trim(tgen$"b [95% HDI]"))
tgen$"b [95% HDI]" <- gsub("\\[ ", "\\[", str_trim(tgen$"b [95% HDI]"))
tgen$"b [95% CI]" <- paste ( format( tgen$Estimate, nsmall = 2), " [", format( tgen$"2.5 %", nsmall = 2),
", ", format(tgen$"97.5 %", nsmall = 2), "]", sep = "")
tgen$"b [95% CI]" <- gsub("\\s+", " ", str_trim(tgen$"b [95% CI]"))
tgen$"b [95% CI]" <- gsub("\\[ ", "\\[", str_trim(tgen$"b [95% CI]"))
# join -------------------------------------------------------------------
col2k <- c("term", "b [95% HDI]", "std.error",
"b [95% CI]", "Std. Error", "t value")
tgen <- format(tgen[,col2k], nsmall = 2)
tgen$term <- c("Cong",
"Ordre",
"RWA",
"RWA × Ordre")
tgen$"n° β" <- 1:length( tgen$term)
ordc <- c("n° β", "term", "b [95% HDI]", "std.error",
"b [95% CI]", "Std. Error", "t value")
tgen <- tgen[,ordc]
colnames(tgen) <- c("n° β", "Paramètre", "β<sub>Bayes</sub> [95% HDI]",
"SE<sub>Bayes</sub>", "β<sub>freq</sub> [95% CI]",
"SE<sub>freq</sub>", "t")
rownames(tgen) <- NULL
|
cada1ef5afe2a7bdf554f0cd4db99cff9cd061b5 | 6838ea03822424d0e3eabdb281e02d409006a7f3 | /yspm/man/collect_csv_variables.Rd | eb626cf8a9b7962d8673def2dbb0608616888f36 | [] | no_license | cpfaff/yspm | 8eea7783b29b0183d58ddca3b4af3af94b182cf6 | 46acaba607008d72b143dbed0fe4245e4e09348a | refs/heads/master | 2020-04-23T20:50:47.319430 | 2019-04-05T07:13:16 | 2019-04-05T07:13:16 | 171,452,932 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,435 | rd | collect_csv_variables.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_handle_metadata.R
\name{collect_csv_variables}
\alias{collect_csv_variables}
\title{Collect variables from csv files into a metadata sheet}
\usage{
collect_csv_variables(input_path = yspm::reference_content("data"),
output_path = yspm::reference_content("metadata/data"))
}
\arguments{
\item{input_path}{A path used to a folder which is used to search for csv files
in. The folder which you provide here is recursively searched through for
files. The parameter defaults to the data folder of the active project.}
\item{output_path}{A path to a folder which is used to save the collected
metadata to. This defaults to the metadata folder of the active project.}
}
\value{
No value is returned; this function is called for its side effects.
}
\description{
The function collects names of variables from all the datasets which are found
in the data folder inside of an enabled project. It also collects the class of
each variable and the count of missing values. It compiles a file in the folder
which contains the metadata. There a user can provide further inforamtion about
each column with a description or a unit if this is applicable. The function
can be called multiple times when the underlying data updates. It preserves
information which has been written by the user
}
\examples{
\dontrun{
collect_csv_variables(input_path = "~/test")
}
}
|
27305021b829772b5cdffb05e8c09694315568c8 | cf9b4586abfdb6f859ba840255cb3f4cdbcb2ed1 | /02_try_model1.R | c11867b7cd7531c21711f99bfeabdb914b0c9122 | [] | no_license | gtadiparthi/knocktober | be47996b9ed0e5613ebdc639c777d088e7112849 | 03cffb063fd14171d488cc4cd631fa3533b8cd56 | refs/heads/master | 2021-01-13T08:02:40.905731 | 2016-10-23T10:38:09 | 2016-10-23T10:38:09 | 71,683,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,446 | r | 02_try_model1.R | # Analytics Vidhya Knocktober hackathon
# In this code, we do any preprocessing to the data such as imputations etc.
# Step 0: Clear everything on the workspace and memory
rm(list=ls())
graphics.off()
setwd("/Users/gopalakrishnatadiparthi/Documents/RPrograms/analyticsvidhya/knocktober")
###### Include all the libraries
load("data/all_data.RData")
train = subset(all_data, !is.na(outcome))
test = subset(all_data, is.na(outcome))
##### Try GLM MODEL
library(caTools)
set.seed(188)
table(train$outcome)
split = sample.split(train$outcome, SplitRatio = 0.75)
split
qualityTrain = subset(train, split==TRUE)
qualityTest = subset(train, split==FALSE)
##### First test it on a training data set
outcomeLogistic = glm(outcome ~ Var1 + Var2 + Var3 +Var4 +Var5 +Category1 + Category2 + Category3
+duration +prev_visit+reg_duration +first_duration + prev_type ,
data= qualityTrain,
family= binomial)
summary(outcomeLogistic)
# Look at the performance on the train data
predictLogistic = predict(outcomeLogistic, type="response")
table(qualityTrain$outcome, predictLogistic >= 0.1)
library(ROCR)
ROCRpred = prediction(predictLogistic, qualityTrain$outcome)
ROCRperf = performance(ROCRpred, "tpr", "fpr")
plot(ROCRperf, colorize = TRUE, print.cutoffs.at = seq(0,1,0.1), text.adj = c(-0.2, 1.7))
auc = as.numeric(performance(ROCRpred, "auc")@y.values)
auc
# Look at the performance on the quality test data
predictTest = predict(outcomeLogistic, type="response",newdata = qualityTest)
table(qualityTest$outcome, predictTest >= 0.4)
ROCRpred = prediction(predictTest, qualityTest$outcome)
ROCRperf = performance(ROCRpred, "tpr", "fpr")
plot(ROCRperf, colorize = TRUE, print.cutoffs.at = seq(0,1,0.1), text.adj = c(-0.2, 1.7))
auc = as.numeric(performance(ROCRpred, "auc")@y.values)
auc
# 0.794
##### Run the same model for all train data
outcomeLog2 = glm(outcome ~ Var2 + Var5 +Category1 + Category2 + Category3
+duration +prev_visit+reg_duration +first_duration + prev_type ,
data= train,
family= binomial)
summary(outcomeLog2)
predictTest2 = predict(outcomeLog2, type="response",newdata = test)
mysubmission = data.frame(Patient_ID = test$Patient_ID, Health_Camp_ID = test$Health_Camp_ID, Outcome = predictTest2)
write.csv(mysubmission, "output/sub_log5.csv",row.names = FALSE)
#Leaderboard auc
0.7848
|
0152060a068bb7ae3754072611ef5a4fd5b629c8 | f1d4d986bbfe4d08026fb1c7f5e921becfb8895d | /man/unserializeNode.Rd | 56ce20c3492a20c67544a3320faeb09b3fab6497 | [
"Apache-2.0"
] | permissive | mickash/Adaptive-Bayesian-Networks | 988d3f2fcfeed921055035437e1f4c52f5f89660 | 56611cf9f8562ebcbfa17d876d2a7d27c201b67a | refs/heads/master | 2020-09-09T14:51:25.727845 | 2019-11-13T14:20:59 | 2019-11-13T14:20:59 | 221,476,013 | 1 | 0 | null | 2019-11-13T14:21:01 | 2019-11-13T14:18:43 | R | UTF-8 | R | false | false | 360 | rd | unserializeNode.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/NetworkSerialization.R
\name{unserializeNode}
\alias{unserializeNode}
\title{Unserialize Node}
\usage{
unserializeNode(row)
}
\arguments{
\item{row}{NetworkDataFrame row. See getNetwork}
}
\value{
The unserialized node
}
\description{
Unserialize Node
}
\keyword{internal}
|
7c7bac326f316986bd4b19994d310e30756b05be | ee54a85e446285fcc568caffe2dab934089e12e2 | /xp09/01_data_extraction_xp09.R | 84fe5f919564ba9a3130bf7bb752ccf677dad4df | [
"CC0-1.0"
] | permissive | bricebeffara/rwa_evaluative_conditioning_data_analysis | e8697aee9cb7097c19ec9207a092d547658246e2 | 81c35c84148f9579b68624e32c0efc0dbad43e3c | refs/heads/main | 2023-06-03T01:15:34.778559 | 2021-06-17T09:55:12 | 2021-06-17T09:55:12 | 375,408,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,744 | r | 01_data_extraction_xp09.R | ###########
#### RWA & load
###########
# Data preparation --------------------------------------------------------
# Optional generic preliminaries:
#graphics.off() # This closes all of R's graphics windows.
#rm(list=ls()) # Careful! This clears all of R's memory!
## install, load and update necessary packages if required##
if (!require("pacman")) install.packages("pacman")
p_load(dplyr,
stringi,
data.table,
reshape2,
rlist,
readr,
install = TRUE,
update = getOption("pac_update"),
character.only = FALSE)
# We get the current working directory and it will be used to load the data
curwd <- dirname(rstudioapi::getActiveDocumentContext()$path)
# set the working directory to the folder containing the data
# Select only text files corresponding to collected data
load_df <- read_delim("table_all_data.csv",
";", escape_double = FALSE, trim_ws = TRUE,
skip = 1)
##CODING CONDITION +0.5 = LOAD -0.5 = CONTROL
load_df$load <- ifelse(grepl("COND1",load_df$ExperimentName),+0.5,-0.5)
##RWA SCORE
load_df$RWARECOD<-ifelse(grepl("R",load_df$codage),8-load_df$RWAquest.RESP,load_df$RWAquest.RESP)
load_dfsum <- aggregate(RWARECOD ~ Subject,load_df,FUN = sum)
load_df <- merge(load_df,load_dfsum,by="Subject")
load_df$RWA<-load_df$RWARECOD.y
#Check data & See data
#boxplot(load_df$RWA)
#hist(load_df$RWA)
quart1 <- quantile(load_df$RWA)[2]
quart3 <- quantile(load_df$RWA)[4]
iqr <- IQR(load_df$RWA)
#which(load_df$RWAscore<quart1-3*iqr)
#which(load_df$RWAscore>quart3+3*iqr)
# Remove highly extreme values of RWA
load_df$RWA <- ifelse (load_df$RWA<quart1-3*iqr | load_df$RWA>quart3+3*iqr, NA, load_df$RWA)
load_df <- load_df[which(!is.na(load_df$RWA)),]
# scale RWA score
load_df$RWAscore <- scale(load_df$RWA, center = TRUE, scale = TRUE)
load_df$response<-load_df$Slide1.RESP
load_df$stim1<-load_df$`CS[Trial]`
load_df$ppt <- as.character(load_df$Subject)
##RECODING OF BLOC 1 valence
load_dfassi <- aggregate(assignment ~ Subject, load_df[which(!is.na(load_df$assignment)),],FUN = unique)
load_df <- merge(load_df,load_dfassi,by="Subject")
load_df$assignment <- load_df$assignment.y
load_df$usvalence <- ifelse ((load_df$assignment == 1 & grepl("M",load_df$stim1)) | (load_df$assignment == 2 & grepl("B",load_df$stim1)), +0.5, -0.5)
load_df <- load_df[which(!is.na(load_df$Slide1.RESP)),]
col2k <- c("ppt", "usvalence", "load", "RWAscore", "response", "stim1")
load_df <- load_df[,col2k]
# for IDA
xp9_df <- load_df[load_df$load == -0.5,]
col2k <- c("ppt", "stim1", "RWAscore", "usvalence", "response")
xp9_df <- xp9_df[,col2k]
xp9_df$XP <- "XP09"
setwd("../ida")
write.csv(xp9_df,file="XP09.csv", row.names=F)
|
5c3172c7370396d81938358bd69cbc27074f4ae8 | 87cb067c3c82e262e28813a1745eaf4b01fab285 | /man/get_sourced_objects.Rd | eedf7bc2eb7e35522c822c703befc3e7970ff676 | [] | no_license | charlesberthillon/clickme | 943ab2ee0f4bb8aa844ec3f98b7440f51c527a23 | 83ad841ba412af0495ac4ba96aff19f2f5705a15 | refs/heads/master | 2020-12-11T01:46:23.122162 | 2014-12-17T23:20:26 | 2014-12-17T23:20:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 603 | rd | get_sourced_objects.Rd | \name{get_sourced_objects}
\alias{get_sourced_objects}
\title{Traverse a list of paths and determine what objects are defined in the
source files that appear in the "translator" folder of each path
It returns a list of sourced objects.
We do this to control what objects get added to the global namespace}
\usage{
get_sourced_objects(paths)
}
\description{
Traverse a list of paths and determine what objects are
defined in the source files that appear in the "translator"
folder of each path It returns a list of sourced objects.
We do this to control what objects get added to the global
namespace
}
|
fce320867597f7636639e240bfcdfe3eb8648e9e | 604a31300da0d7923819a5e890ce9e860b5bf320 | /R/Functions.R | 2f583af5f2158303dd9736e5de6e54ef2d238dcb | [] | no_license | tallulandrews/scTarNet | f54121bcaf60fdcf6c664dd3c904f2ec1deadd43 | dada65b9290c2f721fe5cce262b2a7df0fa6b54b | refs/heads/master | 2021-01-21T14:49:39.735900 | 2018-09-10T10:53:19 | 2018-09-10T10:53:19 | 95,344,667 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,854 | r | Functions.R | #prevent t-test errors from crashing
getDirection <- function(...) {
obj<-try(t.test(...), silent=TRUE)
if (is(obj, "try-error")) {
obj$p.value=NA
return(0)
}else if (obj$p.value < 0.05) {
return(sign(obj$statistic))
} else {
return(0);
}
}
dcor.test.somevsall <- function(mat, rows, n.cores) {
# Parallelize
cl <- parallel::makeCluster(n.cores);
doParallel::registerDoParallel(cl);
N <- nrow(mat); # Number of potential targets
M <- length(rows); # Number of TFs
pvals <- matrix(rep(NA,times=N*M),nrow=M,ncol=N);
strength <- matrix(rep(NA,times=N*M),nrow=M,ncol=N);
direction <- matrix(rep(NA,times=N*M),nrow=M,ncol=N);
x <- foreach (i = 1:M, .combine='rbind', .packages='foreach') %dopar% {
row <- rows[i];
bins <- quantile(mat[row,], c(0.25,0.75))
low <- bins[1]
high <- bins[2]
if (low == high) {high <- high+10^-5}
foreach (j=1:N, .combine='c') %dopar% {
if (j == row) { # gene to self
return(data.frame(pv=1, st=1, d=1))
}
getDirection <- function(...) {
obj<-try(t.test(...), silent=TRUE)
if (is(obj, "try-error")) {
obj$p.value=NA
return(0)
}else if (is.na(obj$p.value)) {
obj$p.value=NA
return(0)
} else if (obj$p.value < 0.05) {
return(sign(obj$statistic))
} else {
return(0);
}
}
dcor_test = energy::dcor.ttest(unlist(mat[row,]),unlist(mat[j,]))
pvals = dcor_test$p.val;
strength = dcor_test$estimate;
#dir = getDirection(mat[j,(mat[row,]<=low)],mat[j,(mat[row,]>=high)])
dir = getDirection(mat[j,(mat[row,]>=high)], mat[j,(mat[row,]<=low)])
if (!is.finite(pvals)) {pvals <- 1}
if (!is.finite(strength)) {strength <- NA}
if (!is.finite(dir)) {dir <- 0}
return(data.frame(pv=pvals, st=strength, d=dir));
} # inner foreach (each potential target)
} # outer foreach (each TF)
stopCluster(cl);
output = list();
output$pvals<-x[,grep("pv",colnames(x))]
output$strength<-x[,grep("st",colnames(x))]
output$direction<-x[,grep("d",colnames(x))]
colnames(output$strength) = rownames(mat)
rownames(output$strength) = rownames(mat)[rows]
colnames(output$pvals) = rownames(mat)
rownames(output$pvals) = rownames(mat)[rows]
colnames(output$dir) = rownames(mat)
rownames(output$dir) = rownames(mat)[rows]
return(output)
}
# Post-hoc fix
internal_class_interactions <- function(Int, Dep) {
Int[,"type"] <- as.character(Int[,"type"])
i <- 1;
while (i <= nrow(Int)) {
set <- Int[i,]
set <- t(set)[,1]
dir1 <- Dep[ Dep[,1] == set[1] & Dep[,2] == set[3], "direction"]
dir2 <- Dep[ Dep[,1] == set[2] & Dep[,2] == set[3], "direction"]
if (length(dir1) == 0 | length(dir2) == 0) {
Int <- Int[-i,];
i <- i -1;
} else {
if (dir1 == dir2 | dir1 == 0 | dir2 == 0) {
Int[i,"type"] <- "cooperation";
} else {
Int[i,"type"] <- "antagonism";
}
}
i <- i+1;
}
return(Int);
}
dcor_classify_interaction <- function(x, Mat, Dep, threshold.indirect, threshold.interaction) { # DUPLICATED
tf1 <- x[1];
tf2 <- x[2];
threshold.interaction <- 1 + threshold.interaction
tf1.targets <- Dep[ which(as.character(Dep[,1]) == tf1) , 2]; tf1.targets[tf1.targets != tf2]; # triplets only
tf2.targets <- Dep[ which(as.character(Dep[,1]) == tf1) , 2]; tf1.targets[tf2.targets != tf1]; # triplets only
sharedtargets = intersect(as.character(tf1.targets), as.character(tf2.targets))
out <- vector()
for (t in sharedtargets) {
dcor_1_t <- Dep[Dep[,1] == tf1 & Dep[,2] == t,]$strength;
dcor_2_t <- Dep[Dep[,1] == tf2 & Dep[,2] == t,]$strength;
dir1 <- Dep[Dep[,1] == tf1 & Dep[,2] == t,]$direction
dir2 <- Dep[Dep[,1] == tf2 & Dep[,2] == t,]$direction
pdcor_1_t_g2 <- energy::pdcor(unlist(Mat[rownames(Mat)==tf1,]), unlist(Mat[rownames(Mat)==t,]),unlist(Mat[rownames(Mat)==tf2,]))
pdcor_2_t_g1 <- energy::pdcor(unlist(Mat[rownames(Mat)==tf1,]), unlist(Mat[rownames(Mat)==t,]),unlist(Mat[rownames(Mat)==tf2,]))
if (pdcor_1_t_g2 > threshold.interaction*dcor_1_t & pdcor_2_t_g1 > threshold.interaction*dcor_2_t) {
# Interaction
if (dir1 == dir2 | dir1 == 0 | dir2 == 0) {
out <- rbind(out, c(sort(c(tf1, tf2)), t, "cooperation"));
} else {
out <- rbind(out, c(sort(c(tf1, tf2)), t, "antagonism"));
}
} else if (pdcor_1_t_g2 < threshold.indirect*dcor_1_t) {
# 1->2->t
out <- rbind(out, c(tf1, tf2, t, "pathway"));
} else if (pdcor_2_t_g1 < threshold.indirect*dcor_2_t) {
# 2->1->t
out <- rbind(out, c(tf2, tf1, t, "pathway"));
}
}
out <- data.frame(out)
colnames(out) <- c("TF1", "TF2", "Target", "type");
return(out);
}
# Step 1
calculateTFstoTargets <- function(Mat, TFs, n.cores=1, mt_correction="bon=0.05"){
# Process arguments
undetected <- rowSums(Mat > 0) == 0;
if (sum(undetected) > 0) {
print(paste("Removing", sum(undetected), "undetected genes."))
Mat <- Mat[!undetected,];
}
MTmethod = unlist(strsplit(mt_correction,"="));
TFs <- as.character(TFs)
TFs.rows <- which(rownames(Mat) %in% TFs);
TFs.rows <- TFs.rows[!is.na(TFs.rows)];
out <- dcor.test.somevsall(Mat, TFs.rows, n.cores)
pvals <- as.matrix(out$pvals)
strength <- as.matrix(out$strength)
direction <- out$dir
if (MTmethod[1] == "bon") {
Sig <- which(pvals < as.numeric(MTmethod[2])/length(pvals[1,]),arr.ind=T)
} else if (MTmethod[1] == "str") {
Sig <- which(strength > MTmethod[2], arr.ind=T);
} else {
tmp <- p.adjust(pvals,method=MTmethod[1]) < MTmethod[2];
if (sum(tmp) > 0) {
tmp <- max(unlist(pvals[tmp]));
Sig <- which(pvals <= tmp, arr.ind=T);
} else {
Sig <- c()
}
}
Dep <- data.frame(Gene = rownames(pvals)[Sig[,1]], Target = colnames(pvals)[Sig[,2]], pval = unlist(pvals[Sig]), strength = unlist(strength[Sig]), direction = unlist(direction[Sig]))
return(Dep);
}
# Step 2
calculateConditionalCors <- function(Mat, TFs, Dep, n.cores=1, threshold.interaction=0.01, bidirectional=TRUE, threshold.indirect=0.5, exclude.indirect=TRUE) {
# threshold indirect = conditional dcors < this*original decor are equivalent to zero (set to a negative to turn off)
# threshold interaction = % increase in dcor after conditioning for a correlation to be considered an interaction
threshold.interaction <- 1 + threshold.interaction
undetected <- rowSums(Mat > 0) == 0;
if (sum(undetected) > 0) {
print(paste("Removing", sum(undetected), "undetected genes."))
Mat <- Mat[!undetected,];
}
TFs <- as.character(TFs)
TFs.rows <- which(rownames(Mat) %in% TFs);
TFs.rows <- TFs.rows[!is.na(TFs.rows)];
pairs <- t(combn(TFs,2))
# Parallelize
cl <- parallel::makeCluster(n.cores);
doParallel::registerDoParallel(cl);
inter <- foreach (i = 1:nrow(pairs), .combine='rbind', .packages='foreach') %dopar% {
dcor_classify_interaction <- function(x, Mat, Dep, threshold.indirect, threshold.interaction) { # DUPLICATED
tf1 <- x[1];
tf2 <- x[2];
tf1.targets <- Dep[ which(as.character(Dep[,1]) == tf1) , 2]; tf1.targets <- tf1.targets[tf1.targets != tf2]; # triplets only
tf2.targets <- Dep[ which(as.character(Dep[,1]) == tf2) , 2]; tf2.targets <- tf2.targets[tf2.targets != tf1]; # triplets only
sharedtargets = intersect(as.character(tf1.targets), as.character(tf2.targets))
out <- vector()
for (t in sharedtargets) {
dcor_1_t <- Dep[Dep[,1] == tf1 & Dep[,2] == t,]$strength;
dcor_2_t <- Dep[Dep[,1] == tf2 & Dep[,2] == t,]$strength;
dir1 <- Dep[Dep[,1] == tf1 & Dep[,2] == t,]$direction
dir2 <- Dep[Dep[,1] == tf2 & Dep[,2] == t,]$direction
pdcor_1_t_g2 <- energy::pdcor(unlist(Mat[rownames(Mat)==tf1,]), unlist(Mat[rownames(Mat)==t,]),unlist(Mat[rownames(Mat)==tf2,]))
pdcor_2_t_g1 <- energy::pdcor(unlist(Mat[rownames(Mat)==tf2,]), unlist(Mat[rownames(Mat)==t,]),unlist(Mat[rownames(Mat)==tf1,]))
print(c(dcor_1_t, pdcor_1_t_g2))
print(c(dcor_2_t, pdcor_2_t_g1))
if ((pdcor_1_t_g2 > threshold.interaction*dcor_1_t & pdcor_2_t_g1 > threshold.interaction*dcor_2_t) |
!bidirectional & (pdcor_1_t_g2 > threshold.interaction*dcor_1_t | pdcor_2_t_g1 > threshold.interaction*dcor_2_t )) {
# Interaction
if (dir1 == dir2 | dir1 == 0 | dir2 == 0) {
out <- rbind(out, c(sort(c(tf1, tf2)), t, "cooperation"));
} else {
out <- rbind(out, c(sort(c(tf1, tf2)), t, "antagonism"));
}
} else if (pdcor_1_t_g2 < threshold.indirect*dcor_1_t) {
# 1->2->t
out <- rbind(out, c(tf1, tf2, t, "pathway"));
} else if (pdcor_2_t_g1 < threshold.indirect*dcor_2_t) {
# 2->1->t
out <- rbind(out, c(tf2, tf1, t, "pathway"));
}
}
out <- data.frame(out)
if (nrow(out) > 0) {
colnames(out) <- c("TF1", "TF2", "Target", "type");
return(out);
}
}
dcor_classify_interaction(pairs[i,], Mat, Dep, threshold.indirect, threshold.interaction)
}
stopCluster(cl);
if (exclude.indirect) {
indirect <- unique(inter[inter[,"type"]=="pathway", c("TF2", "Target")])
direct_int <- dplyr::anti_join(inter, indirect, by=c("TF2", "Target"))
colnames(Dep) <- c("TF2", "Target", "pval", "strength", "direction");
Dep <- suppressWarnings(dplyr::anti_join(Dep, indirect, by=c("TF2", "Target"))) # get warnings if not all TFs involved in interactions/pathways
colnames(Dep) <- c("Gene", "Target", "pval", "strength", "direction");
} else {
direct_int <- inter;
}
return(list(Dep=Dep, Int=direct_int));
}
|
1309b8ec9d8296d2475fe7a8fc48d3241a7b8183 | fd8326488cda5ffcb779bb051101df8668e5e822 | /man/dyntoy.Rd | f93451200f1f8c6cb25b369ce3a11f45fb6d385f | [] | no_license | dynverse/dyntoy | 52ad35edb612455c38dbee1276dbb9cf7d0f4e62 | 0bd547448f1b1d4f16e250a18a3b6c6cdd1648be | refs/heads/master | 2021-06-06T08:32:07.647939 | 2019-05-17T11:10:02 | 2019-05-17T11:10:02 | 103,628,886 | 6 | 1 | null | 2019-05-10T07:08:48 | 2017-09-15T07:40:03 | R | UTF-8 | R | false | true | 292 | rd | dyntoy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{dyntoy}
\alias{dyntoy}
\alias{dyntoy-package}
\title{Generating simple toy data of cellular differentiation}
\description{
Generating simple toy data of cellular differentiation
}
|
7ecc3c97a66bcfa066522a9812e33e0987438590 | 1a8ab42b4aa8213c3bc263376eaf2f1ce07fa113 | /cachematrix.R | d690e65426180e2b9b7dfd0a2a692466a2859451 | [] | no_license | JonnyWPage/ProgrammingAssignment2 | c6ed4d285c61ee6dc89f0d6ccac9271ea2f3d2b0 | 285417ddc3fc43fee558f8faa5736abc5eabbef3 | refs/heads/master | 2021-01-12T13:46:30.678520 | 2017-12-03T23:27:20 | 2017-12-03T23:27:20 | 69,602,467 | 0 | 0 | null | 2016-09-29T19:51:18 | 2016-09-29T19:51:18 | null | UTF-8 | R | false | false | 855 | r | cachematrix.R | makeCacheMatrix <- function(x) {
## x is a matrix with ncol>2 and nrow>2
## makeCacheMatrix creates a matrix object that can
## cache its own inverse
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
cacheSolve <- function(x, ...) {
## cacheSolve returns a matrix that is the inverse of 'x'
## If the inverse of 'x' has already been calculated
## cacheSolve does not compute the matrix, and instead
## returns the inverse of 'x' that has already been calculated
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data,...)
x$setInverse(m)
m
}
|
5ae285d92a9c01bcadf29355945d80f7b42dadd7 | db3e846cda5e996d42ecb994708fbbf793490b18 | /19_190313_Partitions.R | 82cf86fc5406df06fb5185a6c3bde81e053166f0 | [] | no_license | tomzg12/machinel_R | 5c55dc246aed9b1acc81a50651948aa4bd81db5d | d1bcf96d85a90c27b34ddc73f12879691d5dd1f3 | refs/heads/master | 2021-06-25T10:32:28.214983 | 2020-11-20T19:49:20 | 2020-11-20T19:49:20 | 173,021,947 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,506 | r | 19_190313_Partitions.R | # Importar librería 'Caret'
install.packages('caret')
library(caret)
# Importar dataframe
data <- read.csv('F:/BD/GitHub/mlearningcourseR/r-course/data/tema2/BostonHousing.csv')
# Crear dos sets (entrenamiento, validacion)
raining.ids <- createDataPartition(data$MEDV, p = 0.8, list = F)
data.training <- data[training.ids,]
data.validation <- data[-training.ids,]
# Crear tres sets( entrenamiento, validacion, test)
training.ids.2 <- createDataPartition(data$MEDV, p = 0.7, list = F)
data.training.2 <- data[training.ids.2,]
temp <- data[-training.ids.2,] # Tabla temporal
validation.ids.2 <- createDataPartition(temp$MEDV, p = 0.5, list = F)
data.validation <- temp[validation.ids.2,]
data.testing <- temp[-validation.ids.2,]
# Crear partición con variables categóricas (factores)
data2 <- read.csv('F:/BD/GitHub/mlearningcourseR/r-course/data/tema2/boston-housing-classification.csv')
training.ids.3 <- createDataPartition(data2$MEDV_CAT, p = 0.7, list = F)
data.training.3 <- data2[training.ids.3,]
data.validation.3 <- data2[-training.ids.3,]
# Función para 2 particiones
rda.cb.partition2 <- function(dataframe, target.index, prob){
library(caret)
training.ids <- createDataPartition(dataframe[,target.index], p=prob, list = FALSE)
list(train = dataframe[training.ids,], val = dataframe[-training.ids,])
}
# Función para 3 particiones
rda.cb.partition3 <- function(dataframe, target.index,
prob.train, prob.val){
library(caret)
training.ids <- createDataPartition(dataframe[,target.index], p = prob.train, list = FALSE)
train.data <- dataframe[training.ids,]
temp <- dataframe[-training.ids,]
validation.ids <- createDataPartition(temp[,target.index], p = prob.val, list = FALSE)
list(train = train.data, val = temp[validation.ids,], test = temp[-validation.ids,])
}
#columna, #probabilidad
data.1 <- rda.cb.partition2(data, 14, 0.8)
#columna, probabilidad primer set, probabilidad set temporal
data.2 <- rda.cb.partition3(data2, 14, 0.7, 0.5)
head(data.1$val)
head(data.2$test) #train, test, val (sustituir en el head, porque ya hay 3 datasets)
nrow(data) # Número de filas
ncol(data) # Número de columnas
# Muestra aleatoria de los datos
#df y columna, # cantidad de datos, False Todos los elementos serán diferentes
sample1 <- sample(data$CRIM, 40, replace = F)
View(sample1)
|
75628ac53e2edb25d6533fb1a018ffb4f06735af | f5dac80016e1ba16f7f33a69dbc3c00f032cfab5 | /man/is_time.Rd | 8fdac41fb02383d5372b17d782d9be9e18d6dadb | [] | no_license | imanuelcostigan/predrcats | 154820621a92e63a804e9139b31a45cf9840756d | bcd7b3ab2f432d3393969f9091e47fb3c1fcd5a1 | refs/heads/master | 2021-01-21T14:25:20.304571 | 2016-07-02T05:07:03 | 2016-07-02T05:07:03 | 56,373,950 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 465 | rd | is_time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/others.R
\name{is_time}
\alias{is_POSIXct}
\alias{is_POSIXlt}
\alias{is_time}
\title{Is a time (Date-Time)?}
\usage{
is_time(x)
is_POSIXct(x)
is_POSIXlt(x)
}
\arguments{
\item{x}{object to test}
}
\value{
\code{TRUE} if \code{x} inherits from \code{\link{POSIXt}}.
Otherwise \code{FALSE}
}
\description{
Is a time (Date-Time)?
}
\examples{
is_time(Sys.Date())
is_time(Sys.time())
}
|
f4b6d36046c399fbc5c3f0297bd217afd70800ec | e70dff828f3912a755d8a95d9b6c4b8832c2b7a1 | /scatterplots_topSanos_vs_todos_subtipos.R | 0978edf9d7d1c4bc09fc666fe5ab4294b5691b36 | [] | no_license | tadeito/scatterplots_for_aracne_networks | 9f77aaffd64e403d4c863e855ad78242ce037464 | 873c02a9898cf515b2019288e38d401f924d7e54 | refs/heads/master | 2021-01-19T01:04:08.040425 | 2016-07-25T22:59:25 | 2016-07-25T22:59:25 | 64,162,726 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,592 | r | scatterplots_topSanos_vs_todos_subtipos.R | #####################################################################
#####################################################################
## xy scatterplots for contrasts (healthy vs tumor subtype) with R ##
#####################################################################
setwd("/mnt/e/tadeo")
library("ggplot2", lib.loc = "/home/tadeo/R/libs")

# Read one expression matrix (genes x samples) and return it transposed as a
# data frame (samples x genes) so columns can be indexed by gene name.
# (The original repeated this block five times and passed a stray empty
# argument to read.table.)
read_expression <- function(path) {
  m <- read.table(path, stringsAsFactors = FALSE, header = TRUE,
                  row.names = 1, sep = "\t")
  as.data.frame(t(m))
}

# Expression matrices: healthy samples ("sanos") and each tumor subtype.
subtypes <- list(
  sanos = read_expression("aracne/matrices_by_subtype/mexp_sanos_geneonly"),
  LumA  = read_expression("aracne/matrices_by_subtype/mexp_LumA_geneonly"),
  LumB  = read_expression("aracne/matrices_by_subtype/mexp_LumB_geneonly"),
  Her2  = read_expression("aracne/matrices_by_subtype/mexp_Her2_geneonly"),
  Basal = read_expression("aracne/matrices_by_subtype/mexp_Basal_geneonly")
)

# Load the interaction list, sort by mutual-information weight (descending)
# and keep the top 100 interactions.
sanos_toplist <- read.table("aracne/sanos/sanos_sif.txt", header = TRUE,
                            sep = "\t", stringsAsFactors = FALSE)
sanos_toplist <- sanos_toplist[rev(order(sanos_toplist$weight)), ]
sanos_top100 <- sanos_toplist[1:100, ]

# One scatterplot per interaction, overlaying all sample groups.
pdf("scatterplots__Sanos_top100_vs_todos.pdf")
for (i in seq_len(nrow(sanos_top100))) {
  gene_x <- sanos_top100[i, 1]
  gene_y <- sanos_top100[i, 2]
  # Stack the two genes' expression values across all groups in long format,
  # instead of one geom_point() layer per group with print() calls inside aes().
  plot_data <- do.call(rbind, lapply(names(subtypes), function(grp) {
    data.frame(x = subtypes[[grp]][[gene_x]],
               y = subtypes[[grp]][[gene_y]],
               group = grp,
               stringsAsFactors = FALSE)
  }))
  print(
    ggplot(plot_data, aes(x = x, y = y, colour = group)) +
      # alpha is a constant, so it belongs outside aes(); mapping it inside
      # aes() (as the original did) creates a spurious alpha legend.
      geom_point(alpha = 0.3) +
      xlab(gene_x) +
      ylab(gene_y) +
      ggtitle(paste("Valores de expresión conjunta MI=",
                    sanos_top100[i, 3], "top", i))
  )
}
dev.off()
|
2c862f3e8f11c741e2ced789841482cf1c19850e | b9a7317a4f83ec4d51f00cc574c7e492e5e5659f | /R/data.grid.R | e4cc9718fb98e32d4fb7efd6cbc71639dff6a852 | [] | no_license | rubenfcasal/npsp | 98120f2d1196e1f96941d2a874b41fcbf5fd9694 | 9655e881102c642219cb792607de93062bf138a2 | refs/heads/master | 2023-05-02T01:14:01.909236 | 2023-04-22T09:59:19 | 2023-04-22T09:59:19 | 64,307,277 | 5 | 6 | null | null | null | null | UTF-8 | R | false | false | 9,467 | r | data.grid.R | #····································································
# data.grid.R (npsp package)
#····································································
# data.grid S3 class and methods
# coords.data.grid(x)
# coordvalues.data.grid(x)
# dimnames.data.grid(x)
# dim.data.grid(x)
# interp.data.grid()
# as.data.grid S3 generic and methods
# as.data.grid.SpatialGridDataFrame(object, data.ind)
# as.data.frame.data.grid(x, data.ind = NULL, coords = FALSE, sp = FALSE,
# row.names = NULL, check.names = coords, ...)
# npsp-internals
# revdim(a, d)
#
# (c) R. Fernandez-Casal
#
# NOTE: Press Ctrl + Shift + O to show document outline in RStudio
#····································································
# PENDENTE:
# - exemplos
# - as.data.grid()
#····································································
#····································································
# data.grid(..., grid = NULL)
# 'Equivalent' to SpatialGridDataFrame-class of sp package
#····································································
#' Gridded data (S3 class "data.grid")
#'
#' Defines data on a full regular (spatial) grid.
#' Constructor function of the \code{data.grid}-\code{\link{class}}.
#' @aliases data.grid-class
#' @inheritParams mask.data.grid
#' @param ... vectors or arrays of data with length equal to \code{prod(grid$n)}.
#' @param grid a \code{\link{grid.par}}-\code{\link{class}} object (optional).
#' @details If parameter \code{grid.par} is not specified it is set from first argument.
#'
#' S3 "version" of the \code{\link[sp]{SpatialGridDataFrame}}-\code{\link{class}}
#' of the \pkg{sp} package.
#' @return Returns an object of \code{\link{class}} \code{data.grid}, a list with
#' the arguments as components.
#' @examples
#' # Grid parameters
#' grid <- grid.par(n = c(15,15), min = c(x = -1, y = -1), max = c(1, 1))
#' coordinates <- coords(grid)
#' plot(coordinates)
#' coordvs <- coordvalues(grid)
#' abline(v = coordvs[[1]], lty = 3)
#' abline(h = coordvs[[2]], lty = 3)
#' # Gridded data
#' y <- apply(coordinates, 1, function(x) x[1]^2 - x[2]^2 )
#' datgrid <- data.grid(y = y, grid = grid)
#' spersp(datgrid, main = 'f(x,y) = x^2 - y^2')
#' dim(datgrid)
#' all.equal(coordinates, coords(datgrid))
#' @seealso \code{\link{as.data.grid}}, \code{\link{grid.par}}, \code{\link{mask}},
#' \code{\link{binning}}, \code{\link{locpol}}.
#' @export
data.grid <- function(..., grid = NULL, window = NULL, mask = NULL,
                      set.NA = FALSE, warn = FALSE) {
#····································································
  args <- list(...)
  nargs <- length(args)
  # If no grid is supplied, derive the grid parameters from the dimensions
  # of the first data argument (which must then be an array).
  if ( is.null(grid) ) {
    n <- dim( args[[1]] )
    if (is.null(n))
      stop("argument 'grid' (or array data) must be provided.")
      # stop("cannot derive grid parameters from data!")
    grid <- grid.par(n, min = rep(1,length(n)), max = n)
  }
  if (!inherits(grid, "grid.par"))
    stop("argument 'grid' must be of class (or extending) 'grid.par'.")
  # Let's go ...
  n <- grid$n
  # Keep only the data arguments whose total length matches the grid size;
  # anything else is dropped with a warning below.
  index <- which(sapply(args, length) == prod(n))
  if(length(index)==0)
    stop("no data with length equal to 'prod(grid$n)'")
  if(length(index)!=nargs) # "not all arguments have the same length"
    warning("some data with length not equal to 'prod(grid$n)' (ignored)")
  # NOTE: (translated) there is probably a better way to do the following...
  dimres <- if(grid$nd > 1) n else NULL # drop dimension for 1d grid
  result <- args[index]
  seqres <- seq_along(result)
  # Default component names y1, y2, ... when the caller supplied none.
  if (is.null(names(result))) names(result) <- paste("y", seqres, sep="")
  # Reshape every data component to the grid dimensions (NULL for 1d).
  for (i in seqres) dim(result[[i]]) <- dimres
  # (translated) Finish building the object.
  result$grid <- grid
  oldClass(result) <- "data.grid"
  # Optionally restrict the data to a window/mask (delegated to mask()).
  if(!is.null(window)||!is.null(mask))
      result <- mask(result, mask = mask, window = window,
                     set.NA = set.NA, warn = warn)
  return(result)
#····································································
} # data.grid
#····································································
# Converts a \link[sp:00sp]{sp} gridded objects to a npsp `data.grid` object
#' data.grid-class methods
#'
#' S3 class \code{\link{data.grid}} methods.
#' @param object (gridded data) used to select a method.
#' @param ... further arguments passed to \code{\link{data.grid}}.
#' @seealso \code{\link{data.grid}}.
#' @return \code{as.data.grid} returns a \code{\link{data.grid}} object.
#' @export
#····································································
# S3 generic for 'as.data.grid': dispatches on the class of 'object';
# further arguments are forwarded to the selected method.
as.data.grid <- function(object, ...) UseMethod("as.data.grid")
#····································································
#' @rdname as.data.grid
#' @method as.data.grid SpatialGridDataFrame
# @param data.ind integer or character vector with the indexes or names of the components.
#' @export
# Convert an sp::SpatialGridDataFrame to a data.grid object.
# 'data.ind' selects which columns of object@data to keep (default: all).
as.data.grid.SpatialGridDataFrame <- function(object, data.ind = NULL, ...) {
#····································································
  gridpar <- gridparameters(object)
  n <- gridpar$cells.dim
  if (is.null(data.ind)) data.ind <- 1:ncol(object@data)
  # (translated) Conversion to a 2D data.grid: each data column is reshaped
  # to the grid dimensions and its second dimension reversed (same effect as
  # the older matrix(...)[ , n[2]:1] form kept below for reference).
  # result <- lapply(object@data[data.ind], function(x) matrix(x, nrow = n[1], ncol = n[2])[ , n[2]:1])
  result <- lapply(object@data[data.ind], function(d) revdim(array(d, dim = n), 2))
  # Grid parameters taken straight from sp's gridparameters() output.
  result$grid <- with(gridpar, grid.par(n = n, min = cellcentre.offset, lag = cellsize))
  # oldClass(result) <- "data.grid"
  # Delegate object construction (and validation) to data.grid().
  result <- do.call(data.grid, c(result, ...))
  return(result)
}
#····································································
#' @rdname as.data.grid
#' @method as.data.frame data.grid
#' @param x a \code{data.grid} object.
#' @param data.ind integer or character vector with the indexes or names of the components.
#' @param coords logical; if \code{TRUE}, the (spatial) coordinates of the object are added.
#' @param sp logical; if \code{TRUE}, the second dimension of the data is reversed
#' (as it is stored in \pkg{sp} package).
#' @param row.names \code{NULL}, column to be used as row names, or vector giving the row names for the data frame.
#' @param optional logical; Not currently used (see \code{\link{as.data.frame}}).
#' @param check.names logical; if \code{TRUE}, the names of the variables in the data
#' frame are checked and adjusted if necessary.
# coords = TRUE, ns <- names(coords) if(any(ns %in% names) ns <- paste("coord", ns, sep=".")
#' @return \code{as.data.frame} returns a data frame.
#' @export
# Coerce a data.grid object to a data frame: one row per grid node, one
# column per selected data component (plus coordinate columns if requested).
as.data.frame.data.grid <- function(x, row.names = NULL, optional = FALSE, data.ind = NULL, coords = FALSE, sp = FALSE,
          check.names = coords, ...){
#····································································
  # A botch...
  # A component qualifies as data if it is not a list and its length matches
  # the number of grid nodes (this excludes the 'grid' component).
  index <- !sapply(x, is.list) & (sapply(x, length) == prod(dim(x)))
  # (translated) check dimensions...
  if (is.null(data.ind)) {
    data.ind <- which(index)
  } else {
    if (!all(index[data.ind], na.rm = TRUE)) stop("Invalid argument 'data.ind'")
  }
  res <- x[data.ind]
  # sp = TRUE: reverse the second dimension so the ordering matches how the
  # sp package stores gridded data (inverse of as.data.grid.SpatialGridDataFrame).
  if(sp && (length(dim(x)) > 1))
      res <- lapply(res, function(dat) revdim(array(dat, dim = dim(x)),2))
  res <- lapply(res, as.vector)
  # Optionally prepend the (spatial) coordinates of each grid node.
  if (coords)
    res <- data.frame(coords(x), res, row.names = row.names, check.names = check.names)
  else
    res <- data.frame(res, row.names = row.names)
  return(res)
#····································································
} # as.data.frame.data.grid
#····································································
# [R] Reversing one dimension of an array, in a generalized case
# https://stat.ethz.ch/pipermail/r-help/2017-June/thread.html#447298
# Jeff Newmiller jdnewmil at dcn.davis.ca.us
#' @rdname npsp-internals
#' @keywords internal
#····································································
# Reverse array 'a' along dimension 'd', leaving all other dimensions intact.
revdim <- function(a, d) {
  dims <- dim(a)
  # Build one index vector per dimension: reversed for 'd', identity otherwise.
  idxs <- vector("list", length(dims))
  for (k in seq_along(dims)) {
    idxs[[k]] <- if (k == d) seq.int(dims[k], 1, -1) else seq.int(dims[k])
  }
  # Equivalent to a[idxs[[1]], idxs[[2]], ...] without hard-coding the rank.
  do.call(`[`, c(list(a), idxs))
}
|
3bf76617c5fe810c7ec023ea082d2ad23090c9cb | 5f74a6d3a3ac0aa6eb0a3cf1711875ccbddd5a28 | /logistic_mtcars.R | 0bffaf4377ad737994c2f291e5ce0ad194ca2a9b | [] | no_license | avadhootdeshpande/DataScience | 074398ce7091d628e155c82fe67f015ec226992d | a832dc6d3a73a39c9006abb381e52df754027609 | refs/heads/master | 2020-06-01T04:23:50.429618 | 2019-06-09T13:56:32 | 2019-06-09T13:56:32 | 190,634,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,385 | r | logistic_mtcars.R | #Predict if a vehicle is automatic or manual - mtcars
# Goal: predict transmission type (am: 0 = automatic, 1 = manual) from the
# mtcars data via logistic regression, comparing two variable-selection
# strategies (VIF screening vs stepwise AIC).
logmtcars <- mtcars
head(logmtcars)
library(caret)
summary(logmtcars)

# Reproducible 80/20 train/test split, stratified on the response.
set.seed(111)
traininput <- createDataPartition(logmtcars$am, p = 0.8, list = FALSE)
Trainmtcars <- logmtcars[traininput, ]
Testmtcars <- logmtcars[-traininput, ]
# Quick check of how many manual cars landed in each split.
sum(Trainmtcars$am)
sum(Testmtcars$am)

# Method 1: screen out multicollinear predictors with VIF (usdm package),
# excluding column 9 (am, the response).
library(usdm)
names(logmtcars)
vif(logmtcars[, -9])
vifstep(logmtcars[, -9], th = 5)
# vifstep() retained (VIF < 5):
#   mpg 4.72, drat 2.88, qsec 3.43, vs 3.84, gear 3.69, carb 3.55

# Model on the retained variables.
# maxit = 100 silences the "algorithm did not converge" warning; the
# "fitted probabilities numerically 0 or 1" warning signals (quasi-)complete
# separation on this tiny training set, which also makes the p-values from
# summary() unreliable.
m1 <- glm(am ~ mpg + drat + qsec + vs + gear + carb, data = Trainmtcars,
          family = binomial(link = "logit"), maxit = 100)
summary(m1)
# and getting incorrect p value

# Full model with every predictor.
alldata <- glm(am ~ ., data = Trainmtcars, family = binomial(link = "logit"))
# NOTE(review): the original re-ran summary(m1) here; presumably
# summary(alldata) was intended -- kept as-is to preserve behavior. Confirm.
summary(m1)

# Method 2: stepwise selection by AIC starting from the full model.
library(MASS)
m2 <- stepAIC(alldata, direction = "both")
# (duplicate 'cyl' term removed from the original formula; glm() drops
# repeated terms anyway, so the fitted model is identical)
m2_1 <- glm(formula = am ~ vs + carb + disp + wt + mpg + qsec + cyl + drat +
              hp + gear,
            family = binomial(link = "logit"), data = Trainmtcars)
summary(m2_1)
|
375941f2844a58f943dae1034328095cd11532b9 | 0df74bd962e4b7158f3ca513f2336e54fdb4007b | /man/lik_ratios.Rd | f0c6b3db55265a306f17b2ac62d016444d71586a | [
"CC0-1.0",
"CC-BY-4.0"
] | permissive | cboettig/earlywarning | 8491e60225200c75dd2903fd27fa60a04f8d3a25 | ae00bfc1748f861c942a920785d4f34963412775 | refs/heads/master | 2021-01-25T10:06:29.025812 | 2017-10-17T04:26:47 | 2017-10-17T04:26:47 | 3,328,506 | 6 | 4 | null | null | null | null | UTF-8 | R | false | false | 494 | rd | lik_ratios.Rd | \name{lik_ratios}
\alias{lik_ratios}
\title{extract the likelihood ratios under both null and test simulations}
\usage{
lik_ratios(reps)
}
\arguments{
\item{reps}{a list of outputs from compare()}
}
\value{
a data.frame of columns "simulation" (null or test),
"value" (deviance between models on that simulation), and
"rep", a replicate id number.
}
\description{
extract the likelihood ratios under both null and test
simulations
}
\seealso{
\link{compare}, \link{roc_data}
}
|
0d0ed38a4f853b3a3741202bcf51aeda90f678a1 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609892604-test.R | d22b93e62da8cdca8d0680ab44593998812133df | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 117 | r | 1609892604-test.R | testlist <- list(type = 1193814627L, z = 4.44387685038413e+252)
# Auto-generated (libFuzzer-derived) test case: call esreg's internal G1_fun
# with the decoded argument list ('type' selector + extreme numeric 'z') and
# inspect the result's structure via str() on the next line.
result <- do.call(esreg::G1_fun,testlist)
str(result) |
7633bc90773ff91c0c9a1f45ac1c43ea75e9118a | 1dc421a198c86a888f2029ce96d09924aac6cab2 | /R/fbed.glmm.reg.R | 421ab4cee84a780c0a14dddd47fb2a02d537032b | [] | no_license | JokerWhy233/MXM | 519b8216a1a2a965a8e43531fd52fb0a7c460f86 | 035673338ed6647239a4859981918ddf3b8ce38e | refs/heads/master | 2021-08-22T14:54:05.597447 | 2017-11-30T10:41:38 | 2017-11-30T10:41:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,157 | r | fbed.glmm.reg.R | fbed.glmm.reg <- function(y, x, id, alpha = 0.05, wei = NULL, K = 0, method = "LR", gam = NULL, backward = TRUE, type = "gaussian") {
#check for NA values in the dataset and replace them with the variable median or the mode
if ( any( is.na(x) ) ) {
#dataset = as.matrix(dataset);
warning("The dataset contains missing values (NA) and they were replaced automatically by the variable (column) median (for numeric) or by the most frequent level (mode) if the variable is factor")
x <- apply( x, 2, function(x){ x[which(is.na(x))] = median(x, na.rm = TRUE) ; return(x) } )
}
if ( method =="LR" ) {
if (type == "gaussian") {
result <- fbed.lmm(y = y, x = x, id = id, alpha = alpha, wei = wei, K = K)
} else {
result <- fbed.glmm(y = y, x = x, id = id, alpha = alpha, wei = wei, K = K, type = type)
}
result$back.rem <- 0
result$back.n.tests <- 0
if ( backward ) {
if (result$info[1, 1] > 0) {
a <- glmm.bsreg(y, x[, result$res[, 1], drop = FALSE], id, threshold = alpha, wei = wei, type = type)
if ( typeof(a) == "list" ) {
result$back.rem <- result$res[a$info[, 1], 1]
back.n.tests <- sum( dim(result$res)[1] : dim(a$mat)[1] )
sel <- result$res[a$mat[, 1], 1]
stat <- a$mat[, 3]
pval <- a$mat[, 2]
result$res <- cbind(sel, stat, pval)
result$back.n.tests <- back.n.tests
result$runtime <- result$runtime + a$runtime
} else {
back.rem <- 0
back.n.tests <- 0
result$back.rem <- back.rem
result$back.n.tests <- back.n.tests
result$runtime <- result$runtime
} ## end if ( typeof(a) == "list" )
} ## end if (result$info[1, 1] > 0)
} ## end if ( backward )
} else {
if (type == "gaussian") {
result <- ebic.fbed.lmm(y, x, id, gam = gam, wei = wei, K = K)
} else {
result <- ebic.fbed.glmm(y, x, id, gam = gam, wei = wei, K = K, type = type)
}
result$back.rem <- 0
result$back.n.tests <- 0
if ( backward ) {
if (result$info[1, 1] > 0) {
a <- ebic.glmm.bsreg(y, x[, result$res[, 1], drop = FALSE], id, wei = wei, gam = gam, type = type)
if ( typeof(a) == "list" ) {
back.n.tests <- sum( dim(result$res)[1] : length(a$mat[, 1]) )
result$back.rem <- result$res[a$info[, 1], 1]
sel <- result$res[ a$mat[, 1], 1]
val <- a$mat[, 2]
result$res <- cbind(sel, val)
colnames(result$res) <- c("Vars", "eBIC")
result$back.n.tests <- back.n.tests
result$runtime <- result$runtime + a$runtime
} else {
back.rem <- 0
back.n.tests <- 0
result$back.rem <- back.rem
result$back.n.tests <- back.n.tests
result$runtime <- result$runtime
}
} ## end if (result$info[1, 1] > 0)
} ## end if ( backward )
} ## end if ( method == "LR" )
result
}
|
d1c1cfb653a1c3db5a8bbcae145a7871265086f4 | 60884ab1db4935c61b405bffc7524d6d47ba8cc1 | /man/update_dc.Rd | 40cf4c488dc0335ee8b14edb6777591fb16de90e | [
"MIT"
] | permissive | chintanp/wsdot_evse_update_states | 82ac74bbe24226487ff3cdac6908537d72b98d5e | e959ace59d69225b8235799a4979c11b9627365a | refs/heads/master | 2021-08-31T08:33:08.616016 | 2021-04-27T19:52:25 | 2021-04-27T19:52:25 | 211,201,432 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 324 | rd | update_dc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_dc.R
\name{update_dc}
\alias{update_dc}
\title{Find if there is a destination charger}
\usage{
update_dc(a_id = 1)
}
\arguments{
\item{a_id}{analysis_id}
}
\value{
}
\description{
Find if there is a destination charger
}
|
699bf3aaef84e29c9f48ba665152e520dbad1775 | 35e707c55cff22002c6c66f967837736e3e0a0d8 | /tests/testthat/testcor_stressed.R | 5dee86bbd6e6a490733107a29c5ccd2a2cc5fc7e | [] | no_license | spesenti/SWIM | 9f4416b990e9bc82109b87b849ffd208f8bfe77f | 4e9cd0b2b4e4ad36e0798b1d67fdcd03c2d7114a | refs/heads/master | 2022-05-04T10:16:25.964880 | 2022-01-10T12:41:16 | 2022-01-10T12:41:16 | 185,445,679 | 5 | 2 | null | 2022-01-09T23:04:06 | 2019-05-07T17:08:21 | HTML | UTF-8 | R | false | false | 3,383 | r | testcor_stressed.R | context("Correaltion")
# Unit tests for SWIM::cor_stressed(): builds two nested VaR stresses on a
# simulated 3-column data set, then checks the structure and values of the
# stressed correlation matrices for all three correlation methods.
library("SWIM")
################ stress ################
# Simulated sample: one lognormal, one gamma and one normal column.
set.seed(0)
x <- as.data.frame(cbind(
  "log.normal" = rlnorm(1000),
  "gamma" = rgamma(1000, shape = 2),
  "normal" = rnorm(1000)))
# Two successive VaR stresses; the second is applied on top of the first.
res <- stress(type = "VaR", x = x, alpha = 0.9, q_ratio = 1.2)
res <- stress(type = "VaR", x = res, alpha = 0.95, q_ratio = 1.05)
# Columns under test and the four method/weight combinations.
xCol = c(1, 2)
s1 <- cor_stressed(res, xCol = xCol, wCol = 1, method = "Pearson", base = TRUE)
s2 <- cor_stressed(res, xCol = xCol, wCol = "all", method = "Kendall", base = TRUE)
s3 <- cor_stressed(res, xCol = xCol, wCol = "all", method = "Spearman", base = TRUE)
s4 <- cor_stressed(res, xCol = "all", wCol = "all", method = "Pearson", base = TRUE)
################ stress ################
# output test: result is a named list with one entry per scenario ("base"
# plus one per requested stress).
# NOTE(review): both test_that() blocks share the description "output";
# consider distinct descriptions for clearer failure reports.
test_that("output", {
  # expect_warning(cor_stressed(x, xCol = xCol, wCol = 1, method = "peason"),
  #                "Method must be one of pearson, spearman and kendall")
  expect_true(is.list(s1))
  expect_true(is.list(s2))
  expect_true(is.list(s3))
  expect_true(is.list(s4))
  expect_named(s1, c("base", "stress 1"))
  expect_named(s2, c("base", "stress 1", "stress 2"))
  expect_named(s3, c("base", "stress 1", "stress 2"))
  expect_named(s4, c("base", "stress 1", "stress 2"))
})
# model test: each scenario entry is a square correlation data frame with a
# unit diagonal, and the "base" entry reproduces stats::cor() exactly.
test_that("output", {
  expect_true(is.data.frame(s1$"stress 1"))
  expect_true(is.data.frame(s1$"base"))
  expect_equal(rep(length(xCol), 2), dim(s1$"stress 1"))
  expect_equal(rep(length(xCol), 2), dim(s1$"base"))
  expect_true(is.data.frame(s2$"stress 1"))
  expect_true(is.data.frame(s2$"stress 2"))
  expect_true(is.data.frame(s2$"base"))
  expect_equal(rep(length(xCol), 2), dim(s2$"stress 1"))
  expect_equal(rep(length(xCol), 2), dim(s2$"stress 2"))
  expect_equal(rep(length(xCol), 2), dim(s2$"base"))
  expect_true(is.data.frame(s3$"stress 1"))
  expect_true(is.data.frame(s3$"stress 2"))
  expect_true(is.data.frame(s3$"base"))
  expect_equal(rep(length(xCol), 2), dim(s3$"stress 1"))
  expect_equal(rep(length(xCol), 2), dim(s3$"stress 2"))
  expect_equal(rep(length(xCol), 2), dim(s3$"base"))
  expect_true(is.data.frame(s4$"stress 1"))
  expect_true(is.data.frame(s4$"stress 2"))
  expect_true(is.data.frame(s4$"base"))
  expect_equal(c(3,3), dim(s4$"stress 1"))
  expect_equal(c(3,3), dim(s4$"stress 2"))
  expect_equal(c(3,3), dim(s4$"base"))
  # Check self variation (diagonal must be exactly 1 in every scenario).
  expect_equal(rep(1, 2), unname(diag(data.matrix(s1$"stress 1"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s1$"base"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s2$"stress 1"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s2$"stress 2"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s2$"base"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s3$"stress 1"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s3$"stress 2"))))
  expect_equal(rep(1, 2), unname(diag(data.matrix(s3$"base"))))
  expect_equal(rep(1, 3), unname(diag(data.matrix(s4$"stress 1"))))
  expect_equal(rep(1, 3), unname(diag(data.matrix(s4$"stress 2"))))
  expect_equal(rep(1, 3), unname(diag(data.matrix(s4$"base"))))
  # check baseline against stats::cor for each method
  expect_equal(cor(x[, xCol]), data.matrix(s1$"base"))
  expect_equal(cor(x[, xCol], method = "kendall"), data.matrix(s2$"base"))
  expect_equal(cor(x[, xCol], method = "spearman"), data.matrix(s3$"base"))
  expect_equal(cor(x), data.matrix(s4$"base"))
  }
)
|
6d425a52f9230a94b9ff3029cd7c776dbf370f25 | da9cdef9c1dab99c3b805e9560add92fdb9fdca9 | /WizualizacjaDanych/app.R | 048f7642e49234d3638df5cf3c0c125815385254 | [] | no_license | aleksandramiesiac/BigDataProject-QWERTY- | f4875f2e607f89b5ba6b83bc79c52b6555a0d933 | e9e4c1de7e1d3dd018e1403e9779f61e0d53ba5b | refs/heads/master | 2022-01-12T21:48:36.487353 | 2019-06-05T00:37:43 | 2019-06-05T00:37:43 | 180,004,780 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,615 | r | app.R | ### Packages
# Packages used by the dashboard.
# NOTE(review): dplyr is loaded twice (harmless but redundant).
library(shinydashboard)
library(shiny)
library(dplyr)
library(ggplot2)
library(ggthemes)
library(RColorBrewer)
library(dplyr)
library(scales)
library(stringi)
#-----------------------------------------------------------------------------------------------------------------------
### Data
## Data for plots -- read at app start-up from files relative to the app
## directory (semicolon-separated by default for read.csv2, '.' as decimal).
hotData <- read.csv2("hot_flights", stringsAsFactors = FALSE, dec = ".")
top10AveragePricesData <- read.csv2("cheapest_connections.csv", stringsAsFactors = FALSE, sep = ",", dec = ".")
historyData <- read.csv2("smallest_prices", stringsAsFactors = FALSE, dec = ".")
# airlinesAveragePricesData <- read.csv2("", stringsAsFactors = FALSE, dec = ".")
## Choices lists for the sidebar selectInput widgets, sorted alphabetically.
depart_choices <- unique(historyData$Country_from)
depart_choices <- depart_choices[order(depart_choices)]
destination_choices <- unique(historyData$Country_to)
destination_choices <- destination_choices[order(destination_choices)]
# Airlines and purchase hours are hard-coded; the commented-out lines show the
# intended data-driven versions once airlinesAveragePricesData is available.
airline_choices <- c("Laudamotion", "easyJet", "Wizz Air", "Eurowings")
# airline_choices <- unique(airlinesAveragePricesData$Airline)
airline_choices <- airline_choices[order(airline_choices)]
hour_choices <- c(2, 20)
# hour_choices <- unique(airlinesAveragePricesData$Airline)
hour_choices <- hour_choices[order(hour_choices)]
journey_choices <- seq(4, 8)
# hour_choices <- unique(airlinesAveragePricesData$Airline)
journey_choices <- journey_choices[order(journey_choices)]
#-----------------------------------------------------------------------------------------------------------------------
### Dashboard
# UI: shinydashboard page with a fixed sidebar of filter inputs and a body
# holding four plot slots (one commented-out fifth slot kept for later).
ui <- dashboardPage(skin = "purple",
  dashboardHeader(title = "Fly cheap"),
  dashboardSidebar(
    sidebarMenu(
      # Keeps the sidebar fixed while the body scrolls.
      tags$script(HTML("$('body').addClass('fixed');")),
      title = "Analysis panel",
      selectInput("departure", label = "Departure", choices = depart_choices, selected = "Poland", width = 220),
      selectInput("destination", label = "Destination", choices = destination_choices, width = 220),
      selectInput("airline", label = "Airline", choices = airline_choices, width = 220),
      selectInput("hour", label = "Hour of buying a ticket", choices = hour_choices, width = 220),
      selectInput("journey", label = "Journey time (in days)", choices = journey_choices, width = 220),
      width = 3,
      height = 450
    )
  ),
  dashboardBody(
    # Row 1: hot flights + cheapest connections.
    fluidRow(
      box(
        title = "Hot flights!",
        status = "warning",
        solidHeader = TRUE,
        plotOutput("hotPlot"),
        width = 6
      ),
      box(
        title = "Smallest average prices between cities",
        status = "success",
        solidHeader = TRUE,
        plotOutput("top10Plot"),
        width = 6
      )
    ),
    # Row 2: price history + model prediction.
    fluidRow(
      box(
        title = "Prices history",
        status = "primary",
        solidHeader = TRUE,
        plotOutput("historyPlot"),
        width = 6
      ),
      box(
        title = "Prices prediction",
        status = "info",
        solidHeader = TRUE,
        plotOutput("pricePredictionPlot"),
        width = 6
      )
    )#,
    # fluidRow(
    #   box(
    #     title = "Airlines average prices",
    #     status = "success",
    #     solidHeader = TRUE,
    #     #plotOutput("airlinePlot"),
    #     width = 6
    #   )
    # )
  )
)
# Server: four reactive plots driven by the sidebar inputs and the data
# frames loaded at the top of the file.
server <- function(input, output) {
  # Cheapest flight per scrape date, last 7 dates, with origin/destination
  # labels annotated above each point.
  output$hotPlot <- renderPlot(
    {
      df <- hotData %>%
        group_by(Scrap_date) %>%
        slice(which.min(Price)) %>%
        select(Scrap_date, Country_from, Country_to, Price) %>%
        tail(7)
      labls <- apply(df, MARGIN = 1, function(x) { return(paste0("From : ", x[2], "\nTo : ", x[3]))})
      df %>%
        ggplot(aes(x = Scrap_date, y = Price)) +
        geom_line(group = 1, col = "#3182bd", size = 1) +
        geom_point(col = "#084594", size = 2) +
        xlab("Day") + ylab("Price [EUR]") +
        ggtitle("Hot flights prices") +
        theme(plot.title = element_text(hjust = 0.5, size = 18, face = "bold"),
              axis.title = element_text(size = 16, face = "bold"),
              axis.text.x = element_text(size = 12, vjust = 0.6, angle = 90),
              axis.text.y = element_text(size = 12),
              plot.background = element_blank(),
              panel.background = element_blank(),
              panel.grid.major = element_line(color = "#bdbdbd"),
              panel.grid.minor = element_line(color = "#d9d9d9")) +
        annotate(geom = "text", x = seq(nrow(df)) - 0.2, y = df$Price + 0.4, label = labls)
    })
  # Horizontal bar chart of the 10 cheapest city-pair connections.
  # City names are extracted as the leading alphabetic run of columns 5/6.
  output$top10Plot <- renderPlot(
    {
      top10AveragePricesData$City1 <- stri_extract_first(top10AveragePricesData$City_1, regex = "^[a-zA-Z]+")
      top10AveragePricesData$City2 <- stri_extract_first(top10AveragePricesData$City_2, regex = "^[a-zA-Z]+")
      top10AveragePricesData$Line <- apply(top10AveragePricesData, MARGIN = 1, function(x) { return(paste0(x[5], " - ", x[6]))})
      top10 <- head(top10AveragePricesData, 10)
      ggplot(data = top10, aes(x = reorder(Line, -Avg_Price), y = Avg_Price)) +
        geom_bar(stat = "identity") + coord_flip() +
        geom_label(aes(label = paste0(sprintf("%0.2f", round(top10$Avg_Price, digits = 2)))), fill = "white", hjust = -0.15) +
        xlab("Line") + ylab("Price [EUR]") +
        # NOTE(review): ylim is hard-coded to 63; bars above that would be
        # silently dropped by ggplot -- confirm data range.
        ylim(c(0, 63)) +
        ggtitle("The 10 smallest average ticket prices in the last 2 weeks") +
        theme(plot.title = element_text(size = 18, face = "bold"),
              axis.title = element_text(size = 16, face = "bold"),
              axis.text = element_text(size = 12),
              plot.background = element_blank(),
              panel.background = element_blank(),
              panel.grid.major = element_line(color = "#bdbdbd"),
              panel.grid.minor = element_line(color = "#d9d9d9"))
    })
  # Mean historic price per scrape date for the selected departure/destination.
  output$historyPlot <- renderPlot(
    {
      depart <- input[["departure"]]
      dest <- input[["destination"]]
      historyData <- historyData %>%
        filter(Country_from == depart & Country_to == dest) %>%
        group_by(Scrap_date) %>%
        summarise(Mean = mean(Price))
      historyData %>%
        ggplot(aes(x = Scrap_date, y = Mean)) +
        geom_line(group = 1, col = "#3182bd", size = 1) +
        geom_point(col = "#084594", size = 2) +
        xlab("Day") + ylab("Price [EUR]") +
        ggtitle(paste0("Historic prices: ", depart, " - ", dest)) +
        theme(plot.title = element_text(hjust = 0.5, size = 18, face = "bold"),
              axis.title = element_text(size = 16, face = "bold"),
              axis.text.x = element_text(size = 12, vjust = 0.6, angle = 90),
              axis.text.y = element_text(size = 12),
              plot.background = element_blank(),
              panel.background = element_blank(),
              panel.grid.major = element_line(color = "#bdbdbd"),
              panel.grid.minor = element_line(color = "#d9d9d9"))
    })
  # Model-predicted price vs days-before-flight, read from a per-route CSV
  # in the "predicted" directory.
  output$pricePredictionPlot <- renderPlot(
    {
      # depart <- input[["departure"]]
      # NOTE(review): departure is hard-coded to "Poland" and the sidebar
      # input is ignored -- presumably only Poland prediction files exist;
      # confirm and restore the input-driven line above when possible.
      depart <- "Poland"
      dest <- input[["destination"]]
      file_name <- paste0(depart, "_", dest, ".csv")
      file_path <- file.path("predicted", file_name)
      pricePredictionData <- read.csv2(file_path, stringsAsFactors = FALSE, sep = ",", dec = ".")
      airline <- input[["airline"]]
      hour <- input[["hour"]]
      journey <- input[["journey"]]
      pricePredictionData %>%
        filter(Airline1_Back == airline, Scrap_time == hour, Journey_time == journey) %>%
        arrange(Days) %>%
        ggplot(aes(x = Days, y = prediction)) +
        geom_line(group = 1, col = "#3182bd", size = 1) +
        # geom_point(col = "#084594", size = 2) +
        xlab("Days before flight") + ylab("Price [EUR]") +
        ggtitle(paste0("Prices on line ", depart, " - ", dest, " for ", airline)) +
        theme(plot.title = element_text(hjust = 0.5, size = 18, face = "bold"),
              axis.title = element_text(size = 16, face = "bold"),
              axis.text = element_text(size = 12),
              plot.background = element_blank(),
              panel.background = element_blank(),
              panel.grid.major = element_line(color = "#bdbdbd"),
              panel.grid.minor = element_line(color = "#d9d9d9"))
    })
  # Disabled until airlinesAveragePricesData is wired up (see data section).
  # output$airlinePlot <- renderPlot(
  #   {
  #     airline <- input[["airline"]]
  #     
  #     airlinesAveragePricesData %>%
  #       filter(Airline == airline) %>%
  #       arrange(Day) %>%
  #       tail(20) %>%
  #       ggplot(aes(x = Day, y = Price)) +
  #       geom_line(group = 1, col = "#3182bd", size = 1) +
  #       geom_point(col = "#084594", size = 2) +
  #       xlab("Day") + ylab("Price [EUR]") +
  #       ggtitle(paste0("Average prices for ", airline)) +
  #       theme(plot.title = element_text(hjust = 0.5, size = 18, face = "bold"),
  #             axis.title = element_text(size = 16, face = "bold"),
  #             axis.text.x = element_text(size = 12, vjust = 0.6, angle = 90),
  #             axis.text.y = element_text(size = 12),
  #             plot.background = element_blank(),
  #             panel.background = element_blank(),
  #             panel.grid.major = element_line(color = "#bdbdbd"),
  #             panel.grid.minor = element_line(color = "#d9d9d9"))
  #   })
}
shinyApp(ui = ui, server = server) |
93b88ea0e8245363f413a1ecc8152be9ad5e5dbf | 260be20ea7e944dd620f48cbadc071811b5a6213 | /apriori_algorithm.R | 02e803f08d094b6dd3d58a5fd0f10cdccdbadf0d | [] | no_license | PoojithaSuryadevara/Market-Basket-Analysis-with-R | ff666f0a5555152e10a747c7cd72ddc36d4c100b | 1c9f4d5affefa86466d984b6e47011819db04535 | refs/heads/master | 2020-03-23T03:14:17.507368 | 2019-06-10T04:28:17 | 2019-06-10T04:28:17 | 141,018,460 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,761 | r | apriori_algorithm.R | #install packages
# Market-basket analysis of grocery transactions with the apriori algorithm.
# Fixes vs original: TRUE/FALSE spelled out (T/F are ordinary, reassignable
# variables), install.packages() only runs when the package is missing, and
# comment typos corrected.
if (!requireNamespace("arulesViz", quietly = TRUE)) install.packages("arulesViz")
library(arules)
library(arulesViz)

# Get and set directories.
getwd()
setwd("/users/poojitha/documents/data mining")

# Read the transaction data ("single" format: transaction id, item columns).
FDGROC <- read.transactions("/users/poojitha/documents/data mining/transactions.txt",
                            sep = ",", format = "single", cols = c(1, 2))

# Summary of transaction data and most frequent items.
FDGROC
summary(FDGROC)

# View the observations.
head(FDGROC)
tail(FDGROC)
inspect(FDGROC[1:7])

# Plot frequencies of frequent items.
itemFrequency(FDGROC[, 1:7])
itemFrequencyPlot(FDGROC, support = 0.10)
itemFrequencyPlot(FDGROC, top = 10)

# Apply the apriori algorithm and generate the rules.
FD_RULES <- apriori(FDGROC, parameter = list(support = 0.01, confidence = 0.5, minlen = 2))
inspect(FD_RULES[1:6])

# 1. Find the purchase patterns of Wine (first as consequent, then antecedent).
FD_RULES1 <- apriori(data = FDGROC,
                     parameter = list(support = 0.003, confidence = 0.09, minlen = 2),
                     appearance = list(rhs = "Wine", default = "lhs"),
                     control = list(verbose = FALSE))
FD_RULES1 <- sort(FD_RULES1, decreasing = TRUE, by = "support")
inspect(FD_RULES1[390:396])
FD_RULES1 <- apriori(data = FDGROC,
                     parameter = list(support = 0.001, confidence = 0.10, minlen = 2),
                     appearance = list(lhs = "Wine", default = "rhs"),
                     control = list(verbose = FALSE))
FD_RULES1 <- sort(FD_RULES1, decreasing = TRUE, by = "confidence")
inspect(FD_RULES1[15:20])
plot(FD_RULES1, method = "graph", interactive = TRUE)

# Find the purchase patterns of Beer.
FD_RULES1 <- apriori(data = FDGROC,
                     parameter = list(support = 0.01, confidence = 0.08),
                     appearance = list(rhs = "Beer", default = "lhs"),
                     control = list(verbose = FALSE))
FD_RULES1 <- sort(FD_RULES1, decreasing = TRUE, by = "confidence")
inspect(FD_RULES1[10:16])
FD_RULES1 <- apriori(data = FDGROC,
                     parameter = list(support = 0.09, confidence = 0.09),
                     appearance = list(lhs = "Beer", default = "rhs"),
                     control = list(verbose = FALSE))
FD_RULES1 <- sort(FD_RULES1, decreasing = TRUE, by = "support")
inspect(FD_RULES1[8:13])
plot(FD_RULES1, method = "graph", interactive = TRUE, shading = "confidence")

# 2. Purchase patterns for canned food vs fresh food.
CANNED_RULES <- apriori(data = FDGROC,
                        parameter = list(support = 0.003, confidence = 0.7),
                        appearance = list(rhs = "Canned Fruit", default = "lhs"),
                        control = list(verbose = FALSE))
summary(CANNED_RULES)
inspect(CANNED_RULES[1:7])
SUB_RULES <- subset(CANNED_RULES, subset = lhs %ain% c(" Canned Vegetables", "Fresh Fruits"))
plot(CANNED_RULES, method = "scatterplot", interactive = TRUE, shading = "confidence")
FRESH_RULES <- apriori(data = FDGROC,
                       parameter = list(support = 0.006, confidence = 0.7),
                       appearance = list(rhs = "Fresh Fruit", default = "lhs"),
                       control = list(verbose = FALSE))
summary(FRESH_RULES)
inspect(FRESH_RULES[1:7])
plot(FRESH_RULES, method = "scatterplot", interactive = TRUE, shading = "confidence")
plot(FRESH_RULES, method = "scatterplot", interactive = TRUE)

# 3. Purchase patterns for large vs small transactions.
LTRANS <- FDGROC[size(FDGROC) > 30]
inspect(LTRANS[1:5])
summary(LTRANS)
image(LTRANS, method = "scatterplot", shading = "confidence")
STRANS <- FDGROC[size(FDGROC) < 10]
inspect(STRANS[1:7])
summary(STRANS)

# 4. Purchase patterns for Aspirin.
PATTERN1 <- apriori(data = FDGROC,
                    parameter = list(support = 0.004, confidence = 0.02),
                    appearance = list(default = "rhs", lhs = "Aspirin"))
PATTERN1 <- sort(PATTERN1, decreasing = TRUE, by = "support")
inspect(PATTERN1[95:102])
summary(PATTERN1)
plot(PATTERN1, method = "scatterplot", interactive = TRUE, shading = "confidence")

# 5. Purchase patterns for Batteries.
PATTERN2 <- apriori(data = FDGROC,
                    parameter = list(support = 0.004, confidence = 0.02),
                    appearance = list(default = "rhs", lhs = "Batteries"))
PATTERN2 <- sort(PATTERN2, decreasing = TRUE, by = "confidence")
inspect(PATTERN2[130:140])
plot(PATTERN2, method = "matrix3D", interactive = TRUE, shading = "confidence")
|
abeab8df238703e6f6f2e5e8d2ce52a891ba860f | 884294cf55b43c2a577422b34128bcf1db416ccb | /plot1.R | 46aa287e2eb887414404fa1bc00c1663aa0f1b6d | [] | no_license | belgraviton/ExData_Plotting1 | ed3cf207234b50feec687fe9db9e3320fd3bc872 | 49d1a9f474e216382188b81ddae6f456e57b58f0 | refs/heads/master | 2021-01-15T12:09:14.131155 | 2014-05-10T14:46:12 | 2014-05-10T14:46:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,074 | r | plot1.R | ## This script creates png histogram of household global minute-averaged active power distribution
## Plot 1: histogram of household global minute-averaged active power for
## 1-2 Feb 2007. Expects 'household_power_consumption.txt' in the working
## directory; writes the figure to plot1.png (dev.off() follows below).

# Read every record as character first; types are converted after filtering.
power_raw <- read.table(file = 'household_power_consumption.txt', header = TRUE, sep = ';', colClasses = "character")

# Build a combined timestamp column from Date + Time and parse it.
power_raw$DateTime <- paste(power_raw$Date, power_raw$Time, sep = ' ')
power_raw$DateTime <- strptime(power_raw$DateTime, format = '%d/%m/%Y %H:%M:%S')

# Window of interest: strictly after 2007-02-01 00:00 and strictly before
# 2007-02-03 00:00, i.e. the two days 2007-02-01 and 2007-02-02.
window_start <- as.POSIXlt('2007-02-01')
window_end <- as.POSIXlt('2007-02-03')

power_feb <- subset(power_raw, DateTime > window_start & DateTime < window_end)

# The measurement column was read as character; make it numeric for hist().
power_feb$Global_active_power <- as.numeric(power_feb$Global_active_power)

# Render the histogram straight to a 480x480 PNG device.
png(filename = 'plot1.png', width = 480, height = 480)
with(power_feb, hist(Global_active_power, col = 'red', main = 'Global active power', xlab = 'Global active power (kilowatts)'))
dev.off() |
266054e207c7ca9b6fbaf756f0be3fc697aedf9c | 53078bb734f8c5a246d0112d09009ba28b50a66e | /splines/TMB/TMB_spline.R | c27249e2eff4fd86783ce714337a4c66ff6a52b0 | [
"MIT"
] | permissive | bowlerbear/sMon-insects | d30a6a9df0b174fd491173b2557bfe0f7ef67418 | 00e547b594ab2b50788c67860a28acc95057883b | refs/heads/master | 2022-01-06T09:00:32.042770 | 2022-01-04T10:55:15 | 2022-01-04T10:55:15 | 119,070,555 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,942 | r | TMB_spline.R | #read on data
#based on
#https://github.com/r-glennie/occuR
#https://r-glennie.github.io/occuR/
#remotes::install_github("r-glennie/occuR")
# NOTE(review): occuR is loaded twice, once from the default library and once
# from a cluster-specific lib.loc -- presumably one line per environment;
# keep only the one that applies where this runs.
library(occuR)
library(occuR, lib.loc="/gpfs0/home/bowler")
library(tidyverse)
#myfolder
# Visit-level detection data for one species (MTB = grid-cell / site id).
visit_data <- readRDS("splines/JAGS/listlengthDF_MTB_Sdanae.rds")
#add coordinates
# mtbqsDF (loaded below) maps MTB codes to x/y coordinates.
load("mtbqsDF.RData")
visit_data$x <- mtbqsDF$x_MTB[match(visit_data$MTB, mtbqsDF$Value)]
visit_data$y <- mtbqsDF$y_MTB[match(visit_data$MTB, mtbqsDF$Value)]
#make coordinates smaller
# Rescale coordinates to keep spline-basis computations numerically tame.
visit_data$x <- visit_data$x/10000
visit_data$y <- visit_data$y/1000000
#multi-occasion occupancy
#correlation of site occupancy over space and time is induced by allowing occupancy probability to be a smooth function of space and time
#need to make visit data with columns: site, occassion and obs
visit_data <- visit_data[,c("MTB","Year","visit","Species",
                            "singleList","yday","CoarseNaturraum","x","y")]
names(visit_data)[1:4] <- c("site","occasion","visit","obs")
#need to make vist be indexed from i to n within each site and occasion
# occuR requires visit indices 1..n within each (site, occasion) pair and
# occasions numbered consecutively from 1.
visit_data <- visit_data %>%
  group_by(site, occasion) %>%
  mutate(visit = as.numeric(as.factor(visit)))%>%
  ungroup()
visit_data$occasion <- as.numeric(as.factor(visit_data$occasion))
#need to make site data with "site" and "occasion"
# One row per site x occasion with the site-level covariates.
site_data <- unique(visit_data[,c("site","occasion","CoarseNaturraum","x","y")])
#basic model
# Intercept-only occupancy (psi) and detection (p).
m0 <- fit_occu(list(psi ~ 1, p ~ 1), as.data.table(visit_data), as.data.table(site_data))
m0
#fixed effect model
m_s <- fit_occu(list(psi ~ CoarseNaturraum, p ~ singleList),
                as.data.table(visit_data), as.data.table(site_data))
m_s
#year effects
# Separate occupancy intercept per occasion (no global intercept).
m_t <- fit_occu(list(psi ~ -1 + factor(occasion), p ~ occasion + singleList),
                as.data.table(visit_data), as.data.table(site_data))
m_t
# Collect the fixed-effect estimates on the probability scale (plogis =
# inverse logit) together with their parameter names and a plotting index.
modelSummary <- data.frame(parameter = names(m_t$res$par.fixed),
                           estimate = plogis(as.numeric(m_t$res$par.fixed)),
                           index = seq_along(m_t$res$par.fixed))
# Keep only the occupancy (psi) coefficients. BUGFIX: the original used
# `parameter = "beta_psi"`; since `parameter` is not a subset() argument it
# was silently absorbed by `...` and the FULL data frame was returned.
# The comparison must use `==`.
psiSummary <- subset(modelSummary, parameter == "beta_psi")
#extract and plot predictions
qplot(index, estimate, data = psiSummary, geom = "line")
#model with spline
#only splines with basis of “cs” or “ts” are well defined for this package
#one dimention
# 1-D cubic shrinkage spline over x for occupancy.
m_spline <- fit_occu(list(psi ~ s(x,bs = "cs"), p ~ 1),
                     as.data.table(visit_data), as.data.table(site_data))
m_spline
#two dimension
# 2-D tensor-product shrinkage spline over (x, y); k controls wiggliness.
m_spline2d <- fit_occu(list(psi ~ t2(x,y,bs = "ts", k=10), p ~ 1),
                       as.data.table(visit_data), as.data.table(site_data))
m_spline2d
#k=5 worked pretty well
#k=10 more wiggly.
#k=15 strange lines
#predictions
# Prediction grid: all sites (including unsurveyed ones), rescaled like the
# training coordinates above.
siteInfo_NAs <- readRDS("splines/siteInfo_NAs.rds")
#make coordinates smaller
siteInfo_NAs$x <- siteInfo_NAs$x_MTB/10000
siteInfo_NAs$y <- siteInfo_NAs$y_MTB/1000000
# Bootstrap predictions of occupancy at occasion 1 over the full site grid;
# first column of pred_xy$psi holds the point predictions.
pred_xy <- predict(m_spline2d,
                   as.data.table(visit_data),
                   data.table(occasion = 1, x = siteInfo_NAs$x, y = siteInfo_NAs$y),
                   nboot = 1000)
summary(pred_xy$psi)
siteInfo_NAs$preds <- pred_xy$psi[,1]
# Map of predicted occupancy.
ggplot(siteInfo_NAs) +
  geom_point(aes(x = x, y = y, colour = preds)) +
  theme_bw() +
  scale_colour_viridis_c("Occupancy")
# spatio-temporal effect
# 3-D tensor product: smooth over space (ts basis) and occasion (cs basis).
m_spline3d <- fit_occu(list(psi ~ t2(x, y, occasion, bs = c("ts", "cs"), k=c(5,2)), p ~ 1),
                       as.data.table(visit_data), as.data.table(site_data))
# NOTE(review): `gr` and `nocc` are not defined anywhere in this script --
# presumably a prediction grid and number of occasions from another session;
# this section will not run standalone.
xgr <- rep(gr[,1], nocc)
ygr <- rep(gr[,2], nocc)
tgr <- rep(1:nocc, each = nrow(gr))
# NOTE(review): this predicts from m_spline2d although m_spline3d was just
# fitted, and passes a `hab` covariate no model above uses -- looks like a
# copy-paste from another analysis; confirm which model is intended.
pred_xyt <- predict(m_spline2d, visit_data, data.table(occasion = tgr, x = xgr, y = ygr, hab = "arable"), nboot = 1000)
# Faceted occupancy maps, one panel per occasion.
ggplot(data.frame(x = xgr, y = ygr, t = tgr, psi = pred_xyt$psi)) +
  geom_tile(aes(x = x, y = y, group = t, fill = psi)) +
  theme_bw() +
  facet_wrap(~t) +
  scale_x_continuous("x") +
  scale_y_continuous("y") +
scale_fill_viridis_c("Occupancy")
|
c1f1c198e6397dfbe973d365839d7da170d089d0 | fef9155106b66ee0f21d6b6c54ac00f6b7f6b688 | /LongTerm/A375+A2058.R | 7eb92960ad13691398f1875acafd51a5267ac1b7 | [] | no_license | paudelbb/Paudel_et_al_2016 | 18dff7353562e250c449bae7e090d7273c970e81 | f27c27a0f3e89db9acc8a416e490560b1b9d6210 | refs/heads/master | 2021-01-12T09:56:39.769241 | 2018-01-18T21:16:47 | 2018-01-18T21:16:47 | 76,297,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,323 | r | A375+A2058.R | #=====================================================================================================================
# Analysis of A2058 response in PLX4720
#=====================================================================================================================
# Set the working directory, read the file, and pre-process the data
dd <- "/Users/paudelbb/Paudel_et_al_2016/LongTerm"
setwd(dd)
expName <- "20150227 A375+A2058+PLX4720.csv"
source("loadCellCount.R")
d <- loadCellCount(dd, 60)
#=======================================================================================
# Assign the cell type, different concentration, drugs etc
cell <- c('A375','A2058')
d$CellLine <- rep(cell, each=3)
d$drug <- "plx"
d$conc <- rep(8)
#=======================================================================================
dn <- data.frame()
for(l in unique(d$CellLine)){
s1 <- subset(d, d$CellLine==l)
for(i in unique(s1$conc)){
temp <- subset(s1, s1$conc==i)
temp$rep <- rep(seq(1:(length(temp$Time)/length(unique(temp$Time)))))
dn <- rbind(dn, temp)
}
}
d <- dn
#===============================================================
#===============================================================
# Baseline-normalise each well: nl2 is the change in log2 cell number (l2)
# relative to that well's own Time == 0 measurement (assumes a single
# Time == 0 row per well; values are recycled otherwise).
# Cleanup: the original initialised `dn` twice and computed an unused
# `well` vector; both redundancies are removed.
#===============================================================
dn <- data.frame()
for (i in unique(d$Well)){
  temp <- subset(d, d$Well==i)
  temp$nl2 <- temp$l2 - temp$l2[temp$Time==0]
  dn <- rbind(dn, temp)
}
d <- dn
d$Date <- "20150227"
d <- d[, c("Date","Row","Well","CellLine","Cell.Nucleus", "Time", "drug","conc", "rep", "l2", "nl2" )]
write.csv(d, file="20150227 A375_A2058_PLX4720_Processed.csv")
d <- subset(d, d$CellLine=="A2058")
write.csv(d, file="20150227 A2058_PLX4720_Processed.csv")
#=======================================================================================
s1 <- subset(d, d$CellLine=="A2058")
ggplot(data = s1, aes(x=s1$Time, y=s1$nl2, col=conc)) +
theme_bw()+
geom_smooth(span=.15, aes(group=1),data=s1, method = "loess", size=.5, alpha=0.8, col="blue")+
scale_colour_manual(values=rainbow(1)) + ylim(-2, 6)+
theme(legend.position="none") +
ggtitle("")+ labs(x="", y="") +
theme(axis.text=element_text(size=12))+theme(text = element_text(size=12))
|
0cefa9526e2af9efe362d2fcd35a631cd154a015 | 319afdb8fe0ff0556d75e3ba287f956792b67538 | /scripts/PCA-across-patients.R | c446529bdec8c796823fe58e4f012af5929e19b0 | [] | no_license | myazdani/KEGG-study | c4866a403494695fd23371a83688548b938a8123 | 223861de499f770fe43403f68bc46f7b4b9e74de | refs/heads/master | 2021-01-17T17:58:39.386078 | 2018-09-01T01:37:34 | 2018-09-01T01:37:34 | 58,764,517 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,454 | r | PCA-across-patients.R | ####
#
#
## explore reducing KEGGs by their uniqueness (as a way to get rid of a lot of zeros)
##
#
#
##
setwd("~/Documents/KEGG-IEEE-BigData/")
# Subjects x KEGG abundance table; the first column is the subject ID (X)
# and the last column the subject.type label.
df.patients = read.csv("./data/table-kegg-clean-transpose.csv", header = TRUE, stringsAsFactors = FALSE)
numeric.df = df.patients[,-c(1, ncol(df.patients))]
# Keep only KEGGs taking many distinct values (> 55), i.e. drop the
# mostly-zero columns.
num.unique = apply(numeric.df, 2, FUN = function(x) length(unique(x)))
numeric.df.reduced = numeric.df[,which(num.unique > 55)]
## naive imputation
#pca = prcomp(log10(1e-9 + as.matrix(numeric.df)))
## prep random imputations:
##
# Lognormal imputation noise parameterised by target mean m and sd s.
# NOTE(review): with s = 0 the lognormal collapses to the constant m, so the
# "random" imputation is currently a deterministic pseudocount of 1e-9.
m <- 1e-9
s <- 0
location <- log(m^2 / sqrt(s^2 + m^2))
shape <- sqrt(log(1 + (s^2 / m^2)))
# NOTE(review): 63 hard-codes the number of subjects -- should equal
# nrow(numeric.df.reduced); revisit if the input table changes.
draws <- rlnorm(n=63*ncol(numeric.df.reduced), location, shape)
d <- matrix(draws, nrow = 63, byrow = TRUE)
data_imputed = d + as.matrix(numeric.df.reduced)
# Row-normalise to relative abundances, then PCA on the log10 values.
data_imputed_n = data_imputed/apply(data_imputed, 1, sum)
pca = prcomp(log10(data_imputed_n))
# Attach labels/IDs back to the PC scores for plotting.
pca.res = cbind(df.patients$subject.type, df.patients$X, as.data.frame(pca$x))
names(pca.res)[2] = "subject.ID"
names(pca.res)[1] = "subject.type"
# Scatter of subjects on the first two PCs, coloured by subject type.
# Style fix: the original ended the ggplot chain with a right assignment
# (`-> p`), which is easy to miss when reading; use a conventional left
# assignment instead. Behaviour is unchanged.
p <- ggplot(pca.res, aes(x = PC1, y = PC2, colour = subject.type, label = subject.ID)) +
  geom_point(size = 3) +
  ggtitle(paste(ncol(data_imputed_n), "KEGGs with imputation mean = ", m, "and SD = ", s)) +
  theme(legend.title=element_blank())
print(p)
ggplotly(p)
# Publish the interactive version to plotly (requires account credentials).
api_create(p, filename = paste0("Keggs_pca_across_subjects_imputation_mean_", m, "_sd_", s),
fileopt = "overwrite", sharing = "public")
|
5f0e246355a2da5759cfd23aefb82dc96f789347 | efcda1097e024f543e0359b68aa349187ccedd46 | /Brainwaves-Societe-Generale/ma,knn,eval.R | 1c12d4c35e2d561659b32467fca302a025c9427b | [] | no_license | neelnj/Data-Science-Competitions | c473c875c0724de6833a02242288bb9cb86cc055 | 0c926347957162ff2c344330eee854f9bd4a59c2 | refs/heads/master | 2020-07-04T03:41:01.341726 | 2017-03-23T09:52:47 | 2017-03-23T09:52:47 | 74,213,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,532 | r | ma,knn,eval.R | library(class)
# train.csv layout: first column an ID, last column the class label, the
# columns in between a per-row series of feature values.
E=read.csv("train.csv")
cl=E[,ncol(E)]
cl=as.factor(cl)
E=E[,-ncol(E)]
E=E[,-1]
#E=E[,fs]
#ma <- function(x,n){filter(x,2*c(1:n)/(n*(n+1)), sides=1)}
# Trailing (one-sided) moving average with window n, applied column-wise.
# NOTE(review): relies on stats::filter; breaks if dplyr is attached and
# masks filter() -- consider writing stats::filter() explicitly.
ma <- function(x,n){filter(x,rep(1/n,n), sides=1)}
# Sweep nma moving-average windows (n = 6..25) x nk odd k values (1..59).
nma=20
nk=30
# svm_res is only used by the commented-out SVM variant inside the loop.
svm_res=vector(length = nma)
# acc[ii, kk]: test accuracy for window ii+5 and k = 2*kk-1.
acc=matrix(ncol = nk,nrow = nma)
# Grid search: for each moving-average window, binarise the features by
# whether the moving average meets/exceeds the raw value, then evaluate
# k-NN over a range of odd k on a chronological train/test split.
for(ii in 1:nma)
{
  print(ii)  # progress indicator
  n=ii+5                       # moving-average window size
  ME=ma(E,n)                   # per-column trailing moving average
  # Drop the first n-1 rows where the one-sided filter yields NA; checking
  # column 1 suffices because every column shares the same leading NA rows.
  rem=(which(is.na(ME[,1])))
  ME=ME[-rem,]
  TE=E[-rem,]
  clT=cl[-rem]
  # Chronological split: rows up to 2500-n train, the remainder tests.
  TE.train=TE[1:(2500-n),]
  ME.train=ME[1:(2500-n),]
  TE.test=TE[(2500-n+1):nrow(TE),]
  ME.test=ME[(2500-n+1):nrow(ME),]
  cltr=clT[1:(2500-n)]
  clte=clT[(2500-n+1):length(clT)]
  # Encode each cell as +1 when the moving average is at or above the raw
  # value, else -1. Replaces the original O(rows*cols) double loops with a
  # single vectorised comparison; the resulting matrices are identical.
  train=ifelse(as.matrix(ME.train)>=as.matrix(TE.train),1,-1)
  test=ifelse(as.matrix(ME.test)>=as.matrix(TE.test),1,-1)
  # Sweep odd neighbourhood sizes k = 1, 3, ..., 2*nk-1.
  for(kk in 1:nk)
  {
    pred=knn(train,test,cltr,k=(2*kk-1))
    #model=svm(train,cltr,kernel = "radial",gamma = 0.00195)
    #pred=predict(model,test)
    # Overall accuracy from the 2x2 confusion table (assumes binary labels).
    conf=table(pred,clte)
    acc[ii,kk]=(conf[1,1]+conf[2,2])/(conf[1,1]+conf[2,1]+conf[1,2]+conf[2,2])
  }
}
# Locate the best accuracy cell. BUGFIX: the original decoded the linear
# index with /10 arithmetic, but acc has nma = 20 rows, so the reported
# (row, column) was wrong. Decode with arr.ind (first maximum on ties) and
# re-encode place/pla so the existing cat() on the next line prints the
# correct row (moving-average index) and column (k index).
best <- which(acc == max(acc), arr.ind = TRUE)[1, ]
pla=best[["col"]]
place=best[["row"]]+(pla-1)*10
cat(place-((round(pla)-1)*10),round(pla)) |
10beeda4eed4a2c1cfe2679ce66a269d7c209889 | ede41b362f24057224cbcd608695ad2ad2a0144c | /page_uploads/Farragut_1942_pt1/build_manifest.R | 9321210f748ce12807ec95e5d1276f1fd56b9556 | [] | no_license | oldweather/oldWeather5 | 57ac0a899ea5e3ee811decd2a452aec7e59ffd50 | fc0eeda44e8bd4aae99ce545f4dec9d1bce2823e | refs/heads/master | 2020-04-06T10:03:03.464364 | 2016-11-10T17:11:16 | 2016-11-10T17:11:16 | 48,338,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 650 | r | build_manifest.R | # Make the manifest CSV file for the Farragut 1942 images
# Collect every uploaded page image for this voyage; one manifest row is
# written per file, in glob (alphabetical) order.
log.files<-Sys.glob('/data/local/hadpb/oW5.uploads/Distant_Seas/Navy/Farragut_DD-348/Farragut-DD-348-1942-pt1/for_upload/*.jpg')
# Redirect output to the manifest file
# NOTE(review): if anything between the sink() calls errors, the sink stays
# open; wrapping the writes in a function with on.exit(sink()) would be safer.
sink("/data/local/hadpb/oW5.uploads/Distant_Seas/Navy/Farragut_DD-348/Farragut-DD-348-1942-pt1/for_upload/manifest.csv")
# CSV header, then one row per image: sequential subject id, file name, and
# the fixed origin/group/subgroup/ship/year fields for this batch.
cat('subject_id,image_name_1,origin,group,subgroup,ship,year\n')
for(i in seq_along(log.files)) {
  cat(i,',',sep="")
  cat(basename(log.files[i]),',',sep="")
  cat('NARA,')
  cat('Distant Seas,')
  cat('Navy,')
  cat('Farragut DD-348,')
  cat('1942 pt. 1\n')
}
sink() # Output back to default
|
99b6773a7b8fbd72ecb28112139c74ff81a5f59a | a784fbda4d32c7700a9737616c91ff2a84ca5182 | /module1.R | e3d70716fa744eb1fc4910453ed780deee60e053 | [] | no_license | leojacoby/moneyball | 1f2716695214db5f8ec7875710d97b2927f12bcc | 26dd9afe5860fdd7cff368930fa2dac71cf4a05c | refs/heads/master | 2020-04-14T06:39:57.943503 | 2018-12-31T19:16:50 | 2018-12-31T19:16:50 | 163,692,500 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 608 | r | module1.R | fgm <- c(805, 572, 470, 710, 698, 737, 498, 373, 215, 552)
# Per-player season shooting totals (fgm, defined on the preceding line,
# holds field goals made): attempts and makes for field goals (fga),
# three-pointers (tpm/tpa) and free throws (ftm/fta).
fga <- c(1597, 1349, 1034, 1617, 1381, 1416, 1112, 886, 442, 1061)
tpm <- c(402, 115, 64, 236, 186, 87, 126, 81, 0, 2)
tpa <- c(887, 327, 206, 657, 480, 282, 342, 243, 2, 6)
ftm <- c(363, 272, 395, 720, 447, 359, 250, 201, 92, 208)
fta <- c(400, 344, 475, 837, 498, 491, 280, 240, 131, 586)
# Effective field-goal rate: (FGM + 0.5 * 3PM) / FGA.
efg <- (fgm + 0.5 * tpm) / fga
# NOTE(review): `players` is not defined in this file's visible code --
# presumably created earlier in the session; confirm before sourcing
# this script standalone. tibble() likewise needs tibble/tidyverse loaded.
names(efg) <- players
shooting_data <- tibble(
  Player = players,
  Position = c("PG", "PG", "SG", "SG", "SF", "SF", "PF", "PF", "C", "C"),
  FGM = fgm,
  FGA = fga,
  FTM = ftm,
  FTA = fta,
  TPM = tpm,
  TPA = tpa
) |
8450a64344a31e150f0a97875d2af7afbf9255fd | 3bba52743cc37cd587df45d1e3cfb25fde65ce2b | /src/datacleansing/createrating.R | fef38aa5cc98e405d71b7397be32c2fa8aab2d08 | [
"MIT"
] | permissive | kim3-sudo/march_madness_analysis | 5b4229453ccba27e6ce543c19691b2c06d0ab469 | 8d9c0b72f50e3fb1c0a7d8241a82afd4b63639ce | refs/heads/main | 2023-04-08T09:51:18.644655 | 2021-04-21T18:25:31 | 2021-04-21T18:25:31 | 342,004,796 | 0 | 0 | MIT | 2021-03-16T04:01:57 | 2021-02-24T19:01:00 | Python | UTF-8 | R | false | false | 1,185 | r | createrating.R | ### March Madness Analysis
### Steven Lucas & Sejin Kim
### STAT 306 S21 @ Kenyon College
# Prerequisites ----
### You should have already downloaded the tarball with CSV data
### Unroll the tarball and put it in a directory that R has +R permissions for
### Make sure you have a directory that R has +RW permissions to write RDS files to
# Set directories ----
### workingDir is where your CSV files are
### saveDir is where you want to write RDS files out to
workingDir = "c:/Users/kim3/Downloads/March-Madness-ML/Data/RatingStats"
saveDir = "c:/Users/kim3/Downloads/March-Madness-ML-RDS/Data/RatingStats"
# NOTE(review): the section label below says "Play-by-play" but the loop
# processes Rating Stats files -- likely copied from a sibling script.
## Play-by-play loop ----
# For each season: read RatingStats_<year>.csv from workingDir, keep a copy
# under a per-year global name (RatingStats_<year>, matching the original
# behaviour), and save it as RatingStats_<year>.rds in saveDir.
years <- 1993:2019
for (i in years) {
  print(paste("Processing", i, "Rating Stats data"))
  setwd(workingDir)
  ### Read ----
  ratingStatsIter <- paste0("RatingStats_", i)
  ratingStats <- read.csv(paste0("RatingStats_", i, ".csv"))
  assign(ratingStatsIter, ratingStats)
  ### Write ----
  # Fix: use dir.exists (file.exists is also TRUE for a plain file with the
  # same name, in which case setwd would fail); create the directory once
  # on first use, then move into it.
  if (!dir.exists(saveDir)) {
    dir.create(saveDir, recursive = TRUE)
  }
  setwd(saveDir)
  # toString() on a single string was a no-op in the original; paste0 suffices.
  ratingStatsOutTemp <- paste0("RatingStats_", i, ".rds")
  saveRDS(ratingStats, file = ratingStatsOutTemp)
}
|
41ddec47da5d2006efc340cad3c6ea53e2633949 | 594f65513c5f1a18e61ba7a748da58942c1896fb | /old/scripts/layers.r | 0f835874a0601058447d92833ed2f400bda7a52e | [] | no_license | cyang-2014/ggplot2-book | f050778b2bf326511ab9cef21b4f76da8ac8c2fe | 49af540b919e69e02ff1686260e2805cd8459def | refs/heads/master | 2021-01-16T22:12:14.512001 | 2015-07-30T22:28:04 | 2015-07-30T22:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 886 | r | layers.r | source("latex.r")
# Generates LaTeX tables of geom/stat properties for the ggplot2 book.
# NOTE(review): l(), escape_tex() and tabulate() here come from latex.r
# (sourced on the previous line); this tabulate() presumably builds a LaTeX
# table and shadows base::tabulate -- confirm in latex.r. Geom/Stat are the
# old proto-based ggplot2 internals, so this only runs against that era of
# ggplot2.
l(decumar)
# All aesthetics of a geom/stat: defaults plus required ones, sorted.
aesthetics <- function(o) sort(c(names(o$default_aes()), o$required_aes))
# Parameters = formal arguments that are not aesthetics (and not ...).
params <- function(x) sort(setdiff(names(x$parameters()), c(aesthetics(x), "...")))
output <- function(o) sort(names(o$desc_outputs))
comma <- function(x) paste(escape_tex(x), collapse= ", ")
ap <- function(x) paste(comma(aesthetics(x)), comma(params(x)), sep = " + ")
# NOTE(review): g is assigned but never used below -- leftover scratch?
g <- GeomPoint
# Comma-separated aesthetics with the required ones set in bold.
aesthetics2 <- function(g) {
  a <- aesthetics(g)
  req <- a %in% g$required_aes
  a[req] <- paste("\\textbf{", a[req], "}", sep = "")
  paste(a, collapse = ", ")
}
# Emit three tables: geom descriptions, geom default stats + aesthetics,
# and stat descriptions.
cat(tabulate(ldply(Geom$find_all(), function(c) c(escape_tex(c$objname), escape_tex(c$desc)))))
cat(tabulate(ldply(Geom$find_all(), function(c) c(escape_tex(c$objname),escape_tex(c$default_stat()$objname), aesthetics2(c)))) )
cat(tabulate(ldply(Stat$find_all(), function(c) c(escape_tex(c$objname), escape_tex(c$desc)))))
|
1d4a62e9a2f5a02bf8a53daf827d215e156d7fe6 | 2e0a2841d2e00e22a91924e1f35a5689511eb6b3 | /Scripts/Notes.R | 6046ba852fc54535f50cbad67875014c0d920393 | [] | no_license | CarsonLKeeter/RMACSM_Conference | ef1544c13fc9cec0153d8b40261234a4dba1de6d | cf410de4adc5a50133d0f7af92330dd0d410b7a1 | refs/heads/master | 2020-04-19T00:38:45.506445 | 2019-01-27T20:36:22 | 2019-01-27T20:36:22 | 167,852,303 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 500 | r | Notes.R | #Notes
#1) Largest mean daily change score compared to largest mean change score of restriction week. Set value = mean largest change during baseline. Compare change score between baseline and restriction. Mean change score = mean restriction (avg 3-7) - mean baseline (avg 8-10).
#2) Is the observed change different from the average daily fluctuation? Use the average daily difference between consecutive days across all subjects as the set value. Compare change scores between baseline and fluid restriction. |
3801c3cd59579c4a27d17507ee3a36531ebe0412 | c7b332e8cd2371fc24356804c2d56ce2faa5975e | /man/use_survival_analysis.Rd | c7a43bd49e7ed2ff34a4bf3e7c07dff926ee1716 | [] | no_license | sheejamk/packDAMipd | a138a132bb34da27dbbf7c02c17d0b26650c7458 | f37cbe0702f8bb9addc4ae1875669a08f5d20416 | refs/heads/main | 2023-03-18T19:36:50.293630 | 2022-07-28T20:32:51 | 2022-07-28T20:32:51 | 342,639,162 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,719 | rd | use_survival_analysis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2b_parameter_estimation_survival_functions.R
\name{use_survival_analysis}
\alias{use_survival_analysis}
\title{############################################################################
Get the parameter values using the survival analysis}
\usage{
use_survival_analysis(
param_to_be_estimated,
dataset,
indep_var,
info_get_method,
info_distribution,
covariates,
timevar_survival,
cluster_var = NA
)
}
\arguments{
\item{param_to_be_estimated}{parameter of interest}
\item{dataset}{data set to be provided}
\item{indep_var}{the independent variable (column name in data file)}
\item{info_get_method}{additional information on methods, e.g.,
Kaplan-Meier or hazard}
\item{info_distribution}{distribution name, e.g., for logistic
regression - binomial}
\item{covariates}{list of covariates - calculations to be done
before passing}
\item{timevar_survival}{time variable for survival analysis,
default is NA}
\item{cluster_var}{cluster variable for survival analysis}
}
\value{
the results of the regression analysis
}
\description{
############################################################################
Get the parameter values using the survival analysis
}
\details{
This function helps to get the parameter values after the survival analysis
Takes into account many different methods like KM.FH, Cox proportional etc.
and then calls appropriate functions to do the survival analysis
}
\examples{
\donttest{
data_for_survival <- survival::aml
surv_estimated_aml <- use_survival_analysis("status", data_for_survival,
"x",
info_get_method = "parametric", info_distribution = "weibull",
covariates = NA, "time")
}
}
|
e9fa218ea3b2c9a9b44261c4af3446481160ce5c | 468394e5eea229ac8076cdb399fbe14e803311f4 | /man/gendata.ep.Rd | 1ff4dae49870b10c6c93045190a2a1dc8f9dd61f | [] | no_license | cran/RxCEcolInf | 825ab7284a775c45f9c059bbe72d1a834be0bd64 | 6c02fe52ce6f582a08488debaca58ec823b708da | refs/heads/master | 2023-04-07T04:32:42.759661 | 2021-11-06T13:50:12 | 2021-11-06T13:50:12 | 17,693,465 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,259 | rd | gendata.ep.Rd | \name{gendata.ep}
\alias{gendata.ep}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Function To Simulate Ecological and Survey Data For Use in
Testing And Analyzing Other Functions in Package}
\description{
  This function generates simulated ecological data, \emph{i.e.}, data in the form of contingency
tables in which the row and column totals but none of the internal
cell counts are observed. At the user's option, data from simulated surveys of
some of the `units' (in voting parlance, 'precincts') that gave rise
to the contingency tables are also produced.
}
\usage{
gendata.ep(nprecincts = 175,
nrowcat = 3,
ncolcat = 3,
colcatnames = c("Dem", "Rep", "Abs"),
mu0 = c(-.6, -2.05, -1.7, -.2, -1.45, -1.45),
rowcatnames = c("bla", "whi", "his", "asi"),
alpha = c(.35, .45, .2, .1),
housing.seg = 1,
nprecincts.ep = 40,
samplefrac.ep = 1/14,
K0 = NULL,
nu0 = 12,
Psi0 = NULL,
lambda = 1000,
dispersion.low.lim = 1,
dispersion.up.lim = 1,
outfile=NULL,
his.agg.bias.vec = c(0,0),
HerfInvexp = 3.5,
HerfNoInvexp = 3.5,
HerfReasexp = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{nprecincts}{positive integer: The number of contingency tables (precincts)
in the simulated dataset.}
\item{nrowcat}{integer > 1: The number of rows in each of the
contingency tables.}
\item{ncolcat}{integer > 1: The number of columns in each of the
contingency tables.}
\item{rowcatnames}{string of length = length(\code{nrowcat}): Names
of rows in each contingency table.}
\item{colcatnames}{string of length = length(\code{ncolcat}): Names
of columns in each contingency table.}
\item{alpha}{vector of length(\code{nrowcat}): initial parameters to
a Dirichlet distribution used to generate each contingency table's
row fractions.}
\item{housing.seg}{scalar > 0: multiplied to alpha to generate final
parameters to Dirichlet distribution used to generate each
contingency table's row fractions.}
\item{mu0}{vector of length (\code{nrowcat} * (\code{ncolcat} - 1)):
The mean of the
multivariate normal hyperprior at the top level of the hierarchical
model from which the data are simulated. See Details.}
\item{K0}{square matrix of dimension (\code{nrowcat} * (\code{ncolcat}
- 1)): the covariance
matrix of the multivariate normal hyperprior at the top level of the
hierarchical model from which the data are simulated. See Details.}
\item{nu0}{scalar > 0: the degrees of freedom for the Inv-Wishart
hyperprior from which the \eqn{SIGMA}{\Sigma} matrix will be drawn.}
\item{Psi0}{square matrix of dimension (\code{nrowcat} *
(\code{ncolcat} - 1)): scale matrix for the Inv-Wishart
hyperprior from which the \code{SIGMA} matrix will be drawn.}
\item{lambda}{scalar > 0: initial parameter of the Poisson
distribution from which the number of voters in each precinct will be drawn}
\item{dispersion.low.lim}{scalar > 0 but < dispersion.up.lim:
lower limit of a draw from \code{runif()} to be multiplied to
\code{lambda} to set a lower limit on
the parameter used to draw from the Poisson distribution that
determines the number of voters in each precinct.}
\item{dispersion.up.lim}{scalar > dispersion.low.lim:
upper limit of a draw from \code{runif()} to be multiplied
to \code{lambda} to set a upper limit on
the parameter used to draw from the Poisson distribution that
determines the number of voters in each precinct.}
\item{outfile}{string ending in ".Rdata": filepath and name of
object; if non-NULL, the object returned by this function will be
saved to the location specified by \code{outfile}.}
\item{his.agg.bias.vec}{vector of length 2: only implemented for
nowcat = 3 and ncolcat = 3: if non-null, induces aggregation bias
into the simulated data. See Details.}
\item{nprecincts.ep}{integer > -1 and less than nprecincts: number of
contingency tables (precincts) to be included in simulated survey
sample (ep for "exit poll").}
\item{samplefrac.ep}{fraction (real number between 0 and 1):
percentage of individual units (voters) within each contingency table
(precinct) include in the survey sample.}
\item{HerfInvexp}{scalar: exponent used to generate inverted
quasi-Herfindahl weights used to sample contingency tables (precincts) for
inclusion in a sample survey. See Details.}
\item{HerfNoInvexp}{scalar: same as HerInvexp except the quasi-Herfindahl
weights are not inverted. See Details.}
\item{HerfReasexp}{scalar: same as HerfInvexp, for a separate sample
survey. See Details.}
}
\details{
This function simulates data from the ecological inference model outlined in Greiner \&
Quinn (2009). At the user's option (by setting nprecincts.ep to an
integer greater than 0), the function generates three survey samples
from the simulated dataset. The specifics of the function's operation
are as follows.
First, the function simulates the total number of individual units
(voters) in each contigency table (precinct) from a Poisson
distribution with parameter \code{lambda} * runif(1, dispersion.low.lim,
dispersion.up.lim). Next, for each table, the function simulates the
vector of fraction of units (voters) in each table (precinct) row.
The fractions are simulated from a Dirichlet distribution with
parameter vector \code{housing.seg} * \code{alpha}. The row fractions are
multiplied by the total number of units (voters), and the resulting
vector is rounded to produce contingency table row counts for each
table.
Next, a vector \eqn{mu}{\mu} is simulated from a multivariate normal
with mean \code{mu0} and covariance matrix \code{K0}. A covariance
matrix \code{Sigma} is simulated from an Inv-Wishart with
\code{nu0} degrees of freedom and scale matrix \code{Psi0}.
Next, \code{nprecincts} vectors are drawn from \eqn{N(mu,
SIGMA)}{N(\mu, \Sigma)}. Each of these draws undergoes an inverse-stacked
multidimensional logistic transformation to produce a set of \code{nrowcat}
probability vectors (each of which sums to one) for \code{nrowcat}
multinomial distributions, one for each row in that contingency
table. Next, the \code{nrowcat} multinomial values, which represent the true (and
in real life, unobserved) internal cell counts, are drawn from the relevant row
counts and these probability vectors. The column totals are
calculated via summation.
If \code{nprecincts.ep} is greater than 0, three simulated surveys (exit polls) are
drawn. All three select contingency tables (precincts) using weights
that are a function of the composition of the row totals. Specifically the row
fractions are raised to a power q and then summed (when q = 2 this calculation is
known in antitrust law as a Herfindahl index). For one of the three
surveys (exit polls) \code{gendata.ep} generates, these
quasi-Herfindahl indices are the weights. For two of the three
surveys (exit polls) \code{gendata.ep} generates, denoted \code{EPInv}
and \code{EPReas}, the sample weights are the reciprocals of these
quasi-Herfindhal indices. The former method tends to weight
contingency tables (precincts) in which one row dominates the table
higher than contigency tables (precincts) in which row fractions are close to the
same. In voting parlance, precincts in which one racial group
dominates are more likely to be sampled than racially mixed
precincts. The latter method, in which the sample weights are
reciprocated, weights contingency tables in which row fractions are
similar more highly; in voting parlance, mixed-race precincts are more
likly to be sampled.
For example, suppose \code{nrowcat} = 3, \code{HerInvexp} = 3.5,
\code{HerfReas} = 2, and
\code{HerfNoInv} = 3.5. Consider
contingency table P1 with row counts (300, 300, 300) and contingency
table P2 with row counts (950, 25, 25). Then:
\bold{Row fractions:} The corresponding row
fractions are (300/900, 300/900, 300/900) = (.33, .33, .33) and
(950/1000, 25/1000, 25/1000) = (.95, .025, .025).
\bold{EPInv weights:} \code{EPInv} would
sample from assign P1 and P2 weights as follows: \eqn{1/sum(.33^3.5,
.33^3.5, .33^3.5) = 16.1} and \eqn{1/sum(.95^3.5, .025^3.5, .025^3.5) =
1.2}.
\bold{EPReas weights:} \code{EPReas} would assign weights as
follows: \eqn{1/sum(.33^2, .33^2, .33^2) = 3.1} and \eqn{1/sum(.95^2, .025^2,
.025^2) = 1.1}.
\bold{EPNoInv weights:} \code{EPNoInv} would assign weights as
follows: \eqn{sum(.33^3.5, .33^3.5, .33^3.5) = .062} and \eqn{sum(.95^3.5,
.025^3.5, .025^3.5) = .84}.
For each of the three simulated surveys (\code{EPInv}, \code{EPReas},
and \code{EPNoInv}), \code{gendata.ep} returns a list of length
three. The first element of the list, \code{returnmat.ep}, is a matrix of
dimension \code{nprecincts} by (\code{nrowcat} * \code{ncolcat})
suitable for passing to \code{TuneWithExitPoll} and
\code{AnalyzeWithExitPoll}. That is, the first row of
\code{returnmat.ep} corresponds to the first row of \code{GQdata},
meaning that they both contain information from the same
contingency table. The second row of \code{returnmat.ep} contains
information from the contingency table represented in the second row
of \code{GQdata}. And so on. In addition, \code{returnmat.ep} has counts
from the sample of the contingency table in vectorized row major
format, as required for \code{TuneWithExitPoll} and
\code{AnalyzeWithExitPoll}.
If \code{nrowcat} = \code{ncolcat} = 3, then the user may set
\code{his.agg.bias.vec} to be nonzero. This will introduce aggregation
bias into the data by making the probability vector of the second row
of each contingency table a function of the fractional composition of
the third row. In voting parlance, if the rows are black, white, and
Hispanic, the white voting behavior will be a function of the percent
Hispanic in each precinct. For example, if \code{his.agg.bias.vec} =
c(1.7, -3), and if the fraction Hispanic in each precinct i is
\eqn{X_hi}{X_{h_i}}, then in the ith precinct, the \eqn{mu_i[3]}{\mu_i[3]}
is set to \code{mu0[3]} + \eqn{X_hi * 1.7}{X_{h_i} * 1.7}, while \eqn{mu_i[4]}{\mu_i[4]}
is set to \code{mu0[4]} + \eqn{X_hi * -3}{X_{h_i} * -3}. This feature
allows testing of the ecological inference model with aggregation
bias.
}
\value{
A list with the following elements.
\item{GQdata}{Matrix of dimension \code{nprecincts} by (\code{nrowcat}
+ \code{ncolcat}): The simulated (observed) ecological data, meaning
the row and column totals in the contingency tables. May be passed as
\code{data} argument in \code{Tune}, \code{Analyze},
\code{TuneWithExitPoll}, and \code{AnalyzeWithExitPoll}}
\item{EPInv}{List of length 3: \code{returnmat.ep}, the
first element in the list, is a matrix that may be passed as the
\code{exitpoll} argument in \code{TuneWithExitPoll} and
\code{AnalyzeWithExitPoll}. See Details. \code{ObsData} is
a dataframe that may be used as the \code{data} argument in the
\code{survey} package. \code{sampprecincts.ep} is a vector detailing
the row numbers of \code{GQdata} (meaning the contingency tables) that
were included in the \code{EPInv} survey (exit
poll). See Details for an explanation of the weights used to select the contingency
tables for inclusion in the \code{EPInv} survey (exit poll).}
\item{EPNoInv}{List of length 3: Contains the same elements as
\code{EPInv}. See Details for an explanation of weights used to
select the contingency tables for inclusion in the \code{EPNoInv}
survey (exit poll).}
\item{EPReas}{List of length 3: Contains the same elements as
\code{EPInv}. See Details for an explanation of weights used to
select the contingency tables for inclusion in the \code{EPReas}
survey (exit poll).}
\item{omega.matrix}{Matrix of dimension \code{nprecincts} by
(\code{nrowcat} * (\code{ncolcat}-1)): The matrix of draws from the
multivariate normal distribution at the second level of the hierarchical
model giving rise to \code{GQdata}. These values undergo an
inverse-stacked-multidimensional logistic transformation to produce contingency
table row probability vectors.}
\item{interior.tables}{List of length \code{nprecincts}: Each element of the
list is a full (meaning all interior cells are filled in)
contingency table.}
\item{mu}{vector of length \code{nrowcat} * (\code{ncolcat}-1): the
\eqn{mu}{\mu} vector drawn at the top level of the hierarchical
model giving rise to \code{GQdata}. See Details.}
\item{Sigma}{square matrix of dimension \code{nrowcat} *
(\code{ncolcat}-1): the covariance matrix drawn at the top level of
the hierarchical model giving rise to \code{GQdata}. See Details.}
\item{Sigma.diag}{the output of \code{diag(Sigma)}.}
\item{Sigma.corr}{the output of \code{cov2cor(Sigma)}.}
\item{sim.check.vec}{vector: the true values of the
parameters generated by \code{Analyze} and
\code{AnalyzeWithExitPoll} in the same order as the parameters are
produced by those two functions. This vector is useful in assessing
the coverage of intervals from the posterior draws from \code{Analyze} and
\code{AnalyzeWithExitPoll}.}
}
\references{D. James Greiner \& Kevin M. Quinn. 2009. ``R x C Ecological
Inference: Bounds, Correlations, Flexibility, and Transparency of
Assumptions.'' \emph{J.R. Statist. Soc. A} 172:67-81.}
\author{D. James Greiner \& Kevin M. Quinn}
\examples{
\dontrun{
SimData <- gendata.ep() # simulated data
FormulaString <- "Dem, Rep, Abs ~ bla, whi, his"
EPInvTune <- TuneWithExitPoll(fstring = FormulaString,
data = SimData$GQdata,
exitpoll=SimData$EPInv$returnmat.ep,
num.iters = 10000,
num.runs = 15)
EPInvChain1 <- AnalyzeWithExitPoll(fstring = FormulaString,
data = SimData$GQdata,
exitpoll=SimData$EPInv$returnmat.ep,
num.iters = 2000000,
burnin = 200000,
save.every = 2000,
rho.vec = EPInvTune$rhos,
print.every = 20000,
debug = 1,
keepTHETAS = 0,
keepNNinternals = 0)
EPInvChain2 <- AnalyzeWithExitPoll(fstring = FormulaString,
data = SimData$GQdata,
exitpoll=SimData$EPInv$returnmat.ep,
num.iters = 2000000,
burnin = 200000,
save.every = 2000,
rho.vec = EPInvTune$rhos,
print.every = 20000,
debug = 1,
keepTHETAS = 0,
keepNNinternals = 0)
EPInvChain3 <- AnalyzeWithExitPoll(fstring = FormulaString,
data = SimData$GQdata,
exitpoll=SimData$EPInv$returnmat.ep,
num.iters = 2000000,
burnin = 200000,
save.every = 2000,
rho.vec = EPInvTune$rhos,
print.every = 20000,
debug = 1,
keepTHETAS = 0,
keepNNinternals = 0)
EPInv <- mcmc.list(EPInvChain1, EPInvChain2, EPInvChain3)
}
} |
3844d31b9f4e522c96e07a529e383044ea6c3daa | c234fabf31a9f69610b2135486ca99dc3e61aa75 | /man/exchange_rate.Rd | 4c40ae715eb4212e38a28dff90b02eb039a0b611 | [] | no_license | cran/geckor | 8a1b5258ed648835e90333cc73d285c5daa8e127 | 1d585c4dfe990622be9b0c2c7bbde0d567260224 | refs/heads/master | 2023-08-23T08:54:12.952509 | 2021-11-01T14:20:02 | 2021-11-01T14:20:02 | 385,804,363 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,950 | rd | exchange_rate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exchange_rate.R
\name{exchange_rate}
\alias{exchange_rate}
\title{Exchange rate}
\usage{
exchange_rate(currency = NULL, max_attempts = 3)
}
\arguments{
\item{currency}{(character or \code{NULL}): a vector with abbreviated names of the
currencies of interest. An up-to-date list of supported currencies (both
fiat and cryptocurrencies) can be retrieved with the \code{\link[=supported_currencies]{supported_currencies()}}
function. If an unsupported \code{currency} is requested, the call will fail
with the respective error message. If \code{currency = NULL} (default), the
function will return exchange rates for all supported currencies.}
\item{max_attempts}{(double, positive): specifies the
maximum number of attempts to call the CoinGecko API (e.g., if
the first call fails for some reason). Additional attempts are
implemented with an exponential backoff. Defaults to 3.}
}
\value{
A tibble with the following columns:
\itemize{
\item \code{timestamp} (POSIXct): date and time of the API request;
\item \code{currency} (character): abbreviated name of the currency;
\item \code{name} (character): common name of the currency;
\item \code{price_in_btc} (double): price in Bitcoin;
\item \code{type} (character): type of the currency (\code{"fiat"} or \code{"crypto"}).
}
}
\description{
Retrieves the current exchange rate for a crypto- or fiat currency in Bitcoin
}
\details{
This function is based on the public
\href{https://www.coingecko.com/api/documentations/v3}{CoinGecko API},
which has a limit of 50 calls per minute. Please
keep this limit in mind when developing your applications.
}
\examples{
\donttest{
# get exchange rates for all supported currencies
r1 <- exchange_rate()
print(r1)
# get exchange rates for a set of currencies:
r2 <- exchange_rate(currency = c("usd", "eur", "gbp"))
print(r2)
}
}
|
2b7cc003cbd48f870895477b2de738b3522637f2 | 46fc788c22af1c49f9a3bc013e1bf1abf66c6231 | /dataAcquisition.R | f694b9b766121b863b50078ff0963c6cf92ddb39 | [] | no_license | hoyinli1211/WCprediction2 | 700f92ed1e4f96d9ef92aa8f13158eb6a94889cb | 0741304d6ce7468ebf0aecbd1c9466fd1003c1ea | refs/heads/master | 2020-03-21T20:57:51.575567 | 2018-07-08T16:04:23 | 2018-07-08T16:04:23 | 139,039,257 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,001 | r | dataAcquisition.R | ################################
#data acquisition
################################
library(rvest)
#FIFA coca-cola ranking
#source: https://www.fifa.com/fifa-world-ranking/
#Scrape the pre-tournament FIFA ranking table for one World Cup year.
#Returns a data frame with columns (year, team, fifa.rank.beforeWC).
#NOTE(review): depends on the global df.wc.timeframe (built elsewhere) for
#the archived fifa.com ranking URL of each year.
#v.team is kept for backward compatibility with any positional callers, but
#it is not used: the scraped table already covers every ranked team. (The
#original body recomputed and then never used it.)
df.fifaRanking.Extraction <- function (v.year, v.team = NULL) {
  v.year <- as.character(v.year)
  #Look up the ranking-page URL for this World Cup year.
  v.url <- df.wc.timeframe %>%
    filter(year==v.year) %>%
    pull(url.ranking)
  #The ranking is the first HTML table on the page.
  df.extraction <- read_html(v.url) %>%
    html_nodes("table") %>%
    .[1] %>%
    html_table(fill=TRUE) %>%
    .[[1]]
  #Column 2 = rank, column 3 = team name.
  df.extraction <- df.extraction[,c(2,3)]
  df.extraction$year <- rep(v.year, nrow(df.extraction))
  colnames(df.extraction) <- c('fifa.rank.beforeWC','team','year')
  #Reorder to (year, team, rank) and coerce to stable column types.
  df.extraction <- df.extraction[c(3,2,1)] %>%
    mutate(year=as.character(year),
           team=as.character(team),
           fifa.rank.beforeWC=as.integer(fifa.rank.beforeWC))
  return(df.extraction)
}
#Stack the pre-tournament FIFA rankings for every World Cup year into one
#data frame (one row per team per year). Collecting the per-year frames in
#a list and binding once avoids the quadratic copy cost of growing
#df.fifa.ranking with bind_rows() inside the loop.
df.fifa.ranking <- bind_rows(
  lapply(df.wc.timeframe$year, df.fifaRanking.Extraction)
)
#Harmonise team names between the scraped FIFA rankings and the World Cup
#data: FIFA uses different official country names in different years.
#case_when() replaces the original 11-level nested ifelse() with one
#condition per line; the final TRUE branch keeps all other names unchanged.
df.fifa.ranking <- df.fifa.ranking %>%
  mutate(team = case_when(
    team == 'United Kingdom of Great Britain and Northern Ireland' ~ 'England',
    team == 'Russian Federation' ~ 'Russia',
    team %in% c('Iran (Islamic Republic of)', 'IR Iran') ~ 'Iran',
    team == 'Czechia' ~ 'Czech Republic',
    team == "Côte d'Ivoire" ~ 'Ivory Coast',
    team == 'Bosnia and Herzegovina' ~ 'Bosnia-Herzegovina',
    team == 'China PR' ~ 'China',
    team == 'Korea Republic' ~ 'Republic of Korea',
    team == 'Republic of Ireland' ~ 'Ireland',
    team == 'USA' ~ 'United States of America',
    team %in% c('Serbia and Montenegro', 'Yugoslavia') ~ 'Serbia',
    TRUE ~ team
  ))
|
b4977c3a2669345eee1563f8e9921a7ae2c4fef3 | f23cb8b279cc83b0ccb87b1cb320084f595d7dee | /OpinionDiffusion/Source/cluster_tweet_id_mapping.R | 213f9e6a37307ba0881c9be99aaaa634a75d80c0 | [] | no_license | AadhithyaRamesh/Opinion_diffusion | 5f243b2d084de19389a3fad4e6e38501561ba42b | b33d49a760a7a1c46d38ad1fd75bcb90292797a8 | refs/heads/master | 2020-03-28T14:10:04.406405 | 2019-03-20T08:03:32 | 2019-03-20T08:03:32 | 148,462,763 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,013 | r | cluster_tweet_id_mapping.R | library(igraph)
library(hashmap)
retweet_table = read.csv("../Data/demonetization_retweet_table.csv", sep = ";", stringsAsFactors = FALSE)
cluster_table = read.csv("../Data/demonetization_cluster_tweet_table.csv", sep = ";", stringsAsFactors = FALSE)
#Cluster ids are assumed to be 1..max, as produced upstream.
unique_clusters = seq_len(max(cluster_table$Cluster.number))
#For each cluster, collect the row indices in retweet_table whose tweet text
#matches a tweet assigned to that cluster. A list column is required because
#a cluster can map to any number of retweet rows; the original loop tried to
#store several indices in a single integer cell and, on the first fill, wrote
#them to the wrong place entirely (`cluster_tweet_indices$cluster_number[2]`).
cluster_tweet_indices = data.frame(Cluster.number = unique_clusters)
cluster_tweet_indices$tweet_indices = lapply(unique_clusters, function(cluster_number)
{
  tweets = cluster_table$tweet[cluster_table$Cluster.number == cluster_number]
  #Preserve the original diagnostic: print any cluster tweet that never
  #appears in the retweet table.
  missing = setdiff(tweets, retweet_table$tweet)
  if (length(missing) > 0)
  {
    print(missing)
  }
  which(retweet_table$tweet %in% tweets)
})
print(cluster_tweet_indices)
d73a895ec12799734a589e6b7951d2540e264d28 | 37804579772f536b8703c27eba4185ab41d30d16 | /demographies.R | 671b6bd63082b5135784bb1dc87ce141b93e8a1e | [] | no_license | abhishekterdalkar/World-Bank-Demographics | f126daad0d3eaec6d3031affe9fe61cad448fed2 | 625d57654b9f5f1fe247f030ff5cd67f8decd3d8 | refs/heads/master | 2022-04-21T10:33:54.104144 | 2020-04-20T18:13:13 | 2020-04-20T18:13:13 | 257,245,388 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,200 | r | demographies.R | getwd()
# Load the raw demographic data (one row per country) and inspect it.
stats <- read.csv("P2-Demographic-Data (1).csv")
head(stats)
# Give the five columns short, consistent names.
colnames(stats) <- c("Country","CountryCode","BirthRate","InternetUsers","IncomeGroup")
tail(stats)
nrow(stats)
ncol(stats)
summary(stats)
# NOTE(review): Codes_2012_Dataset, Countries_2012_Dataset and
# Regions_2012_Dataset are not defined in this file; they are assumed to be
# loaded into the workspace (e.g. from course data) before this script runs.
testDF <- data.frame(Code = Codes_2012_Dataset, Countries = Countries_2012_Dataset, Regions = Regions_2012_Dataset)
is.data.frame(testDF)
head(testDF)
tail(testDF)
nrow(testDF)
ncol(testDF)
summary(testDF)
# Attach region information to the demographic data by country code, then
# drop the redundant country-name column from the lookup table.
updatedStats <- merge(stats,testDF, by.x = "CountryCode", by.y = "Code")
library(ggplot2)
# Quick qplot scatter: internet use vs. birth rate, coloured by region.
qplot(data = updatedStats, x = BirthRate, y = InternetUsers, color = Regions, size = I(3), shape = I(18), alpha = 0.4, xlim = c(0,30), ylim = c(25,75))
# Same scatter via the full ggplot interface, faceted by region, with a
# smoothed trend line and a zoomed coordinate window.
plot1 <- ggplot(data = updatedStats, aes(x = BirthRate, y = InternetUsers, color = Regions))
plot1 + geom_point(alpha = 0.4, shape = I(18), size = I(3)) + facet_grid(Regions~., space = "free") + geom_smooth() + coord_cartesian(x = c(10,40), y = c(0,50))
# Jittered points overlaid with box plots, one panel per region.
plot2 <- ggplot(data = updatedStats, aes(x = BirthRate, y = InternetUsers, color = Regions))
plot2 + geom_jitter(shape = I(17)) + geom_boxplot(alpha = 0.4) + facet_grid(Regions~., space = "free")
# Birth-rate histograms in a region-by-income-group grid, with custom theming.
plot3 <- ggplot(data = updatedStats, aes(x = BirthRate, fill = Regions))
themeTest <- plot3 + geom_histogram(binwidth = 5, color = "Black") + facet_grid(Regions~IncomeGroup,)
themeTest + xlab("Birth Rate") + ylab("Population") + ggtitle("Birth Rate Analysis") +
  theme(axis.title.x = element_text(color = "Red", size = 10),
        axis.title.y = element_text(color = "Blue", size = 10),
        axis.text = element_text(color = "Dark Blue", size = 10),
        plot.title = element_text(color = "Dark Green", size = 12))
# NOTE(review): Games is not defined in this file; assumed to be a
# players-by-seasons matrix already present in the workspace.
Games[c(1,10), c(9,10)]
Games[c("KobeBryant","DwayneWade"),c("2013","2014")]
# Line plot of per-season values for the selected players (rows of `data`),
# with a legend naming each plotted player. `data` is a players-by-seasons
# matrix; `row` selects which players (matrix rows) to draw. Uses the global
# `Players` vector, defined later in this file, for the legend labels.
myplot <- function(data, row = 1:5){
  # drop = FALSE (was the risky shorthand F) keeps a single-row selection a
  # matrix so t() produces one column per player rather than a row vector.
  matplot(t(data[row, , drop = FALSE]), type = "o", pch = 15:18, col = c(1:4,6), lwd = 1, lty = 1:4)
  legend("bottomright", legend = Players[row], inset = 0.01, pch = 15:18, col = c(1:4,6), lwd = 1, lty = 1:4)
}
# NOTE(review): MinutesPlayed and Games are not defined in this file; they
# are assumed to be in the workspace.
myplot(MinutesPlayed/Games, c(1,9))
#Seasons covered by the data, used as column names of the matrices below.
Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014")
#Players, in the same order as the rows of the matrices built below.
Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade")
#Free Throws made: one vector per player, one value per season.
KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196)
JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141)
LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375)
CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189)
DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143)
ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179)
ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289)
KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146)
DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152)
DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284)
#Free Throw Attempts: same layout as the made-free-throw vectors.
KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241)
JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176)
LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528)
CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237)
DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271)
ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232)
ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321)
KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171)
DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187)
DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370)
#Assemble players-by-seasons matrices.
FreeThrows <- rbind(KobeBryant_FT,JoeJohnson_FT,LeBronJames_FT,CarmeloAnthony_FT,DwightHoward_FT,ChrisBosh_FT,ChrisPaul_FT,KevinDurant_FT,DerrickRose_FT,DwayneWade_FT)
#FIX: FreeThrowsAttempts was referenced below but never created, so the
#colnames() assignment errored. Build it from the _FTA vectors.
FreeThrowsAttempts <- rbind(KobeBryant_FTA,JoeJohnson_FTA,LeBronJames_FTA,CarmeloAnthony_FTA,DwightHoward_FTA,ChrisBosh_FTA,ChrisPaul_FTA,KevinDurant_FTA,DerrickRose_FTA,DwayneWade_FTA)
colnames(FreeThrows) <- Seasons
colnames(FreeThrowsAttempts) <- Seasons
# Same plotting helper as myplot() earlier in this file: per-season lines
# for the selected players plus a legend. Uses the global `Players` vector
# for legend labels.
myplot2 <- function(data, row = 1:5){
  # drop = FALSE (was the risky shorthand F) keeps a single-row selection a
  # matrix so t() behaves correctly for one player.
  matplot(t(data[row, , drop = FALSE]), type = "o", pch = 15:18, col = c(1:4,6), lwd = 1, lty = 1:4)
  legend("bottomright", legend = Players[row], inset = 0.01, pch = 15:18, col = c(1:4,6), lwd = 1, lty = 1:4)
}
# NOTE(review): FieldGoals and FieldGoalAttempts are not defined in this
# file; they are assumed to be players-by-seasons matrices in the workspace.
myplot2(FieldGoals/FieldGoalAttempts, c(1,5))
|
15eace57bd2db5f0a2acdc5bcd601811adbea180 | 9b3455cfbb8a16b9380a5f8a725cc1e410301761 | /Web Scraper.R | 3654087331caa8604c5e0518140a576ea0342e1d | [] | no_license | ChapmanNitta/R_Web_Scraper | d289a8f3f541d27cbd895663d635dd3a0c882674 | 499d088049d16d7f0bb05c09d0936cc7cf50c793 | refs/heads/master | 2022-01-21T17:35:37.216460 | 2019-07-22T02:49:04 | 2019-07-22T02:49:04 | 198,135,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,878 | r | Web Scraper.R | library("rvest")
library("googleVis")
library("tidyverse")
library("xml2")
#Access the specific movie theater's show times web page from your R code.
#. Import the HTML content of the show times web page into an R object called"Showtimes".
URI <- 'https://www.cinemark.com/pennsylvania/cinemark-university-city-penn-6?showDate='
# Download and parse the theatre's showtime page for one date. The date
# string is appended to the base URI, which ends in a showDate= query
# parameter. Returns the parsed HTML document.
getShowtimes <- function(date) {
  read_html(paste0(URI, date))
}
# Fetch the page for a fixed date and print the parsed document.
Showtimes <- getShowtimes('05/04/2019')
Showtimes
#Save this content to a file and explain in your report how to reload it (in case your grader
#needs to reload the show times as they may change between after you submit your project but before your grader can grade it).
# (Despite the .Rdata extension, the file holds serialized HTML, so it is
# reloaded with read_html rather than load().)
write_html(Showtimes, file = 'showtime.Rdata')
Showtimes <- xml2::read_html('showtime.Rdata')
#Scrape the show times web page to extract the list of movies currently playing.
#. Create an R list object called "Program" containing all movie titles currently showing in
#the theater. (Most likely you will get this from the "Showtimes" object).
# Extract every movie title from the parsed showtimes page: each showtime
# block carries its title in an <h2>. Returns the titles as a plain list of
# character strings.
getPrograms <- function(showtimes){
  titles <- html_text(html_nodes(showtimes, "div.showtimeMovieBlock h2"))
  as.list(titles)
}
Program <- getPrograms(Showtimes)
Program
#Choose a particular movie title and scrape the R object "Showtimes" To extract the times at which this particular movie is playing
# The chosen movie's block is identified by its class attribute, which
# embeds a numeric movie id ("76838"); each div.showtime inside it is one
# screening time, cleaned of newlines and surrounding whitespace.
Showtimes %>%
  html_nodes(xpath = '//*[@class="showtimeMovieBlock 76838 "]')%>% #Needed to use xpath as html_nodes alone do not support numerical values as an argument parameter
  html_nodes(., "div.showtime") %>%
  html_text() %>%
  { gsub("\r\n", "", .) } %>% # Need to encompass piped data in curly brackets and use a period as a data marker
  trimws()
#d) (25 points) Identify all of the links from R object "Showtimes" to the other web pages contained
#the movie titles currently playing in the theater.
#. Create an R list object out of them called "Page_Links" and save it to a file.
# Collect the href of every movie-detail link on the page; unique() removes
# links repeated across showtime blocks, and the result is kept as a list.
Page_Links <- html_nodes(Showtimes, "a.movieLink") %>%
  html_attr("href") %>%
  unique() %>%
  as.list()
# Persist the link list and demonstrate reloading it: load() restores the
# object into the workspace under its original name, Page_Links.
save(Page_Links ,file = "Page_Links.RData")
load(file = "Page_Links.RData")
typeof(Page_Links)
Page_Links
#. Use a hierarchical visualization technique (perhaps gvisOrgChart()) to show the
#relationship between the web pages that were scraped.
# Build a Child/Parent edge table for gvisOrgChart: every scraped movie link
# becomes a child of the showtimes page.
df <- data.frame(matrix(unlist(Page_Links), nrow=length(Page_Links), byrow=T))
df <- cbind(df, 'https://www.cinemark.com/pennsylvania/cinemark-university-city-penn-6?showDate=05/04/2019', 'Cinemark')
colnames(df) <- c("Child", "Parent", "Placeholder")
# Root row: the showtimes page itself, with no parent (NA).
de <- data.frame('https://www.cinemark.com/pennsylvania/cinemark-university-city-penn-6?showDate=05/04/2019', NA, 'Cinemark')
colnames(de) <- c("Child", "Parent", "Placeholder")
df <- rbind(df, de)
# plot() on a googleVis object renders the chart in the browser.
hierarchy <- gvisOrgChart(df, idvar = "Child", parentvar = "Parent", options=list(width="200px", height="300px"))
plot(hierarchy)
|
8ebe2dad864e4903126349a58554ea1f991324c2 | d0a780845a48a3190c3861224eccc1cf077b750a | /R/TF_Activity.R | 12ec54ea6fa81b719bb7ce21ca693c8ec106d9ec | [
"MIT"
] | permissive | lusystemsbio/NetAct | 58f77c079262471e69a060bf14c635dc3ec36d16 | a763ef165e6c05424439c674566eaac9f107a0dd | refs/heads/master | 2023-04-26T16:05:06.257855 | 2023-04-19T16:14:28 | 2023-04-19T16:14:28 | 285,619,557 | 12 | 2 | MIT | 2022-04-06T19:32:12 | 2020-08-06T16:30:48 | R | UTF-8 | R | false | false | 7,465 | r | TF_Activity.R | ########################################################
################ TF Activities Function ################
########################################################
#' Inference of TF activity
#' @param tfs a vector of selected tfs
#' @param GSDB gene set database (a list of gene sets, each of which is comprised of a vector genes)
#' @param eset expression set of gene expression data or gene expression matrix
#' @param DErslt DEG results
#' @param with_weight whether weighting factors (based on DEG p-values) are used to compute TF activity (default: TRUE)
#' @param if_module whether the grouping scheme (activation or inhibition) depends on module detection algorithm (default: FALSE, no need to change)
#' @param ind Hill coefficient parameter used in the weighting factors (default: 1/5, recommend to use 0 < ind < 1/4)
#' @param useCorSign allow the option to use the TF gene expression correlation to flip signs (default: TRUE)
#' @return a list of results:
#' all_list: grouping scheme of all TF gene sets.
#' all_activity: matrix of TF activity.
#' @export
TF_Activity = function (tfs, GSDB, eset, DErslt, with_weight = TRUE, if_module = FALSE, 
                        ind = 1/5, useCorSign = TRUE){
  # Accept either a Bioconductor ExpressionSet or a plain genes-x-samples matrix.
  if (is(eset, "ExpressionSet")){
    data = exprs(eset)
  }else{
    data = eset
  }
  table = DErslt$table
  # Weighting input: adjusted DE p-values, keyed by gene name.
  DEweights = data.frame(p = table$padj)
  rownames(DEweights) = rownames(table)
  # Only TFs that are themselves measured in the expression data can be scored.
  tfs = tfs[tfs %in% rownames(data)]
  all_activity = all_list = NULL
  for (tf in tfs) {
    # Target genes of this TF that also appear in the DE table.
    comms = intersect(GSDB[[tf]], rownames(table))
    g_inds = which(rownames(data) %in% comms)
    tmp_data = data[g_inds, ]
    tmp_data = rem_data(tmp_data)  # rem_data() is defined elsewhere in the package
    # Spearman correlation among targets, rescaled to [0, 1] as the
    # similarity used for the modularity-based split below.
    cor_mat = cor(as.matrix(t(tmp_data)), method = "spearman")
    tmp_dist <- (cor_mat+1)/2
    diag(tmp_dist) <- 0
    dimnames(cor_mat) = list(rownames(tmp_data), rownames(tmp_data))
    tmp_names = list(rownames(tmp_data), "sign")
    col_sums = colSums(tmp_dist)  # same as apply(tmp_dist, 2, sum), vectorised
    # Leading eigenvector of the modularity matrix splits the targets into
    # two groups (putatively activated vs. repressed).
    mod_mat = tmp_dist - sapply(col_sums, function(x) x * 
                                  col_sums/sum(tmp_dist))
    ev = eigen(mod_mat)
    tmp_sign = data.frame(sign(ev$vectors[, 1]))
    dimnames(tmp_sign) = tmp_names
    ng_mat = cor_mat[tmp_sign$sign == -1, tmp_sign$sign == 
                       -1]
    ps_mat = cor_mat[tmp_sign$sign == 1, tmp_sign$sign == 
                       1]
    test_mats = list(ng_mat = ng_mat, ps_mat = ps_mat)
    gs_list1 = list()
    # Within each group, keep only genes at least as close to the group
    # centre as the average gene (drops weakly coherent targets). Dead
    # locals of the original (test_center, test_dim, gs_remain2) removed.
    for (i in 1:2) {
      test_mat = test_mats[[i]]
      nrow_mat = nrow(test_mat)
      if (is.null(nrow_mat)) {
        # Group of size one: subsetting dropped to a vector; nothing to filter.
        next
      }
      else if (nrow_mat <= 2) {
        gs_list1[[i]] = rownames(test_mat)
      }
      else {
        tmp_kmean = kmeans(test_mat, 1)
        centers = tmp_kmean$centers
        distances = apply(test_mat, 1, function(x) cor(as.numeric(x), 
                                                       as.numeric(centers), method = "spearman"))
        m = mean(distances)
        names_left = names(distances)[distances >= m]
        gs_list1[[i]] = names_left
      }
    }
    tf_exprs = as.numeric(data[tf, ])
    # Scheme 1: activity from the modularity-based grouping.
    gs_remain = unlist(gs_list1)
    tmp_rslt = cal_activity(gs_remain, tmp_data, tmp_sign, 
                            ind, with_weight, DEweights, tf_exprs, useCorSign)
    tmp_activity = tmp_rslt$activity
    tmp_sign = tmp_rslt$sign
    # Scheme 2: group targets by the sign of their correlation with the
    # TF's own expression, keeping genes with above-average |correlation|
    # within each sign group.
    gs_list2 = list()
    cors = apply(tmp_data, 1, function(x) cor(x, tf_exprs, 
                                              method = "spearman"))
    pos_cors = cors[cors > 0]
    neg_cors = cors[cors < 0]
    tmp_sign2 = data.frame(sign(cors))
    m_pos = mean(pos_cors)
    m_neg = mean(neg_cors)
    gs_list2[[1]] = names(pos_cors[pos_cors >= m_pos])
    gs_list2[[2]] = names(neg_cors[neg_cors <= m_neg])
    gs_remain = unlist(gs_list2)
    tmp_rslt = cal_activity(gs_remain, tmp_data, tmp_sign2, 
                            ind, with_weight, DEweights, tf_exprs, useCorSign)
    tmp_activity2 = tmp_rslt$activity
    tmp_sign2 = tmp_rslt$sign
    # Score each scheme by how strongly its activity profile correlates
    # with the target genes' expression.
    cor1 = apply(tmp_data, 1, function(x) {
      tmpc = cor(x, tmp_activity, method = "spearman")
      return(tmpc)
    })
    cor2 = apply(tmp_data, 1, function(x) {
      tmpc = cor(x, tmp_activity2, method = "spearman")
      return(tmpc)
    })
    if (isTRUE(if_module)){  # was `if_module == T`: T is reassignable, TRUE is not
      # Always use the module-based grouping.
      all_list[[tf]] = tmp_sign
      all_activity = rbind(all_activity, tmp_activity)
    }else{
      # Otherwise pick whichever grouping scheme explains target expression better.
      mean1 = mean(abs(cor1))
      mean2 = mean(abs(cor2))
      if (mean1 < mean2) {
        tmp_sign = tmp_sign2
        tmp_activity = tmp_activity2
      }
      all_list[[tf]] = tmp_sign
      all_activity = rbind(all_activity, tmp_activity)
    }
  }
  dimnames(all_activity) = list(tfs, colnames(data))
  # drop = FALSE keeps the documented matrix return type even when only one
  # TF survives the complete-cases filter (the original silently returned a
  # bare vector in that case).
  return(list(all_list = all_list, all_activities = all_activity[complete.cases(all_activity), , drop = FALSE]))
}
#' The core function to compute the activity profile of an TF
#' @param gs_remain a vector of target genes after filtering
#' @param tmp_data gene expression of target genes
#' @param tmp_sign sign of target genes (+1 for one group, -1 for the other)
#' @param ind Hill coefficient parameter used in the weighting factors (default: 1/5, recommend to use 0 < ind < 1/4)
#' @param with_weight whether weighting factors (based on DEG p-values) are used to compute TF activity (default: TRUE)
#' @param DE_weights a vector of the input for computing DE weighting factors (typically, adjusted p-values from DEG analysis)
#' @param tf_exprs a vector of gene expression of the TF
#' @param useCorSign allow the option to use the TF gene expression correlation to flip signs (default: TRUE)
#' @return a list of results:
#' activity: matrix of TF activity.
#' sign: grouping scheme of all TF gene sets.
# Core computation of one TF's activity profile. With a single surviving
# target the activity is that gene's standardized expression; otherwise it
# is a (optionally DE-weighted) signed average over the row-normalized
# targets. If useCorSign is TRUE and the activity anticorrelates with the
# TF's own expression, both activity and signs are flipped.
# FIX: the original default `useCorSign = useCorSign` was self-referential
# and would error if the argument were ever omitted; TRUE matches the
# documented default. The no-op `else { tmp_activity = tmp_activity; ... }`
# branch was removed.
cal_activity = function (gs_remain, tmp_data, tmp_sign, ind, with_weight, DE_weights, tf_exprs, useCorSign = TRUE) {
  if(length(gs_remain) == 1) {
    # Single target: scale() returns a one-column matrix; flatten it.
    tmp_activity = as.vector(scale(tmp_data[gs_remain,]))
    tmp_sign = tmp_sign[gs_remain, , drop = FALSE]
  } else {
    tmp_rem_data = row_norm(tmp_data[gs_remain, ])  # row_norm() defined elsewhere in the package
    tmp_sign = tmp_sign[gs_remain, , drop = FALSE]
    if(with_weight) {
      # Hill() (defined elsewhere) maps DE p-values to weights via exponent `ind`.
      tmp_weight = Hill(DE_weights[gs_remain, ], ind)
    } else {
      tmp_weight = rep(1.0, length(gs_remain))
    }
    # Per-sample signed, weighted mean across the targets.
    tmp_activity = apply(tmp_rem_data, 2, function(x){
      rlst = x * tmp_sign * tmp_weight
      return(sum(rlst)/sum(tmp_weight))})
    tmp_activity = as.numeric(tmp_activity)
  }
  if (useCorSign && cor(tmp_activity, tf_exprs, method = "spearman") < 0) {
    # Orient the activity to correlate positively with the TF's expression.
    tmp_activity = -tmp_activity
    tmp_sign = -tmp_sign
  }
  return(list(activity = tmp_activity, sign = tmp_sign))
}
e74cb4517fbcadd753076c799f39d58a0cc03c9c | 3cbe123168055dd2e245e7fd4a2a052441c520a8 | /R/annotate_functions.R | 15bfe777e311f0510a496cf786e1fed0c980af41 | [] | no_license | westbrook-lab/MEMA | 21ad5980df5f7f13181cbb94dfa96365231399b7 | 7fd622576a4322418f497c0883a1f5ea4ea8fcf3 | refs/heads/master | 2021-01-20T13:36:27.269466 | 2017-01-18T19:01:21 | 2017-01-18T19:01:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,603 | r | annotate_functions.R | #Functions to support MEMAs printed in 8 well plates
#' Rotate the metadata 180 degrees in Array space
#'
#'@param DT A data.table of metadata with Spot, ArrayRow and ArrayColumn columns.
#'@return The same data.table rotated 180 degrees in array space. The ArrayRow, Arraycolumn and Spot values are updated.
#'
#' @export
rotateMetadata <- function(DT){
  # Rotating the array 180 degrees mirrors every coordinate about its
  # centre: a value v on a 1..max scale maps to (max + 1 - v).
  max_row <- max(DT$ArrayRow)
  max_col <- max(DT$ArrayColumn)
  max_spot <- max(DT$Spot)
  DT$ArrayRow <- max_row + 1 - DT$ArrayRow
  DT$ArrayColumn <- max_col + 1 - DT$ArrayColumn
  # Spot is kept an integer index after the flip.
  DT$Spot <- as.integer(max_spot + 1 - DT$Spot)
  DT
}
#' Read in and parse an Aushon XML log file
#'
#' @param logFile An Aushon logfile
#' @return A datatable keyed by Row and Column with Depositions and
#' PrintOrder columns.
#'
#' @export
readLogData<-function(logFile){
  #browser()
  # Parse the Aushon XML log and flatten it to a list of records.
  data<-XML::xmlParse(logFile)
  dataList<-XML::xmlToList(data)
  #Only keep the sample attributes
  dataList<-dataList[names(dataList)=="Sample"]
  #Bind the XML data into a data table
  data<-data.table::rbindlist(dataList)
  #Create Row and Column data by shifting the 0-based log values by 1
  data$Row<-as.integer(data$SpotRow)+1
  data$Column<-as.integer(data$SpotCol)+1
  #Convert deposition to an integer
  data$Depositions<-as.integer(data$Depositions)
  #Remove the 0 deposition entries
  data<-data[data$Depositions!=0,]
  #Create a print order column: file order is taken as print order.
  # NOTE(review): assumes at least one non-zero-deposition entry remains;
  # with zero rows, 1:nrow(data) would yield c(1, 0).
  data$PrintOrder<-1:nrow(data)
  data.table::setkey(data,"PrintOrder")
  #Remove unneeded columns
  data <- data[,c("Row","Column","PrintOrder","Depositions"), with=FALSE]
  #Rotate by 90 degrees CW to match gal file orientation
  tmp <- data$Row
  data$Row <- data$Column
  data$Column <- 1+max(tmp)-tmp
  # Return a fresh data.table keyed by Row then Column.
  DT <- data.table::data.table(data,key="Row,Column")
  return(DT)
}
#' Convert column names in a data.table
#'
#' Drops duplicated columns (keeping the first occurrence), converts the
#' remaining names to syntactically valid R names, and then strips every
#' period from them — including the periods that \code{make.names} inserts
#' in place of invalid characters such as spaces.
#'
#' @param DT A data.table
#'
#' @return DT The same data.table with duplicated columns removed and with
#'   column names stripped of invalid characters and periods.
#'
#' @export
convertColumnNames <- function (DT) {
  #Delete any duplicate names keeping the first instance
  DT <- DT[, unique(colnames(DT)), with = FALSE]
  #Replace invalid characters with a '.'
  data.table::setnames(DT, colnames(DT), make.names(colnames(DT)))
  #Remove all '.'s (both original periods and those introduced above).
  #setnames() returns the data.table invisibly, so the sanitised table is
  #the (invisible) return value of this function.
  data.table::setnames(DT, colnames(DT), gsub("[.]", "", colnames(DT)))
}
#' Return the median of a vector as a numeric value
#'
#' \code{numericMedian} is a small helper for use within data.table
#' aggregations: it guarantees every median comes back as a double, so a
#' computed column never mixes integer and numeric medians.
#' @param x integer or double vector
#'
#' @return The median of x as a numeric value
#'
#'@export
numericMedian <- function(x) {
  as.numeric(median(x))
}
|
5bb549cd3b5f358d69d3466175983e0cab82e0bf | 7be2f6044afbbf654bebca5caa7a7e34b0e75d42 | /man/print_dict.Rd | e05e77e394176bb1d143b0f85a3ffda390720e59 | [
"MIT"
] | permissive | KWB-R/kwb.pathdict | b6228fd423dbc6de33094e997644e0a6ed86dd33 | b107485e18408a1bfb0346c785ce28d90fa6b087 | refs/heads/master | 2022-06-16T01:50:12.815755 | 2020-01-10T13:53:33 | 2020-01-10T13:53:33 | 175,963,892 | 0 | 0 | MIT | 2019-03-17T17:17:06 | 2019-03-16T11:33:35 | R | UTF-8 | R | false | true | 335 | rd | print_dict.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{print_dict}
\alias{print_dict}
\title{Helper function to print the dictionary to the console}
\usage{
print_dict(x, maxdepth = 1, compressed = NULL)
}
\description{
Helper function to print the dictionary to the console
}
\keyword{internal}
|
68ba18f78c928e4080040c3bdd2e0826609b5029 | a36e639e16fe3008ffd166377a7af05ca1c76fba | /analyses/code/2.1.2-risk-loss-relation.R | a4392fc64d9e37263a1bf7443ab4666ad4d4145d | [] | no_license | JanaJarecki/RRC | 91ecaa54082972919f85187368788ae59a7d466f | 1fa413f607747deeff62daad4fffcc0f4faa6365 | refs/heads/master | 2020-04-27T07:51:38.701073 | 2020-04-24T13:24:06 | 2020-04-24T13:24:06 | 174,150,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,861 | r | 2.1.2-risk-loss-relation.R | library(data.table)
library(papaja)
library(corx)
# Toggle for the (currently disabled) z-standardisation step below.
z <- FALSE
# Read the data: identifiers as factors, the perception rating as numeric.
d <- fread("../../data/processed/study2.csv", key = 'id', colClasses = list(factor = c('id', 'index', 'quest'), double = "perception"))
# Mean-center the perceived ratings (1-7 Likert-type scale) within each
# participant and question type.
d[, perception := perception - mean(perception), by = .(id, quest)]
d[, quest := factor(quest, levels = c("risk", "risk (variance)", "fluctuation"))]
# Average loss rescaled by 1/100 — presumably a unit conversion; confirm
# against the data-processing pipeline.
d[, avgloss_obj := avgloss_obj/100]
# Objective expected loss given a loss, times the loss probability.
d[, evloss_x_ploss_obj := avgloss_obj * ploss_obj]
# # z-standardize
# if (z) {
#   v <- c(vars, 'ret_subj')
#   d[, c(v) := lapply(.SD, as.numeric), .SDcols = v]
#   d[, c(v) := lapply(.SD, function(z) c(scale(z))), .SDcols = v, by = id]
# }
# ------------------------------------------------------------------------
# Objective correlation between loss and return
# ------------------------------------------------------------------------
# Zero-order correlations of objective return with each objective loss
# measure, computed over unique stimuli (one row per index) and formatted
# as APA strings via papaja::apa_print.
d[!duplicated(index), apa_print(cor.test(mroi_obj, avgloss_obj))$estimate]
d[!duplicated(index), apa_print(cor.test(mroi_obj, ploss_obj))$estimate]
d[!duplicated(index), apa_print(cor.test(mroi_obj, evloss_x_ploss_obj))$estimate]
# ------------------------------------------------------------------------
# Perceived risk correlated with different measures of loss
# ------------------------------------------------------------------------
# Columns entering the correlation table: perceived risk plus the objective
# return/loss measures.
cor_vars <- c("perception", "mroi_obj", "avgloss_obj", "ploss_obj", "evloss_x_ploss_obj")

# Build and print one APA-style correlation table for a given presentation
# format (graph_flag TRUE/FALSE selects the corresponding trials). Extracted
# as a helper because the two table blocks were identical except for the
# graph filter; also fixes `escape = F` to the explicit FALSE.
print_risk_loss_table <- function(graph_flag) {
  tab <- d[quest == "risk" & graph == graph_flag][, .SD, .SDcols = cor_vars]
  cor_tab <- corx(tab, triangle = "lower", stars = c(0.05, 0.01, 0.001),
                  describe = c(`$M$` = mean, `$SD$` = sd))
  row.names(cor_tab$apa) <- paste(
    paste0("(", 1:5, ")"),
    c("Perceived Risk", "Return", "Mean(loss)", "P(loss)", "EV(loss | loss)"))
  cat(papaja::apa_table(
    cor_tab$apa,
    caption = "Simple Correlation of Perceived Risk with Objective Measures of Loss",
    col_spanner = list("Objective" = c(2, 6)),
    note = "* p < .05; ** p < .01; *** p < .001. Perceived values mean-centered.",
    escape = FALSE))
}

print_risk_loss_table(TRUE)
print_risk_loss_table(FALSE)
7dc297e03bf79f1e1da9b3c4520ff63e567594e7 | af6952b5f75ac8f6268ee148b138ad2de69887e2 | /man/microtiter_data_frame.Rd | 59d2df613b5bf40a5ef0faa61dec2e13a63acbba | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | briandconnelly/microtiterr | 361f36dc748db1d0b30d051d6d6946809443cca4 | 4d80ff62cb3c3abee74c0c820f104168428ef8e6 | refs/heads/master | 2021-01-19T00:50:53.459630 | 2017-04-18T00:23:58 | 2017-04-18T00:23:58 | 87,212,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 910 | rd | microtiter_data_frame.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/microtiter_data_frame.R
\name{microtiter_data_frame}
\alias{microtiter_data_frame}
\alias{microtiter_tibble}
\title{Create a Data Frame or Tibble with All Wells in a Microtiter Plate}
\usage{
microtiter_data_frame(nwells = 96, include_well = TRUE)
microtiter_tibble(nwells = 96, include_well = TRUE)
}
\arguments{
\item{nwells}{Microtiter plate format specified as number of wells (e.g., 6 or 24; default: 96)}
\item{include_well}{Logical value indicating whether or not to include
well labels (default: \code{TRUE})}
}
\value{
A data frame
}
\description{
\code{microtiter_data_frame} creates a data frame representing a microtiter
plate, with one row per well, and columns for Row, Column, and Well
(optional). \code{microtiter_tibble} creates a \link[tibble]{tibble} instead.
}
\examples{
microtiter_data_frame(nwells = 96)
}
|
872eb6229fc30f061e7508589d4a5941e04069f4 | a114996ecfdd212ea60e54238a24b3bf51112b13 | /Problems/Problem12.R | 9985a0de280336feeea5f5b92993767d4c5b6f97 | [] | no_license | Adam-Hoelscher/ProjectEuler.R | 35610697d5bc4c61e9adfaec6d923a424df20486 | e060484365cafcfcd400c3cecacc189293dfc682 | refs/heads/master | 2021-01-23T01:29:54.585429 | 2017-09-11T18:28:46 | 2017-09-11T18:28:46 | 102,432,553 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 232 | r | Problem12.R | Problem12<-function(){
FactorCount<-function(x){return(2*length(which(0==x%%(1:sqrt(x)))))}
i<-1
temp<-1
fc<-FactorCount(temp)
while (fc<=500){
i<-i+1
temp<-temp+i
fc<-FactorCount(temp)
}
return(temp)
}
|
57106ed4237c7e02d6165733808bb1f3e429864d | 33d5ef0b582ff31345eeb78c8bddf70b49166eb2 | /Models/PenalizedCox/penalizedcox.R | 299d62f33887b04b8a7ee21bd9241fb263006c32 | [] | no_license | 1RuobingLiu/DeepTensorSurvival | 3ad0d60741d06bc1dc9f8969c3286115380e4718 | 6f81515197edf2e5a084ff8771905ba79e5b50ca | refs/heads/master | 2023-07-01T07:43:18.482886 | 2021-08-04T14:24:07 | 2021-08-04T14:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,191 | r | penalizedcox.R | #import libraries needed
library(tidyverse)
library(survival)
library(ggplot2)
library(ggfortify)
library(dplyr)
library("glmnet")
library(coxphf)
library(caret)
library(boot)
library(rlist)
#separate data into train/test set by predefined index
sep_data <- function(alldata, ind) {
sets = list()
#ind
sets[[1]] = alldata[ind,]
#data rest
sets[[2]] = alldata[-ind,]
return(sets)
}
#set penalty factor, Then Lasso/Ridge will not penalize last two columns(Age and Stage)
pf <- function(data) {
return(c(rep(1,ncol(data)-2),0,0))
}
#path to load datasets
dirpath <- "D:\\practicum\\ndtf\\"
datapath <- paste(dirpath,"new_datasets\\",sep = "")
facpath <- paste(dirpath,"factorized_data_\\",sep = "")
indpath <- paste(dirpath,"ind100\\",sep = "")
resultpath <- paste(dirpath,"results\\",sep = "")
filename = "ridge_GE.Rda"
#load datasets
clinical_data <- read.csv(paste(datapath,"new_clinical_10y.csv",sep="") ,header = TRUE)
ge <- read.csv(paste(datapath,"new_ge.csv",sep="") ,header = TRUE)
cn <- read.csv(paste(datapath,"new_cn.csv",sep="") ,header = TRUE)
me <- read.csv(paste(datapath,"new_me.csv",sep="") ,header = TRUE)
con <- cbind(ge,cn,me) #dataconcatenation
time = clinical_data$OS.time
status = clinical_data$OS
#add two cols(Age and Stage to datasets)
ge$agegroup = clinical_data$agegroup
cn$agegroup = clinical_data$agegroup
me$agegroup = clinical_data$agegroup
ge$stagegroup = clinical_data$stagegroup
cn$stagegroup = clinical_data$stagegroup
me$stagegroup = clinical_data$stagegroup
con$agegroup = clinical_data$agegroup
con$stagegroup = clinical_data$stagegroup
PenalizedCox <- function(data, time, status, trainind,alpha = 1,df){
train_time = time[trainind]
test_time = time[-trainind]
train_status = status[trainind]
test_status = status[-trainind]
split_data = sep_data(data,trainind)
train <- split_data[[1]]
test<- split_data[[2]]
traindata <- as.matrix(train)
trainsurv <- Surv(train_time,train_status)
testdata <- as.matrix(test)
testsurv <- Surv(test_time,test_status)
set.seed(2020)
try({ fit.lasso <- cv.glmnet(traindata, trainsurv, alpha = alpha, family = "cox",type.measure = "C",maxit = 1000,penalty.factor = pf(traindata))
Coefficients = coef(fit.lasso, s = "lambda.min")
Active.Index <- which(Coefficients != 0)
pred1 = predict(fit.lasso, newx = traindata)
pred2 = predict(fit.lasso, newx = testdata)
cindex1 = Cindex(pred1, trainsurv)
cindex2 = Cindex(pred2, testsurv)
})
df = rbind(df, data.frame(seed = s, type = paste(dt,st,sep = ""), train = cindex1, test = cindex2,num_cov = length(Active.Index)))
return(df0)
}
#Main procedure:
for (s in c(1:20)) {
print(s)
trainind <- read.csv(paste(indpath,"trainind", s, ".csv",sep="") ,header = TRUE)
trainind <- trainind$x
df0 <- data.frame(seed = numeric(), type = character(), train = numeric(), test = numeric(),num_cov = numeric())
df0 = PenalizedCox(ge, time,status, trainind,1,df0)
}
save(df0,file = paste(resultpath,filename,sep = ""))
|
4f82c45bf2093253cd5afd205c4b23ee5b5d3dbf | 2c09783547da268fc37456f4dfafb7016f3f9744 | /R/surrounding_signal.R | 6c5f3d926b7d16cdc76b4fb906effbdd72f6d42d | [] | no_license | rpolicastro/deepTSSscrubbeR | 8170048b20de8cab71d759b51b5ef6ad62e85ccd | ccec83f6355f9efaddd925103ca0d62082664aee | refs/heads/master | 2020-09-10T17:31:05.200261 | 2020-03-17T17:27:05 | 2020-03-17T17:27:05 | 221,779,402 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,025 | r | surrounding_signal.R |
#' Get Surrounding Signal
#'
#' Get TSS signal around selected TSS
#'
#' @import tibble
#' @importFrom dplyr select contains count rename left_join bind_rows vars mutate_at
#' @importFrom tidyr pivot_wider replace_na
#' @importFrom GenomicRanges GRanges makeGRangesFromDataFrame
#' @importFrom IRanges findOverlapPairs
#' @importFrom purrr map map2
#'
#' @param deep_obj deep tss object
#'
#' @rdname get_signal-function
#'
#' @export
get_signal <- function(deep_obj) {
## Grab all TSS positions and scores.
all_ranges <- as.data.table(deep_obj@experiment)[,
.(seqnames, start, end, strand, score)
]
all_ranges <- unique(all_ranges)
all_ranges <- makeGRangesFromDataFrame(all_ranges, keep.extra.columns = TRUE)
## Find surrounding TSSs.
overlaps <- deep_obj@ranges$signal %>%
findOverlapPairs(., all_ranges) %>%
as.data.table
overlaps <- overlaps[,
.(first.X.seqnames, first.X.start, second.X.start, first.X.tss,
second.X.score, first.X.end, first.X.strand, first.tss_group)
]
overlaps[,
position := ifelse(
first.X.strand == "+",
second.X.start - first.X.start,
first.X.end - second.X.start
)
][,
second.X.start := NULL
]
setnames(
overlaps,
old = c(
"first.X.seqnames", "first.X.start", "first.X.end",
"first.X.strand", "second.X.score", "first.X.tss",
"first.tss_group"
),
new = c("seqnames", "start", "end", "strand", "score", "tss", "tss_group")
)
## Create matrix of surrounding signal.
signal_length <- (deep_obj@settings$signal_expansion * 2) + 1
dummy <- data.table(
seqnames = "__dummy__",
position = seq(0, signal_length - 1, 1)
)
positions <- bind_rows(dummy, overlaps)
positions <- dcast(
overlaps, seqnames + start + end + strand + tss + tss_group ~ position,
fill = 0, value.var = "score"
)[
seqnames != "__dummy__"
][
order(tss_group)
]
## Return surrounding signal to deep tss object.
positions <- makeGRangesFromDataFrame(positions, keep.extra.columns = TRUE)
deep_obj@ranges$signal <- positions
return(deep_obj)
}
|
be0885d87c7849aadaee478c1f64708d24f9bc37 | ead929cf3aefc7867206e9710cf3ec406545bb63 | /data-raw/quadrats.R | 36b6e8e27ab41916e114edbe5ad18cb2f88dec37 | [] | no_license | lydiaPenglish/STRIPS2veg | cadee6c843d09dfec156defe820937fb68777181 | 3482b932fbd50e76bbe6131b2a9b3cf2a9f3a070 | refs/heads/master | 2022-11-06T07:47:17.287401 | 2020-06-08T17:53:08 | 2020-06-08T17:53:08 | 132,038,278 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,360 | r | quadrats.R | library("STRIPS2PRIVATE")
library("dplyr")
library("tidyr")
library("readr")
library("stringr")
my_read_csv = function(f, into) {
readr::read_csv(
file = f,
col_types = cols(quadratID = readr::col_character(),
easting = readr::col_double(),
northing = readr::col_double())) %>%
dplyr::mutate(file=f) %>%
tidyr::separate(file, into)
}
read_dir = function(path, pattern, into) {
files = list.files(path = path,
pattern = pattern,
recursive = TRUE,
full.names = TRUE)
plyr::ldply(files, my_read_csv, into = into)
}
# Above modified from https://gist.github.com/jarad/8f3b79b33489828ab8244e82a4a0c5b3
#######################################################################
quadrats <- read_dir(path = "quadrat",
pattern = "*.csv",
into = c("quadrat","year", "siteID","quadrats","csv")) %>%
mutate(
siteID = factor(toupper(siteID)),
quadratID = str_to_upper(quadratID),
# - Don't need these lines anymore since quadrats are already anonymized
# easting = anonymizeGPS(easting, siteID, "easting"),
# northing = anonymizeGPS(northing, siteID, "northing")
) %>%
select(year, quadratID, siteID, easting, northing)
usethis::use_data(quadrats, overwrite = TRUE)
|
f4e0cc38ebf2a7134bf04ae405eb2e7e5182350c | 6cbffff7534b28f260777a84b889eb1e521c02b3 | /R/m_plot_activity.R | 884bad765cc3b1868f6b2992db79392e0af51bb6 | [] | no_license | sebmader/LizardsAndNiches | 281291fcafc844f84d2027bfc3b2f4175f305b46 | cfb3e19a9e60becdc44a2e74292b5e7a2895e120 | refs/heads/master | 2021-06-27T15:38:21.453007 | 2021-02-22T11:54:26 | 2021-02-22T11:54:26 | 217,536,470 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,415 | r | m_plot_activity.R | #' @title Plot activity times over the organisms reflectance
#' @description This function plots the output of the biophysical model of ectotherms
#' from the NicheMapR package.
#' @name m_plot_activity
#' @param multi_ecto A tidy data frame of summarised output results of the ecotherm function
#' containing the activity times (total, percentage, ratios, etc.), absorptivity, weight,
#' yearly averaged microclimate variables per scenario and location (for details see ?m_tidy_output).
#' @param rcps Character stating which RCP scenario shall be plotted: "4.5", "8.5" or "both".
#' The latter leads to facet rows for each RCP scenario.
#' @param save_plot Boolean whether the plot should be saved or not (default = FALSE).
#' @return Plot
# @importFrom graphics abline legend text
#' @importFrom grDevices png
#' @export
m_plot_activity <- function(multi_ecto, rcps = "both", save_plot = FALSE) {
# create directory of save path if applicable
save_path <- "./Plots/activity_plots/"
if(save_plot) {
if(!dir.exists(save_path)) {
dir.create(save_path, recursive = T)
cat(paste0("Created folder ", save_path, "\n"))
}
}
assertthat::assert_that(is.data.frame(multi_ecto))
assertthat::assert_that(is.character(rcps))
# rename variables
names(multi_ecto)[17] <- "Temp"
names(multi_ecto)[19] <- "RH"
names(multi_ecto)[5] <- "time"
names(multi_ecto)[23] <- "mag_change_act"
multi_ecto$time <- factor(multi_ecto$time, levels = c("pres", "40-59", "80-99"))
# make dataframe with 'present' being both rcp 4.5 and 8.5 instead of none
present45 <- multi_ecto[which(stringr::str_detect(multi_ecto$time,
"pres")),]
present85 <- present45
present45$rcp <- "4.5"
present85$rcp <- "8.5"
multi_ecto_full <- rbind(multi_ecto[which(
!stringr::str_detect(multi_ecto$time,
"pres")),],
present45, present85)
# depending on which rcp is supposed to be plotted, collapse dataframe
if(rcps == "4.5") {
multi_ecto <- multi_ecto_full[which(multi_ecto_full$rcp == "4.5"),]
} else if(rcps == "8.5") {
multi_ecto <- multi_ecto_full[which(multi_ecto_full$rcp == "8.5"),]
}
multi_ecto$rcp <- droplevels(multi_ecto$rcp)
multi_ecto$rcp <- factor(multi_ecto$rcp, levels = c("8.5", "4.5"))
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
multi_ecto_SE <- summarySE(multi_ecto,
measurevar = "h_active",
groupvars = c("rcp", "LID", "time"))
multi_ecto_nopres <- multi_ecto[which(multi_ecto$time != "pres")]
#### plot the data ####
# plot size
unit <- "cm"
width <- 22
height <- 13.7
### split into locations ###
# # change in active hours vs. time point; facet grid locations
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'timeper',
# y = 'change_act',
# colour = 'rcp',
# shape = 'rcp'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'timeper',
# y = 'change_act',
# colour = 'rcp',
# group = 'rcp'))+
# ggplot2::scale_x_discrete(limits = c("pres", "40-59", "80-99"))+
# ggplot2::facet_wrap(~LID)+
# ggplot2::theme_bw()
#
# # print or save plot
# if(save_plot) {
# file_name <- "change_act_scenario.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
# percentage of change in active hours vs. time point; facet grid locations
p <- ggplot2::ggplot(data = multi_ecto_4585pres)+
ggplot2::geom_point(size = 2,
mapping = ggplot2::aes_(x = quote(timeper),
y = quote(perc_change_act),
colour = quote(ID)))+
ggplot2::geom_line(size = 1,
mapping = ggplot2::aes_(x = quote(timeper),
y = quote(perc_change_act),
colour = quote(ID),
group = quote(ID)))+
ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
colour = "black")+
ggplot2::scale_linetype_manual(name = "Reference", values = 2,
guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
ggplot2::scale_x_discrete(limits = c("pres", "40-59", "80-99"))+
ggplot2::scale_color_discrete(guide = "none")+
ggplot2::scale_shape(guide = "none")+
ggplot2::facet_grid(rows = ggplot2::vars(rcp), cols = ggplot2::vars(LID))+
ggplot2::theme_bw()
# print or save plot
if(save_plot) {
file_name <- "perc_change_act_scenario.png"
ggplot2::ggsave(filename = file_name, plot = p, device = png(),
path = save_path, units = unit,
width = width, height = height, dpi = 500)
message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# unlink(file_name)
} else { print(p) }
### activity vs. physio data ###
multi_ecto_nopres <- multi_ecto[which(multi_ecto$timeper != "pres"),]
# absorptivity #
# # activity-basking hours ratio vs. absorptivity (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'act_bask_ratio',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'act_bask_ratio',
# colour = 'id'))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto$absorp),
# y = 0.1 * (max(multi_ecto_nopres$act_bask_ratio) -
# min(multi_ecto_nopres$act_bask_ratio)) +
# unlist(lapply(split(multi_ecto,
# f = multi_ecto$LID),
# function(x) max(x$act_bask_ratio))),
# label = unique(multi_ecto$LID))+
# ggplot2::labs(title = "Activity-basking ratio (per year) vs. absorptivity")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "act-bask_ratio_absorp.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
#
#
# # total hours active vs. absorptivity (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'h_active',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'h_active',
# colour = 'id'))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto$absorp),
# y = 0.1 * (max(multi_ecto_nopres$h_active) -
# min(multi_ecto_nopres$h_active)) +
# unlist(lapply(split(multi_ecto,
# f = multi_ecto$LID),
# function(x) max(x$h_active))),
# label = unique(multi_ecto$LID))+
# ggplot2::labs(title = "Hours of activity (per year) vs. absorptivity")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "total_act_absorp.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
#
#
#
#
# # change in hours active vs. absorptivity (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto_nopres)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'change_act',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'change_act',
# colour = 'id'))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_nopres$absorp),
# y = 0.1 * (max(multi_ecto_nopres$change_act) -
# min(multi_ecto_nopres$change_act)) +
# unlist(lapply(split(multi_ecto_nopres,
# f = multi_ecto_nopres$LID),
# function(x) max(x$change_act))),
# label = unique(multi_ecto_nopres$LID))+
# ggplot2::labs(title = "Change in activity hours (per year) vs. absorptivity")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "change_act_absorp.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
# percentage of change in hours active vs. absorptivity (all locations & scenarios)
p <- ggplot2::ggplot(data = multi_ecto_nopres)+
ggplot2::geom_point(size = 2,
mapping = ggplot2::aes_string(x = 'absorp',
y = 'perc_change_act',
colour = 'scenario',
shape = 'scenario'))+
ggplot2::geom_line(size = 1,
mapping = ggplot2::aes_string(x = 'absorp',
y = 'perc_change_act',
colour = 'scenario'))+
ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
colour = "black")+
ggplot2::scale_linetype_manual(name = "Reference", values = 2,
guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_nopres$absorp),
# y = 0.1 * (max(multi_ecto_nopres$perc_change_act) -
# min(multi_ecto_nopres$perc_change_act)) +
# unlist(lapply(split(multi_ecto_nopres,
# f = multi_ecto_nopres$LID),
# function(x) max(x$perc_change_act))),
# label = unique(multi_ecto_nopres$LID))+
ggplot2::labs(title = "% of change in activity (per year) vs. absorptivity")+
ggplot2::theme_bw()
# save plot
if(save_plot) {
file_name <- "perc_change_act_absorp.png"
ggplot2::ggsave(filename = file_name, plot = p, device = png(),
path = save_path, units = unit,
width = width, height = height, dpi = 500)
message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# unlink(file_name)
} else { print(p) }
### just RCP 8.5 ###
multi_ecto_85 <- multi_ecto_nopres[which(multi_ecto_nopres$rcp != "4.5"),]
# percentage of change in hours active vs. absorptivity (all locations & scenarios)
p <- ggplot2::ggplot(data = multi_ecto_85)+
ggplot2::geom_point(size = 2,
mapping = ggplot2::aes_string(x = 'absorp',
y = 'perc_change_act',
colour = 'timeper',
shape = 'timeper'))+
ggplot2::geom_line(size = 1,
mapping = ggplot2::aes_string(x = 'absorp',
y = 'perc_change_act',
colour = 'timeper'))+
ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
colour = "black")+
ggplot2::scale_linetype_manual(name = "Reference", values = 2,
guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_85$absorp),
# y = 0.1 * (max(multi_ecto_85$perc_change_act) -
# min(multi_ecto_85$perc_change_act)) +
# unlist(lapply(split(multi_ecto_85,
# f = multi_ecto_85$LID),
# function(x) max(x$perc_change_act))),
# label = unique(multi_ecto_85$LID))+
ggplot2::labs(title = "% of change in activity (per year) vs. absorptivity")+
ggplot2::theme_bw()
# save plot
if(save_plot) {
file_name <- "perc_change_act_absorp_85.png"
ggplot2::ggsave(filename = file_name, plot = p, device = png(),
path = save_path, units = unit,
width = width, height = height, dpi = 500)
message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# unlink(file_name)
} else { print(p) }
# body weight #
# # activity-basking hours ratio vs. body weight (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'ww',
# y = 'act_bask_ratio',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'ww',
# y = 'act_bask_ratio',
# colour = 'id'))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto$ww),
# y = 0.1 * (max(multi_ecto$act_bask_ratio) -
# min(multi_ecto$act_bask_ratio)) +
# unlist(lapply(split(multi_ecto,
# f = multi_ecto$LID),
# function(x) max(x$act_bask_ratio))),
# label = unique(multi_ecto$LID))+
# ggplot2::labs(title = "Activity-basking ratio (per year) vs. body weight")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "act-bask_ratio_ww.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
# # percantage of change in hours active vs. weight (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto_nopres)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'ww',
# y = 'perc_change_act',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'ww',
# y = 'perc_change_act',
# colour = 'id'))+
# ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
# colour = "black")+
# ggplot2::scale_linetype_manual(name = "Reference", values = 2,
# guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_nopres$ww),
# y = 0.1 * (max(multi_ecto_nopres$perc_change_act) -
# min(multi_ecto_nopres$perc_change_act)) +
# unlist(lapply(split(multi_ecto_nopres,
# f = multi_ecto_nopres$LID),
# function(x) max(x$perc_change_act))),
# label = unique(multi_ecto_nopres$LID))+
# ggplot2::labs(title = "% of change in activity (per year) vs. body weight")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "perc_change_act_weight.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
# just RCP 8.5
# percantage of change in hours active vs. weight (all locations & scenarios)
p <- ggplot2::ggplot(data = multi_ecto_85)+
ggplot2::geom_point(size = 2,
mapping = ggplot2::aes_string(x = 'ww',
y = 'perc_change_act',
colour = 'timeper',
shape = 'timeper'))+
ggplot2::geom_line(size = 1,
mapping = ggplot2::aes_string(x = 'ww',
y = 'perc_change_act',
colour = 'timeper'))+
ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
colour = "black")+
ggplot2::scale_linetype_manual(name = "Reference", values = 2,
guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_85$ww),
# y = 0.1 * (max(multi_ecto_85$perc_change_act) -
# min(multi_ecto_85$perc_change_act)) +
# unlist(lapply(split(multi_ecto_85,
# f = multi_ecto_85$LID),
# function(x) max(x$perc_change_act))),
# label = unique(multi_ecto_85$LID))+
ggplot2::labs(title = "% of change in activity (per year) vs. body weight")+
ggplot2::theme_bw()
# save plot
if(save_plot) {
file_name <- "perc_change_act_weight_85.png"
ggplot2::ggsave(filename = file_name, plot = p, device = png(),
path = save_path, units = unit,
width = width, height = height, dpi = 500)
message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# unlink(file_name)
} else { print(p) }
# body length #
# # activity-basking hours ratio vs. body length (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'ttl',
# y = 'act_bask_ratio',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'ttl',
# y = 'act_bask_ratio',
# colour = 'id'))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto$ttl),
# y = 0.1 * (max(multi_ecto$act_bask_ratio) -
# min(multi_ecto$act_bask_ratio)) +
# unlist(lapply(split(multi_ecto,
# f = multi_ecto$LID),
# function(x) max(x$act_bask_ratio))),
# label = unique(multi_ecto$LID))+
# ggplot2::labs(title = "Activity-basking ratio (per year) vs. body length")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "act-bask_ratio_ttl.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
#
# # percantage of change in hours active vs. absorptivity (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto_nopres)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'ttl',
# y = 'perc_change_act',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'ttl',
# y = 'perc_change_act',
# colour = 'id'))+
# ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
# colour = "black")+
# ggplot2::scale_linetype_manual(name = "Reference", values = 2,
# guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_nopres$ttl),
# y = 0.1 * (max(multi_ecto_nopres$perc_change_act) -
# min(multi_ecto_nopres$perc_change_act)) +
# unlist(lapply(split(multi_ecto_nopres,
# f = multi_ecto_nopres$LID),
# function(x) max(x$perc_change_act))),
# label = unique(multi_ecto_nopres$LID))+
# ggplot2::labs(title = "% of change in activity (per year) vs. body length")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "perc_change_act_length.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
#
#
# # weight / length #
#
# multi_ecto_nopres$wwttl_ratio <- multi_ecto_nopres$ww / multi_ecto_nopres$ttl
#
# # percantage of change in hours active vs. absorptivity (all locations & scenarios)
# p <- ggplot2::ggplot(data = multi_ecto_nopres)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'wwttl_ratio',
# y = 'perc_change_act',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'wwttl_ratio',
# y = 'perc_change_act',
# colour = 'id'))+
# ggplot2::geom_hline(ggplot2::aes(yintercept = 1, linetype = "present"),
# colour = "black")+
# ggplot2::scale_linetype_manual(name = "Reference", values = 2,
# guide = ggplot2::guide_legend(override.aes = list(color = "black")))+
# ggplot2::annotate(geom = "text", x = unique(multi_ecto_nopres$wwttl_ratio),
# y = 0.1 * (max(multi_ecto_nopres$perc_change_act) -
# min(multi_ecto_nopres$perc_change_act)) +
# unlist(lapply(split(multi_ecto_nopres,
# f = multi_ecto_nopres$LID),
# function(x) max(x$perc_change_act))),
# label = unique(multi_ecto_nopres$LID))+
# ggplot2::labs(title = "% of change in activity (per year) vs. weight-length ratio")+
# ggplot2::theme_bw()
#
# # save plot
# if(save_plot) {
# file_name <- "perc_change_act_wwttl-ratio.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
# # total hours basking vs. absorptivity (all locations & scenarios)
# ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 2,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'h_bask',
# colour = 'id',
# shape = 'id'))+
# ggplot2::geom_line(size = 1,
# mapping = ggplot2::aes_string(x = 'absorp',
# y = 'h_bask',
# colour = 'id'))+
# ggplot2::labs(title = "Hours of basking (per year) vs. absorptivity")+
# ggplot2::theme_bw()
#### activity vs. temperature ####
# library(ggpubr)
# p <- ggscatter(multi_ecto, x = "T_loc", y = "h_active",
# use = "complete.obs",
# add = "reg.line", conf.int = TRUE,
# cor.coef = TRUE, cor.method = "pearson",
# xlab = "Microclimate temperature [°C]", ylab = "Hours of activity [h]")
#
# # save plot
# if(save_plot) {
# file_name <- "relationship_h_act_vs_temp_ind.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
# total vs. total
p <- ggplot2::ggplot(data = multi_ecto)+
ggplot2::geom_smooth(ggplot2::aes_(x = quote(T_loc), y = quote(h_active)),
color = "grey",
method = lm)+
ggplot2::geom_point(size = 3,
mapping = ggplot2::aes_(x = quote(T_loc),
y = quote(h_active),
colour = quote(timeper),
shape = quote(timeper)))+
ggplot2::labs(title = "Hours of activity (per year) vs. microclimate temperature")+
ggplot2::theme_bw()
# save plot
if(save_plot) {
file_name <- "activity_vs_temperature.png"
ggplot2::ggsave(filename = file_name, plot = p, device = png(),
path = save_path, units = unit,
width = width, height = height, dpi = 500)
message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# unlink(file_name)
} else { print(p) }
# # change vs. change
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 3,
# mapping = ggplot2::aes_string(x = 'change_T_loc',
# y = 'change_act',
# colour = 'timeper',
# shape = 'timeper'))+
# ggplot2::labs(title = "Change of activity vs. change in micro temperature")+
# ggplot2::theme_bw()
# # save plot
# if(save_plot) {
# file_name <- "change_act_vs_change_temp.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
#
#
# # percentage change vs. percentage change
# p <- ggplot2::ggplot(data = multi_ecto)+
# ggplot2::geom_point(size = 3,
# mapping = ggplot2::aes_string(x = 'perc_T_loc',
# y = 'perc_change_act',
# colour = 'timeper',
# shape = 'timeper'))+
# ggplot2::labs(title = "% change of act vs. % change of micro temp")+
# ggplot2::theme_bw()
# # save plot
# if(save_plot) {
# file_name <- "perc_activity_vs_temperature.png"
# ggplot2::ggsave(filename = file_name, plot = p, device = png(),
# path = save_path, units = unit,
# width = width, height = height, dpi = 500)
#
# message(paste0("Plot ", file_name, " has been saved in ", save_path, "\n"))
# # unlink(file_name)
# } else { print(p) }
}
|
e1eade8d1e34860663567b303d149727d8a646ca | 3d811680b4e998091fd76e78017d089cd0749857 | /cytometry_analysis/pLac_EXPRESSOR/run.R | 5e18ef906135864a80aa568ff68655ed44005aa5 | [] | no_license | CIDARLAB/phoenix-core | ba4b59c0a2a80ed288f3d482dc6e397453030519 | c1d3cf6749305fd5a52131419c0234a71410a79b | refs/heads/master | 2021-07-06T22:56:14.940755 | 2017-04-04T22:14:35 | 2017-04-04T22:14:35 | 27,271,578 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 350 | r | run.R | #Import key file
# Import key file: plate/sample key for the pLac EXPRESSOR cytometry run.
key <- read.csv("key_EXPRESSOR_pLac_81115.csv", header = TRUE)
# Replace missing entries with empty strings so string comparisons avoid NA.
key[is.na(key)] <- ""
# Specify minimum number of events to be considered
minEvents <- 1000
# Call master script
# NOTE(review): `key`, `minEvents`, `wd` and `dataPath` appear to be consumed
# by analyze.R from the global environment -- confirm against that script.
wd <- getwd()
dataPath <- "data"
# chdir = TRUE (not the reassignable shorthand `T`) so relative paths inside
# analyze.R resolve against its own directory.
source("~/phoenix/phoenix-core/cytometry_analysis/master_analytics_scripts/analyze.R",
       chdir = TRUE)
# analyzeKey(key, minEvents)
|
0729a518386ab79c0964e322c41cb19c9e1a298e | c7b332e8cd2371fc24356804c2d56ce2faa5975e | /R/3c_costing_medication_liquid_functions.R | ee115c63241e0593898b3b4888f8577cc3762cf5 | [] | no_license | sheejamk/packDAMipd | a138a132bb34da27dbbf7c02c17d0b26650c7458 | f37cbe0702f8bb9addc4ae1875669a08f5d20416 | refs/heads/main | 2023-03-18T19:36:50.293630 | 2022-07-28T20:32:51 | 2022-07-28T20:32:51 | 342,639,162 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 38,493 | r | 3c_costing_medication_liquid_functions.R |
##############################################################################
#' Function to estimate the cost of liquids taken (from IPD)
#' @param ind_part_data IPD
#' @param name_med name of medication
#' @param brand_med brand name of medication if revealed
#' @param dose_med dose of medication used
#' @param unit_med unit of medication ; use null if its along with the dose
#' @param bottle_size size of the bottle used
#' @param bottle_size_unit unit of bottle volume
#' @param bottle_lasts how long the bottle lasted
#' @param bottle_lasts_unit time unit of how long the bottle lasted
#' @param preparation_dose dose if preparation is given
#' @param preparation_unit unit of preparatio dose
#' @param timeperiod time period for cost calculation
#' @param unit_cost_data unit costs data
#' @param unit_cost_column column name of unit cost in unit_cost_data
#' @param cost_calculated_per column name of unit where the cost is calculated
#' @param strength_column column column name that has strength of medication
#' @param list_of_code_names if names is coded, give the code:name pairs,
#' optional
#' @param list_of_code_brand if brand names are coded, give the
#' code:brand pairs, optional
#' @param list_of_code_dose_unit if unit is coded, give the code:unit pairs,
#' optional
#' @param list_of_code_bottle_size_unit list of bottle size units and codes
#' @param list_of_code_bottle_lasts_unit list of time of bottle lasts and codes
#' @param list_preparation_dose_unit list of preparation dose units and codes
#' @param eqdose_covtab table to get the conversion factor for equivalent
#' doses, optional, but the column names have to be unique
#' Similar to c("Drug", "form", "unit", "factor") or
#' c("Drug", "form", "unit", "conversion")
#' @param basis_strength_unit strength unit to be taken as basis
#' required for total medication calculations
#' @return the calculated cost of tablets along with original data
#' @examples
#' med_costs_file <- system.file("extdata", "medicaton_costs_all.xlsx",
#' package = "packDAMipd")
#' data_file <- system.file("extdata", "medication_liq.xlsx",
#' package = "packDAMipd")
#' ind_part_data <- load_trial_data(data_file)
#' med_costs <- load_trial_data(med_costs_file)
#' conv_file <- system.file("extdata", "Med_calc.xlsx",package = "packDAMipd")
#' table <- load_trial_data(conv_file)
#' res <- microcosting_liquids_wide(
#' ind_part_data = ind_part_data, name_med = "liq_name", brand_med = NULL,
#' dose_med = "liq_strength", unit_med = NULL, bottle_size = "liq_bottle_size",
#' bottle_size_unit = NULL, bottle_lasts = "liq_lasts",
#' bottle_lasts_unit = NULL, preparation_dose = NULL, preparation_unit = NULL,
#' timeperiod = "4 months", unit_cost_data = med_costs,
#' unit_cost_column = "UnitCost", cost_calculated_per = "Basis",
#' strength_column = "Strength", list_of_code_names = NULL,
#' list_of_code_brand = NULL, list_of_code_dose_unit = NULL,
#' list_of_code_bottle_size_unit = NULL, list_of_code_bottle_lasts_unit = NULL,
#' list_preparation_dose_unit = NULL, eqdose_covtab = table,
#' basis_strength_unit = NULL)
#' @export
#' @importFrom dplyr %>%
microcosting_liquids_wide <- function(ind_part_data,
                                      name_med,
                                      brand_med = NULL,
                                      dose_med,
                                      unit_med = NULL,
                                      bottle_size,
                                      bottle_size_unit = NULL,
                                      bottle_lasts,
                                      bottle_lasts_unit = NULL,
                                      preparation_dose = NULL,
                                      preparation_unit = NULL,
                                      timeperiod,
                                      unit_cost_data,
                                      unit_cost_column,
                                      cost_calculated_per,
                                      strength_column,
                                      list_of_code_names = NULL,
                                      list_of_code_brand = NULL,
                                      list_of_code_dose_unit = NULL,
                                      list_of_code_bottle_size_unit = NULL,
                                      list_of_code_bottle_lasts_unit = NULL,
                                      list_preparation_dose_unit = NULL,
                                      eqdose_covtab = NULL,
                                      basis_strength_unit = NULL) {
  # All per-period arithmetic is carried out on this internal time basis.
  internal_basis_time <- "day"
  # check the form as liquids
  # Form keywords used later to select liquid-type rows from eqdose_covtab.
  words <- c("liquid", "liq", "solution", "liquids", "solutions")
  generated_list <- generate_wt_vol_units()
  wt_per_vol_units <- generated_list$weight_per_vol
  #Error - data should not be NULL
  if (is.null(ind_part_data) | is.null(unit_cost_data))
    stop("data should not be NULL")
  #Checking if the required parameters are NULL or NA
  variables_check <- list(name_med, dose_med,
                          bottle_size, bottle_lasts,
                          timeperiod, unit_cost_column,
                          cost_calculated_per, strength_column)
  results <- sapply(variables_check, check_null_na)
  names_check <- c("name_med", "dose_med", "bottle_size", "bottle_lasts",
                   "timeperiod", "unit_cost_column",
                   "cost_calculated_per", "strength_column")
  if (any(results != 0)) {
    indices <- which(results < 0)
    stop(paste("Error - the variables can not be NULL or NA,
               check the variable(s)", names_check[indices]))
  }
  # if null,keep proper strength unit and time unit
  # Default basis strength is "mg/ml"; otherwise split the given unit at "/"
  # into a weight part (basis_wt_unit) and a volume part (basis_vol_unit).
  if (!is.null(basis_strength_unit)) {
    if (is.na(basis_strength_unit)) {
      basis_strength_unit <- "mg/ml"
      basis_wt_unit <- "mg"
      basis_vol_unit <- "ml"
    } else {
      if (!(basis_strength_unit %in% wt_per_vol_units))
        stop("Basis strength unit is not valid")
      index <- stringr::str_locate(basis_strength_unit, "/")
      basis_wt_unit <- stringr::str_sub(basis_strength_unit, 1, index[1] - 1)
      basis_vol_unit <- stringr::str_sub(basis_strength_unit, index[2] + 1,
                                         nchar(basis_strength_unit))
    }
  } else {
    basis_strength_unit <- "mg/ml"
    basis_wt_unit <- "mg"
    basis_vol_unit <- "ml"
  }
  ## Check the columns in IPD and get the columns
  # Each *_check flag is -1 when the corresponding optional argument was
  # NULL/NA, i.e. that piece of information is absent from the IPD.
  brand_check <- return0_if_not_null_na(brand_med)
  unit_med_check <- return0_if_not_null_na(unit_med)
  bottle_size_unit_check <- return0_if_not_null_na(bottle_size_unit)
  bottle_lasts_unit_check <- return0_if_not_null_na(bottle_lasts_unit)
  preparation_dose_check <- return0_if_not_null_na(preparation_dose)
  preparation_unit_check <- return0_if_not_null_na(preparation_unit)
  check_list <- c(unit_med_check, brand_check, bottle_size_unit_check,
                  bottle_lasts_unit_check, preparation_dose_check,
                  preparation_unit_check)
  partial_list <- c(name_med, dose_med, bottle_size, bottle_lasts)
  another_list <- list(unit_med, brand_med, bottle_size_unit, bottle_lasts_unit,
                       preparation_dose, preparation_unit)
  another_list[check_list == -1] <- -1
  info_list <- unlist(append(partial_list, another_list))
  # For every requested column name, collect the matching IPD column indices
  # (via grep, so one name can match several repeated-measure columns);
  # absent optional entries are recorded as -1.
  ipd_cols_exists <- list()
  for (i in seq_len(length(info_list))) {
    if (info_list[i] != -1) {
      check <- IPDFileCheck::check_column_exists(info_list[i], ind_part_data)
      res <- grep(info_list[i], colnames(ind_part_data))
      if (sum(check) != 0) {
        if (length(res) == 0)
          stop("Atleast one of the required columns not found")
        ipd_cols_exists[length(ipd_cols_exists) + 1] <- list(res)
      } else {
        ipd_cols_exists[length(ipd_cols_exists) + 1] <- list(res)
      }
    } else {
      ipd_cols_exists[length(ipd_cols_exists) + 1] <- -1
    }
  }
  # Unpack the collected column-index vectors in the same order as info_list.
  names_med_ipd_cols <- unlist(ipd_cols_exists[1])
  doses_med_ipd_cols <- unlist(ipd_cols_exists[2])
  bottle_size_ipd_cols <- unlist(ipd_cols_exists[3])
  bottle_lasts_ipd_cols <- unlist(ipd_cols_exists[4])
  if (unit_med_check != -1)
    unit_med_ipd_cols <- unlist(ipd_cols_exists[5])
  if (brand_check != -1)
    brand_med_ipd_cols <- unlist(ipd_cols_exists[6])
  if (bottle_size_unit_check != -1)
    bottle_size_unit_ipd_cols <- unlist(ipd_cols_exists[7])
  if (bottle_lasts_unit_check != -1)
    bottle_lasts_unit_ipd_cols <- unlist(ipd_cols_exists[8])
  if (preparation_dose_check != -1)
    preparation_dose_ipd_cols <- unlist(ipd_cols_exists[9])
  if (preparation_unit_check != -1)
    preparation_unit_ipd_cols <- unlist(ipd_cols_exists[10])
  # check columns exist in unit cost data
  info_list <- c(unit_cost_column, cost_calculated_per, strength_column)
  checks <- sapply(info_list, IPDFileCheck::check_column_exists, unit_cost_data)
  if (sum(checks) != 0) {
    stop("Atleast one of the required columns in unit cost data not found")
  }
  ## if the information is coded
  # Decode medication names via the optional code:name pairs.
  names_from_ipd_code <- encode_codes_data(list_of_code_names,
                                           names_med_ipd_cols, ind_part_data)
  if (is.null(unlist(names_from_ipd_code)) |
      sum(is.na(unlist(names_from_ipd_code))) ==
      length(unlist(names_from_ipd_code))) {
    stop("Error - name_from_code can not be null - check the input for
         list of names and codes")
  }
  # Dose units: either stripped from the dose strings themselves (e.g. "2mg/ml"
  # when unit_med is absent) or decoded from the separate unit column.
  if (unit_med_check == -1) {
    med_ipd_dose <- ind_part_data %>%
      dplyr::select(dplyr::all_of(doses_med_ipd_cols))
    med_dose_unlist <- unlist(med_ipd_dose)
    unit_from_ipd_code <- gsub("[0-9\\.]", "", med_dose_unlist)
    unit_from_ipd_code <- matrix(unit_from_ipd_code,
                                 nrow = dim(med_ipd_dose)[1])
    colnames(unit_from_ipd_code) <- colnames(med_ipd_dose)
    unit_from_ipd_code <- as.data.frame(unit_from_ipd_code)
  } else {
    med_ipd_dose <- ind_part_data %>%
      dplyr::select(dplyr::all_of(doses_med_ipd_cols))
    med_ipd_dose <- as.data.frame(med_ipd_dose)
    unit_from_ipd_code <- encode_codes_data(list_of_code_dose_unit,
                                            unit_med_ipd_cols, ind_part_data)
    if (is.null(unlist(unit_from_ipd_code)) |
        sum(is.na(unlist(unit_from_ipd_code))) ==
        length(unlist(unit_from_ipd_code))) {
      stop("Error - unit_from_code can not be null - check the input for
           list of units")
    }
  }
  # Same dual treatment for the "bottle lasts" time unit.
  if (bottle_lasts_unit_check == -1) {
    bottle_lasts_ipd <- ind_part_data %>%
      dplyr::select(dplyr::all_of(bottle_lasts_ipd_cols))
    bottle_lasts_unit_unlist <- unlist(bottle_lasts_ipd)
    unit_from_lasts <- gsub("[0-9\\.]", "", bottle_lasts_unit_unlist)
    unit_from_lasts <- matrix(unit_from_lasts,
                              nrow = dim(bottle_lasts_ipd)[1])
    colnames(unit_from_lasts) <- colnames(bottle_lasts_ipd)
    bottle_lasts_unit_from_ipd_code <- as.data.frame(unit_from_lasts)
  } else {
    bottle_lasts_ipd <- ind_part_data %>%
      dplyr::select(dplyr::all_of(bottle_lasts_ipd_cols))
    bottle_lasts_unit_from_ipd_code <-
      encode_codes_data(list_of_code_bottle_lasts_unit,
                        bottle_lasts_unit_ipd_cols, ind_part_data)
    if (is.null(unlist(bottle_lasts_unit_from_ipd_code)) |
        sum(is.na(unlist(bottle_lasts_unit_from_ipd_code))) ==
        length(unlist(bottle_lasts_unit_from_ipd_code))) {
      stop("Error - bottle_lasts_unit_from_ipd_code can not be null -
           check the input for bottle lasts unit code")
    }
  }
  # Same dual treatment for the bottle-size volume unit.
  if (bottle_size_unit_check == -1) {
    bottle_size_ipd <- ind_part_data %>%
      dplyr::select(dplyr::all_of(bottle_size_ipd_cols))
    bottle_size_unlist <- unlist(bottle_size_ipd)
    unit_from_size <- gsub("[0-9]+", "", bottle_size_unlist)
    unit_from_size <- matrix(unit_from_size,
                             nrow = dim(bottle_size_ipd)[1])
    colnames(unit_from_size) <- colnames(bottle_size_ipd)
    bottle_size_unit_from_ipd_code <- as.data.frame(unit_from_size)
  } else {
    bottle_size_ipd <- ind_part_data %>%
      dplyr::select(dplyr::all_of(bottle_size_ipd_cols))
    bottle_size_unit_from_ipd_code <-
      encode_codes_data(list_of_code_bottle_size_unit,
                        bottle_size_unit_ipd_cols,
                        ind_part_data)
    if (is.null(unlist(bottle_size_unit_from_ipd_code)) |
        sum(is.na(unlist(bottle_size_unit_from_ipd_code))) ==
        length(unlist(bottle_size_unit_from_ipd_code))) {
      stop("Error - bottle_size_unit_from_ipd_code can not be null - check the input for
           bottel size unit code")
    }
  }
  # Brand names are decoded only when a brand column was supplied.
  if (brand_check != -1) {
    brand_from_ipd_code <- encode_codes_data(list_of_code_brand,
                                             brand_med_ipd_cols, ind_part_data)
    if (is.null(unlist(brand_from_ipd_code)) |
        sum(is.na(unlist(brand_from_ipd_code))) ==
        length(unlist(brand_from_ipd_code))) {
      stop("Error - brand_from_ipd_code can not be null - check the input for
           brand code")
    }
  }
  # dose is specified as 2mg/ml unit is not separate, no coding required
  if (preparation_dose_check != -1 & preparation_unit_check == -1) {
    preparation_ipd_dose <- ind_part_data %>%
      dplyr::select(dplyr::all_of(preparation_dose_ipd_cols))
    preparation_dose_unlist <- unlist(preparation_ipd_dose)
    prepare_unit_from_ipd_code <- gsub("[0-9\\.]", "", preparation_dose_unlist)
    prepare_unit_from_ipd_code <- matrix(prepare_unit_from_ipd_code,
                                         nrow = dim(preparation_ipd_dose)[1])
    colnames(prepare_unit_from_ipd_code) <- colnames(preparation_ipd_dose)
    prepare_unit_from_ipd_code <- as.data.frame(prepare_unit_from_ipd_code)
  }
  # Preparation dose and its unit given in separate columns.
  if (preparation_dose_check != -1 & preparation_unit_check != -1) {
    preparation_ipd_dose <- ind_part_data %>%
      dplyr::select(dplyr::all_of(preparation_dose_ipd_cols))
    preparation_ipd_dose <- as.data.frame(preparation_ipd_dose)
    prepare_unit_from_ipd_code <- encode_codes_data(list_preparation_dose_unit,
                                                    preparation_unit_ipd_cols, ind_part_data)
  }
  # get column names for name, form, dosage and unit from unit cost data
  # Columns in the unit cost table are located by keyword pattern rather than
  # by exact name.
  name_pattern <- c("name", "drug", "med", "patch", "tablet", "liquid", "injection")
  form_pattern <- c("form", "patch/tablet")
  size_pattern <- c("size")
  size_unit_pattern <- c("type")
  vol_pattern <- c("volume")
  vol_unit_pattern <- c("measured")
  brand_pattern <- c("brand", "trade")
  preparation_pattern <- c("preparation", "prepare", "make")
  name_cost_col_no <- get_single_col_multiple_pattern(name_pattern, unit_cost_data)
  form_cost_col_no <- get_single_col_multiple_pattern(form_pattern, unit_cost_data)
  size_pack_cost_col_no <-
    get_single_col_multiple_pattern(size_pattern, unit_cost_data)
  size_unit_cost_col_no <-
    get_single_col_multiple_pattern(size_unit_pattern, unit_cost_data)
  vol_cost_col_no <-
    get_single_col_multiple_pattern(vol_pattern, unit_cost_data)
  vol_unit_cost_col_no <-
    get_single_col_multiple_pattern(vol_unit_pattern, unit_cost_data)
  unit_cost_col_no <- IPDFileCheck::get_columnno_fornames(unit_cost_data,
                                                          cost_calculated_per)
  dosage_cost_col_no <- IPDFileCheck::get_columnno_fornames(unit_cost_data,
                                                            strength_column)
  brand_pattern <- c("brand", "trade")
  # The brand column is optional in the cost data; it is only an error to be
  # missing when the IPD actually reports brands.
  res <- unlist(lapply(brand_pattern,
                       IPDFileCheck::get_colno_pattern_colname, colnames(unit_cost_data)))
  if (length(res[which(res != -1)]) < 1) {
    if (brand_check != -1) {
      stop("Error - No brand column in the unit cost data")
    } else{
      brand_cost_col_no <- -1
    }
  } else {
    brand_cost_col_no <-
      get_single_col_multiple_pattern(brand_pattern, unit_cost_data)
  }
  if (preparation_dose_check != -1) {
    preparation_cost_col_no <- get_single_col_multiple_pattern(preparation_pattern,
                                                               unit_cost_data)
  }
  ## information from equivalent dose tables
  # Without a conversion table the equivalent-dose factor defaults to 1.
  if (is.null(eqdose_covtab)) {
    conversion_factor <- 1
    eqdose_check <- -1
  } else {
    if (typeof(eqdose_covtab) != "closure" & typeof(eqdose_covtab) != "list") {
      if (is.na(eqdose_covtab)) {
        eqdose_check <- -1
        conversion_factor <- 1
      }
    } else {
      eqdose_check <- 0
      # Locate drug/form/unit/factor columns in the conversion table by pattern.
      name_pattern <- c("name", "drug", "medication")
      form_pattern <- c("form")
      dose_unit_pattern <- c("unit")
      conv_factor_pattern <- c("conversion", "factor")
      drug_col_conv_table <- get_single_col_multiple_pattern(name_pattern,
                                                             eqdose_covtab)
      form_col_conv_table <- get_single_col_multiple_pattern(form_pattern,
                                                             eqdose_covtab)
      dose_unit_col_conv_table <- get_single_col_multiple_pattern(dose_unit_pattern,
                                                                  eqdose_covtab)
      conv_factor_col_conv_table <-
        get_single_col_multiple_pattern(conv_factor_pattern, eqdose_covtab)
    }
  }
  # Per-participant accumulators; one entry will be appended per IPD row.
  list_total_med_wt_period <- list()
  list_total_cost_period <- list()
  list_total_med_equiv_dose_period <- list()
  list_total_cost_per_equiv_period <- list()
  for (i in 1:nrow(ind_part_data)) {
    # Pull this participant's decoded values for all repeated medication slots.
    name_ipd <- names_from_ipd_code[i, ]
    dose_ipd <- med_ipd_dose[i, ]
    unit_dose_ipd <- unit_from_ipd_code[i, ]
    bot_lasts_ipd <- bottle_lasts_ipd[i, ]
    bot_lasts_unit_ipd <- bottle_lasts_unit_from_ipd_code[i, ]
    bot_size_ipd <- bottle_size_ipd[i, ]
    bot_size_unit_ipd <- trimws(bottle_size_unit_from_ipd_code[i, ])
    if (brand_check != -1)
      brand_ipd <- brand_from_ipd_code[i, ]
    if (preparation_dose_check != -1)
      prepare_dose_ipd <- preparation_ipd_dose[i, ]
    if (preparation_unit_check != -1)
      prepare_unit_ipd <- prepare_unit_from_ipd_code[i, ]
    no_na_dose_ipd <- dose_ipd[!is.na(dose_ipd)]
    no_na_name_ipd <- name_ipd[!is.na(name_ipd)]
    if (length(no_na_name_ipd) != length(no_na_dose_ipd))
      stop("number of doses and number of medications should be equal")
    # med_valid_check is -1 when this row reports no medication at all.
    if (is.null(name_ipd)) {
      med_valid_check <- -1
    } else {
      if (sum(is.na(unname(name_ipd))) >= length(name_ipd))
        med_valid_check <- -1
      else
        med_valid_check <- 0
    }
    if (med_valid_check != -1) {
      total_med_wt_period <- 0
      total_cost_period <- 0
      total_med_equiv_dose_period <- 0
      total_cost_per_equiv_period <- 0
      # Loop over each medication reported for this participant.
      for (j in seq_len(length(name_ipd))) {
        if (!is.null(name_ipd[j]) & !is.na(name_ipd[j])) {
          #(name, form, brand, dose, preparation,and volume of bottle)
          # Filter the unit cost table by drug name, then keep only rows whose
          # form field contains one of the liquid-like keywords.
          match_name <- return_equal_str_col(name_cost_col_no, unit_cost_data,
                                             name_ipd[j])
          indices_form1 <- which(stringr::str_detect(toupper(match_name[[form_cost_col_no]]), "LIQUID"))
          indices_form2 <- which(stringr::str_detect(toupper(match_name[[form_cost_col_no]]), "SOLUTION"))
          indices_form3 <- which(stringr::str_detect(toupper(match_name[[form_cost_col_no]]), "DROP"))
          indices_form4 <- which(stringr::str_detect(toupper(match_name[[form_cost_col_no]]), "AMPOULE"))
          indices_form5 <- which(stringr::str_detect(toupper(match_name[[form_cost_col_no]]), "SUSPENSION"))
          indices_form6 <- which(stringr::str_detect(toupper(match_name[[form_cost_col_no]]), "VIAL"))
          indices_form <- unique(c(indices_form1, indices_form2, indices_form3, indices_form4,
                                   indices_form5, indices_form6))
          match_form <- match_name[indices_form, ]
          # Narrow by brand only when a usable (non-blank) brand is reported.
          if (brand_check != -1) {
            if (is.null(brand_ipd[j])) {
              match_form_brand <- match_form
            } else {
              if (is.na(brand_ipd[j])) {
                match_form_brand <- match_form
              } else {
                if (brand_ipd[j] == "" | brand_ipd[j] == " ") {
                  match_form_brand <- match_form
                } else {
                  match_form_brand <- return_equal_str_col(brand_cost_col_no,
                                                           match_form, brand_ipd[j])
                  if (nrow(match_form_brand) < 1)
                    stop("Did not find matching brand name of medication")
                }
              }
            }
          } else {
            match_form_brand <- match_form
          }
          # get the unit of doses from the ipd
          if (unit_med_check == -1)
            dose_num_val_ipd <-
              as.numeric(stringr::str_extract(dose_ipd[j], "\\d+\\.*\\d*"))
          else
            dose_num_val_ipd <- as.numeric(dose_ipd[j])
          dose_in_ipd <- paste(dose_num_val_ipd, unit_dose_ipd[j], sep = "")
          strength_unit_cost <- trimws(gsub("[0-9\\.]", "",
                                            match_form_brand[[dosage_cost_col_no]]))
          strength_val_cost <-
            as.numeric(stringr::str_extract(match_form_brand[[dosage_cost_col_no]],
                                            "\\d+\\.*\\d*"))
          # if the unit cost are listed for different unit of doses say
          # mg/ml or g/ml
          # choose the right one after finding the multiplier which is 1.
          dose_in_cost_data <- paste(strength_val_cost, strength_unit_cost,
                                     sep = "")
          if (any(dose_in_cost_data == dose_in_ipd)) {
            match_form_brand_unit <-
              match_form_brand[dose_in_cost_data == dose_in_ipd, ]
          } else {
            stop("The used dosage is not in costing table")
          }
          # Multiplier taking the IPD strength unit to the basis strength unit.
          basis_str_unit_multiply <-
            convert_wtpervoldiff_basis(unit_dose_ipd[j],
                                       basis_strength_unit)
          # get the unit of preparation from the ipd
          # Normalise the preparation dose to "Xunit/volunit" (dropping an
          # explicit denominator of 1) so it can be matched against the cost
          # table's preparation column.
          if (preparation_dose_check != -1) {
            if (preparation_unit_check == -1) {
              ipd_preparation_dose <- prepare_dose_ipd[j]
              index_slash <- stringr::str_locate(ipd_preparation_dose, "/")
              first_dose_withstr <- stringr::str_sub(ipd_preparation_dose, 1,
                                                     index_slash[1] - 1)
              second_dose_withstr <- stringr::str_sub(ipd_preparation_dose,
                                                      index_slash[2] + 1, nchar(ipd_preparation_dose))
              strength_val_seconddose <-
                as.numeric(stringr::str_extract(second_dose_withstr,
                                                "\\d+\\.*\\d*"))
              vol_unit <- trimws(gsub("[0-9\\.]", "", second_dose_withstr))
              if (strength_val_seconddose == 1)
                ipd_preparation_dose <- paste(first_dose_withstr, "/",
                                              vol_unit, sep = "")
            } else {
              prepare_dose_val <- (prepare_dose_ipd[j])
              prepare_dose_unit_val <- prepare_unit_ipd[j]
              index_slash <- stringr::str_locate(prepare_dose_val, "/")
              first_dose <- stringr::str_sub(prepare_dose_val, 1,
                                             index_slash[1] - 1)
              second_dose <- stringr::str_sub(prepare_dose_val,
                                              index_slash[2] + 1, nchar(prepare_dose_val))
              index <- stringr::str_locate(prepare_dose_unit_val, "/")
              wt_unit <- stringr::str_sub(prepare_dose_unit_val, 1,
                                          index[1] - 1)
              vol_unit <- stringr::str_sub(prepare_dose_unit_val,
                                           index[2] + 1, nchar(prepare_dose_unit_val))
              if (as.numeric(second_dose) == 1) {
                ipd_preparation_dose <- paste(first_dose, wt_unit, "/",
                                              vol_unit, sep = "")
              } else {
                ipd_preparation_dose <- paste(first_dose, wt_unit, "/",
                                              second_dose, vol_unit, sep = "")
              }
            }
            # Fall back to the unfiltered rows when no preparation row matches.
            match_form_brand_unit_prepare <-
              match_form_brand_unit[match_form_brand_unit[[preparation_cost_col_no]] ==
                                      ipd_preparation_dose, ]
            if (nrow(match_form_brand_unit_prepare) == 0) {
              match_form_brand_unit_prepare <- match_form_brand_unit
            }
          } else {
            match_form_brand_unit_prepare <- match_form_brand_unit
          }
          # Liquids must be costed "per bottle"; the matching cost row(s) must
          # also agree with the reported bottle volume+unit. Averages the unit
          # cost over the matching rows.
          unit_used_costing <-
            tolower(unique(match_form_brand_unit_prepare[[unit_cost_col_no]]))
          if (sum(unit_used_costing %in% "per bottle") >= 1) {
            bottle_vol_cost <-
              as.numeric(unlist(match_form_brand_unit_prepare[vol_cost_col_no]))
            bottle_vol_unit_cost <-
              unlist(match_form_brand_unit_prepare[vol_unit_cost_col_no])
            bottle_volandunit_cost <- paste(bottle_vol_cost,
                                            bottle_vol_unit_cost, sep = "")
            if (bottle_size_unit_check == -1)
              bottle_size_num_val_ipd <-
                as.numeric(stringr::str_extract(bot_size_ipd[j], "\\d+\\.*\\d*"))
            else
              bottle_size_num_val_ipd <- as.numeric(bot_size_ipd[j])
            bottle_size_in_ipd <- paste(bottle_size_num_val_ipd,
                                        bot_size_unit_ipd[j], sep = "")
            if (any(bottle_volandunit_cost == bottle_size_in_ipd)) {
              match_form_brand_unit_prepare_size <-
                match_form_brand_unit_prepare[bottle_volandunit_cost ==
                                                bottle_size_in_ipd, ]
              uni_cost_per_bottle <-
                sum(match_form_brand_unit_prepare_size[[unit_cost_column]]) /
                nrow(match_form_brand_unit_prepare_size)
            } else {
              stop("The used vol and unit of bottle is not in costing table")
            }
          } else {
            stop("Error- liquids needs to be costed per bottle")
          }
          # Look up the equivalent-dose conversion factor for this drug/form,
          # keeping only rows whose unit converts 1:1 to the IPD dose unit.
          if (eqdose_check != -1) {
            temp <- return_equal_str_col(drug_col_conv_table, eqdose_covtab,
                                         name_ipd[j])
            if (nrow(temp) != 0) {
              tempa <- return_equal_liststring_listcol(form_col_conv_table, temp,
                                                       words)
              unit_conv_table <- tempa[[dose_unit_col_conv_table]]
              unit_converts <-
                unlist(lapply(unit_conv_table, convert_wtpervoldiff_basis,
                              unit_dose_ipd[j]))
              temp2 <- tempa[which(unit_converts == 1), ]
              if (nrow(temp2) < 1)
                stop("The unit in the conversion table is not correct or
                     can not be checked")
              conver_factor <- temp2[[conv_factor_col_conv_table]]
              # NOTE(review): when conver_factor is already numeric this branch
              # never assigns conversion_factor, leaving it at a stale or
              # undefined value -- verify intended behaviour.
              if (!is.numeric(conver_factor)) {
                if (conver_factor == "N/A" | is.na(conver_factor)) {
                  conversion_factor <- 1
                } else {
                  check_num <- suppressWarnings(as.numeric(conver_factor))
                  if (is.na(check_num))
                    conversion_factor <-
                      as.numeric(stringr::str_extract(conver_factor,
                                                      "\\d+\\.*\\d*"))
                  else
                    conversion_factor <- as.numeric(conver_factor)
                }
              }
            } else {
              conversion_factor <- 0
            }
          }
          # How long one bottle lasts, converted to the internal day basis;
          # its reciprocal is the number of bottles consumed per day.
          if (bottle_lasts_unit_check == -1)
            bottle_lasts_num_val_ipd <-
              as.numeric(stringr::str_extract(bot_lasts_ipd[j], "\\d+\\.*\\d*"))
          else
            bottle_lasts_num_val_ipd <- as.numeric(bot_lasts_ipd[j])
          ipd_bottle_lasts <- paste(bottle_lasts_num_val_ipd,
                                    bot_lasts_unit_ipd[j], sep = " ")
          basis_time_multiply <-
            convert_to_given_timeperiod(ipd_bottle_lasts, internal_basis_time)
          no_bottles_used_basis <- (1 / basis_time_multiply)
          vol_unit_multiplier <- convert_volume_basis(bot_size_unit_ipd[j],
                                                      basis_vol_unit)
          index <- stringr::str_locate(unit_dose_ipd[j], "/")
          this_wt_unit <- stringr::str_sub(unit_dose_ipd[j], 1, index[1] - 1)
          wt_unit_multiplier <- convert_weight_diff_basis(this_wt_unit,
                                                          basis_wt_unit)
          # no of bottle times the unit cost
          cost_basis <- ceiling(no_bottles_used_basis) * uni_cost_per_bottle
          time_multiplier <- convert_to_given_timeperiod(timeperiod,
                                                         internal_basis_time)
          no_bottles_period <- no_bottles_used_basis * time_multiplier
          bottles_taken_period_costing <- ceiling(no_bottles_period)
          #converting to correct strength unit
          med_str_correct_unit <- dose_num_val_ipd * basis_str_unit_multiply
          bottle_size_correct_unit <- bottle_size_num_val_ipd *
            vol_unit_multiplier
          # 2mg/ml * 500ml in 40 days - calculating for a day
          # 2 mg/ml * (500ml/40 days) = 25 mg per day
          # NOTE(review): no_bottles_period already includes time_multiplier,
          # so multiplying by time_multiplier again below appears to count the
          # period twice -- confirm against the intended formula.
          med_wt_period <- med_str_correct_unit * wt_unit_multiplier *
            bottle_size_correct_unit * no_bottles_period * time_multiplier
          # Cost is charged for whole bottles bought over the period.
          cost_period <- bottles_taken_period_costing * uni_cost_per_bottle
          med_wt_equiv_period <- med_wt_period * conversion_factor
          cost_per_equiv_period <- cost_period / med_wt_equiv_period
        } else {
          # No medication in this slot: contributes nothing to the totals.
          med_wt_period <- 0
          cost_period <- 0
          med_wt_equiv_period <- 0
          cost_per_equiv_period <- 0
        }
        total_med_wt_period <- total_med_wt_period + med_wt_period
        total_cost_period <- total_cost_period + cost_period
        total_med_equiv_dose_period <- total_med_equiv_dose_period +
          med_wt_equiv_period
        total_cost_per_equiv_period <- total_cost_per_equiv_period +
          cost_per_equiv_period
      }
    } else {
      # Row reported no medication at all: results are NA, not zero.
      total_med_wt_period <- NA
      total_cost_period <- NA
      total_med_equiv_dose_period <- NA
      total_cost_per_equiv_period <- NA
    }
    # NOTE(review): keywd is assigned inside the row loop; with a zero-row IPD
    # it would be undefined when building the output column names below.
    keywd <- "liquid"
    list_total_med_wt_period <- append(list_total_med_wt_period,
                                       total_med_wt_period)
    list_total_cost_period <- append(list_total_cost_period, total_cost_period)
    list_total_med_equiv_dose_period <- append(list_total_med_equiv_dose_period,
                                               total_med_equiv_dose_period)
    list_total_cost_per_equiv_period <- append(list_total_cost_per_equiv_period,
                                               total_cost_per_equiv_period)
  }
  # Attach the four summary columns to the IPD and return it.
  this_name <- paste("totmed_wt_period_", keywd, "_", basis_wt_unit, sep = "")
  ind_part_data[[this_name]] <- unlist(list_total_med_wt_period)
  this_name <- paste("totcost_period_", keywd, sep = "")
  ind_part_data[[this_name]] <- unlist(list_total_cost_period)
  this_name <- paste("totmed_per_equiv_period_", keywd, sep = "")
  ind_part_data[[this_name]] <- unlist(list_total_med_equiv_dose_period)
  this_name <- paste("totcost_per_equiv_period_", keywd, sep = "")
  ind_part_data[[this_name]] <- unlist(list_total_cost_per_equiv_period)
  return(ind_part_data)
}
##############################################################################
#' Function to estimate the cost of liquids when IPD is in long format
#' @param the_columns columns that are to be used to convert the data
#' from long to wide
#' @param ind_part_data_long IPD
#' @param name_med name of medication
#' @param brand_med brand name of medication if revealed
#' @param dose_med dose of medication used
#' @param unit_med unit of medication ; use null if its along with the dose
#' @param bottle_size size of the bottle used
#' @param bottle_size_unit unit of bottle volume
#' @param bottle_lasts how long the bottle lasted
#' @param bottle_lasts_unit time unit of how long the bottle lasted
#' @param preparation_dose dose if preparation is given
#' @param preparation_unit unit of preparation dose
#' @param timeperiod time period for cost calculation
#' @param unit_cost_data unit costs data
#' @param unit_cost_column column name of unit cost in unit_cost_data
#' @param cost_calculated_per column name of unit where the cost is calculated
#' @param strength_column column column name that has strength of medication
#' @param list_of_code_names if names is coded, give the code:name pairs,
#' optional
#' @param list_of_code_brand if brand names are coded, give the
#' code:brand pairs, optional
#' @param list_of_code_dose_unit if unit is coded, give the code:unit pairs,
#' optional
#' @param list_of_code_bottle_size_unit list of bottle size units and codes
#' @param list_of_code_bottle_lasts_unit list of time of bottle lasts and codes
#' @param list_preparation_dose_unit list of preparation dose units and codes
#' @param eqdose_covtab table to get the conversion factor for equivalent
#' doses, optional
#' @param basis_strength_unit strength unit to be taken as basis
#' required for total medication calculations
#' @return the calculated cost of tablets along with original data
#' @examples
#'med_costs_file <- system.file("extdata", "medicaton_costs_all.xlsx",
#'package = "packDAMipd")
#'data_file <- system.file("extdata", "medication_liq.xlsx",
#' package = "packDAMipd")
#' ind_part_data <- load_trial_data(data_file)
#' med_costs <- load_trial_data(med_costs_file)
#' conv_file <- system.file("extdata", "Med_calc.xlsx",
#' package = "packDAMipd")
#' table <- load_trial_data(conv_file)
#' names <- colnames(ind_part_data)
#' ending <- length(names)
#' ind_part_data_long <- tidyr::gather(ind_part_data, measurement, value,
#' names[2]:names[ending], factor_key = TRUE)
#' the_columns <- c("measurement", "value")
#' res <- microcosting_liquids_long(the_columns,
#' ind_part_data_long = ind_part_data_long,
#' name_med = "liq_name", brand_med = NULL, dose_med = "liq_strength",
#' unit_med = NULL, bottle_size = "liq_bottle_size",bottle_size_unit = NULL,
#' bottle_lasts = "liq_lasts",bottle_lasts_unit = NULL,preparation_dose = NULL,
#' preparation_unit = NULL,timeperiod = "4 months",unit_cost_data = med_costs,
#' unit_cost_column = "UnitCost",cost_calculated_per = "Basis",
#' strength_column = "Strength",list_of_code_names = NULL,
#' list_of_code_brand = NULL,list_of_code_dose_unit = NULL,
#' list_of_code_bottle_size_unit = NULL,list_of_code_bottle_lasts_unit = NULL,
#' list_preparation_dose_unit = NULL,eqdose_covtab = table,
#' basis_strength_unit = NULL)
#' @export
#' @importFrom tidyr gather
#' @importFrom tidyr spread_
microcosting_liquids_long <- function(the_columns,
                                      ind_part_data_long,
                                      name_med,
                                      brand_med = NULL,
                                      dose_med,
                                      unit_med = NULL,
                                      bottle_size,
                                      bottle_size_unit = NULL,
                                      bottle_lasts,
                                      bottle_lasts_unit = NULL,
                                      preparation_dose = NULL,
                                      preparation_unit = NULL,
                                      timeperiod,
                                      unit_cost_data,
                                      unit_cost_column,
                                      cost_calculated_per,
                                      strength_column,
                                      list_of_code_names = NULL,
                                      list_of_code_brand = NULL,
                                      list_of_code_dose_unit = NULL,
                                      list_of_code_bottle_size_unit = NULL,
                                      list_of_code_bottle_lasts_unit = NULL,
                                      list_preparation_dose_unit = NULL,
                                      eqdose_covtab = NULL,
                                      basis_strength_unit = NULL) {
  # Neither the long-format IPD nor the unit cost table may be missing.
  if (is.null(ind_part_data_long) || is.null(unit_cost_data))
    stop("data should not be NULL")
  # Reshape long -> wide using the supplied key/value column pair, then
  # delegate all of the costing to the wide-format implementation.
  wide_ipd <- tidyr::spread_(ind_part_data_long, the_columns[1],
                             the_columns[2])
  costed_wide <- microcosting_liquids_wide(
    ind_part_data = wide_ipd,
    name_med = name_med,
    brand_med = brand_med,
    dose_med = dose_med,
    unit_med = unit_med,
    bottle_size = bottle_size,
    bottle_size_unit = bottle_size_unit,
    bottle_lasts = bottle_lasts,
    bottle_lasts_unit = bottle_lasts_unit,
    preparation_dose = preparation_dose,
    preparation_unit = preparation_unit,
    timeperiod = timeperiod,
    unit_cost_data = unit_cost_data,
    unit_cost_column = unit_cost_column,
    cost_calculated_per = cost_calculated_per,
    strength_column = strength_column,
    list_of_code_names = list_of_code_names,
    list_of_code_brand = list_of_code_brand,
    list_of_code_dose_unit = list_of_code_dose_unit,
    list_of_code_bottle_size_unit = list_of_code_bottle_size_unit,
    list_of_code_bottle_lasts_unit = list_of_code_bottle_lasts_unit,
    list_preparation_dose_unit = list_preparation_dose_unit,
    eqdose_covtab = eqdose_covtab,
    basis_strength_unit = basis_strength_unit
  )
  costed_wide <- as.data.frame(costed_wide)
  # Melt everything except the identifier (first) column back to long format,
  # using the same fixed "measurement"/"value" column names as before.
  col_names <- colnames(costed_wide)
  last <- length(col_names)
  tidyr::gather(costed_wide, key = "measurement", value = "value",
                col_names[2]:col_names[last], factor_key = TRUE)
}
|
549632a39219fcf452d135426582bc33678b21fc | 03d4032ed72d3f1a543a6ac8aa629f5a49cdd30e | /R/nearest_neighbours_on_a_torus.R | 8eead18c0e9fec24e2224950642cf8dfed6a8aff | [] | no_license | paulnorthrop/donut | 2eea636531c0e272dda7390b6ad71b5804357ccf | d0f36d21f773dadfafb3a55aeff70ee7799585af | refs/heads/master | 2022-04-29T19:21:34.265544 | 2022-04-11T21:55:18 | 2022-04-11T21:55:18 | 211,706,498 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,475 | r | nearest_neighbours_on_a_torus.R | # ================================== nnt ==================================== #
#' Nearest Neighbour Search with Variables on a Torus
#'
#' Uses a user-supplied function to find the \code{k} nearest neighbours of
#' specified points in a dataset, adding the option to wrap certain variables
#' on a torus.
#'
#' @param data An \eqn{M} by \eqn{d} numeric matrix or data frame. Each of the
#' \eqn{M} rows contains a \eqn{d}-dimensional observation.
#' @param query An \eqn{N} by \eqn{d} numeric matrix or data frame. Each row
#' contains an \eqn{d}-dimensional point that will be queried against
#' \code{data}.
#' @param k An integer scalar. The number of nearest neighbours, of the
#' points in the rows of \code{query}, to find.
#' @param fn The function with which to calculate the nearest neighbours.
#' The syntax of this function must be \code{fn(data, query, k, ...)}.
#' The default is \code{RANN::nn2}. Another possibility is
#' \code{nabor::knn}.
#' @param torus An integer vector with element(s) in
#' \{1, ..., \code{ncol(data)}\}. The corresponding variables are wrapped
#' on the corresponding range given in \code{ranges}.
#' @param ranges A \code{length(torus)} by \code{2} numeric matrix.
#' Row \code{i} gives the range of variation of the variable indexed by
#' \code{torus[i]}. \code{ranges[i, 1]} and \code{ranges[i, 2]}
#' are equivalent values of the variable, such as 0 degrees and 360 degrees.
#' If \code{length(torus)} = 1 then \code{ranges} may be a vector of length
#' 2.
#' @param method An integer scalar, equal to 1 or 2. See \strong{Details}.
#' @param ... Further arguments to be passed to \code{fn}.
#' @details
#' If \code{method = 1} then the data are partially replicated, arranged
#' around the original data in a way that wraps the variables in \code{torus} on their respective
#' ranges in \code{ranges}. Then \code{fn} is called using this replicated
#' dataset as the argument \code{data}. If \code{k} is large and/or
#' \code{data} is a sparse dataset then it is possible that a single
#' observation contributes more than once to a set of nearest neighbours,
#' which is incorrect. If this occurs then \code{nnt} uses method 2 to
#' correct the offending rows in \code{nn.idx} and \code{nn.dists} in the
#' returned list object.
#'
#' If \code{method = 2} then the
#' following approach is used for the point in each row in \code{query}.
#' The data indexed by \code{torus} are shifted (and wrapped) so that the
#' point is located at the respective midpoints of \code{ranges}.
#' Method 2 is efficient only if the number of points in \code{query} is
#' small.
#'
#' If \code{torus} is missing then \code{fn} is called using
#' \code{fn(data = data, query = query, k = k, ...)}, so that a call to
#' \code{nnt} is equivalent to a call to the function chosen by \code{fn}.
#' @return An object (a list) of class \code{c("nnt", "donut")} containing the
#' following components.
#' \item{nn.idx}{An \eqn{N} by \eqn{d} integer matrix of the \code{k}
#' nearest neighbour indices, i.e. the rows of \code{data}.}
#' \item{nn.dists}{An \eqn{N} by \eqn{d} numeric matrix of the \code{k}
#' nearest neighbour distances.}
#' \item{data, query, k, fn}{The arguments \code{data}, \code{query},
#' \code{k} and \code{fn} (in fact \code{substitute(fn)}).}
#' \item{torus, ranges, method}{If \code{torus} is supplied, the
#' arguments \code{torus}, \code{ranges} and \code{method}.}
#'   \item{call}{The call to \code{nnt}.}
#' @seealso \code{\link[RANN:nn2]{RANN::nn2}},
#' \code{\link[nabor:knn]{nabor::knn}}: nearest neighbour searches.
#' @references Arya, S., Mount, D., Kemp, S. E. and Jefferis, G. (2019)
#' RANN: Fast Nearest Neighbour Search (Wraps ANN Library) Using L2
#' Metric. R package version 2.6.1.
#' \url{https://CRAN.R-project.org/package=RANN}
#' @references Elseberg J., Magnenat S., Siegwart R., Nuchter, A. (2012)
#' Comparison of nearest-neighbor-search strategies and implementations for
#' efficient shape registration. \emph{Journal of Software Engineering for
#' Robotics (JOSER)}, \strong{3}(1), 2-12
#' \url{https://CRAN.R-project.org/package=nabor}
#' @seealso \code{\link{plot.nnt}} plot method for objects returned from
#' \code{\link{nnt}} (1 and 2 dimensional data only).
#' @examples
#' got_RANN <- requireNamespace("RANN", quietly = TRUE)
#' got_nabor <- requireNamespace("nabor", quietly = TRUE)
#'
#' set.seed(20092019)
#' # 2D example from the RANN:nn2 documentation (L2 metric)
#' x1 <- runif(100, 0, 2 * pi)
#' x2 <- runif(100, 0, 3)
#' DATA <- data.frame(x1, x2)
#' if (got_RANN) {
#' nearest <- nnt(DATA, DATA)
#' }
#'
#' # Suppose that x1 should be wrapped
#' ranges1 <- c(0, 2 * pi)
#' query1 <- rbind(c(6, 1.3), c(2 * pi, 3), c(3, 1.5), c(4, 0))
#' if (got_RANN) {
#' res1 <- nnt(DATA, query1, k = 8, torus = 1, ranges = ranges1)
#' plot(res1, ylim = c(0, 3))
#' }
#'
#' # Suppose that x1 and x2 should be wrapped
#' ranges2 <- rbind(c(0, 2 * pi), c(0, 3))
#' query2 <- rbind(c(6, 1.3), c(2 * pi, 3), c(3, 1.5), c(4, 0))
#' if (got_RANN) {
#' res2 <- nnt(DATA, query2, k = 8, torus = 1:2, ranges = ranges2)
#' plot(res2)
#' }
#'
#' # Use nabor::knn (L2 metric) instead of RANN::nn2
#' if (got_nabor) {
#' res3 <- nnt(DATA, query2, k = 8, fn = nabor::knn, torus = 1:2,
#' ranges = ranges2)
#' plot(res3)
#' }
#'
#' # 1D example
#' ranges <- c(0, 2 * pi)
#' query <- c(4, 0.1)
#' if (got_RANN) {
#' res <- nnt(x1, query, torus = 1, ranges = ranges, method = 1)
#' plot(res)
#' }
#' @export
nnt <- function(data, query = data, k = min(10, nrow(data)),
                fn = RANN::nn2, torus, ranges, method = 1, ...) {
  # Record the call for inclusion in the returned object.
  the_call <- match.call(expand.dots = TRUE)
  # Work with plain matrices throughout.
  data <- as.matrix(data)
  query <- as.matrix(query)
  # Without wrapping, this is a plain pass-through to `fn`.
  if (missing(torus)) {
    out <- fn(data = data, query = query, k = k, ...)
    out <- c(out, list(data = data, query = query, k = k,
                       fn = substitute(fn), call = the_call))
    class(out) <- c("nnt", "donut")
    return(out)
  }
  # ---- Input checks for the wrapped (torus) case ----
  if (missing(ranges)) {
    stop("ranges must be supplied")
  }
  if (length(torus) == 0L || mode(torus) != "numeric") {
    stop("'torus' must be a non-empty numeric vector")
  }
  if (!all(torus %in% 1:ncol(data))) {
    stop("All elements of 'torus' must be in 1:ncol(data)")
  }
  # Promote a length-2 vector of ranges to a 1 x 2 matrix.
  if (is.null(ncol(ranges))) {
    dim(ranges) <- c(1L, 2L)
  }
  if (!is.matrix(ranges) || ncol(ranges) != 2 ||
      nrow(ranges) != length(torus)) {
    stop("ranges not consistent with length(torus)")
  }
  # Ensure each row of ranges is ordered (lower, upper).
  ranges <- t(apply(ranges, 1, sort))
  # For each wrapped variable: TRUE if any value lies outside its range.
  out_of_range <- function(mat) {
    vapply(seq_along(torus), function(j) {
      vals <- mat[, torus[j]]
      any(vals < ranges[j, 1] | vals > ranges[j, 2])
    }, logical(1))
  }
  if (any(out_of_range(data))) {
    stop("value(s) in 'data' are outside the corresponding range in 'ranges'")
  }
  if (any(out_of_range(query))) {
    stop("value(s) in 'query' are outside the corresponding range in 'ranges'")
  }
  if (!(method %in% 1:2)) {
    stop("method must be equal to 1 or 2")
  }
  # Dispatch to the data-replication (1) or data-shifting (2) implementation.
  res <- if (method == 1) {
    method1_function(data, query, k, torus, ranges, fn, ...)
  } else {
    method2_function(data, query, k, torus, ranges, fn, ...)
  }
  res <- c(res, list(data = data, query = query, k = k, fn = substitute(fn),
                     torus = torus, ranges = ranges, method = method,
                     call = the_call))
  class(res) <- c("nnt", "donut")
  res
}
# =========================== Plot nearest neighbours ======================= #

#' Plot diagnostics for an nnt object
#'
#' \code{plot} method for an object of class \code{c("nnt")}.
#'
#' @param x an object of class \code{c("nnt")}, a result of
#'   a call to \code{\link{nnt}}.
#' @param ... Further arguments to be passed to
#'   \code{\link[graphics:plot.default]{plot}}, or
#'   \code{\link[graphics]{points}}.
#' @details This function is only applicable in 1 or 2 dimensions, that is,
#'   when \code{ncol(x$data)} = 1 or 2.  It provides a visual check that the
#'   wrapping of variables is working as intended, in cases where the
#'   number of query points, that is, \code{nrow(x$query)} is small
#'   enough that sets of nearest neighbours do not overlap much.
#'
#'   If \code{ncol(x$data)} = 1 then the index of each observation is plotted
#'   against its value, using a plotting character \code{pch = 1}.  A vertical
#'   line is superimposed at each value in \code{x$query} and the \code{x$k}
#'   nearest neighbours of each line are colour-coded.
#'
#'   If \code{ncol(x$data)} = 2 then \code{x$data[, 2]} is plotted against
#'   \code{x$data[, 1]}, using a plotting character \code{pch = 1}.  Each point
#'   in \code{x$query} is plotted with a cross and the \code{x$k}
#'   nearest neighbours of each point are colour-coded.
#'
#'   Colours of the lines/crosses and nearest neighbour points can be set
#'   using an argument \code{col}.  If a variable is wrapped then the default
#'   plotting limits are set using the corresponding values in \code{x$ranges}.
#' @return Nothing is returned.
#' @seealso \code{\link{nnt}} for nearest neighbour with some variables
#'   wrapped on a torus.
#' @section Examples:
#'   See the examples in \code{\link{nnt}}.
#' @export
plot.nnt <- function(x, ...) {
  if (!inherits(x, "donut")) {
    stop("use only with \"donut\" objects")
  }
  # Number of covariates (columns) in the original data.
  ncov <- ncol(x$data)
  if (ncov > 2) {
    stop("The plot method works for up to 2 covariates only")
  }
  # Invent default column names X1, X2, ... for axis labels if absent.
  if (is.null(colnames(x$data))) {
    colnames(x$data) <- paste0("X", 1:ncol(x$data))
  }
  # Wrapper around graphics::plot() that fixes the appearance of the
  # background points and applies the torus-based axis limits set below.
  # The pch/lwd/col formals swallow any user-supplied values of those
  # arguments so they are not passed on twice.
  my_plot <- function(x, ..., pch, lwd, col, xlim = my_xlim, ylim = my_ylim) {
    graphics::plot(x, ..., lwd = 1, col = "black", xlim = xlim, ylim = ylim)
  }
  # Wrapper around graphics::points() used to highlight nearest neighbours.
  my_points <- function(x, ..., pch = 16, col = "red", lwd) {
    graphics::points(x, ..., pch = pch, col = col, lwd = 1)
  }
  user_args <- list(...)
  nquery <- nrow(x$query)
  # One distinct colour per query point unless the user supplied `col`.
  if (is.null(user_args$col)) {
    user_args$col <- 1 + 1:nquery
  }
  # Axis limits default to NULL (data range); replaced by x$ranges for
  # wrapped variables so the whole torus is shown.
  my_xlim <- my_ylim <- NULL
  if (ncov == 2) {
    if (!is.null(x$torus)) {
      if (1 %in% x$torus) {
        my_xlim <- x$ranges[1, ]
      }
      if (2 %in% x$torus) {
        my_ylim <- x$ranges[2, ]
      }
    }
    # Background scatter of all observations.
    my_plot(x$data, ...)
    # Overlay the k nearest neighbours of each query point in its colour.
    for (i in 1:nquery) {
      i_user_args <- user_args
      i_user_args$col <- user_args$col[i]
      for_my_points <- c(list(x = x$data[x$nn.idx[i, ], , drop = FALSE]),
                         i_user_args)
      do.call(my_points, for_my_points)
    }
    # Mark the query points themselves with crosses.
    for_points <- list(x = x$query, col = user_args$col, pch = "x")
    do.call(graphics::points, for_points)
  } else {
    # 1D case: plot observation index against value.
    if (!is.null(x$torus)) {
      my_xlim <- x$ranges
    }
    plot_data <- cbind(x$data, index = 1:nrow(x$data))
    my_plot(plot_data, ...)
    # A vertical line at each query value, matching the neighbour colours.
    graphics::abline(v = x$query, col = user_args$col)
    for (i in 1:nquery) {
      i_user_args <- user_args
      i_user_args$col <- user_args$col[i]
      plot_data <- cbind(x$data[x$nn.idx[i, ], , drop = FALSE], x$nn.idx[i, ])
      for_my_points <- c(list(x = plot_data), i_user_args)
      do.call(my_points, for_my_points)
    }
  }
  return(invisible())
}
|
606ee1ffd94e5c1853f46f790f403440f47fe6f3 | c824ffc8c30991afc74f8079f8c73f18f7f0ca24 | /tomtomsimple.R | 59437dca97404d841db6f30eb5cc4aab1669e3ff | [] | no_license | tolgakurtuluss/tomtomtrafikendeksi | 65ee2f3f3b5c44a10ea66320193f2c2d9e44ec04 | 73a70539b76c9eae62e20f030e811d352c1acffc | refs/heads/master | 2022-06-01T23:46:46.416262 | 2020-04-30T16:08:38 | 2020-04-30T16:08:38 | 259,356,142 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,090 | r | tomtomsimple.R | library(jsonlite)
library(httr)
library(ggplot2)
library(hrbrthemes)
sehir_isim1 <- "ankara"
sehir_isim2 <- "istanbul"
df <- fromJSON(paste0("https://api.midway.tomtom.com/ranking/live/TUR_", sehir_isim1))
df2 <- fromJSON(paste0("https://api.midway.tomtom.com/ranking/live/TUR_", sehir_isim2))
data <- df$data
colnames(data) <- c("RaporlananGecikme","CanlıEndeks", "Tarih","TrafikUzunlugu(km)","TrafikSayisi")
data$Tarih<- as.POSIXct(data$Tarih/1000, origin = "1970-01-01", tz = "UTC")
data2 <- df2$data
colnames(data2) <- c("RaporlananGecikme","CanlıEndeks", "Tarih","TrafikUzunlugu(km)","TrafikSayisi")
data2$Tarih<- as.POSIXct(data2$Tarih/1000, origin = "1970-01-01", tz = "UTC")
gg <- ggplot(data, aes(x=Tarih, y=CanlıEndeks)) +
geom_line(color="grey") +
geom_point(aes(col=TrafikSayisi, size=RaporlananGecikme)) +
labs(subtitle= paste0("Son 7 gün değerleri (",sehir_isim,")"),
y="Tomtom Trafik Endeksi",
x="Tarih",
title="Haftalik Trafik Endeksi",
caption = "Kaynak: TomTom International BV")
plot(gg)
gg2 <- ggplot(data, aes(x=Tarih, y=CanlıEndeks)) +
geom_line(color="grey") +
geom_point(shape=21, color="black", fill="#69b3a2", size=3) +
theme_ipsum() +
labs(subtitle= paste0("Son 7 gün değerleri (",sehir_isim,")"),
y="Tomtom Trafik Endeksi",
x="Tarih",
title="Haftalık Trafik Endeksi",
caption = "Kaynak: TomTom International BV")
plot(gg2)
gg3 <- ggplot(data, aes(Tarih)) +
geom_line(mapping=aes(y = CanlıEndeks,color="Şehir 1"),size=1) +
geom_line(mapping=aes(y = data2$CanlıEndeks,color="Şehir 2"),size=1) +
scale_color_manual(values = c(
"Şehir 1" = 'blue',
"Şehir 2" = 'red')) +
labs(color = 'Lejant',
subtitle=paste0("Son 7 gün değerleri ","Şehir 1:",sehir_isim1," Şehir 2:",sehir_isim2),
y="Tomtom Trafik Yogunluk Endeksi",
x="Tarih",
title="Şehirler Arası Trafik Karşılaştırma",
caption = "Kaynak: TomTom International BV")
plot(gg3)
|
fded4aa4076530c54c9620bf82466145f4e51c0d | 99dc4dace4f7be1e04655a19bfa26938c319d85c | /R/spark_functions.r | b281f352b1004ac629f460a83cdd68f12625c5c8 | [] | no_license | tito-am/prsim.tools | 04ca8eaf69139801152f8b280b7e7d33a3776055 | 217fe9e5b10746c9be63a39f642bcb5f6cd98351 | refs/heads/master | 2023-03-14T18:57:57.448477 | 2021-03-09T20:14:10 | 2021-03-09T20:14:10 | 291,827,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,409 | r | spark_functions.r |
#' ecriture_prsim_vers_csv_angliers
#'
#' Read every per-simulation CSV found in `path`, sum the flows of the
#' sub-basins upstream of Angliers row by row (one row per julian day), and
#' write one `angliers_sum_<file>` CSV per simulation under
#' `file.path(out_root, "angliers_sum")`.
#'
#' @param path Directory containing the per-simulation basin CSV files.
#' @param out_root Root output directory; the 'angliers_sum' sub-folder is
#'   created inside it.  Defaults to the historical hard-coded location.
#'
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   writing CSV files (and plotting each summed series as a quick check).
#' @export
#'
#' @examples
#' # ecriture_prsim_vers_csv_angliers(
#' #   '/media/tito/TIIGE/PRSIM/0.9995/bv_csv_hecressim')
ecriture_prsim_vers_csv_angliers <- function(path,
                                             out_root = '/media/tito/TIIGE/PRSIM/0.9995/') {
  # NOTE: require() only warns when a package is missing; the subsequent
  # read_csv()/write_csv() calls are what would fail.
  require(tidyverse)
  require(readxl)
  require(reshape2)
  # Sub-basins upstream of Angliers whose flows are summed.
  bvs_angliers <- c("Dozois", "Lac Victoria et lac Granet", "Rapide-7",
                    "Rapide-2", "Riviere Kinojevis", "Lac des Quinze")
  subDir <- 'angliers_sum'
  dir.create(file.path(out_root, subDir), showWarnings = FALSE)
  # Fix: restore the caller's working directory when the function exits
  # (the original left the R session inside `path`).
  old_wd <- setwd(path)
  on.exit(setwd(old_wd), add = TRUE)
  fichiers <- list.files()
  for (fichier in fichiers) {
    df <- read_csv(fichier)
    df <- df[bvs_angliers]
    # One value per julian day: the sum over the selected sub-basins.
    angliers_row_sum <- rowSums(df)
    # Quick visual sanity check of each simulated series.
    plot(angliers_row_sum, type = 'l')
    filename <- paste0(out_root, subDir, '/angliers_sum_', fichier)
    # NOTE: cbind() of character and numeric vectors coerces everything to
    # character, matching the format of the historical output files.
    df <- as.data.frame(cbind(rep(fichier, length(angliers_row_sum)),
                              seq(1, length(angliers_row_sum)),
                              angliers_row_sum))
    colnames(df) <- c('sim_number', 'julian_day', 'angliers_sum')
    write_csv(as.data.frame(df), filename)
  }
  invisible(NULL)
}
#' ecriture_prsim_vers_csv_temiscamingue
#'
#' Read every per-simulation CSV found in `path`, sum the flows of the
#' sub-basins upstream of Lac Temiscamingue row by row (one row per julian
#' day), and write one `temiscamingue_sum_<file>` CSV per simulation under
#' `file.path(out_root, "temiscamingue_sum")`.
#'
#' @param path Directory containing the per-simulation basin CSV files.
#' @param out_root Root output directory; the 'temiscamingue_sum' sub-folder
#'   is created inside it.  Defaults to the historical hard-coded location.
#'
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   writing CSV files (and plotting each summed series as a quick check).
#' @export
#'
#' @examples
#' # ecriture_prsim_vers_csv_temiscamingue(
#' #   '/media/tito/TIIGE/PRSIM/0.9995/bv_csv_hecressim')
ecriture_prsim_vers_csv_temiscamingue <- function(path,
                                                  out_root = '/media/tito/TIIGE/PRSIM/0.9995/') {
  # NOTE: require() only warns when a package is missing; the subsequent
  # read_csv()/write_csv() calls are what would fail.
  require(tidyverse)
  require(readxl)
  require(reshape2)
  # Sub-basins upstream of Lac Temiscamingue whose flows are summed.
  bvs_temiscamingue <- c("Dozois", "Lac Victoria et lac Granet", "Rapide-7",
                         "Rapide-2", "Riviere Kinojevis", "Lac des Quinze",
                         "Mistinikon", "Lady Evelyn",
                         "Lower Notch et Indian Chute", "Rabbit Lake",
                         "Kipawa", "Lac Temiscamingue a Angliers",
                         "Riviere Blanche")
  subDir <- 'temiscamingue_sum'
  dir.create(file.path(out_root, subDir), showWarnings = FALSE)
  # Fix: restore the caller's working directory when the function exits
  # (the original left the R session inside `path`).
  old_wd <- setwd(path)
  on.exit(setwd(old_wd), add = TRUE)
  fichiers <- list.files()
  for (fichier in fichiers) {
    df <- read_csv(fichier)
    df <- df[bvs_temiscamingue]
    # One value per julian day: the sum over the selected sub-basins.
    temiscamingue_row_sum <- rowSums(df)
    # Quick visual sanity check of each simulated series.
    plot(temiscamingue_row_sum, type = 'l')
    filename <- paste0(out_root, subDir, '/temiscamingue_sum_', fichier)
    # NOTE: cbind() of character and numeric vectors coerces everything to
    # character, matching the format of the historical output files.
    df <- as.data.frame(cbind(rep(fichier, length(temiscamingue_row_sum)),
                              seq(1, length(temiscamingue_row_sum)),
                              temiscamingue_row_sum))
    colnames(df) <- c('sim_number', 'julian_day', 'temiscamingue_sum')
    write_csv(as.data.frame(df), filename)
  }
  invisible(NULL)
}
#' calcul_prsim_vers_statistiques_sommaires_angliers
#'
#' Aggregate the many `angliers_sum` PRSIM CSV files with a local Spark
#' session: compute the mean/max/min of the summed Angliers inflow per julian
#' day, save them as an .RData file, and derive annual-maximum quantiles from
#' a Cunnane-plotting-position empirical CDF.
#'
#' @param mainDir2 Root directory that contains the 'angliers_sum' folder of
#'   per-simulation CSV files.
#'
#' @return A list combining the ordered daily mean/max/min statistics with a
#'   data frame of annual-maximum quantiles (return periods 2 to 10 000
#'   years as row names).
#' @export
calcul_prsim_vers_statistiques_sommaires_angliers <- function(mainDir2) {
  require(sparklyr)
  require(dplyr)
  require(tidyr)
  config <- spark_config()
  config$`sparklyr.shell.driver-memory` <- "4G"
  config$`sparklyr.shell.executor-memory` <- "4G"
  config$`spark.yarn.executor.memoryOverhead` <- "512"
  # Connect to local cluster with custom configuration; fix: disconnect on
  # every exit path, not only on normal completion.
  sc <- spark_connect(master = "local", config = config)
  on.exit(spark_disconnect(sc), add = TRUE)
  subDir <- 'angliers_sum'
  # Fix: infer column types from the first file under mainDir2 instead of a
  # hard-coded absolute path to angliers_sum_0000001.csv.
  first_file <- list.files(file.path(mainDir2, subDir), full.names = TRUE)[1]
  spec_with_r <- sapply(read.csv(first_file, nrows = 1), class)
  # Lazily register all CSVs of the folder as one Spark table.
  testo <- spark_read_csv(sc = sc, path = paste0(mainDir2, subDir),
                          columns = spec_with_r, memory = FALSE)
  src_tbls(sc)
  # Daily summary statistics across all simulations.
  df_mean_per_julian_day <- testo %>% group_by(julian_day) %>%
    summarise(AvgQ = mean(angliers_sum, na.rm = TRUE)) %>% collect()
  df_max_per_julian_day <- testo %>% group_by(julian_day) %>%
    summarise(MaxQ = max(angliers_sum, na.rm = TRUE)) %>% collect()
  df_min_per_julian_day <- testo %>% group_by(julian_day) %>%
    summarise(MinQ = min(angliers_sum, na.rm = TRUE)) %>% collect()
  # Order by julian day: collect() does not guarantee row order.
  df_mean_per_julian_day_ordered <- df_mean_per_julian_day[order(df_mean_per_julian_day$julian_day), ]
  df_max_per_julian_day_ordered <- df_max_per_julian_day[order(df_max_per_julian_day$julian_day), ]
  df_min_per_julian_day_ordered <- df_min_per_julian_day[order(df_min_per_julian_day$julian_day), ]
  final_prsim_angliers <- c(df_mean_per_julian_day_ordered,
                            df_max_per_julian_day_ordered,
                            df_min_per_julian_day_ordered)
  save(final_prsim_angliers,
       file = '~/Documents/github/prsim/outaouais_sup_lynda/final_prsim_angliers.RData')
  # Quick visual check of the three summary hydrographs.
  plot(df_max_per_julian_day_ordered, type = 'l', ylim = c(0, 4000))
  points(df_mean_per_julian_day_ordered, type = 'l')
  points(df_min_per_julian_day_ordered, type = 'l')
  # Annual maximum of the summed inflow for each simulated year.
  res <- testo %>% group_by(sim_number) %>%
    summarize(max = max(angliers_sum)) %>% collect()
  # Empirical CDF based on Cunnane plotting positions (i - 0.4) / (n + 0.2).
  ecdf_cunnane <- function(x) {
    x <- sort(x)
    n <- length(x)
    if (n < 1)
      stop("'x' must have 1 or more non-missing values")
    vals <- unique(x)
    rval <- approxfun(vals, cumsum(tabulate(match(x, vals)) - 0.4) / (n + 0.2),
                      method = "constant", yleft = 0, yright = 1, f = 0,
                      ties = "ordered")
    class(rval) <- c("ecdf", "stepfun", class(rval))
    assign("nobs", n, envir = environment(rval))
    attr(rval, "call") <- sys.call()
    rval
  }
  # Fix: the original built this ECDF twice (ecdf_max_year and Fn) and
  # computed the same quantile table twice; compute each once.
  ecdf_max_year <- ecdf_cunnane(res$max)
  plot(ecdf_max_year)
  # Return-period quantiles, from the 2-year to the 10 000-year event.
  quantiles_qinter <- data.frame(
    quantiles = quantile(ecdf_max_year,
                         prob = c((1 - (1 / 10000)), (1 - (1 / 2000)),
                                  (1 - (1 / 1000)), (1 - (1 / 200)),
                                  (1 - (1 / 100)), (1 - (1 / 50)),
                                  (1 - (1 / 20)), (1 - (1 / 10)),
                                  (1 - (1 / 2))), names = FALSE),
    row.names = c(10000, 2000, 1000, 200, 100, 50, 20, 10, 2))
  res <- c(final_prsim_angliers, quantiles_qinter)
  res
}
#' calcul_prsim_vers_statistiques_sommaires
#'
#' For every sub-basin folder found under `mainDir2/bv_csv`, aggregate its
#' PRSIM CSV files with a local Spark session (mean/max/min of `value` per
#' julian day) and save one `<subdir>_stats.Rdata` file per sub-basin under
#' `mainDir2/prsim_stats_rdata`.
#'
#' @param mainDir2 Root directory containing a 'bv_csv' folder with one
#'   sub-folder of per-simulation CSV files per sub-basin.
#' @param spark_home Optional path to a Spark installation, forwarded to
#'   \code{sparklyr::spark_connect}.  Defaults to \code{NULL}, letting
#'   sparklyr locate Spark itself.  (The original referenced an undefined
#'   global \code{spark_home}, which made the function error on call.)
#'
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   writing the per-sub-basin .Rdata summary files.
#' @export
calcul_prsim_vers_statistiques_sommaires <- function(mainDir2, spark_home = NULL) {
  require(sparklyr)
  require(dplyr)
  require(tidyr)
  config <- spark_config()
  config$`sparklyr.shell.driver-memory` <- "4G"
  config$`sparklyr.shell.executor-memory` <- "4G"
  config$`spark.yarn.executor.memoryOverhead` <- "512"
  # Connect to a local cluster; fix: `spark_home` is now an explicit
  # argument instead of an undefined global variable.
  sc <- if (is.null(spark_home)) {
    spark_connect(master = "local", config = config)
  } else {
    spark_connect(master = "local", config = config, spark_home = spark_home)
  }
  # Fix: disconnect on every exit path, not only on normal completion.
  on.exit(spark_disconnect(sc), add = TRUE)
  # One sub-folder per sub-basin.
  subDirs <- list.files(paste0(mainDir2, '/bv_csv'))
  # Column types are inferred from one example file.  Index 2 is kept from
  # the original; assumes at least two sub-folders/files exist -- TODO confirm.
  fichiers_csv <- list.files(paste0(mainDir2, '/bv_csv/', subDirs[2]))
  spec_with_r <- sapply(read.csv(paste0(mainDir2, '/bv_csv/', subDirs[2], '/',
                                        fichiers_csv[2]), nrows = 1), class)
  # Folder that will hold the per-sub-basin .Rdata summaries.
  dir.create(paste0(mainDir2, '/', 'prsim_stats_rdata'), showWarnings = FALSE)
  for (subDir in subDirs) {
    # Lazily register all CSVs of this sub-basin as one Spark table.
    testo <- spark_read_csv(sc = sc,
                            path = paste0(mainDir2, '/bv_csv/', subDir),
                            columns = spec_with_r, memory = FALSE)
    src_tbls(sc)
    # Daily summary statistics across all simulations.
    df_mean_per_julian_day <- testo %>% group_by(julian_day) %>%
      summarise(AvgQ = mean(value, na.rm = TRUE)) %>% collect()
    df_max_per_julian_day <- testo %>% group_by(julian_day) %>%
      summarise(MaxQ = max(value, na.rm = TRUE)) %>% collect()
    df_min_per_julian_day <- testo %>% group_by(julian_day) %>%
      summarise(MinQ = min(value, na.rm = TRUE)) %>% collect()
    # Order by julian day: collect() does not guarantee row order.
    df_mean_per_julian_day_ordered <- df_mean_per_julian_day[order(df_mean_per_julian_day$julian_day), ]
    df_max_per_julian_day_ordered <- df_max_per_julian_day[order(df_max_per_julian_day$julian_day), ]
    df_min_per_julian_day_ordered <- df_min_per_julian_day[order(df_min_per_julian_day$julian_day), ]
    # NOTE: the object is saved under the historical name
    # `final_prsim_angliers` so existing load() calls keep working, even
    # though the statistics are per sub-basin, not Angliers-specific.
    final_prsim_angliers <- c(df_mean_per_julian_day_ordered,
                              df_max_per_julian_day_ordered,
                              df_min_per_julian_day_ordered)
    filename <- paste0(mainDir2, '/', 'prsim_stats_rdata', '/',
                       subDir, '_stats.Rdata')
    save(final_prsim_angliers, file = filename)
  }
  invisible(NULL)
}
|
f5baac4214a8e673635ee6c7cb73a61ff066eb56 | 45135fd392d35d2bdc37e7d2af71b3631bc525f3 | /analysis/clustering_analysis_plots.R | 7a29aef2447b0019ffde9245d5643e2a9aba37e6 | [] | no_license | stripathy/valiante_ih | bec9d294ed8ebf58193454c05c91c23fec087f5c | 4f48897f17b6fd9279e50210db42ae999bba4d57 | refs/heads/master | 2022-11-11T12:16:41.622014 | 2020-06-14T13:57:12 | 2020-06-14T13:57:12 | 227,676,491 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,007 | r | clustering_analysis_plots.R | library(ggbeeswarm)
library(ggrepel)
cell_patient_ephys_combined = read.csv('summary_tables/cell_patient_ephys_combined.csv')
cell_patient_ephys_combined$layer_name = plyr::mapvalues(cell_patient_ephys_combined$layer_name, from = levels(cell_patient_ephys_combined$layer_name), to = c("Layer 2/3", "Layer 3c", "Layer 5"))
cell_patient_ephys_combined %>% ggplot(aes(x = layer_name, fill = cell_type)) +
geom_bar() + facet_wrap(~recorder_name) +
xlab('Neocortex layer') + ylab('Cell count')
p1 = cell_patient_ephys_combined %>% filter(cell_type == 'Pyr') %>%
filter(!res_center_freq %>% is.na) %>%
ggplot(aes(x = layer_name, fill = has_resonance)) +
geom_bar() + facet_wrap(~cell_type)
cell_patient_ephys_combined$cell_type = factor(cell_patient_ephys_combined$cell_type, levels = c('Pyr', 'Int'))
p2 = cell_patient_ephys_combined %>%
filter(res_center_freq > 1) %>% filter(cell_type == 'Pyr') %>%
ggplot(aes(x = layer_name, y = res_center_freq, color = layer_name)) +
scale_color_manual(values = c('blue', 'turquoise4', 'red')) +
geom_quasirandom(alpha = .75, size = 2) +
facet_wrap(~cell_type) +
ylab('Resonance frequency (Hz)') +
xlab('Cortical layer')
plot_grid(p1, p2, nrow = 1)
library(umap)
# kri_extracted_features
kri_ephys_features = c('rin', 'rmp', 'apamp', 'ahpamp', 'aphw', 'apvel', 'sagamp',
#'adratio', 'first_isi', 'avgisi', 'cvisi',
'sag', 'fislope', 'latency', 'avg_rate', 'tau', 'rheo',
'apthr')
use_cell_types = c('Pyr')
#kri_ephys_features = c('aphw', 'apvel', 'adratio', 'fislope', 'ahpamp', 'avgisi', 'avg_rate')
#use_cell_types = c('Pyr', 'Int', NA)
# reassign big Layer3 cell to Layer 2/3 for consistency
cell_patient_ephys_combined[cell_patient_ephys_combined$cell_id == '19129004.abf', 'layer_name'] = 'Layer 2/3'
# add new field to data frame to indicate naming of cells in Fig 1
complete_kri_human_ephys = cell_patient_ephys_combined %>%
filter(cell_type %in% use_cell_types) %>%
dplyr::select(cell_id, kri_ephys_features, ) %>% drop_na %>% as.data.frame() %>% select(-cell_id)
rownames(complete_kri_human_ephys) = cell_patient_ephys_combined %>%
filter(cell_type %in% use_cell_types) %>%
dplyr::select(c(kri_ephys_features, 'cell_id'), ) %>% drop_na %>% pull(cell_id)
complete_kri_human_ephys_umap = umap(complete_kri_human_ephys)
complete_kri_human_ephys_umap_new = merge(complete_kri_human_ephys_umap$layout %>% as.data.frame() %>% tibble::rownames_to_column(var = 'cell_id'),
cell_patient_ephys_combined, by = 'cell_id')
# k2 <- kmeans(complete_aibs_human_ephys, centers = 3, nstart = 25)
# cluster_id_df = k2$cluster %>% as.data.frame() %>% tibble::rownames_to_column(var = 'name')
# colnames(cluster_id_df) = c('name', 'cluster')
# aibs_human_ephys_umap_new = merge(aibs_human_ephys_umap_new, cluster_id_df, by = 'name')
# aibs_human_ephys_umap_new$cluster = aibs_human_ephys_umap_new$cluster %>% factor()
k2 <- kmeans(complete_kri_human_ephys, centers = 4, nstart = 25)
cluster_id_df = k2$cluster %>% as.data.frame() %>% tibble::rownames_to_column(var = 'cell_id')
colnames(cluster_id_df) = c('cell_id', 'cluster')
complete_kri_human_ephys_umap_new = merge(complete_kri_human_ephys_umap_new, cluster_id_df, by = 'cell_id')
complete_kri_human_ephys_umap_new$cluster = complete_kri_human_ephys_umap_new$cluster %>% factor()
complete_kri_human_ephys_umap_new$layer_name = factor(complete_kri_human_ephys_umap_new$layer_name,
levels = c('Layer 5', 'Layer 2/3', 'Layer 3c'))
res_types = c('non-resonant (fR = 0 Hz)', 'weak (0 < fR < 2 Hz)', 'strong (fR > 2 Hz)')
complete_kri_human_ephys_umap_new$resonance_type = factor(complete_kri_human_ephys_umap_new$resonance_type,
levels = c('non-resonant', 'weak', 'strong'))
# complete_kri_human_ephys_umap_new$resonance_type = plyr::mapvalues(complete_kri_human_ephys_umap_new$resonance_type ,
# from = levels(cell_patient_ephys_combined$resonance_type), to = res_types)
p0 = complete_kri_human_ephys_umap_new %>% arrange(layer_name) %>%
ggplot(aes(x = V1, y = V2, color = layer_name)) +
scale_color_manual(values = c('red', 'blue', 'turquoise4'), name = 'Layer') +
geom_point(size = 2, alpha = .75) +
geom_text_repel(data = complete_kri_human_ephys_umap_new %>%
filter(cell_id %in% cells_w_morphology), aes(label = cell_id),
nudge_y = -1, nudge_x = 1) +
ylab('UMAP 2') + xlab('UMAP 1') +
theme(legend.position="top")
p1 = complete_kri_human_ephys_umap_new %>% filter(has_resonance %in% c(T, F)) %>% arrange(resonance_type) %>%
ggplot(aes(x = V1, y = V2, color = resonance_type)) +
scale_color_manual(values = c('lightgrey', 'black', 'red'), name = 'Resonance') +
#scale_color_manual(values = c('blue', 'turquoise4', 'red')) +
geom_jitter(size = 1.5, width = .25, height = .25, alpha = .5) +
#scale_color_discrete(name = "Resonant") +
#scale_alpha_manual(guide='none', values = list(a = 0.2, point = 1))
# geom_text_repel(data = complete_kri_human_ephys_umap_new %>%
# filter(cell_id %in% cells_w_morphology), aes(label = cell_id)) +
ylab('UMAP 2') + xlab('UMAP 1') +
theme(legend.position="right")
complete_kri_human_ephys_umap_new = complete_kri_human_ephys_umap_new %>% mutate(has_burst_algo_hero = ((1/first_isi) > 75 & !(is.na(first_isi))))
complete_kri_human_ephys_umap_new = complete_kri_human_ephys_umap_new %>% mutate(has_burst_def = case_when(
(has_burst_algo == T)~ 'strong',
(has_burst_algo == F & has_burst_algo_hero == T) ~ 'weak',
has_burst_algo_hero == F ~ 'none'
))
complete_kri_human_ephys_umap_new$has_burst_def = factor(complete_kri_human_ephys_umap_new$has_burst_def, levels = c('none', 'weak', 'strong'))
p2 = complete_kri_human_ephys_umap_new %>%
arrange(has_burst_def) %>%
ggplot(aes(x = V1, y = V2, color = has_burst_def)) +
scale_color_manual(values = c('lightgrey', 'black', 'red'), name = "Bursting") +
geom_jitter(size = 1.5, width = .25, height = .25, alpha = .5) +
# geom_text_repel(data = complete_kri_human_ephys_umap_new %>%
# filter(cell_id %in% cells_w_morphology), aes(label = cell_id)) +
ylab('UMAP 2') + xlab('UMAP 1') +
theme(legend.position="right") #+ scale_color_discrete(name = "Bursting")
#
# p2 = complete_kri_human_ephys_umap_new %>% filter(has_burst_algo_hero %in% c(T, F)) %>%
# arrange(has_burst_algo) %>%
# ggplot(aes(x = V1, y = V2, color = has_burst_algo_hero)) +
# scale_color_manual(values = c('lightgrey', 'red'), name = "Bursting") +
# geom_jitter(size = 1.5, width = .25, height = .25, alpha = .5) +
# # geom_text_repel(data = complete_kri_human_ephys_umap_new %>%
# # filter(cell_id %in% cells_w_morphology), aes(label = cell_id)) +
# ylab('UMAP 2') + xlab('UMAP 1') +
# theme(legend.position="right") #+ scale_color_discrete(name = "Bursting")
p3 = complete_kri_human_ephys_umap_new %>%
#arrange(has_burst_algo) %>%
ggplot(aes(x = V1, y = V2, color = rin)) +
scale_color_steps(name = 'Rin (MOhm)', breaks = c(25, 50, 75, 100, 125)) +
geom_jitter(size = 1.5, alpha = .5, width = .25, height = .25) +
# geom_text_repel(data = complete_kri_human_ephys_umap_new %>%
# filter(cell_id %in% cells_w_morphology), aes(label = cell_id)) +
ylab('UMAP 2') + xlab('UMAP 1') +
theme(legend.position="right") #+ scale_color_discrete(name = "Bursting")
right_plot = plot_grid(p3, p2, p1, ncol = 1, align = 'hv')
full_plot = plot_grid(p0, right_plot, nrow = 1, rel_widths = c(1, .75))
full_plot
ggsave(file = 'figures/Fig7_clustering_r_gen.pdf', plot = full_plot, width = 8, height = 5, device = "pdf")
#plot_grid(p0, p1, p2, nrow = 1)
|
5d8f1a09f7b1607ebf108a3280e71ffb4032d6d8 | 651ce52e8e4a3c5bf8812420230334e21c5c0d48 | /R-Script_Assignment 4.R | a6c87dce32e5818ee84e6e3d0d942b06d607385f | [] | no_license | erichmccartney/R | a9f5accddac8531f49343288ee8f1843e8e3d320 | ba26cadee28b05f08ea600aa42ba8c5d763a7651 | refs/heads/main | 2023-06-06T10:03:13.455199 | 2021-06-26T13:12:22 | 2021-06-26T13:12:22 | 333,322,798 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,847 | r | R-Script_Assignment 4.R | # Pre-processing
#1 Load and Clean file
rm(list=ls())
library(rio)
library(moments)
cars=import("6304 Assignment 4 Data.xlsx")
colnames(cars)=tolower(make.names(colnames(cars)))
#2 Convert year and origin
str(cars)
cars$year=as.factor(cars$year)
cars$origin=as.factor(cars$origin)
str(cars)
attach(cars)
#3 Random Sample of 70% of data
unum=31484443
set.seed(unum)
random.cars=cars[sample(1:nrow(cars),(NROW(cars)*.7)),]
random.cars$cylinders=as.factor(random.cars$cylinders)
random.cars$make=as.factor(random.cars$make)
random.cars$model=as.factor(random.cars$model)
str(random.cars)
# Analysis
#1 Multiple Regression - MPG vs. CI, HP, and LB
# Main-effects linear model: mpg regressed on displacement, horsepower, weight.
cars.out=lm(mpg~cubic.inches+horsepower+weight,
data=random.cars)
#2 Model Output
summary(cars.out)
#3 Confidence Interval
# 95% confidence intervals (default level) for the model coefficients.
confint(cars.out)
#4 Conformity with LINE assumptions of regression
#Linearity
# Actual vs. fitted values; points near the y = x line indicate linearity.
plot(random.cars$mpg,cars.out$fitted.values,
pch=19,main="Cars Actual v. Fitted Values")
abline(0,1,col="red",lwd=3)
#Normality
# QQ plot of residuals against the normal reference line.
qqnorm(cars.out$residuals,pch=19,main="Cars Normality Plot")
qqline(cars.out$residuals,col="red",lwd=3)
#Equality of Variances
# Standardized residuals around zero; a funnel shape would suggest
# heteroscedasticity.
plot(rstandard(cars.out),pch=19,
main="Cars Standardized Residuals")
abline(0,0,col="red",lwd=3)
#5 Identifying high leverage points.
# Leverage (hat values); red line = 3x mean leverage, blue line = 2x mean.
leverages=hat(model.matrix(cars.out))
plot(leverages,pch=19,main="Leverage Plot, MPG Data")
abline(3*mean(leverages),0,col="red",lwd=3)
abline(2*mean(leverages),0,col="blue",lwd=3)
# Report ONLY the year, make, and model
# NOTE(review): columns 6, 8, 9 are assumed to be year/make/model -- verify
# against the spreadsheet's column order.
random.cars[leverages>(3*mean(leverages)),c(6, 8, 9)]
#6 Square Horsepower and Weight in Model
# Compare adding quadratic terms: horsepower^2 only, weight^2 only, and both.
#only hp^2
cars1.out=lm(mpg~cubic.inches+horsepower+weight+I(horsepower^2)
,data=random.cars)
summary(cars1.out)
#only weight^2
cars2.out=lm(mpg~cubic.inches+horsepower+weight+
I(weight^2),data=random.cars)
summary(cars2.out)
#both squared is best model
cars3.out=lm(mpg~cubic.inches+horsepower+weight+I(horsepower^2)+
I(weight^2),data=random.cars)
summary(cars3.out)
# Side-by-side actual-vs-fitted comparison: main effects vs. squared-term model.
par(mfrow=c(1,2))
plot(random.cars$mpg,cars.out$fitted.values,pch=19,
main="Main Effects Model")
abline(0,1,lwd=3,col="red")
plot(random.cars$mpg,cars3.out$fitted.values,pch=19,
main="Squared Term Model")
abline(0,1,lwd=3,col="red")
# Reset to a single plotting panel, then run diagnostics for cars3.
par(mfrow=c(1,1))
plot(random.cars$mpg,cars3.out$fitted.values,pch=19,
main="Cars3 Actual v. Forecast")
abline(0,1,lwd=3,col="red")
qqnorm(cars3.out$residuals,pch=19,
main="Cars3 Normality Plot")
qqline(cars3.out$residuals,lwd=3,col="red")
hist(cars3.out$residuals, col = "red")
# Standardized residuals vs. mpg.
# NOTE(review): the title "Cars3 Normality Plot" looks like a copy-paste
# mislabel -- this is a residual plot, not a QQ plot.
plot(random.cars$mpg,rstandard(cars3.out),pch=19,
main="Cars3 Normality Plot")
abline(0,0,col="red",lwd=3)
#7 Add Year to Model
# cars4: main-effects model augmented with the year factor.
cars4.out=lm(mpg~cubic.inches+year+horsepower+weight,
data=random.cars)
summary(cars4.out)
confint(cars4.out)
# Three-panel actual-vs-fitted comparison of the models fit so far.
par(mfrow=c(1,3))
plot(random.cars$mpg,cars.out$fitted.values,pch=19,
main="Main Effects Model")
abline(0,1,lwd=3,col="red")
plot(random.cars$mpg,cars3.out$fitted.values,pch=19,
main="Squared Term Model")
abline(0,1,lwd=3,col="red")
plot(random.cars$mpg,cars4.out$fitted.values,pch=19,
main="Main Effects Model w/Year")
abline(0,1,lwd=3,col="red")
# Diagnostics for cars4: actual vs. fitted, normality, standardized
# residuals, and leverage.
par(mfrow=c(1,1))
plot(random.cars$mpg,cars4.out$fitted.values,
pch=19,main="Cars4 Actual v. Fitted Values")
abline(0,1,col="red",lwd=3)
qqnorm(cars4.out$residuals,pch=19,main="Cars4 Normality Plot")
qqline(cars4.out$residuals,col="red",lwd=3)
plot(rstandard(cars4.out),pch=19,
main="Cars4 Standardized Residuals")
abline(0,0,col="red",lwd=3)
leverages=hat(model.matrix(cars4.out))
plot(leverages,pch=19,main="Leverage Plot, MPG Data")
abline(3*mean(leverages),0,col="red",lwd=3)
abline(2*mean(leverages),0,col="blue",lwd=3)
# cars5: year factor plus both quadratic terms (most complex model).
cars5.out=lm(mpg~cubic.inches+year+horsepower+weight+I(horsepower^2)+
I(weight^2),data=random.cars)
# Four-panel comparison across all candidate models.
par(mfrow=c(1,4))
plot(random.cars$mpg,cars.out$fitted.values,pch=19,
main="Main Effects Model")
abline(0,1,lwd=3,col="red")
plot(random.cars$mpg,cars3.out$fitted.values,pch=19,
main="Squared Term Model")
abline(0,1,lwd=3,col="red")
plot(random.cars$mpg,cars4.out$fitted.values,pch=19,
main="Main Effects Model w/Year")
abline(0,1,lwd=3,col="red")
plot(random.cars$mpg,cars5.out$fitted.values,pch=19,
main="Squared Term Model w/Year")
abline(0,1,lwd=3,col="red")
# Diagnostics for cars5.
par(mfrow=c(1,1))
plot(random.cars$mpg,cars5.out$fitted.values,pch=19,
main="Cars5 Actual v. Forecast")
abline(0,1,lwd=3,col="red")
qqnorm(cars5.out$residuals,pch=19,
main="Cars5 Normality Plot")
qqline(cars5.out$residuals,lwd=3,col="red")
hist(cars5.out$residuals, col = "red")
# NOTE(review): as with cars3, this is a standardized-residual plot despite
# the "Normality Plot" title.
plot(random.cars$mpg,rstandard(cars5.out),pch=19,
main="Cars5 Normality Plot")
abline(0,0,col="red",lwd=3)
|
f770ac422f41b8126f88150b377e9e2bdd0e7a53 | 6ae8330e79111d9bd1905bb7eae2125c06a6eb42 | /cachematrix.R | 346695fcc30e221d787b34efdeb18a4b7de1e207 | [] | no_license | linazhu11/ProgrammingAssignment2 | 8457dada4ce2e864fad191a030bb20137cf53916 | 5917d283097d26569941be76a5d12bbf66964399 | refs/heads/master | 2020-12-30T21:35:01.460554 | 2015-05-24T20:25:34 | 2015-05-24T20:25:34 | 36,156,163 | 0 | 0 | null | 2015-05-24T04:37:36 | 2015-05-24T04:37:35 | null | UTF-8 | R | false | false | 1,288 | r | cachematrix.R | ## The functions below, "makeCacheMatrix" and "cacheSolve," calculate the inverse of
## makeCacheMatrix: create a caching wrapper around a square numeric matrix,
## allowing the user to define and cache both the matrix and its inverse so
## the inverse need not be recomputed (used together with cacheSolve below).
##
## Args:
##   x: the matrix to wrap. Fix: the default was numeric(), a plain vector,
##      which is type-inconsistent for a matrix cache; it is now matrix().
##
## Returns a list of four accessor functions:
##   setmatrix(y)   -- replace the stored matrix and invalidate the cache
##   getmatrix()    -- return the stored matrix
##   setinverse(inv)-- store a computed inverse in the cache
##   getinverse()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL means "not computed yet"
  setmatrix <- function(y) {
    x <<- y
    i <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  getmatrix <- function() x
  setinverse <- function(inv) i <<- inv
  getinverse <- function() i
  list(setmatrix = setmatrix, getmatrix = getmatrix, setinverse = setinverse, getinverse = getinverse)
}
## "cacheSolve" returns the inverse of the matrix defined in "makeCacheMatrix" above
## It first checks to see if the inverse already exists. If so, it returns the cached value.
## Otherwise, it proceeds to calculate the inverse.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if (!is.null(i)) {
message("getting cached data")
return(i) #returns existing inverse and exits the function
}
matrix <- x$getmatrix()
i <- solve(matrix) #calculates inverse
x$setinverse(i)
i
}
|
7d36ab04b4e19ca5529f5cbabc53b96711e52000 | 5ef320a68d5ca4ee0e7d8a85cabfc0ca5a7b1647 | /code/solitary_decision_data_analysis.r | 7453cc938a525682d8e85f853821c40c634003fa | [] | no_license | michael-franke/choice-prince-evo | b95f9b70183b8ee84d28cb8aae980107a12b7bf0 | 3cc92c5c30ebd313ad1af3868e615f7c092fd62f | refs/heads/master | 2020-04-05T14:38:33.521104 | 2016-09-23T15:12:45 | 2016-09-23T15:12:45 | 22,376,317 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,109 | r | solitary_decision_data_analysis.r | require('gtools') # for dirichlet distribution
require('plyr') # to conveniently manipulate data frames
require('ggplot2')
require('reshape2')
# load('data_solitary_decisions_10x10.RDATA')
# NOTE(review): this script assumes a data frame `data` (columns u.br, u.mr,
# u.mm, n.t, n.a) from the RDATA file whose load() call is commented out.
# Grand mean utility for each strategy: best response (br), myopic response
# (mr), and mm.
grandMeans = c(u.br = mean(data$u.br), mr = mean(data$u.mr), mm = mean(data$u.mm))
show(grandMeans)
# Per-(n.t, n.a) cell: N, mean and sd of each strategy's utility, plus which
# strategy has the highest / lowest mean in that cell.
means = ddply(data, .(data$n.t,data$n.a), summarise,
N = length(u.br),
mean.br= mean(u.br) ,
sd.br = sd(u.br),
mean.mr= mean(u.mr),
sd.mr = sd(u.mr),
mean.mm= mean(u.mm),
sd.mm = sd(u.mm),
best = c("br", "mr", "mm")[which.max(c(mean(u.br), mean(u.mr), mean(u.mm)))],
worst = c("br", "mr", "mm")[which.min(c(mean(u.br), mean(u.mr), mean(u.mm)))])
# Reshape to long form for plotting, then restore readable grouping-column
# names (ddply produced "data$n.t" / "data$n.a").
meansLong = melt(data = means, id.vars = c("data$n.t", "data$n.a"), measure.vars = c("mean.br", "mean.mr", "mean.mm"))
colnames(meansLong)[1] = "n.t"
colnames(meansLong)[2] = "n.a"
# Mean utility vs. n.a, one line per strategy, faceted by n.t.
meansPlot = ggplot(data = meansLong, aes(x = n.a, y = value, col = variable)) +
geom_line() + geom_point() + facet_wrap(~ n.t, nrow = 3)
show(meansPlot)
bb9173e86957183a9adbde18e81686d998e7f636 | 7b1db0e77a666a37f1597cda09299fb2b0730dac | /week 3.R | 07259ea130fe4b78b883080313be4dfe91431a7d | [] | no_license | ericborn/R-555 | c4d952f043410ef2c8cc666fb3f7449972dc6fc0 | 323174bc56becdb1d1e2e02cd58057de4a11700a | refs/heads/master | 2020-06-20T21:03:21.794297 | 2019-08-21T00:24:49 | 2019-08-21T00:24:49 | 197,248,151 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,936 | r | week 3.R | # 1)
# Read the data into R
dat <- read.csv(file='c:/CS_555_Assignment_3.csv', sep = ',')
# 2)
# The x-axis should be the explanatory or independent variable, which is generally the
# one that occurs first. For this I chose the number of fish meals per week, as the fishermen
# would need to eat the fish before their mercury levels could rise. The y-axis is the response
# or dependent variable, which would be the response to eating fish each week and is represented
# by the mercury levels in fisherman.
ggplot(dat, aes(x=NumFishMeals, y=TotalMercury)) +
geom_point(color='blue') +
xlab('Number of fish meals (per week)') + ylab ('Mercury levels in fisherman') +
ggtitle("Scatterplot of fish meals (per week) vs mercury levels in fisherman") +
theme(plot.title = element_text(hjust = 0.5))
# 3)
# Correlation coefficient - 0.7398083
cor(dat$NumFishMeals, dat$TotalMercury)
# 4)
# Regression line added with geom_smooth, method = lm which is linear model
ggplot(dat, aes(x=NumFishMeals, y=TotalMercury)) +
geom_point(color='blue') +
geom_smooth(color='red', method = 'lm', se = FALSE) +
xlab('Number of fish meals (per week)') + ylab ('Mercury levels in fisherman') +
ggtitle("Scatterplot of fish meals (per week) vs mercury levels in fisherman") +
theme(plot.title = element_text(hjust = 0.5))
# 5)
# Compute the least-squares slope and intercept by hand
# (beta1 = r * sy / sx, beta0 = ybar - beta1 * xbar), then confirm with lm().
xbar <- mean(dat$NumFishMeals)
sx <- sd(dat$NumFishMeals)
ybar <- mean(dat$TotalMercury)
sy <- sd(dat$TotalMercury)
r <- cor(dat$NumFishMeals, dat$TotalMercury)
beta1 <- r*sy/sx
beta1
beta0 <- ybar - beta1*xbar
beta0
m <- lm(dat$TotalMercury ~ dat$NumFishMeals)
summary(m)
# 6)
# Anova
anova(m)
# f-test
# 2.75743
# Critical F value at alpha = 0.10 with (1, 98) degrees of freedom.
qf(0.90, 1, 98)
# Confidence interval at 90%
# 0.3277582 0.4457599
confint(m, level = 0.90)
# Reg SS Res SS
# 1394.2 1153.2
# R^2 computed by hand from the sums of squares hard-coded off the printed
# anova() output above.
reg.ss <- 1394.2
res.ss <- 1153.2
total.ss <- reg.ss + res.ss
r.squared <- reg.ss / total.ss
|
6c64f6a5bf3d15d96b079ddc7982112ba9f3fa60 | da695472b178c4edb0122c29c7eae68a75e2e408 | /R/Code/Experiments/2017.09.10/run_Benchmarks_2017_09_10.R | 3e59e9d661c6275b8528037e104b3935c4d6997e | [] | no_license | shlpu/RandomerForest | 029b43c9f71569970673138df298741816eb2fe1 | d8e939ab6a878a844542bb83d4a3f8e246fa1998 | refs/heads/master | 2020-03-18T10:19:14.870064 | 2018-05-07T23:41:19 | 2018-05-07T23:41:19 | 134,606,629 | 1 | 0 | null | 2018-05-23T17:52:44 | 2018-05-23T17:52:43 | null | UTF-8 | R | false | false | 8,248 | r | run_Benchmarks_2017_09_10.R | # evaluate classifiers on benchmark datasets
# Setup: load the RerF classifier code, choose which classifiers to compare,
# fix forest hyperparameters, and create empty per-dataset result containers.
rm(list=ls())
options(scipen = 999)
rerfPath <- "~/"
dataPath <- "~/Data/uci/processed/"
source(paste(rerfPath, "RerF/Code/Classifiers/rfr_function.R", sep = ""), chdir = T)
# source("~/R-RerF/rfr_function.R", chdir = T)
classifiers <- c("rf", "rerf", "rerfr", "rerfp", "rerfpr", "frc", "frank", "rr-rf", "rr-rfr")
# classifiers <- "rerfpr"
nCl <- length(classifiers)
# Forest hyperparameters shared by every classifier/dataset combination.
nTrees <- 500L
MinParent <- 2L
MaxDepth <- "inf"
NumCores <- 40L
seed <- 09092017L
# Result containers, each indexed as <metric>[[dataSet]][[classifier]].
testError <- list()
OOBError <- list()
OOBAUC <- list()
trainTime <- list()
OOBTime <- list()
testTime <- list()
treeStrength <- list()
treeCorr <- list()
numNodes <- list()
bestIdx <- list()
params <- list()
# initialize arrays
# Dataset CSVs and the optional categorical-map files that accompany them.
contents <- list.files(paste0(dataPath, "data/"))
catFiles <- list.files(paste0(dataPath, "categorical_map/"))
for (fileName in contents) {
dataSet <- strsplit(fileName,"\\.")[[1L]][1L]
print(dataSet)
fold <- get.folds(paste0(dataPath, "cv_partitions/", dataSet, "_partitions.txt"))
nFolds <- length(fold)
X <- as.matrix(read.table(paste0(dataPath, "data/", dataSet, ".csv"), header = F, sep = ",", quote = "", row.names = NULL))
if (paste0(dataSet, "_catmap.txt") %in% catFiles) {
catMap <- get.catmap(paste0(dataPath, "categorical_map/", dataSet, "_catmap.txt"))
pcat <- length(catMap)
pnum <- catMap[[1L]][1L] - 1L
p <- pcat + pnum
p.ohe <- ncol(X) - 1L
Y <- as.integer(X[, p.ohe + 1L]) + 1L
X <- X[, -(p.ohe + 1L)]
} else {
catMap <- NULL
p <- ncol(X) - 1L
Y <- as.integer(X[, p + 1L]) + 1L
X <- X[, -(p + 1L)]
}
# labels <- sort(unique(Y))
nClasses <- length(unique(Y))
testError[[dataSet]] <- vector(mode = "list", length = nCl)
names(testError[[dataSet]]) <- classifiers
OOBError[[dataSet]] <- vector(mode = "list", length = nCl)
names(OOBError[[dataSet]]) <- classifiers
OOBAUC[[dataSet]] <- vector(mode = "list", length = nCl)
names(OOBAUC[[dataSet]]) <- classifiers
trainTime[[dataSet]] <- vector(mode = "list", length = nCl)
names(trainTime[[dataSet]]) <- classifiers
OOBTime[[dataSet]] <- vector(mode = "list", length = nCl)
names(OOBTime[[dataSet]]) <- classifiers
testTime[[dataSet]] <- vector(mode = "list", length = nCl)
names(testTime[[dataSet]]) <- classifiers
treeStrength[[dataSet]] <- vector(mode = "list", length = nCl)
names(treeStrength[[dataSet]]) <- classifiers
treeCorr[[dataSet]] <- vector(mode = "list", length = nCl)
names(treeCorr[[dataSet]]) <- classifiers
numNodes[[dataSet]] <- vector(mode = "list", length = nCl)
names(numNodes[[dataSet]]) <- classifiers
bestIdx[[dataSet]] <- vector(mode = "list", length = nCl)
names(bestIdx[[dataSet]]) <- classifiers
params[[dataSet]] <- vector(mode = "list", length = nCl)
names(params[[dataSet]]) <- classifiers
for (m in classifiers) {
if (m == "rf" || m == "rr-rf" || m == "rr-rfr") {
randomMatrix <- "rf"
if (p < 5) {
mtrys <- 1:p
} else {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1))
}
sparsity <- 1/p # this parameter doesn't actually matter for RF
} else if (m == "rerf" || m == "rerfr") {
randomMatrix <- "binary"
if (p < 5) {
mtrys <- c(1:p, p^2)
} else if (p >= 5 && p <= 100) {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 2))
} else {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 1.5))
}
sparsity <- (1:min(p-1, 5))/p
# sparsity <- 1/p
} else if (m == "rerfc") {
randomMatrix <- "continuous"
if (p < 5) {
mtrys <- c(1:p, p^2)
} else if (p >= 5 && p <= 100) {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 2))
} else {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 1.5))
}
sparsity <- (1:min(p-1, 5))/p
} else if (m == "rerfp" || m == "rerfpr") {
randomMatrix <- "poisson"
if (p < 5) {
mtrys <- c(1:p, p^2)
} else if (p >= 5 && p <= 100) {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 2))
} else {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 1.5))
}
sparsity <- (1:min(ceiling(p/2), 5))
# sparsity <- 1L
} else if (m == "frc" || m == "frank") {
randomMatrix <- "frc"
if (p < 5) {
mtrys <- c(1:p, p^2)
} else if (p >= 5 && p <= 100) {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 2))
} else {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 1.5))
}
sparsity <- (2:min(p, 5))
} else if (m == "frcn") {
randomMatrix <- "frcn"
if (p < 5) {
mtrys <- c(1:p, p^2)
} else if (p >= 5 && p <= 100) {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 2))
} else {
mtrys <- ceiling(p^c(1/4, 1/2, 3/4, 1, 1.5))
}
sparsity <- (2:min(p, 5))
}
if (m == "rr-rf" || m == "rr-rfr") {
rotate <- T
} else {
rotate <- F
}
if (m == "rerfr" || m == "rerfpr" || m == "rerfcr" || m == "frank" || m == "rr-rfr") {
rank.transform <- T
} else {
rank.transform <- F
}
params[[dataSet]][[m]] <- list(trees = nTrees, randomMatrix = randomMatrix, d = mtrys, sparsity = sparsity, rotate = rotate,
rank.transform = rank.transform, MinParent = MinParent, MaxDepth = MaxDepth, NumCores = NumCores,
seed = seed)
testError[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
OOBError[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
OOBAUC[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
trainTime[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
OOBTime[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
testTime[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
treeStrength[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
treeCorr[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
numNodes[[dataSet]][[m]] <- matrix(as.double(rep(NA, nFolds*length(sparsity)*length(mtrys))),
nrow = nFolds, ncol = length(sparsity)*length(mtrys))
bestIdx[[dataSet]][[m]] <- as.integer(rep(NA, nFolds))
# loop over folds
for (k in seq.int(nFolds)) {
print(paste0("fold ", k))
trainIdx <- unlist(fold[-k])
testIdx <- fold[[k]]
# evaluate models
res <- rerf_eval(X[trainIdx, ], Y[trainIdx], X[testIdx, ], Y[testIdx], nClasses, catMap, params[[dataSet]][[m]])
testError[[dataSet]][[m]][k, ] <- res$testError
OOBError[[dataSet]][[m]][k, ] <- res$oobError
OOBAUC[[dataSet]][[m]][k, ] <- res$oobAUC
trainTime[[dataSet]][[m]][k, ] <- res$trainTime
OOBTime[[dataSet]][[m]][k, ] <- res$oobTime
testTime[[dataSet]][[m]][k, ] <- res$testTime
treeStrength[[dataSet]][[m]][k, ] <- res$treeStrength
treeCorr[[dataSet]][[m]][k, ] <- res$treeCorrelation
numNodes[[dataSet]][[m]][k, ] <- res$numNodes
bestIdx[[dataSet]][[m]][k] <- res$best.idx
save(testError, OOBError, OOBAUC, trainTime, OOBTime, testTime, treeStrength, treeCorr, numNodes, bestIdx, params, file = paste0(rerfPath, "RerF/Results/2017.09.10/Benchmarks_2017_09_10.RData"))
}
}
}
|
e9eb478ab13ea2176b52451d22f176688e682f0a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hierfstat/examples/boot.vc.Rd.R | be0a749eba45a463bed5a8ffbb3ac7a859b66283 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 260 | r | boot.vc.Rd.R | library(hierfstat)
### Name: boot.vc
### Title: Bootstrap confidence intervals for variance components
### Aliases: boot.vc
### Keywords: univar
### ** Examples
#load data set
data(gtrunchier)
# Bootstrap variance components: first two columns are the hierarchy levels,
# the remaining columns are loci; 100 bootstrap replicates.
boot.vc(gtrunchier[,c(1:2)],gtrunchier[,-c(1:2)],nboot=100)
|
2377ca58d95a0cbd29c7951785a1c60258445692 | 908b58d65454acadbfb22c8f6ec32a907d4434fe | /myApp/server.R | 8c74db8894abbf67c5f617daee3aa78cc1aed3b9 | [] | no_license | ysebega/developingdataproduct | c3df6d543e4e23404c679c307ecbdb1703b9c42b | 0116a3ffe313bcd703b44f044ca9ddfa5540e3d0 | refs/heads/master | 2021-01-12T07:15:42.895594 | 2016-12-20T05:30:24 | 2016-12-20T05:30:24 | 76,925,123 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 641 | r | server.R |
library(shiny)
dfshowCAC<- EuStockMarkets[, 1]
dfshowDAX<- EuStockMarkets[, 2]
dfshowSMI<- EuStockMarkets[, 3]
dfshowFTSE<- EuStockMarkets[, 4]
shinyServer(function(input, output) {
output$EuStock <- renderPlot({
plot(EuStockMarkets, col=input$var, title(main = input$txtTitle))
if (input$showCAC){plot(dfshowCAC, col=input$var, title(main = "CAC"))} else
if (input$showDAX){plot(dfshowDAX, col=input$var, title(main = "DAX"))}else
if (input$showSMI){plot(dfshowSMI, col=input$var, title(main = "SMI"))}else
if (input$showFTSE){plot(dfshowFTSE, col=input$var, title(main = "FTSE"))}
})
})
|
e3c1e1f52d8c24b880d9b45e5669740b6bec3467 | 31f3d6031b5ac2310317b72a20ef8f2c29d55049 | /r/src/column_obs/oco_functions/get.wgt.oco.func.r | d7a98cf228eac0bb79328157a7b243ba7e06e450 | [] | no_license | uataq/X-STILT | 638c3c76e6e396c0939c85656a53eb20a1eaba74 | eaa7cfabfc13569a9e598c90593f6418bc9113d5 | refs/heads/master | 2023-07-22T03:14:50.992219 | 2023-07-14T16:40:55 | 2023-07-14T16:40:55 | 128,477,511 | 12 | 5 | null | 2023-06-19T22:57:03 | 2018-04-06T22:46:36 | R | UTF-8 | R | false | false | 6,650 | r | get.wgt.oco.func.r | ### subroutine to get new AK PWF profiles based on OCO-2's
# pressure weighting and averaging kernel, for combined levels
# OCO-2 only provides the PWF, AK and a priori at 20 levels,
# use linear interpolation to "approx" values at given STILT releasing levels
# Dien Wu, 08/05/2016
# Bugs fixed, DW:
# fix 1, 11/20/2016, modify the interpolate of PWF from OCO2 to STILT
# fix 2, 11/28/2016, remove storeTF flag, as we always need .RData file stored
# for generating footprint using Trajectfoot()
# fix 3, 02/08/2017, add uneven vertical spacing
# fix 4, 04/06/2017, change AK from zero to original OCO2 AK above model level
# add 5, 04/19/2017, store interpolated AK PWF apriori
# version 3 for matching Ben's STILT-R version 2, DW, 05/25/2018
# interpolate ground hgt in this subroutine, DW, 05/25/2018
# output refers to all the content from .rds file using STILTv2, 05/25/2018
# which is the same 'output' from simulation_step()
# *** USE OCO-2 retrieved surface pressure/altitude for air column
# because modeled Psurf/air column differs from retrieved Psurf/air column
# DW, 08/30/2019
# minor update for using OCO-3 data, i.e., change variable names, DW, 06/28/2020
# interpolate vert profiles onto each particle instead of each release level,
# due to changes in HYSPLIT compiler, DW, 07/07/2020
get.wgt.oco.func = function(output, oco.path, oco.fn = NA) {
# before weighting trajec-level footprint by AK & PWF
# get specific humidity and temp profiles that have been extracted via
# before_trajec_xstilt()
qt.prof = output$qt_prof
if (is.null(qt.prof)) stop('get.wgt.oco.func(): no extracted q and T profiles found...\n')
# grab receptor info and select the particles at first time step back
receptor = output$receptor; p = output$particle
min.time = min(abs(p$time)) * sign(p$time[1]) # MIN time in mins
sel.p = p[p$time == min.time, ] %>% dplyr::select(indx, zagl, zsfc, pres, xhgt)
# get OCO-2/3 profile first according to lat/lon of receptor, return a list
if (!is.na(oco.fn)) oco.path = NA
oco.info = get.oco.info(oco.path, receptor, oco.fn)
if (is.null(oco.info)) {
warning('get.wgt.oco.func(): NO OCO info found for this receptor...please check\n')
return()
} # end if is.null
#### ------------------------ DEALING WITH OCO NOW -------------------- ####
# grab press weighting function, pressures, normalized AK, apriori
oco.df = oco.info[c('ak.norm', 'pwf', 'pres', 'ap')] %>% as.data.frame()
oco.pres.bound = as.numeric(oco.df$pres)
oco.ak.norm = as.numeric(oco.df$ak.norm)
oco.zsfc = oco.info$oco.grdhgt
oco.psfc = oco.info$oco.psfc
oco.xh2o = oco.info$oco.xh2o # already in mol m-2
if (is.na(oco.zsfc)) stop('get.wgt.oco.func(): satellite-based surface height is NA, please check...\n')
# --------- correct for model-satellite mismatch in sfc P or Z --------- #
# inferred from trajec, DW, 08/30/2019
# P = Psfc * exp (g/RTv_mean * (Zsfc - Z)), fit nonlinear regression between P and delta_Z,
# Tv_mean is the mean virtual temp
# correct for diff in ZSFC between sensor and particle ZSFC
p.zasl = sel.p$zagl + sel.p$zsfc # trajec-level ASL
p.pres = sel.p$pres # trajec-level pressure
p.zagl.corr.co2 = p.zasl - oco.zsfc # corrected trajec-level AGL
# use satellite surface altitude and pressure to calculate ZAGL that TROPOMI believes
# use particle-level pressure and ASL to calculate the coeffient
nls = stats::nls(p.pres ~ oco.psfc * exp(a * (-p.zagl.corr.co2)), start = list(a = 1E-4))
a.oco = coef(nls)[[1]] # a3 stands for g/RTv_mean based on OCO atmos-X
# --------------------------------------------------------------------------
# now start to weight trajec-level footprint for X-STILT
# --------------------------------------------------------------------------
# first bin up specific according to pressure of bottom level
# +1 is adjust as findInterval() finds the lower end of a pressure interval
# but pressure declines with height
qt.bin = qt.prof %>%
mutate(xpres = oco.pres.bound[findInterval(pres, oco.pres.bound) + 1]) %>%
group_by(xpres) %>% summarise_all(mean) %>% ungroup() %>%
dplyr::select(-pres) %>% na.omit()
# estimate initial release height and pressure based on hyposmetric equation
zmin = min(receptor$zagl)
zmax = max(receptor$zagl)
npar = max(p$indx)
# HYSPLITv5 deploys a line source (receptor$zagl) for releasing particles,
#xhgt = zmax - zmax / npar * (npar - indx), already incorporated in STILT
# release height of each particle is roughly distributed between levels of a line source
sel.p = sel.p %>%
# use OCO sfc pressure to calculate pressure of release heights
# P = Psfc * exp (g/RT * (Zsfc - Z))
mutate(
xpres = oco.psfc * exp(a.oco * (-xhgt)),
# interpolate specific humidity, AK_norm, apriori to
# each particle based on its release pressure
q = approx(qt.bin$xpres, qt.bin$sphu, xout = xpres, rule = 2)$y,
ak.norm = approx(oco.df$pres, oco.df$ak.norm, xout = xpres, rule = 2)$y,
ap = approx(oco.df$pres, oco.df$ap, xout = xpres, rule = 2)$y
) %>% arrange(desc(xpres))
# calculate diff in pressure between particles based on release heights
dp = abs(c(oco.psfc - max(sel.p$xpres), diff(sel.p$xpres)))
# merge all info per particle and calculate dry-air column density in mol m-2
g = 9.8 # m s-2
Mdry = 29 / 1E3 # kg mol-1
# dry air column density in mol m-2, 100 for converting pres from hPa to Pa
xdry.tot = 1 / g / Mdry * oco.psfc * 100 - oco.xh2o
sel.p = sel.p %>% mutate(xdry = (1 - q) / g / Mdry * dp * 100,
pwf = xdry / xdry.tot) # xdry for total column
# lastly locate the OCO-2 levels that above the STILT particles
min.pres.xstilt = min(sel.p$xpres)
upper.oco.df = oco.df %>% filter(pres < min.pres.xstilt) %>%
arrange(desc(pres))
upper.oco.df$indx = seq(npar + 1, npar + nrow(upper.oco.df))
# adjust the PWF for the first level above XSTILT particles
upper.oco.df$pwf[1] = 1 - sum(sel.p$pwf) - sum(upper.oco.df$pwf[-1])
cat('get.wgt.oco.func(): Done computing PWF...\n')
## ----- Combine all interpolated OCO-2 profiles and calculating AK_norm *PWF
lower.df = sel.p %>% dplyr::select(ak.norm, pwf, xpres, ap, indx) %>%
rename(pres = xpres)
combine.prof = rbind(lower.df, upper.oco.df) %>%
mutate(ak.pwf = ak.norm * pwf)
# NOW ALL PROFILES CONTAIN--pressure, pressure weighting, normalized ak,
#AK*PWF and a priori contribution
combine.prof$stiltTF = F
combine.prof[combine.prof$indx <= npar, 'stiltTF'] = T
combine.prof # return weighting profiles
}
# end of subroutine
|
11eceaf69fb24dc10d13f24ba4e61c78af954272 | b27f3ca9fb38ee017c82a3308ba774f20070cc6d | /code/final_code_Kristin/extract/difference_maps.R | 9483caffdf11ebd359a8307149570fb5e66e684d | [] | no_license | PennLINC/isla | ef085f563827c50ed51a12f2085ca4589c71f75a | 24e0a252b0c47cca0a458a3a82aaedad16c65187 | refs/heads/master | 2022-04-19T19:40:47.046576 | 2020-04-21T19:29:44 | 2020-04-21T19:29:44 | 151,718,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,475 | r | difference_maps.R | library(methods)
library(fslr)
library(stringr)
library(dplyr)
library(data.table)
library(readr)
# Project roots on the cluster: PNC imaging data and the IMCO working dir.
pncdir = '/project/taki2/pnc/n1601_dataFreeze2016'
rootdir = '/project/taki2/kristin_imco'
# Brainmask
maskdir = file.path(rootdir, 'masks')
# run: for each subject with a predicted-GMD coupling map, compute the
# voxelwise difference (ISLA prediction minus observed modality image),
# mask it to gray matter, and write it next to the coupling map.
#   nsize - neighborhood size used in the coupling-map directory name
#   type  - modality: 'cbf', 'alff', or 'reho' (selects mask and input image)
run = function(nsize, type){
imcodir = file.path(rootdir, 'coupling_maps', paste0('gmd_', type, '_size', nsize))
subjects = list.files(imcodir)
# lapply used for its side effects only (reading/writing NIfTI files).
lapply(subjects, function(x){
islafile = file.path(imcodir, x, 'predictedGMD1.nii.gz')
if(file.exists(islafile)){
isla = readnii(islafile)
# Pick the coverage mask and the subject's observed image per modality.
if(type=='cbf'){
maskFile = file.path(maskdir, 'gm10perc_PcaslCoverageMask.nii.gz')
maskImg = readnii(maskFile)
yfile = file.path(pncdir, 'neuroimaging/asl/voxelwiseMaps_cbf', paste0(x, '_asl_quant_ssT1Std.nii.gz'))
}
if(type=='alff'){
maskFile = file.path(maskdir, 'gm10perc_RestCoverageMask.nii.gz')
maskImg = readnii(maskFile)
yfile = file.path(pncdir, 'neuroimaging/rest/voxelwiseMaps_alff', paste0(x, '_alffStd.nii.gz'))
}
if(type=='reho'){
maskFile = file.path(maskdir, 'gm10perc_RestCoverageMask.nii.gz')
maskImg = readnii(maskFile)
yfile = file.path(pncdir, 'neuroimaging/rest/voxelwiseMaps_reho', paste0(x, '_rehoStd.nii.gz'))
}
yy = readnii(yfile)
# Difference map, restricted to the gray-matter coverage mask.
diff = isla - yy
diff = diff*maskImg
writenii(diff, filename=file.path(imcodir, x, paste0('isla_', type, '_diff')))
}
})
}
# Generate difference maps for both neighborhood sizes of each modality.
run(4, 'cbf')
run(3, 'cbf')
run(4, 'alff')
run(3, 'alff')
run(4, 'reho')
run(3, 'reho')
|
b0ee5b267fdad28a8dba014c8ea6b15643e2c604 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/diffeqr/tests/testthat.R | 61bb589277ad97ef487e7d0c34448bfd76dd1824 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 42 | r | testthat.R | library(testthat)
# Run the testthat suite for the diffeqr package.
test_check("diffeqr")
|
b03d2afba758652179c894f81ca415317bc1a4b7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/spMC/examples/mixplot.Rd.R | 81952db0739eb2f7f6e3f0ca3cb9d6d8ad20698d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 654 | r | mixplot.Rd.R | library(spMC)
### Name: mixplot
### Title: Plot of Multiple One-dimensional Transiograms
### Aliases: mixplot
### Keywords: spatial distribution hplot
### ** Examples
## No test:
data(ACM)
# Estimate empirical transition
# probabilities by points
# (direction vector c(0, 0, 1), up to 100 lags)
ETr <- transiogram(ACM$MAT3, ACM[, 1:3], c(0, 0, 1), 100)
# Estimate the transition rate matrix
RTm <- tpfit(ACM$MAT3, ACM[, 1:3], c(0, 0, 1))
# Compute transition probabilities
# from the one-dimensional MC model
# at the same lags as the empirical transiogram
TPr <- predict(RTm, lags = ETr$lags)
# Plot empirical vs. theoretical transition probabilities
# (empirical as points, theoretical as lines)
mixplot(list(ETr, TPr), type = c("p", "l"), pch = "+", col = c(3, 1))
## End(No test)
|
0ce85293fe36862c3ce641f30afd377dbbf844c0 | 438f8d0cea301874566a98cab57cdacf4250c00a | /db_update.R | 733df942b4378106020bd12ca6ca66ceb252657f | [] | no_license | Poissonfish/MC_Function | 3efb019d17dc6d35407cab370e1bdf5ffd3ab62f | 9d02683d1f38b0c0a441850744f7c80f58470357 | refs/heads/master | 2021-01-17T13:17:56.045533 | 2016-10-16T00:19:26 | 2016-10-16T00:19:26 | 59,733,350 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 870 | r | db_update.R | library(RCurl)
library(magrittr)
setwd("/home/mclab/workspace/M.C.Function/db")
# List the NCBI BLAST db FTP directory, split the raw listing on backslashes
# and non-printable characters, and keep only the nt.* archive names.
db_names=getURL("ftp://ftp.ncbi.nlm.nih.gov/blast/db/",verbose=TRUE,ftp.use.epsv=TRUE, dirlistonly = TRUE) %>%
strsplit("[\\\\]|[^[:print:]]",fixed = FALSE) %>%
unlist() %>%
(function(x){x[grep('^nt\\.',x)]})
#UPADATING DABABASE FROM NCBI FTP
filepath=sprintf('ftp://ftp.ncbi.nlm.nih.gov/blast/db/%s',db_names)
# Download in two halves with a 60s pause between them.
# NOTE(review): presumably the pause is to avoid hammering the FTP server --
# confirm; also note 1:(length/2) truncates for odd counts.
for (i in 1:(length(db_names)/2)){
download.file(filepath[i],
paste(getwd(),'/',db_names[i],sep=''),
mode='wb')
}
Sys.sleep(60)
for (i in ((length(db_names)/2)+1):length(db_names)){
download.file(filepath[i],
paste(getwd(),'/',db_names[i],sep=''),
mode='wb')
}
#Extract files from compressed items
# Negative-lookahead regex (perl=TRUE) skips the .md5 checksum files.
for (i in grep("^(?!.*md5)",db_names, perl=TRUE)){
untar(db_names[i])
}
|
c362630745000ee0a57582937e8e823417df0e9a | 3c782d524ed4378ecd48022797182f23869f85cd | /first.r | 0f36a56b72fb65f806bb7aa4b6bbbe38cade8eff | [] | no_license | DilberHusainKhan/R_Programming | 9002778db639c00ed4dc5dff84c7e8e7304e724a | 9388317033841a963856af6e7ee8c6ff4e76d0fb | refs/heads/main | 2023-03-30T14:10:07.150039 | 2021-03-17T09:46:45 | 2021-03-17T09:48:40 | 348,655,604 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 186 | r | first.r | print("Dilber Husain KHan")
#assignment -> , <- ,->> ,<<- ,=
# Basic assignment examples: <- and = both bind a value to a name.
x<-10
x
y<-15
z=99
#vector
# Character vector built with c(); class() reports its type.
apple<- c("red","green","yellow") #c() is a combine function.
print(apple)
print(class(apple))
|
d71a9634a33f1ef791293562424ebb6825063f22 | 7374303c14e64c42bed64be1c8aff78e9aefa3d8 | /inst/doc/kde.R | 7031484140f75cbc8e6302c70c3199d237e6d08a | [] | no_license | cran/ks | cd7d27f9a0d865f577c0bc4e857dbeca09ed55a6 | f571ffa28e9dbc5ab649b4f6ac30879cf8fad43c | refs/heads/master | 2022-11-30T03:47:41.411752 | 2022-11-24T02:40:02 | 2022-11-24T02:40:02 | 17,696,943 | 6 | 6 | null | null | null | null | UTF-8 | R | false | false | 2,761 | r | kde.R | ### R code from vignette source 'kde.Rnw'
# NOTE(review): this file appears to be machine-extracted (Stangle output)
# from the 'kde.Rnw' vignette of the ks package; edits here are likely to
# be overwritten on regeneration — confirm before modifying.
###################################################
### code chunk number 1: kde.Rnw:99-107
###################################################
# Simulate samp points from a trimodal bivariate normal mixture:
# component means in the rows of mus, 2x2 covariances stacked in Sigmas,
# mixing weights props (the central component gets weight cwt = 3/11).
library(ks)
set.seed(8192)
samp <- 200
mus <- rbind(c(-2,2), c(0,0), c(2,-2))
Sigmas <- rbind(diag(2), matrix(c(0.8, -0.72, -0.72, 0.8), nrow=2), diag(2))
cwt <- 3/11
props <- c((1-cwt)/2, cwt, (1-cwt)/2)
x <- rmvnorm.mixt(n=samp, mus=mus, Sigmas=Sigmas, props=props)
###################################################
### code chunk number 2: kde.Rnw:113-114
###################################################
# Contour plot of the true (target) mixture density
plotmixt(mus=mus, Sigmas=Sigmas, props=props, xlim=c(-4,4), ylim=c(-4,4))
###################################################
### code chunk number 3: kde.Rnw:116-117
###################################################
# Scatter plot of the simulated sample
plot(x, xlim=c(-4,4), ylim=c(-4,4), xlab="x", ylab="y")
###################################################
### code chunk number 4: kde.Rnw:126-128
###################################################
# Plug-in bandwidth selectors: unconstrained and diagonal bandwidth matrices
Hpi1 <- Hpi(x=x)
Hpi2 <- Hpi.diag(x=x)
###################################################
### code chunk number 5: kde.Rnw:132-134
###################################################
# Kernel density estimates using the two plug-in bandwidths
fhat.pi1 <- kde(x=x, H=Hpi1)
fhat.pi2 <- kde(x=x, H=Hpi2)
###################################################
### code chunk number 6: kde.Rnw:142-144 (eval = FALSE)
###################################################
## plot(fhat.pi1)
## plot(fhat.pi2)
###################################################
### code chunk number 7: kde.Rnw:156-157
###################################################
# Contour plot of the plug-in (full matrix) density estimate
plot(fhat.pi1, main="Plug-in", cex.main=1.4, xlim=c(-4,4), ylim=c(-4,4))
###################################################
### code chunk number 8: kde.Rnw:159-160
###################################################
plot(fhat.pi2, main="Plug-in diagonal", cex.main=1.4, xlim=c(-4,4), ylim=c(-4,4))
###################################################
### code chunk number 9: kde.Rnw:171-173
###################################################
# Smoothed cross-validation (SCV) bandwidth selectors: full and diagonal
Hscv1 <- Hscv(x=x)
Hscv2 <- Hscv.diag(x=x)
###################################################
### code chunk number 10: kde.Rnw:177-179
###################################################
# Kernel density estimates using the two SCV bandwidths
fhat.cv1 <- kde(x=x, H=Hscv1)
fhat.cv2 <- kde(x=x, H=Hscv2)
###################################################
### code chunk number 11: kde.Rnw:181-182
###################################################
plot(fhat.cv1, main="SCV", cex.main=1.4, xlim=c(-4,4), ylim=c(-4,4))
###################################################
### code chunk number 12: kde.Rnw:184-185
###################################################
plot(fhat.cv2, main="SCV diagonal", cex.main=1.4, xlim=c(-4,4), ylim=c(-4,4))
aa4d4678987c7b04768f796a6b0352ab4469303d | b56b1c0841477c9505405170e7a3f56b81088270 | /errortypes/server.R | 7470830d0e75639151e83a133e7c3e3bbfe2fcbf | [] | no_license | dswalter/shinylibs | 365b1202adf12d106f82aad7fa88e2bbc20d8f92 | 7f7f35ab107fe5869a44a174e5be320b2c36765e | refs/heads/master | 2021-01-10T20:46:24.309440 | 2015-05-12T03:49:44 | 2015-05-12T03:49:44 | 29,115,500 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,546 | r | server.R | library(shiny)
library(ggplot2)
library(grid)
#' Arrange multiple ggplot objects on one page.
#'
#' Plots can be passed individually via `...` or collected in `plotlist`.
#' When `layout` is NULL a grid with `cols` columns is derived, filled
#' column-wise; otherwise `layout` is an integer matrix whose entries give
#' each plot's position (a repeated index spans multiple cells).
#'
#' @param ... ggplot objects to draw.
#' @param plotlist Optional list of additional ggplot objects.
#' @param file Unused; retained for backward compatibility with callers.
#' @param cols Number of columns when `layout` is not supplied.
#' @param layout Optional integer matrix of plot positions.
#' @return Invisibly NULL; called for its side effect of drawing plots.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # Make a single list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  if (numPlots == 0) {
    # Nothing to draw; the original would build a corrupt seq(1, 0) layout
    return(invisible(NULL))
  }
  # If layout is NULL, derive it from 'cols': enough rows to hold every
  # plot, positions filled column-wise
  if (is.null(layout)) {
    nRows <- ceiling(numPlots / cols)
    layout <- matrix(seq_len(cols * nRows), ncol = cols, nrow = nRows)
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Fresh page with one viewport cell per layout slot
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot in every (row, col) cell its index occupies
    for (i in seq_len(numPlots)) {
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
  invisible(NULL)
}
# Define server logic required to draw a histogram
# Shiny server for a Type 1 / Type 2 error teaching app: the user moves a
# decision point between two normal distributions (null: mean 15,
# alternative: mean 13, both sd 1) and the outputs report the resulting
# error probabilities and power.
shinyServer(function(input, output) {
  # Expression that generates a histogram. The expression is
  # wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should re-execute automatically
  # when inputs change
  # 2) Its output type is a plot
  output$distPlot <- renderPlot({
    x <- faithful[, 2] # Old Faithful Geyser data
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'darkgray', border = 'white')
  })
  # Two stacked density plots: null distribution N(15,1) on top,
  # alternative N(13,1) below, each with a vertical line at the chosen
  # decision point and the left-tail region shaded.
  output$changedecision <-renderPlot({
    xmin<-10
    xmax<-20
    nullmean<-15
    sd<-1
    decision_point<-input$decision_point
    rng<-seq(xmin,xmax,0.05)
    # Null density N(15, 1) on the plotting grid
    null_vals<-dnorm(x=rng,mean=nullmean,sd=sd)
    #null_vals<-dnorm(x=rng,mean=input$onemean,sd=input$onesdev)
    nulldf<-data.frame(x=rng,null_vals)
    # Zero the density right of the decision point so only the left tail
    # (the Type 1 error region) is shaded by the second ribbon
    nulldf$type1vals<-nulldf$null_vals
    nulldf$type1vals[nulldf$x>decision_point]<-0
    type_1_df<-subset(nulldf,x<=decision_point)
    # NOTE(review): geom_vline(x=...) is a very old argument form; current
    # ggplot2 expects geom_vline(xintercept=...) — confirm the installed
    # ggplot2 version still accepts it.
    nullplot<-ggplot(data=nulldf,aes(x=x))+xlim(xmin,xmax)+
      ylab("Density")+xlab("Possible Lengths")+
      ggtitle("Probability Distribution under the Null Hypothesis")+
      geom_ribbon(data=nulldf,aes(ymin=0,ymax=null_vals),fill="#238E23")+
      geom_vline(x=decision_point)+
      geom_ribbon(data=type_1_df, aes(ymin=0,
                                      ymax=type1vals),fill="red")
    # Same construction for the alternative hypothesis N(13, 1); here the
    # shaded left tail is the power region (variables are reused, not new)
    altmean<-13
    sd<-1
    decision_point<-input$decision_point
    rng<-seq(xmin,xmax,0.05)
    null_vals<-dnorm(x=rng,mean=altmean,sd=sd)
    #null_vals<-dnorm(x=rng,mean=input$onemean,sd=input$onesdev)
    nulldf<-data.frame(x=rng,null_vals)
    nulldf$type1vals<-nulldf$null_vals
    nulldf$type1vals[nulldf$x>decision_point]<-0
    type_1_df<-subset(nulldf,x<=decision_point)
    altplot<-ggplot(data=nulldf,aes(x=x))+xlim(xmin,xmax)+
      ylab("Density")+xlab("Possible Lengths")+
      ggtitle("Probability Distribution under the Alternate Hypothesis")+
      geom_ribbon(data=nulldf,aes(ymin=0,ymax=null_vals),fill="#FF7F00")+
      geom_vline(x=decision_point)+
      geom_ribbon(data=type_1_df, aes(ymin=0,
                                      ymax=type1vals),fill="steelblue")
    # Stack the null plot above the alternative plot
    multiplot(nullplot,altplot)
  })
  # P(Type 1) = P(reject null | null true) = left-tail area of N(15,1)
  output$type1 <- renderText({
    firsttext<-"The probability of a type 1 error is the probability of incorrectly rejecting the null.
    In this case, that is concluding that the bolt is from machine B when it is really from machine A. With
    a decision point of "
    secondtext<-", that probability is the area under a Normal(15,1) curve to the left of our decision point,
    which is "
    colortext<- "This area is colored red in the plots above."
    cdfval<-round(pnorm(input$decision_point,mean=15,sd=1),3)
    paste(firsttext,input$decision_point,secondtext,cdfval,". ",colortext,sep="")
  })
  # P(correctly fail to reject) = right-tail area of N(15,1)
  output$keepnull<-renderText({
    firsttext<-"Correctly failing to reject the null is the same as concluding that the bolt is from
    machine A when it is really from machine A. With a decision point of "
    secondtext<-", that probability is the area under a Normal(15,1) curve to the right of our decision point,
    which is "
    colortext<- "This area is colored green in the plots above."
    cdfval<-round(1-pnorm(input$decision_point,mean=15,sd=1),3)
    paste(firsttext,input$decision_point,secondtext,cdfval,". ",colortext,sep="")
  })
  # Power = P(reject null | alternative true) = left-tail area of N(13,1)
  output$power<-renderText({
    firsttext<-"Power is the probability of correctly rejecting the null.
    In this case, that is concluding that the bolt is from machine B when it is really from machine B. With
    a decision point of "
    secondtext<-", that probability is the area under a Normal(13,1) curve to the left of our decision point,
    which is "
    colortext<- "This area is colored blue in the plots above."
    cdfval<-round(pnorm(input$decision_point,mean=13,sd=1),3)
    paste(firsttext,input$decision_point,secondtext,cdfval,". ",colortext,sep="")
  })
  # P(Type 2) = P(fail to reject | alternative true) = right-tail of N(13,1)
  output$type2<-renderText({
    firsttext<-"The probability of a type 2 error is the probability of incorrectly failing to reject the null.
    In this case, that is concluding that the bolt is from machine A when it is really from machine B. With
    a decision point of "
    secondtext<-", that probability is the area under a Normal(13,1) curve to the right of our decision point,
    which is "
    colortext<- "This area is colored orange in the plots above."
    cdfval<-round(1-pnorm(input$decision_point,mean=13,sd=1),3)
    paste(firsttext,input$decision_point,secondtext,cdfval,". ",colortext,sep="")
  })
  # 2x2 summary table. matrix() fills column-wise, so column 1
  # ("P(Reject Null)") holds Type 1 and Power, column 2 ("P(FTR Null)")
  # holds the keep-null probability and Type 2.
  output$all_table<-renderTable({
    type_1<-paste("Type 1 = ",round(pnorm(input$decision_point,mean=15,sd=1),3),sep="")
    keepnull<-round(1-pnorm(input$decision_point,mean=15,sd=1),3)
    power<-paste("Power = ",round(pnorm(input$decision_point,mean=13,sd=1),3),sep="")
    type_2<-paste("Type 2 = ",round(1-pnorm(input$decision_point,mean=13,sd=1),3),sep="")
    out_matrix<-matrix(c(type_1,power,keepnull,type_2),ncol=2,nrow=2)
    rownames(out_matrix)<-c("Null is True", "Null is not True")
    colnames(out_matrix)<-c("P(Reject Null)","P(FTR Null)")
    out_matrix
  })
})
|
812bcbb2b7c55f9f77ec5bd9779edc5deb2d4ffe | 2cc758c13a14e5044b08a7bafb6d9f923a83c773 | /calendar/Tabs_ui/html_styles_ui.R | adf36dad71db9f2a048a78d6f46f963526ab98b6 | [] | no_license | TBrach/shiny_apps | 2cfdbbe11a1702251198d7186c16a87c8b42004d | c727095900d35d0c06107d7e3fdffffbd1d03ff2 | refs/heads/master | 2023-02-27T15:12:24.809444 | 2021-02-08T08:23:53 | 2021-02-08T08:23:53 | 326,055,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 451 | r | html_styles_ui.R | html_styles <- tags$head(
  # set the color and font size of the shiny text outputs (info texts)
  tags$style(HTML("
                  .shiny-text-output {
                  color: rgb(102,0,51);
                  font-size: 15px;
                  }
                  ")),
  # render validation error messages in red
  tags$style(HTML("
                  .shiny-output-error-validation {
                  color: red;
                  }
                  "))
)
a5ed8a71b2f4b2c2f761deb426e79a1ed2561df2 | 4d242778c9ed5edc3ef5ba1d75b996151c88c49f | /r-scripts/pop.R | df1b3d6765ecd607037907fcb7d60adac7c03ce3 | [
"Apache-2.0"
] | permissive | pviotti/lurch | 39486ce896ed2eab703f75e33e68e1da4cd5a950 | ec8e33fdda1bdec8672348ee1728f4f7f25f9c54 | refs/heads/master | 2021-01-19T16:46:27.710869 | 2013-09-17T13:21:50 | 2013-09-17T13:21:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 338 | r | pop.R | suppressMessages(library("VGAM"))
# Draw n_req download requests over n_files files whose popularity follows
# a Zipf distribution with exponent param_zipf (dzipf comes from VGAM).
param_zipf <- 2.0  # Zipf exponent (larger => more skewed popularity)
n_files <- 50      # number of files
n_req <- 1000      # number of requests (downloads)
# Per-file request probabilities from the Zipf pmf over ranks 1..n_files
ps <- dzipf(seq_len(n_files), n_files, param_zipf)
# BUGFIX: the original passed replace='TRUE' (a character string); sample()
# only worked because if() coerces "TRUE" to logical. Use the logical TRUE.
req <- sample(seq_len(n_files), n_req, replace = TRUE, prob = ps)
#H = hist(req, breaks=1:(n_files+1)-0.5, prob=TRUE)
#lines(1:n_files, ps)
#sum(H$density)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.