blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c9a226156c0eec121b004aca4f926df15ce5244
|
570d4141186786df5179cc4346dd3808c1c41f26
|
/wrangle/AMP-AD/rosmap.R
|
e080d90751dcddc0259106dfcfb20e7002e3fccc
|
[
"MIT"
] |
permissive
|
ArtemSokolov/amp-ad
|
552fee92c0ec30539386745210f5ed2292931144
|
dd5038f2497698b56a09471c89bb710329d3ef42
|
refs/heads/master
| 2021-06-21T21:04:44.368314
| 2019-09-10T17:40:48
| 2019-09-10T17:40:48
| 114,150,614
| 0
| 4
|
MIT
| 2019-09-10T17:40:49
| 2017-12-13T17:39:02
|
HTML
|
UTF-8
|
R
| false
| false
| 3,643
|
r
|
rosmap.R
|
## Wrangling of ROS/MAP RNAseq data and matching clinical annotations
##
## by Artem Sokolov
suppressMessages(library( tidyverse ))
suppressMessages(library( synapseClient ))
library( stringr )
## Composes a mapping between ENSEMBL gene IDs and HUGO gene symbols.
##
## Returns a two-column data frame (HUGO, ENSEMBL) built from the
## EnsDb.Hsapiens.v86 annotation package, with duplicate pairs removed.
ens2hugo <- function()
{
edb <- EnsDb.Hsapiens.v86::EnsDb.Hsapiens.v86
tx <- ensembldb::transcripts( edb, column=c("gene_id", "gene_name") )
## data_frame() is deprecated in tibble; tibble() is the supported constructor
tibble( HUGO = tx$gene_name, ENSEMBL = tx$gene_id ) %>% distinct
}
## Parse local directory specification
## Usage: Rscript rosmap.R [output_dir]; falls back to a default path.
argv <- commandArgs( trailingOnly = TRUE )
if( length(argv) == 0 )
{
cat( "NOTE: No directory specified on command line. Using default.\n" )
local.dir <- "/data/AMP-AD/ROSMAP"
} else { local.dir <- argv[1] }
## Create directory if it doesn't exist
dir.create( local.dir, showWarnings=FALSE )
cat( "Wrangling ROS/MAP dataset to", local.dir, "\n" )
## Login to Synapse and download/wrangle data
## NOTE(review): this uses the legacy synapseClient API (synGet()@filePath);
## the newer synapser package exposes a different interface -- confirm which
## is installed before running.
cat( "Logging in to Synapse...\n" )
synapseLogin( rememberMe=TRUE )
## Read raw expression matrix
cat( "Downloading expression data...\n" )
fnX <- synGet( "syn3505720", downloadLocation = local.dir )@filePath
cat( "Loading local copy...\n" )
Xraw <- suppressMessages( read_tsv( fnX ) )
## Map ENSEMBL Gene IDs to HUGO
## Remove alternative splice forms, non-coding RNA and duplicated genes
## There are only 14 duplicates after removing non-coding transcripts
cat( "Mapping gene IDs to HUGO...\n" )
E2H <- ens2hugo()
cat( "Removing alternative splice forms, non-coding RNA and duplicate entries...\n" )
## Helper: drop rows whose HUGO symbol matches the given regex
f <- function( x, pattern ) { filter( x, !grepl(pattern, HUGO) ) }
## Strip the version suffix from gene_id (ENSGxxxx.N -> ENSGxxxx) before
## joining against the ENSEMBL->HUGO map, then filter non-coding families
X <- Xraw %>% mutate( ENSEMBL = str_split( gene_id, "\\.", simplify=TRUE )[,1] ) %>%
inner_join( E2H, by="ENSEMBL" ) %>% filter( !grepl("\\.", HUGO) ) %>%
filter( !(HUGO %in% c("Y_RNA", "Metazoa_SRP", "Vault", "5S_rRNA")) ) %>%
f( "^MIR" ) %>% f( "^RNU" ) %>% f( "^SNOR" ) %>% f( "^U[1-9]$" ) %>%
f( "^SCARNA" ) %>% f( "^sno" ) %>% f( "^LINC" ) %>% f( "-AS[1-9]$" ) %>%
f( "^ACA[1-9]" ) %>% filter( !duplicated( HUGO ) ) %>%
select( -tracking_id, -gene_id, -ENSEMBL )
## Log-transform the data and combine the replicates
cat( "Additional processing...\n" )
flog <- function(v) {log2(v+1)}
fmed <- function(x) {x %>% as.matrix %>% apply( 1, median )}
## NOTE(review): funs(), mutate_at(), gather() and spread() below are
## deprecated in modern dplyr/tidyr; kept as-is since this script targets
## the older package versions it was written against.
## The three 492_120515_* replicate columns are collapsed into a single
## median column, then sample IDs lose their 2-character batch suffix.
XX <- X %>% mutate_at( vars(-HUGO), funs(flog) ) %>%
mutate( `492_120515_j` = fmed(select( ., contains("492_120515") )) ) %>%
select( -`492_120515_0`, -`492_120515_6`, -`492_120515_7` ) %>%
gather( rnaseq_id, Value, -HUGO ) %>%
mutate( rnaseq_id = str_sub( rnaseq_id, 0, -3 ) )
## Match sample IDs against individual identifiers
cat( "Matching sample and individual IDs...\n" )
fnZ <- synGet( "syn3382527", downloadLocation = local.dir )@filePath
XZ <- suppressMessages( read_csv(fnZ) ) %>% select( projid, rnaseq_id ) %>% na.omit %>%
distinct %>% inner_join( XX, ., by="rnaseq_id" )
## Match expression data up against the following clinical covariates:
## ID, PMI, AOD, CDR, Braak, BrodmannArea
## NOTE(review): CDR is populated from cogdx (a clinical diagnosis code),
## not an actual CDR score -- confirm this renaming is intended downstream.
cat( "Matching against clinical covariates...\n" )
fnY <- synGet( "syn3191087", downloadLocation = local.dir )@filePath
Y <- suppressWarnings( suppressMessages( read_csv(fnY) ) ) %>%
select( projid, PMI = pmi, AOD = age_death, CDR = cogdx, Braak = braaksc ) %>%
mutate( BrodmannArea = "BM9,BM46" )
## Combining everything into a common data frame
cat( "Finalizing...\n" )
XY <- inner_join( Y, XZ, by="projid" ) %>% rename( ID = projid, Barcode = rnaseq_id ) %>%
spread( HUGO, Value )
## Write out wrangled dataset to file
fnOut <- file.path( local.dir, "rosmap-wrangled.tsv.gz" )
cat( "Writing output to", fnOut, "\n" )
write_tsv( XY, fnOut )
|
278e4541a7e5eb551538ea5585c6b2385847cbb5
|
0a19c5a8ee204ddf9d6c8ac95692e551c20913b7
|
/knn_tuning_FP.R
|
e6b44237a789ab8505948e62abd92ff71a9923a9
|
[] |
no_license
|
noahholubow/airbnb_nyc
|
53f20e4177f4014d6b17ad9306e7217493e640c2
|
be69e0e0b3d2547118071a9cc529d98561f45bfc
|
refs/heads/main
| 2023-03-17T21:17:16.840135
| 2021-03-15T15:40:28
| 2021-03-15T15:40:28
| 348,025,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 750
|
r
|
knn_tuning_FP.R
|
# K-nearest-neighbors tuning for the NYC Airbnb price model
# (header previously mislabeled this script as "Boosted Tree tuning")
# load packages
library(tidyverse)
library(tidymodels)
# set seed for reproducible resampling/tuning
set.seed(2021)
# load necessary items (recipe, folds, etc. prepared by the setup script)
load("airbnb_setup_log.rda")
# define model: regression KNN; the number of neighbors is tuned
knn_model <- nearest_neighbor(
mode = "regression",
neighbors = tune()) %>%
set_engine("kknn")
# setup tuning grid
knn_params <- parameters(knn_model)
# define grid
knn_grid <- grid_regular(knn_params, levels = 5) # 5 regularly spaced values of the `neighbors` parameter
# workflow: couple the model with the preprocessing recipe
knn_workflow <-
workflow() %>%
add_model(knn_model) %>%
add_recipe(airbnb_recipe)
# tuning/fitting across the cross-validation folds
knn_tune <- knn_workflow %>%
tune_grid(resamples = airbnb_fold, grid = knn_grid)
# write out results & workflow
save(knn_tune, knn_workflow, file = "knn_tune_log.rda")
|
c24be13e10e6346ce45850eb3d4ef2b4c48e56e3
|
e79df1c2164b29c127c2102bc4495b2384f3895e
|
/R/preview_images.R
|
7186cdb782826ea9cf990b938b3ed8bf01fc2093
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
feddelegrand7/ralger
|
1b263a3c15c3b208a96a633f30ec2433efb4aabc
|
57ebc6b07511675c23d91007e701a9722aeb86d4
|
refs/heads/master
| 2023-03-13T08:54:24.066033
| 2023-03-05T20:41:03
| 2023-03-05T20:41:03
| 241,394,878
| 162
| 18
|
NOASSERTION
| 2022-06-18T19:16:22
| 2020-02-18T15:21:00
|
R
|
UTF-8
|
R
| false
| false
| 2,027
|
r
|
preview_images.R
|
#' Scrape Images URLs
#'
#' @param link the link of the web page
#' @param askRobot logical. Should the function check the site's robots.txt to determine whether scraping this web page is allowed? Default is FALSE.
#'
#' @return Images URLs
#'
#' @examples \donttest{
#'
#' images_preview(link = "https://rstudio.com/")
#'
#' }
#'
#' @export
#' @importFrom rvest html_nodes html_attr %>%
#' @importFrom xml2 read_html
#' @importFrom robotstxt paths_allowed
#' @importFrom crayon green
#' @importFrom crayon bgRed
#' @importFrom curl has_internet
#' @importFrom utils download.file
images_preview <- function(link, askRobot = FALSE) {
  # Validate input: 'link' is mandatory and must be a character string.
  if (missing(link)) {
    stop("the 'link' parameter is mandatory")
  }
  if (!is.character(link)) {
    stop("the 'link' parameter must be provided as a character string")
  }
  # Optionally consult robots.txt. NOTE: this only informs the user; the
  # function still proceeds to scrape even when disallowed (behavior kept
  # for backward compatibility with existing callers).
  if (askRobot) {
    if (paths_allowed(link)) {
      message(green("the robot.txt doesn't prohibit scraping this web page"))
    } else {
      message(bgRed(
        "WARNING: the robot.txt doesn't allow scraping this web page"
      ))
    }
  }
  # Fetch each page and collect the src attribute of every <img> node.
  # Errors are mapped to NA with a human-readable diagnostic.
  tryCatch(
    expr = {
      img_urls <- lapply(link, function(url) {
        url %>%
          read_html() %>%
          html_nodes("img") %>%
          html_attr("src")
      })
      return(img_urls %>% unlist())
    },
    error = function(cond) {
      if (!has_internet()) {
        message(paste0("Please check your internet connection: ", cond))
        return(NA)
      } else if (grepl("current working directory", cond) || grepl("HTTP error 404", cond)) {
        message(paste0("The URL doesn't seem to be a valid one: ", link))
        message(paste0("Here the original error message: ", cond))
        return(NA)
      } else {
        message(paste0("Undefined Error: ", cond))
        return(NA)
      }
    }
  )
}
|
38dcba31dec840cb36c1067e27068bf07fe92540
|
93286036db17d0a5d6f49031ff109e48ee4688d0
|
/isogram/isogram.R
|
ef96e4e4530e58447d3324528f4257933d209290
|
[
"MIT"
] |
permissive
|
stephenfeagin/exercism-r
|
378dbf2f1c89e1d0703a48cbb2ab52df37f40056
|
0ac8a5b4a0e726fa97dce2b7e9ae9ffaa77e08f0
|
refs/heads/master
| 2020-04-11T19:02:55.171832
| 2018-12-17T01:23:32
| 2018-12-17T01:23:32
| 162,020,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 386
|
r
|
isogram.R
|
is_isogram <- function(word) {
  # An isogram contains no repeated letters. Case is ignored, as are
  # non-letter characters such as hyphens and spaces.
  cleaned <- gsub("[^[:alpha:]]", "", tolower(word))
  letter_vec <- strsplit(cleaned, "")[[1]]
  # TRUE when no letter occurs more than once
  !any(duplicated(letter_vec))
}
|
0ca4901e5e7637444c077a5206a61709955309a0
|
187e9cc9d5ef51c4024ff20418416cdf8883cece
|
/R/probAffectedRelative.R
|
2a0a8f0ac9c1951d21469ed598c8f7f19f52fd20
|
[] |
no_license
|
DudbridgeLab/familialdisease
|
adc67735415b6c5ba2d3d5da78fc88e4c2353524
|
7a4dadcb2d8112c2af01ce89607eca33e0a6e65e
|
refs/heads/master
| 2021-01-02T08:59:29.244137
| 2018-07-17T19:42:47
| 2018-07-17T19:42:47
| 99,115,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,801
|
r
|
probAffectedRelative.R
|
#' Probability of a relative being affected with familial disease
#'
#' Given a series of pedigrees of different sizes, each with at least r affected relatives of the proband,
#' probAffectedRelative estimates the probability of a relative being affected with familial disease,
#' where this probability is an average over all observed relationships to probands.
#' This probability can be used at the pf parameter in the probFamilial function.
#'
#' Parameter r accounts for ascertainment of pedigrees that are highly likely to segregate familial disease.
#'
#' Each pedigree contributes to the likelihood the binomial probability of m successes in k trials,
#' conditional on at least r successes. Estimation is then by maximum likelihood.
#'
#' @examples
#' # Familial nonmedullary thyroid cancer, table 2 in Charkes 2006.
#' m=c(2,2,2,4,2,2,2)
#' k=c(9,9,10,7,9,14,7)
#' probAffectedRelative(m,k,2)
#' @references Dudbridge F, Brown SJ, Ward L, Wilson SG, Walsh JP.
#' How many cases of disease in a pedigree imply familial disease?
#' Submitted.
#' @references Charkes ND (2006)
#' On the Prevalence of Familial Nonmedullary Thyroid Cancer in Multiply Affected Kindreds.
#' Thyroid 16:181-186
#'
#' @param m Vector in which each element corresponds to a pedigree and is the number of affected relatives of the proband.
#' @param k Vector in which each element corresponds to a pedigree and is the number of relatives of the proband with known affection status.
#' @param r Minimum number of affected relatives in each pedigree.
#'
#' @return Probability of a relative being affected with familial disease.
#' @export
probAffectedRelative <- function(m,k,r) {
  # Negative log-likelihood: each pedigree contributes the binomial
  # probability of m successes in k trials, conditional on observing at
  # least r successes (ascertainment correction via the truncated tail).
  llhd <- function(p) {
    -sum(log(dbinom(m, k, p) / (1 - pbinom(r - 1, k, p))))
  }
  # Maximum-likelihood estimate of the per-relative affection probability.
  # Use the full component name ($minimum) rather than partial matching.
  optimise(llhd, c(0, 1))$minimum
}
|
8b0ae95dab69baa360ba2140c8f3dce0dea6ab44
|
556fbe5b5bfec4a57f03d70656132ad36e4703b7
|
/raw_scripts/DimExploreRel.R
|
e3d9a9a6f87974b72fa6aaf10414e3577fa0a638
|
[] |
no_license
|
mastoffel/scent
|
1cf03da1b41f4a161421d5e98a884877da2ee667
|
cbc1beca6a455f3f2d23ba7c51dbe9e4aa706e61
|
refs/heads/master
| 2021-01-20T05:33:37.163758
| 2015-05-18T16:21:25
| 2015-05-18T16:21:25
| 24,264,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,938
|
r
|
DimExploreRel.R
|
DimExploreRel <- function(y, x, method = "fa", distance = "bray", subset = "all") {
  ## Explore how explained deviance changes with the number of ordination
  ## dimensions (1..10) for several dimension-reduction methods applied to
  ## a similarity/abundance matrix. Returns a numeric vector where element
  ## i is the deviance explained by the best model using i dimensions.
  ##
  ## libraries
  library(psych)
  library("ggplot2")
  library("grid")
  library("vegan")
  library("MASS") #required for vegan
  library("HDMD")
  ## project helper scripts
  source("PCADiff.R")
  source("SubsetAll.R")
  source("MinMod.R")
  #source("ResultsRelatedness.R")
  #source("resultsHet.R")
  #source("multiplot.R")

  x <- as.data.frame(x)
  abund <- x
  dev_explained <- vector()

  ## row indices of the requested subset (mums = 1:41, pups = 42:82)
  if (subset == "mums") { idx <- 1:41 }
  if (subset == "pups") { idx <- 42:82 }
  if (subset == "all") { idx <- 1:82 }

  ## pick the scoring function for the chosen ordination method; each one
  ## maps (matrix, n dimensions) -> data frame of sample scores
  get_scores <-
    if (method == "pcoa") {
      function(mat, ndim) {
        as.data.frame(cmdscale(vegdist(mat, method = distance), k = ndim))
      }
    } else if (method == "fa") {
      function(mat, ndim) {
        fit <- factor.pa.ginv(mat, nfactors = ndim, prerotate = T,
                              rotate = "varimax", scores = T, m = 4)
        as.data.frame(fit$scores)
      }
    } else if (method == "pca") {
      function(mat, ndim) {
        as.data.frame(scores(prcomp(mat), choices = 1:ndim))
      }
    } else if (method == "mds") {
      function(mat, ndim) {
        as.data.frame(scores(metaMDS(mat, distance = "bray", k = ndim, trymax = 50)))
      }
    }

  for (ndim in 1:10) {
    ## factor scores for this dimensionality
    dims <- get_scores(abund, ndim)
    combined <- PCADiff(y, dims, df = F)
    ## restrict to the chosen subset before model selection
    combined <- combined[idx, ]
    best <- MinMod(combined)[[2]]
    ## proportion of null deviance explained by the selected model
    dev_explained[ndim] <- (best$null.deviance - best$deviance) / best$null.deviance
  }
  dev_explained
}
|
404589c0377a92bbfb22b924e0663412667b2aef
|
9474632c610ac788f0c6338ada158ef54a58ddb8
|
/R_history_05_03_pm.R
|
981f8ad151d434caadf7e9d4d936f836116ce192
|
[] |
no_license
|
davekk/summary_r_scripts
|
62aa6aed3820733fb50c686ed91f88d3e807797b
|
c00a8092024b1c30a4084f01af0090581f428c85
|
refs/heads/master
| 2020-03-22T20:12:21.538896
| 2019-07-10T14:24:05
| 2019-07-10T14:24:05
| 140,581,071
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,511
|
r
|
R_history_05_03_pm.R
|
## IMPORTING DATA FROM FILES
# main function is read.table()
getwd() # identical to Bash's pwd
getwd
getwd()
getwd("~")
getwd("/home/jbde/")
setwd("~")
getwd()
setwd("Trainings/Biostats_and_R_bixcop_github/module2_R_biostats/")
setwd("~/Trainings/Biostats_and_R_bixcop_github/module2_R_biostats")
# LOADING THE DATA
read.table("fev_dataset.txt")
read.table("fev_dataset.txt") -> fev_dat
str(fev_dat)
read.table("fev_dataset.txt", header = TRUE) -> fev_dat # indicating the presence of a header row in the file
str(fev_dat)
range(fev_dat$Age) # just to get the range of ages in the dataset
range(fev_dat$Gender)
range(fev_dat$Smoke)
summary(fev_dat)
# transform the columns Gender and Smoke into factors, with levels {boy,girl} and {smoking, non-smoking}
# count the number of observations with Gender == 1
sum(fev_dat$Gender)
sum(fev_dat$Gender == 1) # same thing
nrow(fev_dat)
sum(fev_dat$Smoke)
fev_dat$Gender
?as.factor
factor(fev_dat$Gender)
tempfactor = factor(fev_dat$Gender)
levels(tempfactor)
levels(tempfactor) <- 'boy'
levels(tempfactor) <- c('girl', 'boy') # "0" becomes "girl" and "1" becomes "boy"
tempfactor
# modifying the data frame:
fev_dat$Gender = tempfactor
str(fev_dat)
tempfactor=factor(fev_dat$Smoke,levels = c("non-smoking","smoking"))
tempfactor
# the above is WRONG!
# instead, the correct way is a two-step thing:
tempfactor=factor(fev_dat$Smoke)
levels(tempfactor)
head(tempfactor)
sum(is.na(tempfactor))
# happy!
levels(tempfactor)
# change the levels carefully, respecting the order!
levels(tempfactor) <- c('non-smoking','smoking')
head(tempfactor)
# finally, change the original dataset:
fev_dat$Smoke = tempfactor
str(fev_dat)
summary(fev_dat$Gender)
summary(fev_dat) # column-wise summary
# a new graphical function: scatterplots of all pairs of variables
pairs(fev_dat)
# select the numerical columns before calling pairs():
pairs(fev_dat[1:3])
pairs(fev_dat[-c(4,5)]) # same
pairs(fev_dat[c("Age","Ht","FEV")]) # same
# draw the histograms of heights of boys (blue) and girls (red) superimposed
# first have the boxplots of boys and girls, properly labeled
boxplot(Ht ~ Gender,data=fev_dat)
boxplot(Ht ~ Gender,data=fev_dat, xlab = "Gender", ylab = "Height (ft)", border = c("pink","lightblue")) # adding some ornament
?par
T
F
# wonderful short forms!!!
height_boys = fev_dat[fev_dat$Gender=="boys","Ht"]
length(height_boys)
height_boys = fev_dat[fev_dat$Gender=="boy","Ht"]
length(height_boys)
height_girls = fev_dat[fev_dat$Gender=="girl","Ht"]
hist(height_girls, border="pink")
hist(height_girls, border="pink", main="Histogram of heights", xlab = "Height")
hist(height_boys, border="lightblue", main="Histogram of heights", xlab = "Height")
# problem: we want to superimpose another graph
# by default, hist() being a first-class citizen of the world of graphical functions, it *erases* the previous plot
# we have to turn this behaviour down
par(new=T) # so that the next plot does not erase the existing one
hist(height_girls, border="pink", main="", xlab = "", ylab = "") # labels were already there
# we have no choice but to specify what are the axes limits we want
hist(height_boys, border="lightblue", main="Histogram of heights", xlab = "Height", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
par(new=T) # so that the next plot does not erase the existing one
hist(height_girls, border="pink", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60)) # labels were already there
# we can remove the axes altogether when drawing:
hist(height_girls, border="pink", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n") # labels were already there
# now we're finally good to go in three steps:
hist(height_boys, border="lightblue", main="Histogram of heights", xlab = "Height", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
par(new=T) # so that the next plot does not erase the existing one
hist(height_girls, border="pink", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n") # labels were already there
# exercise: add a legend to this plot to explain the colours
# other way exploiting the "hidden" add parameter:
# FIRST STEP
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
# SECOND AND LAST STEP
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
?legend
savehistory("R_history_05_02_pm.R")
str(mtcars)
cu_in_to_L = 0.0163871
hist(mtcars[mtcars$cyl==4,"mpg"]) -> histobj # saving the histogram as an R object
str(histobj)
plot(histobj,yaxp=c(0,2,2), main='Histogram of some consumption values',xlab="Consumption (mpg)")
plot(histobj,yaxp=c(0,1,2), main='Histogram of some consumption values',xlab="Consumption (mpg)")
plot(histobj,yaxp=c(0,1,1), main='Histogram of some consumption values',xlab="Consumption (mpg)")
plot(histobj,yaxp=c(0,2,2), main='Histogram of some consumption values',xlab="Consumption (mpg)") # correct solution for exercise 1
?axes
?axis
?axis # for complete control on your axes (use after issuing a plot with xaxt="n" and/or yaxt="n")
ls()
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
?legend
legend(x='topright')
legend(xx='topright', legend = "boy", "girl")
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"))
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"))
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"))
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"))
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height (ft)", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"), border=F)
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height (ft)", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red"))
legend(x='topleft', legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red"))
legend(x=60,y=50, legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red")) # we can use numerical values for the position
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height (ft)", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
legend(x='topright', legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red"),bty='n')
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height (ft)", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
legend(x=70, y=60, legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red"),bty='n') # higher up
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height (ft)", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
legend(x=65, y=66, legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red"),bty='n') # higher up
hist(height_boys, border="blue", main="Histogram of heights", xlab = "Height (ft)", xlim=c(45,75), ylim=c(0,60)) # xlim and ylim to set limits for the axes
hist(height_girls, border="red", main="", xlab = "", ylab = "", xlim=c(45,75), ylim=c(0,60), xaxt="n",yaxt="n", add=T) # labels were already there
legend(x=69, y=70, legend = c("boys", "girls"), fill=c("blue","red"), border=c("blue","red"),bty='n') # higher up
?read.csv
## WORKING WITH ANOTHER DATASET
dat = read.table("tutorial_data.csv", sep=";") # failed to specify the decimal point
str(dat)
dat = read.table("tutorial_data.csv", header = T, sep = ";") # failed to specify the decimal point
str(dat)
dat = read.table("tutorial_data.csv", header = T, sep = ";", dec = ",") # correct decimal point
str(dat)
23,3
# we can specify the classes for the various columns right at import
# checking that in educ I have only a few values:
range(dat$educ)
summary(dat$educ) # not very informative
table(dat$educ)
table(dat$BMI) # table() also works with numeric values
table(dat$educ)
str(dat)
table(dat$SEX)
dat$SEX = factor(dat$SEX)
str(dat)
# specify the classes for the different columns:
dat = read.table("tutorial_data.csv", header = T, sep = ";", dec = ",", colClasses = c("integer","factor","integer","numeric","logical","numeric","factor","integer",rep("logical",2)))
dat = read.table("tutorial_data.csv", header = T, sep = ";", dec = ",", colClasses = c("integer","factor","integer","numeric","logical","numeric","factor","integer",rep("logical",2))) # a logical column needs to see 'T's and 'F's only
dat = read.table("tutorial_data.csv", header = T, sep = ";", dec = ",", colClasses = c("integer","factor","integer",rep(c("numeric","factor"),2),"integer",rep("factor",2)))
# let's play a bit with rep()
rep(3,5) # replicate the value 3 five times
rep(1:3,5)
# a vector is always FLAT, no nesting:
c(c(c(1,2),3),c(6:8)) # results in a flat vector
rep(c("jb",'helen',"ermias"),4)
# pay attention at the difference:
rep(1:3,5)
rep(1:3,each=5)
str(dat)
summary(dat)
# a dataframe has colnames and rownames
colnames(dat)
head(rownames(dat),n=20)
tail(rownames(dat),n=20)
rownames(dat)[2]
rownames(dat)[2] = "1" # row names MUST be unique
rownames(dat) = dat$RANDID # using the random IDs as row names
sum(table(dat$RANDID)) # trying to check that all IDs are unique
vec = c(3,3,3,4)
sum(table(vec)) # trying to check that all IDs are unique
sum(c("jb",'helen',"ermias")) # fails to convert to numeric
sum(c("1",'56',"2")) # fails to convert to numeric
sum(as.numeric(c("1",'56',"2"))) # doesn't even perform the automatic type transformation
mytable = table(dat$RANDID)
str(mytable)
str(table(vec)) # trying to check that all IDs are unique
length(table(dat$RANDID)) == nrow(dat) # correct test to check that all values in dat$RANDID are unique
?unique
unique(dat$RANDID) == nrow(dat)
length(unique(dat$RANDID)) == nrow(dat)
# beware when comparing objects
unique(dat$RANDID) == dat$RANDID
unique(dat$RANDID) == dat$RANDID # comparing two vectors, each one being of length 3109
?identical
identical(3,4)
identical(unique(dat$RANDID),dat$RANDID) # testing the equality between two objects
header(dat)
head(dat)
head(dat)
# now removing the column RANDID from the dataset:
newdat = dat[-1] # first solution
str(newdat)
newdat = dat[,-1] # first solution
str(newdat)
newdat = dat[c(2:10)] # third solution
str(newdat)
dat[1] = NULL # fourth solution
str(dat)
dim(dat)
f = dat
# give me the identifiers of all people stricly above 65 years of age
head(dat$AGE)
head(dat$AGE > 65)
length(dat$AGE > 65)
length(dat$AGE)
dat$AGE[dat$AGE>65] # gives a vector of all the ages of people above 65
# 929 people, but what are their IDs?
row_filter = dat$AGE > 65
length(row_filter)
5 > 49
dat[row_filter==TRUE]
dat[ ,row_filter==TRUE]
dat[row_filter==TRUE,]
dat[row_filter,]
rownames(dat[row_filter,]))
rownames(dat[row_filter,])
rownames(dat[row_filter,]) # these are all the IDs of people above 65.
rownames(dat[row_filter,]) # these are all the IDs of people above 65.
# SUBSETTING with subset() on a data frame
subset(dat, AGE>65) -> smaller_df # unprotected tokens in the formula refer to variable names in the said data frame
is.data.frame(smaller_df) # YES
nrow(smaller_df)
# we can have fun combining logical tests
identical(rownames(smaller_df),rownames(dat[row_filter,]))
# logical AND is '&'
# logical OR is '|'
subset(dat, AGE>65 & CURSMOKE==1) -> smaller_smaller_df # people older than 65 AND who smoke
dim(smaller_smaller_df)
dim(smaller_df)
# a logical OR would result in a larger dataframe than smaller_df:
dim(subset(dat, AGE>65 | CURSMOKE==1))
savehistory('R_history_05_03_am.R')
str(dat)
# plot BMI (y axis) versus AGE (x axis)
# see how plot() works in its simplest form:
plot(x = c(1,3), y = c(5,8))
plot(x = c(1,3), y = c(5,8))
plot(x = c(1,3,5), y = c(5,8,-3))
# by default, plot() plots a scatterplot of individual (x,y) points
# to plot lines, use lines()
lines(x = c(1,3,5), y = c(5,8,-3))
# lines() ADDS to the current plot
# one can indicate a "type of graph" to plot()
?plot
plot(x = c(1,3,5), y = c(5,8,-3), type='l') # to plot lines instead of points
plot(x = c(1,3,5), y = c(5,8,-3), type='b') # to plot lines AND points
plot(x = c(1,3,5), y = c(5,8,-3), type='h')
abline(h=0,col="darkgreen")
# important option for plot: the plotting character, pch
plot(x = c(1,3,5), y = c(5,8,-3)) # default plotting character: unfilled circle
?par
# points draws on top of an existing graph:
points(x=2,y=2,col='red')
# draw many points in green on the straight line from (3,0) to (4,4)
seq(3,4,by=0.04)
# R console history transcript (saved at the end via savehistory()).
# The commands reference a data frame `dat` loaded earlier in the session —
# presumably a Framingham-style dataset with AGE, BMI, educ and CURSMOKE
# columns (TODO confirm; the load step is not part of this transcript).
seq(from=3, to=4, by=0.04)
?seq
seq(from=3, to=4, length.out = 26)
seq(from=3, to=4, length.out = 20)
points(x=seq(from=3,to=4, length.out=20), y=seq(from=0, to=4, length.out=20), col="green")
#using different values for pch:
points(x=seq(from=3,to=4, length.out=20), y=seq(from=0, to=4, length.out=20)+2, col="blue",pch=2)
points(x=seq(from=3,to=4, length.out=20), y=seq(from=0, to=4, length.out=20)-2, col="brown",pch=2,cex=2)
# cex is an expansion factor in graphics
lines(x=c(1,5), y=c(-2,6), lwd=3) # a thick line
# NOTE(review): "darkpink" is not a valid R colour name, so the next call
# errors; it is re-issued with "red" immediately after.
abline(v=2.5, col="darkpink", lwd=5)
abline(v=2.5, col="red", lwd=5)
# lty to control the type of line
abline(h=2, col="red", lty=2)
abline(h=3, col="red", lty=3)
abline(h=4, col="red", lty=4)
# BMI as a function of AGE
plot(x=dat$AGE,y=dat$BMI) # one way to do it...
plot(x=dat$AGE,y=dat$BMI,pch=20) # one way to do it...
plot(x=dat$AGE,y=dat$BMI,pch=20, cex=0.5) # even smaller
# we want to give different colours to different levels of education (variable dat$educ)
plot(x=dat$AGE,y=dat$BMI,pch=20, cex=0.5, col=dat$educ)
table(dat$educ)
colors()
head(colors())
palette()
plot(x=dat$AGE,y=dat$BMI,pch=20, col=dat$educ)
plot(x=dat$AGE,y=dat$BMI,pch=20, col=dat$educ+1)
plot(x=dat$AGE,y=dat$BMI,pch=20, col=as.integer(dat$educ))
plot(x=dat$AGE,y=dat$BMI,pch=20, col=as.integer(dat$educ)+1)
plot(x=dat$AGE,y=dat$BMI,pch=c(1:3), col=as.integer(dat$educ)+1)
# NOTE(review): the next call errors (bare CURSMOKE is not defined at top
# level); the corrected forms follow on the next two lines.
plot(x=dat$AGE,y=dat$BMI,pch=CURSMOKE, col=as.integer(dat$educ)+1)
plot(x=dat$AGE,y=dat$BMI,pch=dat$CURSMOKE, col=as.integer(dat$educ)+1)
plot(x=dat$AGE,y=dat$BMI,pch=as.integer(dat$CURSMOKE), col=as.integer(dat$educ)+1)
plot(x=dat$AGE,y=dat$BMI,pch=as.integer(dat$educ)) # educ is used to determine the plotting character
# plot the BMI distribution split by education level (boxplots)
str(dat)
boxplot(BMI~educ, data=dat)
# add some text (text() function) in the form 'n=...' on top of each boxplot
# another way to represent the size of the samples is to use the width of each boxplot:
?boxplot
boxplot(BMI~educ, data=dat, varwidth=T)
#counting first:
table(dat$educ)
?text
text(x=1:4, y=50, labels = c("n=1273","n=962","n=542","n=332"), col="red")
# text is a bit too big
boxplot(BMI~educ, data=dat, varwidth=T)
text(x=1:4, y=50, labels = c("n=1273","n=962","n=542","n=332"), col="red",pos=3,cex=0.8)
text(x=1:4, y=50, labels = c("n=1273","n=962","n=542","n=332"), col="red",pos=3,offset=-0.3,cex=0.8)
boxplot(BMI~educ, data=dat, varwidth=T)
text(x=1:4, y=50, labels = c("n=1273","n=962","n=542","n=332"), col="red",pos=3,offset=-0.1,cex=0.8)
paste("I am", c("Helen","Ermias","Bernice","Eric","Joseph"))
paste("I am", c("Helen","Ermias","Bernice","Eric","Joseph"), "and I'm happy.")
# sep modifies the separator used by paste()
paste("I am", c("Helen","Ermias","Bernice","Eric","Joseph"), "and I'm happy.", sep="_")
# paste0() is paste() with sep=""
paste0("I am", c("Helen","Ermias","Bernice","Eric","Joseph"), "and I'm happy.")
paste0("n=",c(1,3,434,535)) # paste() and paste0() perform automatic type conversion to character
# combining all things in a smart way:
boxplot(BMI~educ, data=dat, varwidth=T)
text(x=1:4, y=50, labels = paste0("n=",table(dat$educ)), col="red",pos=3,offset=-0.1,cex=0.8)
# SOME DESCRIPTIVE STATS
# mode of a sample:
length(table(dat$BMI))
head(table(dat$BMI))
max(table(dat$BMI))
?max
# right-assignment (->) stores the frequency table in mytable
table(dat$BMI) -> mytable
mytable[mytable==14]
mytable[mytable==13]
mytable[mytable==12]
hist(dat$BMI)
hist(dat$BMI, breaks=11) # default
hist(dat$BMI, breaks=50) # increasing the number of bins
plot(dat$BMI) # plotting a mere vector
plot(dat$BMI, pch=20) # plotting a mere vector
plot(dat$BMI, pch=20) # plotting a mere vector
# exercise: calculate "MANUALLY" the variance on the set of all BMI observations, and then on the observations for each of the educ classes
savehistory("R_history_05_03_pm.R")
|
f0722d2d7c7238f1eb45fdf6447977478a9b1bfe
|
f549e55767e873c144811ec2c7fd9e7db93717fa
|
/run_analysis.R
|
06b34d50523353eddd68ba82d975196bf82dca65
|
[] |
no_license
|
bammoss/tidydata
|
1c5127115f09d110a1ee04e3c2abd19f1b8c454f
|
a66c45e8ea0672ff0637b1b4fa12da026abfb213
|
refs/heads/master
| 2021-01-10T20:59:40.900493
| 2015-06-21T21:44:56
| 2015-06-21T21:44:56
| 37,824,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,867
|
r
|
run_analysis.R
|
run_analysis <- function(){
  # Builds a tidy summary of the UCI HAR dataset: merges the train/test
  # splits, labels the activities, keeps the mean()/std() feature columns,
  # and writes the per-subject/per-activity feature means to
  # "coursera_project.txt". Returns the summarised data frame.
  # NOTE: the dataset folder name was changed so that it contains no spaces.
  subject_test  <- read.table("UCIHARDataset/test/subject_test.txt")
  x_test        <- read.table("UCIHARDataset/test/X_test.txt")
  y_test        <- read.table("UCIHARDataset/test/y_test.txt")
  subject_train <- read.table("UCIHARDataset/train/subject_train.txt")
  x_train       <- read.table("UCIHARDataset/train/X_train.txt")
  y_train       <- read.table("UCIHARDataset/train/y_train.txt")
  feature       <- read.table("UCIHARDataset/features.txt")

  # Name every column up front (vectorised; the original looped over 561
  # hard-coded indices and only named the training features, then copied
  # names across with a second 563-iteration loop).
  colnames(subject_train) <- "subject_number"
  colnames(subject_test)  <- "subject_number"
  colnames(y_train) <- "activity"
  colnames(y_test)  <- "activity"
  feature_names <- as.character(feature[, 2])
  colnames(x_train) <- feature_names
  colnames(x_test)  <- feature_names

  # Replace the numeric activity codes (1..6) with descriptive labels via a
  # lookup vector (the original used two for loops with hard-coded lengths
  # 7352 and 2947, which silently break if the files change size).
  activity_labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                       "SITTING", "STANDING", "LAYING")
  y_train$activity <- activity_labels[y_train$activity]
  y_test$activity  <- activity_labels[y_test$activity]

  # Combine subject id, activity and features, then stack train over test.
  training_set <- cbind(subject_train, y_train, x_train)
  test_set     <- cbind(subject_test, y_test, x_test)
  data_set     <- rbind(training_set, test_set)

  # Keep only the identifier columns plus the mean()/std() features
  # (same column positions as the original selection).
  data_set <- data_set[, c(1:8, 43:48, 83:88, 123:128, 163:168, 203:204,
                           216:217, 229:230, 242:243, 255:256, 268:273,
                           347:352, 426:431, 505:506, 531:532, 544:545)]

  # Average every feature column for each subject/activity pair
  # (replaces the sapply/substitute/do.call construction).
  grouped    <- dplyr::group_by(data_set, subject_number, activity)
  final_data <- dplyr::summarise_all(grouped, mean)

  # Persist the tidy summary and return it.
  write.table(final_data, "coursera_project.txt", sep = " ", row.names = FALSE)
  final_data
}
|
0ed632786641f855ff560b27b28edf364fc3918c
|
4467cfb9f142f3b709ef8f1245f740fc2924280b
|
/filter_contig_by_length.R
|
e00d477419e89bf9a5281683a5923e4f970a86de
|
[
"MIT"
] |
permissive
|
laninsky/genome-scripts
|
550c0883296b46fe7267a3a8412a2b2107dbc241
|
a5942d4406fe0ab9a5599f10955cd6b471b4ca2d
|
refs/heads/master
| 2020-05-21T19:22:43.811028
| 2019-08-12T04:17:29
| 2019-08-12T04:17:29
| 65,322,730
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,259
|
r
|
filter_contig_by_length.R
|
filter_contig_by_length <- function(file_to_filter, contig_length) {
  # Streams a fasta file and keeps only scaffolds at least `contig_length`
  # bp long. For every retained scaffold it appends:
  #   * name, total length and non-N length to "name_lengths_Ns_filtered.txt"
  #   * the header line and the sequence (on a single line, regardless of
  #     line breaks in the input) to "length_filtered_genome.fasta"
  # Both files are created/appended in the current working directory.
  if (missing(file_to_filter) || missing(contig_length)) {
    print(paste("This script needs you to define the location and name of your fasta file and the minimum contig length you want to retain in the outfile: length_filtered_genome.fasta"))
    print(paste("Example of calling filter_contig_by_length:"))
    cat('filter_contig_by_length("/mnt/anolis/Braker/scrubbed_genome.fasta.masked",1000)\n\n')
    stop("Please fill in the missing info in your function call")
  }

  # Shared writer: emit one scaffold record if it passes the length filter.
  write_record <- function(seqname, sequencerec) {
    sequencelength <- nchar(sequencerec)
    if (sequencelength >= contig_length) {
      # Non-gapped (non-N) length of the scaffold.
      sequencelengthN <- nchar(gsub("N", "", sequencerec, fixed = TRUE))
      lengthdist <- t(as.matrix(c(seqname, sequencelength, sequencelengthN)))
      write.table(lengthdist, "name_lengths_Ns_filtered.txt", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
      write.table(seqname, "length_filtered_genome.fasta", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
      write.table(sequencerec, "length_filtered_genome.fasta", append = TRUE, quote = FALSE, row.names = FALSE, col.names = FALSE)
    }
  }

  # Accumulates the sequence of the scaffold currently being read.
  sequencerec <- NULL
  seqname <- NULL
  con <- file(file_to_filter)
  open(con)
  # Close the connection even if something below errors (the original only
  # closed it on the success path).
  on.exit(close(con), add = TRUE)
  while (TRUE) {
    line <- readLines(con, n = 1)
    # Zero-length read means end of file.
    if (length(line) == 0) {
      break
    }
    if (grepl(">", line, fixed = TRUE)) {
      # New header: flush the previous scaffold (if any), then start fresh.
      if (!is.null(sequencerec)) {
        write_record(seqname, sequencerec)
      }
      sequencerec <- NULL
      seqname <- line
    } else {
      # Sequence line: append to the current scaffold.
      sequencerec <- paste(sequencerec, line, sep = "")
    }
  }
  # Flush the final scaffold. Guard against an empty input file: the
  # original compared nchar(NULL) >= contig_length, which errors because
  # the if() condition has length zero.
  if (!is.null(sequencerec)) {
    write_record(seqname, sequencerec)
  }
}
|
05f0e1a310800c1f7bf79421681354108ac92846
|
7dafb67a629570e6e3ba72c5ae24d441fca5e208
|
/library_prep/01_simulate_library_prep.R
|
bb53a375c94a3f5daabb9de90d05ffafe80af987
|
[] |
no_license
|
ksamuk/dpse_gbs
|
09f97fb1e10c163cc9c46c924a2e82fe6b9f3e0d
|
c96b7d86fb0faae84ee8352a49b2320e81b72ffe
|
refs/heads/master
| 2020-12-25T15:07:57.184459
| 2017-05-24T14:08:13
| 2017-05-24T14:08:13
| 67,240,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,339
|
r
|
01_simulate_library_prep.R
|
# simulate a GBS library prep using the Drosophila pseudoobscura reference genome
# examine resultant coverage/fragment distribution for different restriction enzymes
# kms sept 2016
############################################################
# libraries
############################################################
# install/load packages
#install.packages("SimRAD")
#source("https://bioconductor.org/biocLite.R")
#biocLite("ShortRead")
library("SimRAD")
library("dplyr")
library("tidyr")
library("ggplot2")
############################################################
# raw data
############################################################
# download reference genome
genome_url <- "ftp://ftp.flybase.net/genomes/Drosophila_pseudoobscura/dpse_r3.04_FB2016_02/fasta/dpse-all-chromosome-r3.04.fasta.gz"
dir.create("data")
if(!file.exists("data/dpse-all-chromosome-r3.04.fasta.gz")){
download.file(genome_url, "data/dpse-all-chromosome-r3.04.fasta.gz")
}
# create list of fasta sequences from reference genome
ref_genome <- ref.DNAseq("data/dpse-all-chromosome-r3.04.fasta.gz", subselect.contigs = TRUE, prop.contigs = 1.0)
# restriction enzyme sequences
# enzyme cut sites from:
# https://en.wikipedia.org/wiki/List_of_restriction_enzyme_cutting_sites
# enzyme cut site variable names are coded as follows:
# 5'--cs_5p1 cs_3p1--3'
# 3'--cs_5p2 cs_3p2-—5'
# Csp6I
#5' ---G TAC--- 3'
#3' ---CAT G--- 5'
Csp6I <- list(cs_5p1 = "G", cs_3p1 = "TAC", cs_5p2 = "G", cs_3p2 = "CAT", name = "Csp6I")
# MspI
# 5' ---C CGG--- 3'
# 3' ---GGC C--- 5'
MspI <- list(cs_5p1 = "C", cs_3p1 = "CGG", cs_5p2 = "C", cs_3p2 = "GGC", name = "MspI")
# EcoRI
# 5' ---G AATTC--- 3'
# 3' ---CTTAA G--- 5'
# NOTE(review): cs_3p2 below is "CTTA" while the diagram above shows CTTAA —
# confirm which is intended (the 3'-strand fields are never read by
# rr_library_prep, so this is currently inert).
EcoRI <- list(cs_5p1 = "G", cs_3p1 = "AATTC", cs_5p2 = "G", cs_3p2 = "CTTA", name = "EcoRI")
############################################################
# Simulating distribution of reads per inidivudal
############################################################
# function for full library prep simulation
# NOTE(review): the assignments below are debugging leftovers that mirror the
# function's default arguments as global variables; they are not used by the
# function itself and can mask missing-argument errors when testing it.
reference_genome = ref_genome
enzyme_cut1 = Csp6I
size_range = c(300, 500)
num_ind = 60
expected_reads = 100000000
expected_sequencing_variance = 0.4
read_cutoff = 10
ind_cutoff = 0.8
n_top_cuts = 1000
enzyme_cut2 = NULL
# Simulate a full RAD/GBS library prep and sequencing run for one enzyme
# (or enzyme pair) across one or more cohort sizes.
#
# reference_genome: DNAseq string as returned by SimRAD::ref.DNAseq().
# enzyme_cut1 / enzyme_cut2: lists with cut-site fields (cs_5p1, cs_3p1, ...)
#   and a `name`, as defined above; enzyme_cut2 = NULL means single digest.
# num_ind may be a vector: one result row is produced per cohort size.
# Returns a list with one single-row data.frame per value of num_ind.
#
# NOTE(review): enzyme_cut1[1] / enzyme_cut1[2] pass *one-element lists*
# (cs_5p1, cs_3p1) to insilico.digest(), whose documented arguments are
# character strings — confirm SimRAD coerces these as intended. The
# 3'-strand fields (cs_5p2/cs_3p2) are never used.
rr_library_prep <- function(reference_genome, enzyme_cut1, enzyme_cut2 = NULL, size_range = c(300, 500), num_ind = 60,
expected_reads = 100000000, expected_sequencing_variance = 0.4,
read_cutoff = 10, ind_cutoff = 0.8, n_top_cuts = 1000){
# perform in silico restriction digest
digest <- insilico.digest(reference_genome, enzyme_cut1[1], enzyme_cut1[2], verbose = FALSE)
# perform second restriction digest (if specified)
if (!is.null(enzyme_cut2)){
digest <- insilico.digest(digest, enzyme_cut2[1], enzyme_cut2[2], verbose = FALSE)
enzyme_name <- paste0(enzyme_cut1$name, "_", enzyme_cut2$name)
} else{
enzyme_name <- enzyme_cut1$name
}
# size selection of fragments
size_sel <- size.select(digest, size_range[1], size_range[2], verbose = FALSE, graph = FALSE)
# simulate variable sequencing of each fragment
# assumes an approximate exponential distribution of sequencing depth per fragment
# tbd : specify fragment sequecning variance function
all_reads <- rexp(length(size_sel), 1/(expected_reads/length(size_sel)))
# init output df list
out_df <- list()
# if num_ind is a vector of length > 1
# iterate over number of individuals and same summary stats
# NOTE(review): 1:length(num_ind) misbehaves for empty num_ind; seq_along()
# would be safer.
for (i in 1:length(num_ind)){
# expected reads for each individual
expected_reads_per_ind <- expected_reads/num_ind[i]
# simulate variable sequencing per individual
# assumes an approximate gaussian distribution of sequencing per individual
# tbd: specify individual sequencing variance function
prop_reads <- rnorm(num_ind[i], mean = expected_reads_per_ind, sd = expected_reads_per_ind * expected_sequencing_variance)
# normalise to per-individual proportions of the pool; clamp negatives to 0
prop_reads <- (prop_reads / (expected_reads_per_ind)) / num_ind[i]
prop_reads <- ifelse(prop_reads < 0, 0, prop_reads)
# assign reads to individuals
assigned_reads <- lapply(prop_reads, function(x) round(all_reads * x)) %>% data.frame
# create data frame of assigned reads
names(assigned_reads) <- paste0("ind_", 1:num_ind[i])
# reshape wide -> long: one row per (fragment, individual) pair
assigned_reads <- data.frame(fragment = 1:length(all_reads), assigned_reads) %>%
gather(-fragment, key = "ind", value = "frag_count")
# calculate the number of fragments that pass:
# 1: the depth threshold (reads per fragment)
# 2: the individual representation threshold (prop individuals that pass #1)
prop_fragments_useable <- assigned_reads %>%
mutate(frag_count_acceptable = frag_count > read_cutoff) %>%
group_by(fragment) %>%
summarise(prop_ind_acceptable = mean(frag_count_acceptable )) %>%
mutate(ind_count_acceptable = prop_ind_acceptable > ind_cutoff) %>%
summarise(prop_frags_usable = mean(ind_count_acceptable)) %>%
as.numeric
# depth of the top 1000 fragments
# (`.[1:n_top_cuts,]` keeps the n_top_cuts deepest fragments via the
# magrittr dot placeholder)
top_n <- assigned_reads %>%
group_by(fragment) %>%
summarise(mean_depth = mean(frag_count)) %>%
arrange(desc(mean_depth)) %>%
select(mean_depth) %>%
.[1:n_top_cuts,] %>%
unlist
# generate a single-row dataframe as output
out_df[[i]] <- data.frame(enzyme_name = enzyme_name, num_ind = num_ind[i], number_of_fragments = length(size_sel),
mean_reads_per_fragment_per_ind = (expected_reads/length(size_sel))/num_ind[i], prop_fragments_useable = prop_fragments_useable,
number_fragments_usable = prop_fragments_useable*length(size_sel), mean_top_n_fragments = round(mean(top_n)),
min_top_n_fragments = round(min(top_n)), max_top_n_fragments = round(max(top_n)))
}
out_df
}
# perform in silico preps for csp6I and ecoRI + mspI
# varying number of individuals
ind_list <- seq(20, 600, by = 20)
csp_preps <- rr_library_prep(num_ind = ind_list, reference_genome = ref_genome, enzyme_cut1 = Csp6I)
eco_msp_preps <- rr_library_prep(num_ind = ind_list, reference_genome = ref_genome, enzyme_cut1 = MspI, enzyme_cut2 = EcoRI)
eco_preps <- rr_library_prep(num_ind = ind_list, reference_genome = ref_genome, enzyme_cut1 = EcoRI)
# each prep is a list of single-row data frames; stack them into one table
prep_df <- bind_rows(csp_preps, eco_msp_preps, eco_preps)
# bar plot of usable fragments, split by enzyme
prep_df %>%
filter(num_ind < 600) %>%
ggplot(aes(x = num_ind, y = number_fragments_usable)) +
geom_bar(stat = "identity") +
facet_wrap(~enzyme_name, scales = "free_y")+
theme_bw()+
xlab("Number of individuals")+
ylab("Number of usable restriction fragments")
# line plot comparing usable fragments in both digests
prep_df %>%
filter(num_ind < 600) %>%
ggplot(aes(x = num_ind, y = number_fragments_usable, color = enzyme_name)) +
geom_line(size = 2)+
theme_bw()+
theme(legend.justification = c(1, 1),
legend.position = c(1, 1),
legend.background = element_blank(),
legend.key = element_blank())+
xlab("Number of individuals")+
ylab("Number of usable restriction fragments")
# depth of the deepest 1000 fragments per cohort size, with min/max bars
# and a dashed reference line at 20x
prep_df %>%
filter(num_ind < 400) %>%
ggplot(aes(x = num_ind, y = mean_top_n_fragments, color = enzyme_name, ymin = min_top_n_fragments, ymax = max_top_n_fragments)) +
geom_point(size = 2)+
geom_errorbar()+
geom_hline(yintercept = 20, lty = 2)+
theme_bw()+
theme(legend.position = "none")+
facet_wrap(~enzyme_name, scales = "free_y")+
xlab("Number of individuals")+
ylab("Depth of top 1000 fragments")
|
8af9e29072139467a3671f389d481903bae552fb
|
7dc24ce2d943197c2d8d20e9cb25d32f7e4399be
|
/man/Stool_subset.Rd
|
4d7d7d74585432022d1d23d45cccd8f15017e0a8
|
[] |
no_license
|
biobakery/SparseDOSSA2
|
26f9ceb91a2965b119d783b07b3cd02ee75d6027
|
e013d9e3c0fd79e1c343340775f33f14f22b8c5e
|
refs/heads/master
| 2023-01-24T09:26:23.553053
| 2023-01-19T16:45:46
| 2023-01-19T16:45:46
| 219,829,612
| 9
| 2
| null | 2022-10-21T17:36:22
| 2019-11-05T19:05:37
|
R
|
UTF-8
|
R
| false
| true
| 538
|
rd
|
Stool_subset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Stool_subset}
\alias{Stool_subset}
\title{A subset of the HMP1-II stool samples}
\format{
A matrix with 5 rows (species) and 5 columns (samples)
}
\source{
\url{https://www.hmpdacc.org/hmp/}
}
\usage{
Stool_subset
}
\description{
A dataset containing species-level microbial counts of a subset of
the HMP1-II stool samples. This includes the top 5 most abundant
species and top 5 most deeply sequenced samples.
}
\keyword{datasets}
|
423985adec48f0ee7eb6517548180def62f08a9d
|
9d5c31dae452a3a06ba98911327dd719561acaa0
|
/PlotLowerBoundCases.R
|
3d63089f26d685b32b7f73baf17620d055ffcdf3
|
[] |
no_license
|
nickwkoning/Thesis
|
3f37b58dfa8f8e4723c0c775fa03b032f3b33f3f
|
1af11be7ea1b4e8ffb539b3d78e04acbf62e2eb2
|
refs/heads/master
| 2022-11-04T11:27:23.868684
| 2020-06-16T13:30:12
| 2020-06-16T13:30:12
| 263,144,109
| 0
| 0
| null | 2020-05-11T19:54:45
| 2020-05-11T19:54:45
| null |
UTF-8
|
R
| false
| false
| 2,647
|
r
|
PlotLowerBoundCases.R
|
source("Packages.R")
source("Thesisggtheme.R")

# Common canvas for all four boundary cases: a unit-style square whose axis
# ticks are expressed in terms of C_alpha and delta_{p,n}^2.
baseplot <- ggplot() +
  geom_rect(aes(xmin = 0, xmax = 2, ymin = 0, ymax = 2),
            alpha = 0, color = "black") +
  scale_x_continuous(breaks = c(0, 1, 2),
                     labels = c(0, TeX('$C_{\\alpha} - \\delta_{p,n}^2$'), TeX('$\\delta_{p,n}^2$')),
                     limits = c(-0.5, 3.1)) +
  scale_y_continuous(breaks = c(0, 1, 2),
                     labels = c(0, TeX('$C_{\\alpha} - \\delta_{p,n}^2$'), TeX('$\\delta_{p,n}^2$')),
                     limits = c(-0.5, 3.1)) +
  xlab(TeX('$n \\widehat{\\theta}_1$')) +
  ylab(TeX('$n \\widehat{\\theta}_2$')) +
  #geom_hline(yintercept = 0) +
  #geom_vline(xintercept = 0) +
  ThesisggTheme() +
  theme(panel.grid.minor = element_blank())

# Each case draws the constraint line theta1 + theta2 = C_alpha at a
# different intercept, shading the rejection region in red where present.
# (The bogus `label = 3` argument was removed from every geom_abline():
# it is not a geom_abline parameter and only produced "ignoring unknown
# parameters" warnings.)

# Case 1: the constraint line lies entirely above the square.
case1 <- baseplot +
  geom_abline(intercept = 4.3, slope = -1, linetype = "longdash") +
  annotate("text", x = 2.47, y = 2.4, size = 3.7,
           label = TeX('$n \\widehat{\\theta}_1$ + $n \\widehat{\\theta}_2 = C_{\\alpha}$'))

# Case 2: the line clips the top-right corner.
case2 <- baseplot +
  geom_polygon(aes(x = c(1, 2, 2), y = c(2, 2, 1)), alpha = 0.3, fill = "red") +
  geom_abline(intercept = 3, slope = -1, linetype = "longdash") +
  annotate("text", x = 2.58, y = 0.8, size = 5,
           label = TeX('$n \\widehat{\\theta}_1$ + $n \\widehat{\\theta}_2 = C_{\\alpha}$'))

# Case 3: the line cuts through the lower-left of the square.
case3 <- baseplot +
  geom_polygon(aes(x = c(0, 0, 2, 2, 1), y = c(1, 2, 2, 0, 0)),
               alpha = 0.3, fill = "red") +
  geom_abline(intercept = 1, slope = -1, linetype = "longdash") +
  annotate("text", x = 1.8, y = -0.25, size = 3.7,
           label = TeX('$n \\widehat{\\theta}_1$ + $n \\widehat{\\theta}_2 = C_{\\alpha}$'))

# Case 4: the line lies entirely below the square (whole square shaded).
case4 <- baseplot +
  geom_polygon(aes(x = c(0, 0, 2, 2), y = c(0, 2, 2, 0)),
               alpha = 0.3, fill = "red") +
  geom_abline(intercept = -0.3, slope = -1, linetype = "longdash") +
  annotate("text", x = 0.7, y = -0.4, size = 3.7,
           label = TeX('$n \\widehat{\\theta}_1$ + $n \\widehat{\\theta}_2 = C_{\\alpha}$'))

# 2x2 panel of the four cases.
plot_grid(case1, case2, case3, case4,
          labels = c("Case 1", "Case 2", "Case 3", "Case 4"),
          label_size = 15, hjust = 0, vjust = 1,
          scale = c(1., 1., 1., 1.))

# Case 2 annotated with the additional blue corner regions and the region D.
case2 +
  geom_polygon(aes(x = c(0, 0, 1), y = c(2, 3, 2)),
               alpha = 0.3, fill = "blue") +
  geom_polygon(aes(x = c(2, 3, 2), y = c(1, 0, 0)),
               alpha = 0.3, fill = "blue") +
  geom_line(aes(x = c(-Inf, 2), y = c(1, 1)), linetype = "dotted") +
  geom_line(aes(x = c(1, 1), y = c(-Inf, 2)), linetype = "dotted") +
  annotate("text", x = 1.5, y = 1.5, size = 18,
           label = "D")
|
88878ed31c85bc63ecb9952d4960eee3389c6e88
|
bde44ebcaa25a97b55d8f553b85ae00e3980375b
|
/inst/essais/ellipsoid.R
|
0b22d03389a735b1ff61f6209f027f367d65acca
|
[] |
no_license
|
stla/uniformly
|
34a1165db694709c90294a4e19499102c5bb5bf9
|
9eea0f9ecf6606529173f169586ef791d9e84a6a
|
refs/heads/master
| 2023-07-20T14:58:32.464075
| 2023-07-18T07:11:43
| 2023-07-18T07:11:43
| 142,665,262
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,301
|
r
|
ellipsoid.R
|
# Scratch ("essais") script: compare uniformly::runif_on_ellipsoid samples
# against PlaneGeometry arc lengths for the ellipse {x : x' A x = r^2}.
library(PlaneGeometry)
library(uniformly)
A <- rbind(c(2, 1), c(1, 1))
r <- 1
sims <- runif_on_ellipsoid(10000, A, r)
alpha1 <- 10 * pi/180
alpha2 <- 60 * pi/180
# NOTE(review): `ell` is used here before it is defined (it is only created a
# few lines below via EllipseFromCenterAndMatrix), so this line errors when
# the file is run top-to-bottom.
ell <- EllipticalArc$new(ell, alpha1=0, alpha2=360, degrees = TRUE)
perimeter <- ell$length()
# Monte-Carlo estimate of the arc length between alpha1 and alpha2
mean(atan2(sims[,2], sims[,1]) > alpha1 & atan2(sims[,2], sims[,1]) < alpha2) * perimeter
ell <- EllipseFromCenterAndMatrix(c(0, 0), A)
arc <- EllipticalArc$new(ell, alpha1, alpha2, degrees = FALSE)
arc$length()
# Polyline approximation of the full perimeter from 500 boundary points
path <- ell$path(500L)
perim <- 0
for(i in 2L:nrow(path)){
perim <- perim + sqrt(c(crossprod(path[i-1,]-path[i,])))
}
# Hand-rolled sampler in axis-aligned form (radii a, b)
a <- ell$rmajor
b <- ell$rminor
x <- rnorm(100000, 0, a)
y <- rnorm(100000, 0, b)
d <- sqrt(x^2/a^2 + y^2/b^2)
sims <- cbind(x/d,y/d)
#######################
runifonellipsoid <- function(n, A, r) {
  # Draws n points on the ellipsoid {x : x' A x = r^2}.
  # A must be a symmetric positive-definite matrix; r > 0 is the "radius".
  # Returns an n x d matrix, one point per row.
  #
  # Method: sample anisotropic Gaussians along the principal axes, project
  # each draw onto the ellipsoid, then rotate back to the original frame.
  S <- A / (r * r)
  stopifnot(isSymmetric(S))
  e <- eigen(S, symmetric = TRUE)
  if (any(e$values <= 0)) stop("`S` is not positive.")
  radiisq <- 1 / e$values
  radii <- sqrt(radiisq)
  rot <- e$vectors
  d <- length(radii)
  # Row j of xyz has sd radii[j]. Building the matrix explicitly keeps the
  # d x n shape for every n; the original t(vapply(...)) collapsed to a
  # 1 x d matrix when n == 1, which made the normalisation below divide
  # each coordinate by its own "norm" and return a wrong point.
  xyz <- matrix(rnorm(n * d), nrow = d, ncol = n) * radii
  # Per-point normalisation so that each column satisfies x' S x = 1.
  norms <- sqrt(colSums(xyz * xyz / radiisq))
  sims0 <- t(xyz) / norms
  # Rotate from the eigenbasis back to the original coordinates.
  t(rot %*% t(sims0))
}
# Quick visual check: 100 sampled points overlaid on the ellipse
# {x : x' A x = 1} drawn by PlaneGeometry::draw().
sims <- runifonellipsoid(100, A, 1)
plot(NULL, xlim = c(-2, 2), ylim = c(-2, 2), asp = 1)
draw(ell)
points(sims, pch = 19)
|
217555a84d0d26df5a1a432f8a29447bd218330f
|
969d4316ad794a0eef0213b01a7b06ddfdf8d90d
|
/13_expressions/07_walking_ast/exercise6.r
|
34dbcab2e2dcf138abbcc4a5442ae9901308877e
|
[] |
no_license
|
Bohdan-Khomtchouk/adv-r-book-solutions
|
adaa5b5f178999d130aff1359a23e978e39e86ae
|
e1b3a63c0539de871728b522604110c0aa18c7d1
|
refs/heads/master
| 2021-01-22T00:36:17.450660
| 2015-12-06T02:54:02
| 2015-12-06T02:54:02
| 47,481,353
| 1
| 1
| null | 2015-12-06T02:49:46
| 2015-12-06T02:49:46
| null |
UTF-8
|
R
| false
| false
| 800
|
r
|
exercise6.r
|
### Compare bquote2() to bquote(). There is a subtle bug in bquote():
### it won’t replace calls to functions with no arguments. Why?
# NOTE(review): bquote2() is defined elsewhere in these exercise solutions;
# this snippet errors if run standalone.
bquote(.(x)(), list(x = quote(f)))
# .(x)()
bquote2(.(x)(), list(x = quote(f)))
# f()
bquote(.(x)(1), list(x = quote(f)))
# f(1)
# Here's the source for `bquote` (from `base`):
# (redefining it here shadows base::bquote for the rest of the session —
# intentional, so the bug can be inspected)
bquote <- function(expr, where = parent.frame()) {
unquote <- function(e) {
if (is.pairlist(e)) {
as.pairlist(lapply(e, unquote))
} else if (length(e) <= 1L) { e }
else if (e[[1L]] == as.name(".")) {
eval(e[[2]], where)
} else { as.call(lapply(e, unquote)) }
}
unquote(substitute(expr))
}
# The subtle bug is on line 16, where it returns `e` if `length(e)` is <= 1.
# `length(substitute(.(x)()))` is 1, so it will just be returned instead of parsed.
|
6d4eb4a06d4c323c645a25031cc9ed9d5b14d2e8
|
c9e9fb81b2680f5adaa8600426f713a43853b80a
|
/AdventofCode2015_1.R
|
7daaa8695a7e5117ce7a7a1944b29b6074003457
|
[] |
no_license
|
codesformochi/AdventofCode_2015
|
55c55552e2ce9d535904e7220d96cea8250ac430
|
c8d1f4747da56d27069a7481bb3c14e5d557285d
|
refs/heads/main
| 2023-06-10T07:22:28.963444
| 2021-06-22T15:59:11
| 2021-06-22T15:59:11
| 336,143,592
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 848
|
r
|
AdventofCode2015_1.R
|
# libraries
library(stringr)

# Read the puzzle input (stored as RTF).
floors <- striprtf::read_rtf("adventofcode2015_1.rtf")

# PART 1 ---------------------------------------------------------------
# '(' means up one floor, ')' means down one; the final floor is simply
# the difference between the two counts.
start_parentheses <- str_count(floors, fixed("("))
end_parentheses <- str_count(floors, fixed(")"))
total_floors <- start_parentheses - end_parentheses
total_floors

# PART 2 ---------------------------------------------------------------
# Find the position of the first character that takes Santa below floor 0.
# The original used ifelse(..., return(i), ...) inside the loop: ifelse()
# is a vectorised *value* function, and return() at top level is an error
# ("no function to return from"), so the script aborted exactly when it
# found the answer. Use a plain if/break instead.
current_floor <- 0
v_floors <- unlist(str_split(floors, ""))
basement_position <- NA_integer_
for (i in seq_along(v_floors)) {
  current_floor <- current_floor + if (v_floors[i] == "(") 1L else -1L
  if (current_floor < 0) {
    basement_position <- i
    break
  }
}
# Position of the first basement entry (NA if the basement is never reached)
basement_position
|
a620b22b580754cad06b2a04538711b1cd59af75
|
76cf5e3c9d1ca3a7b1c8c395de7ab89dade4f3b6
|
/man/WAD.Rd
|
d1fec52ab100676cf0e16c38feabc42586099b03
|
[] |
no_license
|
swsoyee/TCC
|
4a12ac14f87bce91c0a3123167e6e33f324f4bc0
|
e0d41326d1e725cb12a23c87102c3454746dddf8
|
refs/heads/master
| 2023-04-29T05:14:06.435553
| 2021-03-11T10:56:29
| 2021-03-11T10:56:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,910
|
rd
|
WAD.Rd
|
\name{WAD}
\alias{WAD}
\title{Calculate WAD statistic for individual genes}
\description{
This function performs WAD method to identify differentially expressed genes
(DEGs) from two-group gene expression data. A high absolute value for the WAD
statistic is evident of a high degree of differential expression.
}
\usage{
WAD(data, group, logged = FALSE, floor = 1, sort = FALSE)
}
\arguments{
\item{data}{numeric matrix or data frame containing count data or
microarray data, where each row indicates the gene (or transcript
or probeset ID), each column indicates the sample (or library),
and each cell indicates the expression value (i.e., number of counts
or signal intensity) of the gene in the sample.}
\item{group}{numeric vector indicating the experimental group for each
sample (or library).}
\item{logged}{logical. If \code{TRUE}, the input data are regarded as
log2-transformed. If \code{FALSE}, the log2-transformation is
performed after the floor setting. The default is
\code{logged = FALSE}.}
\item{floor}{numeric scalar (> 0) specifying the floor value for
taking logarithm. The default is \code{floor = 1}, indicating that
values less than 1 are replaced by 1. Ignored if
\code{logged = TRUE}.}
\item{sort}{logical. If \code{TRUE}, the retrieved results are sorted
in order of the rank of absolute WAD statistic.
If \code{FALSE}, the results are retrieved by the original order.}
}
\value{
A numeric vector of WAD statistic for individual genes
}
\references{
Kadota K, Nakai Y, Shimizu K: A weighted average difference method for
detecting differentially expressed genes from microarray data.
Algorithms Mol Biol. 2008, 3: 8.
}
\examples{
data(nakai)
group <- c(1, 1, 1, 1, 2, 2, 2, 2)
wad <- WAD(nakai, group, logged = TRUE, sort = TRUE)
}
|
a249da6a1753369772a0d82a9f2c367282c01c03
|
7cf590876873d130474a448f55bd696d0aa08689
|
/pack7/R/visualize_airport_delays.R
|
4d371d8ef500a88aef8ca1308aaf9d1db7d40b78
|
[] |
no_license
|
jvf96/pack7
|
d8bb67e71863334286cd768850d0a1fed9d9dae0
|
ee5a999f0dc1255153d7c6dc7693124f526374d0
|
refs/heads/master
| 2021-08-16T12:59:27.780246
| 2017-11-19T23:15:01
| 2017-11-19T23:15:01
| 111,150,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 947
|
r
|
visualize_airport_delays.R
|
#' Visualize airport delays
#'
#' Computes the mean arrival delay for each destination airport in
#' \code{nycflights13::flights} and plots every airport at its
#' longitude/latitude position.
#'
#' @return A ggplot object: a scatter of airports by longitude/latitude.
#' @importFrom stats na.omit
#' @export visualize_airport_delays
#'
visualize_airport_delays <- function(){
  flights <- nycflights13::flights
  airports <- nycflights13::airports
  # Drop incomplete rows BEFORE aggregating. The original computed
  # na.omit(flights) but then summarised the raw `flights`, so the
  # filtering had no effect and any group containing an NA arr_delay
  # averaged to NA.
  data_flights <- na.omit(flights)
  data_flights <- dplyr::summarise(dplyr::group_by(data_flights, dest),
                                   delay = mean(arr_delay))
  # Keep only destinations that have airport metadata (coordinates).
  data_airports <- dplyr::inner_join(airports, data_flights,
                                     by = c("faa" = "dest"))
  # Map columns directly in aes() instead of `data_airports$...`, which
  # bypasses ggplot's data masking.
  ggplot2::ggplot(data_airports, ggplot2::aes(x = lon, y = lat)) +
    ggplot2::geom_point(na.rm = TRUE) + ggplot2::theme_gray() +
    ggplot2::scale_color_gradient(low = "black", high = "#F5F5F5") +
    ggplot2::labs(title = "Flights and Airport",
                  subtitle = "Longitude and Latitude",
                  y = "Latitude", x = "Longitude") +
    ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
}
|
e3c3ed3e23f3066e5a68cb631cfb4a35dbe456f4
|
dd726f4f83fdb6ef8c4a2b7486795da27b1b4fc2
|
/r/2_19제출/CCTV/map_practice/map_practice/Script.R
|
1dbfd9ccb9e85cebd7b6823b3598d28183e638df
|
[] |
no_license
|
mgh3326/big_data_web
|
84890dc72cd0aa1dd49be736ab1c6963611ee4a5
|
f5cae3c710414697a1190ad57469f26dd9c87d8a
|
refs/heads/master
| 2023-02-20T07:28:32.024292
| 2019-09-04T15:49:13
| 2019-09-04T15:49:13
| 119,160,730
| 0
| 1
| null | 2023-02-15T21:30:18
| 2018-01-27T12:02:38
|
HTML
|
WINDOWS-1252
|
R
| false
| false
| 334
|
r
|
Script.R
|
# Plot all CCTV locations in Gangbuk-gu (Seoul) on a Google road map.
# NOTE(review): setwd() and install.packages() are side effects that make a
# script non-portable; consider removing them from reusable code.
setwd("C:\\easy_r")
install.packages("ggmap")
library(ggmap)
gangbuk <- read.csv("project_gangbuk_data.csv", header = T)
# All CCTV in Gangbuk-gu (original comment was Korean text, mojibaked by a
# WINDOWS-1252 re-encoding)
g_m <- get_map("gangbukgu", zoom = 13, maptype = "roadmap")
gang.map <- ggmap(g_m) + geom_point(data = gangbuk, aes(x = LON, y = LAT), size = 2, alpha = 0.7, color = "#980000")
gang.map
|
02ad58a99cd086a7bbbdf8f1f83a398f4a3613d6
|
880aff9580efa6b508db8aab0c16e36fd305acba
|
/NSF_Awards/nsf_awards/ui.R
|
3a10f609990fb2d7f98f57408dde30cf7ddb6fd0
|
[] |
no_license
|
cmwright12/shinyapps
|
5b8e225e10c468fed6376470cd997b6fb22b3eb3
|
69fc60a49f08a9c611c36d730164ae6ad001833b
|
refs/heads/master
| 2021-01-22T01:06:10.450687
| 2017-09-05T16:09:37
| 2017-09-05T16:13:43
| 102,199,480
| 0
| 0
| null | 2017-09-05T16:13:44
| 2017-09-02T13:46:09
|
R
|
UTF-8
|
R
| false
| false
| 1,147
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(tidyr)
# Load the NSF award records once at app startup; drop duplicate abstracts
# and titles, then keep only the columns shown in the UI.
filename <- "Awards.csv"
data <- read.csv(filename, sep=",", header=T, stringsAsFactors=F) %>%
subset(!duplicated(Abstract)) %>%
subset(!duplicated(Title)) %>%
select(AwardNumber, Title, Organization, AwardInstrument, AwardedAmountToDate, Abstract)
#arrange(Abstract) %>%
#mutate(Abstract.Short = substr(Abstract,1,500))
# Number of deduplicated awards -- upper bound for the record selector.
nrows <- nrow(data)
# NOTE(review): the boilerplate comment below is stale; this UI shows one
# award record at a time (selected by row number), not a histogram.
# Define UI for application that draws a histogram
shinyUI(fluidPage(
titlePanel("NSF Awards"),
sidebarLayout(
sidebarPanel(
# Pick which award record (1..nrows) to display.
numericInput("item", "Item number: ", 1, min=1, max=nrows)
#actionButton("buttonNext", "Next")
#actionButton("buttonSave", "Save")
),
mainPanel(
# Output slots -- presumably rendered by the matching server.R (verify).
h2(textOutput("awardnumber")),
h2(textOutput("title")),
h5(textOutput("org")),
h5(textOutput("type")),
h5(textOutput("amount")),
h4(textOutput("abstract"))
)
)
))
|
685fddaeca08b40438851e94c3575c3bad7e30f8
|
bc113c18c979f88158d1e2557efb81bf01f44e35
|
/man-roxygen/roxlate-ml-feature-estimator-transformer.R
|
2529450a0192af62d78aa8cfd9792dd98ed0e903
|
[
"Apache-2.0"
] |
permissive
|
awblocker/sparklyr
|
6524ce9ac1d9c24392cd9e179ca8836851d3c93f
|
18d9df6a6755f8bd10f81721e71c4f818a115084
|
refs/heads/master
| 2020-04-08T09:27:47.362403
| 2018-11-26T20:02:49
| 2018-11-26T20:02:49
| 159,225,148
| 0
| 0
|
Apache-2.0
| 2018-11-26T20:03:28
| 2018-11-26T19:59:20
|
R
|
UTF-8
|
R
| false
| false
| 910
|
r
|
roxlate-ml-feature-estimator-transformer.R
|
#' @param dataset (Optional) A \code{tbl_spark}. If provided, eagerly fit the (estimator)
#' feature "transformer" against \code{dataset}. See details.
#'
#' @details When \code{dataset} is provided for an estimator transformer, the function
#' internally calls \code{ml_fit()} against \code{dataset}. Hence, the methods for
#' \code{spark_connection} and \code{ml_pipeline} will then return a \code{ml_transformer}
#' and a \code{ml_pipeline} with a \code{ml_transformer} appended, respectively. When
#' \code{x} is a \code{tbl_spark}, the estimator will be fit against \code{dataset} before
#' transforming \code{x}.
#'
#' When \code{dataset} is not specified, the constructor returns a \code{ml_estimator}, and,
#' in the case where \code{x} is a \code{tbl_spark}, the estimator fits against \code{x} then
#' to obtain a transformer, which is then immediately used to transform \code{x}.
|
2d8efd6f45e57e4e752198df159e36939f2395fe
|
4974323011c90a93fdaeb44cd0b50d3b8ba605a9
|
/cachematrix.R
|
846df7c96d253ebf8efa20f2e88acb0fd0bbd338
|
[] |
no_license
|
ricardocarvalhods/ProgrammingAssignment2
|
ed01a5414ad46a44291a7679bbc4c51550ff8bf4
|
ad837edaca837bb33bc1a972fa03ccf3c3e3c49e
|
refs/heads/master
| 2021-05-28T11:09:05.136713
| 2014-04-28T01:32:23
| 2014-04-28T01:32:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,688
|
r
|
cachematrix.R
|
## This file contains functions that manipulates matrix and its
## inverse and enables caching the inverse to avoid recomputation
## This function take a matrix as input and creates a list with
## function variables that enables getting/setting the matrix and
## its inverse
makeCacheMatrix <- function(x = matrix()) {
## Initializes inverse of the CacheMatrix
i <- NULL
## Setter for the CacheMatrix
set <- function(y) {
x <<- y
i <<- NULL
}
## Getter for the CacheMatrix
get <- function() x
## Setter for the inverse of the CacheMatrix
setinverse <- function(inverse) i <<- inverse
## Getter for the inverse of the CacheMatrix
getinverse <- function() i
## Creates list that is returned
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function takes a list created with the makeCacheMatrix
## function above and verifies if the inverse of the matrix used
## as the input of the makeCacheMatrix function was already computated:
## if positive, it returns the inverse already calculated saved in cache,
## otherwise, it calculates the inverse and saves it in cache
cacheSolve <- function(x, ...) {
## Gets inverse of the CacheMatrix
i <- x$getinverse()
## Verifies if the inverse exists, if so, returns its cached value
if(!is.null(i)) {
message("getting cached data")
return(i)
}
## If inverse of CacheMatrix does not exists:
## Gets CacheMatrix
data <- x$get()
## Calculates inverse of CacheMatrix
i <- solve(data, ...)
## Stores inverse of CacheMatrix in cache
x$setinverse(i)
## Return a matrix that is the inverse of 'x'
i
}
|
fa1950994524d1ae6d43165b8267224b3504c29d
|
901331f01a6cd4ebdba4e25f30fd4a36da924191
|
/man/objfc.multipic.Rd
|
90f50d678e79aa0e2c8496e3345f13a0838475ea
|
[] |
no_license
|
priscillafialho/mopt
|
f49a26add6ef11096fc97bf6ea89d1cb2d7cc29d
|
6b7fc8124a56a0239225296114ff6128cf9b4a40
|
refs/heads/master
| 2021-01-22T16:37:34.838400
| 2014-10-22T23:11:01
| 2014-10-22T23:11:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
rd
|
objfc.multipic.Rd
|
\name{objfc.multipic}
\alias{objfc.multipic}
\title{create an objective function n dimensions and
k local maximums, returns maximum and function}
\usage{
objfc.multipic(n, k, x.max)
}
\description{
create an objective function n dimensions and k local
maximums, returns maximum and function
}
\examples{
x.max = runif(10)
myfunc <- objfc.multipic(10,25,x.max=x.max)
myfunc(x.max)
}
|
c6e85b32309660e6d0f15e5868715e71f295f37e
|
dbec26938945a66e2ea26308f04cde7fec7ce198
|
/Visa Project/global.R
|
fe0ba5ee5360242733d6c127b3dc7018340d2f9d
|
[] |
no_license
|
nmaloof/ShinyVisas
|
b283f0543c7f87c58261d07c008fcc8098ee0437
|
38631bebe562ea4bb6e458a3ea18ce099bc62139
|
refs/heads/master
| 2021-07-10T21:11:56.673678
| 2017-10-14T03:08:28
| 2017-10-14T03:08:28
| 106,484,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
global.R
|
library(shiny)
library(shinydashboard)
library(plotly)
library(googleVis)
|
8ab1a4e92edab70be46399f7ecf599a682631abd
|
d7d0182f961c37c9cf268e240d06b35f5fd36a5c
|
/cachematrix.R
|
5cd9f636b873365fdc356056069a7613b59604f3
|
[] |
no_license
|
djarman/ProgrammingAssignment2
|
4c398058148edfb258737ed1297006c4c861d757
|
ced2a059d119077e565c5f7facc9aa820e9a1d2e
|
refs/heads/master
| 2021-01-14T08:54:33.227622
| 2014-10-20T18:13:50
| 2014-10-20T18:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,600
|
r
|
cachematrix.R
|
## Devon Jarman
## Coursera: R Programming
## Week 3
## Solution to Programming Assignment 2
## 1. `makeCacheMatrix`: This function creates a special 'cacheMatrix' object
## that can cache its inverse.
## 2. `cacheSolve`: This function computes the inverse of the special
## 'cacheMatrix' returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` will retrieve the inverse from the cache.
##
## This function creates a special 'cacheMatrix' object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
## 'x' is a square matrix.
## return 'cacheMatrix' object.
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special 'cacheMatrix' returned by `makeCacheMatrix`.
## If the inverse has already been calculated (and the matrix has not changed), then
## `cacheSolve` will retrieve the inverse from the cache. Computing the inverse of a square matrix
## is done with the `solve` function (assumes that the matrix supplied is always invertible).
cacheSolve <- function(x, ...) {
## 'x' is a 'cacheMatrix' object.
## return inverse of 'cacheMatrix'.
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
f62c5b3bd463fca42a9ea6511881ed2d4d424107
|
1256464f6234f9a9ff380a4b4739142201655f36
|
/man/get_GM_network.Rd
|
1cd9ddee2e51e1a3ba462fbd01cc2f25befe43ca
|
[] |
no_license
|
hclimente/martini
|
6dd8a6c13454e739171d82d51b82a133a22b3ee0
|
544b8dd6762f5ede704a9471b940dba258ede8ed
|
refs/heads/master
| 2023-02-01T00:05:37.682562
| 2023-01-11T08:20:52
| 2023-01-11T08:20:52
| 87,456,798
| 4
| 2
| null | 2023-01-11T08:20:54
| 2017-04-06T17:31:13
|
R
|
UTF-8
|
R
| false
| true
| 1,608
|
rd
|
get_GM_network.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/networks.R
\name{get_GM_network}
\alias{get_GM_network}
\title{Get gene membership network.}
\usage{
get_GM_network(
gwas,
organism = 9606,
snpMapping = snp2ensembl(gwas, organism),
col_genes = c("snp", "gene")
)
}
\arguments{
\item{gwas}{A SnpMatrix object with the GWAS information.}
\item{organism}{Tax ID of the studied organism. The default is 9606 (human).}
\item{snpMapping}{A data.frame informing how SNPs map to genes. It contains
minimum two columns: SNP id and a gene it maps to. Each row corresponds to
one gene-SNP mapping. Unless column names are specified using
\code{col_genes}, involved columns must be named \code{'snp'} and
\code{'gene'}.}
\item{col_genes}{Optional, length-2 character vector with the names of the
two columns involving the SNP-gene mapping. The first element is the column
of the SNP, and the second is the column of the gene.}
}
\value{
An igraph network of the GM network of the SNPs.
}
\description{
Creates a network of SNPs where each SNP is connected as in the
\link[=get_GS_network]{GS} network and, in addition, to all the other SNPs
pertaining to the same gene. Corresponds to the gene membership (GM) network
described by Azencott et al.
}
\examples{
get_GM_network(minigwas, snpMapping = minisnpMapping)
}
\references{
Azencott, C. A., Grimm, D., Sugiyama, M., Kawahara, Y., &
Borgwardt, K. M. (2013). Efficient network-guided multi-locus association
mapping with graph cuts. Bioinformatics, 29(13), 171-179.
\url{https://doi.org/10.1093/bioinformatics/btt238}
}
|
b416b0d59aac6789e183bbd5920e30887bbdd950
|
6e22438c19b27043c4b3a188d663b37df935d6cd
|
/src/analysis/bee/RF_analysis_plots_bees.R
|
ecdd5d299cefa6cc2ed78b100d212721199393ef
|
[] |
no_license
|
vitorpavinato/Tracking-selection
|
fc42a930dee0e251123f539473091310dc9a1521
|
d05f49f81932ef0d45e288f1be1906145d84a3fc
|
refs/heads/master
| 2022-11-01T18:01:36.226996
| 2022-10-24T14:14:04
| 2022-10-24T14:14:04
| 113,848,735
| 0
| 2
| null | 2022-05-18T13:35:41
| 2017-12-11T11:04:57
|
R
|
UTF-8
|
R
| false
| false
| 13,634
|
r
|
RF_analysis_plots_bees.R
|
########################################
## Manuscript plots ##
## Application ##
########################################
library(gtools)
## DENSITY PLOTS
##---------------------------------------
## JOINT INFERENCE OF DEMOGRAPY AND SELECTION
##--------------------------------------------
# LOAD LIST OF VECTOR OF PRIORS
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logthetaPS",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logncs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logmeanNe2",".RData"))
# LOAD LIST OF VECTORS OF POSTERIORS
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logthetaPS",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logncs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logmeanNe2",".RData"))
## SELECTION ONLY
##-------------------
# LOAD LIST OF VECTOR OF PRIORS
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logPrPs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logitpopstrongmsel",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_loggammamean",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logpopstrongselmean",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_averageGenLoad",".RData"))
# LOAD LIST OF VECTORS OF POSTERIORS
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logPrPs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_popstrongmsel",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_loggammamean",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_popstrongselmean",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_averageGenLoad",".RData"))
population.vector <- c("Avalon","Humboldt","Davis","Stanislaus","Stebbins","Riverside","Platerita")
color.vector <- c("black", "red", "green", "blue", "cyan", "#FFDB58", "orange")
# FUNCTION TO PRODUCE THE PLOTS - LOG and LOGIT SCALE
plot.mult.densities <- function(list.prior=NULL, list.posterior=NULL, par.name=NULL,
col.vect=NULL, pop.vect=NULL, y_lim=c(0,0.5), cex_axis = 1.2, cex_lab = 1.2,
plot.legend=TRUE, legend.side="topright")
{
xlim.min = min(sapply(list.prior, min))
xlim.max = min(sapply(list.prior, max))
plot(density(list.prior[[1]]), lty=3, col=col.vect[1], xlim=c(xlim.min, xlim.max), ylim=y_lim,
main = "", ylab = "Density", xlab = par.name, cex.axis = cex_axis, cex.lab = cex_lab)
lines(density(list.prior[[1]], weights = list.posterior[[1]]$weights), col=col.vect[1])
for (p in 2:length(list.prior))
{
lines(density(list.prior[[p]]),lty=3, col=col.vect[p])
lines(density(list.prior[[p]], weights = list.posterior[[p]]$weights), col=col.vect[p])
}
if (plot.legend) {
legend(legend.side, col = c(rep(col.vect[1],2),col.vect[1:7]), lty = c(3,1,rep(1,7)), cex = 1.4,
legend = c("prior", "posterior", pop.vect), box.lwd = 0, box.col = NULL, bg = NULL)
}
}
plot.mult.densities.orig <- function(list.prior=NULL, list.posterior=NULL, par.name=NULL,
col.vect=NULL, pop.vect=NULL, y_lim=c(0,0.5), cex_axis = 1.2, cex_lab = 1.2,
plot.legend=TRUE, legend.side="topright", fromScale="logit")
{
if (fromScale=="logit")
{
xlim.min = min(inv.logit(sapply(list.prior, min)))
xlim.max = min(inv.logit(sapply(list.prior, max)))
plot(density(inv.logit(list.prior[[1]])), lty=3, col=col.vect[1], xlim=c(xlim.min, xlim.max), ylim=y_lim,
main = "", ylab = "Density", xlab = par.name, cex.axis = cex_axis, cex.lab = cex_lab)
lines(density(inv.logit(list.prior[[1]]), weights = list.posterior[[1]]$weights), col=col.vect[1])
for (p in 2:length(list.prior))
{
lines(density(inv.logit(list.prior[[p]])),lty=3, col=col.vect[p])
lines(density(inv.logit(list.prior[[p]]), weights = list.posterior[[p]]$weights), col=col.vect[p])
}
} else {
xlim.min = min(10^(sapply(list.prior, min)))
xlim.max = min(10^(sapply(list.prior, max)))
plot(density(10^(list.prior[[1]])), lty=3, col=col.vect[1], xlim=c(xlim.min, xlim.max), ylim=y_lim,
main = "", ylab = "Density", xlab = par.name, cex.axis = cex_axis, cex.lab = cex_lab)
lines(density(10^(list.prior[[1]]), weights = list.posterior[[1]]$weights), col=col.vect[1])
for (p in 2:length(list.prior))
{
lines(density(10^(list.prior[[p]])),lty=3, col=col.vect[p])
lines(density(10^(list.prior[[p]]), weights = list.posterior[[p]]$weights), col=col.vect[p])
}
}
if (plot.legend) {
legend(legend.side, col = c(rep(col.vect[1],2),col.vect[1:7]), lty = c(3,1,rep(1,7)), cex = 1.4,
legend = c("prior", "posterior", pop.vect), box.lwd = 0,box.col = NULL, bg = NULL)
}
}
## MANUSCRIPT PLOT - BEES JOINT INFERENCE
pdf(file = "joint.pdf", height = 5.5, width = 15.85)
par(mar=c(5,5,4,1)+.1, mfrow=c(1,3))
# THETA PS
plot.mult.densities(list.prior = list.logthetaPS,
list.posterior = list.posterior.logthetaPS,
par.name = expression(log[10](italic(theta)[b])),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,0.4),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = TRUE, legend.side = "topleft")
# N
plot.mult.densities(list.prior = list.logncs,
list.posterior = list.posterior.logncs,
par.name = expression(log[10](italic(N))),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,1.5),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE)
# NE
plot.mult.densities(list.prior = list.logmeanNe2,
list.posterior = list.posterior.logmeanNe2,
par.name = expression(log[10](italic(N)[e])),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,1.5),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE)
dev.off()
## SELECTION ONLY
##-----------------
pdf(file = "selection.pdf", height = 11.00, width = 8.5)
par(mar=c(5,5,4,1)+.1, mfrow=c(3,2))
# PrPs - parameter
plot.mult.densities(list.prior = list.logPrPs,
list.posterior = list.posterior.logPrPs,
par.name = expression(log[10](italic(P)[R] * italic(P)[S])),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,0.5),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = TRUE,
legend.side = "topleft")
# NUMBER OF STRONGLY SELECTED MUTATIONS
plot.mult.densities(list.prior = list.logitpopstrongmsel,
list.posterior = list.posterior.popstrongmsel,
par.name = expression(logit(italic(P))),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,0.4),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE)
# GAMMA MEAN - Parameter
plot.mult.densities(list.prior = list.loggammamean,
list.posterior = list.posterior.loggammamean,
par.name = expression(log[10](italic(gamma))),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,0.6),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE)
# GAMMA STRONG SELECTION
#plot.mult.densities(list.prior = list.logpopstrongselmean,
# list.posterior = list.posterior.popstrongselmean,
# par.name = expression(log[10](italic(bar(s)))),
# col.vect = color.vector,
# pop.vect = population.vector,
# y_lim = c(0,1.2),
# cex_axis = 1.6, cex_lab = 1.8,
# plot.legend = FALSE)
plot.mult.densities.orig(list.prior = list.logpopstrongselmean,
list.posterior = list.posterior.popstrongselmean,
par.name = expression(italic(bar(s))),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,4),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE,
fromScale = "log")
# AVERAGE GENETIC LOAD
#plot.mult.densities(list.prior = list.averageGenLoad,
# list.posterior = list.posterior.averageGenLoad,
# par.name = expression(log[10](italic(L))),
# col.vect = color.vector,
# pop.vect = population.vector,
# y_lim = c(0,0.5),
# cex_axis = 1.6, cex_lab = 1.8,
# plot.legend = FALSE)
plot.mult.densities.orig(list.prior = list.averageGenLoad,
list.posterior = list.posterior.averageGenLoad,
par.name = expression(italic(L)),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,7),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE,
fromScale = "logit")
dev.off()
## DEMOGRAPHY ONLY
##--------------------
# LOAD LIST OF VECTOR OF PRIORS
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logmeanNe2ncs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logmeanNe2ncs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logmu",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/random_forests/list_vector_logrr",".RData"))
## LOAD LIST OF VECTORS OF POSTERIORS
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logmeanNe2ncs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logmeanNe2ncs",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logmu",".RData"))
#load(file=paste0("~/My_repositories/Tracking-selection/results/pipeline_v6_bees/posterior_obs/list_posterior_logrr",".RData"))
pdf(file = "demography.pdf", height = 11.00, width = 8.5)
par(mar=c(5,5,4,1)+.1, mfrow=c(2,2))
# mu
plot.mult.densities(list.prior = list.logmu,
list.posterior = list.posterior.logmu,
par.name = expression(log[10](italic(mu))),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,2.5),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = TRUE,
legend.side = "topleft")
# c0
plot.mult.densities(list.prior = list.logrr,
list.posterior = list.posterior.logrr,
par.name = expression(log[10](italic(c)[0])),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,1.5),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE)
# Ne/N
#plot.mult.densities(list.prior = list.logmeanNe2ncs,
# list.posterior = list.posterior.logmeanNe2ncs,
# par.name = expression(log[10](italic(N)[e]/italic(N))),
# col.vect = color.vector,
# pop.vect = population.vector,
# y_lim = c(0,12.0),
# cex_axis = 1.6, cex_lab = 1.8,
# plot.legend = FALSE)
plot.mult.densities.orig(list.prior = list.logmeanNe2ncs,
list.posterior = list.posterior.logmeanNe2ncs,
par.name = expression(italic(N)[e]/italic(N)),
col.vect = color.vector,
pop.vect = population.vector,
y_lim = c(0,12),
cex_axis = 1.6, cex_lab = 1.8,
plot.legend = FALSE,
fromScale = "log")
dev.off()
|
5ad31e9e02b73d7f45a8a569ba1f3ff9b0d08200
|
24851be32893bfb1027b2a33164ef515fc4fb76b
|
/code/plotting/OLD/plotics.r
|
bfbb054f085578f356a2fad7836441a713df07b1
|
[] |
no_license
|
qdread/forestlight
|
acce22a6add7ab4b84957d3e17d739158e79e9ab
|
540b7f0a93e2b7f5cd21d79b8c8874935d3adff0
|
refs/heads/master
| 2022-12-14T03:27:57.914726
| 2022-12-01T23:43:10
| 2022-12-01T23:43:10
| 73,484,133
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,315
|
r
|
plotics.r
|
# Visualize the information criteria
# Edit 15 Aug: use new data and include new file paths
ics <- read.csv('~/google_drive/ForestLight/data/data_forplotting_aug2018/ics_by_fg.csv', stringsAsFactors = FALSE)
# Midsize
#ics <- read.csv('C:/Users/Q/Dropbox/projects/forestlight/ics_by_fg_midsizetrees.csv', stringsAsFactors = FALSE)
library(dplyr)
library(ggplot2)
ics %>%
filter(criterion == 'LOOIC', year == 1995, variable == 'production') %>%
ggplot(aes(x = interaction(dens_model, prod_model), y = ic)) +
geom_point() +
facet_wrap(~ fg, scales='free')
ics %>%
filter(criterion == 'LOOIC', year == 1995, variable == 'density') %>%
ggplot(aes(x = interaction(dens_model, prod_model), y = ic)) +
geom_point() +
facet_wrap(~ fg, scales='free')
ics %>%
filter(criterion == 'WAIC', year == 1995, variable == 'production') %>%
ggplot(aes(x = interaction(dens_model, prod_model), y = ic)) +
geom_point() +
facet_wrap(~ fg, scales='free')
ics %>%
filter(criterion == 'WAIC', year == 1995, variable == 'density') %>%
ggplot(aes(x = interaction(dens_model, prod_model), y = ic)) +
geom_point() +
facet_wrap(~ fg, scales='free')
# Summarize the ics
ic_production <- ics %>%
filter(criterion == 'LOOIC', variable == 'production') %>%
group_by(fg, year, prod_model) %>%
summarize(LOOIC = mean(ic))
ic_density <- ics %>%
filter(criterion == 'LOOIC', variable == 'density') %>%
group_by(fg, year, dens_model) %>%
summarize(LOOIC = mean(ic))
library(reshape2)
ic_production_cast <- dcast(ic_production, fg + year ~ prod_model) %>%
mutate(deltaLOOIC = power - exp)
ic_density_cast <- dcast(ic_density, fg + year ~ dens_model) %>%
mutate(deltaLOOIC = pareto - weibull)
write.csv(ic_production_cast, file = 'C:/Users/Q/google_drive/ForestLight/data/summarytables_12apr2018/LOOIC_production.csv', row.names = FALSE)
write.csv(ic_density_cast, file = 'C:/Users/Q/google_drive/ForestLight/data/summarytables_12apr2018/LOOIC_density.csv', row.names = FALSE)
# Midsize
write.csv(ic_production_cast, file = 'C:/Users/Q/google_drive/ForestLight/data/summarytables_12apr2018/LOOIC_production_midsize.csv', row.names = FALSE)
write.csv(ic_density_cast, file = 'C:/Users/Q/google_drive/ForestLight/data/summarytables_12apr2018/LOOIC_density_midsize.csv', row.names = FALSE)
|
5725f280552beb36a3c0d6dbccbcbaaa9cb51e0c
|
772e23d19ad10ddd4a6bb3ed98354ce0fe7f06fa
|
/plot4.R
|
7e6cd4b7f85494a71bb8078560415c5ab8b8d19e
|
[] |
no_license
|
MarcelaGuevara/ExData_Plotting1
|
40fd17d2d04c73674f69e97a318850a22bc34ea2
|
566fba1a9a8471a5e19fc90b294f2b9e7396f24b
|
refs/heads/master
| 2022-12-11T08:43:04.688862
| 2020-09-10T04:25:40
| 2020-09-10T04:25:40
| 294,159,442
| 0
| 0
| null | 2020-09-09T15:50:39
| 2020-09-09T15:50:38
| null |
UTF-8
|
R
| false
| false
| 1,701
|
r
|
plot4.R
|
library(dplyr)
#Read the data
unzip("./exdata_data_household_power_consumption.zip")
data=read.csv("./household_power_consumption.txt",sep = ";",nrows = 100000)
#Filter the dates we are going to use for the analysis
data=filter(data,Date=="1/2/2007" | Date=="2/2/2007")
#Transform into one column with date and time
data=mutate(data, DateTime=paste(data$Date,data$Time))
#Delete the Date and Time columns
data=select(data,Global_active_power:DateTime)
#Transform the "DateTime" column to Date/Time format
data$DateTime=strptime(data$DateTime, format = "%d/%m/%Y %H:%M:%S")
#Set the number of plots by row and column
par(mfrow=c(2,2),mar=c(2.5,5,1,1))
#Plot 1
plot(data$DateTime,data$Global_active_power, type="n", xlab = "", ylab ="Global Active Power (kilowatts)")
lines(data$DateTime,data$Global_active_power,lwd=1)
#Plot 2
plot(data$DateTime,data$Voltage, type="n", xlab = "datetime", ylab ="Voltage" )
lines(data$DateTime,data$Voltage,lwd=1)
#Plot 3
plot(data$DateTime,data$Sub_metering_1, type="n", xlab = "", ylab = "Energy sub metering")
lines(data$DateTime,data$Sub_metering_1,lwd=1,col="black")
lines(data$DateTime,data$Sub_metering_2,lwd=1, col="red")
lines(data$DateTime,data$Sub_metering_3,lwd=1, col="blue")
legend("topright",legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1,lwd=1.5,cex=0.8, pt.cex(2),bty="n")
#Plot 4
plot(data$DateTime,data$Global_reactive_power, type="n", xlab = "datetime", ylab ="Global_reative_power" )
lines(data$DateTime,data$Global_reactive_power,lwd=1)
#Export the Histogram as a png file
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
|
b5ef50e6ac3a4624a876bd306c47d0201422278f
|
66d0e5ad41a55ef019de7c3e370b8de64dd1da44
|
/inst/doc/graph_comparisons_4.R
|
3adcc5ca7a705dab2d324caf295c3ba30779e791
|
[] |
no_license
|
paigemaroni/graph4lg
|
29bce7f42756267c791df74280fe1614b30e7e7b
|
1687c04b05c2125c0217d57dfc29618d4229a0a3
|
refs/heads/master
| 2023-06-02T20:47:22.424437
| 2021-06-22T00:05:48
| 2021-06-22T00:05:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,417
|
r
|
graph_comparisons_4.R
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(graph4lg)
library(igraph)
## ---- echo = FALSE, eval = TRUE-----------------------------------------------
data("data_tuto")
mat_dps <- data_tuto[[1]]
mat_pg <- data_tuto[[2]]
graph_ci <- data_tuto[[3]]
dmc <- data_tuto[[4]]
land_graph <- data_tuto[[5]]
mat_ld <- data_tuto[[6]]
## -----------------------------------------------------------------------------
land_graph <- gen_graph_topo(mat_w = mat_ld,
mat_topo = mat_ld,
topo = "comp")
# Plot the histogram of its link weights
plot_w_hist(graph = land_graph)
## -----------------------------------------------------------------------------
miw_lg <- compute_node_metric(graph = land_graph, metrics = "miw")
head(miw_lg)
## -----------------------------------------------------------------------------
land_graph <- add_nodes_attr(graph = land_graph,
input = "df",
data = miw_lg,
index = "ID")
## ---- eval = FALSE------------------------------------------------------------
# mat_dps <- mat_gen_dist(x = data_simul_genind, dist = "DPS")
## -----------------------------------------------------------------------------
gen_comp_graph <- gen_graph_topo(mat_w = mat_dps,
mat_topo = mat_dps,
topo = "comp")
## -----------------------------------------------------------------------------
plot_w_hist(graph = gen_comp_graph,
fill = "darkblue")
## -----------------------------------------------------------------------------
miw_comp <- compute_node_metric(graph = gen_comp_graph, metrics = "miw")
gen_comp_graph <- add_nodes_attr(graph = gen_comp_graph,
input = "df",
data = miw_comp,
index = "ID")
## -----------------------------------------------------------------------------
graph_node_compar(x = land_graph, y = gen_comp_graph,
metrics = c("miw", "miw"), method = "spearman",
weight = TRUE, test = TRUE)
## -----------------------------------------------------------------------------
mat_geo <- mat_geo_dist(data = pts_pop_simul,
ID = "ID", x = "x", y = "y")
mat_geo <- reorder_mat(mat_geo, order = row.names(mat_dps))
gen_gab_graph <- gen_graph_topo(mat_w = mat_dps,
mat_topo = mat_geo,
topo = "gabriel")
# Associate the values of miw from the complete graph to this graph
gen_gab_graph <- add_nodes_attr(gen_gab_graph,
data = miw_comp,
index = "ID")
# Plot the graph with node sizes proportional to MIW
plot_graph_lg(graph = gen_gab_graph,
crds = pts_pop_simul,
mode = "spatial",
node_size = "miw",
link_width = "inv_w")
## -----------------------------------------------------------------------------
land_graph_thr <- gen_graph_thr(mat_w = mat_ld, mat_thr = mat_ld,
thr = 2000, mode = "larger")
plot_graph_lg(land_graph_thr,
mode = "spatial",
crds = pts_pop_simul,
link_width = "inv_w",
pts_col = "#80C342")
## -----------------------------------------------------------------------------
graph_topo_compar(obs_graph = land_graph_thr,
pred_graph = gen_gab_graph,
mode = "mcc",
directed = FALSE)
## -----------------------------------------------------------------------------
graph_plot_compar(x = land_graph_thr, y = gen_gab_graph,
crds = pts_pop_simul)
## -----------------------------------------------------------------------------
graph_modul_compar(x = land_graph_thr,
y = gen_gab_graph)
## -----------------------------------------------------------------------------
module_land <- compute_graph_modul(graph = land_graph_thr,
algo = "fast_greedy",
node_inter = "distance")
land_graph_thr <- add_nodes_attr(graph = land_graph_thr,
data = module_land,
index = "ID")
module_gen <- compute_graph_modul(graph = gen_gab_graph,
algo = "fast_greedy",
node_inter = "distance")
gen_gab_graph <- add_nodes_attr(graph = gen_gab_graph,
data = module_gen,
index = "ID")
## -----------------------------------------------------------------------------
plot_graph_lg(graph = land_graph_thr,
mode = "spatial",
crds = pts_pop_simul,
module = "module")
## -----------------------------------------------------------------------------
plot_graph_lg(graph = gen_gab_graph,
mode = "spatial",
crds = pts_pop_simul,
module = "module")
|
d6caacc32c3e75919a9f27c2793ffb74b4eb0c31
|
761b4aaa1cd79d422ae960028578f180637d7a09
|
/R/sv.R
|
25f83ed4ac2c6e14ecc3c267b82c48a333594547
|
[] |
no_license
|
cran/dhglm
|
279ee39c3be14c587aabb9a7b7bc3859d94f445e
|
a83ad49e7074126dded7395a6fb613cc0c441a02
|
refs/heads/master
| 2020-06-04T04:34:41.668052
| 2018-10-25T07:30:03
| 2018-10-25T07:30:03
| 17,695,491
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
r
|
sv.R
|
sv <-
function(RespDist="gaussian",BinomialDen=NULL, DataMain, MeanModel,DispersionModel,
PhiFix=NULL,LamFix=NULL,mord=0,dord=1,REML=TRUE,Maxiter=200,convergence=1e-02,Iter_mean=3) {
n<-nrow(DataMain)
phi<-matrix(1,n,1)
lambda<-matrix(1,n,1)
tau<-matrix(1,n,1)
date<-matrix(c(1:n),n,1)
DataMain<-data.frame(cbind(DataMain,phi,lambda,tau,date))
res<-dhglmfit_run_sv(RespDist=RespDist,BinomialDen=BinomialDen, DataMain=DataMain, MeanModel=MeanModel,
DispersionModel=DispersionModel,PhiFix=PhiFix,LamFix=LamFix,mord=mord,dord=dord,REML=REML,
Maxiter=Maxiter,convergence=convergence,Iter_mean=Iter_mean)
return(res)
}
|
0c95769c6747b987e33b1151010427e1a65988ec
|
15b2666efdeade833221c1b2ae1f2c97db7ed010
|
/MegaCorr_GenerateImputedValues_ADRC_kNN.R
|
5efacaaf036a5bf9c07289bdc83a8f35ec015e3e
|
[] |
no_license
|
jwisch/BiomarkerClustering
|
a819b5ff8bbe3844ed56a9fcdcfeb9303ae47430
|
4b56efd9bcea4f595d36040f6dd19fe585457325
|
refs/heads/master
| 2020-12-21T21:23:40.870520
| 2020-01-27T18:54:57
| 2020-01-27T18:54:57
| 236,566,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,735
|
r
|
MegaCorr_GenerateImputedValues_ADRC_kNN.R
|
## Impute missing ADRC biomarker values with kNN, compare the Spearman
## correlation structure before vs. after imputation, and write out a
## z-scaled imputed data set.
library(corrplot)
library(mice)
library(UpSetR)
library(tableone)
library(dplyr)  # BUG FIX: %>% and mutate_at (used at the bottom of this
                # script) come from dplyr, which was never attached.
FILEPATH_DATA<-"C:/Users/julie.wisch/Documents/MegaCorr/"
df<-read.csv(paste(FILEPATH_DATA, "ADRC_cleaned.csv", sep = ""))
df$NFL<-log(df$NFL)  # log-transform NFL before correlating
#####################################################################
#Visualizing - checking for correlations between all the measures
# Columns 18:35 hold the biomarker measures -- TODO confirm against the CSV.
M<-df[0, 18:35]
colnames(M)[10:11]<-c("AV45", "PIB")
p<-M
# Pairwise Spearman correlation estimates (M) and p-values (p).
for(i in 18:35){
  for(j in 18:35){
    M[i-17, j-17]<- cor.test(df[,i], df[,j], na.rm = TRUE, method = "spearman")$estimate
    p[i-17, j-17]<- cor.test(df[,i], df[,j], na.rm = TRUE, method = "spearman")$p.value
  }
}
row.names(M)<-names(M)
#This creates a correlation plot for ONLY the real values that we have. Skips missing values. Uses spearman.
corrplot(as.matrix(M), order = "hclust", p.mat = as.matrix(p), insig = "blank", type = "upper")
rm(M, p, i, j)
#####################################################################
#####################################################################
#Visualizing what we've got....
# Binary missingness indicator matrix (1 = observed, 0 = missing) for UpSet.
VENN<-df[,c(18:35)]
VENN[!is.na(VENN)] <- 1
VENN[is.na(VENN)] <- 0
VENN<-data.frame(cbind(df$ID, VENN))
colnames(VENN)[1]<-"ID"
upset(VENN, nsets = 18, mainbar.y.label = "Number of Participants", sets.x.label = "Measurement Counts", order.by = "freq")
# APOE e4 carrier flag: genotypes without a 4 allele are coded 0.
df$apoe4<-ifelse(df$apoe == "22" | df$apoe == "23" | df$apoe == "33", 0, 1)
CreateTableOne(vars = c("AgeatLP", "GENDER", "EDUC", "apoe4", "race2"),
               data = df, factorVars = c("GENDER", "apoe4", "race2"))
rm(VENN)
#####################################################################
library(bnstruct)
md.pairs(df[,c(8, 18:35)])$rr
#Getting the proportion of usable cases for predictions
p <- md.pairs(df[,c(8, 18:35)])
round(p$mr/(p$mr + p$mm), 3)
df.imputed<-df[,18:35]
library(DMwR)
# Two-stage kNN: impute the better-covered columns first, then reuse them
# to impute the remaining two columns (positions 12:13 of df.imputed).
knnOutput <- knnImputation(df.imputed[,c(1:11, 14:18)]) # perform knn imputation.
df.imputed<-data.frame(cbind(knnOutput[,1:11], df.imputed[,12:13], knnOutput[,12:16]))
df.imputed.knn <- knnImputation(df.imputed)
df.imputed.knn<-data.frame(cbind(df[,1:17], df.imputed.knn, df[,36]))
#Now creating a corrplot for the imputed values
#Visualizing - checking for correlations between all the measures
M<-df.imputed.knn[0, 18:35]
colnames(M)[10:11]<-c("AV45", "PIB")
p<-M
for(i in 18:35){
  for(j in 18:35){
    M[i-17, j-17]<- cor.test(df.imputed.knn[,i], df.imputed.knn[,j], na.rm = TRUE, method = "spearman")$estimate
    p[i-17, j-17]<- cor.test(df.imputed.knn[,i], df.imputed.knn[,j], na.rm = TRUE, method = "spearman")$p.value
  }
}
row.names(M)<-names(M)
#This creates a correlation plot for ONLY the real values that we have. Skips missing values. Uses spearman.
corrplot(as.matrix(M), order = "hclust", p.mat = as.matrix(p), insig = "blank", type = "upper")
#GREAT news. looks pretty much the same, except that some of the correlations are stronger
rm(M, p, i, j)
df.imputed.scaled<-df.imputed.knn
# Column-wise z-scaling; note `sd(x, na.rm)` passes na.rm positionally.
scale2 <- function(x, na.rm = FALSE) (x - mean(x, na.rm = na.rm)) / sd(x, na.rm)
df.imputed.scaled<-df.imputed.scaled %>% mutate_at(names(df.imputed.scaled[c(18:29, 33)]), scale2)
write.csv(df.imputed.scaled, paste(FILEPATH_DATA, "ImputedValues_ADRC_knn.csv", sep = ""), row.names = FALSE)
##########################################################################################################
##########################################################################################################
##########################################################################################################
##########################################################################################################
|
f9ab1e520b4ab8b33e692418db8aec6229047a10
|
eadbf21c897318c61d7d8a0f74ba5a6c90a49ce7
|
/man/padova.Rd
|
1f580b5d470ce593b60849f24357aa7b67084087
|
[] |
no_license
|
ycroissant/descstat
|
9b6c89f12fe5d6993ea1c36b5e763370582feea1
|
99d2bf54c959fe8d7b11a1971eec2072f3681a61
|
refs/heads/master
| 2023-03-21T03:29:40.542496
| 2021-03-17T05:28:05
| 2021-03-17T05:28:05
| 316,519,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,130
|
rd
|
padova.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{padova}
\alias{padova}
\title{Housing prices in Padova}
\format{
a tibble containing
\itemize{
\item zone : one of the 12 zones of Padova,
\item condition : \code{new} for new housings, \code{ordinary} or \code{good} for old ones,
\item house : dummy for houses,
\item floor : floor,
\item rooms : number of rooms,
\item bathrooms : number of bathrooms,
\item parking : dummy for parkings,
\item energy : energy category for the house (A for the best, G for the worst),
\item area : area of the house in square meters,
\item price : price of the house in thousands of euros.
}
}
\source{
\href{https://www.sciencedirect.com/science/article/pii/S2352340915003224}{Data in Brief}'s website.
}
\description{
This data set documents characteristics (including the prices) of a
sample of housings in Padova.
}
\references{
Bonifaci P, Copiello S
(2015). "Real estate market and building energy performance: Data for a mass appraisal approach."
\emph{Data in Brief}, \emph{5}, 1060-1065. ISSN 2352-3409.
}
\keyword{datasets}
|
df92115bd4bcf52ec2fc124af20a56cb13ddb766
|
a31b1b02270fee9b3291327ac6672efb209c12b5
|
/R/regression_formula.R
|
6349aec2e5400cad7da7cf323134b5250158460a
|
[] |
no_license
|
sukhyun23/tpa
|
e1acabfafee45b701f88da10a3b204baf10259fe
|
db767a8358bb9bc43f4a29ba9fe9a380e545afd6
|
refs/heads/master
| 2021-07-03T18:28:39.391557
| 2020-08-15T05:32:25
| 2020-08-15T05:32:25
| 142,148,911
| 0
| 1
| null | 2018-08-18T12:13:47
| 2018-07-24T11:27:44
|
R
|
UTF-8
|
R
| false
| false
| 1,494
|
r
|
regression_formula.R
|
linear_formula <- function(x) {
  ## Join predictor names with " + " to form the linear part of a formula
  ## string, e.g. c("a", "b") -> "a + b".
  paste(x, collapse = ' + ')
}
poly_formula <- function(x, poly = 2) {
  ## Build polynomial terms I(var^2) ... I(var^poly) for every predictor in
  ## `x` and join them with " + ".  Degree 1 (the linear term) is handled by
  ## linear_formula(), so degrees start at 2.
  degree_terms <- vapply(2:poly,
                         function(p) paste0('I(', x, '^', p, ')'),
                         character(length(x)))
  paste(degree_terms, collapse = ' + ')
}
inter_formula <- function(x) {
  ## All ordered pairs of distinct predictors as two-way interaction terms
  ## ("v1:v2"), joined with " + ".  Both orderings are kept, matching the
  ## original expand.grid-based enumeration.
  grid <- expand.grid(x1 = x, x2 = x, stringsAsFactors = F)
  grid <- grid[grid$x1 != grid$x2, ]
  interaction_terms <- paste(grid$x1, grid$x2, sep = ':')
  paste(interaction_terms, collapse = ' + ')
}
full_formula <- function(x, y, poly = 1, inter = F) {
  ## Build a model formula `y ~ <linear> [+ <polynomial>] [+ <interactions>]`.
  ##
  ## x     : character vector of predictor names
  ## y     : response variable name
  ## poly  : highest polynomial degree; degrees 2..poly are added when > 1
  ## inter : when TRUE, add all pairwise interaction terms
  xf <- linear_formula(x)
  if (poly > 1) {
    ## BUG FIX: was poly_formula(x, 2), which silently ignored the requested
    ## degree and always stopped at quadratic terms.
    xp <- poly_formula(x, poly)
  } else {
    xp <- NULL
  }
  if (inter) {
    xi <- inter_formula(x)
  } else {
    ## BUG FIX: was `xi <- null` (lowercase), an undefined object that made
    ## every call with inter = FALSE error at runtime.
    xi <- NULL
  }
  ## NULL components vanish in c(), so only requested parts appear.
  x_formula <- paste(c(xf, xp, xi), collapse = ' + ')
  as.formula(paste(y, '~', x_formula))
}
revision_formula <- function(x, y) {
  ## Rebuild a formula `y ~ ...` from a vector of term labels, adding back
  ## the base variables implied by polynomial terms (I(var^k)) and by
  ## interaction terms (a:b), de-duplicated, followed by the higher-order
  ## terms themselves.
  poly_pattern <- '^I([[:alpha:][:digit:][:punct:]]{1,})$'
  inter_pattern <- '[[:alpha:][:digit:][:punct:]]{1,}\\:[[:alpha:][:digit:][:punct:]]{1,}'
  is_poly <- grepl(poly_pattern, x)
  is_inter <- grepl(inter_pattern, x)
  xp <- x[is_poly]
  xi <- x[is_inter]
  xf <- x[!is_poly & !is_inter]
  ## Strip the "I(" prefix and "^k)" suffix to recover the base variable.
  base_from_poly <- stringr::str_sub(xp, 3, -4)
  base_from_inter <- unique(unlist(strsplit(xi, ':')))
  xf <- unique(c(xf, base_from_poly, base_from_inter))
  x_formula <- linear_formula(c(xf, xp, xi))
  as.formula(paste(y, '~', x_formula))
}
|
dd6c9a5a5131c2b97af1869d76f4953770ce90b0
|
f57bcbec1f356c30a279d9930d4fce8752b79274
|
/hw4/9.R
|
5eabb0534d2b6e571cb5c71d515f6ddb3f55f33a
|
[] |
no_license
|
Roger7410/R_Data_Mining
|
6867a5da25b266e77c44163476e9f9a9623f46f7
|
7145780a777d53fb256543a5e49117ed0db301a3
|
refs/heads/master
| 2020-09-20T21:59:59.552058
| 2016-08-22T11:53:30
| 2016-08-22T11:53:30
| 66,268,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 428
|
r
|
9.R
|
## Exploratory analysis of the ISLR Auto data set (chapter exercise):
## load, clean, summarise, subset, and plot.  Assumes "Auto.csv" is in the
## working directory; "?" marks missing values in the raw file.
Auto=read.csv("Auto.csv",header=T,na.strings ="?")
fix(Auto)  # NOTE(review): opens an interactive spreadsheet editor
dim(Auto)
Auto=na.omit(Auto)  # drop rows with any missing value
attach(Auto)  # NOTE(review): attach() is discouraged; columns below are
              # accessed with Auto$ anyway
range(Auto$mpg)
# Range / mean / sd of the first 8 (quantitative) columns.
sapply(Auto[, 1:8], range)
sapply(Auto[, 1:8], mean)
sapply(Auto[, 1:8], sd)
# Same summaries with observations 10 through 85 removed.
Auto2<-Auto[-(10:85),]
Auto2
head(Auto2,10)
dim(Auto2)
sapply(Auto2[, 1:8], range)
sapply(Auto2[, 1:8], mean)
sapply(Auto2[, 1:8], sd)
head(Auto)
# Scatterplot matrix and selected pairwise plots.
pairs(Auto)
plot(Auto$mpg,Auto$cylinders)
plot(Auto$mpg, Auto$weight)
pairs(Auto)
|
d57ddc6038df843c81903518dd052b59161afa25
|
9549a7b626118518dab96d3c295fd755ccecfb25
|
/R/mechBiogeo.R
|
e7c76d7509746f0038ce6a50fd001493c798485d
|
[] |
no_license
|
KevCaz/figTalks
|
9848e64f1a808060f6502e7f3ed349e6b96635e3
|
b7802263c727fbb3e4429a7077193498acc797ca
|
refs/heads/master
| 2021-08-29T16:04:31.926426
| 2017-12-14T08:15:26
| 2017-12-14T08:15:26
| 113,661,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,154
|
r
|
mechBiogeo.R
|
##---
myspec <- function(nb, cx, cy, ry, seed = 1987, ...){
  ## Add `nb` random points inside a disc of radius `ry` centred on
  ## (cx, cy) to the current plot.  The radius is drawn uniformly (not
  ## sqrt-transformed), so points are denser near the centre.  Seeding
  ## makes each simulated "species" cluster reproducible across redraws;
  ## `...` is forwarded to points() (pch, col, cex, ...).
  set.seed(seed)
  angle <- runif(nb) * 2 * pi   # direction of each point
  radius <- runif(nb) * ry      # distance from the cluster centre
  points(cx + radius * cos(angle),
         cy + radius * sin(angle),
         ...)
}
##---
"#3fb3b2";
"#ffdd55";
"#c7254e";
"#1b95e0";
"#8555b4";
"#8ddd75";
"#787878";
"#CCCCCC";
##---
## Render one build-up stage of the mechanistic-biogeography talk figure to
## "<filename><part>.png".  `part` (1..9) controls which layers are drawn:
## the abiotic-gradient label, species niche curves, simulated species
## clusters, a phylogeny sketch, and a trait "bar code".  Colours col1..col4
## identify species; colg is the neutral foreground colour.
## NOTE(review): relies on plot0() and circle() (graphics helpers defined
## elsewhere, presumably the author's utility package) and on the "arya"
## font family being registered -- confirm before reuse.
figAll <- function(filename="img/figall", part=1, wi=8.5, hg=5, colg="#CCCCCC",
                   col1="#ffdd55", col2="#8ddd75", col3="#c7254e", col4="#1b95e0"){
  filename <- paste0(filename, part, ".png")
  png(file = filename, res = 300, width = wi, height = hg, unit = "in")
  par(
    cex = 1.5,
    cex.lab = 4,
    cex.axis = 4,
    las = 1,
    lwd = 2,
    bg = "transparent",
    fg = colg,
    col.axis = colg,
    family = "arya",
    mar = c(1.5,0.5,1,0),
    mgp = c(2.2,1,0)
  )
  ## 5 panels: gradient label, colour ramp, main scene, phylogeny, bar code.
  layout(rbind(1,2,3,c(4,5)), heights=c(.2, .06, 1, .24), widths=c(.2,1))
  par(mar=c(0.2,0.1,0.1,0.1))
  ## Panel 1: abiotic-gradient label and (for late parts) niche curves.
  plot0(c(0,10), c(0,.5))
  seqx <- seq(0, 10, .1)
  if (part<4) text(5,.25, labels="abiotic gradient", cex=3, col=colg)
  # if (part>3 & part<8) lines(seqx, dnorm(seqx, 2, .78), col=col1, lwd=1.8)
  # if (part>4 & part<8) {
  # lines(seqx, dnorm(seqx, 5.4, .78), col=col2, lwd=1.8)
  # lines(seqx, dnorm(seqx, 8.2, .78), col=col4, lwd=1.8)
  # }
  # if (part>5) lines(seqx, dnorm(seqx, 5, 1.1), col=col3, lwd=1.8)
  if (part>8){
    lines(seqx, dnorm(seqx, 7.8, .92), col=col2, lwd=1.8)
    lines(seqx, dnorm(seqx, 2, .88), col=col1, lwd=1.8)
    lines(seqx, dnorm(seqx, 5.2, .78), col=col3, lwd=1.8)
  }
  ## Panel 2: horizontal colour ramp depicting the gradient.
  par(mar=c(0.1, 2, 0.1, 2))
  image(matrix(1:100, ncol=1), col=colorRampPalette(c('#CCCCCC', '#3fb3b2', '#545454'))(512), axes=FALSE, ann=FALSE)
  ## Panel 3: the main scene -- clusters of points drawn with myspec().
  par(mar=c(0.2,0.1,0.1,0.1), yaxs="i")
  plot0(c(0,10), c(0,5))
  if (part>3) myspec(2, 1, 1, .8, pch=19, cex=1.2, col=col1, lwd=.8)
  if (part==1) circle(x = 1, y = 1, radi =.9, lwd=1)
  if (part>3){
    myspec(60, 1, 1, .8, pch=19, cex=1.2, col=col1, lwd=.8)
    if (part>2){
      myspec(60, 2, 4, .8, seed=123, pch=19, cex=1.2, col=col1, lwd=.8)
      myspec(60, 3, 2, .8, seed=455, pch=19, cex=1.2, col=col1, lwd=.8)
    }
  }
  if (part>4 & part<8){
    myspec(60, 5, 3.8, 1.2, seed=901, pch=19, cex=1.2, col=col2, lwd=.8)
    myspec(40, 4.5, 1, .6, seed = 22, pch=19, cex=1.2, col=col2, lwd=.8)
    myspec(45, 6.8, 1.6, 1, pch=19, cex=1.2, col=col2, lwd=.8)
    #
    myspec(60, 7.5, 4, .8, pch=19, cex=1.2, col=col4, lwd=.8)
    myspec(80, 9, 1.6, 1.4, seed = 266, pch=19, cex=1.2, col=col4, lwd=.8)
  }
  if (part>7){
    myspec(60, 5, 3.8, 1.2, pch=19, cex=1.2, col=col2, lwd=.8)
    myspec(20, 5, 3.8, 1.2, pch=19, cex=1.2, col=col1, lwd=.8)
    myspec(30, 4.5, 1, .6, pch=19, cex=1.2, col=col1, lwd=.8)
    myspec(45, 6.8, 1.6, 1, pch=19, cex=1.2, col=col2, lwd=.8)
    #
    myspec(60, 7.5, 4, .8, pch=19, cex=1.2, col=col2, lwd=.8)
    myspec(80, 9, 1.6, 1.4, pch=19, cex=1.2, col=col2, lwd=.8)
  }
  if (part>5){
    myspec(30, 5, 3.8, 1.2, pch=19, cex=1.2, col=col3, lwd=.8)
    myspec(20, 7.5, 4, .8, pch=19, cex=1.2, col=col3, lwd=.8)
    myspec(15, 3, 2, .8, pch=19, cex=1.2, col=col3, lwd=.8)
  }
  ## Panel 4: small phylogeny sketch linking the coloured species.
  if (part>5){
    par(mar=c(1,0,0,0))
    plot0(c(0,10),c(0,10))
    lines(c(2, 5), c(1.5, 8.5), lwd=2)
    lines(c(5, 5), c(1.5, 8.5), lwd=2)
    if (part<8) lines(c(8, 5), c(1.5, 8.5), lwd=2)
    points(c(2,5,5),c(1.5,1.5,8.5), pch=19, col=c(col1, col2, col3), cex=3.4)
    if (part<8) points(8, 1.5, pch=19, col=col4, cex=3.4)
  }
  ## Panel 5: trait "bar code" diagram; part 8+ drops the col4 species.
  if (part>6){
    par(mar=c(1,2,0,1), lend=2)
    plot0(c(0,1,0,1))
    cold <- col4
    ##
    if (part>7) {
      xe <- .9
      xe2 <- .65
      cold <- NA
    } else xe <- xe2 <- .65
    lwhc <- 1.8
    lines(c(.1,xe), c(.1,.1), lwd=lwhc)
    lines(c(.1,.4), c(.65,.65), lwd=lwhc)
    lines(c(.4,xe), c(.5,.5), lwd=lwhc)
    lines(c(.4,xe2), c(.9,.9), lwd=lwhc)
    lines(c(.1,.1), c(.1,.65), lwd=lwhc)
    lines(c(.4,.4), c(.5,.9), lwd=lwhc)
    lines(c(0,.1), c(.375,.375), lwd=lwhc)
    points(c(xe,xe,xe2), c(.1, .5, .9), col=c(col1, col2, cold), pch=19, cex=2.2)
  }
  dev.off()
}
## Render every build-up stage of the figure (img/figall1.png ... figall9.png).
## Replaces nine copy-pasted figAll(part = k) calls with a loop.
for (part in 1:9) {
  figAll(part = part)
}
|
7a78623f156e505e4b6141aed1a0e4c5bc6ace32
|
6cbafc6d5ba417d0a16f56f309889ab7e3725dda
|
/simple_linear_regression.R
|
a5888fd19861c989dfbfe0d7349573db4f8663a0
|
[] |
no_license
|
johnnylazoq/Artificial-Intelligence
|
bac7c95bbe2c46909234da280194d1e237be6c5b
|
73d69f56d220bf32e573d0c52b4f5ee6510afa86
|
refs/heads/master
| 2021-09-06T06:34:37.796140
| 2018-02-03T08:18:45
| 2018-02-03T08:18:45
| 120,072,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,822
|
r
|
simple_linear_regression.R
|
##
#Machine Learning algorithm - Simple Linear regression
##------------------------------------------------------------------------------------
# The dataset contains a linear dependency of salary on years of experience.
# This model predicts salaries from experience.
#install.packages('caTools')
library(caTools)
#Importing the data set (expects the CSV in the working directory)
# setwd("Salary_Experience_Data.csv")
dataset = read.csv('Salary_Experience_Data.csv')
# Splitting the dataset into the Training set and Test set (2/3 train);
# the seed makes the random split reproducible.
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split== FALSE)
# Fitting Simple Linear Regression to the Training set (training the algorithm)
regressor = lm(formula = Salary ~ YearsExperience, data = training_set)
# Predicting the Test set results
y_prediction = predict(regressor, newdata = test_set)
#install.packages('ggplot2')
library(ggplot2)
# Plot of the training set: observed points (red) vs fitted line (blue).
ggplot() +
  geom_point(aes(x = training_set$YearsExperience, y= training_set$Salary),
             colour = 'red')+
  geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue')+
  ggtitle('Salary vs Experience (Training set)')+
  xlab('Years of experience')+
  ylab('Salary')
# Plot of the test set; the line is still the model fitted on training data.
ggplot() +
  geom_point(aes(x = test_set$YearsExperience, y= test_set$Salary),
             colour = 'red')+
  geom_line(aes(x = training_set$YearsExperience, y = predict(regressor, newdata = training_set)),
            colour = 'blue')+
  ggtitle('Salary vs Experience (Test set)')+
  xlab('Years of experience')+
  ylab('Salary')
#Blue line is the prediction line while red dots are data points from the dataset.
|
bd85507c110fe4d1eb35256e28f8dcded43a9d69
|
b8024e65e4f0fd08085697b6de672a3e75da26f2
|
/Stockreturns.R
|
8a6f6f47168d7913287e93ab8912b2874ce38807
|
[] |
no_license
|
Rajesh16702/GLIM-Advance-Stat-Assignment
|
0fd4e8bee8cb78173958ae79b5d5cb61a8fba41c
|
fd2d5f0a6a1b4560e3d4a20b54672fe7afc9b6a1
|
refs/heads/master
| 2021-04-06T18:14:42.026542
| 2018-03-14T16:34:34
| 2018-03-14T16:34:34
| 125,240,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,976
|
r
|
Stockreturns.R
|
## Two-way ANOVA of Sensex daily returns by direction (increase/decrease)
## and weekday (Monday/Friday), with assumption checks (Levene, Shapiro).
###Step1: Include the following library packages:
library(lawstat)
library(psych)
library(car)
library(MASS)
library(Rcmdr)
library(ggplot2)
library(graphics)
#Step 2: Load the data file into R
# NOTE(review): hard-coded absolute path -- will only run on the author's
# machine; setwd() in a script is discouraged.
setwd("F:/Sridhar/Data Sets/t/Two-way ANOVA")
df<-read.table("sensexr.txt",header=TRUE)
#Step 3: Clearly identify the factors in the data
# incdec/frimon are assumed to be 0/1 codes -- labels map in sorted order.
df$incdec1<-factor(df$incdec, labels=c("decrease","increase"))
df$frimon1<-factor(df$frimon,labels=c("Monday", "Friday"))
#Step 4: Descriptive summary of the data and plots:
table(df$incdec1,df$frimon1)
aggregate(df[, 3], list(df$incdec1,df$frimon1), mean)
with(df,interaction.plot(incdec1,frimon1,return, fun=mean, type="b",legend = TRUE, xlab="Decrease or Increase", ylab="Monday or Friday", main="Interaction Plot"))
par(mfrow=c(1,2))
plot(return ~ incdec1 + frimon1, data=df)
##Step 5:
####Assumption 1 Testing: Levene's Test for equal variances
leveneTest(df$return, df$incdec1)
leveneTest(df$return, df$frimon1)
leveneTest(df$return, interaction(df$incdec1, df$frimon1)) ###(not required generally)
###Test of Normality
#Check the histograms of returns within each factor level
increase<-subset(df$return,df$incdec1=="increase")
decrease<-subset(df$return,df$incdec1=="decrease")
friday<-subset(df$return,df$frimon1=="Friday")
monday<-subset(df$return,df$frimon1=="Monday")
hist(increase)
hist(decrease)
hist(friday)
hist(monday)
str(df)
#Shapiro-Wilk normality tests by increase decrease
cat("Normality p-values by Factor incdec1: ")
for (i in unique(factor(df$incdec1))){
  cat(shapiro.test(df[df$incdec1==i, ]$return)$p.value," ")
}
cat("Normality p-values by Factor frimon1: ")
#Shapiro-Wilk normality tests by friday monday
for (i in unique(factor(df$frimon1))){
  cat(shapiro.test(df[df$frimon1==i, ]$return)$p.value," ")
}
###### Step 6: ANOVA Test: It is a two-way ANOVA test with interaction:
fit<-lm(return~incdec1+frimon1+incdec1*frimon1,data=df)
anova(fit)
|
feeea1095ac5f6b382af9462e3b57994c0d57901
|
76487c270ad5ec7b1b6a1e7ec6f49a6d293a04d1
|
/man/bmeasures-package.Rd
|
3fcfdd59f808c16173e03c6347cddbc20a170aa6
|
[] |
no_license
|
shwijaya/bmeasures
|
20d57754ec8f08420eed4da1840c351be80013aa
|
b7e8aaf47a7078f4517c16095bfb9e2ed0d75946
|
refs/heads/master
| 2021-01-18T14:22:34.612179
| 2016-12-07T05:46:24
| 2016-12-07T05:46:24
| 26,023,167
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,809
|
rd
|
bmeasures-package.Rd
|
\name{bmeasures-package}
\alias{bmeasures-package}
\docType{package}
\title{
Binary similarity/dissimilarity measures
}
\description{
This package generates the quantities of the OTUs table, calculates the binary similarity/dissimilarity measures between two vectors, and finding the most suitable binary similarity/dissimilarity equations using ROC analysis.
}
\details{
\tabular{ll}{
Package: \tab bmeasures\cr
Type: \tab Package\cr
Version: \tab 1.1\cr
Date: \tab 2016-10-13\cr
License: \tab GPL-2\cr
}
\bold{Functions:}\cr
\tabular{ll}{
\code{bmeasures(x, y, method)} \tab Calculates the binary similarity/dissimilarity coefficient between two vectors. \cr
\code{bmeasures_otu(x, y)} \tab Generates the quantities of the Operational Taxonomic Units table. \cr
\code{bmeasures_find(inFile, setSeed=0, numSample=20)} \tab Finding a suitable binary similarity and dissimilarity measures.
}
}
\author{
Sony H. Wijaya, Farit M. Afendi, Irmanida Batubara, Latifah K. Darusman, Md. Altaf-Ul-Amin, Shigehiko Kanaya\cr
Maintainer: Sony H. Wijaya <\email{sonyhartono@gmail.com}>
}
\references{
[1]. Avcibaş I, Kharrazi M, Memon N, Sankur B: Image steganalysis with binary similarity measures. EURASIP J Appl Signal Processing 2005, 17:2749–2757.\cr
[2]. Baroni-urbani C, Buser MW: Similarity of binary data. Syst Biol 1976, 25:251–259.\cr
[3]. Batagelj V, Bren M: Comparing resemblance measures. J Classif 1995, 12:73–90.\cr
[4]. Boyce RL, Ellison PC: Choosing the best similarity index when performing fuzzy set ordination on binary data. J Veg Sci 2001, 12:711–720.\cr
[5]. Cha S-H, Tappert CC, Yoon S: Enhancing Binary Feature Vector Similarity Measures. 2005.\cr
[6]. Cha S, Choi S, Tappert C: Anomaly between Jaccard and Tanimoto coefficients. In Proceedings of Student-Faculty Research Day, CSIS, Pace University; 2009:1–8.\cr
[7]. Chang J, Chen R, Tsai S: Distance-preserving mappings from binary vectors to permutations. IEEE Trans Inf Theory 2003, 49:1054–1059.\cr
[8]. Cheetham AH, Hazel JE, Journal S, Sep N: Binary (presence-absence) similarity coefficients. J Paleontol 1969, 43:1130–1136.\cr
[9]. Choi S-S, Cha S-H, Tappert CC: A survey of binary similarity and distance measures. J Syst Cybern Informatics 2010, 8:43–48.\cr
[10]. Consonni V, Todeschini R: New similarity coefficients for binary data. Match-Communications Math Comput Chem 2012, 68:581–592.\cr
[11]. da Silva Meyer A, Garcia AAF, Pereira de Souza A, Lopes de Souza C: Comparison of similarity coefficients used for cluster analysis with dominant markers in maize (Zea mays L). Genet Mol Biol 2004, 27:83–91.\cr
[12]. Dalirsefat SB, da Silva Meyer A, Mirhoseini SZ: Comparison of similarity coefficients used for cluster analysis with amplified fragment length polymorphism markers in the silkworm, Bombyx mori. J Insect Sci 2009, 9:1–8.\cr
[13]. Dice LR.: Measures of the amount of ecologic association between species. Ecology 1945, 26:297–302.\cr
[14]. Faith DP: Asymmetric binary similarity measures. Oecologia 1983, 57:287–290.\cr
[15]. Gower JC, Legendre P: Metric and Euclidean properties of dissimilarity coefficients. J Classif 1986, 3:5–48.\cr
[16]. Holliday JD, Hu C-Y, Willett P: Grouping of coefficients for the calculation of inter-molecular similarity and dissimilarity using 2D fragment bit-strings. Comb Chem High Throughput Screen 2002, 5:155–166.\cr
[17]. Hubalek Z: Coefficients of association and similarity, based on binary (presence-absence) data: An evaluation. Biol Rev 1982, 57:669–689.\cr
[18]. Jaccard P: The distribution of the flora in the alpine zone. New Phytol 1912, 11:37–50.\cr
[19]. Jackson DA, Somers KM, Harvey HH: Similarity coefficients: Measures of co-occurrence and association or simply measures of occurrence? Am Nat 1989, 133:436–453.\cr
[20]. Johnson SC: Hierarchical clustering schemes. Psychometrika 1967, 32:241–254.\cr
[21]. Lance GN, Williams WT: Computer Programs for Hierarchical Polythetic Classification (``Similarity Analyses’’). Comput J 1966, 9:60–64.\cr
[22]. Lourenco F, Lobo V, Bacao F: Binary-Based Similarity Measures for Categorical Data and Their Application in Self-Organizing Maps. 2004.\cr
[23]. Michael EL: Marine ecology and the coefficient of association: A plea in behalf of quantitative biology. J Ecol 1920, 8:54–59.\cr
[24]. Nei M, Li W-H: Mathematical model for studying genetic variation in terms of restriction endonucleases. Proc Natl Acad Sci U S A 1979, 76:5269–5273.\cr
[25]. Ojurongbe TA: Comparison of different proximity measures and classification methods for binary data. Justus Liebig University Gießen; 2012.\cr
[26]. Stiles HE: The association factor in information retrieval. J ACM 1961, 8(2):271–279.\cr
[27]. Todeschini R, Consonni V, Xiang H, Holliday J, Buscema M, Willett P: Similarity coefficients for binary chemoinformatics data: Overview and extended comparison using simulated and real data sets. J Chem Inf Model 2012, 52:2884–2901.\cr
[28]. Warrens MJ: Similarity coefficients for binary data: properties of coefficients, coefficient matrices, multi-way metrics and multivariate coefficients. Leiden University; 2008.\cr
[29]. Zhang B, Srihari SN: Binary vector dissimilarity measures for handwriting identification. In Proceedings of SPIE-IS&T Electronic Imaging Vol. 5010; 2003:28–38.\cr
[30]. Zhang B, Srihari SN: Properties of binary vector dissimilarity measures. In Proc. JCIS Int’l Conf. Computer Vision, Pattern Recognition, and Image Processing; 2003:1–4.\cr
}
%%~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{ package }
%% \seealso{
%% ~~ Optional links to other man pages, e.g. ~~
%% ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
%% }
%% \examples{
%%
%% }
|
fe270de0154c9087194586096cd263b8fe890eaa
|
2818ff9dee771dce4d21e8ae1a8addff66165b98
|
/airquality arturo.R
|
5b40864bdc27b78d91ad55dc1b17ea2854f6b0e5
|
[] |
no_license
|
LordRickard/Programacion_Actuarial_III_OT16
|
92e008a905e4f954b78ea5792c63290901c3f5f2
|
e04a91bff9af72eae5035331cf01de6176e2d676
|
refs/heads/master
| 2020-12-01T18:42:38.580125
| 2016-10-19T03:46:27
| 2016-10-19T03:46:27
| 66,847,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38
|
r
|
airquality arturo.R
|
dput(airquality, file="airquality.R")
|
51bfad96866d69b42968c7b7d1efb2ccebd3722e
|
ebc487eed0ccae8a2db104261627a802988e2406
|
/man/make_filename.Rd
|
660d2ad1d9f48dd23099e07d530d2634023a617e
|
[] |
no_license
|
RussellPolitzky/fars
|
2292c40ccae32c4274f80de033514c3272be1787
|
84e55f4725028c974888fedd1a3b9e71936c58f3
|
refs/heads/master
| 2021-01-20T05:43:59.340767
| 2017-05-07T12:23:36
| 2017-05-07T12:23:36
| 89,803,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 802
|
rd
|
make_filename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_filename.R
\name{make_filename}
\alias{make_filename}
\title{Build an accident data file name}
\usage{
make_filename(year)
}
\arguments{
\item{year}{Accident data year expressed as a four digit number e.g. 2012.
This parameter may also be any type coercible to a suitable integer,
such as the character string "2012", for example.}
}
\value{
A character string of the form "accident_xxxx.csv.bz2", where
xxxx is the accident data year expressed as a four digit number.
}
\description{
Given a four digit \code{year}, this function builds an accident data file name
in the "accident_xxxx.csv.bz2" format, where "xxxx" is the supplied \code{year}.
}
\examples{
\dontrun{
make_filename(2012)
make_filename("2012")
}
}
|
d9b7f41b52950caf67c34f0d75970f8d466268ec
|
242737293b846c619d2aef6762d88c42bf6c8553
|
/R/simulateCC.R
|
7e853743c316c5f0a1396706f9cf6bb17f899519
|
[] |
no_license
|
dtharvey/eChem
|
d95006456b06d8ce142b1e1fc683a9935b7f0a34
|
2811d48c1d47d591214c55ec1f1cb05aa81ac409
|
refs/heads/master
| 2020-03-21T09:30:26.450840
| 2019-07-06T12:23:44
| 2019-07-06T12:23:44
| 138,403,219
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,422
|
r
|
simulateCC.R
|
#' Simulate a Chronocoulometry Experiment
#'
#' Simulates either a single pulse or a double pulse
#' chroncoulometry experiment as either an E, EC, or CE
#' mechanism, where E is a redox reaction and where C is a
#' chemical reaction that either precedes or follows the redox
#' reaction. The function operates on an object created using
#' \code{caSim}, which simulates the corresponding
#' chronoamperometry experiment, integrating current over time
#' using the trapezoidal integration rule.
#'
#' @param filename The filename that contains the results of a chronampeometry simulation created using the \code{caSim} function.
#'
#' @return Returns a list with the following components \item{expt}{type of experiment; defaults to CC for a chronocoulometry simulation} \item{mechanism}{type of mechanism used for the simulation} \item{file_type}{value that indicates whether the output includes all data (full) or a subset of data (reduced); defaults to full for \code{ccSim}} \item{charge}{vector giving the charge as a function of time} \item{potential}{vector giving the potential as a function of time} \item{time}{vector giving the times used for the diffusion grids} \item{distance}{vector giving the distances from electrode surface used for the diffusion grids} \item{oxdata}{diffusion grid, as a matrix, giving the concentration of Ox} \item{reddata}{diffusion grid, as a matrix, giving the concentrations of Red} \item{chemdata}{diffusion grid, as a matrix, giving the concentrations of Z} \item{formalE}{formal potential for the redox reaction} \item{initialE}{initial potential} \item{pulseE}{potential after apply the initial pulse} \item{electrons}{number of electrons, n, in the redox reaction} \item{ko}{standard heterogeneous electron transfer rate constant} \item{kcf}{homogeneous first-order rate constant for forward chemical reaction} \item{kcr}{homogeneous first-order rate constant for reverse chemical reaction} \item{alpha}{transfer coefficient} \item{diffcoef}{diffusion coefficient for Ox and Red} \item{area}{surface area for electrode} \item{temperature}{temperature} \item{conc.bulk}{initial concentration of Ox or Red for an E or EC mechanism, or the combined initial concentrations of Ox and Z, or of Red and Z for a CE mechanism} \item{tunits}{the number of increments in time for the diffusion grids} \item{xunits}{the number of increments in distance for the diffusion grids} \item{sdnoise}{standard deviation, as percent of maximum current, used to add noise to simulated data} \item{direction}{-1 for an initial reduction reaction of Ox to Red; +1 for an initial oxidation reaction of Red to 
Ox} \item{pulses}{number of pulses: either single or double} \item{time_pulse1}{time when first pulse is applied} \item{time_pulse2}{time when second pulse is applied} \item{time_end}{time when experiment ends} \item{k_f}{vector of forward electron transfer rate constant as a function of potential} \item{k_b}{vector of reverse electron transfer rate constant as a function of potential} \item{jox}{vector giving the flux of Ox to the electrode surface as a function of potential} \item{jred}{vector giving the flux of Red to the electrode surface as a function of potential}
#'
#' @export
#'
#' @examples
#' ex_ca = simulateCA(e.start = 0.25, e.pulse = -0.25, e.form = 0,
#' pulses = "double", t.2 = 20, x.units = 100, t.units = 1000)
#' ex_cc = simulateCC(ex_ca)
#' str(ex_cc)
simulateCC = function(filename){
  # Guard: charge can only be derived from a chronoamperometry simulation,
  # whose current-vs-time trace we integrate below.
  if (filename$expt != "CA") {
    stop("This file is not from a chronoamperometry simulation.")
  }
  # Cumulative trapezoidal integration of current over time.  Each increment
  # is (t[i] - t[i-1]) * (i[i] + i[i-1]) / 2; cumsum() over the increments
  # replaces the original O(n^2) loop (which re-integrated from the start
  # for every time point) with an O(n) computation whose additions occur in
  # the same order, so the results are identical.  charge[1] stays 0, as in
  # the original rep(0, ...) initialisation.
  n = length(filename$current)
  increments = diff(filename$time) *
    (filename$current[-1] + filename$current[-n]) / 2
  charge = c(0, cumsum(increments))
  # Repackage the CA simulation fields under the CC experiment label; the
  # diffusion grids and simulation parameters pass through unchanged.
  output = list("expt" = "CC",
                "mechanism" = filename$mechanism,
                "file_type" = filename$file_type,
                "charge" = charge,
                "potential" = filename$potential,
                "time" = filename$time,
                "distance" = filename$distance,
                "oxdata" = filename$oxdata,
                "reddata" = filename$reddata,
                "chemdata" = filename$chemdata,
                "formalE" = filename$formalE,
                "initialE" = filename$initialE,
                "pulseE" = filename$pulseE,
                "electrons" = filename$electrons,
                "ko" = filename$ko,
                "kcf" = filename$kcf,
                "kcr" = filename$kcr,
                "alpha" = filename$alpha,
                "diffcoef" = filename$diffcoef,
                "area" = filename$area,
                "temperature" = filename$temperature,
                "conc.bulk" = filename$conc.bulk,
                "tunits" = filename$tunits,
                "xunits" = filename$xunits,
                "sdnoise" = filename$sdnoise,
                "direction" = filename$direction,
                "pulses" = filename$pulses,
                "time_pulse1" = filename$time_pulse1,
                "time_pulse2" = filename$time_pulse2,
                "time_end" = filename$time_end,
                "k_f" = filename$kf,
                "k_b" = filename$kb,
                "jox" = filename$jox,
                "jred" = filename$jred
  )
  # Returned invisibly so interactive calls don't print the full grids.
  invisible(output)
}
|
d8bd9706690a70b356ff9cc994e9f23f59df5e26
|
cb37e962ef00a6ecef5fb1757a8d949e6003a70e
|
/R/convert_to_data.R
|
e68b652010e578ba8fe9fa7596b03f5b73690e8b
|
[] |
no_license
|
robertgambrel/tabler
|
374a9b35c84a9f87c04c259bfde66458e5e1f866
|
5974540f00372dfc62b6df37f553eb34197b625a
|
refs/heads/master
| 2020-12-28T21:39:53.021086
| 2016-10-05T16:03:55
| 2016-10-05T16:03:55
| 66,608,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,952
|
r
|
convert_to_data.R
|
#' Convert a model output to a dataframe
#'
#' This takes each model used in the final table and converts it to a tidy
#' dataset, formatted and ready to be merged with others in a nice table.
#'
#' @inheritParams tablify
#' @param model A single model result
#'
#' @importFrom magrittr %>%
#'
convert_to_data <- function(model,
                            teststat = 'p.value',
                            digits = 3,
                            digits_coef = digits,
                            digits_teststat = digits,
                            cutoffs = c(0.1, 0.05, 0.01),
                            stars = c('*', '**', '***'),
                            N = T,
                            fit = NULL) {
  # Each significance cutoff needs a matching signifier string.
  if (length(stars) != length(cutoffs)) {
    stop("Cutoff values for significance and significance signifiers (stars)
         must have the same length.")
  }
  # BUG FIX: all.equal() returns a character description (not FALSE) when the
  # values differ, and `!` on a character vector is itself an error -- so the
  # intended message below could never fire.  isTRUE() restores the check.
  if (!isTRUE(all.equal(cutoffs, sort(cutoffs, decreasing = T)))) {
    stop(paste0("Please enter cutoff values in descending order (i.e. c(",
                paste(sort(cutoffs, decreasing = T), collapse = ', '), ")) and
                verify that the order of the stars are as intended."))
  }
  # use broom to tidy the model into a term/estimate/statistic data frame
  cleaned <- broom::tidy(model)
  # Assign stars: cutoffs are descending, so later (stricter) cutoffs
  # overwrite earlier ones, leaving the most significant marker.
  cleaned$displayed_stars <- ''
  for (i in seq_along(cutoffs)) {  # seq_along: safe if cutoffs is empty
    cleaned <-
      dplyr::mutate(cleaned,
                    displayed_stars = ifelse(p.value < cutoffs[i], stars[i], displayed_stars)
      )
  }
  # Rounded coefficient with stars appended, e.g. "1.234**".
  cleaned <-
    dplyr::mutate(cleaned,
                  displayed_estimate = paste0(round(estimate, digits_coef), displayed_stars)
    )
  # add test statistic, requires standard evaluation from dplyr
  if (!teststat %in% names(cleaned)) {
    stop(paste0("Test statistic ", teststat, " not available. Please select from ",
                paste(names(cleaned)[3:ncol(cleaned)], collapse = ", ")))
  }
  if (is.na(teststat)) {
    NULL
  } else {
    # Legacy SE mutate_: builds round(<teststat>, digits_teststat) by name.
    cleaned <- dplyr::`mutate_`(cleaned,
                                displayed_stat = lazyeval::interp(~round(var, digits_teststat),
                                                                  var = as.name(teststat))
    )
  }
  # Convert to long form (term/type/value) for merging across models.
  cleaned_long <- cleaned %>%
    dplyr::select(term, displayed_estimate, displayed_stat) %>%
    tidyr::gather(type, value, 2:3) %>%
    dplyr::arrange(term, type)
  # Append the number of observations if desired (rbind of a character
  # vector coerces the value column to character, which is intended here).
  if (N) {
    n_obs <- length(model$residuals)
    cleaned_long <- rbind(cleaned_long, c("N", "", n_obs))
  }
  # Append requested fit statistics pulled from summary(model).
  if (!is.null(fit)) {
    for (stat in fit) {
      if (!stat %in% names(summary(model))) {
        stop(paste0("Error with fit statistic ", stat,
                    ". No statistic by that name found in model ",
                    deparse(substitute(model)), "."))
      }
      model_fit <- round(summary(model)[stat][[1]], digits_teststat)
      cleaned_long <- rbind(cleaned_long, c(stat, '', model_fit))
    }
  }
  return(cleaned_long)
}
|
8ef2d1ef379fec04983e8ea4fbe1b4683094d5d6
|
96476973d90e1936a29563c0b716a8bf0f170fbe
|
/Lesson_2_10-11-19.R
|
8b521d99e61bd78b10ff4df783832e75db102b03
|
[] |
no_license
|
Manish-Dahivadkar/Machine-Learning-in-R
|
8c5c66505ba957ca4332184682b75d94c4038438
|
6c8e21dd6a2a42486b6a1a81422de71b802411f8
|
refs/heads/master
| 2022-08-06T14:32:13.654584
| 2020-05-18T09:19:31
| 2020-05-18T09:19:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,937
|
r
|
Lesson_2_10-11-19.R
|
# How to run the array function
# (BUG FIX: this line and the asterisk divider below were previously bare,
#  uncommented text, so the script failed to parse when sourced)
vector1=c(2,6,7,11,12,13,20,21,2,3,4,5)
arr1=array(vector1, dim=c(3,2,2))
arr1
# ****************************************************
List1=list(mtcars,iris) # this is creating a list
List1
# how to print the value of a variable
a<-50
a
print(a)
print(paste("The value of a is ...",a))
# how to calculate the length of a dataset or vector
vector1=c(1,2,3,5,6,1,2,5,8,6,4,8,9,2,1)
length(vector1)
name=c("data","science")
nchar(name)
# Sampling: ~80/20 train/test split of iris via a Bernoulli draw of 1s and 2s
irissample=sample(2,nrow(iris),replace=TRUE,prob=c(.8,.2))
irissample
dim(iris)
length(irissample)
irissample
irissample=sample(2,nrow(iris),replace=TRUE,prob=c(.8,.2))
irissample
iristrain=iris[irissample==1,]
iristest=iris[irissample==2,]
iristrain
iristest
mtcarssample=sample(2,nrow(mtcars),replace=TRUE,prob=c(.8,.2))
mtcarssample
dim(mtcars)
# How to import external data (note: the path below is machine-specific)
CR=read.csv("C:/Users/Manish/Desktop/R-classroom/CreditRisk.csv")
head(CR)
View(head(CR))
tail(CR)
class(CR)
summary(CR)
nrow(CR)
ncol(CR)
dim(CR)
View(summary(CR))
table(CR$Gender)
# How to convert blanks to NA's
cr1=read.csv("C:/Users/Manish/Desktop/R-classroom/CreditRisk.csv",na.strings="")
cr1
View(cr1)
# so if blanks are present in the data, please convert them to NA's
summary(cr1)
table(cr1$Credit_History)
# whenever null values are present, either impute or remove them;
# here we create a dataset by omitting rows containing NA values
cr2=na.omit(cr1)
summary(cr2)
# or, to replace nulls instead of dropping rows, do the following
is.na(cr1$Credit_History) # identify nulls in a specific column
cr1$Credit_History[is.na(cr1$Credit_History)]<-0
View(cr1)
summary(cr1)
cr1$LoanAmount[is.na(cr1$LoanAmount)]<-142.5
summary(cr1)
table(cr1$Dependents)
cr1$Gender[is.na(cr1$Gender)]<-"Male"
summary(cr1)
cr1$Self_Employed[is.na(cr1$Self_Employed)]<-"No"
summary(cr1)
cr1$Married[is.na(cr1$Married)]<-"Yes"
cr1$Loan_Amount_Term[is.na(cr1$Loan_Amount_Term)]<-360
summary(cr1)
summary(cr1)
cr1$Dependents[is.na(cr1$Dependents)]<-0
summary(cr1)
#**************************************************************************
quantile(cr1$ApplicantIncome)
max(cr1$ApplicantIncome)
# FIX: spell out `probs` (was the partial match `prob`, which is fragile)
quantile(cr1$ApplicantIncome,probs = c(.1,.2,.3,.4,.5,.6,.7,.8,.9,1))
quantile(cr1$ApplicantIncome,probs=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,.98,1))
quantile(cr1$ApplicantIncome,probs=c(.1,.2,.3,.4,.5,.6,.7,.8,.9,.98,0.99,1))
# the quantile function combined with probabilities shows how the data is distributed
#************************************************************************************
library(dplyr)   # (duplicate library(dplyr) call removed)
cr1<-read.csv("C:/Users/Manish/Desktop/R-classroom/CreditRisk.csv")
# filter function
database1<-filter(cr1,Gender=="Male")
View(database1)
#######
# and condition
colnames(cr1)
df2<-filter(cr1,Gender=="Male"& ApplicantIncome>5000)
View(df2)
# or condition
df3<-filter(cr1,Gender=="Male" | ApplicantIncome >5000)
View(df3)
|
35d561b4c79c34f88749792242d698c5c9f6e919
|
d2c5ba4826787014da2658b2de5af9287d33f5b6
|
/landuse.R
|
1a8b1df4e48669e7eca7f0fee97a6a26f77add64
|
[] |
no_license
|
kjbark3r/Migration
|
2536ca3ef2c9886d4cdecaf5508afa67ff3353a2
|
126311095f6d5145453a21538d807b65561b292b
|
refs/heads/master
| 2021-01-23T08:44:09.949472
| 2018-11-06T18:55:54
| 2018-11-06T18:55:54
| 67,642,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,905
|
r
|
landuse.R
|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# CREATING HOME RANGES FOR HERDS AND INDIVIDUALS #
# TO ASSESS FACTORS INFLUENCING MIGRATORY BEHAVIOR #
# KRISTIN BARKER #
# OCTOBER 2017 #
### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ### ### ### ###
#### |SETUP| ####
### ### ### ### ###
#### Packages ####
library(sp) # spatial
library(rgeos) # clip polygons
library(adehabitatHR) # home ranges and kernel centroids
library(rgdal) # latlong/stateplane conversions
library(raster) # shapefile()
library(gsubfn) # no idea, possibly unnecessary
library(maptools) # writeSpatialShape
library(dplyr) # joins, data work, general awesomeness
#### Working directory ####
# NOTE(review): setwd() to hard-coded per-machine paths makes this script
# non-portable; consider an RStudio project or here::here() instead.
wd_workcomp <- "C:\\Users\\kristin.barker\\Documents\\GitHub\\Migration"
wd_laptop <- "C:\\Users\\kjbark3r\\Documents\\GitHub\\Migration"
wd_worklaptop <- "C:\\Users\\kristin\\Documents\\Migration"
if (file.exists(wd_workcomp)) {setwd(wd_workcomp)
} else {
if(file.exists(wd_laptop)) {setwd(wd_laptop)
} else {
setwd(wd_worklaptop)
}
}
#### "Raw" data ####
# winter home range for each individual (from homeranges.R)
indivhrsprelim <- shapefile("../GIS/Shapefiles/Elk/IndivHRs/AllFebHRs")
# list of land use files
lulist <- list.files(path = "../GIS/Shapefiles/Land", pattern = "lu.+shp$",full.names = TRUE)
# read in and store each land use file (naming convention = "lu"+yr)
# NOTE(review): 1:length(lulist) iterates c(1, 0) if no files match; prefer
# seq_along(lulist).  assign() also creates variables in the global env,
# which a named list would make more explicit.
for (i in 1:length(lulist)) {
inname <- lulist[i]
# strip directory and ".shp" to recover the "luYY" object name
outname <- substr(inname, nchar(inname)-7, nchar(inname)-4)
assign(outname, shapefile(inname))
}
#### Data prep ####
# dataframe of individuals, herds, and years of interest
indivdat <- indivhrsprelim@data %>%
rename(AnimalID = id, HRarea = area, YOI = Year)
# dataframe to store new data in (nOwn/acreAg filled by the loop below)
moddat <- indivdat %>%
mutate(nOwn = NA, acreAg = NA)
# match land use and home range projections
indivhrs <- spTransform(indivhrsprelim, crs(lu08)) # match aoi proj to ndvi
### ### ### ### ### ### ### ### #
#### |OWNERSHIP PER INDIV| ####
### ### ### ### ### ### ### ### #
# for each individual: clip that year's cadastral layer to the winter home
# range, then count unique owners and sum irrigated-ag acreage
for (i in 1:nrow(moddat)) {
# identify elk and year of interest
elk <- moddat[i,"AnimalID"]
yoi <- moddat[i, "YOI"]
# pull last 2 digits of year to identify correct land use file
yrsub <- ifelse(yoi == 2015, 14, # no cadastral data for 2015, use 2014
ifelse(yoi == 2006, "08", substr(yoi, 3, 4))) # and none for 2006, use 2008
# pull correct winter home range
hr <- subset(indivhrs, id == elk)
# pull land use file from that year
lui <- get(paste0("lu", yrsub))
# remove leading or trailing spaces from landowner names
lui@data$owner <- trimws(lui@data$owner)
# clip land use to indiv winter range
luclip <- raster::intersect(lui, hr)
# calculate and store number unique landowners on winter range
moddat[i, "nOwn"] <- length(unique(luclip@data$owner))
# calculate and store acres of irrigated ag on winter range
moddat[i, "acreAg"] <- sum(luclip@data$irrigAcre)
}
# calculate ownership density and proportion irrigated ag
# (0.004047 looks like an acres -> km^2 conversion, which presumes HRarea is
#  in km^2 -- TODO confirm units against homeranges.R)
moddat <- moddat %>%
mutate(densOwn = nOwn/HRarea,
ppnAg = (0.004047*acreAg)/HRarea,
irrig = ifelse(ppnAg > 0, 1, 0))
summary(moddat$densOwn)
summary(moddat$ppnAg)
length(which(moddat$irrig == 1))
# export updated model data file
write.csv(moddat, "human-covariates-feb.csv", row.names = F)
|
1a4285eb205b38c06fe184ee957071013c372dfc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VGAM/examples/acat.Rd.R
|
630efc10cff5373d00da9fe2b6a3c52fb2d83b2a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 355
|
r
|
acat.Rd.R
|
# Extracted examples from the VGAM help page for acat() (adjacent-categories
# ordinal regression).  Requires the VGAM package, which also supplies the
# `pneumo` dataset used below.
library(VGAM)
### Name: acat
### Title: Ordinal Regression with Adjacent Categories Probabilities
### Aliases: acat
### Keywords: models regression
### ** Examples
# add log exposure time as the single covariate
pneumo <- transform(pneumo, let = log(exposure.time))
# fit the adjacent-categories model; the surrounding () also prints the fit
(fit <- vglm(cbind(normal, mild, severe) ~ let, acat, data = pneumo))
coef(fit, matrix = TRUE)
constraints(fit)
model.matrix(fit)
|
254c4363136ae1b1380993c379488ba79701be7e
|
4343cc3a96791b9fd5acc767f1705bb4a8c5e95e
|
/reader/Models/08 Beer Game/01 Beer Game.R
|
4ebfb7725d274f03fe0be8f858169c751f441b97
|
[
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
mohammad-miftakhus-sholikin/SDMR
|
5d96e6748cc465f8b2d040d752212479f620b1b8
|
06b1bd78c1bdb5815834ea4f04ee4d91a3a7ead9
|
refs/heads/master
| 2020-05-17T13:32:45.542242
| 2019-04-22T14:33:07
| 2019-04-22T14:33:07
| 183,738,775
| 1
| 0
|
MIT
| 2019-04-27T06:47:42
| 2019-04-27T06:47:42
| null |
UTF-8
|
R
| false
| false
| 11,243
|
r
|
01 Beer Game.R
|
###########################################
# Translation of Vensim file.
# Date created: 2017-11-10 16:57:13
###########################################
library(deSolve)
library(ggplot2)
library(tidyr)
# Simulation run parameters (from the Vensim model settings)
START_TIME <- 0.000000
FINISH_TIME <- 40.000000
TIME_STEP <- 0.125000
# Setting aux param to NULL (no auxiliary parameters for this model)
auxs <- NULL
# Generating the simulation time vector.
# FIX: previously the literals 0, 40 and 0.125 were repeated here, so editing
# the named constants above had no effect on the actual simulation grid.
simtime <- seq(START_TIME, FINISH_TIME, by = TIME_STEP)
# Initial stock values: expected customer orders, inventory and supply line
# for each of the four echelons (D, F, R, W prefixes)
stocks <- c(DExpectedCustomerOrders = 100, DStock = 400, DSupplyLine = 400,
            FExpectedCustomerOrders = 100, FStock = 400, FSupplyLine = 400,
            RExpectedCustomerOrders = 100, RStock = 400, RSupplyLine = 400,
            WExpectedCustomerOrders = 100, WStock = 400, WSupplyLine = 400)
# Derivative function passed to deSolve::ode().  Arguments follow the deSolve
# convention: `time` (current simulation time), `stocks` (named state vector)
# and `auxs` (auxiliary parameters, NULL here).  Returns list(derivatives) in
# the same order as `stocks`, as deSolve requires.
# The R/W/D/F prefixes appear to be the classic Beer Game supply-chain
# echelons -- Retailer, Wholesaler, Distributor, Factory -- TODO confirm
# against the original Vensim equations reproduced below this function.
model <- function(time, stocks, auxs){
with(as.list(c(stocks, auxs)),{
# NOTE(review): ALPHA, BETA and the quantities derived from them further down
# (InventoryAdjustmentTime, MinShipmentTime, SupplyLIneAdjustmentTime) are
# assigned but never used -- every echelon recomputes its own local copies.
ALPHA <- 1
BETA <- 0.05
# --- Distributor (D): acquisition, inventory & supply-line adjustments ---
DDeliveryDelay <- 4
DAcquisitionRate <- DSupplyLine/DDeliveryDelay
DDesiredInventory <- 400
DALPHA <- 1
DInventoryAdjustmentTime <- 1/DALPHA
DAdjustmentforInventory <- (DDesiredInventory-DStock)/DInventoryAdjustmentTime
DDesiredSupplyLine <- DDeliveryDelay*DExpectedCustomerOrders
DBETA <- 0.05
DSupplyLIneAdjustmentTime <- 1/DBETA
DAdjustmentforSupplyLine <- (DDesiredSupplyLine-DSupplyLine)/DSupplyLIneAdjustmentTime
DAdjustmentTime <- 1
# --- Wholesaler (W): supply-line and inventory adjustments, order decision ---
WDeliveryDelay <- 4
WDesiredSupplyLine <- WDeliveryDelay*WExpectedCustomerOrders
WBETA <- 0.05
WSupplyLIneAdjustmentTime <- 1/WBETA
WAdjustmentforSupplyLine <- (WDesiredSupplyLine-WSupplyLine)/WSupplyLIneAdjustmentTime
WDesiredInventory <- 400
WALPHA <- 1
WInventoryAdjustmentTime <- 1/WALPHA
WAdjustmentforInventory <- (WDesiredInventory-WStock)/WInventoryAdjustmentTime
WDesiredDeliveryRate <- WAdjustmentforInventory+WExpectedCustomerOrders
# orders are floored at zero: no negative ordering
WIndicatedOrders <- max(0,WAdjustmentforSupplyLine+WDesiredDeliveryRate)
# W's orders become D's incoming customer orders (echelon coupling)
DCustomerOrders <- WIndicatedOrders
DErrorTerm <- DCustomerOrders-DExpectedCustomerOrders
DCECO <- DErrorTerm/DAdjustmentTime
DDesiredDeliveryRate <- DAdjustmentforInventory+DExpectedCustomerOrders
DIndicatedOrders <- max(0,DAdjustmentforSupplyLine+DDesiredDeliveryRate)
DMinShipmentTime <- 1
DMaximumShippedOrders <- DStock/DMinShipmentTime
# --- Factory (F): fills D's orders, shipment limited by on-hand stock ---
FCustomerOrders <- DIndicatedOrders
FMinShipmentTime <- 1
FMaximumShippedOrders <- FStock/FMinShipmentTime
FShipmentRate <- min(FCustomerOrders,FMaximumShippedOrders)
DOrderRate <- FShipmentRate
DShipmentRate <- min(DCustomerOrders,DMaximumShippedOrders)
FDeliveryDelay <- 4
FAcquisitionRate <- FSupplyLine/FDeliveryDelay
FDesiredInventory <- 400
FALPHA <- 1
FInventoryAdjustmentTime <- 1/FALPHA
FAdjustmentforInventory <- (FDesiredInventory-FStock)/FInventoryAdjustmentTime
FDesiredSupplyLine <- FDeliveryDelay*FExpectedCustomerOrders
FBETA <- 0.05
FSupplyLIneAdjustmentTime <- 1/FBETA
FAdjustmentforSupplyLine <- (FDesiredSupplyLine-FSupplyLine)/FSupplyLIneAdjustmentTime
FAdjustmentTime <- 1
FErrorTerm <- FCustomerOrders-FExpectedCustomerOrders
FCECO <- FErrorTerm/FAdjustmentTime
FDesiredDeliveryRate <- FAdjustmentforInventory+FExpectedCustomerOrders
FIndicatedOrders <- FAdjustmentforSupplyLine+FDesiredDeliveryRate
FOrderRate <- max(0,FIndicatedOrders)
# (unused globals -- see NOTE at top of the function)
InventoryAdjustmentTime <- 1/ALPHA
MinShipmentTime <- 1
# --- Retailer (R): faces constant exogenous customer demand of 100 ---
RDeliveryDelay <- 4
RAcquisitionRate <- RSupplyLine/RDeliveryDelay
RDesiredInventory <- 400
RALPHA <- 1
RInventoryAdjustmentTime <- 1/RALPHA
RAdjustmentforInventory <- (RDesiredInventory-RStock)/RInventoryAdjustmentTime
RDesiredSupplyLine <- RDeliveryDelay*RExpectedCustomerOrders
RBETA <- 0.05
RSupplyLIneAdjustmentTime <- 1/RBETA
RAdjustmentforSupplyLine <- (RDesiredSupplyLine-RSupplyLine)/RSupplyLIneAdjustmentTime
RAdjustmentTime <- 1
RCustomerOrders <- 100
RErrorTerm <- RCustomerOrders-RExpectedCustomerOrders
RCECO <- RErrorTerm/RAdjustmentTime
RDesiredDeliveryRate <- RAdjustmentforInventory+RExpectedCustomerOrders
RIndicatedOrders <- max(0,RAdjustmentforSupplyLine+RDesiredDeliveryRate)
RMinShipmentTime <- 1
RMaximumShippedOrders <- RStock/RMinShipmentTime
# R's orders become W's incoming customer orders
WCustomerOrders <- RIndicatedOrders
WMinShipmentTime <- 1
WMaximumShippedOrders <- WStock/WMinShipmentTime
WShipmentRate <- min(WCustomerOrders,WMaximumShippedOrders)
ROrderRate <- WShipmentRate
RShipmentRate <- min(RCustomerOrders,RMaximumShippedOrders)
SupplyLIneAdjustmentTime <- 1/BETA
WAcquisitionRate <- WSupplyLine/WDeliveryDelay
WAdjustmentTime <- 1
WErrorTerm <- WCustomerOrders-WExpectedCustomerOrders
WCECO <- WErrorTerm/WAdjustmentTime
WOrderRate <- DShipmentRate
# --- State derivatives (one per stock, same order as the `stocks` vector) ---
d_DT_DExpectedCustomerOrders <- DCECO
d_DT_DStock <- DAcquisitionRate-DShipmentRate
d_DT_DSupplyLine <- DOrderRate-DAcquisitionRate
d_DT_FExpectedCustomerOrders <- FCECO
d_DT_FStock <- FAcquisitionRate-FShipmentRate
d_DT_FSupplyLine <- FOrderRate-FAcquisitionRate
d_DT_RExpectedCustomerOrders <- RCECO
d_DT_RStock <- RAcquisitionRate-RShipmentRate
d_DT_RSupplyLine <- ROrderRate-RAcquisitionRate
d_DT_WExpectedCustomerOrders <- WCECO
d_DT_WStock <- WAcquisitionRate-WShipmentRate
d_DT_WSupplyLine <- WOrderRate-WAcquisitionRate
return (list(c(d_DT_DExpectedCustomerOrders,d_DT_DStock,d_DT_DSupplyLine,d_DT_FExpectedCustomerOrders,d_DT_FStock,d_DT_FSupplyLine,d_DT_RExpectedCustomerOrders,d_DT_RStock,d_DT_RSupplyLine,d_DT_WExpectedCustomerOrders,d_DT_WStock,d_DT_WSupplyLine)))
})
}
# Run the simulation with Euler integration, reshape the output from wide to
# long (one row per time/stock pair), and plot every stock trajectory.
sim_out <- data.frame(
  ode(y = stocks, times = simtime, func = model, parms = auxs, method = 'euler')
)
sim_long <- gather(sim_out, key = Stock, value = Value, 2:ncol(sim_out))
ggplot(data = sim_long) + geom_line(aes(x = time, y = Value, colour = Stock))
#----------------------------------------------------
# Original text file exported from Vensim
# D Expected Customer Orders = INTEG( D CECO , 100)
# D Stock = INTEG( D Acquisition Rate - D Shipment Rate , 400)
# D Supply Line = INTEG( D Order Rate - D Acquisition Rate , 400)
# F Expected Customer Orders = INTEG( F CECO , 100)
# F Stock = INTEG( F Acquisition Rate - F Shipment Rate , 400)
# F Supply Line = INTEG( F Order Rate - F Acquisition Rate , 400)
# R Expected Customer Orders = INTEG( R CECO , 100)
# R Stock = INTEG( R Acquisition Rate - R Shipment Rate , 400)
# R Supply Line = INTEG( R Order Rate - R Acquisition Rate , 400)
# W Expected Customer Orders = INTEG( W CECO , 100)
# W Stock = INTEG( W Acquisition Rate - W Shipment Rate , 400)
# W Supply Line = INTEG( W Order Rate - W Acquisition Rate , 400)
# ALPHA = 1
# BETA = 0.05
# D Delivery Delay = 4
# D Acquisition Rate = D Supply Line / D Delivery Delay
# D Desired Inventory = 400
# D ALPHA = 1
# D Inventory Adjustment Time = 1 / D ALPHA
# D Adjustment for Inventory = ( D Desired Inventory - D Stock ) / D Inventory Adjustment Time
# D Desired Supply Line = D Delivery Delay * D Expected Customer Orders
# D BETA = 0.05
# D Supply LIne Adjustment Time = 1 / D BETA
# D Adjustment for Supply Line = ( D Desired Supply Line - D Supply Line ) / D Supply LIne Adjustment Time
# D Adjustment Time = 1
# W Delivery Delay = 4
# W Desired Supply Line = W Delivery Delay * W Expected Customer Orders
# W BETA = 0.05
# W Supply LIne Adjustment Time = 1 / W BETA
# W Adjustment for Supply Line = ( W Desired Supply Line - W Supply Line ) / W Supply LIne Adjustment Time
# W Desired Inventory = 400
# W ALPHA = 1
# W Inventory Adjustment Time = 1 / W ALPHA
# W Adjustment for Inventory = ( W Desired Inventory - W Stock ) / W Inventory Adjustment Time
# W Desired Delivery Rate = W Adjustment for Inventory + W Expected Customer Orders
# W Indicated Orders = max ( 0, W Adjustment for Supply Line + W Desired Delivery Rate)
# D Customer Orders = W Indicated Orders
# D Error Term = D Customer Orders - D Expected Customer Orders
# D CECO = D Error Term / D Adjustment Time
# D Desired Delivery Rate = D Adjustment for Inventory + D Expected Customer Orders
# D Indicated Orders = max ( 0, D Adjustment for Supply Line + D Desired Delivery Rate)
# D Min Shipment Time = 1
# D Maximum Shipped Orders = D Stock / D Min Shipment Time
# F Customer Orders = D Indicated Orders
# F Min Shipment Time = 1
# F Maximum Shipped Orders = F Stock / F Min Shipment Time
# F Shipment Rate = min ( F Customer Orders , F Maximum Shipped Orders )
# D Order Rate = F Shipment Rate
# D Shipment Rate = min ( D Customer Orders , D Maximum Shipped Orders )
# F Delivery Delay = 4
# F Acquisition Rate = F Supply Line / F Delivery Delay
# F Desired Inventory = 400
# F ALPHA = 1
# F Inventory Adjustment Time = 1 / F ALPHA
# F Adjustment for Inventory = ( F Desired Inventory - F Stock ) / F Inventory Adjustment Time
# F Desired Supply Line = F Delivery Delay * F Expected Customer Orders
# F BETA = 0.05
# F Supply LIne Adjustment Time = 1 / F BETA
# F Adjustment for Supply Line = ( F Desired Supply Line - F Supply Line ) / F Supply LIne Adjustment Time
# F Adjustment Time = 1
# F Error Term = F Customer Orders - F Expected Customer Orders
# F CECO = F Error Term / F Adjustment Time
# F Desired Delivery Rate = F Adjustment for Inventory + F Expected Customer Orders
# F Indicated Orders = F Adjustment for Supply Line + F Desired Delivery Rate
# F Order Rate = max ( 0, F Indicated Orders )
# FINAL TIME = 40
# INITIAL TIME = 0
# Inventory Adjustment Time = 1 / ALPHA
# Min Shipment Time = 1
# R Delivery Delay = 4
# R Acquisition Rate = R Supply Line / R Delivery Delay
# R Desired Inventory = 400
# R ALPHA = 1
# R Inventory Adjustment Time = 1 / R ALPHA
# R Adjustment for Inventory = ( R Desired Inventory - R Stock ) / R Inventory Adjustment Time
# R Desired Supply Line = R Delivery Delay * R Expected Customer Orders
# R BETA = 0.05
# R Supply LIne Adjustment Time = 1 / R BETA
# R Adjustment for Supply Line = ( R Desired Supply Line - R Supply Line ) / R Supply LIne Adjustment Time
# R Adjustment Time = 1
# R Customer Orders = 100
# R Error Term = R Customer Orders - R Expected Customer Orders
# R CECO = R Error Term / R Adjustment Time
# R Desired Delivery Rate = R Adjustment for Inventory + R Expected Customer Orders
# R Indicated Orders = max ( 0, R Adjustment for Supply Line + R Desired Delivery Rate)
# R Min Shipment Time = 1
# R Maximum Shipped Orders = R Stock / R Min Shipment Time
# W Customer Orders = R Indicated Orders
# W Min Shipment Time = 1
# W Maximum Shipped Orders = W Stock / W Min Shipment Time
# W Shipment Rate = min ( W Customer Orders , W Maximum Shipped Orders )
# R Order Rate = W Shipment Rate
# R Shipment Rate = min ( R Customer Orders , R Maximum Shipped Orders )
# TIME STEP = 0.125
# Supply LIne Adjustment Time = 1 / BETA
# W Acquisition Rate = W Supply Line / W Delivery Delay
# W Adjustment Time = 1
# W Error Term = W Customer Orders - W Expected Customer Orders
# W CECO = W Error Term / W Adjustment Time
# W Order Rate = D Shipment Rate
#----------------------------------------------------
|
b1db1af2427e4849b56606205dff34ae70cd449f
|
a3020c890e3782b684f68a935472368e39a043e7
|
/man/PCP_plot.Rd
|
1144417711264018522091b12da98591fd897c06
|
[] |
no_license
|
trilnick/sharpshootR
|
2193ac1625a8b9aa314c05256ebe4dd773878f10
|
92be9a67c9ee542d33cd46be8bbf4f43582813df
|
refs/heads/master
| 2023-02-22T05:33:22.679766
| 2021-01-21T06:33:04
| 2021-01-21T06:33:04
| 327,691,598
| 0
| 0
| null | 2021-01-07T18:18:26
| 2021-01-07T18:18:25
| null |
UTF-8
|
R
| false
| false
| 1,722
|
rd
|
PCP_plot.Rd
|
\name{PCP_plot}
\alias{PCP_plot}
\title{Percentiles of Cumulative Precipitation}
\description{Generate a plot representing percentiles of cumulative precipitation, given a historic record, and criteria for selecting a year of data for comparison.}
\usage{
PCP_plot(x, this.year, this.day = NULL, method = "exemplar",
q.color = "RoyalBlue", c.color = "firebrick", ...)
}
\arguments{
\item{x}{result from \code{CDECquery()} for now, will need to generalize to other sources}
\item{this.year}{a single water year, e.g. 2020}
\item{this.day}{optional integer representing days since start of selected water year}
\item{method}{'exemplar' or 'daily', currently 'exemplar' is the only method available}
\item{q.color}{color of percentiles cumulative precipitation}
\item{c.color}{color of selected year}
\item{\dots}{additional arguments to \code{plot()}}
}
\details{This is very much a work in progress. Further examples at \url{https://ncss-tech.github.io/AQP/sharpshootR/CDEC.html}, and \url{https://ncss-tech.github.io/AQP/sharpshootR/cumulative-PPT.html}}
\value{Currently nothing is returned.}
\author{D.E. Beaudette}
\seealso{
\code{\link{waterDayYear}}
}
\examples{
\donttest{
if(requireNamespace("curl") &
curl::has_internet()
) {
s <- 'SPW'
# get metadata
s.info <- CDEC_StationInfo(s)
# format title for cumulative PPT
title.text <- sprintf("\%s [\%s]", s.info$site.meta$Name, s)
# get data
x <- CDECquery(id=s, sensor=45, interval='D', start='2000-01-01', end='2030-01-01')
## NOTE: requires sharpshootR >= 1.6.1
# plot
par(mar=c(4.5, 4.5, 2.5, 1.5))
PCP_plot(x[1:(nrow(x)-60), ], ylab='Cumulative PPT (inches)', main=title.text, this.year = 2020)
}
}
}
\keyword{ hplots }
|
78d096d5bbdeb16bd7bec914ac8a7f21d6e72e2b
|
3c72d7144acd35317e329ef994fbea33d08526ad
|
/mapStats/man/jiggleClass.Rd
|
2f3aae77e7e906588fcd0e78be0ec0aaa4009b7d
|
[] |
no_license
|
sam-data-guy/mapStats
|
b1d75506c5f4ed8be95388738d43b942853b737d
|
403064184f5d98823bcc89e494f42b7a758a987a
|
refs/heads/master
| 2021-05-21T16:25:42.468919
| 2020-04-03T11:47:42
| 2020-04-03T11:47:42
| 252,716,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,367
|
rd
|
jiggleClass.Rd
|
\name{jiggleClass}
\alias{jiggleClass}
\title{
Adjust class boundaries to protect from rounding errors
}
\description{
When using \code{\link[classInt]{classIntervals}} to compute classes, occasionally there are rounding errors so that
when the data is plotted and the class breaks are used for colors, for instance, the rounding error may cause a value to
not be plotted with the right color, or to not be plotted at all. For this reason, we add a small value to each of the
break points to accommodate a possible rounding error. This correction is negligible and should not affect plotting.
Additionally, in case \code{ngroups} is high, resulting in empty groups (even though the number of unique values is higher than \code{ngroups}),
the function also eliminates the empty groups as part of the adjustment above. In case there is such a change, the palettes are also changed.
}
\usage{
jiggleClass(x)
}
\arguments{
\item{x}{
an object of class \code{classIntervals} from the function \code{\link[classInt]{classIntervals}}.
}
}
\value{
an object of class \code{classIntervals}.
}
\examples{
y <- 100*rnorm(50)
#compute class intervals using either right or left interval closure
class_list_right <- classInt::classIntervals(var=y, n=12, intervalClosure="right")
class_list_right$brks
class_list_left <- classInt::classIntervals(var=y, n=12, intervalClosure="left")
class_list_left$brks
#there should be a slight difference now between class breaks from before, but should
#have same number of observations per interval as before, and for both left and right closure
jiggleClass(x=class_list_right)
jiggleClass(x=class_list_left)
#example with discrete data, 7 groups but 9 unique values.
#classIntervals generates some empty intervals, so jiggleClass eliminates them and adjusts breaks
#in this example, with integer values, right/left closure matters more, and so the results
#will differ slightly depending on which is chosen
y <- c(1, 1, 1, 1, 2, 3, 6, 7, 8, 9, 10, 10, 10, 10, 11)
class_list_right <- classInt::classIntervals(y, 7, intervalClosure="right")
class_list_right
class_list_left <- classInt::classIntervals(y, 7, intervalClosure="left")
class_list_left
#number of groups falls now for left closure
jiggleClass(x=class_list_right)
jiggleClass(x=class_list_left)
}
|
c066f7971103bbbc72d3f7fed9ca75b0e6870a02
|
c727a5dcac33f7bafc53b9ff4732aae824bf0058
|
/benchmark.R
|
3744dc94b16209e75d76e79f3b9c07d3e70a6a8c
|
[] |
no_license
|
QuayAu/openml100bm
|
e3af81fa79be1cc908acb66a25cb7405b81f744d
|
a81ee6c70ee95e281f732748310c6fa2b0666059
|
refs/heads/master
| 2021-01-21T10:34:47.019106
| 2017-09-26T14:21:45
| 2017-09-26T14:21:45
| 83,457,692
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,987
|
r
|
benchmark.R
|
# getting the data
# openml 100 data sets: study_14
library(OpenML)
setOMLConfig(server = "https://www.openml.org/api/v1")
# NOTE(review): the commented-out line below embeds what appears to be a real
# API key; it should be removed from version control and loaded from the
# environment or a config file instead.
#setOMLConfig(apikey = "1536489644f7a7872e7d0d5c89cb6297")# batchtools experiment
library(mlr)
library(BBmisc)
library(parallelMap)
library(batchtools)
source("defs.R")
# fetch tasks: <= 10000 instances, <= 500 features, no missing values
datasets = listOMLTasks(tag = "study_14",
number.of.instances = c(1, 10000),
number.of.features = c(1, 500),
number.of.missing.values = 0)
#datasets = datasets[1:10, ]
# download all task data into the local OpenML cache
populateOMLCache(task.ids = datasets$task.id)
# download each task, dropping any that failed, and name the list by dataset
oml.tasks = lapply(datasets$task.id, function(x) try(getOMLTask(task.id = x)))
oml.tasks = oml.tasks[!vlapply(oml.tasks, is.error)]
oml.tasks = setNames(oml.tasks, vcapply(oml.tasks, function(x) x$input$data.set$desc$name))
# create registry (wiping any previous run of the same experiment)
unlink("mlr_defaults_openml60", recursive = TRUE)
reg = makeExperimentRegistry("mlr_defaults_openml60",
packages = c("OpenML", "mlr", "parallelMap"),
source = "defs.R", seed = 123)
# Slurm template path is cluster-specific (LRZ)
tmpl = "/home/hpc/pr74ze/ri89coc2/lrz_configs/config_files/batchtools/slurm_lmulrz.tmpl"
reg$cluster.functions = makeClusterFunctionsSlurm(template = tmpl, clusters = "mpp2")
# add problems: one batchtools problem per OpenML task
for (tn in names(oml.tasks)) {
task = oml.tasks[[tn]]
addProblem(name = tn, data = task)
}
# add algorithms: run one mlr learner (looked up in LEARNERS from defs.R)
# on the task, upload the run to OpenML, and return both the run object and
# the (possibly failed) upload id
addAlgorithm(name = "algorithm", fun = function(data, lrn, ...) {
  learner = LEARNERS[[lrn]]
  parallelStartMulticore(10, level = "mlr.resample")
  # FIX: guarantee the parallel backend is shut down even if runTaskMlr()
  # or the upload errors -- previously parallelStop() only ran on success
  on.exit(parallelStop(), add = TRUE)
  run = runTaskMlr(task = data, learner = learner, measures = MEASURES, models = FALSE)
  run.id = try(uploadOMLRun(run, confirm.upload = FALSE, tags = "mlr_defaults_openml60"))
  return(list(run = run, run.id = run.id))
})
# make algorithm design: one experiment per learner name
algo.designs = list(
  algorithm = data.frame(lrn = names(LEARNERS))
)
# add Experiments (full crossing of problems x algorithm design)
addExperiments(algo.designs = algo.designs)
summarizeExperiments()
# submit: 3h walltime, 2GB memory, 10 tasks per job
resources = list(walltime = 3*3600, memory = 2*1024, measure.memory = TRUE, ntasks = 10)
submitJobs(ids = findNotSubmitted(), resources = resources, reg = reg)
|
c54651a76d989dc8ebaa3356719ab4d951e78a0d
|
2eac833e9d0cec33683cac42558143a160834bc4
|
/R/stat-fivenumber.R
|
026fa2b81e63189f3a2cc32bce82926ec9e88bc2
|
[
"MIT"
] |
permissive
|
jrnold/ggtufte
|
c1c457c046e350e6afb7807fe41088e2899b8521
|
7ba3e4f0e185b1b22e7120bde51bc5502cd77e3a
|
refs/heads/master
| 2020-03-19T06:52:58.469107
| 2018-06-09T03:58:37
| 2018-06-09T03:58:37
| 136,063,123
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,617
|
r
|
stat-fivenumber.R
|
#' Calculate components of a five-number summary
#'
#' The five number summary of a sample is the minimum, first quartile,
#' median, third quartile, and maximum.
#'
#' @param na.rm If \code{FALSE} (the default), removes missing values with
#'    a warning. If \code{TRUE} silently removes missing values.
#' @param qs Quantiles to use for the five number summary.
#' @inheritParams ggplot2::stat_identity
#' @return A data frame with additional columns:
#'   \item{width}{width of boxplot}
#'   \item{min}{minimum}
#'   \item{lower}{lower hinge, 25\% quantile}
#'   \item{middle}{median, 50\% quantile}
#'   \item{upper}{upper hinge, 75\% quantile}
#'   \item{max}{maximum}
#' @seealso \code{\link{stat_boxplot}}
#' @export
#' @importFrom ggplot2 layer
stat_fivenumber <- function(mapping = NULL,
                            data = NULL,
                            geom = "boxplot",
                            qs = c(0, 0.25, 0.5, 0.75, 1),
                            na.rm = FALSE,
                            position = "identity",
                            show.legend = NA,
                            inherit.aes = TRUE,
                            ...) {
  layer(
    data = data,
    mapping = mapping,
    # BUG FIX: this previously passed ggplot2::StatSummaryBin, which does not
    # accept the `qs` parameter and does not compute a five-number summary;
    # the StatFivenumber ggproto defined in this file is the intended stat.
    stat = StatFivenumber,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      qs = qs,
      na.rm = na.rm,
      ...
    )
  )
}
#' @export
#' @format NULL
#' @usage NULL
#' @rdname stat_fivenumber
StatFivenumber <- ggplot2::ggproto("StatFivenumber", ggplot2::Stat,
  required_aes = c("x", "y"),
  # Compute the five-number summary (ymin/lower/middle/upper/ymax) for one
  # group, using weighted quantiles (via intercept-only quantile regression)
  # when a weight aesthetic is present.
  compute_group = function(data,
                           scales,
                           width = NULL,
                           na.rm = FALSE,
                           qs = c(0, 0.25, 0.5, 0.75, 1)) {
    if (length(qs) != 5) {
      stop("'qs' should contain 5 quantiles.")
    }
    # BUG FIX: sort() previously sat after stop() inside the error branch and
    # so never ran; quantiles must be ascending for the names below to hold.
    qs <- sort(qs)
    if (!is.null(data$weight)) {
      # BUG FIX: `tau` previously referenced an undefined variable `qunatiles`
      mod <- quantreg::rq(y ~ 1, weights = weight, tau = qs,
                          data = data)
      stats <- as.numeric(stats::coef(mod))
    } else {
      stats <- as.numeric(stats::quantile(data$y, qs))
    }
    names(stats) <- c("ymin", "lower", "middle", "upper", "ymax")
    df <- as.data.frame(as.list(stats))
    if (is.null(data$weight)) {
      n <- sum(!is.na(data$y))
    } else {
      # Sum up weights for non-NA positions of y and weight
      n <- sum(data$weight[!is.na(data$y) & !is.na(data$weight)])
    }
    df$x <- if (is.factor(data$x)) data$x[1] else mean(range(data$x))
    df$width <- width
    # relative-varwidth scaling factor used by the boxplot geom
    df$relvarwidth <- sqrt(n)
    df
  }
)
|
966864dd948cb992da45d3b7c5c1adc2c940c0cb
|
35e707c55cff22002c6c66f967837736e3e0a0d8
|
/R/sensitivity.R
|
da398e45ecad78a9122ebbcea9c53ddbe6635e4e
|
[] |
no_license
|
spesenti/SWIM
|
9f4416b990e9bc82109b87b849ffd208f8bfe77f
|
4e9cd0b2b4e4ad36e0798b1d67fdcd03c2d7114a
|
refs/heads/master
| 2022-05-04T10:16:25.964880
| 2022-01-10T12:41:16
| 2022-01-10T12:41:16
| 185,445,679
| 5
| 2
| null | 2022-01-09T23:04:06
| 2019-05-07T17:08:21
|
HTML
|
UTF-8
|
R
| false
| false
| 11,399
|
r
|
sensitivity.R
|
#' Sensitivities of a Stressed Model
#'
#' Provides different sensitivity measures that compare the stressed
#' and the baseline model.
#'
#' @inheritParams summary.SWIM
#' @inheritParams stress_moment
#' @param f A function, or list of functions, that, applied to
#' \code{x}, constitute the transformation of the data
#' for which the sensitivity is calculated.
#' @param type Character, one of \code{"Gamma", "Kolmogorov",
#' "Wasserstein", "reverse", "all"} (\code{default = "all"}).
#' @param s A function that, applied to \code{x}, defines the reverse
#' sensitivity measure. If \code{type = "reverse"} and
#' \code{s = NULL}, defaults to \code{type = "Gamma"}.
#' @param xCol Numeric or character vector, (names of) the columns
#' of the underlying data of the \code{object}
#' (\code{default = "all"}). If \code{xCol = NULL}, only
#' the transformed data \code{f(x)} is considered.
#' @param p Numeric vector, the p-th moment of Wasserstein distance (\code{default = 1}).
#'
#' @details Provides sensitivity measures that compare the stressed and
#' the baseline model. Implemented sensitivity measures:
#' \enumerate{
#' \item
#' \code{Gamma}, the \emph{Reverse Sensitivity Measure}, defined
#' for a random variable \code{Y} and scenario weights \code{w} by
#' \deqn{Gamma = ( E(Y * w) - E(Y) ) / c,}
#' where \code{c} is a normalisation constant such that
#' \code{|Gamma| <= 1}, see
#' \insertCite{Pesenti2019reverse}{SWIM}. Loosely speaking, the
#' Reverse Sensitivity Measure is the normalised difference
#' between the first moment of the stressed and the baseline
#' distributions of \code{Y}.
#'
#' \item
#' \code{Kolmogorov}, the Kolmogorov distance, defined for
#' distribution functions \code{F,G} by
#' \deqn{Kolmogorov = sup |F(x) - G(x)|.}
#'
#' \item
#' \code{Wasserstein}, the Wasserstein distance of order 1, defined
#' for two distribution functions \code{F,G} by
#' \deqn{Wasserstein = \int |F(x) - G(x)| dx.}
#'
#' \item
#' \code{reverse}, the \emph{General Reverse Sensitivity Measure}, defined
#' for a random variable \code{Y}, scenario weights \code{w}, and a function
#' \code{s:R -> R} by \deqn{epsilon = ( E(s(Y) * w) - E(s(Y)) ) / c,}
#' where \code{c} is a normalisation constant such that
#' \code{|epsilon| <= 1}. \code{Gamma} is a special instance of
#' the reverse sensitivity measure when \code{s} is the identity function.
#' }
#'
#' If \code{f} and \code{k} are provided, the sensitivity of the
#' transformed data is returned.
#'
#' @return A data.frame containing the sensitivity measures of the
#' stressed model with rows corresponding to different random
#' variables. The first two rows specify the \code{stress} and
#' \code{type} of the sensitivity measure.
#'
#' @examples
#' ## example with a stress on VaR
#' set.seed(0)
#' x <- as.data.frame(cbind(
#' "log-normal" = rlnorm(1000),
#' "gamma" = rgamma(1000, shape = 2)))
#' res1 <- stress(type = "VaR", x = x,
#' alpha = c(0.9, 0.95), q_ratio = 1.05)
#'
#' sensitivity(res1, wCol = 1, type = "all")
#' ## sensitivity of log-transformed data
#' sensitivity(res1, wCol = 1, type = "all",
#' f = list(function(x)log(x), function(x)log(x)), k = list(1,2))
#'
#' ## Consider the portfolio Y = X1 + X2 + X3 + X4 + X5,
#' ## where (X1, X2, X3, X4, X5) are correlated normally
#' ## distributed with equal mean and different standard deviations,
#' ## see the README for further details.
#'
#'
#' \dontrun{
#' set.seed(0)
#' SD <- c(70, 45, 50, 60, 75)
#' Corr <- matrix(rep(0.5, 5 ^ 2), nrow = 5) + diag(rep(1 - 0.5, 5))
#' if (!requireNamespace("mvtnorm", quietly = TRUE))
#' stop("Package \"mvtnorm\" needed for this function
#' to work. Please install it.")
#' x <- mvtnorm::rmvnorm(10 ^ 5,
#' mean = rep(100, 5),
#' sigma = (SD %*% t(SD)) * Corr)
#' data <- data.frame(rowSums(x), x)
#' names(data) <- c("Y", "X1", "X2", "X3", "X4", "X5")
#' rev.stress <- stress(type = "VaR", x = data,
#' alpha = c(0.75, 0.9), q_ratio = 1.1, k = 1)
#'
#' sensitivity(rev.stress, type = "all")
#' ## sensitivity to sub-portfolios X1 + X2 and X3 + X4
#' sensitivity(rev.stress, xCol = NULL, type = "Gamma",
#' f = rep(list(function(x)x[1] + x[2]), 2), k = list(c(2, 3), c(4, 5)))
#' plot_sensitivity(rev.stress, xCol = 2:6, type = "Gamma")
#' importance_rank(rev.stress, xCol = 2:6, type = "Gamma")
#' }
#'
#' @author Silvana M. Pesenti, Zhuomin Mao
#'
#' @seealso See \code{\link{importance_rank}} for ranking of random
#' variables according to their sensitivities,
#' \code{\link{plot_sensitivity}} for plotting
#' sensitivity measures and \code{\link{summary}} for
#' summary statistics of a stressed model.
#'
#' @references \insertRef{Pesenti2019reverse}{SWIM}
#'
#' @export
#'
# Compute sensitivity measures (Gamma, Kolmogorov, Wasserstein, reverse) of a
# stressed model. Returns a data.frame with one row per (stress, type) pair
# and one column per selected model component (plus one per transformation f).
sensitivity <- function(object, xCol = "all", wCol = "all",
                        type = c("Gamma", "Kolmogorov", "Wasserstein", "reverse", "all"),
                        f = NULL, k = NULL, s = NULL, p = 1){
  # --- input validation --------------------------------------------------
  if (!is.SWIM(object) && !is.SWIMw(object)) stop("Wrong object")
  if (anyNA(object$x)) warning("x contains NA")
  # NOTE(review): match.arg() is not used, so without this guard the default
  # length-5 `type` vector would reach the scalar `==` comparisons below
  # (an error on R >= 4.3). Assumes callers always pass a single string.
  if (missing(type)) type <- "all"
  # f and k must be parallel lists: f[[i]] is applied row-wise to the
  # columns indexed by k[[i]].
  if (!is.null(f) | !is.null(k)){
    if (is.function(f)) f <- list(f)
    if (!all(sapply(f, is.function))) stop("f must be a list of functions")
    if (is.numeric(k)) k <- list(k)
    if (!all(sapply(k, is.numeric))) stop("k must be a list of numeric vectors")
    if (length(f) != length(k)) stop("Objects f and k must have the same length.")
  }
  if (!is.null(s)){
    if (!is.function(s)) stop("s must be a function")
  }
  # Reverse sensitivity needs a risk functional s; default to the identity,
  # which makes "reverse" coincide with the Gamma sensitivity.
  if ((type == 'reverse' | type == 'all') && is.null(s)){
    warning("No s passed in. Using Gamma sensitivity instead.")
    s <- function(x) x
  }
  # --- column selection and display names (cname) ------------------------
  if (!is.null(xCol)){
    if (is.character(xCol) && xCol == "all") xCol <- 1:ncol(get_data(object))
    if (is.character(xCol) && xCol != "all") cname <- xCol
    if (is.null(colnames(get_data(object)))){
      cname <- paste("X", as.character(xCol), sep = "")
    } else if (!is.character(xCol)){
      cname <- colnames(get_data(object))[xCol]
    }
    x_data <- get_data(object)[ , xCol]
  }
  # Append one derived column per transformation f[[i]] applied to the
  # columns k[[i]]; derived columns are labelled "f1", "f2", ...
  if (!is.null(f)){
    z <- matrix(0, ncol = length(f), nrow = nrow(get_data(object)))
    for (i in 1:length(f)){
      z[, i] <- apply(get_data(object)[, k[[i]], drop = FALSE], 1, f[[i]])
    }
    if(is.null(xCol)) cname <- NULL
    cname <- c(cname, paste("f", 1:length(f), sep = ""))
    if(is.null(xCol)) x_data <- NULL
    x_data <- cbind(x_data, z)
    colnames(x_data) <- cname
  }
  if (is.character(wCol) && wCol == "all") wCol <- 1:ncol(get_weights(object))
  new_weights <- get_weights(object)[ , wCol]
  # Accumulator: empty data.frame with columns (stress, type, <components>).
  sens_w <- stats::setNames(data.frame(matrix(ncol = length(x_data) + 2, nrow = 0)), c("stress", "type", cname))
  # --- one block per sensitivity measure; each applies the helper over
  # every (weight column, data column) pair. The t()/colnames fix-ups
  # handle apply() dropping dimensions for single-column inputs.
  if (type == "Gamma" || type == "all"){
    sens_gamma_w <- function(z) apply(X = as.matrix(new_weights), MARGIN = 2, FUN = .gamma, z = z)
    sens_gw <- apply(X = as.matrix(x_data), MARGIN = 2, FUN = sens_gamma_w)
    if (length(wCol) == 1) sens_gw <- as.matrix(t(sens_gw))
    if (length(xCol) == 1) colnames(sens_gw) <- cname
    sens_w <- rbind(sens_w, data.frame(stress = names(object$specs)[wCol], type = rep("Gamma", length.out = length(wCol)), sens_gw))
  }
  if (type == "Kolmogorov" || type == "all"){
    sens_kolmogorov_w <- function(z) apply(X = as.matrix(new_weights), MARGIN = 2, FUN = .kolmogorov, z = z)
    sens_kw <- apply(X = as.matrix(x_data), MARGIN = 2, FUN = sens_kolmogorov_w)
    if (length(wCol) == 1) sens_kw <- as.matrix(t(sens_kw))
    if (length(xCol) == 1) colnames(sens_kw) <- cname
    sens_w <- rbind(sens_w, data.frame(stress = names(object$specs)[wCol], type = rep("Kolmogorov", length.out = length(wCol)), sens_kw))
  }
  if (type == "Wasserstein" || type == "all"){
    # c(p) allows a vector of Wasserstein orders; one result block per order,
    # labelled "Wasserstein p = <value>".
    for (p_value in c(p)) {
      sens_wasser_w <- function(z) apply(X = as.matrix(new_weights), MARGIN = 2, FUN = .wasserstein, z = z, p = p_value)
      sens_ww <- apply(X = as.matrix(x_data), MARGIN = 2, FUN = sens_wasser_w)
      if (length(wCol) == 1) sens_ww <- as.matrix(t(sens_ww))
      if (length(xCol) == 1) colnames(sens_ww) <- cname
      sens_w <- rbind(sens_w, data.frame(stress = names(object$specs)[wCol], type = rep("Wasserstein", length.out = length(wCol)), sens_ww))
      # Paste p to Wasserstein
      idx <- sens_w["type"] == "Wasserstein"
      sens_w[idx, "type"] <- paste("Wasserstein", "p =", p_value)
    }
  }
  if (type == "reverse" || type == "all"){
    sens_reverse_w <- function(z) apply(X = as.matrix(new_weights), MARGIN = 2, FUN = .reverse, z = z, s=s)
    sens_rw <- apply(X = as.matrix(x_data), MARGIN = 2, FUN = sens_reverse_w)
    if (length(wCol) == 1) sens_rw <- as.matrix(t(sens_rw))
    if (length(xCol) == 1) colnames(sens_rw) <- cname
    sens_w <- rbind(sens_w, data.frame(stress = names(object$specs)[wCol], type = rep("Reverse", length.out = length(wCol)), sens_rw))
  }
  rownames(sens_w) <- NULL
  return(sens_w)
}
# Gamma sensitivity of input z under scenario weights w.
# Normalises cov(z, w) by its extremal value over rearrangements of w:
# the comonotone arrangement when the covariance is non-negative, the
# counter-monotone arrangement otherwise, yielding a value in [-1, 1].
.gamma <- function(z, w){
  w <- as.numeric(w)
  pos <- rank(z, ties.method = "first")
  numer <- stats::cov(z, w)
  if (numer >= 0) {
    # weights arranged comonotonically with z maximise the covariance
    numer / stats::cov(z, sort(w)[pos])
  } else {
    # counter-monotone arrangement minimises the covariance
    -numer / stats::cov(z, sort(w, decreasing = TRUE)[pos])
  }
}
# Kolmogorov distance between the baseline ecdf of z and its ecdf under the
# scenario weights w (mean-one Radon-Nikodym derivatives): the sup-distance
# between cumsum of sorted weights and the uniform count, scaled by n.
# All inputs of a given stress share the same weights, so this compares
# different stresses rather than different inputs of one stress.
# (Removed dead commented-out debug print() calls from the original.)
.kolmogorov <- function(z, w){
  n <- length(z)
  # weighted ecdf numerator at the first n-1 order statistics of z
  xw_cdf <- cumsum(w[order(z)])[1:(n-1)]
  # baseline ecdf numerator is 1, 2, ..., n-1; take the maximal gap
  kol_sense <- max(abs(xw_cdf - 1:(n-1))) / n
  return(kol_sense)
}
# Wasserstein distance of order p between the distribution of z under the
# baseline and under the scenario weights w, computed from the ecdf gap
# integrated over the spacings between consecutive order statistics of z.
.wasserstein <- function(z, w, p = 1){
  n <- length(z)
  ord <- order(z)
  # |weighted ecdf numerator - baseline ecdf numerator| at the first n-1
  # order statistics of z
  cdf_gap <- abs(cumsum(w[ord])[1:(n - 1)] - 1:(n - 1))
  spacings <- diff(z[ord], lag = 1)
  (sum(cdf_gap^(p) * spacings) / n)^(1/p)
}
# Reverse sensitivity measure for input z under scenario weights w and risk
# functional s: the change in E[s(X)] from the baseline P to the stressed
# model Q, normalised by the largest change achievable over rearrangements
# of the weights (comonotone for increases, counter-monotone for decreases),
# giving a value in [-1, 1].
# Improvements over the original: vapply() replaces type-unstable sapply(),
# and s is applied to the sorted z once instead of inside both branches.
.reverse <- function(z, s, w){
  w <- as.numeric(w)
  # vapply guarantees a numeric vector even for degenerate input
  s_z <- vapply(z, s, numeric(1))
  EQ_sX <- mean(s_z * w)
  EP_sX <- mean(s_z)
  s_z_inc <- vapply(sort(z), s, numeric(1))
  if (EQ_sX >= EP_sX){
    # comonotone weight arrangement attains the maximal stressed mean
    max_EQ <- mean(s_z_inc * sort(w))
    reverse_sens <- (EQ_sX - EP_sX) / (max_EQ - EP_sX)
  } else {
    # counter-monotone arrangement attains the minimal stressed mean
    min_EQ <- mean(s_z_inc * sort(w, decreasing = TRUE))
    reverse_sens <- - (EQ_sX - EP_sX) / (min_EQ - EP_sX)
  }
  return(reverse_sens)
}
|
d6f71f10e1cc847ff92c30e03b4b7ee76049025e
|
4c2835dcc76cdff0f3c7f2fcae6fc59896a6f632
|
/man/beamht_std.Rd
|
298a5f5813c910410e960c1e21c293e5ff52171c
|
[] |
no_license
|
birderboone/Radar
|
fe747a1d3991a4e1ab99616d4b5efe765d786f46
|
b1715b1006faf676152cef697c05f49e230f017b
|
refs/heads/master
| 2021-05-06T23:16:38.482865
| 2017-12-03T20:31:02
| 2017-12-03T20:31:02
| 112,961,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 835
|
rd
|
beamht_std.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beamht_std.R
\name{beamht_std}
\alias{beamht_std}
\title{Calculates the height of the radar beam using standard refraction}
\usage{
beamht_std(range, groundht = 0, elev = 0.5, radar = radar,
nexrad_n = "V:/Documents/nexrad_site_list_with_utm.csv")
}
\arguments{
\item{range}{numeric range of each pulse volume}
\item{groundht}{the ground height above sea level at each pulse volume}
\item{elev}{the elevation of the radar beam. Can be a scalar or vector}
\item{radar}{the 3 letter radar code}
\item{nexrad_n}{location of the nexrad site table. Default is the location on the AP's server}
}
\description{
Output is a list containing the bottom, middle, and top of the beam.
Use beamht_radio if you want to calculate beam heights using radiosonde
}
|
cbf3caf7f1247ba244dda740a0d39bd1741d5cb5
|
59ad950a6706b65979c064a231d0e80860e1f9b9
|
/ui.R
|
f93c1de524efa8546da447f23cec03a0e5f46511
|
[] |
no_license
|
carlosrochap/ddp-project
|
3d6854667bd5d8d44f3eb27a346e85e35b734701
|
0793d418c523b3f09bd11f05df0ea9f97f76b101
|
refs/heads/master
| 2020-03-29T08:32:22.537494
| 2018-09-21T05:48:29
| 2018-09-21T05:48:29
| 149,715,778
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,331
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)

# Define UI: a sidebar with two radio-button controls (training split and
# predictor choice) and a main panel showing the fitted regression plot.
shinyUI(fluidPage(

  # Application title
  titlePanel("Using different predictors and percentage splits for training linear regression on the Boston dataset"),

  # Sidebar with radio buttons selecting the training fraction and predictor;
  # input ids "split_level" and "attr" are read by the server
  sidebarLayout(
    sidebarPanel(
      h3("Select one of the values"),
      radioButtons("split_level", "Data % used for training:",
                   c("10%" = ".1",
                     "20%" = ".2",
                     "40%" = ".4",
                     "80%" = ".8")),
      radioButtons("attr", "Select Predictor",
                   c("crim" = "crim",
                     "zn" = "zn",
                     "chas" = "chas",
                     "age" = "age"))
    ),

    # Show the scatter plot with the fitted regression line
    # (fixed user-facing typos: "prectior" -> "predictor",
    # "corresonding" -> "corresponding")
    mainPanel(
      h5("scatter plot of the criterion medv and the selected predictor with the corresponding regression line fitted using the selected percentage of training data"),
      plotOutput("distPlot")
    )
  )
))
|
75585a3a87012ef7b72413ac4be5d29b2c5ae3ee
|
13cef36ee0392f108b0f97228efc7c855e7dba3d
|
/man/h5fileLocking.Rd
|
3010ae5831360811d18f0664dcd5525e1739b320
|
[] |
no_license
|
MatthieuRouland/rhdf5
|
a6068de28a21a0e0715fc6c930f819009dc23dbc
|
bf49c81e1e255b7d4cae6d1048dc835d8921081a
|
refs/heads/master
| 2022-11-07T22:05:51.323176
| 2020-07-06T10:33:29
| 2020-07-06T10:33:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,628
|
rd
|
h5fileLocking.Rd
|
\name{h5testFileLocking}
\alias{h5testFileLocking}
\alias{h5enableFileLocking}
\alias{h5disableFileLocking}
\title{Test and set file locking for HDF5}
\description{HDF5 1.10 uses file locking by default. On some file systems
this is not available, and the HDF5 library will throw an error if the user
attempts to create or access a file located on such a file system. These
functions help identify if file locking is available without throwing an
error, and allow the locking to be disabled for the duration of the R
session if needed.}
\usage{
h5testFileLocking(location)
h5disableFileLocking()
h5enableFileLocking()
}
\arguments{
  \item{location}{The name of a directory or file to test.  If an existing
directory is provided a temporary file will be created in this folder.
If a non-existent location is provided, a file with that name will be created,
tested for file locking, and then removed.  Providing an existing file will
result in an error.}
}
\value{
\code{h5testFileLocking} returns \code{TRUE} if a file can be successfully
locked at the specified location, or \code{FALSE} otherwise.
\code{h5disableFileLocking} and \code{h5enableFileLocking} are called
for the side effect of setting or unsetting the environment variable
\code{HDF5_USE_FILE_LOCKING} and do not return anything.
}
\details{
\code{h5testFileLocking} will create a temporary file and then attempt to apply
a file lock using the appropriate function within the HDF5 library. The
success or failure of the locking is then recorded and the temporary file
removed. Even relatively low level functions such as \code{\link{H5Fcreate}}
will fail inelegantly if file locking fails.
\code{h5disableFileLocking} will set the environment variable
\code{RHDF5_USE_FILE_LOCKING=FALSE}, which is the recommended way to disable
this behaviour if file locking is not supported. This will only persist within
the current R session. You can set the environment variable outside of R if
this is a more general issue on your system.
\code{h5enableFileLocking} will unset the \code{RHDF5_USE_FILE_LOCKING}.
More discussion of HDF5's use of file locking can be found online e.g.
https://forum.hdfgroup.org/t/hdf5-1-10-0-and-flock/3761/4 or
https://forum.hdfgroup.org/t/hdf5-files-on-nfs/3985/5
}
\author{Mike Smith}
\examples{
## either a file name or directory can be tested
file <- tempfile()
dir <- tempdir()
h5testFileLocking(dir)
h5testFileLocking(file)
## we can check for file locking, and disable if needed
if( !h5testFileLocking(dir) ) {
h5disableFileLocking()
}
}
\keyword{ IO }
\keyword{ file }
|
8c3de6a1ca8581d0ca42100749096cc9b1e8a05d
|
c5c8edf59f5ac6458e75439fae252381c57fcba8
|
/RestaurantRevenue/caret - using random forest.R
|
21a0b5d3e3334aee5bb1c3417de3635baaad67f0
|
[] |
no_license
|
jiunsiew/UFLDL
|
ceb8db3f3b2a9ee6b1650d977cd6d332a993ee5a
|
8be24944de7cc0b24daa45cdd3fa0e03d2db8bbe
|
refs/heads/master
| 2020-12-24T17:25:53.657052
| 2016-04-29T07:14:13
| 2016-04-29T07:14:13
| 39,235,669
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,796
|
r
|
caret - using random forest.R
|
# Restaurant-revenue Kaggle script: fetch training data from an ODBC source,
# explore predictor/revenue relationships, fit a random forest with caret,
# and write a submission CSV. Assumes the 'EVORA' DSN is configured locally.
# get data ----------------------------------------------------------------
library(RODBC)
conn <- odbcConnect('EVORA')
trainData <- sqlFetch(conn, 'restaurant_revenue_prediction.training_data')
trainPercent = 0.8
# look at relationship with P ----------------------------------------------
library(tidyr)
# long format of the P1..P37 feature columns for faceted plotting
plotDf <- gather(trainData[, 5:ncol(trainData)], variable, value, -Type, -revenue)
library(ggplot2)
theme_set(theme_bw())
ggplot(plotDf, aes(x = value, y = revenue)) +
  geom_point(aes(colour = Type)) +
  facet_wrap(~variable)
library(caret)
#set.seed(107)
inTrain <- createDataPartition(y = trainData$revenue, p = trainPercent, list = FALSE)
# drop the high-cardinality City column from both splits
training <- trainData[inTrain, !(names(trainData) %in% "City")]
testing <- trainData[-inTrain, !(names(trainData) %in% "City")]
# NOTE(review): "ctrl" is removed here but is only created two lines below,
# so this rm() emits a "object 'ctrl' not found" warning on a fresh session.
rm(list = c("trainData","plotDf","ctrl"))
#library(mlbench)
#dt <- data(BostonHousing)
ctrl <- trainControl(method = "cv")
library(pROC)
rfFit <- train(revenue ~ P1:P37, data = training, method = "rf",tuneLength = 10, trControl = ctrl, preProc = c("center","scale"))
#plot(rfFit)
rfRevenue <- predict(rfFit, newdata = testing)
# relative hold-out error, scaled by the maximal observed revenue
plot((rfRevenue - testing$revenue)/max(testing$revenue))
# use the model on given larger test set.
TestingData <- sqlFetch(conn,'restaurant_revenue_prediction.test_data')
TestingData <- TestingData[,!(names(TestingData) %in% "City")]
rfTestRevenue <- predict(rfFit, newdata = TestingData)
# submission format: Id + Prediction columns
resultData <- data.frame(TestingData[,1:1], rfTestRevenue)
names(resultData)[1] <- paste("Id")
names(resultData)[2] <- paste("Prediction")
date <- Sys.Date()
fileName <- paste("submission_",date,"_1",".csv", sep = "")
write.csv(resultData,file=fileName,row.names = FALSE)
#xtab <- table(rfRevenue, testing$revenue)
#confusionMatrix(data = plsRevenue, testing$revenue)
|
011064342999c8052dead6a35195958e1e311979
|
b2e75a3733198ee98618041005fec8a60f8ba7fb
|
/man/rawpathfinder.Rd
|
3b6d8c67a3ea6c3a47ba6262863370d746c296c4
|
[] |
no_license
|
MatteoLacki/rawpathfinder_R
|
4d09fc105e711d8973575c7f4048a5ef38b202d2
|
7d5690dd628a87044d119a352fdb64549cc496ce
|
refs/heads/main
| 2023-08-01T04:25:33.018014
| 2021-09-10T15:15:26
| 2021-09-10T15:15:26
| 404,970,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 814
|
rd
|
rawpathfinder.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rawpathfinder.R
\name{rawpathfinder}
\alias{rawpathfinder}
\title{Query rawpathfinder for files/folders raw unix paths.}
\usage{
rawpathfinder(query, protocol = "http", ip = "192.168.1.209", port = 8958)
}
\arguments{
\item{query}{A character vector with names of files/folders.}
\item{protocol}{Used protocol: http or https.}
\item{ip}{The ip of the rawpathfinder flask service.}
\item{port}{The port of the rawpathfinder flask service.}
}
\value{
A data frame with file/folder names mapped to Unix paths.
}
\description{
Query rawpathfinder for files/folders raw unix paths.
}
\examples{
\dontrun{
query = c("M210903_008_1_1_4704.d",
"M210903_017_1_1_4713.d",
"M210903_026_1_1_4722.d")
rawpathfinder(query)
}
}
|
b9d186ef4cad534ca4d597482b17008f463d1f6e
|
858fac00773dc2e419d9002850d3145822d79691
|
/week6_stat/stat.R
|
6196a205dedc19dd9fa7c3b455457f4d448241f9
|
[] |
no_license
|
uh-sheesh/AIT602_Spring2021
|
538f98e999dba4ef6125e106535fa77580435ad4
|
f6b64ba9d9fca7640c335d44a548f0da293ec5a3
|
refs/heads/main
| 2023-06-09T04:52:23.198008
| 2021-04-15T18:27:10
| 2021-04-15T18:27:10
| 340,192,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
stat.R
|
# Exploratory statistics on a corona tweets dataset: load, distributional
# checks, ANOVA and regressions (sections below).
library(ggplot2)
library(stringr)
library(readr)
library(dplyr)
library(reshape2)
library(PerformanceAnalytics)
library(rtweet)
library(sm)
library(car)
# NOTE(review): machine-specific path; the script only runs from this layout
setwd("~/git/AIT602_Spring2021//week6_stat/")
#############
# 1. Load the data.
data <- read_delim("data/corona_tweets_03042021.csv", delim = ",",col_names = TRUE)
# treat tweet source and author as categorical
data$source <- as.factor(data$source)
data$user_id <- as.factor(data$user_id)
#############
# 2. Tweet frequency per user & per source
table(data$user_id)
summary(data$source)
#############
# 3. Numbers of favorites, retweets, and text length distribution
hist(data$favorite_count)
hist(data$retweet_count)
hist(data$display_text_width)
# density graph
d <- density(data$display_text_width)
plot(d)
polygon(d, col="red")
# Not normal...
shapiro.test(data$favorite_count)
shapiro.test(data$retweet_count)
shapiro.test(data$display_text_width)
#############
# 4. Normality test per each source (3 biggest)
shapiro.test(data$favorite_count[data$source=="Twitter for Android"])
shapiro.test(data$favorite_count[data$source=="Twitter for iPhone"])
shapiro.test(data$favorite_count[data$source=="Twitter Web App"])
shapiro.test(data$retweet_count[data$source=="Twitter for Android"])
shapiro.test(data$retweet_count[data$source=="Twitter for iPhone"])
shapiro.test(data$retweet_count[data$source=="Twitter Web App"])
shapiro.test(data$display_text_width[data$source=="Twitter for Android"])
shapiro.test(data$display_text_width[data$source=="Twitter for iPhone"])
shapiro.test(data$display_text_width[data$source=="Twitter Web App"])
# Nothing is normal distribution
# homogeneity of variance (Levene's test, mean-centred)
leveneTest(data$favorite_count, data$source, center=mean) #homogeneous
leveneTest(data$retweet_count, data$source, center=mean) #homogeneous
leveneTest(data$display_text_width, data$source, center=mean) # NOT homogeneous
########
# 5. Some ANOVAs -- a quasi-experiment
# So, in many cases, ANOVA results could be biased.
summary(aov(display_text_width ~ source, data=data))
summary(aov(favorite_count ~ source, data=data))
summary(aov(retweet_count ~ source, data=data))
########
# 6. Linear Regressions
# OLS of favorites/retweets on tweet length, with diagnostic plots.
fav <- lm(favorite_count ~ display_text_width, data=data)
ret <- lm(retweet_count ~ display_text_width, data=data)
summary(fav)
summary(ret)
# plot 1: Residuals vs Fitted: showing linear or non-linear relationship (line needs to be straight)
# plot 2: Normal Q-Q: showing if residuals are normaly distributed (if folloinwg the straight line) -- normality
# plot 3: Scale-Location: shows if residuals are spread equally along the ranges of predictors -- heteroscedasticity, Line needs to be horizontal.
# plot 4: Residual vs Leverage: Shows if there are outliers who are influential in deciding the regression line. Regression lines need to be outside of Cook's distance lines.
par(mfrow=c(2,2)) # init 4 charts in 1 panel
plot(fav)
plot(ret)

# BUG FIX: the original called table(corona_tweets$is_quote), but
# `corona_tweets` is never defined in this script -- the loaded data frame
# is `data`.
table(data$is_quote)

just_test <- lm(is_quote ~ display_text_width + favorite_count + retweet_count, data = data)
summary(just_test)
car::vif(just_test) # it's safe from multicolinearity issue
#### some benchmarks
# Synthetic normal samples with the same moments as the observed variables,
# regressed against each other as a comparison baseline.
fav_bench <- rnorm(500, mean=mean(data$favorite_count), sd=sd(data$favorite_count))
ret_bench <- rnorm(500, mean=mean(data$retweet_count), sd=sd(data$retweet_count))
width_bench <- rnorm(500, mean=mean(data$display_text_width), sd=sd(data$display_text_width))
fav_lm <- lm(fav_bench ~ width_bench)
summary(fav_lm)
ret_lm <- lm(ret_bench ~ width_bench)
summary(ret_lm)
plot(fav_lm)
plot(ret_lm)
########
# 7. Logistic Regression (section was mislabelled "6." in the original)
# this particular example is not a good case -- "is_quote" is too skewed.
lgit <- glm(is_quote ~ display_text_width + favorite_count + retweet_count,
            data = data, family = "binomial")
summary(lgit)
|
e6f31d8bb44e26d4bf6b769d7e1887ce0094bce0
|
0171ecb9bfbbcc1181855a6790861dacad824b63
|
/src/differential_expression/Differential_expression_TR_BR_Subsample_3.R
|
ce50c399781b8aca4abaf591190d37fb93270b1e
|
[] |
no_license
|
aerugo/BB2490-RNASeq-Project
|
7f063bc86d0b77aed1cebb3e2beb3c1c196e2937
|
9ec36446be1efae262b0a69674e92d1b921fef89
|
refs/heads/master
| 2021-08-18T13:42:33.498687
| 2021-01-13T15:35:14
| 2021-01-13T15:35:14
| 51,921,805
| 1
| 2
| null | 2016-02-17T16:25:26
| 2016-02-17T12:43:24
|
Shell
|
UTF-8
|
R
| false
| false
| 18,638
|
r
|
Differential_expression_TR_BR_Subsample_3.R
|
## Differential Expression analysis for Subsample 3
###### Awk codes for combining files together for technical replicates (Possible for cooler solutions)
## join -j 1 SN11_UNST_TTAGGC_L003_count.txt SN11_UNST_TTAGGC_L002_count.txt| join -j 1 SN11_UNST_TTAGGC_L004_count.txt - | join -j 1 SN11_UNST_TTAGGC_L005_count.txt - > SN11_UNST_SS2.txt
## join -j 1 SN_11_LPS_TGACCA_L003_count.txt SN_11_LPS_TGACCA_L002_count.txt| join -j 1 SN_11_LPS_TGACCA_L004_count.txt - | join -j 1 SN_11_LPS_TGACCA_L005_count.txt - > SN11_LPS_SS2.txt
## join -j 1 SN_12_LPS_GCCAAT_L004_count.txt SN_12_LPS_GCCAAT_L003_count.txt| join -j 1 SN_12_LPS_GCCAAT_L005_count.txt - | join -j 1 SN_12_LPS_GCCAAT_L006_count.txt - > SN12_LPS_SS2.txt
## join -j 1 SN12_UNST_ACAGTG_L004_count.txt SN12_UNST_ACAGTG_L003_count.txt| join -j 1 SN12_UNST_ACAGTG_L005_count.txt - | join -j 1 SN12_UNST_ACAGTG_L006_count.txt - > SN12_UNST_SS2.txt
## For Biological Replicates
# awk '{sum=0; for (i=1; i<=NF; i++) { sum+= $i } print $1 " " sum}' SN11_LPS_SS3.txt > LPS_temp2
# awk '{sum=0; for (i=1; i<=NF; i++) { sum+= $i } print $1 " " sum}' SN12_LPS_SS3.txt > LPS_temp3
# awk '{sum=0; for (i=1; i<=NF; i++) { sum+= $i } print $1 " " sum}' SN11_UNST_SS3.txt > UNST_temp2
# awk '{sum=0; for (i=1; i<=NF; i++) { sum+= $i } print $1 " " sum}' SN12_UNST_SS3.txt > UNST_temp3
# join -j 1 LPS_temp2 LPS_temp3| join -j 1 LPS_temp1 - > LPS_SS3.txt
# join -j 1 UNST_temp2 UNST_temp3| join -j 1 UNST_temp1 - > UNST_SS3.txt
## Since the reads for SN10 are saturated at subsample scheme 3, we only take into consideration technical replicates from samples S11 and S12
####################################################################################################################
## Differential expression analysis for technical replicates Sample S11   ##
## DESeq2 workflow: read HTSeq counts, merge LPS vs unstimulated lanes,
## filter, fit, and extract genes significant at padj < 0.05 & |LFC| >= 1.
setwd("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3")
SN11_LPS_SS3 <- read.table("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3/SN11_LPS_SS3.txt", quote="\"", comment.char="")
SN11_UNST_SS3 <- read.table("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3/SN11_UNST_SS3.txt", quote="\"", comment.char="")
colnames (SN11_LPS_SS3) <- c("Gene_features", "LPS_Lane_4", "LPS_Lane_3", "LPS_Lane_2", "LPS_Lane_1")
colnames (SN11_UNST_SS3) <- c("Gene_features", "UNST_Lane_4", "UNST_Lane_3", "UNST_Lane_2", "UNST_Lane_1" )
data_SN11_SS3 <- merge (SN11_LPS_SS3, SN11_UNST_SS3, by.y ="Gene_features")
N = dim(data_SN11_SS3)[1]
rownames(data_SN11_SS3) = data_SN11_SS3[,1]
data_SN11_SS3 = data_SN11_SS3[,-1]
# drop the first 5 rows -- presumably the HTSeq special counters
# (__no_feature etc.) sorted to the top; TODO confirm against the count files
data_SN11_SS3 = data_SN11_SS3[c(6:N),]
# remove genes with (near-)zero counts across all lanes
data_SN11_SS3 <- data_SN11_SS3[ rowSums(data_SN11_SS3) > 1,]
## 28,325 genes in consideration
colData <- DataFrame(condition=factor(c("LPS", "LPS", "LPS", "LPS","UNST", "UNST", "UNST", "UNST")))
dds <- DESeqDataSetFromMatrix(data_SN11_SS3, colData, formula(~ condition))
dds <- DESeq(dds)
pdf("~/BB2490-RNASeq-Project/results/MA_plots/MA_technical_replicates_SN11_SS3.pdf", height=8, width=12)
plotMA(dds, main="Differential Gene Expression in S11 technical replicates at Subsample 3")
dev.off()
## See the above argument to follow the steps done in the following steps
res_SN11_SS3 <- results(dds)
# drop genes with NA or exactly-zero adjusted p-values
res_clean_SN11_SS3 <- res_SN11_SS3[(!is.na(res_SN11_SS3$padj)) &
                                     (res_SN11_SS3$padj != 0.000000e+00), ]
resOrdered_SN11_ss3 <- res_clean_SN11_SS3[order(res_clean_SN11_SS3$padj),]
head(resOrdered_SN11_ss3)
# significance criteria: 5% FDR and at least 2-fold change
sig_S11_SS3 <- resOrdered_SN11_ss3[resOrdered_SN11_ss3$padj<0.05 &
                                     abs(resOrdered_SN11_ss3$log2FoldChange)>=1,]
## This gave us 1150 genes that are differentially expressed using above criteria values
summary(res_clean_SN11_SS3)
## 3965 genes upregulated and 4255 genes downregulated
sum(res_clean_SN11_SS3$padj < 0.1, na.rm=TRUE)
## 6640 genes differentially expressed
## 6620 genes differentially expressed
sum(res_clean_SN11_SS3$padj < 0.05, na.rm=TRUE)
## 5827 genes differentially expressed
## 7347 gene differentially expressed
S11_genes_ss3 <- as.character(sig_S11_SS3@rownames)
####################################################################################################################
## Differential expression analysis for technical replicates Sample S12   ##
## Same DESeq2 workflow as for S11 above, applied to the S12 lanes.
setwd("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3")
SN12_LPS_SS3 <- read.table("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3/SN12_LPS_SS3.txt", quote="\"", comment.char="")
SN12_UNST_SS3 <- read.table("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3/SN12_UNST_SS3.txt", quote="\"", comment.char="")
colnames (SN12_LPS_SS3) <- c("Gene_features", "LPS_Lane_4", "LPS_Lane_3", "LPS_Lane_2", "LPS_Lane_1")
colnames (SN12_UNST_SS3) <- c("Gene_features", "UNST_Lane_4", "UNST_Lane_3", "UNST_Lane_2", "UNST_Lane_1")
data_SN12_SS3 <- merge (SN12_LPS_SS3, SN12_UNST_SS3, by.y ="Gene_features")
N = dim(data_SN12_SS3)[1]
rownames(data_SN12_SS3) = data_SN12_SS3[,1]
data_SN12_SS3 = data_SN12_SS3[,-1]
# drop the first 5 rows (HTSeq special counters sorted to the top -- see S11)
data_SN12_SS3 = data_SN12_SS3[c(6:N),]
data_SN12_SS3 <- data_SN12_SS3[rowSums(data_SN12_SS3) > 1,]
## 27,640 genes in consideration
colData <- DataFrame(condition=factor(c("LPS", "LPS", "LPS", "LPS","UNST", "UNST", "UNST", "UNST")))
dds <- DESeqDataSetFromMatrix(data_SN12_SS3, colData, formula(~ condition))
dds <- DESeq(dds)
pdf("~/BB2490-RNASeq-Project/results/MA_plots/MA_technical_replicates_SN12_SS3.pdf", height=8, width=12)
plotMA(dds, main="Differential Gene Expression in S12 technical replicates at Subsample 3")
dev.off()
## See the above argument to follow the steps done in the following steps
res_SN12_SS3 <- results(dds)
res_clean_SN12_SS3 <- res_SN12_SS3[(!is.na(res_SN12_SS3$padj)) &
                                     (res_SN12_SS3$padj != 0.000000e+00), ]
resOrdered_SN12_SS3 <- res_clean_SN12_SS3[order(res_clean_SN12_SS3$padj),]
head(resOrdered_SN12_SS3)
# significance criteria: 5% FDR and at least 2-fold change
sig_S12_SS3 <- resOrdered_SN12_SS3[resOrdered_SN12_SS3$padj<0.05 &
                                     abs(resOrdered_SN12_SS3$log2FoldChange)>=1,]
## This gave us 736 genes that are differentially expressed using above criteria values
## This gave us 833 genes that are differentially expressed using above criteria values
## This gave us 897 genes that are differentially expressed using above criterua
summary(resOrdered_SN12_SS3)
## 2107 genes upregulated and 2530 genes downregulated
## 2970 Upregulated and 3376 downregulated genes
## 3585 3885
sum(res_clean_SN12_SS3$padj < 0.1, na.rm=TRUE)
## 6346 genes differentially expressed
## 7474
sum(res_clean_SN12_SS3$padj < 0.05, na.rm=TRUE)
## 5573 genes differentially expressed
## 6625
S12_genes_ss3 <- as.character(sig_S12_SS3@rownames)
####################################################################################################################
####################################################################################################################
## Differential expression analysis for biological replicates all samples ##
## Lane-summed counts per sample (SN10/SN11/SN12), LPS vs unstimulated.
Treated_Subsample_SS3 <- read.table("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3/LPS_SS3.txt", quote="\"", comment.char="")
Untreated_Subsample_SS3 <- read.table("~/BB2490-RNASeq-Project/data/count/Htseq/Subsample_3//UNST_SS3.txt", quote="\"", comment.char="")
colnames (Untreated_Subsample_SS3) <- c("Gene_features", "UNST_SN10", "UNST_SN11", "UNST_SN12")
colnames (Treated_Subsample_SS3) <- c("Gene_features", "LPS_SN10", "LPS_SN11", "LPS_SN12")
data_BS_SS3 <- merge (Treated_Subsample_SS3, Untreated_Subsample_SS3, by.y ="Gene_features" )
## Considering each lanes as technical replicates and hence given names based on lane numbers
N = dim(data_BS_SS3)[1]
rownames(data_BS_SS3) = data_BS_SS3[,1]
data_BS_SS3 = data_BS_SS3[,-1]
data_BS_SS3= data_BS_SS3[c(6:N),]
## removing last 5 rows which in our case turn out be in top 5 rows
data_BS_SS3 <- data_BS_SS3[ rowSums(data_BS_SS3) > 1, ]
## Filtering to reduce number of genes that have 0 count values
## 27554 ENSEMBL genes
## 29784 ENSEMBL genes
## 31106 ENSEMBL genes
colData <- DataFrame(condition=factor(c("LPS", "LPS", "LPS","UNST", "UNST", "UNST")))
dds <- DESeqDataSetFromMatrix(data_BS_SS3, colData, formula(~ condition))
dds <- DESeq(dds)
# plotMA(dds, main="Differential Gene Expression in Sample S10 at Subsample 100% data")
pdf("~/BB2490-RNASeq-Project/results/MA_plots/MA_biological_replicates_SS3.pdf", height=8, width=12)
plotMA(dds, main="Differential Gene Expression in all samples at Subsample 3")
dev.off()
## The MA plot kinda of supports the argument of large number of genes differential expressed between two condition
res_SS3 <- results(dds)
res_clean_SS3 <- res_SS3[(!is.na(res_SS3$padj)) &
                           (res_SS3$padj != 0.000000e+00), ]
## I did this filtering to remove genes with 0 padjusted values
## May be it would be interesting to see why there is padjusted to 0
resOrdered_SS3 <- res_clean_SS3[order(res_clean_SS3$padj),]
head(resOrdered_SS3)
# significance criteria: 5% FDR and at least 2-fold change
sig_SS3 <- resOrdered_SS3[resOrdered_SS3$padj<0.05 &
                            abs(resOrdered_SS3$log2FoldChange)>=1,]
## This gave us 1067 genes that are differential expressed with the above
## criteria.
## Defning the criteria for the genes which are significant as ones
## with the Padjusted values lesser than 5% FDR and havin log2Fold change
## greater than 1. It would be interesting to see what happens with different
## cutoff. The above results ga
summary(res_clean_SS3)
#754 genes upregulated and 1247 genes downregulated
# 864 gene upregulated and 1449 genes downregulated
# 954 gene upregulated and 1569 downregulated
sum(res_clean_SS3$padj < 0.1, na.rm=TRUE)
## 2001 genes are differentially expressed at 10% FDR
## 2318 genes
## 2527
sum(res_clean_SS3$padj < 0.05, na.rm=TRUE)
## 1612 genes are differentially expressed at 5% FDR
## 1907 genes are differentially expressed at 5% FDR
## 2053
genes_BR_SS3 <- as.character(sig_SS3@rownames)
# export the padj < 0.05 table for downstream comparison
write.table(as.data.frame(resOrdered_SS3[resOrdered_SS3$padj<0.05,]),
            "~/BB2490-RNASeq-Project/results/Differential_Expression_SS3.tsv",
            sep="\t", quote =F)
########
library(gplots)
library(VennDiagram)
Common_genes_SS3<- Reduce(intersect, list(S10_genes_ss2,
S11_genes_ss3,
S12_genes_ss3))
test_2 <- Reduce(intersect, list(Common_genes_SS2,
genes_BR_SS2))
annots <- select(org.Hs.eg.db, keys=rownames(sig_SS3),
columns=c("SYMBOL","GENENAME"), keytype="ENSEMBL")
resultTable <- merge(sig_SS3, annots, by.x=0, by.y="ENSEMBL")
head(resultTable)
####################################################################
## Gene Ontology analysis for the subsample -3
source("http://bioconductor.org/biocLite.R")
biocLite("GO.db")
biocLite("topGO")
biocLite("GOstats")
library(org.Hs.eg.db)
library("AnnotationDbi")
columns(org.Hs.eg.db)
res_clean_SS3$symbol = mapIds(org.Hs.eg.db,
keys=row.names(res_clean_SS3),
column="SYMBOL",
keytype="ENSEMBL",
multiVals="first")
res_clean_SS3$entrez = mapIds(org.Hs.eg.db,
keys=row.names(res_clean_SS3),
column="ENTREZID",
keytype="ENSEMBL",
multiVals="first")
res_clean_SS3$name = mapIds(org.Hs.eg.db,
keys=row.names(res_clean_SS3),
column="GENENAME",
keytype="ENSEMBL",
multiVals="first")
head(res_clean_SS3, 10)
source("https://bioconductor.org/biocLite.R")
biocLite("gage")
source("https://bioconductor.org/biocLite.R")
biocLite("pathview")
source("https://bioconductor.org/biocLite.R")
biocLite("gageData")
library(pathview)
library(gage)
library(gageData)
data(go.sets.hs)
data(go.subs.hs)
lapply(go.subs.hs, head)
foldchanges = res_clean_SS3$log2FoldChange
names(foldchanges) = res_clean_SS3$entrez
head(foldchanges)
gobpsets = go.sets.hs[go.subs.hs$BP] ## Biological function
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=5)
gobpsets = go.sets.hs[go.subs.hs$CC] ## Biological function
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=5)
gobpsets = go.sets.hs[go.subs.hs$MF] ## Biological function
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=5)
res_SS3$symbol = mapIds(org.Hs.eg.db,
keys=row.names(res_SS3),
column="SYMBOL",
keytype="ENSEMBL",
multiVals="first")
res_SS3$entrez = mapIds(org.Hs.eg.db,
keys=row.names(res_SS3),
column="ENTREZID",
keytype="ENSEMBL",
multiVals="first")
res_SS3$name = mapIds(org.Hs.eg.db,
keys=row.names(res_SS3),
column="GENENAME",
keytype="ENSEMBL",
multiVals="first")
data(go.sets.hs)
data(go.subs.hs)
lapply(go.subs.hs, head)
foldchanges = res_SS3$log2FoldChange
names(foldchanges) = res_SS3$entrez
head(foldchanges)
gobpsets = go.sets.hs[go.subs.hs$BP] ## Biological function
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=10)
## Gene Ontology analysis for the subsample -2
source("http://bioconductor.org/biocLite.R")
biocLite("GO.db")
biocLite("topGO")
biocLite("GOstats")
library(org.Hs.eg.db)
library("AnnotationDbi")
columns(org.Hs.eg.db)
res_clean_SS2$symbol = mapIds(org.Hs.eg.db,
keys=row.names(res_clean_SS2),
column="SYMBOL",
keytype="ENSEMBL",
multiVals="first")
res_clean_SS2$entrez = mapIds(org.Hs.eg.db,
keys=row.names(res_clean_SS2),
column="ENTREZID",
keytype="ENSEMBL",
multiVals="first")
res_clean_SS2$name = mapIds(org.Hs.eg.db,
keys=row.names(res_clean_SS2),
column="GENENAME",
keytype="ENSEMBL",
multiVals="first")
head(res_clean_SS2, 10)
source("https://bioconductor.org/biocLite.R")
biocLite("gage")
source("https://bioconductor.org/biocLite.R")
biocLite("pathview")
source("https://bioconductor.org/biocLite.R")
biocLite("gageData")
library(pathview)
library(gage)
library(gageData)
data(go.sets.hs)
data(go.subs.hs)
lapply(go.subs.hs, head)
foldchanges = res_clean_SS2$log2FoldChange
names(foldchanges) = res_clean_SS2$entrez
head(foldchanges)
gobpsets = go.sets.hs[go.subs.hs$BP] ## Biological function
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=5)
gobpsets = go.sets.hs[go.subs.hs$CC] ## Cellular functions
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=5)
gobpsets = go.sets.hs[go.subs.hs$MF] ## Molecular functions
gobpres = gage(foldchanges, gsets=gobpsets, same.dir=TRUE)
lapply(gobpres, head , n=5)
###############################################################################################################
### Gene ontology enrichment analysis for DEG genes
Genes_Subsample3 <- resOrdered_SS3[resOrdered_SS3$padj<0.05,]
Genes_Subsample3$symbol = mapIds(org.Hs.eg.db,
keys=row.names(Genes_Subsample3),
column="SYMBOL",
keytype="ENSEMBL",
multiVals="first")
Genes_Subsample3$entrez = mapIds(org.Hs.eg.db,
keys=row.names(Genes_Subsample3),
column="ENTREZID",
keytype="ENSEMBL",
multiVals="first")
Genes_Subsample3$name = mapIds(org.Hs.eg.db,
keys=row.names(Genes_Subsample3),
column="GENENAME",
keytype="ENSEMBL",
multiVals="first")
Genes_Subsample3$GO = mapIds(org.Hs.eg.db,
keys=row.names(Genes_Subsample3),
column="GO",
keytype="ENSEMBL",
multiVals="first")
head(Genes_Subsample3, 10)
overallBaseMean <- as.matrix(resOrdered_SS3[, "baseMean", drop = F])
backG <- genefinder(overallBaseMean, Genes_Subsample3@rownames, 10, method = "manhattan")
backG <- rownames(overallBaseMean)[as.vector(sapply(backG, function(x)x$indices))]
backG <- setdiff(backG, Genes_Subsample3@rownames)
length(backG)
all= log2(resOrdered_SS3[,"baseMean"])
foreground =log2(resOrdered_SS3[Genes_Subsample3@rownames, "baseMean"])
background =log2(resOrdered_SS3[backG, "baseMean"])
#' Overlay kernel density estimates of several numeric vectors in one plot.
#'
#' Axis limits are chosen so every curve fits. Series i is drawn in colour i;
#' as in the original, the first series is drawn twice (once by plot(), once
#' in colour 1 by lines()).
#'
#' @param s a non-empty list of numeric vectors
#' @return NULL, invisibly; called for its plotting side effect
plot.multi.dens <- function(s) {
  stopifnot(length(s) > 0)
  # Estimate each density exactly once; the original re-ran density() for
  # the range computation and again for every plot/lines call.
  dens <- lapply(s, density)
  xr <- range(vapply(dens, function(d) range(d$x), numeric(2)))
  yr <- range(vapply(dens, function(d) range(d$y), numeric(2)))
  plot(dens[[1]], xlim = xr, ylim = yr, main = "")
  for (i in seq_along(dens)) {
    lines(dens[[i]], xlim = xr, ylim = yr, col = i)
  }
  invisible(NULL)
}
plot.multi.dens(list(all, foreground,background))
onts = c( "MF", "BP", "CC" )
geneIDs = rownames(overallBaseMean)
inUniverse = geneIDs %in% c(Genes_Subsample3@rownames, backG)
inSelection = geneIDs %in% Genes_Subsample3@rownames
alg <- factor( as.integer( inSelection[inUniverse] ) )
names(alg) <- geneIDs[inUniverse]
tab = as.list(onts)
names(tab) = onts
for(i in 1:3){
## prepare data
tgd <- new( "topGOdata", ontology=onts[i], allGenes = alg, nodeSize=5,
annot=annFUN.org, mapping="org.Hs.eg.db", ID = "ensembl" )
## run tests
resultTopGO.elim <- runTest(tgd, algorithm = "elim", statistic = "Fisher" )
resultTopGO.classic <- runTest(tgd, algorithm = "classic", statistic = "Fisher" )
## look at results
tab[[i]] <- GenTable( tgd, Fisher.elim = resultTopGO.elim,
Fisher.classic = resultTopGO.classic,
orderBy = "Fisher.classic" , topNodes = 5)
}
topGOResults <- rbind.fill(tab)
write.csv(topGOResults, file = "topGOResults_SS23.csv")
|
8433eea4174b927c28c9ca2452a2a1f6950813b0
|
88cb244ee0be3e5407995dbc15bf928325e9e44f
|
/tests/testthat/test-simframe.R
|
0bd5251039ff5f01cfb1e027d552806aee2f6cf0
|
[
"MIT"
] |
permissive
|
kant/individual
|
8d0af92928e4aa2b52d6fb72610069de4220d283
|
b58c7d7aa28c97190f0a9124ad63d56f721cf66b
|
refs/heads/master
| 2022-08-25T05:39:50.852454
| 2020-04-02T08:44:31
| 2020-04-02T08:44:31
| 264,268,090
| 0
| 0
| null | 2020-05-15T18:22:18
| 2020-05-15T18:22:17
| null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
test-simframe.R
|
# Tests for the simulation frame API: State/Variable registration on an
# Individual, and retrieval through Simulation$get_current_frame().

# A frame should report one slot per individual in each registered state.
test_that("getting the state works", {
  S <- State$new('S', 10)
  human <- Individual$new('test', list(S))
  simulation <- Simulation$new(list(human), 1)
  frame <- simulation$get_current_frame()
  expect_length(frame$get_state(human, S), 10)
  I <- State$new('I', 100)
  human <- Individual$new('test', list(S, I))
  simulation <- Simulation$new(list(human), 1)
  frame <- simulation$get_current_frame()
  expect_length(frame$get_state(human, I), 100)
})

# Passing several states should return their union of slots (10 + 20 = 30).
test_that("Getting multiple states works", {
  S <- State$new('S', 10)
  I <- State$new('I', 100)
  R <- State$new('R', 20)
  human <- Individual$new('test', list(S, I, R))
  simulation <- Simulation$new(list(human), 1)
  frame <- simulation$get_current_frame()
  expect_length(frame$get_state(human, S, R), 30)
})

# Requesting a state the individual was never registered with must error.
test_that("getting a non registered state index fails", {
  S <- State$new('S', 10)
  I <- State$new('I', 100)
  R <- State$new('R', 0)
  human <- Individual$new('test', list(S, I))
  simulation <- Simulation$new(list(human), 1)
  frame <- simulation$get_current_frame()
  # NOTE(review): '*' is not a valid regular expression (a bare quantifier),
  # so this may not match the message as intended; consider passing no
  # regexp (or class =) to expect_error — confirm.
  expect_error(
    frame$get_state(human, R),
    '*'
  )
})

# Variables are initialised by their generator function over the state size.
test_that("getting variables works", {
  S <- State$new('S', 10)
  sequence <- Variable$new('sequence', function(size) seq_len(size))
  sequence_2 <- Variable$new('sequence 2', function(size) seq_len(size) + 10)
  human <- Individual$new('test', list(S), variables=list(sequence, sequence_2))
  simulation <- Simulation$new(list(human), 1)
  frame <- simulation$get_current_frame()
  expect_equal(frame$get_variable(human, sequence), 1:10)
  expect_equal(frame$get_variable(human, sequence_2), (1:10) + 10)
})
|
daee10859ed2d8a109f9e3ca0b42179d770f2532
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/glue_create_partition_index.Rd
|
d70467df4eec4a607fb28e35461bbb92efc99634
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 949
|
rd
|
glue_create_partition_index.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_create_partition_index}
\alias{glue_create_partition_index}
\title{Creates a specified partition index in an existing table}
\usage{
glue_create_partition_index(
CatalogId = NULL,
DatabaseName,
TableName,
PartitionIndex
)
}
\arguments{
\item{CatalogId}{The catalog ID where the table resides.}
\item{DatabaseName}{[required] Specifies the name of a database in which you want to create a partition
index.}
\item{TableName}{[required] Specifies the name of a table in which you want to create a partition
index.}
\item{PartitionIndex}{[required] Specifies a \code{PartitionIndex} structure to create a partition index in an
existing table.}
}
\description{
Creates a specified partition index in an existing table.
See \url{https://www.paws-r-sdk.com/docs/glue_create_partition_index/} for full documentation.
}
\keyword{internal}
|
d7dd532963aaf98bdbb817f5eb9421434b410b48
|
d2034a80affde15fb3e69f2466bde58cca096013
|
/응용통계학/10장.R
|
43336f45f7989a1ca20ff7cf459ba9a4f329a490
|
[] |
no_license
|
SANGDONKIM/MASTER
|
dacb5f1b5cf032965c2fcf5f2076f56c510057f9
|
7720d6bdb2465d0b0908dbae70a8f6da570962d6
|
refs/heads/master
| 2021-04-13T14:03:06.281917
| 2021-03-24T12:18:09
| 2021-03-24T12:18:09
| 249,167,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,730
|
r
|
10장.R
|
# permutation test
# https://www.jwilber.me/permutationtest/
# ex 10.1
attach(chickwts)
head(chickwts)
x <- sort(weight[feed == 'soybean']) # feed = 'soybean' 인 weight 값
y <- sort(weight[feed == 'linseed']) # feed = 'linseed'인 weight 값
detach(chickwts)
R <- 999
z <- c(x, y)
K <- 1:26
reps <- numeric(R)
t0 <- t.test(x, y)$statistic # original
for (i in 1:R) {
k <- sample(K, size = 14, replace = F)
x1 <- z[k]
y1 <- z[-k]
reps[i] <- t.test(x1, y1)$statistic
}
# head(reps)
# tail(c(reps, t0)) # vector 합치기
p <- mean(c(t0, reps)>=t0) # alpha = 0.05 에서 기각 x
p
hist(reps, main = '', freq = F, xlab = 'T (p = 0.202)', breaks = 'scott')
points(t0, 0, cex = 1, pch = 16)
# ex 10.2
R <- 999
z <- c(x, y)
K <- 1:26
D <- numeric(R)
options(warn = -1)
D0 <- ks.test(x, y, exact = F)$statistic
for (i in 1:R) {
k <- sample(K, size = 14, replace = F)
x1 <- z[k]
y1 <- z[-k]
D[i] <- ks.test(x1, y1, exact = F)$statistic
}
p <- mean(c(D0, D)>=D0)
options(warn = 0)
p
hist(D, main = '', freq = F, xlab = 'D (p = 0.46)', breaks = 'scott')
points(D0, 0, cex = 1, pch = 16)
# ex 10.3
attach(chickwts)
head(chickwts)
x <- sort(weight[feed == 'sunflower']) # feed = 'soybean' 인 weight 값
y <- sort(weight[feed == 'linseed']) # feed = 'linseed'인 weight 값
detach(chickwts)
summary(cbind(x, y))
R <- 999
z <- c(x, y)
K <- 1:26
D <- numeric(R)
options(warn = -1)
D0 <- ks.test(x, y, exact = F)$statistic
for (i in 1:R) {
k <- sample(K, size = 14, replace = F)
x1 <- z[k]
y1 <- z[-k]
D[i] <- ks.test(x1, y1, exact = F)$statistic
}
p <- mean(c(D0, D)>=D0)
options(warn = 0)
p
# pr 10.3
attach(chickwts)
head(chickwts)
x1 <- sort(weight[feed == 'soybean']) # feed = 'soybean' 인 weight 값
x2 <- sort(weight[feed == 'linseed']) # feed = 'linseed'인 weight 값
x3 <- sort(weight[feed == 'sunflower'])
detach(chickwts)
# cramer von Mises test
# Two-sample Cramer-von Mises test statistic.
#
# Ranks the pooled sample and measures how far each sample's ranks deviate
# from the ideal 1..n / 1..m sequences. Ties in the pooled data (if any)
# are broken at random, so tied inputs give RNG-dependent results.
stat <- function(x, y) {
  n <- length(x)
  m <- length(y)
  pooled <- c(sort(x), sort(y))
  pooled_ranks <- rank(pooled, ties.method = 'random')
  rank_x <- pooled_ranks[seq_len(n)]
  rank_y <- pooled_ranks[n + seq_len(m)]
  # Weighted sum of squared rank deviations for both samples.
  u_stat <- n * sum((rank_x - seq_len(n))^2) +
    m * sum((rank_y - seq_len(m))^2)
  u_stat / (n * m * (n + m)) - (4 * m * n - 1) / (6 * (n + m))
}
R <- 999
K <- 1:26
z <- c(x1, x2)
t0 <- stat(x1, x2)
t0
n <- length(x1)
m <- length(x2)
reps <- numeric(R)
for (i in 1:R) {
k <- sample(K, size = 14, replace = F) # 26개 중에 14개 비복원
dat <- z[k]
x1 <- dat[1:n]
y1 <- dat[(n+1):m]
reps[i] <- stat(x1, y1)
}
p <- (sum(c(t0, reps)>=t0)+1)/(R+1)
p
|
c124e68cc251e83f770bd50c766d68964e1a011b
|
809984e753f947edadeefcda433baa3fccbfa280
|
/plot2.R
|
6edbc5f1cc2fc1fce7baf8cee1a03a57b1aa84ee
|
[] |
no_license
|
evanfish/ExData_Plotting1
|
5733628c470e3edc036d05c437266a028071c009
|
f911a4f3925c5a0a066d553486f5a479785e889e
|
refs/heads/master
| 2021-04-06T01:10:16.418245
| 2018-03-19T02:54:14
| 2018-03-19T02:54:14
| 124,826,924
| 0
| 0
| null | 2018-03-12T03:09:20
| 2018-03-12T03:09:20
| null |
UTF-8
|
R
| false
| false
| 1,204
|
r
|
plot2.R
|
# Exploratory Data Analysis - Johns Hopkins University
# Plot 2: Global Active Power over 2007-02-01/02, written to plot2.png.
data_loc <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp <- tempfile()
download.file(data_loc, temp)
pwr_data <- read.table(unz(temp, filename = "household_power_consumption.txt"),
                       header = TRUE, sep = ";")
unlink(temp)

# Coerce the power column (read as text because of "?" missing markers)
# and parse the day/month/year dates.
pwr_data$Global_active_power <- as.numeric(as.character(pwr_data$Global_active_power))
pwr_data$Date <- as.Date(pwr_data$Date, format = "%d/%m/%Y")

# Keep only the two days of interest.
sub_data <- subset(pwr_data,
                   Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))

# Build one POSIX timestamp per observation. The original grew a character
# vector inside a for loop (quadratic, and it appended a stray duplicate of
# each element that was then sliced off with a hard-coded [1:2880]); the
# vectorised paste() yields the same values for any number of rows.
date_time2 <- strptime(paste(sub_data$Date, sub_data$Time), "%Y-%m-%d %H:%M:%S")

png('plot2.png', width = 480, height = 480)
plot(date_time2, sub_data$Global_active_power, pch = "",
     xlab = "", ylab = "Global Active Power (kilowatts)")
lines(date_time2, sub_data$Global_active_power)
dev.off()
|
13746f06436230a77be7bb706a64886d710c1d87
|
f6a1a598a76168efe4a32a4490ea4e88ecdad607
|
/r_review.R
|
57009798a672d580cc848bd7bddfd387bf8d2ead
|
[] |
no_license
|
siddisis/altmetrics
|
5bf07f628de16aa44f1c313b2668f0d76aeb6195
|
13372dbf720537edef27bfc2f63052f6422683e1
|
refs/heads/master
| 2020-05-30T16:13:12.119602
| 2015-09-16T16:45:09
| 2015-09-16T16:45:09
| 42,600,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 951
|
r
|
r_review.R
|
# Software Carpentry altmetrics review: interactive exploration of the
# PLOS article-level metrics data (raw and normalised counts).
# NOTE(review): absolute paths tie this script to one machine — consider
# relative paths.
count.raw<-read.delim("/Users/siddisis/Desktop/SWC/altmetrics/data/counts-raw.txt.gz")
count.norm<-read.delim("/Users/siddisis/Desktop/SWC/altmetrics/data/counts-norm.txt.gz")
# Peek at a few rows/columns and the article IDs.
count.raw[1:3,10:12]
count.raw$pmid[1:3]
# Recycled divisor c(7, 1): alternating elements become weeks vs days
# since publication — presumably intentional; confirm.
head(count.raw$daysSincePublished)/c(7,1)
# Journal is a factor; inspect its structure and levels.
str(count.raw$journal)
levels(count.raw$journal)
# Any missing author counts in the first ten articles?
anyNA(count.raw$authorsCount[1:10])
# Distribution of Web of Science citation counts through 2011.
summary(count.raw$wosCountThru2011)
hist(count.raw$wosCountThru2011, xlim=c(0,200), breaks=1000)
hist(sqrt(count.raw$wosCountThru2011))
# Relationships between F1000 score, author count and (log) citations.
smoothScatter(count.raw$f1000Factor, log(count.raw$wosCountThru2011))
smoothScatter(count.raw$authorsCount, log(count.raw$wosCountThru2011))
smoothScatter(count.raw$authorsCount, count.raw$f1000Factor)
cor(count.raw$authorsCount, count.raw$f1000Factor, use="complete.obs")
# Row counts for journal and subject-tag subsets.
dim(count.raw[count.raw$journal %in% c("pone", "pbio"),])
dim(count.raw[grepl("Immu",count.raw$plosSubjectTags),])
# Conditional-logic practice: report whether author counts contain NAs.
if(any(is.na(count.raw$authorsCount))){
  print("YO")
} else {print("YOYO")}
|
15bb26a6f2bae7a6d12ab183cdb8d81a56ddf92f
|
865cecdd703505d582b91242f9c37c0bed740095
|
/R/plot_sets.R
|
8daf3e3e9e8c6148037267de3830774cff5f0771
|
[] |
no_license
|
csiu/CEMTscripts
|
4fb78442e540e2ff6e95b734b8f96043e3509e74
|
74baea3a0a59f4e21405c96a91d65d7a36764048
|
refs/heads/master
| 2021-08-16T16:44:42.236037
| 2017-11-20T05:19:39
| 2017-11-20T05:19:39
| 71,932,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,459
|
r
|
plot_sets.R
|
#' Count (or list) the unique elements across a list of sets
#'
#' @param x a list in which each element is a character vector
#'   representing a set, named after that set
#' @param getunique logical; when TRUE the unique elements themselves are
#'   returned instead of their count
set_total <- function(x, getunique = FALSE) {
  pooled <- Reduce(union, x)
  if (getunique) {
    return(pooled)
  }
  length(pooled)
}
# ------------------------------------------------------------------------
#' Plot sets
#'
#' @param x a list whereby each element is a character vector/set and
#' the name of the element is the name of the set
#' @param type what kind of plot to produce: one of "upset" (default)
#' or "venn"
#' @param ... additional arguments to \code{UpSetR::upset(...)} or
#' \code{VennDiagram::venn.diagram(...)}
#' @import dplyr
#' @export
#' @examples
#' x <-
#' list(
#' set1 = letters[1:10],
#' set2 = c("a", "e", "i", "o", "u"),
#' set3 = letters[1:3]
#' )
#' plot_sets(x)
#' plot_sets(x, type="venn")
plot_sets <- function(x, type = "upset", ...) {
  if (type == "upset") {
    # Convert the named list of sets to UpSetR's membership format and plot.
    UpSetR::upset(UpSetR::fromList(x), ...)
  } else if (type == "venn") {
    # Raise VennDiagram's logger threshold so venn.diagram() stays quiet.
    futile.logger::flog.threshold(futile.logger::ERROR,
                                  name = "VennDiagramLogger")
    grid::grid.newpage()
    grid::grid.draw(VennDiagram::venn.diagram(x, filename = NULL, ...))
  }
}
|
e207eeba0eeb60b793abdff72b356e0b3186f074
|
094e952da4fa8698b04fb88b69fbf67668218d24
|
/code/ch.8/run-model8-7.R
|
d48509056eed10154e107177b7aaf5cd25898b12
|
[
"MIT"
] |
permissive
|
rhyeu/study_rstan
|
42a773beef840f56f64fcd20c5b1b24f88d45e1b
|
a5b998772358ba64996bc7ca775566f0706fa8f3
|
refs/heads/master
| 2021-07-08T15:51:37.488890
| 2020-10-04T07:10:03
| 2020-10-04T07:10:03
| 195,388,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 376
|
r
|
run-model8-7.R
|
# Fit Stan model 8-7 to the concentration time-course data and save results.
library(rstan)
# One row per series; the first column appears to be an identifier and the
# remaining columns the responses at the six times below — confirm with file.
d <- read.csv('ch.8/input/data-conc-2.txt')
N <- nrow(d)
# Observation times shared by every row.
Time <- c(1, 2, 4, 8, 12, 24)
# Dense grid of 60 time points on [0, 24] for posterior prediction.
T_new <- 60
Time_new <- seq(from=0, to=24, length=T_new)
# d[,-1] drops the first column, leaving the N x T response matrix Y.
data <- list(N=N, T=length(Time), Time=Time, Y=d[,-1],
             T_new=T_new, Time_new=Time_new)
fit <- stan(file='ch.8/model/model8-7.stan', data=data, seed=1234)
# Persist the entire workspace (d, data, fit, ...) for downstream analysis.
save.image('ch.8/output/result-model8-7.RData')
|
a462bc638a31f2f4f8889d87a0cec8a38e0d8b80
|
e622cedeca09b762cdce8d93218ad215b648de2a
|
/week 6 assignment.r
|
cf5ee9c79bb6dfbb7eeacb2c3d050f52ae8cc65a
|
[] |
no_license
|
ct3080a/IS360-Data-Acquisiton-and-Management
|
2f5a126d333abccbfbf03e0c38c41292718894e8
|
240fc00ce90d3097406dec9542c81d02f7da1b36
|
refs/heads/master
| 2021-03-12T23:44:34.198145
| 2014-12-20T12:44:12
| 2014-12-20T12:44:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 164
|
r
|
week 6 assignment.r
|
# Week 6 assignment: basic exploratory plots of the built-in `women`
# dataset (average heights and weights of American women).

# Print the dataset to inspect it
women
# Scatterplot of weight against height
plot(women$height, women$weight)
# Histogram of heights; breaks = 5 is a suggestion, not a guarantee
hist(women$height, breaks = 5)
# Boxplot of heights
boxplot(women$height)
|
9ed5cbe63e91963b8b9c7ce1a07a57a57a9ab269
|
34865809e852fdb92071a580e1da1d3732f48ec3
|
/plinker_dev.R
|
38ed7669ce85dfd9dd2f79049bd8f99861c1acce
|
[] |
no_license
|
quartzbio/plinker_pkg
|
5c4d436b29ee06fb9d7068b3f0940bf5dfda7e5a
|
0e137d20c03dcf8a0f64197be023b8e8c28eb0fe
|
refs/heads/master
| 2021-09-21T21:07:34.813166
| 2018-08-31T13:41:58
| 2018-08-31T13:41:58
| 110,011,530
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 888
|
r
|
plinker_dev.R
|
library(devtools)
check_man('plinker')
test('plinker')
test('plinker', 'annotations')
test('plinker', 'bed$')
test('plinker', 'bed_plink$')
test('plinker', 'bed_plink_lm')
test('plinker', 'bedmatrix')
test('plinker', 'bim')
test('plinker', 'convert')
test('plinker', 'covars')
test('plinker', 'dist')
test('plinker', 'fam')
test('plinker', 'filters')
test('plinker', 'fisher')
test('plinker', 'genotype')
test('plinker', 'missing')
test('plinker', 'phenotype')
test('plinker', '^plink$')
test('plinker', 'plink_lm')
test('plinker', 'plink_output')
test('plinker', 'sample_data')
test('plinker', 'stats')
test('plinker', 'subset')
test('plinker', 'utils')
test_pkg('plinker', 'bed$')
test_pkg('plinker', 'bed_plink$')
test_pkg('plinker', 'dist')
# Look up a help topic and force it to render (HTML by default), even when
# called non-interactively.
# NOTE: relies on the unexported utils:::print.help_files_with_topic.
help2 <- function(..., help_type = 'html') {
  utils:::print.help_files_with_topic(
    utils::help(..., help_type = help_type)
  )
}
|
0ed9ef68000ca009ccd2fa67ecb51da050742cfa
|
3b0830c657f493cc609727cd92da3a3c9fa12d80
|
/R/MCMethod1.R
|
508dcb2da7371661e2cd768e40a213e44b68f6f0
|
[] |
no_license
|
HaiyangYu1999/CodeBackup
|
a1d8018fbbe2f7e05455042a1438d635f6f2a17f
|
a023f93e85b2c8172f98d833e3f7d2c1de67a51e
|
refs/heads/master
| 2021-04-20T23:43:12.813114
| 2021-01-03T13:30:48
| 2021-01-03T13:30:48
| 249,726,445
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 568
|
r
|
MCMethod1.R
|
## MCMethod1.R — Gibbs-style simulation of a bivariate 0/1 chain (X, Y).
## Each step draws X[i] | Y[i-1] from Pxgy, then Y[i] | X[i] from Pygx;
## the empirical joint distribution is tabulated at the end.
set.seed(1000)
N <- 10000

# Conditional probabilities, indexed by (conditioning value + 1):
#   Pxgy[y + 1] = P(X = 1 | Y = y),  Pygx[x + 1] = P(Y = 1 | X = x)
# BUG FIX: in the original these were defined AFTER Y[1] was drawn from
# Pygx, so a fresh run stopped with "object 'Pygx' not found". They must
# be defined before the chain is initialised.
Pxgy <- c(2/3, 6/7)
Pygx <- c(1/2, 3/4)

# Marginal P(X = 1) used only to initialise the chain; these conditionals
# imply stationary marginals matching the original constants Px = 0.8
# and Py = 0.7. (The unused matrix P and Py were removed.)
Px <- 0.8

X <- rep(NA, N)
Y <- rep(NA, N)
X[1] <- rbinom(1, 1, Px)
Y[1] <- rbinom(1, 1, Pygx[X[1] + 1])
for (i in 2:N) {
  X[i] <- rbinom(1, 1, Pxgy[Y[i - 1] + 1])
  Y[i] <- rbinom(1, 1, Pygx[X[i] + 1])
}

# Joint cell counts via vectorised comparisons instead of the original
# element-by-element counting loop.
x00 <- sum(X == 0 & Y == 0)
x01 <- sum(X == 0 & Y == 1)
x10 <- sum(X == 1 & Y == 0)
x11 <- sum(X == 1 & Y == 1)

cat("mean(X)=",mean(X),sep="")
cat("mean(Y)=",mean(Y),sep="")
cat(" Y=0 Y=1")
cat("X=0",x00/N,x01/N)
cat("X=1",x10/N,x11/N)
|
ae3ee8b21f60a20a6c49a53b3cf8c3b180d943ce
|
a7325500905b466ac3f55f63280249e710a9c37e
|
/global.R
|
73c32297655b412958ed42202e627f15433ab536
|
[] |
no_license
|
DanTruong/NYS-Influenza-Map
|
406ef4a8133f6e504ee2efb997c72ce910689376
|
b6bf7422ab31f14ee58a31e0b3204a1760dd4e77
|
refs/heads/master
| 2023-04-07T17:47:49.712211
| 2023-04-02T02:28:40
| 2023-04-02T02:28:40
| 218,870,438
| 0
| 0
| null | 2019-11-05T19:48:57
| 2019-10-31T22:16:38
|
R
|
UTF-8
|
R
| false
| false
| 1,364
|
r
|
global.R
|
# global.R — data preparation for the NYS influenza map Shiny app.
# Produces `fluDataCons`: influenza incident counts aggregated over
# disease type, per county / date / coordinate combination.

# Load requisite libraries
# NOTE(review): plyr is attached after dplyr, so plyr masks several dplyr
# verbs; only plyr::revalue is used below, but confirm the load order is
# intentional.
library(dplyr)
library(plyr)
library(tidyr)
# Load in dataset file
rawData <- read.csv("data/fluData.csv")
# Select specific columns to work with
fluData <- data.frame(
  County = rawData$County,
  Date = rawData$Week.Ending.Date,
  Disease = rawData$Disease,
  Incidents = as.integer(rawData$Count),
  Coordinates = rawData$County.Centroid
)
# Split coordinates into Longitude/Latitude (Double)
# Centroids appear to be stored as "(lat, long)"; the substring() calls
# strip the leading "(" and trailing ")" — TODO confirm the source format.
fluData <- fluData %>% separate(Coordinates, c("Latitude", "Longitude"), ", ")
fluData$Latitude <- substring(fluData$Latitude, first = 2)
fluData$Longitude <- substring(fluData$Longitude, 1, nchar(fluData$Longitude) -
                                 1)
fluData$Latitude <- as.double(fluData$Latitude)
fluData$Longitude <- as.double(fluData$Longitude)
# Separate date into Month, Day and Year (Integer); dates are "M/D/Y" text.
fluData <- fluData %>% separate(Date,
                                sep = "/",
                                into = c("Month", "Day", "Year"))
fluData$Month <- as.integer(fluData$Month)
fluData$Day <- as.integer(fluData$Day)
fluData$Year <- as.integer(fluData$Year)
# Rename Influenza Labels to short display values
fluData$Disease <- revalue(fluData$Disease, c(
  INFLUENZA_A = "A",
  INFLUENZA_B = "B",
  INFLUENZA_UNSPECIFIED = "Unspecified"
))
# Aggregate flu data by sum of incidents
# Column 3 (Disease) is dropped, then Incidents are summed over every
# remaining column combination.
fluDataCons <- fluData[-c(3)]
fluDataCons <- aggregate(Incidents ~ ., fluDataCons, sum)
|
250063e5421c05f116302073ec081f3c15b77e64
|
cb93cf0799e3eedca6f9e720e09bb60e0f77ff10
|
/tests/readDataFrame.R
|
b19200ae1598c602886f9966af1f621388836ed7
|
[] |
no_license
|
HenrikBengtsson/R.filesets
|
254c37b4546e8280b9972d06840b918e12e0b4e9
|
17181ae1c84dbf7bad1214d37e6f133ed2deeba4
|
refs/heads/master
| 2023-01-08T23:58:09.708417
| 2022-07-21T09:52:18
| 2022-07-21T09:52:18
| 20,844,863
| 3
| 1
| null | 2018-04-03T22:12:45
| 2014-06-15T00:25:31
|
R
|
UTF-8
|
R
| false
| false
| 1,221
|
r
|
readDataFrame.R
|
source("incl/start.R")
message("*** readDataFrame()")
path <- system.file("exData", "dataSetA,original", package="R.filesets")
pathnames <- list.files(path=path, pattern="[.]txt$", full.names=TRUE)
pathname <- pathnames[1]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Basic reading
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
data <- readDataFrame(pathname)
print(data)
data <- readDataFrame(basename(pathname), path=dirname(pathname))
print(data)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Reading gzip'ed file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pathT <- tempdir()
pathnameZ <- file.path(pathT, sprintf("%s.gz", basename(pathname)))
R.utils::gzip(pathname, pathnameZ, remove=FALSE)
dataZ <- readDataFrame(pathnameZ)
print(dataZ)
## Validate
stopifnot(identical(dataZ, data))
## Cleanup
file.remove(pathnameZ)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Reading multiple files and stack them
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pathnames <- rep(pathname, times=3L)
data <- readDataFrame(pathnames)
print(data)
source("incl/end.R")
|
2111a4900309c8a2580e7e8d8abacb5d82a9e2f1
|
0014e7337864cc16efbdea8215389c2267073e8f
|
/Digital Channel Atribution/markov_vs_lastclick.R
|
45ef550fd6203e3284abc9d1ec5b5f1edacaa354
|
[] |
no_license
|
azzikl/R
|
efadf75df6dad5501b5f7708f8a9c44627e673dc
|
cb3a517ede522c3be3b49c94e720315a32942165
|
refs/heads/master
| 2021-01-20T01:10:35.753966
| 2017-07-14T12:35:35
| 2017-07-14T12:35:35
| 89,227,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,502
|
r
|
markov_vs_lastclick.R
|
library(dplyr)
library(reshape2)
library(ggplot2)
library(ChannelAttribution)
library(RGA)
#generate ga token
authorize()
#set property ID, start and end dates
#ids = propertyid
start.date = as.Date("2017/01/18")
end.date = as.Date("2017/01/18")
#getting real data via google analytics API
paths_data = get_mcf(
ids,
start.date = start.date,
end.date = end.date,
metrics = "mcf:totalConversionValue, mcf:totalConversions",
dimensions = "mcf:mediumPath",
filters = "mcf:conversionType==Transaction"
)
#calculating Markov and standart last click model
model = markov_model(
paths_data,
var_path = 'mediumPath',
var_conv = 'totalConversions',
out_more = TRUE
)
last_click = paths_data %>%
mutate(mediumPath = sub('.*>', '', mediumPath),
mediumPath = sub(' ', '', mediumPath))
last_click = last_click %>%
group_by(mediumPath) %>%
summarise(lc_conversions = sum(totalConversions))
# comparing two models
comparison <- merge(last_click, model$result, by.x = 'mediumPath', by.y = 'channel_name')
names(comparison) = c("medium","last_click","markov_model")
#ploting transactions
for_plot = melt(comparison, id = "medium")
ggplot(for_plot, aes(medium, value, fill = variable)) +
geom_bar(stat='identity', position='dodge') +
ggtitle('TOTAL CONVERSIONS') +
theme(axis.title.x = element_text(vjust = -2)) +
theme(axis.title.y = element_text(vjust = +2)) +
theme(title = element_text(size = 16)) +
theme(plot.title=element_text(size = 20)) +
ylab("")
|
6284ceca392ef28c1e92d5375868e21938cef041
|
25923c6f6895f0f1b4c32786787e66d43cb01e3d
|
/man/maturity_assignment.Rd
|
7465cb73f3d037148e4a994e80e7e69e7d9d1299
|
[] |
no_license
|
pbs-assess/gfplot
|
4bb1fc9546eed8e9d4cda5cd2f42f1adb50cc8ca
|
7cebc7376ecf0d5576a486a7e260130973d21cb5
|
refs/heads/master
| 2023-07-19T21:36:10.716669
| 2023-07-12T00:30:06
| 2023-07-12T00:30:06
| 103,994,126
| 2
| 3
| null | 2023-05-31T19:11:41
| 2017-09-18T21:31:13
|
R
|
UTF-8
|
R
| false
| true
| 362
|
rd
|
maturity_assignment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{maturity_assignment}
\alias{maturity_assignment}
\title{A data frame with maturity categories and assignments.}
\format{
A data frame
}
\usage{
maturity_assignment
}
\description{
A data frame with maturity categories and assignments.
}
\keyword{datasets}
|
d083b8aeaa8ec57e35f5f38e72e344f9585c7474
|
b23aa367ece060a8a3c6697d6cb59cb84c1bca0d
|
/app.R
|
89a1a814440f55659ba6c53ed5e13c9d16539fe6
|
[] |
no_license
|
DavidBarke/distributions
|
2d53d3e4891308920ce620a20242dc10926de919
|
6cc6fd558d204570aecc6351d44b669f0fe13687
|
refs/heads/main
| 2023-04-12T09:23:08.155376
| 2021-05-04T16:42:37
| 2021-05-04T16:42:37
| 350,862,494
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,523
|
r
|
app.R
|
library(shiny)
library(sortable)
library(magrittr)
source("init/source_directory.R")
source_directory("modules")
sass::sass(
sass::sass_file("www/scss/styles.scss"),
output = "www/css/styles.css",
options = sass::sass_options(
output_style = "compressed"
),
cache = FALSE
)
distribution_helper <- Distribution$new()
ui <- htmltools::tagList(
htmltools::includeScript("www/js/distribution-input.js"),
htmltools::includeScript("www/js/color-input.js"),
htmltools::includeScript("www/js/remove-icon.js"),
htmltools::includeScript("www/js/bin-drop-zone.js"),
htmltools::includeScript("www/js/update-sortable-handler.js"),
htmltools::includeScript("www/js/popover.js"),
htmltools::includeScript("www/js/tooltips.js"),
htmltools::includeScript("www/js/mathjax-typeset-handler.js"),
htmltools::includeCSS("www/css/styles.css"),
rintrojs::introjsUI(),
glouton::use_glouton(),
shiny::withMathJax(),
bs4Dash::bs4DashPage(
header = bs4Dash::bs4DashNavbar(
title = bs4Dash::bs4DashBrand(
title = "Distributions",
href = "https://github.com/DavidBarke/distributions"
),
status = "primary",
rightUi = navbar_right_ui(
id = "navbar_right"
),
fixed = TRUE
),
sidebar = bs4Dash::bs4DashSidebar(
disable = TRUE
),
body = bs4Dash::bs4DashBody(
body_ui(
id = "body"
)
),
footer = bs4Dash::bs4DashFooter(
left = bin_drop_zone_ui(
id = "bin_drop_zone"
),
fixed = TRUE
),
freshTheme = fresh::create_theme(
fresh::bs4dash_vars(
main_footer_padding = 0
)
),
dark = NULL
)
)
server <- function(input, output, session) {
.values <- new.env()
body_server(
id = "body",
.values = .values
)
navbar_right_server(
id = "navbar_right",
.values = .values
)
shiny::observeEvent(TRUE, {
needs_intro <- is.null(glouton::fetch_cookies()$intro)
if (needs_intro) {
glouton::add_cookie("intro", "true")
rintrojs::introjs(
session,
options = list(
showStepNumbers = FALSE
)
)
}
}, once = TRUE)
}
shinyApp(ui = ui, server = server)
|
bc0880f35ff1db49520c2a5987b8a7eb309eecc5
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/pseudorank/tests/testthat.R
|
9de052badeab02d686fbe49630621e51ce9100b1
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# against the installed pseudorank package.
library(testthat)
library(pseudorank)

test_check("pseudorank")
|
b6f285525bc24c40a056f3aef9fcd9dd1c34fa54
|
34a1872c598758ad8b6c0a82132f52b2f124489e
|
/ExtensionScripts/merging with HCAD functions/looking_for_vacant_buildings.R
|
34f22eb7a6c50fd9f86b07d2a44e54c6acf34ce4
|
[] |
no_license
|
DataAnalyticsinStudentHands/SyntheticDataSet
|
2f73599723d53c5ca0e04535de55bf05c44aaaac
|
82877e75367dbd9ff68976c61b9e8f237224cf2d
|
refs/heads/master
| 2023-08-22T20:00:35.665190
| 2023-08-16T17:14:39
| 2023-08-16T17:14:39
| 77,935,514
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,107
|
r
|
looking_for_vacant_buildings.R
|
#I ran this stuff before but am putting it here so you can see it
#library(sf)
#parcels <- st_read("../hcadparcelstuff/Parcels/Parcels.shp")
#parcels$valid=st_is_valid(parcels, reason = TRUE)
#validparcels=subset(parcels,parcels$valid=="Valid Geometry")
#Read in Texas Census Tract Files
#TXCensusTracts <- st_read("../hcadparcelstuff/TexasCensusTractShapefiles/gz_2010_48_140_00_500k.shp")
#Put them in same CRS as Parcels
#TXCensusTracts <- st_transform(TXCensusTracts,st_crs(validparcels))
#Get Census Tracts for each parcel
#CensusTractforHCADParcels=st_within(validparcels,TXCensusTracts)
#CensusTractforHCADParcelsunlisted=rapply(CensusTractforHCADParcels,function(x) ifelse(length(x)==0,9999999999999999999,x), how = "replace")
#CensusTractforHCADParcelsunlisted=unlist(CensusTractforHCADParcelsunlisted)
#validparcels$COUNTY=TXCensusTracts$COUNTY[CensusTractforHCADParcelsunlisted]
#validparcels$TRACT=TXCensusTracts$TRACT[CensusTractforHCADParcelsunlisted]
validparcels=readRDS("validparcels.RDS")
#So we need the non residential files
buildingfeatures=read.table('../hcadparcelstuff/building_other.txt',sep="\t", header=FALSE, fill=TRUE,colClasses = "character",strip.white = TRUE,quote = "")#, ,colClasses = "character",stringsAsFactors = FALSE
colnames(buildingfeatures)=c("ACCOUNT", "USE_CODE", "BUILDING_NUMBER","IMPRV_TYPE","BUILDING_STYLE_CODE","CLASS_STRUCTURE","CLASS_STRUC_DESCRIPTION","NOTICED_DEPR_VALUE","DEPRECIATION_VALUE","MS_REPLACEMENT_COST","CAMA_REPLACEMENT_COST","ACCRUED_DEPR_PCT","QUALITY","QUALITY_DESCRIPTION","DATE_ERECTED","EFFECTIVE_DATE","YR_REMODEL","YR_ROLL","APPRAISED_BY","APPRAISED_DATE","NOTE","IMPR_SQ_FT","ACTUAL_AREA","HEAT_AREA","GROSS_AREA","EFFECTIVE_AREA","BASE_AREA","PERIMETER","PERCENT_COMPLETE","CATEGORY","CATEGORY_DESC","PROPERTY_NAME","UNITS","NET_RENT_AREA","LEASE_RATE","OCCUPANCY","TOTAL_INCOME")
buildingfeatures$"HCAD_NUM"=buildingfeatures$ACCOUNT
library(tigris)
validparcels3=geo_join(validparcels,buildingfeatures,by="HCAD_NUM",how="inner")
#subset by what I believe are code for residential properties
places_people_live=subset(validparcels3,validparcels3$BUILDING_STYLE_CODE %in% c("660","8321","8324","8393","8424","8451","8589","101","107","108","109","125","8177","8178","8179",
"8338","8351","8354","8401","8548","8549","8550","8986","8988","102","103","104","105","8300","8352","8338","8459",
"8493","8546","8547","8596","8984","8987","8989"))
unique(places_people_live$OCCUPANCY)
#Merge to get Land Use code
land_stuff=read.table('../hcadparcelstuff/land.txt',sep="\t", header=FALSE, fill=TRUE,colClasses = "character",strip.white = TRUE,quote = "")#, ,colClasses = "character",stringsAsFactors = FALSE
colnames(land_stuff)=c("ACCOUNT","LINE_NUMBER","LAND_USE_CODE","SITE_CD","SITE_CD_DSCR","SITE_ADJ","UNIT_TYPE","UNITS","SIZE_FACTOR","SITE_FACT","APPR_OVERRIDE_FACTOR","APPR_OVERRIDE_REASON","TOT_ADJ","UNIT_PRICE","ADJ_UNIT_PRICE","VALUE","OVERRIDE_VALUE")
land_stuff$"HCAD_NUM"=land_stuff$ACCOUNT
validparcelsagain=geo_join(validparcels,land_stuff,by="HCAD_NUM",how="inner")
#Subset codes that have vacant in their description
#http://hcad.org/hcad-resources/hcad-appraisal-codes/hcad-land-codes/
maybe_this_is_what_youre_asking_for=subset(validparcelsagain,validparcelsagain$LAND_USE_CODE %in% c("1000","1002","2000","2002","7000","9999"))
saveRDS(places_people_live,"places_people_live_that_have_occupancy_rates.RDS")
saveRDS(maybe_this_is_what_youre_asking_for,"vacant_land_by_land_use_code.RDS")
maybe_this_is_what_youre_asking_for=readRDS("vacant_land_by_land_use_code.RDS")
maybe_this_is_what_youre_asking_for$Duplicated_HCAD_NUM=duplicated(maybe_this_is_what_youre_asking_for$HCAD_NUM)
look_at_duplicated_accounts=subset(maybe_this_is_what_youre_asking_for,maybe_this_is_what_youre_asking_for$Duplicated_HCAD_NUM==TRUE)
look_at_1_account=subset(maybe_this_is_what_youre_asking_for,maybe_this_is_what_youre_asking_for$HCAD_NUM=="0200730000016")
|
6d78be103a00fd75e0088734baa95921aa024ad0
|
7a343e1b7a3c2ef50d684a5ce991ebef800f32be
|
/shells/variant_calling/new/11-tests/xp-ehh/chr12.R
|
bf2d930e6c22b88c9e57f61c5ba3648c0e1f1187
|
[] |
no_license
|
melisakman/Helianthus
|
04d2dc8315f8d14d2f38faa8bce7282e2cc2b439
|
5500207a2bbfe2e63c639f3194f732a41d527cb7
|
refs/heads/master
| 2021-11-09T05:12:31.331797
| 2021-11-03T05:30:32
| 2021-11-03T05:30:32
| 60,215,326
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,417
|
r
|
chr12.R
|
library(rehh)
setwd("/global/scratch/makman/GATK/final/")
lr <- data2haplohh("chr12_SNP_lr_heteroFiltered.vcf", min_perc_geno.mrk = 30, polarize_vcf = FALSE)
wd <- data2haplohh("chr12_SNP_wd_heteroFiltered.vcf", min_perc_geno.mrk = 30, polarize_vcf = FALSE)
lr_scan = scan_hh(lr, limhaplo = 2, limehh = 0.05, limehhs = 0.05,
phased = FALSE, polarized = FALSE, scalegap = NA, maxgap = NA,
discard_integration_at_border = TRUE, lower_ehh_y_bound = 0.05,
lower_ehhs_y_bound = 0.05, threads = 4)
wd_scan = scan_hh(wd, limhaplo = 2, limehh = 0.05, limehhs = 0.05,
phased = FALSE, polarized = FALSE, scalegap = NA, maxgap = NA,
discard_integration_at_border = TRUE, lower_ehh_y_bound = 0.05,
lower_ehhs_y_bound = 0.05, threads = 4)
analyses = ies2xpehh(lr_scan, wd_scan, popname1 = "landrace", popname2 = "wild", include_freq =TRUE)
write.table(analyses, file = "chr12_lr_xpehh.txt", sep = "\t")
cvlr <- data2haplohh("chr12_SNP_cv_lr_heteroFiltered.vcf", min_perc_geno.mrk = 30, polarize_vcf = FALSE)
cvlr_scan = scan_hh(cvlr, limhaplo = 2, limehh = 0.05, limehhs = 0.05,
phased = FALSE, polarized = FALSE, scalegap = NA, maxgap = NA,
discard_integration_at_border = TRUE, lower_ehh_y_bound = 0.05,
lower_ehhs_y_bound = 0.05, threads = 4)
analyses = ies2xpehh(cvlr_scan, wd_scan, popname1 = "domesticates", popname2 = "wild", include_freq =TRUE)
write.table(analyses, file = "chr12_cvlr_xpehh.txt", sep = "\t")
|
644d48e36326b0aee94e2528127c075428bb201a
|
422403bc85c06475bda8e8f8d5c8c149b6c92dc9
|
/StanModelStage2.R
|
e8f4cc5e7b3e74e075ddf7474e315196e06fca8b
|
[] |
no_license
|
Shusei-E/B.A.Thesis
|
3c4ae24a2de7c725a5e7bb3825cd3486955527d4
|
8f3dc06c549e979ffd245ba91bbbb199e7dab1f4
|
refs/heads/master
| 2016-08-12T12:44:27.487522
| 2016-01-12T13:31:34
| 2016-01-12T13:31:34
| 49,500,331
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,541
|
r
|
StanModelStage2.R
|
stancode = "
data {
int<lower=0> n; #データの数
int<lower=0> time;
int<lower=1> L; #グループの数 (ここではDem OR Non-Dem)
int<lower=1,upper=L> ll[n]; #データの1点がどのグループに属しているかのindex
vector[n] p4_polity2;
vector[n] polity_lag;
vector[n] Giniall;
vector[n] Aid_All;
vector[n] GDP;
vector[n] Corruption;
vector[n] Population;
vector[n] TotalRents;
vector[n] WinningCoalition;
vector[n] Selectorate;
}
parameters {
real alpha[L]; #2つのalphaは一旦alpha[n]とはしないでおく
real<lower=0> sigma1;
real<lower=0> sigma2[L];
real mu[L];
real AidAll[L];
real Corup;
real WinCoa;
real Selec;
}
transformed parameters { #stage-2 #これもmodelに含めてみる
#この設定で大丈夫?
}
model {
for (l in 1:L){ #stage2 categoryごとに効きが違うと考えられるもの
mu[l] ~ normal(0,100);
sigma2[l] ~ uniform(0,1000);
alpha[l] ~ normal(mu[l],sigma2[l]);
AidAll[l] ~ normal(mu[l],sigma2[l]);
}
for (i in 1:n){ #stage2
Giniall[i] ~ normal(alpha[ll[i]] +
AidAll[ll[i]]*Aid_All[i] + Corup*Corruption[i] +
WinCoa*WinningCoalition[i] + Selec*Selectorate[i], sigma1);
#国ごとの固定効果をなくしてみた alpha[i]をやめた --> 一時的に入れてみた
}
#Prior;
sigma1 ~ uniform(0,1000);
}
" #stancodeここまで
|
35454ab7193c996a0855870e1233bc4dae0ce960
|
dff4be894fcfabd2b7a201a0372170bd865a9e3e
|
/R/simpson.R
|
a7333130236531b68cc8593a8d9a7dc797a3cee2
|
[] |
no_license
|
zhaozhg81/AIXZ
|
ba106545a1aae22984dcbb4453318e63b7262f1a
|
49f835bf9d2ca9c9642ea3a40eb36ec925172f57
|
refs/heads/master
| 2023-05-04T20:19:10.771017
| 2023-04-17T14:51:10
| 2023-04-17T14:51:10
| 213,441,232
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,393
|
r
|
simpson.R
|
## Simulation
## Two age group, 5-10, 50-55
n1 <- 10
n2 <- 10
x1 <- sample( c(55:75), n1, replace=TRUE)
x1 <- x1-55
y1 <- 10 + 0.8* x1 + rnorm(n1)
x2 <- sample(c(70:85), n2, replace=TRUE)
x2 <- x2-55
y2 <- -10 + 0.8* x2+rnorm(n2)
plot( x1, y1, col='black', xlim=c( min(x1,x2)-1, max(x1,x2)+1), ylim=c( min(y1,y2)-0.1, max(y1,y2)+0.1 ) )
points(x2,y2, col='black')
cor( c(x1,x2), c(y1,y2) )
plot( x1, y1, col='red', xlim=c( min(x1,x2)-1, max(x1,x2)+1), ylim=c( min(y1,y2)-0.1, max(y1,y2)+0.1 ) )
points(x2,y2, col='green')
## Kidney stone data
stone = NULL
stone$Z=c(0,0,0,0,1,1,1,1) ## 0 means small stone, 1 means large stone
stone$X=c(0,0,1,1,0,0,1,1) ## 0 means treatment A, 1 means treatment B
stone$Y=c(81,6,234,36,192,71,55,25) ## Number of recovery vs non-recovery
rates <- NULL
rates$A <- c( stone$Y[1]/(stone$Y[1]+stone$Y[2]), stone$Y[5]/(stone$Y[5]+stone$Y[6]))
rates$B <- c( stone$Y[3]/(stone$Y[3]+stone$Y[4]), stone$Y[7]/(stone$Y[7]+stone$Y[8]))
rates$overall <- c( sum( stone$Y[c(1,5)] )/sum(stone$Y[c(1,2,5,6)]) , sum( stone$Y[c(3,7)] )/sum(stone$Y[c(3,4,7,8)]) )
## Adjusted estimator
P.Z1 <- sum( stone$Z*stone$Y)/sum(stone$Y)
P.Z0 <- sum( (1-stone$Z)*stone$Y)/sum(stone$Y)
E.Y0 <- stone$Y[1]/(stone$Y[1]+stone$Y[2]) * P.Z0 + stone$Y[5]/(stone$Y[5]+stone$Y[6]) * P.Z1
E.Y1 <- stone$Y[3]/(stone$Y[3]+stone$Y[4]) * P.Z0 + stone$Y[7]/(stone$Y[7]+stone$Y[8]) * P.Z1
## Berkeley admission data
berkeley <- read.csv("data/berkeley.csv")
Accepted <- tapply( berkeley$Admission=="Accepted", list( berkeley$Major, berkeley$Gender), sum )
Rejected <- tapply( berkeley$Admission=="Rejected", list( berkeley$Major, berkeley$Gender), sum )
cond.accept.rate <- Accepted/(Accepted + Rejected )
marg.accept.rate <- apply( Accepted, 2, sum)/( apply(Accepted+Rejected,2,sum) )
## This is the estimator after ajusting for the confounding variable.
total.female <- apply( Accepted + Rejected, 2, sum )[1]
total.male <- apply( Accepted + Rejected, 2, sum )[2]
applicant.ratio <- apply(Accepted+Rejected,1,sum)/(total.female+total.male)
sum( cond.accept.rate[,1] * applicant.ratio - cond.accept.rate[,2] * applicant.ratio )
## ATT
P.dept.cond.Female <- (Accepted[,1]+Rejected[,1])/apply( Accepted+Rejected,2,sum)[1]
P.cond.male.dept <- Accepted[,2]/(Accepted[,2]+Rejected[,2])
E.Y0 <- mean( P.cond.male.dept )
E.Y1 <- sum( Accepted[,2])/(sum(Accepted[,2])+sum(Rejected[,2]))
|
643957a9c6db1c4b7c50db44f2c03c671d59d71c
|
803770e54b1ef0af4806041dc364d34145d2304f
|
/cachematrix.R
|
e574e58ceca4b371a892d7138690b0958c944f52
|
[] |
no_license
|
mfrechtling/ProgrammingAssignment2
|
aff30a365470463780b7bf56946a05c73fdfa264
|
96f9ffee10871b399244d753597d55099a6482ac
|
refs/heads/master
| 2021-01-15T09:36:54.544900
| 2014-12-19T08:12:32
| 2014-12-19T08:12:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
cachematrix.R
|
## The following functions will calculate and cached the inverse of an input
## matrix, allowing the inverse to be retreived from the cache at a later point
## rather than being recalculated
## Get and set functions for the input matrix and it's inverse
makeCacheMatrix <- function(x = matrix()) {
xinv <- NULL
set <- function(y) {
x <<- y
xinv <<- NULL
}
get <- function() x
setinv <- function(inv) xinv <<- inv
getinv <- function() xinv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Checks to see if the inverse of the input matrix is in the cache and returns
## it if found. Otherwise calculates and caches the inverse before returning it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinv(inv)
inv
}
|
b8991bdb6fb6763df4a700b51b38551da1dd6de8
|
2314161607114335089ecd0bb61116ea6bcbf62b
|
/simplesymmetric/code_from_sugitani.R
|
b2b59de5d953f5216e6df49bcc11d83f5265c204
|
[] |
no_license
|
floatofmath/cml8r
|
5a969a26e58c454e7142ab708f41932bc0db6ebc
|
5b19400186adc3ba85b603874896be852a9d185d
|
refs/heads/master
| 2016-09-06T07:56:04.189540
| 2013-12-12T14:47:50
| 2013-12-12T14:47:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,480
|
r
|
code_from_sugitani.R
|
library(mvtnorm)
library(gMCP)
#- Setting parameters -#
weight1 <- 0.5; weight2 <- 0.5;
alpha <- 0.025; seed <- 4989;
e <- 0.0000000000001
c <- alpha/2
#- Start of function "sim" -#
sim <- function(n,nsim,mean,gamma1,gamma2,p,q,rho){
#--- Step1: Data generation ---#
sigma <- diag(4)
sigma[1,2]<-sigma[2,1]<-sigma[3,4]<-sigma[4,3]<-0.5
sigma[1,3]<-sigma[3,1]<-sigma[2,4]<-sigma[4,2]<-rho
sigma[1,4]<-sigma[4,1]<-sigma[2,3]<-sigma[3,2]<-rho/2
set.seed(seed)
x <- rmvnorm(nsim, mean=sqrt(n)*mean, sigma=sigma)
pval <- 1-pnorm(x)
#--- Step2: Treatment selection rule ---#
select <- rbinom(n=nsim,size=1,p=p)
select[select==0] <- rbinom(n=length(select[select==0]),size=1,p=q)+2
pval <- cbind(pval,select)
#--- Step3: Hypothesis testing ---#
#- select=1: Continue with both treatments -#
#- Definition of the Bonferroni-based graph
m <- rbind(H1=c(0, gamma1, 1-gamma1, 0),
H2=c(gamma2, 0, 0, 1-gamma2),
H3=c(0, 1, 0, 0),
H4=c(1, 0, 0, 0))
weights <- c(weight1, weight2, 0, 0)
graph <- new("graphMCP", m=m, weights=weights)
if (length(select[select==1])!=0){
power.sub1 <- matrix(0,length(select[select==1]),4)
pval.sub1 <- pval[select==1,-5]
for (i in 1:length(select[select==1])){
result <-gMCP(graph, as.vector(pval.sub1[i,]), test="Bonferroni", alpha=0.025)
power.sub1[i,]<-matrix(ifelse(attributes(result)$rejected=="TRUE",1,0),1,4)
}
}
else {power.sub1 <- matrix(0,0,4)}
#- select=2: Continue with a randomly selected treatment -#
if (length(select[select==2])!=0){
power.sub2 <- matrix(0,length(select[select==2]),4)
pval.sub2 <- pval[select==2,-5]
for (i in 1:length(select[select==2])){
if (runif(1)<0.5){
power.sub2[i,1]<- ifelse(pval.sub2[i,1]<c,1,0)
power.sub2[i,3]<- ifelse(pval.sub2[i,1]<c&pval.sub2[i,3]<(1-gamma1)*c,1,0)
}
else{
power.sub2[i,2]<- ifelse(pval.sub2[i,2]<c,1,0)
power.sub2[i,4]<- ifelse(pval.sub2[i,2]<c&pval.sub2[i,4]<(1-gamma2)*c,1,0)
}
}
}
else {power.sub2 <- matrix(0,0,4)}
#- select=3: Continue with the treatment with the larger interim mean -#
power.sub3 <- matrix(0,length(select[select==3]),4)
pval.sub3 <- pval[select==3,-5]
for (i in 1:length(select[select==3])){
if (pval.sub3[i,1]<pval.sub3[i,2]){
power.sub3[i,1]<- ifelse(pval.sub3[i,1]<c,1,0)
power.sub3[i,3]<- ifelse(pval.sub3[i,1]<c&pval.sub3[i,3]<(1-gamma1)*c,1,0)
}
else{
power.sub3[i,2]<- ifelse(pval.sub3[i,2]<c,1,0)
power.sub3[i,4]<- ifelse(pval.sub3[i,2]<c&pval.sub3[i,4]<(1-gamma2)*c,1,0)
}
}
#- Combine the results from select1 to 3 -#
power <- rbind(power.sub1,power.sub2,power.sub3)
success <- ifelse(apply(power[,c(1,2)],1,sum)>=1,1,0)
colnames(power) <- c("H1","H2","H3","H4")
final <- cbind(success,power)
signif(apply(final,2,mean),digits=3)
}
#- End of function "sim" -#
#- Example -#
#- Case (A) -#
sim(n=106,nsim=1000000,mean=c(0,0,0,0),gamma1=0.5,gamma2=0.5,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=1000000,mean=c(0,0,0,0),gamma1=0,gamma2=0,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=1000000,mean=c(0,0,0,0),gamma1=1-e,gamma2=1-e,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=1000000,mean=c(0,0,0,0),gamma1=0.5,gamma2=0.5,p=0 ,q=1,rho=0.5)
sim(n=106,nsim=1000000,mean=c(0,0,0,0),gamma1=0,gamma2=0,p=0 ,q=1,rho=0.5)
#- Case (B) -#
sim(n=106,nsim=100000,mean=c(0.3,0,0.3,0),gamma1=0.5,gamma2=0.5,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0,0.3,0),gamma1=0,gamma2=0,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0,0.3,0),gamma1=1-e,gamma2=1-e,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0,0.3,0),gamma1=0.5,gamma2=0.5,p=0,q=1,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0,0.3,0),gamma1=0,gamma2=0,p=0,q=1,rho=0.5)
#- Case (C) -#
sim(n=106,nsim=100000,mean=c(0.3,0.15,0.3,0.15),gamma1=0.5,gamma2=0.5,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.15,0.3,0.15),gamma1=0,gamma2=0,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.15,0.3,0.15),gamma1=1-e,gamma2=1-e,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.15,0.3,0.15),gamma1=0.5,gamma2=0.5,p=0,q=1,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.15,0.3,0.15),gamma1=0,gamma2=0,p=0,q=1,rho=0.5)
#- Case (D) -#
sim(n=106,nsim=100000,mean=c(0.3,0.3,0.3,0.3),gamma1=0.5,gamma2=0.5,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.3,0.3,0.3),gamma1=0,gamma2=0,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.3,0.3,0.3),gamma1=1-e,gamma2=1-e,p=0.5,q=0.5,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.3,0.3,0.3),gamma1=0.5,gamma2=0.5,p=0,q=1,rho=0.5)
sim(n=106,nsim=100000,mean=c(0.3,0.3,0.3,0.3),gamma1=0,gamma2=0,p=0,q=1,rho=0.5)
|
ac9be3cc1236249bd688c565be31cbe7bc09ad54
|
78fb338b3a288c75f2f00e0e67ceaecdc5aeecdd
|
/man/getPlayerStatTypes.Rd
|
4b5e96d6f3834b11dbf05031670e56a760c66333
|
[
"MIT"
] |
permissive
|
pbulsink/nhlRapi
|
f5b9c8f80b5030b9fc5ce92c811937ccb7c7f2f6
|
88c65b93dbf0e8b787ffbf02ef915f84330b50f9
|
refs/heads/master
| 2020-04-22T06:01:11.780541
| 2019-06-26T13:27:45
| 2019-06-26T13:27:45
| 170,176,071
| 0
| 1
|
NOASSERTION
| 2019-06-26T13:27:46
| 2019-02-11T18:03:09
|
R
|
UTF-8
|
R
| false
| true
| 461
|
rd
|
getPlayerStatTypes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/players.R
\name{getPlayerStatTypes}
\alias{getPlayerStatTypes}
\title{Get Player Stat Types}
\usage{
getPlayerStatTypes()
}
\value{
a list of player stat types to call with \code{\link{getPlayerStats}()}
}
\description{
Only certain stat types are accepted for players. This returns the full valid list.
}
\examples{
#See the possible stat types:
statTypes <- getPlayerStatTypes()
}
|
fe8b4c40aaefaf0864dd125030bbf4f0e348c341
|
06cdfccf8d44f11742fec1162afdfe2421c22302
|
/man/compute_ABC_cpp.Rd
|
f97b319950dfe989a6e2b1a265a1d7bcdc00dbbc
|
[
"MIT"
] |
permissive
|
lgaborini/rdirdirgamma
|
06feabefb12a42d0496818ecc9a0f70f7ccc1c5c
|
f3087f0a81c9e4b08ff56efcc260873eaa16232d
|
refs/heads/master
| 2023-04-18T00:18:29.380512
| 2021-03-05T18:06:26
| 2021-03-05T18:06:26
| 290,997,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,982
|
rd
|
compute_ABC_cpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{compute_ABC_cpp}
\alias{compute_ABC_cpp}
\title{Perform ABC sampling using the stick breaking procedure, returning the acceptance ratio.}
\usage{
compute_ABC_cpp(
n_sample,
m_sample,
alpha_0,
beta_0,
nu_0,
mtx_obs,
summarize_eps,
reps,
p_norm,
use_optimized_summary,
return_distances = FALSE
)
}
\arguments{
\item{n_sample}{hyperparameters that are used to generate data: number of samples per source}
\item{m_sample}{hyperparameters that are used to generate data: number of sources}
\item{alpha_0}{hyperparameters that are used to generate data}
\item{beta_0}{hyperparameters that are used to generate data}
\item{nu_0}{hyperparameters that are used to generate data}
\item{mtx_obs}{the observed data matrix}
\item{summarize_eps}{ABC thresholds: as many as summary statistics}
\item{reps}{number of ABC samples to generate}
\item{p_norm}{exponent of the L^p norm (can be \code{Inf}) (default: 2)}
\item{use_optimized_summary}{if TRUE, return the optimized summary statistics (mean, sd, kurtosis, skewness), else standard (mean, sd)}
\item{return_distances}{if TRUE, also return distances for all samples}
}
\value{
a list with components:
\itemize{
\item \code{n_accepted}: number of accepted samples
\item \code{accept_ratio}: the acceptance ratio, where 1 means that all max_iter samples were accepted.
\item \code{d_ABC}: a (max_iter x n_summary) matrix of distances (if \code{return_distances} is TRUE)
}
}
\description{
Perform ABC sampling using the stick breaking procedure, returning the acceptance ratio.
Similar to \code{\link[=sample_ABC_rdirdirgamma_beta_cpp]{sample_ABC_rdirdirgamma_beta_cpp()}} but also performs the acceptance step.
}
\seealso{
Other ABC functions:
\code{\link{compute_distances_gen_obs_cpp}()},
\code{\link{generate_acceptable_data_cpp}()},
\code{\link{sample_ABC_rdirdirgamma_beta_cpp}()}
}
\concept{ABC functions}
|
1d7a7f775ff01962cb9fe5160406587b91d3fb29
|
0d6d355a50a3e8dc4fcba663fa5e29d84e3d958e
|
/Spatial Data Analysis - Spring 2017/Final/Part_B.r
|
975741637cb0a14c7a8e507e1e9b855b913a01b7
|
[] |
no_license
|
JunchaoMei/Data-Science
|
d651415add2a0f4fde7d62dbedfb30d962a4200e
|
5ee831ce0ec8a75f629939f217cc26d5dfb412d7
|
refs/heads/master
| 2020-04-12T13:35:46.777260
| 2018-12-20T07:46:51
| 2018-12-20T07:46:51
| 162,526,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,099
|
r
|
Part_B.r
|
# Part_B
#import libraries
library(rgdal)
library(ggplot2)
library(spatstat)
library(maptools)
library(ape)
library(spdep)
library(spgwr)
library(sp)
library(gstat)
#read shape files
setwd("G:/GIS/DataFiles_FinalExam")
Community <- readOGR(dsn="2_Community", layer="2_Community")
nrow(Community)
summary(Community)
Community_data <- Community@data
Central_Locations <- coordinates(Community)
colnames(Central_Locations) <- c("long_central","lat_central")
Intensity_sample <- cbind(Community_data$Intensity,Community_data$S_Long,Community_data$S_Lat)
colnames(Intensity_sample) <- c("intensity","long","lat")
#convert [Intensity_sample] to SpatialPointsDataFrame
Intensity_sample <- as.data.frame(Intensity_sample)
coordinates(Intensity_sample) = ~long+lat
Intensity_sample@proj4string = Community@proj4string
#convert [Community] to SpatialPointsDataFrame
Community_sp <- SpatialPointsDataFrame(coordinates(Community), data = Community@data, proj4string = CRS(proj4string(Community)))
#create SpatialGridDataFrame
set.seed(888)
Community.grid = as.data.frame(spsample(Community,"regular",n=5000))
names(Community.grid)=c("long","lat")
coordinates(Community.grid)=c("long","lat")
gridded(Community.grid)=TRUE
fullgrid(Community.grid)=TRUE
Community.grid@proj4string=Community@proj4string
## OK
g.Intensity = gstat(id = "Intensity", formula = intensity ~ 1, data = Intensity_sample)
v.Intensity = fit.variogram(variogram(g.Intensity),vgm(1,"Exp",160,1))
ok.gstat = gstat(id="Intensity",formula = intensity ~ 1, data = Intensity_sample, model = v.Intensity)
#interpolation
ok.predict = predict(ok.gstat, Community_sp)
#plot
plot(Community)
plot(ok.predict["Intensity.pred"],add=T,pch=20)
title(main="OK intepolation - map")
spplot(ok.predict['Intensity.pred'])
#table
ok.result = ok.predict@data$Intensity.pred
ok.result.table = cbind(coordinates(ok.predict),ok.result)
colnames(ok.result.table) <- c("long","lat","ok.intensity.pred")
ok.result.table
#plot surface
ok.pred.surf=predict(ok.gstat,Community.grid)
ok.map=ggplot()+geom_tile(data=as.data.frame(ok.pred.surf),aes(x=long,y=lat,fill=Intensity.pred))
ok.map=ok.map+geom_path(data=Community,aes(x=long,y=lat,group=group),colour="grey",alpha = 0.8)
ok.map=ok.map+geom_point(data=as.data.frame(coordinates(Community)),aes(x=V1,y=V2),color="black")
ok.map=ok.map+labs(title="OK Surface")
ok.map=ok.map+geom_contour(data=as.data.frame(ok.pred.surf),aes(x=long,y=lat,z=Intensity.pred))
ok.map=ok.map+scale_fill_gradientn("Value",colors =rainbow(10))+coord_equal()
ok.map
## IDW
# different k - nearest neighbors
result.k = matrix(0, nrow = nrow(Intensity_sample), ncol = nrow(Intensity_sample))
diff.k = c()
for(i in 1:nrow(Intensity_sample))
{
idw.gstat = gstat(id = "Intensity", formula = intensity ~ 1, data = Intensity_sample, set = list(idp = 1), nmax = i)
idw.predict = predict(idw.gstat, Community_sp)
pred = c(idw.predict@data$Intensity.pred)
diff = 0
for(j in 1:nrow(Intensity_sample))
{
result.k[i, j] = pred[j]
diff = diff + abs(pred[j]-ok.result[j])
}
diff.k = c(diff.k, diff)
}
diff.k # k=12, mostly similar
idw.k.gstat = gstat(id = "Intensity", formula = intensity ~ 1, data = Intensity_sample, set = list(idp = 1), nmax = 12)
idw.k.predict = predict(idw.gstat, Community_sp)
#plot
plot(Community)
plot(idw.k.predict["Intensity.pred"],add=T,pch=20)
title(main="IDW intepolation (k=12) - map")
spplot(idw.predict["Intensity.pred"])
#table
idw.k.result = idw.k.predict@data$Intensity.pred
idw.k.result.table = cbind(coordinates(idw.k.predict),idw.k.result)
colnames(idw.k.result.table) <- c("long","lat","idw.k.intensity.pred")
idw.k.result.table
#plot surface
idw.k.pred.surf=predict(idw.k.gstat,Community.grid)
idw.k.map=ggplot()+geom_tile(data=as.data.frame(idw.k.pred.surf),aes(x=long,y=lat,fill=Intensity.pred))
idw.k.map=idw.k.map+geom_path(data=Community,aes(x=long,y=lat,group=group),colour="grey",alpha = 0.8)
idw.k.map=idw.k.map+geom_point(data=as.data.frame(coordinates(Community)),aes(x=V1,y=V2),color="black")
idw.k.map=idw.k.map+labs(title="IDW(k=12) Surface")
idw.k.map=idw.k.map+geom_contour(data=as.data.frame(idw.k.pred.surf),aes(x=long,y=lat,z=Intensity.pred))
idw.k.map=idw.k.map+scale_fill_gradientn("Value",colors =rainbow(10))+coord_equal()
idw.k.map
# different r - circle radius
result.r = matrix(0, nrow = length(seq(30,100,5)), ncol = nrow(Intensity_sample))
diff.r = c(nrow(seq(30,100,5)))
i = 1
for(r in seq(30,100,5))
{
idw.gstat = gstat(id = "Intensity", formula = intensity ~ 1, data = Intensity_sample, set = list(idp = 1), maxdist = r * 1.0, nmin = 1, force = T)
idw.predict = predict(idw.gstat, Community_sp)
pred = c(idw.predict@data$Intensity.pred)
diff = 0
for(j in 1:nrow(Intensity_sample))
{
result.r[i, j] = round(pred[j], 2)
diff = diff + abs(pred[j]-ok.result[j])
}
diff.r = c(diff.r, diff)
i = i + 1
}
diff.r # r=95, diff.r->min
idw.r.gstat = gstat(id = "Intensity", formula = intensity ~ 1, data = Intensity_sample, set = list(idp = 1), maxdist = 95 * 1.0, nmin = 1, force = T)
idw.r.predict = predict(idw.r.gstat, Community_sp)
#plot
plot(Community)
plot(idw.r.predict["Intensity.pred"],add=T,pch=20)
title(main="IDW intepolation (r=95) - map")
spplot(idw.r.predict["Intensity.pred"])
#table
idw.r.result = idw.r.predict@data$Intensity.pred
idw.r.result.table = cbind(coordinates(idw.r.predict),idw.r.result)
colnames(idw.r.result.table) <- c("long","lat","idw.r.intensity.pred")
idw.r.result.table
#plot surface
idw.r.pred.surf=predict(idw.r.gstat,Community.grid)
idw.r.map=ggplot()+geom_tile(data=as.data.frame(idw.r.pred.surf),aes(x=long,y=lat,fill=Intensity.pred))
idw.r.map=idw.r.map+geom_path(data=Community,aes(x=long,y=lat,group=group),colour="grey",alpha = 0.8)
idw.r.map=idw.r.map+geom_point(data=as.data.frame(coordinates(Community)),aes(x=V1,y=V2),color="black")
idw.r.map=idw.r.map+labs(title="IDW(r=95) Surface")
idw.r.map=idw.r.map+geom_contour(data=as.data.frame(idw.r.pred.surf),aes(x=long,y=lat,z=Intensity.pred))
idw.r.map=idw.r.map+scale_fill_gradientn("Value",colors =rainbow(10))+coord_equal()
idw.r.map
|
2a15bf85c48f68452cc8975284aace5f7d216ece
|
e3ce3ad557ebd51429ed7acfea936723149a8d4c
|
/R/sof.engvall.R
|
c314ccce57e61c80596e15d14ad2d470764eb4ba
|
[] |
permissive
|
jakobbossek/smoof
|
87512da9d488acfe3a7cc62aa3539a99e82d52ba
|
d65247258fab57d08a5a76df858329a25c0bb1b8
|
refs/heads/master
| 2023-03-20T02:05:12.632661
| 2023-03-08T13:59:27
| 2023-03-08T13:59:27
| 22,465,741
| 32
| 27
|
BSD-2-Clause
| 2022-01-21T10:02:19
| 2014-07-31T10:39:43
|
R
|
UTF-8
|
R
| false
| false
| 1,235
|
r
|
sof.engvall.R
|
#' @title Complex function.
#'
#' @description Two-dimensional test function based on the formula
#' \deqn{f(\mathbf{x}) = x_1^4 + x_2^4 + 2 x_1^2 x_2^2 - 4 x_1 + 3}
#' with \eqn{\mathbf{x}_1, \mathbf{x}_2 \in [-2000, 2000]}.
#'
#' @references See \url{https://al-roomi.org/benchmarks/unconstrained/2-dimensions/116-engvall-s-function}.
#'
#' @template ret_smoof_single
#' @export
makeEngvallFunction = function() {
  # Objective: x1^4 + x2^4 + 2 x1^2 x2^2 - 4 x1 + 3, minimised at (1, 0)
  # where the value is 0.
  evalEngvall = function(x) {
    assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
    x[1]^4 + x[2]^4 + 2 * x[1]^2 * x[2]^2 - 4 * x[1] + 3
  }
  # Two numeric decision variables, each restricted to [-2000, 2000].
  engvall.par.set = makeNumericParamSet(
    len = 2L,
    id = "x",
    lower = c(-2000, -2000),
    upper = c(2000, 2000),
    vector = TRUE
  )
  makeSingleObjectiveFunction(
    name = "Engvall Function",
    id = "engvall_2d",
    fn = evalEngvall,
    par.set = engvall.par.set,
    tags = attr(makeEngvallFunction, "tags"),
    global.opt.params = c(1, 0),
    global.opt.value = 0
  )
}
class(makeEngvallFunction) = c("function", "smoof_generator")
attr(makeEngvallFunction, "name") = c("Engvall")
attr(makeEngvallFunction, "type") = c("single-objective")
attr(makeEngvallFunction, "tags") = c("single-objective", "continuous", "differentiable", "non-separable", "non-scalable", "unimodal")
|
e2d41de26b0c1468701858c5495cb681e3d0b09e
|
b8f66037ac2f9008dcad89624a4f0bb5f4389784
|
/Calculate_RealizedNe.R
|
98050df24d9fb22853ee25f6e8a1ee231acb2320
|
[] |
no_license
|
kjgilbert/SlimSimCode
|
bca5fa3e665043bbf8895f2f67f610ec12eba8e2
|
1e91181c2cb721c008ca7ad2f8e0aea65262e672
|
refs/heads/master
| 2020-04-10T20:10:57.159157
| 2017-04-27T14:20:21
| 2017-04-27T14:20:21
| 68,229,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,435
|
r
|
Calculate_RealizedNe.R
|
# NOTE(review): setwd() hard-codes a cluster path; the script is not portable.
setwd("/cap1/kgilbert/WestgridOutputs")
# calculate pi:
# Provides calc.pi.stats() used inside est.ne() below.
source('/cap1/kgilbert/WestgridOutputs/CalculatePi.R', chdir = TRUE)
# gives back: pi overall, pi_n, pi_s
## sample.output.files <- system("ls SampleOutput_Nov19_N10000_25mbp_*", intern=TRUE)
## full.output.files <- system("ls FullOutput_Nov19_N10000_25mbp_*", intern=TRUE)
## fixed.output.files <- system("ls FixedOutput_Nov19_N10000_25mbp_*", intern=TRUE)
## sequence.length <- 25000000
## pop.size <- 10000
## sample.size <- 100
## summ.stats.output.file <- "test"
# Estimate realized effective population size (Ne) from SLiM simulation
# output. For each replicate (one sample/full/fixed file triple), the final
# generation's full output is read, `sample.size` diploid individuals are
# subsampled, nucleotide diversity is computed via calc.pi.stats(), and Ne
# is derived from synonymous diversity as Ne = pi_s / (4 * mu_neutral).
# One result row per replicate is appended to `summ.stats.output.file` (CSV).
# NOTE(review): relies on external shell tools (grep, wc) via system().
est.ne <- function(sample.output.files, full.output.files, fixed.output.files, summ.stats.output.file, sample.size, sequence.length, pop.size){
  # Write the CSV header once; result rows are appended inside the loop.
  results <- data.frame(matrix(nrow=0, ncol=4))
  names(results) <- c("ignore", "file", "generation", "Ne.neutral")
  write.table(results, append=FALSE, file=summ.stats.output.file, sep=",", col.names=TRUE)
  iterate <- 1
  for(i in 1:length(sample.output.files)){
    # go through each file
    sample.file <- sample.output.files[i]
    full.file <- full.output.files[i]
    fixed.file <- fixed.output.files[i]
    ## full data output
    # Locate the "Mutations", "Individuals" and "Genomes" section headers by
    # line number using `grep -n` on the raw output file.
    full.samp.muts.start <- as.numeric(unlist(strsplit(system(paste(c("grep -n Mutations ", full.file), collapse=""), intern=TRUE), split=":"))[1])
    full.samp.inds.start <- as.numeric(strsplit(system(paste(c("grep -n Individuals ", full.file), collapse=""), intern=TRUE), split=":")[[1]][1])
    full.samp.genomes.start <- as.numeric(strsplit(system(paste(c("grep -n Genomes ", full.file), collapse=""), intern=TRUE), split=":")[[1]][1])
    full.samp.file.end <- as.numeric(head(tail(unlist(strsplit(system(paste(c("wc -l ", full.file), collapse=""), intern=TRUE), split=" ")), n=2), n=1))
    ## fixed data output
    if(length(readLines(fixed.file)) == 2){ # then no mutations fixed
      fixeddat <- NULL
    }else{ # otherwise read in fixed mutations as normal
      fixed.mut.id.start <- 2
      fixeddat <- read.table(fixed.file, skip=fixed.mut.id.start)
      names(fixeddat) <- c("mut.ID", "unique.mut.ID", "mut.type", "base_position", "seln_coeff", "dom_coeff", "subpop_ID", "gen_arose", "gen.fixed")
    }
    # for last, full time point
    polydat <- read.table(full.file, skip=full.samp.muts.start, nrow=((full.samp.inds.start-1) - full.samp.muts.start), sep=" ")
    names(polydat) <- c("mut.ID", "unique.mut.ID", "mut.type", "base_position", "seln_coeff", "dom_coeff", "subpop_ID", "generation_arose", "mut.prev")
    # NOTE(review): sep="A" presumably keeps each genome line as a single
    # field (splitting only on the literal letter "A") -- confirm intent.
    genodat <- read.table(full.file, skip=full.samp.genomes.start, nrow=(pop.size*2), sep="A")
    # sample from a vector of odd numbers since all inds have 2 paired genomes (diploid) and they start on an odd line and end on an even line
    odd.nums <- seq(1, (pop.size * 2), by=2)
    sub.samp <- sample(odd.nums, size=sample.size, replace=FALSE)
    diploid.sub.samp <- sort(c(sub.samp, (sub.samp + 1)))
    genodat <- genodat[diploid.sub.samp ,]
    gen <- 10*pop.size # doing last gen, 10N
    pi.stats <- calc.pi.stats(poly.dat=polydat, genome.dat=genodat, fixed.dat=fixeddat, generation=gen, num.inds.sampled=sample.size, genome.size=sequence.length, use.manual.sample=TRUE)
    names(pi.stats) <- c("pi", "pi_n", "pi_s")
    # Per-site mutation rates: total 7e-9, with 25% neutral (synonymous)
    # and 75% under selection.
    total.mut.rate <- 7*10^-9
    neut.mut.rate <- 0.25*total.mut.rate
    seln.mut.rate <- 0.75*total.mut.rate
    # pi_s = 4 Ne mu_s # only want Ne from neutral data, so use synonymous
    Ne_s <- pi.stats["pi_s"]/(4*neut.mut.rate)
    ## Ne_n <- pi.stats["pi_n"]/(4*seln.mut.rate)
    ## Ne_total <- pi.stats["pi"]/(4*total.mut.rate)
    temp.results <- c(sample.file, gen, Ne_s)
    write.table(t(temp.results), append=TRUE, file=summ.stats.output.file, sep=",", col.names=FALSE)
    iterate <- iterate + 1
    # clear previous data, see if this solves the weird plot results
    polydat <- NULL
    genodat <- NULL
    fixeddat <- NULL
  }
}
## # Ne*s bins:
## # 0-1
## # 1-10
## # 10-100
## # 100-inf
##
## # therefore s bins:
##
## zero.s.boundary <- 0
## one.s.boundary <- 1/Ne_s
## ten.s.boundary <- 10/Ne_s
## hundred.s.boundary <- 100/Ne_s
## inf.s.boundary <- 1
##
## # how many muts in each bin:
##
## # FIXED
##
## # POLY
##
##
##
##
## GLEMIN:
## geno.wide.U <- neut.mut.rate*sequence.length
## sd <- -0.01
## hd <- 0.3
## recomb <- 5*10^-8
##
## alpha <- e^-()
##
##
## pop.size <- 10000
##
## self.rate <- 0.99
## Fval <- self.rate/(2-self.rate)
##
## Ne <- (alpha*pop.size)/(1+Fval)
|
e29ee7cf123b22b803162805dfb0811f95db3812
|
9bdf92270f7476021959c1b39e6b92388fafb61b
|
/tests/testthat/test-bfactor-interpret.R
|
e134a8c51348d26dd4f3458aae1192233cbff9d3
|
[
"MIT"
] |
permissive
|
ptfonseca/pcal
|
7ed6df79bdd5c3917f4316145657e577676846e7
|
198659789d55cd3c669775f807e603e5c03faac0
|
refs/heads/master
| 2023-01-13T12:14:05.389014
| 2020-11-17T23:53:39
| 2020-11-17T23:53:39
| 264,692,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,377
|
r
|
test-bfactor-interpret.R
|
# testthat unit tests for bfactor_interpret(), which maps Bayes factor values
# to qualitative evidence categories on either the Jeffreys or the
# Kass-Raftery scale.

context("bfactor_interpret function - jeffreys")

# Inputs are written as 10^x so test values line up with log10(BF) thresholds.
test_that("bfactor_interpret test 1", {
  expect_equal(
    bfactor_interpret(
      10 ^ c(-3.10, -1.78, 1.06, -1.40, 1.21, 0.89, -2.37, 1.23, -8.88, 3.81, -8.38, 0.62)
    ),
    c("Negative", "Negative", "Strong", "Negative", "Strong", "Substantial",
      "Negative", "Strong", "Negative", "Decisive", "Negative", "Substantial")
  )
})

test_that("bfactor_interpret test 2", {
  expect_equal(
    bfactor_interpret(10 ^ c(0.07, 1.29, 1.32, -0.62, -1.78, 1.55, -3.02, 1.25, 1.48)),
    c("Weak", "Strong", "Strong", "Negative", "Negative", "Very Strong",
      "Negative", "Strong", "Strong")
  )
})

# "jeffreys" is the default scale: omitting the argument must be equivalent
# to passing it explicitly.
test_that("bfactor_interpret scale", {
  expect_equal(
    bfactor_interpret(
      10 ^ c(-3.10, -1.78, 1.06, -1.40, 1.21, 0.89, -2.37, 1.23, -8.88, 3.81, -8.38, 0.62)
    ),
    bfactor_interpret(
      10 ^ c(-3.10, -1.78, 1.06, -1.40, 1.21, 0.89, -2.37, 1.23, -8.88, 3.81, -8.38, 0.62),
      scale = "jeffreys")
  )
})

# The scale name must be accepted regardless of capitalization.
test_that("bfactor_interpret scale case sensitiveness", {
  expect_equal(
    bfactor_interpret(
      10 ^ c(-3.10, -1.78, 1.06, -1.40, 1.21, 0.89, -2.37, 1.23, -8.88, 3.81, -8.38, 0.62),
      scale = "Jeffreys"),
    bfactor_interpret(
      10 ^ c(-3.10, -1.78, 1.06, -1.40, 1.21, 0.89, -2.37, 1.23, -8.88, 3.81, -8.38, 0.62),
      scale = "jeffreys")
  )
})

# A large Bayes factor falls in the top ("Decisive") Jeffreys category.
test_that("bfactor_interpret decisive", {
  expect_equal(bfactor_interpret(200), "Decisive")
})

context("bfactor_interpret error and warning messages - jeffreys")

# Invalid inputs (NULL, empty, NA/NaN, non-numeric, negative BF) must error;
# an NA alongside valid values only warns (see last test of this section).
test_that("bfactor_interpret NULL test 1", {
  expect_error(bfactor_interpret(NULL))
})
test_that("bfactor_interpret NULL test 2", {
  expect_error(bfactor_interpret(NULL, scale = "jeffreys"))
})
test_that("bfactor_interpret - empty vector", {
  expect_error(bfactor_interpret(vector()))
})
test_that("bfactor_interpret - empty list", {
  expect_error(bfactor_interpret(list()))
})
test_that("bfactor_interpret - NA test 1", {
  expect_error(bfactor_interpret(NA))
})
test_that("bfactor_interpret NA test 2", {
  expect_error(bfactor_interpret(NA, scale = "jeffreys"))
})
test_that("bfactor_interpret NaN", {
  expect_error(bfactor_interpret(NaN))
})
test_that("bfactor_interpret factor", {
  expect_error(bfactor_interpret(factor(10)))
})
test_that("bfactor_interpret character", {
  expect_error(bfactor_interpret("10"))
})
test_that("bfactor_interpret - list", {
  expect_error(bfactor_interpret(list(10)))
})
test_that("bfactor_interpret - negative bf", {
  expect_error(bfactor_interpret(-0.6))
})
test_that("bfactor_interpret NA warning", {
  expect_warning(bfactor_interpret(c(10, NA)))
})

context("bfactor_interpret function kass-raftery")

# Category labels across the Kass-Raftery thresholds.
test_that("bfactor_interpret kass-raftery test 1", {
  expect_equal(
    bfactor_interpret(c(0, 2, 4, 21, 151), scale = "kass-raftery"),
    c("Negative", "Weak", "Positive", "Strong", "Very Strong"))
})
test_that("bfactor_interpret kass-raftery test 2", {
  expect_equal(
    bfactor_interpret(c(0.99, 2.99, 19.99, 149, 1510), scale = "kass-raftery"),
    c("Negative", "Weak", "Positive", "Strong", "Very Strong"))
})
# The Kass-Raftery scale has no "Decisive" label; its top category is
# "Very Strong".
test_that("bfactor_interpret kass-raftery decisive", {
  expect_equal(bfactor_interpret(200, scale = "kass-raftery"), "Very Strong")
})

context("bfactor_interpret error and warning messages - kass raftery")

# Same invalid-input contract as for the Jeffreys scale.
test_that("bfactor_interpret kass-raftery NULL", {
  expect_error(bfactor_interpret(NULL, scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery empty vector", {
  expect_error(bfactor_interpret(vector(), scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery empty list", {
  expect_error(bfactor_interpret(list(), scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery NA", {
  expect_error(bfactor_interpret(NA, scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery NaN", {
  expect_error(bfactor_interpret(NaN, scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery factor", {
  expect_error(bfactor_interpret(factor(10), scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery character", {
  expect_error(bfactor_interpret("10", scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery list", {
  expect_error(bfactor_interpret(list(10), scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery negative bf", {
  expect_error(bfactor_interpret(-0.6, scale = "kass-raftery"))
})
test_that("bfactor_interpret kass-raftery NA warning", {
  expect_warning(bfactor_interpret(c(10, NA), scale = "kass-raftery"))
})
|
ff1ed1e799f38398163ad0282e6be22494ec6d33
|
5ab78268f2f47fc850c99ec48b1c650d7f4c7959
|
/inst/tests/test-macros.R
|
bf39b1c118391fdfe49acdc97f032d791d6e7cd1
|
[] |
no_license
|
rcodo/vadr
|
a08799c3218e4ea8e4e0fb5c4822cffeaf3227ec
|
ec837ec27e9bcd365eb40327fc04efc8c82ac6cd
|
refs/heads/master
| 2021-08-31T12:49:26.768373
| 2015-09-02T03:13:35
| 2015-09-02T03:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,935
|
r
|
test-macros.R
|
# testthat unit tests for the vadr macro utilities: quoting.env(), macro(),
# expand_macros(), quote_args() and with_arg().

context("macros")

# Infix alias so tests can read `a %is% b` for expect_equal(a, b).
`%is%` <- expect_equal

# quoting.env() builds an environment in which the listed symbols evaluate
# to themselves, so evaluating an expression there reproduces its quoted form.
test_that("quoting.env", {
  en <- quoting.env(c('+', '(', 'a', 'b', '*'), environment())
  expect_equal(evalq(a+b, en), quote(a+b))
  expect_equal(evalq(a+(b*a), en), quote(a+(b*a)))
  z <- 100
  # Symbols NOT listed (z) are evaluated normally and substituted by value.
  expect_equal(evalq(a+(b*a)*z, en), quote(a+(b*a)*100))
})

test_that("quoting.env and '...'", {
  #some special casing is needed to make "..." eval to itself.
  en <- quoting.env(c("a", "b", "c", "list", "..."))
  expect_equal(evalq(c(a, list(sdf = b, ...)), en),
               quote(c(a, list(sdf = b, ...))))
})

test_that("quoting.env and missings", {
  en <- quoting.env(c('[', '<-', 'a', 'b', 'c'))
  # Empty index slots (missing arguments) must survive the round trip.
  expect_equal(evalq(a[1, ] <- b[, 2], en),
               quote(a[1, ] <- b[, 2]))
})

test_that("macro() turns a lexical substitutor function into a macro", {
  d <- function(expr_b) {
    call("+", expr_b, expr_b)
  }
  double <- macro(d)
  expect_equal(double(5), 10)
  x <- 5
  side_effect <- function(){
    x <<- x+1
  }
  # The argument expression is substituted twice, so the side effect runs
  # twice: x becomes 6 then 7, and 6 + 7 == 13.
  expect_equal(double(side_effect()), 13)
  expect_true("macro" %in% class(double))
  # The original substitutor is kept in the "orig" attribute.
  expect_equal(attr(double, "orig"), d)
})

test_that("macro cache pays attention to tags", {
  divmacro <- macro(function(a,b) qq(.(a)/.(b)))
  expect_equal(divmacro(10, 5), 2)
  expect_equal(divmacro(5, 10), 0.5)
  # Named arguments must be matched by tag, not cached purely by position.
  expect_equal(divmacro(a=10, b=5), 2)
  expect_equal(divmacro(b=10, a=5), 0.5)
  expect_equal(divmacro(b=5, a=10), 2)
})

test_that("expand_macro expands all visible macros (by one step)", {
  local({
    addmacro <- macro(function(x, y) qq(.(x) + .(y)))
    doublemacro <- macro(function(x, y) qq(.(x) * addmacro(.(y), .(y))))
    #
    expect_equal(expand_macros(quote(addmacro(a, b))), quote(a+b))
    expect_equal(expand_macros_q(addmacro(a, b*y)), quote(a+b*y))
    expect_equal(expand_macros_q(doublemacro(a, b)), quote(a * addmacro(b, b)))
    #macros are expanded from the top down.
    expect_equal(expand_macros_q(addmacro(a, addmacro(b,c))),
                 quote(a+addmacro(b,c)))
    expect_equal(expand_macros_q(addmacro(a, addmacro(b,c)), recursive=TRUE),
                 quote(a+(b+c)))
  })
})

test_that("quote_args", {
  # Function that quotes arguments "like an argument list", returning
  # a pairlist.
  quote_args(a=1, b=y, c=x+y) %is% as.pairlist(alist(a=1, b=y, c=x+y))
  quote_args(a=1, b, c) %is% as.pairlist(alist(a=1, b=, c=))
  # Unnamed arguments must be plain symbols; expressions, literals and
  # empty slots are rejected.
  expect_error(quote_args(a, b, x+y))
  expect_error(quote_args(a, b, 1))
  expect_error(quote_args(a, , c))
})

test_that("with_arg", {
  # with_arg() appends the given named arguments to every following call.
  (with_arg(a=2, b=3, list(4), list(5))
   %is% list(list(4, a=2, b=3), list(5, a=2, b=3)))
  x <- 1; y <- 2
  # list() evaluates a=x+y; alist() keeps it unevaluated.
  (with_arg(a=x+y, list(1), alist(1))
   %is% list(list(1, a=3), alist(1, a=x+y)))
  # .collect chooses the combining function for the results.
  (with_arg(.collect=c, a=1, b=2, c(1, 2), c(1))
   %is% c(1, 2, a=1, b=2,1, a=1, b=2))
})

## This would be a nice-to-have
## test_that("Formals", {
##   addmacro <- macro(function(x, y) qq(.(x) + .(y)))
##   expect_equal(formals(addmacro), quote_args(x, y))
## })
|
1295b27427dc710f739bda0812090e0a74645fab
|
d6f588905b2113dfca208a60d851a3c65f0a8d70
|
/wheat-ML-project.R
|
c2d12ea0e67f6ed05730398fe3cce43e9accab61
|
[] |
no_license
|
thilinik/Projects
|
38b1fd54d6e42d1eddba7bc679b655bf0666572a
|
a22adc9a602c8f71d8eaff5bbf557eb7f0aeac4c
|
refs/heads/master
| 2021-07-11T08:51:30.447492
| 2021-03-08T19:54:42
| 2021-03-08T19:54:42
| 56,555,293
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,610
|
r
|
wheat-ML-project.R
|
# Wheat classification project: compare LDA, QDA and multinomial logistic
# regression over 20 random train/test splits, recording train and test
# misclassification rates for each replicate.
library(MASS)
library(nnet)
##load the source file
# NOTE(review): the helper functions used below (manipulate_data,
# split_datasets, lda_predictions, Qda_predictions, logistic_predictions,
# evaluate_models) are defined in this sourced file and not visible here.
source("C:\\Users\\Toshiba\\Google Drive\\jobs\\ML\\unsupervised\\wheat-ML-functions.R")
##load the data set
Wheat.data <- read.csv("C:\\Users\\Toshiba\\Google Drive\\SFU\\stat\\fall15\\Stat852\\datasets\\wheat.csv",header=TRUE,sep=",")
head(Wheat.data)
##summarize the dataset
summary(Wheat.data)
##convert 'class' to numeric
Wheat.data <- manipulate_data(Wheat.data)
##Split the data
set.seed(67982193)
# One row per replicate; columns hold train/test error for each classifier.
summary<- data.frame(loop=c(1:20), LDA_tr_error=NA, LDA_test_error=NA,
QDA_tr_error=NA,QDA_test_error=NA,logistic_tr_error=NA,
logistic_test_error=NA)
for(i in 1:20)
{
# NOTE(review): split_datasets() is invoked twice, so set1 and set2 may come
# from two independent random splits -- verify this is intended.
set1 <- split_datasets(Wheat.data)[[1]]
set2 <- split_datasets(Wheat.data)[[2]]
##LDA
lda_pred <- lda_predictions(set1,set2)
lda_misclassifications <- evaluate_models(set1,set2,lda_pred[[1]],lda_pred[[2]])
summary[i,2] <- lda_misclassifications[[1]]
summary[i,3] <- lda_misclassifications[[2]]
##QDA
qda_pred <- Qda_predictions(set1,set2)
Qda_misclassifications <- evaluate_models(set1,set2,qda_pred[[1]],qda_pred[[2]])
summary[i,4] <- Qda_misclassifications[[1]]
summary[i,5] <- Qda_misclassifications[[2]]
##Logistic
logistic_pred <- logistic_predictions(set1,set2)
logistic_misclassifications <- evaluate_models(set1,set2,logistic_pred[[1]],logistic_pred[[2]])
summary[i,6] <- logistic_misclassifications[[1]]
summary[i,7] <- logistic_misclassifications[[2]]
}
# NOTE(review): the lines below look unrelated to the wheat analysis (a
# scratch scatterplot of random normals); possibly appended by accident.
a <- rnorm(100)
b <- rnorm(100)
library(ggplot2)
library(tidyverse)
df <- tibble(a=a,b=b)
df %>% ggplot(aes(a,b))+geom_point()
|
89e77b480949ef146c4d240fd985f9ac1d378f3f
|
fe063f55b246e7c4ffb870b5ba507c2316f05700
|
/data-raw/municipal_status_data.R
|
907abd8afef711fafb6869b2102a4c2af7c9dc5c
|
[] |
no_license
|
DJSmallSquare/suistats
|
fdcbfc9d29390476d22473a9468f8d4c90827a29
|
b5b913bbbf2588f5d1fcd4c017e08c8e57362365
|
refs/heads/master
| 2020-06-12T01:40:58.364833
| 2019-06-27T20:19:41
| 2019-06-27T20:19:41
| 194,154,145
| 0
| 0
| null | 2019-06-27T19:46:31
| 2019-06-27T19:46:30
| null |
UTF-8
|
R
| false
| false
| 3,130
|
r
|
municipal_status_data.R
|
# ====================================================================================================================
### Preparing municipal merger data
# ====================================================================================================================
### Packages
# Install any of the requested packages (with dependencies) that are not
# already present in the local library. Returns NULL invisibly.
check_packages <- function(pkg) {
  to_install <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(to_install) == 0) {
    return(invisible(NULL))
  }
  install.packages(to_install, dependencies = TRUE)
}
# Needed packages
packages <- c("tidyverse", "lubridate", "readxl")
check_packages(packages)
lapply(X = packages, FUN = library, character.only = TRUE)
rm(check_packages, packages)
# ====================================================================================================================
### Read in data
# BFS (Swiss Federal Statistical Office) commune mutation records: one row
# per mutation, mapping a source commune ("from_*") to a target commune
# ("to_*") with its effective date.
commune_mutations <-
  read_excel(path = "data-raw/BFS/Communes_mutées.xlsx", sheet = "Données", skip = 1L) %>%
  setNames(nm = c("mutation_id", "from_canton_abb", "from_district", "from_commune_id", "from_commune_name", "to_canton_abb", "to_district", "to_commune_id", "to_commune_name", "date")) %>%
  mutate_at(.vars = vars(mutation_id, from_district, from_commune_id, to_commune_id, to_district), .funs = as.integer) %>%
  mutate(
    date = lubridate::date(date),
    year = as.integer(lubridate::year(date))
  ) %>%
  select(date, year, mutation_id, from_canton_abb, from_district, from_commune_id, from_commune_name, to_canton_abb, to_district, to_commune_id, to_commune_name) %>%
  arrange(date, from_commune_id, to_commune_id)
### Problem cases
# NOTE(review): presumably hand-curated mutations that the main export
# misses or misreports -- same schema, appended below.
problem_cases <-
  read_excel(path = "data-raw/BFS/problem_cases.xlsx", sheet = "Sheet1") %>%
  setNames(nm = c("mutation_id", "from_canton_abb", "from_district", "from_commune_id", "from_commune_name", "to_canton_abb", "to_district", "to_commune_id", "to_commune_name", "date")) %>%
  mutate_at(.vars = vars(mutation_id, from_district, from_commune_id, to_commune_id, to_district), .funs = as.integer) %>%
  mutate(
    date = lubridate::date(date),
    year = as.integer(lubridate::year(date))
  ) %>%
  select(date, year, mutation_id, from_canton_abb, from_district, from_commune_id, from_commune_name, to_canton_abb, to_district, to_commune_id, to_commune_name) %>%
  arrange(date, from_commune_id, to_commune_id)
commune_mutations <-
  rbind(
    commune_mutations,
    problem_cases
  )
# Official municipality registers: one xlsx snapshot per date (the date is
# encoded in the file name as _YYYY_MM_DD), stacked into one long table.
official_municipality_lists <-
  list.files(path = "data-raw/BFS/Muncipality_Status", pattern = "xlsx?$", full.names = T) %>%
  grep(pattern = ".*_\\d{4}_\\d{2}_\\d{2}.xlsx$", value = T) %>%
  map(~readxl::read_excel(path = .x) %>%
        mutate(date = as.Date(x = str_extract(string = .x, pattern = "\\d{4}_\\d{2}_\\d{2}"), format = "%Y_%m_%d"))) %>%
  map(setNames, nm = c("history_number", "canton_abb", "district_id", "district_name", "commune_id", "commune_name", "registration_date", "date")) %>%
  reduce(rbind) %>%
  mutate(year = as.integer(lubridate::year(date))) %>%
  arrange(year, commune_id) %>%
  select(date, year, commune_id, commune_name, district_id, district_name, canton_abb)
# Save both tables into the package's data/ directory.
usethis::use_data(
  commune_mutations,
  official_municipality_lists,
  overwrite = T
)
|
5dd740f4ac4d59c7a4692231dd54480817e3593b
|
11b1e40e532a57ea775c1b711b308185b36ead6f
|
/scripts/integrate_portions.R
|
840e724777ee0beeffb9664cef471b988b8fb610
|
[
"MIT"
] |
permissive
|
connorcl/swiftkey-nlp
|
0ea41f676d53ab5dc02e94743a1ef5970f89a5ab
|
87f4c8f432f6ecac46ffb142d8515caa57012d4c
|
refs/heads/master
| 2020-03-27T06:12:34.004642
| 2019-08-14T21:00:39
| 2019-08-14T21:00:39
| 146,088,296
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
integrate_portions.R
|
# Combine per-portion n-gram frequency tables into one frequency file per
# n-gram order.
library(dplyr)
path <- "~/Projects/swiftkey-nlp/data/"
# NOTE(review): setwd() makes this script non-portable; prefer building
# full file paths with file.path().
setwd(path)
# Build an anchored regular expression matching an n-gram of n lowercase
# words joined by underscores; each word may contain at most one apostrophe.
generate_ngram_re <- function(n) {
  word_re <- "[a-z]+'{0,1}[a-z]*"
  pattern <- paste0("^", word_re)
  if (n > 1) {
    for (k in seq_len(n - 1)) {
      pattern <- paste0(pattern, "_", word_re)
    }
  }
  paste0(pattern, "$")
}
# For each n-gram order 1..5: concatenate the 10 portion tables, sum the
# frequency per feature, keep only well-formed n-grams seen at least 4
# times, and write one combined CSV per order.
for (n in 1:5) {
  freq_df <- NULL
  # NOTE(review): rbind in a loop is O(n^2); fine for 10 files, but
  # bind_rows over a list would scale better.
  for (i in 1:10) {
    filename <- paste0("portion_", i, "_", n,"gram_freq.csv")
    portion_df <- read.csv(filename, stringsAsFactors = FALSE)
    freq_df <- rbind(freq_df, portion_df)
  }
  freq_df <- freq_df %>%
    select(feature, frequency) %>%
    group_by(feature) %>%
    summarize_all(sum) %>%
    # Drop malformed tokens and rare n-grams (frequency < 4).
    filter(grepl(generate_ngram_re(n), feature) & frequency >= 4)
  output_filename <- paste0("freq_df_", n, "gram.csv")
  write.csv(freq_df, output_filename, row.names = FALSE)
}
|
598c35fd37b51f661d252bc5840ec0619efcf431
|
75f3fa8a6040edcd8c34668b4a19f2b07dde7ab1
|
/열추가 하는 벡터 생성 .R
|
0cc759820368abca385311e5deab0600e30a7821
|
[] |
no_license
|
Yu-Hayung/w3resource_R-Language
|
9452ef505f81fc448b6f01f09427496ac128e728
|
672151ba2ce154005491beb3d09d992a659f6595
|
refs/heads/main
| 2023-04-03T17:56:31.543520
| 2021-04-18T13:18:23
| 2021-04-18T13:18:23
| 350,978,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
열추가 하는 벡터 생성 .R
|
# (Translated from Korean:) Write an R program that creates three vectors
# a, b, c of three integers each, combines them so that each column of a
# 3x3 matrix represents one vector, and prints the matrix contents.
a<-c(1,2,3)
b<-c(4,5,6)
c<-c(7,8,9)
# cbind uses the variable names a, b, c as the matrix column names.
m<-cbind(a,b,c)
print("Content of the said matrix:")
print(m)
|
00b3632fa344914776b0288590c70ccd5835d0c2
|
bcb3643083a56b1aad8879354bae95f926c308e9
|
/code_rmd.R
|
cd223b023680eebf5a6134095d9b09b53832ccea
|
[] |
no_license
|
anarinsk/cossim
|
3bdb65549ce2f268bfa0bc93da4d14883cdfca8f
|
a2870a0841eb49062eb2a65799e0fc1f5ff0bd10
|
refs/heads/master
| 2020-06-23T23:24:37.584240
| 2019-11-19T05:51:01
| 2019-11-19T05:51:01
| 198,783,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,931
|
r
|
code_rmd.R
|
#devtools::install_github("yihui/xfun")
#install.packages("Rcpp", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
#install.packages("testthat", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
#devtools::install_github("tidyverse/tidyverse", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
#devtools::install_github("tidyverse/tidyr", INSTALL_opts = c('--no-lock')
#devtools::install_github('rstudio/fontawesome')
#devtools::install_github('juba/rmdformats')
#install.packages('DT')
#install.packages('showtext')
xfun::pkg_attach(c("tidyverse", 'DT', 'showtext'))
# Play-count data for the top-1k songs.
# NOTE(review): schema inferred from usage below (columns user, plays,
# song_id plus song metadata) -- confirm against the RDS file.
all_data_top_1k <- readRDS("all_data_top_1k.rds")
# Attach song-level metadata to a table keyed by a song-id column.
# tbl: a tibble; left_join_col_name: unquoted name of its song-id column
# (defaults to song_id).
view_songinfo <- function(tbl, left_join_col_name=song_id){
  tbl %>% rename(
    song_id = {{left_join_col_name}}
  ) -> tbl
  # Deduplicated song-level metadata: drop the per-user columns and keep
  # one row per song_id.
  song_data_1k <- all_data_top_1k %>%
    ungroup() %>%
    select(-c(user, plays)) %>%
    distinct(song_id, .keep_all=TRUE)
  tbl %>% left_join(song_data_1k, by = 'song_id')
}
# Cosine similarity between two songs' user-play vectors. Users present on
# only one side count as zero plays (full join + replace_na). Returns a
# one-row tibble with the norms, dot product and cos_sim.
calc_cos_sim <- function(vec_x, vec_y){
  vec_x %>% ungroup() %>% select(user, plays) -> vec_x
  vec_y %>% ungroup() %>% select(user, plays) -> vec_y
  vec_x %>%
    full_join(vec_y, by = 'user') %>%
    replace_na(list(plays.x = 0, plays.y = 0)) %>%
    mutate(
      prod = plays.x * plays.y
    ) %>%
    summarise(
      norm.x = sqrt(sum(plays.x^2)),
      norm.y = sqrt(sum(plays.y^2)),
      dot_prod = sum(prod)
    ) %>%
    mutate(
      cos_sim = dot_prod / (norm.x * norm.y)
    )
}
# Wrapping function
# Rank every song in input_tbl by cosine similarity to song `song_id_x`,
# most similar first, then attach song metadata via view_songinfo().
generate_song_list_by_cos_sim <- function(song_id_x, input_tbl){
  input_tbl -> tblf0
  tblf1 <- tblf0 %>%
    ungroup() %>%
    distinct(user, song_id, plays) %>%
    arrange(song_id, user)
  # Play vector of the reference song.
  tblf1 %>% filter(song_id == song_id_x) -> vector_x
  tblf1 %>%
    group_by(song_id) %>%
    group_modify( ~ calc_cos_sim(vector_x, .)) %>%
    arrange(-cos_sim) %>% view_songinfo()
}
|
43ea40b2f763d84357fdb2233781e6d52639df9e
|
2d47450c41c23f6d008bfca5bf08d3161bb13491
|
/tests/sweetpotatobase/test_sp_phenotypes_search_post.R
|
a8ae2469a1ff24f1462ee8290e83f8a457684e50
|
[] |
no_license
|
khaled-alshamaa/brapi
|
2c14727d65fc82a77d243bdc40c10b67955a04d5
|
5f2a5caa48d72e2412ead128b9143cc1882a060c
|
refs/heads/master
| 2022-03-21T20:19:07.470329
| 2019-10-16T15:51:00
| 2019-10-16T15:51:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
test_sp_phenotypes_search_post.R
|
# Integration tests for ba_phenotypes_search_post() against the live
# SweetPotatoBase BrAPI endpoint. These require network access and are slow.
context("sp phenotypes_search_post")

# Connection object for the sweetpotatobase database.
con <- ba_db()$sweetpotatobase

test_that(" are present", {
  #skip("Very slow implementation")
  res <- ba_phenotypes_search_post(con = con, pageSize = 1, studyDbIds = "136")
  expect_true(nrow(res) > 1)
})

test_that(" out formats work", {
  #skip("Very slow implementation")
  out <- ba_phenotypes_search_post(con = con, pageSize = 1, studyDbIds = "136",
                                   rclass = "tibble")
  expect_true("tbl_df" %in% class(out))
})
|
60a0078d1ce2712a057afd3a9b1858f62a6f9a84
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/HDclust/man/HMM-class.Rd
|
7d4fc22a4230f9984255d64906eb0812229f7e38
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,791
|
rd
|
HMM-class.Rd
|
\docType{class}
\name{HMM-class}
\alias{HMM-class}
\alias{HMM}
\alias{show,HMM-method}
\alias{getPrenumst,HMM-method}
\alias{getHmmParam,HMM-method}
\title{Class "HMM" to represent parameters associated with a variable block in the HMM-VB}
\description{
An S4 class to represent the model parameters associated with one variable block in the HMM-VB.
For brevity, we call this part of HMM-VB, specific to a particular variable block, an "HMM" for the block. New instances of the class are created by \code{\link{hmmvbTrain}}.
}
\section{Methods}{
\itemize{
\item \bold{show} signature(object = "HMM") : show parameters of the HMM object.
\item \bold{getPrenumst} signature(object = "HMM") : accessor for 'prenumst' slot.
\item \bold{getHmmParam} signature(object = "HMM") : accessor for parameters of the HMM object. This function outputs a list with means, covariance matrices, inverse covariance matrices and logarithms of the determinants of the covariance matrices for all states of the HMM.
}}
\section{Slots}{
\describe{
\item{\code{dim}}{Dimensionality of the data in HMM.}
\item{\code{numst}}{An integer vector specifying the number of HMM states.}
\item{\code{prenumst}}{An integer vector specifying the number of states of previous
variable block HMM.}
\item{\code{a00}}{Probabilities of HMM states.}
\item{\code{a}}{Transition probability matrix from states in the previous variable block
to the states in the current one.}
\item{\code{mean}}{A numerical matrix with state means. \emph{k}th row corresponds to the
\emph{k}th state.}
\item{\code{sigma}}{A list containing the covariance matrices of states.}
\item{\code{sigmaInv}}{A list containing the inverse covariance matrices of states.}
\item{\code{sigmaDetLog}}{A vector with \eqn{log(|sigma|)} for each state.}
}}
|
b36f338f02313061803656f230bb5c5bceb30b79
|
31b5ffe76f689c8ad96877a81b40fecb698f2de2
|
/R/skellam.reg.R
|
d2ab937969301fd5b0609da27982538449cd7d80
|
[] |
no_license
|
cran/skellam
|
424ededa24b7ee726221f6e5540c2a445782ce36
|
b1d3322e64490ac2d7c7e0586f291686951dcc8f
|
refs/heads/master
| 2020-06-05T18:47:43.203205
| 2016-12-15T14:57:06
| 2016-12-15T14:57:06
| 17,719,412
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
r
|
skellam.reg.R
|
#' @export
skellam.reg <- function(y, x) {
  # Skellam regression: each y[i] is modelled as a Skellam-distributed
  # difference of two Poisson counts whose rates (lambda1, lambda2) are
  # log-linear in the covariates x. Returns a list with the maximised
  # log-likelihood and one coefficient table per rate (estimate, standard
  # error, Wald statistic, chi-square p-value).
  n <- length(y)
  x <- stats::model.matrix( ~., data.frame(x) )
  p <- dim(x)[2]
  # Minus log-likelihood of the Skellam model; pa stacks the coefficient
  # vectors b1 (for lambda1) and b2 (for lambda2).
  skelreg <- function(pa) {
    b1 <- pa[1:p] ; b2 <- pa[ -c(1:p) ]
    a1 <- x %*% b1 ; a2 <- x %*% b2
    lam1 <- exp(a1) ; lam2 <- exp(a2)
    a <- 2 * sqrt(lam1 * lam2)
    sum(lam1 + lam2) + 0.5 * sum(y * (a1 - a2) ) - sum( log( besselI(a, y) ) )
  }
  # Silence optimiser warnings for this call only. The previous code set
  # options(warn = -1) and never restored it, leaking the setting into the
  # caller's session; on.exit() guarantees restoration even on error.
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  # Two nlm passes (random start, then refinement) followed by optim to
  # obtain the Hessian for the standard errors.
  mod <- stats::nlm(skelreg, stats::rnorm(2 * p), iterlim = 5000 )
  mod <- stats::nlm(skelreg, mod$estimate, iterlim = 5000 )
  mod <- stats::optim(mod$estimate, skelreg, hessian = TRUE, control = list(maxit = 5000) )
  b1 <- mod$par[1:p] ; b2 <- mod$par[ -c(1:p) ]
  # Asymptotic standard errors from the inverse observed information.
  s <- diag( solve(mod$hessian) )
  s1 <- sqrt(s[1:p]) ; s2 <- sqrt(s[ -c(1:p) ])
  param1 <- cbind(b1, s1, b1 / s1, stats::pchisq( (b1 / s1)^2, 1, lower.tail = FALSE) )
  param2 <- cbind(b2, s2, b2 / s2, stats::pchisq( (b2 / s2)^2, 1, lower.tail = FALSE) )
  rownames(param1) <- rownames(param2) <- colnames(x)
  colnames(param1) <- colnames(param2) <- c("Estimate", "Std. Error", "Wald value", "p-value")
  list(loglik = -mod$value, param1 = param1, param2 = param2)
}
|
d923ade64fd8845b00b7fddfdac243cb3a37487c
|
8cee9f0ff3e587075e3a588477cdf0a785ed3e66
|
/install.R
|
9ba2e46d6338001e9c08289af077fd9fb02a1228
|
[
"Apache-2.0"
] |
permissive
|
n8mauer/VisionEval
|
2ae15902cebbf63573c232cf32dbc1174e842e92
|
35b7398a10d6f2793a199141bb4f09b5e71dff3b
|
refs/heads/master
| 2020-03-19T07:31:16.838580
| 2018-04-16T17:24:01
| 2018-04-16T17:24:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,952
|
r
|
install.R
|
#Download and Install VisionEval Resources
#library(httr)
#If working within a proxy server, run the following commands to enable install from GitHub
#set_config(use_proxy(url="proxynew.odot.state.or.us", port=8080))
#set_config( config( ssl_verifypeer = 0L ) )
# Download and install the required libraries and their dependencies
install.packages(c("curl","devtools", "roxygen2", "stringr", "knitr", "digest"), dependencies = TRUE)
install.packages(c("shiny", "shinyjs", "shinyFiles", "data.table", "DT", "shinyBS", "future", "testit", "jsonlite", "shinyAce", "envDocument", "rhandsontable","shinyTree"), dependencies = TRUE)
devtools::install_github("tdhock/namedCapture")
# Bioconductor packages (rhdf5/zlibbioc) installed via the legacy biocLite
# bootstrap script.
source("https://bioconductor.org/biocLite.R")
biocLite(c("rhdf5","zlibbioc"), suppressUpdates=TRUE)
#Download and install the required VE framework package
devtools::install_github("gregorbj/VisionEval/sources/framework/visioneval")
#Download and install the required VE modules for VERPAT and VERSPM
# NOTE(review): VETransportSupplyUse appears twice in this list; the second
# call is redundant (harmless, but could be removed).
devtools::install_github("gregorbj/VisionEval/sources/modules/VESyntheticFirms")
devtools::install_github("gregorbj/VisionEval/sources/modules/VESimHouseholds")
devtools::install_github("gregorbj/VisionEval/sources/modules/VELandUse")
devtools::install_github("gregorbj/VisionEval/sources/modules/VETransportSupply")
devtools::install_github("gregorbj/VisionEval/sources/modules/VETransportSupplyUse")
devtools::install_github("gregorbj/VisionEval/sources/modules/VEHouseholdVehicles")
devtools::install_github("gregorbj/VisionEval/sources/modules/VEHouseholdTravel")
devtools::install_github("gregorbj/VisionEval/sources/modules/VETransportSupplyUse")
devtools::install_github("gregorbj/VisionEval/sources/modules/VERoadPerformance")
devtools::install_github("gregorbj/VisionEval/sources/modules/VEEnergyAndEmissions")
devtools::install_github("gregorbj/VisionEval/sources/modules/VETravelCost")
devtools::install_github("gregorbj/VisionEval/sources/modules/VEReports")
|
b5ed4e19cd8f41eb554c38873458c098f7776d5d
|
348d080bff29e2bd962a0e588d1d2e59024e928a
|
/man/scAlignOptions.Rd
|
c252102dfb971185d44b41e639e1ce904fd39df7
|
[
"Apache-2.0"
] |
permissive
|
zorrodong/scAlign
|
5975cfb3f0aeb75d18ef6040c24635bf87886cee
|
4100daa165e79b74cd4cd0be646950b38b08a769
|
refs/heads/master
| 2020-07-05T05:38:24.571835
| 2019-08-09T18:43:59
| 2019-08-09T18:43:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,542
|
rd
|
scAlignOptions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scAlignClass.R
\name{scAlignOptions}
\alias{scAlignOptions}
\title{Set training options}
\usage{
scAlignOptions(steps = 15000, batch.size = 150,
learning.rate = 1e-04, log.every = 5000, architecture = "large",
batch.norm.layer = TRUE, dropout.layer = TRUE, num.dim = 32,
perplexity = 30, betas = 0, norm = TRUE, full.norm = FALSE,
early.stop = FALSE, walker.loss = TRUE, reconc.loss = FALSE,
walker.weight = 1, classifier.weight = 1, classifier.delay = NA,
gpu.device = "0", seed = 1234)
}
\arguments{
\item{steps}{(default: 15000) Number of training iterations for neural networks.}
\item{batch.size}{(default: 150) Number of input samples per training batch.}
\item{learning.rate}{(default: 1e-4) Initial learning rate for ADAM.}
\item{log.every}{(default: 5000) Number of steps before saving results.}
\item{architecture}{(default: "large") Network function name for scAlign.}
\item{batch.norm.layer}{(default: TRUE) Include batch normalization in the network structure.}
\item{dropout.layer}{(default: TRUE) Include dropout in the network.}
\item{num.dim}{(default: 32) Number of dimensions for joint embedding space.}
\item{perplexity}{(default: 30) Determines the neighborhood size for each sample.}
\item{betas}{(default: 0) Sets the bandwidth of the gaussians to be the same if > 0. Otherwise per cell beta is computed.}
\item{norm}{(default: TRUE) Normalize the data mini batches while training scAlign (repeated).}
\item{full.norm}{(default: FALSE) Normalize the data matrix prior to scAlign (done once).}
\item{early.stop}{(default: FALSE) Early stopping during network training.}
\item{walker.loss}{(default: TRUE) Add walker loss to model.}
\item{reconc.loss}{(default: FALSE) Add reconstruction loss to model during alignment.}
\item{walker.weight}{(default: 1.0) Weight on walker loss component}
\item{classifier.weight}{(default: 1.0) Weight on classifier loss component}
\item{classifier.delay}{(default: NA) Delay classifier component of loss function until specific training step. Defaults to (2/3)*steps.}
\item{gpu.device}{(default: '0') Which gpu to use.}
\item{seed}{(default: 1234) Sets graph level random seed in tensorflow.}
}
\value{
Options data.frame
}
\description{
Defines parameters for optimizer and training procedure.
}
\examples{
options=scAlignOptions(steps=15000,
log.every=5000,
early.stop=FALSE,
architecture="large")
}
|
a353347c68267c2f2f367afcd96d73cdc6f67afe
|
4f163a508f9d79967f058a1b12d12312f68b330f
|
/run_analysis.R
|
038eee02c74ce877202b343b7c7c3d00f2a6fd26
|
[] |
no_license
|
eluciv/Getting-and-Cleaning-Data-Course-Project
|
a5594c2cfadadda41acf39d96e37c5deceb42977
|
cc381899b1095119e9c1f8b574db8d2f42b15c1a
|
refs/heads/master
| 2021-01-12T12:13:24.306200
| 2016-10-31T15:46:06
| 2016-10-31T15:46:06
| 72,370,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,608
|
r
|
run_analysis.R
|
library(dplyr)
# Build a tidy summary of the UCI HAR (Samsung accelerometer) dataset:
# merge the train and test splits, keep only mean/std features, attach
# activity labels and subject ids, and write the per-(activity, subject)
# feature means to ./tidy_data_set.txt.
#
# data_dir: directory containing the unzipped "UCI HAR Dataset" folder.
# Returns the summary data frame (also written to disk).
run_analysis <- function(data_dir = ".") {
  data_directory <- paste(data_dir, "/UCI HAR Dataset", sep = "")
  # Read one split (train or test): fixed-width feature matrix plus the
  # matching activity-id and subject-id files, joined into one data frame.
  read_and_prepare_data <- function(data_set_file_name, activity_id_file_name, subject_id_file_name, activity_labels, feature_names) {
    #Read train data set
    data_set <-
      read.fwf(
        data_set_file_name,
        header = FALSE,
        col.names = feature_names,
        widths = rep.int(16, length(feature_names)),
        stringsAsFactors = FALSE,
        colClasses = rep("numeric", length(feature_names))
      )
    # Collapse the runs of dots read.fwf puts into column names into
    # single underscores (e.g. "tBodyAcc.mean...X" -> "tBodyAcc_mean_X").
    names(data_set) <- gsub("[.]+", "_", names(data_set))
    #Read train activity ids
    data_activity_id <-
      read.csv(
        activity_id_file_name,
        header = FALSE,
        col.names = "activity_id",
        stringsAsFactors = FALSE
      )
    #Read train subject ids
    data_subject_id <-
      read.csv(
        subject_id_file_name,
        header = FALSE,
        col.names = "subject_id",
        stringsAsFactors = FALSE
      )
    data_set %>%
      #Leave only columns with means and standard deviations
      select(contains("_mean_"), contains("_std_")) %>%
      #Get data together
      bind_cols(data_subject_id) %>%
      bind_cols(data_activity_id) %>%
      #Add activity labels
      inner_join(activity_labels, by = "activity_id") %>%
      select(-activity_id)
  }
  #Read activity labels
  activity_labels <- read.csv(paste(data_directory, "/activity_labels.txt", sep = ""), sep = " ", header = FALSE, col.names = c("activity_id", "activity"), stringsAsFactors = FALSE)
  #Read feature names
  features <- read.csv(paste(data_directory, "/features.txt", sep = ""), sep = " ", header = FALSE, col.names = c("id", "name"), stringsAsFactors = FALSE)
  feature_names <- features$name
  #Read data
  train_data <-
    read_and_prepare_data(
      paste(data_directory, "/train/X_train.txt", sep = ""),
      paste(data_directory, "/train/y_train.txt", sep = ""),
      paste(data_directory, "/train/subject_train.txt", sep = ""),
      activity_labels,
      feature_names
    )
  test_data <-
    read_and_prepare_data(
      paste(data_directory, "/test/X_test.txt", sep = ""),
      paste(data_directory, "/test/y_test.txt", sep = ""),
      paste(data_directory, "/test/subject_test.txt", sep = ""),
      activity_labels,
      feature_names
    )
  #Union the datasets
  # NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
  # summarise(across(everything(), mean)) is the current equivalent.
  result <- train_data %>%
    bind_rows(test_data) %>%
    group_by(activity, subject_id) %>%
    summarise_each(funs(mean))
  write.table(result, file = "./tidy_data_set.txt")
  result
}
|
53512e6d7f049e217d4e6eae217c54791716befc
|
d7f4e9860534b2799c89d6bf29ac949e9131d19d
|
/func.R
|
be347d06af2ab3bf0f2d1209132fab1626782f53
|
[] |
no_license
|
cperez58/chem160module10
|
d2ba942c6c0608cfa28dda815eaa6d973c041250
|
7299f09ebfdc90aa29daff3d47414bfc213e1dde
|
refs/heads/master
| 2020-08-22T13:43:15.087107
| 2019-10-20T18:26:20
| 2019-10-20T18:26:20
| 216,407,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
func.R
|
# Objective function f(x) = -x^2 * exp(-3x); vectorized over x.
# Minimized over [0, 5] by the optimize() call below.
func <- function(x) {
  -x^2 * exp(-3 * x)
}
# Locate the minimum of func on the interval [0, 5].
opt=optimize(func,c(0.,5.))
# Print the x-location of the minimum and the objective value there.
cat(opt$minimum,opt$objective)
# Plot the function over the search interval and mark the minimum
# with a horizontal and a vertical reference line (col = 4, blue).
plot(func,0,5)
abline(h=opt$objective,col=4)
abline(v=opt$minimum,col=4)
|
38e6474f244670fbc3942b052e66d0abb4302872
|
bff946953e51145a5f7c85dbb0d9358d021e9c8d
|
/R/geom_signif.R
|
96ddca6d6e0bdf80cd94a888e9b782c6edfcee82
|
[] |
no_license
|
ZhonghuiGai/ggroup
|
cd4a66689707acc1743d89eb83a7b1700980c3d0
|
f561baa88f0db7a79d7b37cdb899ad506ddb2f84
|
refs/heads/main
| 2023-08-11T05:24:53.026446
| 2021-09-25T08:30:55
| 2021-09-25T08:30:55
| 380,489,489
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,687
|
r
|
geom_signif.R
|
#' Create significance layer
#'
#' @param comparisons A list of length-2 vectors. The entries in the vector are
#' either the names of 2 values on the x-axis or the 2 integers that
#' correspond to the index of the columns of interest.
#' @param test the name of the statistical test that is applied to the values of
#' the 2 columns (e.g. `t.test`, `wilcox.test` etc.). If you implement a
#' custom test make sure that it returns a list that has an entry called
#' `p.value`.
#' @param test.args additional arguments for the test method
#' @param annotations character vector with alternative annotations, if not null
#' test is ignored
#' @param map_signif_level Boolean value, if the p-value are directly written as
#' annotation or asterisks are used instead. Alternatively one can provide a
#' named numeric vector to create custom mappings from p-values to annotation:
#' For example: `c("***"=0.001, "**"=0.01, "*"=0.05)`.
#' Alternatively, one can provide a function that takes a numeric argument
#' (the p-value) and returns a string.
#' @param xmin,xmax numeric vector with the positions of the left and right
#' sides of the brackets, respectively
#' @param y_position numeric vector with the y positions of the brackets
#' @param size change the width of the lines of the bracket
#' @param textsize change the size of the text
#' @param family change the font used for the text
#' @param vjust move the text up or down relative to the bracket
#' @param margin_top numeric vector how much higher that the maximum value that
#' bars start as fraction of total height
#' @param step_increase numeric vector with the increase in fraction of total
#' height for every additional comparison to minimize overlap.
#' @param extend_line Numeric that allows to shorten (negative values) or extend
#' (positive value) the horizontal line between groups for each comparison;
#' defaults to 0.
#' @param tip_length numeric vector with the fraction of total height that the
#' bar goes down to indicate the precise column
#' @param parse If `TRUE`, the labels will be parsed into expressions and
#' displayed as described in `?plotmath`.
#' @param manual Boolean flag that indicates that the parameters are provided
#' with a data.frame. This option is necessary if one wants to plot different
#' annotations per facet.
#' @param na.rm If `FALSE` (the default), removes missing values with
#' a warning. If `TRUE` silently removes missing values.
#' @param orientation The orientation of the layer. The default (‘NA’)
#' automatically determines the orientation from the aesthetic mapping.
#' In the rare event that this fails it can be given explicitly by setting
#' 'orientation' to either "x" or "y"
#' @param ... other arguments passed on to `layer`. These are
#' often aesthetics, used to set an aesthetic to a fixed value, like
#' `color = "red"` or `size = 3`. They may also be parameters
#' to the paired geom/stat.
#'
#' @inheritParams ggplot2::layer
#'
#' @examples
#' \dontrun{
#' library(ggplot2)
#' library(ggsignif)
#'
#' ggplot(mpg, aes(class, hwy)) +
#' geom_boxplot() +
#' geom_signif(comparisons = list(
#' c("compact", "pickup"),
#' c("subcompact", "suv")
#' ))
#'
#' ggplot(mpg, aes(class, hwy)) +
#' geom_boxplot() +
#' geom_signif(
#' comparisons = list(
#' c("compact", "pickup"),
#' c("subcompact", "suv")
#' ),
#' map_signif_level = function(p) sprintf("p = %.2g", p)
#' )
#'
#' ggplot(mpg, aes(class, hwy)) +
#' geom_boxplot() +
#' geom_signif(
#' annotations = c("First", "Second"),
#' y_position = c(30, 40), xmin = c(4, 1), xmax = c(5, 3)
#' )
#' }
#'
#' @export
stat_signif <- function(mapping = NULL,
                        data = NULL,
                        position = "identity",
                        na.rm = FALSE,
                        show.legend = NA,
                        inherit.aes = TRUE,
                        comparisons = NULL,
                        test = "wilcox.test",
                        test.args = NULL,
                        annotations = NULL,
                        map_signif_level = FALSE,
                        y_position = NULL,
                        xmin = NULL,
                        xmax = NULL,
                        margin_top = 0.05,
                        step_increase = 0,
                        tip_length = 0.03,
                        size = 0.5,
                        textsize = 3.88,
                        family = "",
                        vjust = 0,
                        parse = FALSE,
                        manual = FALSE,
                        orientation = NA,
                        ...) {
  # Manual mode: positions/annotations come from the caller's data and
  # mapping. Fill in dummy x/y aesthetics so the required aesthetics of
  # StatSignif are always present.
  if (manual) {
    if (!is.null(data) & !is.null(mapping)) {
      if (!"x" %in% names(data)) mapping$x <- 1
      if (!"y" %in% names(data)) mapping$y <- 1
    } else {
      stop("If manual mode is selected you need to provide the data and mapping parameters")
    }
  }
  # Build the layer: StatSignif computes bracket coordinates and the
  # annotation text; the "signif" geom renders them.
  ggplot2::layer(
    stat = StatSignif, data = data, mapping = mapping, geom = "signif",
    position = position, show.legend = show.legend, inherit.aes = inherit.aes,
    params = list(
      comparisons = comparisons,
      test = test,
      test.args = test.args,
      annotations = annotations,
      map_signif_level = map_signif_level,
      y_position = y_position,
      xmin = xmin,
      xmax = xmax,
      margin_top = margin_top,
      step_increase = step_increase,
      tip_length = tip_length,
      size = size,
      textsize = textsize,
      family = family,
      vjust = vjust,
      parse = parse,
      manual = manual,
      na.rm = na.rm,
      orientation = orientation,
      ...
    )
  )
}
# Geom that draws a significance bracket: three line segments (two vertical
# tips plus a horizontal bar) and the annotation text next to the bar.
GeomSignif <- ggplot2::ggproto(
  "GeomSignif",
  ggplot2::Geom,
  # Each data row describes one segment; all rows share the same annotation.
  required_aes = c("x", "xend", "y", "yend", "annotation"),
  default_aes = ggplot2::aes(
    shape = 19,
    colour = "black",
    textsize = 3.88,
    angle = 0,
    hjust = 0.5,
    vjust = 0,
    alpha = NA,
    family = "",
    fontface = 1,
    lineheight = 1.2,
    linetype = 1,
    size = 0.5
  ),
  extra_params = c("na.rm", "orientation"),
  setup_params = function(data, params) {
    # Record whether the layer uses flipped (horizontal) aesthetics.
    params$flipped_aes <- ggplot2::has_flipped_aes(data, params)
    return(params)
  },
  # Significance brackets have no legend representation.
  draw_key = function(...) {
    grid::nullGrob()
  },
  draw_group = function(data,
                        panel_params,
                        coord,
                        parse = FALSE,
                        extend_line = 0,
                        flipped_aes = FALSE) {
    lab <- as.character(data$annotation)
    if (parse) {
      # Interpret annotations as plotmath expressions when parse = TRUE.
      lab <- parse_safe(as.character(lab))
    }
    coords <- coord$transform(data, panel_params)
    # Optionally lengthen (positive) or shorten (negative) the horizontal
    # bar; only applies to the standard 3-segment bracket.
    if (extend_line != 0 && nrow(coords) == 3) {
      # Flip the sign when the bracket runs right-to-left.
      if (coords[2, "x"] > coords[2, "xend"]) {
        extend_line <- -extend_line
      }
      # left vertical segment
      coords[1, "x"] <- coords[1, "x"] - extend_line
      coords[1, "xend"] <- coords[1, "xend"] - extend_line
      # horizontal line
      coords[2, "x"] <- coords[2, "x"] - extend_line
      coords[2, "xend"] <- coords[2, "xend"] + extend_line
      # right vertical segment
      coords[3, "x"] <- coords[3, "x"] + extend_line
      coords[3, "xend"] <- coords[3, "xend"] + extend_line
    }
    # coord_flip and flipped_aes cancel out; XOR of the two flags decides
    # whether the label is placed above (vertical layout) or beside
    # (horizontal layout) the bracket.
    clp_flag <- inherits(coord, "CoordFlip")
    if (!any(flipped_aes, clp_flag) || all(flipped_aes, clp_flag)) {
      text_x <- mean(c(coords$x[1], tail(coords$xend, n = 1)))
      text_y <- max(c(coords$y, coords$yend)) + 0.01
    } else {
      text_x <- max(c(coords$x, coords$xend)) + 0.01
      text_y <- mean(c(coords$y[1], tail(coords$yend, n = 1)))
      if (all(coords$angle == 0)) {
        # Rotate the label for horizontal brackets unless the user set an angle.
        coords$angle <- 270
      }
    }
    grid::gList(
      grid::textGrob(
        label = lab,
        x = text_x, # mean(c(coords$x[1], tail(coords$xend, n = 1))),
        y = text_y, # max(c(coords$y, coords$yend)) + 0.01,
        default.units = "native",
        hjust = coords$hjust, vjust = coords$vjust,
        rot = coords$angle,
        gp = grid::gpar(
          col = scales::alpha(coords$colour, coords$alpha),
          fontsize = coords$textsize * ggplot2::.pt,
          fontfamily = coords$family,
          fontface = coords$fontface,
          lineheight = coords$lineheight
        )
      ),
      grid::segmentsGrob(
        coords$x, coords$y,
        default.units = "native",
        coords$xend, coords$yend,
        gp = grid::gpar(
          col = scales::alpha(coords$colour, coords$alpha),
          lty = coords$linetype,
          lwd = coords$size * ggplot2::.pt
        )
      )
    )
  }
)
#' @rdname stat_signif
#' @export
geom_signif <- function(mapping = NULL,
                        data = NULL,
                        stat = "signif",
                        position = "identity",
                        na.rm = FALSE,
                        show.legend = NA,
                        inherit.aes = TRUE,
                        comparisons = NULL,
                        test = "wilcox.test",
                        test.args = NULL,
                        annotations = NULL,
                        map_signif_level = FALSE,
                        y_position = NULL,
                        xmin = NULL,
                        xmax = NULL,
                        margin_top = 0.05,
                        step_increase = 0,
                        extend_line = 0,
                        tip_length = 0.03,
                        size = 0.5,
                        textsize = 3.88,
                        family = "",
                        vjust = 0,
                        parse = FALSE,
                        manual = FALSE,
                        orientation = NA,
                        ...) {
  params <- list(na.rm = na.rm, ...)
  # Only forward the signif-specific parameters when the default stat is
  # used; a custom stat would reject them as unknown arguments.
  if (identical(stat, "signif")) {
    if (!is.null(data) & !is.null(mapping) & !manual) {
      warning("You have set data and mapping, are you sure that manual = FALSE is correct?")
    }
    if (manual) {
      # Manual mode requires annotations in the mapping, plus data.
      if (is.null(mapping$annotations)) {
        stop("Manual mode only works if with 'annotations' is provided in mapping")
      }
      if (!is.null(data) & !is.null(mapping)) {
        # Derive the required x aesthetic from xmin (mapped or argument)
        # when the caller did not map x explicitly.
        if (!"x" %in% names(mapping)) {
          if ("xmin" %in% names(mapping)) {
            mapping$x <- mapping$xmin
          } else {
            mapping$x <- xmin
          }
        }
        # Likewise derive y from y_position.
        if (!"y" %in% names(mapping)) {
          if ("y_position" %in% names(mapping)) {
            mapping$y <- mapping$y_position
          } else {
            mapping$y <- y_position
          }
        }
      } else {
        stop("If manual mode is selected you need to provide the data and mapping parameters")
      }
    }
    params <- c(
      params,
      list(
        comparisons = comparisons,
        test = test,
        test.args = test.args,
        annotations = annotations,
        map_signif_level = map_signif_level,
        y_position = y_position,
        xmin = xmin,
        xmax = xmax,
        margin_top = margin_top,
        step_increase = step_increase,
        extend_line = extend_line,
        tip_length = tip_length,
        size = size,
        textsize = textsize,
        family = family,
        vjust = vjust,
        parse = parse,
        manual = manual,
        orientation = orientation
      )
    )
  }
  ggplot2::layer(
    stat = stat,
    geom = GeomSignif,
    mapping = mapping,
    data = data,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = params
  )
}
# Stat that computes the bracket segment coordinates and the annotation
# text for each requested comparison (or each explicit xmin/xmax pair).
StatSignif <- ggplot2::ggproto(
  "StatSignif",
  ggplot2::Stat,
  required_aes = c("x", "y", "group"),
  extra_params = c("na.rm", "orientation"),
  setup_params = function(data, params) {
    # if(any(data$group == -1)|| any(data$group != data$x)){
    params$flipped_aes <- ggplot2::has_flipped_aes(data, params)
    data <- ggplot2::flip_data(data, params$flipped_aes)
    if (any(data$group == -1)) {
      stop("Can only handle data with groups that are plotted on the x-axis")
    }
    # Resolve a test given by name (e.g. "wilcox.test") to the function object.
    if (is.character(params$test)) params$test <- match.fun(params$test)
    # Keep the full (all-groups) data so compute_group can compare values
    # across groups, not just within the current one.
    params$complete_data <- data
    # Validate the mutually-exclusive positioning interfaces.
    if (is.null(params$xmin) != is.null(params$xmax) || length(params$xmin) != length(params$xmax)) {
      stop("If xmin or xmax is set, the other one also needs to be set and they need to contain the same number of values")
    }
    if (!is.null(params$xmin) && !is.null(params$comparisons)) {
      stop("Set either the xmin, xmax values or the comparisons")
    }
    if (!is.null(params$xmin) && is.null(params$y_position)) {
      stop("If xmin, xmax are defined also define y_position")
    }
    # Recycle scalar parameters to one value per comparison/bracket.
    if (!is.null(params$y_position) && length(params$y_position) == 1) {
      params$y_position <- rep(params$y_position, max(length(params$comparisons), length(params$xmin), 1))
    }
    if (length(params$margin_top) == 1) params$margin_top <- rep(params$margin_top, max(length(params$comparisons), length(params$xmin), 1))
    if (length(params$step_increase) == 1) params$step_increase <- rep(params$step_increase, max(length(params$comparisons), length(params$xmin), 1))
    # Each bracket has two tips, hence the factor/each of 2 below.
    if (length(params$tip_length) == 1) params$tip_length <- rep(params$tip_length, max(length(params$comparisons), length(params$xmin), 1) * 2)
    if (length(params$tip_length) == length(params$comparisons)) params$tip_length <- rep(params$tip_length, each = 2)
    if (length(params$tip_length) == length(params$xmin)) params$tip_length <- rep(params$tip_length, each = 2)
    if (!is.null(params$annotations) && length(params$annotations) == 1) {
      params$annotations <- rep(params$annotations, max(length(params$comparisons), length(params$xmin), 1))
    }
    if (!is.null(params$annotations) && length(params$annotations) != max(length(params$comparisons), length(params$xmin), 1)) {
      stop(paste0(
        "annotations contains a different number of elements (", length(params$annotations),
        ") than comparisons or xmin (", max(length(params$comparisons), length(params$xmin), 1), ")."
      ))
    }
    # map_signif_level = TRUE selects the conventional asterisk cutoffs;
    # an un-named numeric vector of up to 3 cutoffs gets default names.
    if (all(is.logical(params$map_signif_level)) && all(params$map_signif_level == TRUE)) {
      params$map_signif_level <- c("***" = 0.001, "**" = 0.01, "*" = 0.05)
    } else if (is.numeric(params$map_signif_level)) {
      if (is.null(names(params$map_signif_level))) {
        if (length(params$map_signif_level) <= 3) {
          names(params$map_signif_level) <- tail(c("***", "**", "*"), n = length(params$map_signif_level))
        } else {
          stop('Cannot handle un-named map for significance values, please provide in the following format: c("***"=0.001, "**"=0.01, "*"=0.05)')
        }
      }
    }
    return(params)
  },
  compute_group = function(data,
                           scales,
                           comparisons,
                           test,
                           test.args,
                           complete_data,
                           annotations,
                           map_signif_level,
                           y_position,
                           xmax,
                           xmin,
                           margin_top,
                           step_increase,
                           tip_length,
                           manual,
                           flipped_aes = FALSE) {
    data <- ggplot2::flip_data(data, flipped_aes)
    scales <- ggplot2::flip_data(scales, flipped_aes)
    # Columns in `data` (manual mode) override the layer-level parameters.
    if ("annotations" %in% colnames(data)) {
      annotations <- data[["annotations"]]
    }
    if ("y_position" %in% colnames(data)) {
      y_position <- data[["y_position"]]
    }
    if ("xmax" %in% colnames(data)) {
      xmax <- data[["xmax"]]
    }
    if ("xmin" %in% colnames(data)) {
      xmin <- data[["xmin"]]
    }
    if ("map_signif_level" %in% colnames(data)) {
      map_signif_level <- data[["map_signif_level"]]
    }
    if ("tip_length" %in% colnames(data)) {
      tip_length <- rep(data[["tip_length"]], each = 2)
    }
    if (!is.null(comparisons)) {
      # One bracket per comparison; `i` counts comparisons across the lapply.
      i <- 0
      result <- lapply(comparisons, function(comp) {
        i <<- i + 1
        # All entries in group should be the same
        if (scales$x$map(comp[1]) == data$group[1] | manual) {
          test_result <- if (is.null(annotations)) {
            # Pull the y values of both groups (restricted to this panel)
            # and run the statistical test on them.
            group_1 <- complete_data$y[complete_data$x == scales$x$map(comp[1]) &
              complete_data$PANEL == data$PANEL[1]]
            group_2 <- complete_data$y[complete_data$x == scales$x$map(comp[2]) &
              complete_data$PANEL == data$PANEL[1]]
            p_value <- do.call(test, c(list(group_1, group_2), test.args))$p.value
            if (is.numeric(map_signif_level)) {
              # Map the p-value onto the tightest matching cutoff label.
              temp_value <- names(which.min(map_signif_level[which(map_signif_level > p_value)]))
              if (is.null(temp_value)) {
                "NS."
              } else {
                temp_value
              }
            } else if (is.function(map_signif_level)) {
              map_signif_level(p_value)
            } else {
              # No mapping requested: print the p-value itself.
              if (is.numeric(p_value)) {
                if (p_value < .Machine$double.eps) {
                  sprintf("p < %.2e", .Machine$double.eps)
                } else {
                  as.character(sprintf("%.2g", p_value))
                }
              } else {
                as.character(p_value)
              }
            }
          } else {
            annotations[i]
          }
          # Vertical placement: above the data (or the given y_position),
          # stepped upward per comparison to reduce overlap.
          y_scale_range <- (scales$y$range$range[2] - scales$y$range$range[1])
          if (is.null(y_position)) {
            y_pos <- scales$y$range$range[2] + y_scale_range * margin_top[i] + y_scale_range * step_increase[i] * (i - 1)
          } else {
            y_pos <- y_position[i] + y_scale_range * margin_top[i] + y_scale_range * step_increase[i] * (i - 1)
          }
          # Three rows = left tip, horizontal bar, right tip.
          data.frame(
            x = c(min(comp[1], comp[2]), min(comp[1], comp[2]), max(comp[1], comp[2])),
            xend = c(min(comp[1], comp[2]), max(comp[1], comp[2]), max(comp[1], comp[2])),
            y = c(y_pos - y_scale_range * tip_length[(i - 1) * 2 + 1], y_pos, y_pos),
            yend = c(y_pos, y_pos, y_pos - y_scale_range * tip_length[(i - 1) * 2 + 2]),
            annotation = test_result, group = paste(c(comp, i), collapse = "-")
          )
        }
      })
      df <- do.call(rbind, result)
    } else {
      # Explicit xmin/xmax interface: only emit the brackets once (for the
      # first group of the first x position), or always in manual mode.
      if ((data$x[1] == min(complete_data$x) & data$group[1] == min(complete_data$group)) | manual) {
        y_scale_range <- (scales$y$range$range[2] - scales$y$range$range[1])
        # Character positions are mapped through the x scale.
        if (is.character(xmin)) {
          xmin <- scales$x$map(xmin)
        }
        if (is.character(xmax)) {
          xmax <- scales$x$map(xmax)
        }
        if ("expression" %in% class(annotations)) {
          stop("annotations must be a character vector. To use plotmath set parse=TRUE.")
        }
        # Segment rows follow the same left-tip / bar / right-tip layout,
        # vectorized over all brackets at once.
        df <- data.frame(
          x = c(xmin, xmin, xmax),
          xend = c(xmin, xmax, xmax),
          y = c(
            y_position - y_scale_range * tip_length[seq_len(length(tip_length)) %% 2 == 1],
            y_position,
            y_position
          ),
          yend = c(
            y_position,
            y_position,
            y_position - y_scale_range * tip_length[seq_len(length(tip_length)) %% 2 == 0]
          ),
          annotation = rep(annotations, times = 3), group = if (manual) {
            rep(data$group, times = 3)
          } else {
            rep(seq_along(xmin), times = 3)
          }
        )
      } else {
        df <- NULL
      }
    }
    if (!is.null(df)) {
      # Restore the original orientation before handing data to the geom.
      df$flipped_aes <- flipped_aes
      df <- ggplot2::flip_data(df, flipped_aes)
    }
    return(df)
  }
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.