blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3688f90e6a6ff0008fec30ffe5a75cd27fc11849
|
a0d43f26abeafbd8c159b9afbfca2e7582636092
|
/Udemy/ajuda.R
|
f40591e2740ce45b918012824df4e50aa4bce5b9
|
[
"MIT"
] |
permissive
|
tarsoqueiroz/Rlang
|
f22038a0ada392d641cafecee1a0a91ba8574110
|
b2d4fdd967ec376fbf9ddb4a7250c11d3abab52e
|
refs/heads/master
| 2021-06-02T11:47:56.188320
| 2021-04-09T20:37:38
| 2021-04-09T20:37:38
| 132,541,417
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
ajuda.R
|
# Getting help from inside R
help.start() # open R's general HTML help system
help("*")    # help page for the `*` operator
help(mean)   # help page for a named function
help.search("mean") # full-text search across installed help pages
?sd # same as help
??sd # same as help.search
apropos("help") # names of visible objects containing "help"
apropos("mean")
# runnable usage examples
example(mean)# run the examples from mean's help page
example(plot)
|
f134373d6e58cff6f035367fb47c18fba741aa38
|
8af238c144d20c1060fd670e091364fffc910df3
|
/draw_every_species.R
|
6412b3dfec23ba6b26d1501a0a7652d538cc6bb0
|
[] |
no_license
|
AfredComma/bioinfor_sth
|
0c03da446a4f44cbe8be79b06e281f7bcb9b917f
|
0be0eee4d676636acc8791a1b6cdb8d719a4f749
|
refs/heads/master
| 2021-04-09T15:03:54.292192
| 2019-10-28T06:03:45
| 2019-10-28T06:03:45
| 125,479,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 930
|
r
|
draw_every_species.R
|
# Draw one boxplot of relative abundance per species, grouped by time x
# condition. Inputs: a species x sample abundance table and a sample map
# whose "Types" column encodes "<time>_<condition>".
setwd("D:/BaiduNetdiskDownload/")
df <- read.csv("Species_merge_abundance.tsv", sep = '\t', row.names = 1, check.names = FALSE)
df2 <- as.data.frame(t(df))  # transpose: rows = samples, cols = species
dfmap <- read.csv("merge_stages_map.tsv", sep = '\t')
# Split "Types" ("<time>_<cl>") into its two components.
dfmap$time <- sapply(dfmap$Types, FUN = function(x) strsplit(as.vector(x), '_')[[1]][1])
dfmap$cl <- sapply(dfmap$Types, FUN = function(x) strsplit(as.vector(x), '_')[[1]][2])
rownames(dfmap) <- dfmap$Sample
species_names <- colnames(df2)
for (i in seq_len(ncol(df))) {  # FIX: seq_len is safe when ncol(df) == 0
  # Sanitize the species name so it is usable as a file name.
  fname <- gsub('/', '_', gsub(' ', '_', species_names[i]))
  # FIX: the original bound this to `c`, shadowing base::c.
  meta <- dfmap
  d <- cbind(meta, df2[rownames(meta), ][, i])
  # Assumes dfmap has exactly the columns Sample, Types (+ time, cl added
  # above) in that order -- TODO confirm against merge_stages_map.tsv.
  colnames(d) <- c("Sample", "types", "time", "cl", "value")
  d$time <- factor(d$time, levels = c("4", "8", "12", "16", "20"))
  png(filename = paste0(fname, '.png'), width = 900, height = 700)
  boxplot(value ~ time * cl, data = d, col = c("slateblue1", "tomato"),
          boxwex = 0.4, ylab = "Relative abundance")
  # Vertical guide lines between time groups. FIX: use a distinct loop
  # variable; the original reused `i`, clobbering the outer loop counter.
  for (v in seq(0.5, 20, 5)) {
    abline(v = v, lty = 1, col = "grey")
  }
  dev.off()
}
|
f21fbad59010296225ec3bf71fc8290678d96501
|
56a98c60765e9c2df99061666760285d1a492c29
|
/srs-cran/src/run/trainmodels/RunRandomForestSignals.R
|
9f4f6e798b9d9e46422b2925eb4a0bfdc832ce90
|
[] |
no_license
|
ajinkya-github/stocksimulation
|
52263a7ab03031b9f426751e05871f70cdb0b871
|
1ffc092495534c58e42f3338e05fb31f58a611f2
|
refs/heads/master
| 2021-05-05T15:41:25.320326
| 2018-01-14T10:01:20
| 2018-01-14T10:01:20
| 117,318,030
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,221
|
r
|
RunRandomForestSignals.R
|
# Sources the technical-indicator, persistence, ETL and random-forest model
# helper scripts for the stock-signal pipeline, timing how long this takes.
# Author: ajinkya
###############################################################################
library(randomForest)
library(quantmod)
library(TTR)
library(tseries)
closeAllConnections()
memory.limit(size=2566)  # Windows-only; warns and is ignored elsewhere
rm(list = ls())  # NOTE(review): wipes the whole workspace; fine for a standalone script only
now1 <- Sys.time()
tickerSymbol <- "IBM"
source("/Users/ajinkyaingale/srs-cran/src/utils/RemoveAll.R")
source("/Users/ajinkyaingale/srs-cran/src/technicalindicators/TickerBBandsSignals.R")
source("/Users/ajinkyaingale/srs-cran/src/technicalindicators/TickerDailyReturns.R")
source("/Users/ajinkyaingale/srs-cran/src/technicalindicators/TickerMultiSmaSignals.R")
# FIX: path said "src-cran" while every other script (and the repository
# layout itself) lives under "srs-cran" -- this source() could never succeed.
source("/Users/ajinkyaingale/srs-cran/src/technicalindicators/TickerRsiSignals.R")
source("/Users/ajinkyaingale/srs-cran/src/technicalindicators/TickerSmaSignals.R")
source("/Users/ajinkyaingale/srs-cran/src/utils/IDOperator.R")
source("/Users/ajinkyaingale/srs-cran/src/persistence/CsvPersistence.R")
source("/Users/ajinkyaingale/srs-cran/src/etl/Load.R")
source("/Users/ajinkyaingale/srs-cran/src/models/randomforest/RandomForestModelSignals.R")
source("/Users/ajinkyaingale/srs-cran/src/run/testmodels/PredictTickerSignals.R")
now2 <- Sys.time()
# FIX: spell out `units=`; the original `unit=` worked only via partial matching.
print(difftime(now2, now1, units="sec"))
|
1aeab333fcb591bbd9328d4c8392970dff623e12
|
0c0cba7db4fb25c26dbd5b61a0857772e3984cf3
|
/gengraphs.R
|
4b8cea346ea70cdca3de64f453176531327fb1dc
|
[
"CC-BY-4.0"
] |
permissive
|
b4winckler/lorenz-empty
|
b92fec4d6cac399daf7217ebeac6c6386b2eb72f
|
c77a4621aef59077c5d0680f73f6bc84bc4a16ef
|
refs/heads/master
| 2021-01-22T23:26:42.393137
| 2014-06-01T17:47:43
| 2014-06-01T17:47:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,599
|
r
|
gengraphs.R
|
library(ggplot2)

# All input is read from, and all output written to, the data/ directory.
path <- function(fname) file.path('data', fname)

df <- read.table(path('empty.txt'), header = TRUE)

# Log cross-ratio of four points: the hyperbolic length of [b, c] inside [a, d].
hyp <- function(a, b, c, d) log((c - a) * (d - b) / (b - a) / (d - c))

# Open a landscape-A4 PDF device.
pdf.a4r <- function(file, ...)
  pdf(file, paper = 'a4r', width = 16.54, height = 11.69, ...)

# --- Hyperbolic length of the return interval ---
pdf.a4r(path('hyperbolic.pdf'))
print(qplot(c, hyp(vconj, p, q, u), data = df, geom = 'line',
            main = "Hyperbolic length of return interval inside critical values"))
dev.off()

# --- Relative lengths of L and R in two normalizations ---
pdf.a4r(path('relative_length_L_R.pdf'))
dg <- data.frame(c = rep(df$c, 2),
                 ratio = c((df$q - df$c) / df$u, (df$c - df$p) / (1 - df$vconj)),
                 side = rep(c('R/u', 'L/v'), each = nrow(df)))
print(qplot(c, ratio, data = dg, geom = 'line', col = side,
            main = "Relative length of R in [0,u] and L in [vconj,1]"))
# Reuse the same frame with the second pair of ratios.
dg$ratio <- c((df$c - df$p) / (1 - df$c), (df$q - df$c) / df$c)
dg$side <- rep(c('L/[c,1]', 'R/[0,c]'), each = nrow(df))
print(qplot(c, ratio, data = dg, geom = 'line', col = side, log = "y",
            main = "Relative length of L in [c,1] and R in [0,c]"))
dev.off()

# --- Distortion of the two branches of the first-return map ---
pdf.a4r(path('distortion.pdf'))
dg <- data.frame(c = rep(df$c, 2),
                 distortion = c(df$dist_left, df$dist_right),
                 branch = rep(c('left', 'right'), each = nrow(df)))
print(qplot(c, distortion, data = dg, geom = 'line', col = branch,
            main = "Distortion of left and right branch of first-return map"))
dev.off()

# --- Critical value of the renormalization, with the diagonal for reference ---
pdf.a4r(path('crenorm.pdf'))
print(qplot(c, (c - p) / (q - p), data = df, geom = 'line',
            main = "Critical value of the renormalization") +
        geom_abline(intercept = 0, slope = 1, col = 'gray'))
dev.off()
|
6c61da620f33c0690e31aa1031b34a36afd51f3e
|
d187ba2ef23e622dd8ed63902e42233e5ebb352f
|
/after_CP.r
|
59a3ab8628c0b61ca9d99ca057e4cc3182409486
|
[
"MIT"
] |
permissive
|
ErasmusMC-Bioinformatics/KREAP
|
5ef2fe6813b0240029a42e7c7bebcb76754e2c57
|
c29c895af164359bd67fd5ad8bd4a8c6b3d7a7db
|
refs/heads/master
| 2021-03-27T16:30:29.956812
| 2018-05-26T07:43:59
| 2018-05-26T07:43:59
| 93,743,830
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,689
|
r
|
after_CP.r
|
# Post-processing for CellProfiler output (.cpout): locate the largest
# vertical (Y) gap between detected nuclei in the first image, classify
# every image's nuclei as inside/outside that gap, and write per-image
# plots plus summary, growth and percentage tables.
library(ggplot2)
library(reshape2)
args <- commandArgs(trailingOnly = TRUE)
# inputfile = "D:/wd/wur/WellG07.cpout"  # debug leftover; args[1] below is authoritative
inputfile = args[1]
plot_file_names = unlist(strsplit(args[2], ","))
outputdir = args[3]
setwd(outputdir)
inputdata = read.table(inputfile, sep="\t", header=TRUE, fill=TRUE, comment.char="")
cpout = inputdata[inputdata$ImageNumber == 1,]
# ---------------------- find biggest gap in first image ----------------------
# Distinct nucleus-centre Y positions (table() sorts them ascending) and counts.
y_freq = data.frame(table(cpout$AreaShape_Center_Y))
names(y_freq) = c("y", "freq")
y_freq$y = as.numeric(as.character(y_freq$y))
y_freq$freq = as.numeric(y_freq$freq)
biggest_gap_size = 0
biggest_gap_start = 0
biggest_gap_start_index = 0
biggest_gap_end = 0
biggest_gap_end_index = 0
# Scan consecutive Y positions for the widest jump.
for(i in 1:(nrow(y_freq)-1)){
  gap_size = y_freq[i+1,]$y - y_freq[i,"y"]
  if(gap_size > biggest_gap_size){
    biggest_gap_size = gap_size
    biggest_gap_start = y_freq[i,"y"]
    biggest_gap_start_index = i
    biggest_gap_end_index = i+1
    # FIX: the end coordinate is row i+1; the original stored y_freq[i,"y"]
    # (masked only because the value is recomputed after the loops below).
    biggest_gap_end = y_freq[i+1,"y"]
  }
}
# Extend the gap outwards while the neighbouring Y positions are themselves
# separated by more than 1 pixel (absorbing adjacent smaller gaps).
# FIX: bounds guards keep the indices inside the table; the original crashed
# when the biggest gap touched the first or last row.
while(biggest_gap_start_index > 1 &&
      (y_freq[biggest_gap_start_index,"y"] - y_freq[biggest_gap_start_index-1, "y"]) > 1){
  biggest_gap_start_index = biggest_gap_start_index - 1
}
biggest_gap_start = y_freq[biggest_gap_start_index,"y"]
while(biggest_gap_end_index < nrow(y_freq) &&
      y_freq[biggest_gap_end_index + 1,"y"] - y_freq[biggest_gap_end_index,"y"] > 1){
  biggest_gap_end_index = biggest_gap_end_index + 1
}
biggest_gap_end = y_freq[biggest_gap_end_index,"y"]
# ---------- for every image, find how many are inside the gap, plot them ----------
image_numbers = unique(inputdata$ImageNumber)
number_of_images = length(image_numbers)
result = data.frame(image=1:number_of_images, inside=1:number_of_images, outside=1:number_of_images, total=1:number_of_images)
# NOTE(review): indexing result[i,] by image number assumes ImageNumber
# values are exactly 1..N -- confirm against the CellProfiler export.
for(i in image_numbers){
  cpout = inputdata[inputdata$ImageNumber == i,]
  inside_rows = cpout$AreaShape_Center_Y >= biggest_gap_start & cpout$AreaShape_Center_Y <= biggest_gap_end
  inside = sum(inside_rows)
  result[i,"inside"] = inside
  total = nrow(cpout)
  result[i,"total"] = total
  outside = total - inside
  result[i,"outside"] = outside
  # Class label carries the class count so it shows up in the plot legend.
  cpout$col = factor(ifelse(inside_rows, paste("inside -", inside), paste("outside -", outside)))
  p = ggplot(cpout, aes(AreaShape_Center_X, AreaShape_Center_Y))
  p = p + geom_point(aes(colour = col)) #+ scale_colour_manual(values=c("red", "blue"))
  p = p + geom_rect(xmin = 0, xmax = Inf, ymin = biggest_gap_start, ymax = biggest_gap_end, fill = "red", alpha = 0.0002)
  p = p + ggtitle(paste("Nuclei_", plot_file_names[i], " - " , total, sep=""))
  png(paste("Nuclei_", plot_file_names[i], ".png", sep=""))
  print(p)
  dev.off()
  # Strip the " - <count>" suffix before writing the per-nucleus table.
  cpout$col = gsub(" - .*", "", cpout$col)
  write.table(cpout[,c("AreaShape_Center_X", "AreaShape_Center_Y", "col")], paste("Nuclei_", plot_file_names[i], ".txt", sep=""), sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
}
# Long format for the inside/outside/total summary plots.
test = melt(result, id.vars=c("image"))
png("bar.png")
# FIX: explicit print() -- top-level autoprinting does not happen under source().
print(ggplot(test, aes(x=image, y = value, fill=variable, colour=variable)) + geom_bar(stat='identity', position='dodge' ))
dev.off()
png("line.png")
print(ggplot(test, aes(x=image, y = value, fill=variable, colour=variable)) + geom_line())
dev.off()
write.table(result, "numbers.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
write.table(result, "numbers_no_header.txt", sep="\t", row.names=FALSE, col.names=FALSE, quote=FALSE)
# Frame-to-frame changes in each count.
growth = data.frame(image=result$image[2:nrow(result)], inside_growth=diff(result$inside), outside_growth=diff(result$outside), total_growth=diff(result$total))
write.table(growth, "growth.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
# Per-column summary statistics of the growth table.
summ <- do.call(data.frame,
                list(mean = apply(growth, 2, mean),
                     sd = apply(growth, 2, sd),
                     median = apply(growth, 2, median),
                     min = apply(growth, 2, min),
                     max = apply(growth, 2, max),
                     n = apply(growth, 2, length)))
summ_numeric_colls = sapply(summ, is.numeric)
summ[,summ_numeric_colls] = round(summ[,summ_numeric_colls], 2)
write.table(summ, "summary.txt", sep="\t", row.names=TRUE, col.names=NA, quote=FALSE)
# ---------- for follow up analysis ----------
result=result[,c("total", "inside")]
result$perc = round((result$inside / result$total) * 100, 2)
write.table(result, "in_out_perc.txt", sep="\t", row.names=FALSE, col.names=FALSE)
# ---------- csv for d3.js? ----------
write.table(inputdata[,c("ImageNumber", "ObjectNumber", "AreaShape_Center_X", "AreaShape_Center_Y")], file="objects.csv", quote=FALSE, sep=",", row.names=FALSE, col.names=TRUE)
|
a022900fb34fe5ed7f1aef23230fdeebdf78bf47
|
29d34e3302b71d41d77af715727e963aea119392
|
/man/d.ICA.Rd
|
7ac15f3fa7ba5917ad51592aa4611613ff98d096
|
[] |
no_license
|
bakaibaiazbekov/rtemis
|
1f5721990d31ec5000b38354cb7768bd625e185f
|
a0c47e5f7fed297af5ad20ae821274b328696e5e
|
refs/heads/master
| 2020-05-14T20:21:40.137680
| 2019-04-17T15:42:33
| 2019-04-17T15:42:33
| 181,943,092
| 1
| 0
| null | 2019-04-17T18:00:09
| 2019-04-17T18:00:09
| null |
UTF-8
|
R
| false
| true
| 1,978
|
rd
|
d.ICA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/d.ICA.R
\name{d.ICA}
\alias{d.ICA}
\title{Independent Component Analysis}
\usage{
d.ICA(x, k = 3, package = c("fastICA", "ica"), alg.type = "parallel",
maxit = 100, scale = TRUE, center = TRUE, verbose = TRUE,
trace = 0, ...)
}
\arguments{
\item{x}{Input data}
\item{k}{Integer vector of length 1 or greater. Rank of decomposition}
\item{package}{String: Which package to use for ICA. "fastICA" will use \code{fastICA::fastICA},
"ica" will use \code{ica::icafast}. Default = "fastICA".
Note: only \code{fastICA} works with \code{k = 1}}
\item{alg.type}{String: For \code{package = "fastICA"}, "parallel" or "deflation". Default = "parallel"}
\item{maxit}{Integer: Maximum N of iterations}
\item{scale}{Logical: If TRUE, scale input data before decomposition. Default = TRUE}
\item{center}{Logical: If TRUE, also center input data if \code{scale} is \code{TRUE}.
Default = TRUE}
\item{verbose}{Logical: If TRUE, print messages to screen}
\item{...}{Additional parameters to be passed to \code{fastICA::fastICA} or \code{ica::icafast}}
}
\value{
\link{rtDecom} object
}
\description{
Calculates ICA decomposition and projections using the fastICA algorithm in \code{fastICA::fastICA}
}
\details{
Project scaled variables to ICA components.
Input must be n by p,
where n represents number of cases,
and p represents number of features.
fastICA will be applied to the transpose of the n x p matrix.
fastICA will fail if there are any NA values or constant features: remove them using \link{preprocess}
}
\seealso{
Other Decomposition: \code{\link{d.CUR}},
\code{\link{d.H2OAE}}, \code{\link{d.H2OGLRM}},
\code{\link{d.ISOMAP}}, \code{\link{d.KPCA}},
\code{\link{d.LLE}}, \code{\link{d.MDS}},
\code{\link{d.NMF}}, \code{\link{d.PCA}},
\code{\link{d.SPCA}}, \code{\link{d.SVD}},
\code{\link{d.TSNE}}, \code{\link{d.UMAP}}
}
\author{
Efstathios D. Gennatas
}
\concept{Decomposition}
|
8c7b35019244d828b988c37aeeda8604e450deae
|
5b5a18142a86e49a7deb2c349b484dadc335920a
|
/man/img_typicality.Rd
|
7de2f93f82cf035730b929c288ac5be53cdb3c12
|
[] |
no_license
|
stm/imagefluency
|
9067b79f3ad3d6c3e5c683761f89ef2e202cf0ee
|
d9e6d1e9bea92a20bd464ca3d1b71942cb7cc79e
|
refs/heads/master
| 2023-04-18T05:55:46.240210
| 2022-09-29T17:33:07
| 2022-09-29T17:33:07
| 78,774,174
| 4
| 1
| null | 2021-11-03T15:00:19
| 2017-01-12T18:23:32
|
R
|
UTF-8
|
R
| false
| true
| 2,860
|
rd
|
img_typicality.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/typicality.R
\name{img_typicality}
\alias{img_typicality}
\title{Typicality of images relative to each other}
\usage{
img_typicality(imglist, rescale = NULL)
}
\arguments{
\item{imglist}{A \emph{list} of arrays or matrices with numeric values. Use
e.g. \code{\link{img_read}()} to read image files into \code{R} (see
example).}
\item{rescale}{numeric. Rescales the images prior to computing the typicality
scores (per default no rescaling is performed). Rescaling is performed by
\code{OpenImageR}'s \code{\link[OpenImageR]{resizeImage}} function
(bilinear rescaling)}
}
\value{
a named matrix of numeric values (typicality scores)
}
\description{
\code{img_typicality} returns the visual typicality of a list of images
relative to each other. Higher values indicate larger typicality.
}
\details{
The function returns the visual typicality of a \emph{list} of image
arrays or matrices \code{imglist} relative to each other. Values can range
between -1 (inversely typical) over 0 (not typical) to 1 (perfectly typical).
That is, higher absolute values indicate a larger typicality.
The typicality score is computed as the correlation of a particular image
with the average representation of all images, i.e. the mean of all images.
For color images, the weighted average between each color channel's values
is computed. If the images have different dimensions they are automatically
resized to the smallest height and width.
Rescaling of the images prior to computing the typicality scores can be
specified with the optional rescaling parameter (must be a numeric value).
Most users won't need any rescaling and can use the default (\code{rescale
= NULL}). See Mayer & Landwehr (2018) for more details.
}
\examples{
# Example images depicting valleys: valley_green, valley_white
# Example image depicting fireworks: fireworks
valley_green <- img_read(
system.file("example_images", "valley_green.jpg", package = "imagefluency")
)
valley_white <- img_read(
system.file("example_images", "valley_white.jpg", package = "imagefluency")
)
fireworks <- img_read(
system.file("example_images", "fireworks.jpg", package = "imagefluency")
)
#
# display images
grid::grid.raster(valley_green)
grid::grid.raster(valley_white)
grid::grid.raster(fireworks)
# create image set as list
imglist <- list(fireworks, valley_green, valley_white)
# get typicality
img_typicality(imglist)
}
\references{
Mayer, S. & Landwehr, J. R. (2018). Objective measures of design
typicality. \emph{Design Studies}, \emph{54}, 146--161.
\doi{10.1016/j.destud.2017.09.004}
}
\seealso{
\code{\link{img_read}}, \code{\link{img_contrast}},
\code{\link{img_complexity}}, \code{\link{img_self_similarity}},
\code{\link{img_simplicity}}, \code{\link{img_symmetry}}
}
|
681b8b68eb333a3f7faf04b5fc57069f9f061a21
|
2e7ff4c210137d0756af58b08ded8f064d99e9ad
|
/01_create-data.R
|
7f27fab8bd09105f444a31db0f1e2d121442c4e8
|
[] |
no_license
|
erikgahner/epsr-bailout
|
92fbf385b113404a7d6cd11e4d2151048b088880
|
f0829edc371f97a2a6fbeb36e74b1e9cf2b6dafd
|
refs/heads/master
| 2020-04-30T04:32:04.952923
| 2019-07-18T21:59:09
| 2019-07-18T21:59:09
| 176,612,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,815
|
r
|
01_create-data.R
|
###
##
## Article: Bailout or bust? Government evaluations in the wake of a bailout
##
## European Political Science Review
##
## Erik Gahner Larsen Robert Klemmensen Michael Baggesen Klitgaard
## E.G.Larsen@kent.ac.uk rkl@sam.sdu.dk mbk@sam.sdu.dk
##
##
## Data: European Social Survey: http://www.europeansocialsurvey.org/
## Longitudinal Internet Studies for the Social Sciences: https://lissdata.nl
## Eurobarometer: http://ec.europa.eu/commfrontoffice/publicopinion/
##
###
library("tidyverse")
library("rio")
# Raw data: LISS politics waves 2007/2008, LISS income module, ESS round 4
# (Netherlands) and two 2008 Eurobarometer waves (spring and autumn).
liss07 <- import("~/Google Drev/data/liss/po/cv08a_1.1p_EN.dta")
liss08 <- import("~/Google Drev/data/liss/po/cv09b_2.1p_EN.dta")
liss.inc <- import("~/Google Drev/data/liss/in/ci08a_1.0p_EN.dta")
# Join the two panel waves and the income module on the panel member id.
liss <- left_join(liss07, liss08, by="nomem_encr")
liss <- left_join(liss, liss.inc, by="nomem_encr")
ess <- import("../data/ESS4NL.dta")
eb04 <- import("~/Google Drev/data/eurobarometer/200804/ZA4744_v5-0-0.dta")
eb10 <- import("~/Google Drev/data/eurobarometer/200810/ZA4819_v3-0-2.dta")
liss <- liss %>%
  mutate(
    # Household income brackets (1..7) from ci08a228.
    # NOTE(review): the first bracket tests ci08a229 while every other
    # bracket uses ci08a228 -- looks like a typo; confirm against the
    # LISS income-module codebook before changing.
    income = case_when(ci08a229 < 8000 ~ 1,
                       ci08a228 > 8000 & ci08a228 <= 16000 ~ 2,
                       ci08a228 > 16000 & ci08a228 <= 24000 ~ 3,
                       ci08a228 > 24000 & ci08a228 <= 36000 ~ 4,
                       ci08a228 > 36000 & ci08a228 <= 48000 ~ 5,
                       ci08a228 > 48000 & ci08a228 <= 60000 ~ 6,
                       ci08a228 > 60000 & ci08a228 <= 200000 ~ 7,
                       TRUE ~ NA_real_),
    # Generate government variable (1 = voted for a governing party,
    # 0 = other valid vote, NA = missing codes 998/999)
    government = case_when(
      cv08a058 == 3 | cv08a058 == 4 | cv08a058 == 9 ~ 1,
      cv08a058 != 998 & cv08a058 != 999 ~ 0,
      TRUE ~ NA_real_)
  ) %>%
  # Recode missing values (999 in LISS) to NA
  mutate_at(vars(cv08a030, cv08a043, cv09b030, cv09b043,
                 cv08a038, cv09b038, cv08a039, cv09b039,
                 cv08a044, cv09b044, cv08a039, cv09b039,
                 cv08a040, cv09b040, cv08a041, cv09b041,
                 cv08a042, cv09b042, cv08a044, cv09b044,
                 cv08a045, cv09b045, cv08a046, cv09b046),
            function(x) case_when(x == 999 ~ NA_real_, TRUE ~ as.numeric(x))) %>%
  # Keep panel members observed on both satisfaction items in both waves.
  drop_na(cv08a030, cv09b030, cv08a043, cv09b043) %>%
  # Wave-to-wave change in economy (eco) and government (gov) satisfaction.
  mutate(eco = cv09b043 - cv08a043,
         gov = cv09b030 - cv08a030)
ess <- ess %>%
  # Recode missing (ESS missing codes are > 50 on these 0-10 scales)
  mutate_at(vars(stfgov, stfeco, stflife, stfdem),
            function(x) case_when(x > 50 ~ NA_real_, TRUE ~ as.numeric(x))) %>%
  mutate(
    # Interviewed before (0) vs after (1) the late-September 2008 cutoff,
    # based on interview month (inwmme) and day (inwdde).
    bailout = case_when(
      inwmme == 9 & inwdde < 28 ~ 0,
      inwmme == 9 & inwdde > 19 ~ 1,
      inwmme == 10 ~ 1,
      inwmme == 11 ~ 1,
      TRUE ~ NA_real_
    ),
    Bailout = bailout,
    income = ifelse(hinctnta < 11, hinctnta, NA),
    # 1 = voted for a governing party, NA = no-vote codes 66/77, else 0.
    government = case_when(
      prtvtcnl == 66 | prtvtcnl == 77 ~ NA_real_,
      prtvtcnl == 1 | prtvtcnl == 2 | prtvtcnl == 8 ~ 1,
      TRUE ~ 0),
    # ESS missing codes 77/88 -> NA for the remaining covariates.
    eduyrs = ifelse(eduyrs == 77 | eduyrs == 88, NA, eduyrs),
    lrscale = ifelse(lrscale == 77 | lrscale == 88, NA, lrscale),
    hinctnta = ifelse(hinctnta == 77 | hinctnta == 88, NA, hinctnta),
    # FIX: the second condition tested hinctnta == 8 instead of uemp3m == 8
    # (copy-paste slip; 7/8 are the single-digit missing codes for uemp3m).
    uemp3m = ifelse(uemp3m == 7 | uemp3m == 8, NA, uemp3m),
    # Region dummies from the regionnl code ranges.
    region_north = case_when(regionnl > 100 & regionnl < 200 ~ 1, TRUE ~ 0),
    region_east = case_when(regionnl > 200 & regionnl < 300 ~ 1, TRUE ~ 0),
    region_west = case_when(regionnl > 300 & regionnl < 400 ~ 1, TRUE ~ 0),
    region_south = case_when(regionnl > 400 & regionnl < 500 ~ 1, TRUE ~ 0)
  ) %>%
  drop_na(bailout)
# Eurobarometer spring 2008 (pre, tr = 0): Dutch respondents only (v6 == 3).
eb04 <- eb04 %>%
  filter(v6 == 3) %>%
  mutate(
    tr = 0,
    eco = case_when(
      v90 == 1 ~ 2,
      v90 == 2 ~ 0,
      v90 == 3 ~ 1,
      TRUE ~ NA_real_
    ),
    gov = case_when(
      v213 == 2 ~ 0,
      v213 == 1 ~ 1,
      TRUE ~ NA_real_
    )
  ) %>%
  select(tr, eco, gov)
# Eurobarometer autumn 2008 (post, tr = 1): same recoding, different items.
eb10 <- eb10 %>%
  filter(v6 == 3) %>%
  mutate(
    tr = 1,
    eco = case_when(
      v124 == 1 ~ 2,
      v124 == 2 ~ 0,
      v124 == 3 ~ 1,
      TRUE ~ NA_real_
    ),
    gov = case_when(
      v228 == 2 ~ 0,
      v228 == 1 ~ 1,
      TRUE ~ NA_real_
    )
  ) %>%
  select(tr, eco, gov)
# Stack the two Eurobarometer waves and export all three analysis datasets.
eb <- rbind(eb04, eb10)
write_csv(eb, "data_eb.csv")
ess %>%
  select(stfeco, stfgov, stflife, stfdem, bailout, Bailout, income, government, gndr, agea, eduyrs, hinctnta, partner, uemp3m, polintr, lrscale, region_north, region_east, region_west, region_south) %>%
  write_csv(., "data_ess.csv")
liss %>%
  select(nomem_encr,
         cv08a043, cv09b043, cv08a030, cv09b030,
         income, government, eco, gov,
         cv09b038, cv08a038, cv09b039, cv08a039, cv09b040, cv08a040, cv09b041, cv08a041,
         cv09b042, cv08a042, cv09b044, cv08a044, cv09b045, cv08a045, cv09b046, cv08a046) %>%
  write_csv(., "data_liss.csv")
|
9d66413d2d04cd40340aab180e06d024d6aeaee5
|
7f4bbb0dd5be6a704b618cf0fbf0c80fff699235
|
/man/vieillissement_ppe.Rd
|
fbe0992e1f33142a0ddf34d4ac08c779f2628ea4
|
[] |
no_license
|
DTichit/ALModel
|
67c7b041a84dd9fdaec3c084d3cbd1f322b5a827
|
820faab78ff29d404b522b737170393930132c77
|
refs/heads/master
| 2023-03-08T09:40:48.628155
| 2021-02-10T18:24:52
| 2021-02-10T18:24:52
| 337,813,431
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 444
|
rd
|
vieillissement_ppe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PPE-vieilissement.R
\docType{methods}
\name{vieillissement_ppe}
\alias{vieillissement_ppe}
\title{Fonction \code{vieillissement_ppe}.}
\usage{
vieillissement_ppe(ppe)
}
\arguments{
\item{ppe}{est un objet de type \code{\link{PPE}}.}
}
\description{
Cette fonction permet de vieillir d'une annee l'objet \code{\link{PPE}}
}
\author{
Damien Tichit pour Sia Partners
}
|
69fc80c914dbc8f295268ff94b6af3afa18f8a8b
|
5517dab1f9d77cda25ce5fd1fa4e2b86baa6373b
|
/code/ch07/nb03_deviance.R
|
a543f709dc10ab562eb7ca471cfef228ddb5c0dd
|
[] |
no_license
|
somnath1077/statistical_rethinking
|
e1df98ba631fd7ee64b8c3ec6912c826bb95a3ff
|
22fd65b629928fdf138dcaa938d6d283750f7e65
|
refs/heads/master
| 2023-06-05T19:50:47.842485
| 2021-06-30T09:32:09
| 2021-06-30T09:32:09
| 164,726,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,055
|
r
|
nb03_deviance.R
|
# Statistical Rethinking, chapter 7: deviance and out-of-sample fit.
# Fits polynomial models (degree 1-6) of brain volume on body mass for
# seven hominin species, computes lppd, and simulates train/test deviance.
# Depends on the `rethinking` package (quap, sim, var2, lppd, mcreplicate,
# sim_train_test, rangi2, concat).
library(rethinking)
# Create the data
sppnames <- c("afarensis",
              "africanus",
              "habilis",
              "boisei",
              "rudolfensis",
              "ergaster",
              "sapiens")
brainvolcc <- c(438 , 452 , 612, 521, 752, 871, 1350)  # brain volume, cc
masskg <- c(37.0 , 35.5 , 34.5 , 41.5 , 55.5 , 61.0 , 53.5)  # body mass, kg
d <- data.frame(species = sppnames ,
                brain = brainvolcc ,
                mass = masskg)
# Normalize: standardize mass; rescale brain to (0, 1] by its maximum
d$mass_std <- (d$mass - mean(d$mass)) / sd(d$mass)
d$brain_std <- d$brain / max(d$brain)
# utility functions
# Posterior-predictive R^2 -- "bad" because R^2 only ever improves as the
# polynomial degree grows, so it cannot guard against overfitting.
R2_is_bad <- function(quap_fit) {
  s <- sim(quap_fit , refresh = 0)
  r <- apply(s, 2, mean) - d$brain_std  # residuals of mean predictions
  1 - var2(r) / var2(d$brain_std)
}
# Models: linear (m7.1) through degree-6 polynomial (m7.6).
# NOTE: alist() keeps the formulas unevaluated for quap -- do not edit them.
m7.1 <- quap(
  alist(
    brain_std ~ dnorm(mu , exp(log_sigma)),
    mu <- a + b * mass_std,
    a ~ dnorm(0.5 , 1),
    b ~ dnorm(0 , 10),
    log_sigma ~ dnorm(0 , 1)
  ),
  data = d
)
# Quadratic; b is now a length-2 vector, so a start value is required.
m7.2 <- quap(
  alist(
    brain_std ~ dnorm(mu , exp(log_sigma)),
    mu <- a + b[1] * mass_std + b[2] * mass_std ^ 2,
    a ~ dnorm(0.5 , 1),
    b ~ dnorm(0 , 10),
    log_sigma ~ dnorm(0 , 1)
  ),
  data = d ,
  start = list(b = rep(0, 2))
)
# Cubic.
m7.3 <- quap(
  alist(
    brain_std ~ dnorm( mu , exp(log_sigma) ),
    mu <- a + b[1]*mass_std + b[2]*mass_std^2 +
      b[3]*mass_std^3,
    a ~ dnorm( 0.5 , 1 ),
    b ~ dnorm( 0 , 10 ),
    log_sigma ~ dnorm( 0 , 1 )
  ), data=d , start=list(b=rep(0,3)) )
# Quartic.
m7.4 <- quap(
  alist(
    brain_std ~ dnorm( mu , exp(log_sigma) ),
    mu <- a + b[1]*mass_std + b[2]*mass_std^2 +
      b[3]*mass_std^3 + b[4]*mass_std^4,
    a ~ dnorm( 0.5 , 1 ),
    b ~ dnorm( 0 , 10 ),
    log_sigma ~ dnorm( 0 , 1 )
  ), data=d , start=list(b=rep(0,4)) )
# Quintic.
m7.5 <- quap(
  alist(
    brain_std ~ dnorm( mu , exp(log_sigma) ),
    mu <- a + b[1]*mass_std + b[2]*mass_std^2 +
      b[3]*mass_std^3 + b[4]*mass_std^4 +
      b[5]*mass_std^5,
    a ~ dnorm( 0.5 , 1 ),
    b ~ dnorm( 0 , 10 ),
    log_sigma ~ dnorm( 0 , 1 )
  ), data=d , start=list(b=rep(0,5)) )
# Degree 6 with sigma fixed at 0.001: with 7 data points this model
# effectively interpolates the data exactly.
m7.6 <- quap(
  alist(
    brain_std ~ dnorm( mu , 0.001 ),
    mu <- a + b[1]*mass_std + b[2]*mass_std^2 +
      b[3]*mass_std^3 + b[4]*mass_std^4 +
      b[5]*mass_std^5 + b[6]*mass_std^6,
    a ~ dnorm( 0.5 , 1 ),
    b ~ dnorm( 0 , 10 )
  ), data=d , start=list(b=rep(0,6)) )
# LPPD calculations (log pointwise predictive density, per model)
set.seed(1)
lppd( m7.1 , n=1e4 )
set.seed(1)
sapply( list(m7.1,m7.2,m7.3,m7.4,m7.5,m7.6) , function(m) {sum(lppd(m))} )
# Simulations: mean +/- sd of in-sample (row 1) and out-of-sample (row 2)
# deviance over 100 simulated train/test splits, for k = 1..5 parameters.
N <- 20
kseq <- 1:5
dev <- sapply( kseq , function(k) {
  print(k);
  r <- mcreplicate( 100 , sim_train_test( N=N, k=k ) , mc.cores=6 )
  c( mean(r[1,]) , mean(r[2,]) , sd(r[1,]) , sd(r[2,]) )
} )
# Plot: filled blue points = in-sample deviance; open points = out-of-sample.
plot( 1:5 , dev[1,] , ylim=c( min(dev[1:2,])-5 , max(dev[1:2,])+10 ) ,
      xlim=c(1,5.1) , xlab="number of parameters" , ylab="deviance" ,
      pch=16 , col=rangi2 )
mtext( concat( "N = ",N ) )
points( (1:5)+0.1 , dev[2,] )
# +/- 1 sd error bars for both series (out-of-sample offset by 0.1).
for ( i in kseq ) {
  pts_in <- dev[1,i] + c(-1,+1)*dev[3,i]
  pts_out <- dev[2,i] + c(-1,+1)*dev[4,i]
  lines( c(i,i) , pts_in , col=rangi2 )
  lines( c(i,i)+0.1 , pts_out )
}
|
540179dd860cf2fdd5bdb451e3bb1cccec70b32b
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.iot/man/describe_thing_group.Rd
|
5c377a974200913b1faec0552cdc8e81b86b4a09
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
describe_thing_group.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.iot_operations.R
\name{describe_thing_group}
\alias{describe_thing_group}
\title{Describe a thing group}
\usage{
describe_thing_group(thingGroupName)
}
\arguments{
\item{thingGroupName}{[required] The name of the thing group.}
}
\description{
Describe a thing group.
}
\section{Accepted Parameters}{
\preformatted{describe_thing_group(
thingGroupName = "string"
)
}
}
|
ace59701183b458f1911a114299b7e5bb63bad2a
|
15955f0f66730020cbacffabd4982ceeec24fd93
|
/charge_sero_variance.R
|
02694a6c100bcf6a3904c1df15073161cf26f354
|
[
"BSD-3-Clause"
] |
permissive
|
johnlees/meningene
|
21fd005cf2484bf9e0213150bd53c616d1d77eea
|
e8d73c39eb54ad97271d9edf814c0e61737757ef
|
refs/heads/master
| 2021-03-22T02:12:59.837985
| 2019-02-15T21:49:28
| 2019-02-15T21:49:28
| 110,558,650
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,001
|
r
|
charge_sero_variance.R
|
# How much variance in pneumococcal invasiveness is explained by surface
# charge vs. serotype: logistic regressions scored with Nagelkerke R^2,
# plus lasso (glmnet) selection of serotype predictors. Observed output
# values are recorded in the comments below each call.
# FIX: library() instead of require() -- fail loudly if a package is missing.
library(fmsb)    # NagelkerkeR2
library(glmnet)
setwd("~/Documents/PhD/dutch_carriage/charge/")
sero_charge <- read.delim("~/Documents/PhD/dutch_carriage/charge/sero_charge.covar", header=FALSE, stringsAsFactors=FALSE)
lane_serotypes <- read.delim("~/Documents/PhD/dutch_carriage/charge/lane_serotypes.txt", header=FALSE, stringsAsFactors=FALSE)
for_R_invasive <- read.delim("~/Documents/PhD/dutch_carriage/charge/for_R_invasive.pheno", header=FALSE, stringsAsFactors=FALSE)
# Merge on sample id (V1); full outer join when adding serotypes.
charge_data <- merge(sero_charge, for_R_invasive, by.x = "V1", by.y="V1")
# FIX: TRUE instead of T (T is reassignable).
charge_data <- merge(charge_data, lane_serotypes, by.x = "V1", by.y="V1", all.x = TRUE, all.y = TRUE)
rownames(charge_data) <- charge_data$V1
# NOTE(review): positional column selection -- verify against input layout.
charge_data <- charge_data[,c(3,6,4)]
colnames(charge_data) <- c("charge", "serotype", "invasive")
# Charge-only logistic model.
charge_glm <- glm(factor(invasive) ~ charge, data = charge_data, family = binomial())
summary(charge_glm)
#charge 0.12392 0.01377 9.001 <2e-16 ***
NagelkerkeR2(charge_glm)
#$N
#[1] 1893
#
#$R2
#[1] 0.06184394
# Refit excluding rows at the sentinel charge value.
# NOTE(review): exact float comparison (!= -12.710) is fragile; consider a
# tolerance check if this value is computed rather than stored verbatim.
charge_glm <- glm(factor(invasive) ~ charge, data = charge_data[charge_data$charge!=-12.710,], family = binomial())
NagelkerkeR2(charge_glm)
#$N
#[1] 1458
#
#$R2
#[1] 0.07964819
# Lasso over serotype dummies (model.matrix drops rows with NA serotype);
# leave-one-out cross-validation to choose lambda.
lasso_X <- as.matrix(model.matrix(factor(invasive) ~ serotype, charge_data)[,-1])
lasso_y <- charge_data[!is.na(charge_data$serotype),"invasive"]
lasso.sero <- glmnet(lasso_X, lasso_y, alpha = 1,
                     family = 'binomial')
plot(lasso.sero, xvar="lambda", label=TRUE)  # FIX: label=T -> TRUE
cv.lasso.sero <- cv.glmnet(lasso_X, lasso_y, family = 'binomial',
                           alpha = 1, nfolds = length(lasso_y))
plot(cv.lasso.sero)
coef(cv.lasso.sero, s="lambda.1se")
# use selected predictors (non-zero coefficients at lambda.1se) in regression
selected <- lasso_X[,which(coef(cv.lasso.sero, s="lambda.1se")[-1] != 0)]
glm.sero <- glm(lasso_y ~ selected, family = binomial())
summary(glm.sero)
NagelkerkeR2(glm.sero)
#$N
#[1] 1735
#
#$R2
#[1] 0.4493011
# without selection
#NagelkerkeR2(glm(lasso_y ~ lasso_X, family = binomial()))
#$N
#[1] 1735
#
#$R2
#[1] 0.459293
|
bab2089010d90c186f95fd2d562e32fda5a5ee52
|
d86217683cbc6a08aeb60971193c46efe4b89b00
|
/cachematrix.R
|
8f3c20d1fa62e40bc096db5e764d0bc2c499bf25
|
[] |
no_license
|
bezoarboy/ProgrammingAssignment2
|
39b5d76a628cc4f76d936b3c3466b6e219cbec4a
|
f7a1c7cc5e6320e9b635e256e583e72e39b7383f
|
refs/heads/master
| 2020-12-11T05:52:28.858552
| 2014-05-26T01:13:00
| 2014-05-26T01:13:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,524
|
r
|
cachematrix.R
|
##################################################
##
## makeCacheMatrix() and cacheSolve()
## - pair of functions that cache the inverse of a matrix
## - v1.0, 2014-05-17
##
## <<- operator assigns a value to an object in an environment different from the current environment
## - can be thought of as a global variable
##
##########
## makeCacheMatrix
## - creates a special "matrix" object that can cache its inverse
## - consists of a list containing functions to
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse
## - get the value of the inverse
##
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores a value.
  inv <- NULL
  # Return a "cache matrix": a list of closures sharing this environment.
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache whenever the matrix changes
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
##########
## cacheSolve
## - computes the inverse of the special "matrix" returned by makeCacheMatrix and cache it
## - first checks to see if the inverse has already been calculated
## - if inverse already calculated (and matrix unchanged), then retrieve the inverse from the cache
## - otherwise, it calculates the inverse of the data and caches it via the setInverse function
##
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" x, computing it only on a
  # cache miss; extra arguments are forwarded to solve().
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
450fcc7268b025739342e65f58d84114f29cafbb
|
80d328e2bbfbf3aea2b55a02ca962a6cc6fb26c8
|
/war and life expectancy/war_and_life_expectancy_graph.R
|
c819df12bc7463d0dbbdce390faab451ca2aa1f1
|
[] |
no_license
|
AjeetParmar1/gapminder-parsing
|
c11ce09138aaaeb0ee77e69aca0dc78fc65a26d1
|
35236414f05022911a7459027cdb23e9f7aa0563
|
refs/heads/master
| 2022-11-17T14:42:24.082984
| 2020-07-14T17:50:18
| 2020-07-14T17:50:18
| 277,646,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
war_and_life_expectancy_graph.R
|
library(dslabs)
library(tidyverse)
library(gtools)

data("gapminder")

# Plot life expectancy over time for three countries.
# Bug fix: the original had a trailing comma inside c("China", "Vietnam",
# "United States",), which raises "argument 4 is empty" in R.
gapminder %>%
  filter(country %in% c("China", "Vietnam", "United States")) %>%
  ggplot(aes(x = year, y = life_expectancy, color = country)) +
  geom_line() +
  ggtitle("Life Expectancy of China, Vietnam, and the US from 1960-2010") +
  xlab("year") +
  ylab("life expectancy")
|
8c35637a7be5035320c202c2c165f51a47b2d34e
|
c78014e5b3a00181ceefd491eb566cd04e7c4c4a
|
/R/convert_doc_to_tex.R
|
8118bc1bd68fdd594b93b398a75e219d517390d7
|
[] |
no_license
|
wfmackey/word2grattex
|
923a377c8978ee4421f1bffd67bbb8e8b5dbf5a8
|
b4b10cccb9a03d86a66ad60bff88928140eb0fc6
|
refs/heads/master
| 2021-06-16T19:32:50.213314
| 2021-02-19T01:37:46
| 2021-02-19T01:37:46
| 162,081,270
| 2
| 0
| null | 2019-03-29T02:27:44
| 2018-12-17T05:53:14
|
R
|
UTF-8
|
R
| false
| false
| 1,034
|
r
|
convert_doc_to_tex.R
|
#' Use pandoc to convert a Word document to a .tex file.
#'
#' @param file.docx The path to the .docx file to be converted.
#' @param tex_file The desired path of the exported .tex file
#'
#' @return A character vector containing the lines of the generated .tex file.
#'
#' @importFrom readr read_lines
#'
#' @export
#'
convert_doc_to_tex <- function(file.docx,
                               tex_file) {
  # Check for pandoc
  if (!nzchar(Sys.which("pandoc"))) {
    stop("pandoc not found on the system path. See https://pandoc.org/installing.html to download it.")
  }
  # Convert using pandoc via the shell. shQuote() protects paths containing
  # spaces or shell metacharacters (the original interpolated them raw).
  message(paste0("Converting .docx to .tex using Pandoc"))
  cmd <- sprintf("pandoc --wrap=none --top-level=chapter -s %s -o %s",
                 shQuote(file.docx), shQuote(tex_file))
  if (tolower(.Platform$OS.type) == "windows") {
    # shell() is required on Windows; system() elsewhere
    shell(cmd)
  } else {
    system(cmd)
  }
  # Read lines from pandoc output
  message(paste0("Reading .tex lines for processing"))
  out_tex_lines <- readr::read_lines(tex_file)
  return(out_tex_lines)
}
|
8eab4d22d28b3b1ad028d590a20f005b8ae1506b
|
6bdded425c43483585ee21a7704db5e34f5717e5
|
/man/plot.mfso.Rd
|
d4fb1eed48ab045c81a6d2c173d3496df5f91ffb
|
[] |
no_license
|
cran/fso
|
d709b5922bf67bdf41cfb21d94f1182ff876f5bd
|
cabc7e93c4ce645ff77b976b337a48610ec089ac
|
refs/heads/master
| 2022-10-03T09:59:10.244422
| 2022-09-26T14:30:02
| 2022-09-26T14:30:02
| 17,696,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,148
|
rd
|
plot.mfso.Rd
|
\name{plot.mfso}
\alias{plot.mfso}
\alias{points.mfso}
\alias{plotid.mfso}
\alias{hilight.mfso}
\alias{chullord.mfso}
\alias{boxplot.mfso}
\alias{thull.mfso}
\title{Plotting Routines for Multidimensional Fuzzy Set Ordinations}
\description{A set of routines for plotting, identifying, or
highlighting points in a multidimensional fuzzy set ordination (MFSO).}
\usage{
\method{plot}{mfso}(x, dis=NULL, pch=1, ax=NULL, ay=NULL, \dots)
\method{points}{mfso}(x, overlay, col=2, pch=1, ...)
\method{plotid}{mfso}(ord, dis=NULL, labels=NULL, \dots)
\method{hilight}{mfso}(ord, overlay, cols = c(2, 3, 4, 5,
6, 7), symbol = c(1, 3, 5), \dots)
\method{chullord}{mfso}(ord, overlay, cols = c(2, 3, 4, 5,
6, 7), ltys = c(1, 2, 3), \dots)
\method{boxplot}{mfso}(x, \dots)
\method{thull}{mfso}(ord,var,grain,ax=1,ay=2,col=2,grid=50,
nlevels=5,levels=NULL,lty=1,numitr=100,...)
}
\arguments{
\item{x}{an object of class \sQuote{mfso}}
\item{ax}{X axis number}
\item{ay}{Y axis number}
\item{ord}{an object of class \sQuote{mfso}}
\item{mfso}{an object of class \sQuote{mfso}}
\item{dis}{an object of class \sQuote{dist} from \code{dist},
\code{\link{dsvdis}}, or
\sQuote{vegdist}}
\item{overlay}{a logical vector of the same length as the number of
points in the plot}
\item{labels}{a vector of labels to print next to the identified points}
\item{symbol}{an integer or vector of integers to control which symbols
are printed in which order on the plot by specifying values to
\code{pch}}
\item{ltys}{an integer or vector of integers to control the line styles
of convex hull polygons}
\item{pch}{the symbol to plot}
\item{col}{the color to use for plotted symbols}
\item{cols}{an integer vector for color order}
\item{var}{a variable to fit with a tensioned hull}
\item{grain}{the size of the moving window used to calculate the
tensioned hull}
\item{grid}{the number of cells in the image version of the tensioned hull}
\item{nlevels}{the number of contour levels to plot the tensioned hull}
\item{levels}{a logical variable to control plotting the contours on the
tensioned hull}
\item{lty}{the line type to use in drawing the contours}
\item{numitr}{the number of random iterations to use to compute the
probability of obtaining as small a tensioned hull as observed}
\item{\dots}{arguments to pass to function points}
}
\details{
Multidimensional fuzzy set ordinations (MFSO) are almost inherently graphical,
and routines to facilitate plotting and overlaying are essential to work
effectively with them.
A multidimensional fuzzy set ordination object (an object of class
\sQuote{mfso}) generally contains at least two axes, and may contain many more.
By default, the \code{plot} routine plots all possible axis pairs in order.
If \sQuote{ax} and \sQuote{ay} are specified only a single plot is produced
with X axis ax and Y axis ay. If
\sQuote{dist} object is passed with the \sQuote{dis=} argument, the final panel
is a plot of the dissimilarity or distance matrix values on the X axis and the
pair-wise ordination distances on the Y axis with the correlation coefficient in
the upper left hand corner.
The \sQuote{points} function can be used to highlight or identify specific
points in the plot. The \sQuote{points} function requires a logical vector
(TRUE/FALSE) of the same length as the number of points in the plot. The
default behavior is to color the points with a respective TRUE value red. It is
possible to control the color (with col=), size (with cex=) and symbol (with
pch=) of the points.
The \sQuote{plotid} function can be used to label or identify specific points
with the mouse. Clicking the left mouse button adjacent to a point causes
the point to be labeled,
offset in the direction of the click relative to the point. Clicking the right
mouse button exits the routine. The default
(labels=NULL) is to label points with the row number in the data.frame (or
position in the vector) for the point. Alternatively, specifying a vector of
labels (labels=) prints the respective labels. If the data were derived from a
data.frame, the row.names of the data.frame are often a good choice, but the
labels can also be used with a factor vector to identify the distribution of
values of a factor in the ordination (but see hilight as well).
The \sQuote{hilight} function identifies the factor values of points in the
ordination, using color and symbols to identify unique values (up to 18 values by
default). The colors and symbols used can be specified by the \sQuote{col=} and
\sQuote{symbol=} arguments, which should both be integers or integer vectors.
The default of colors 2, 3, 4, 5, 6, 7 and symbols 1, 3, 5 shows well in most
cases, but on colored backgrounds you may need to adjust \sQuote{col=}. If you
have a factor with more than 18 classes you will need to augment the
\sQuote{symbol=} vector with more values.
The \sQuote{chullord} function plots convex hulls around all points sharing the
same value for a factor variable, and colors all points of that value to match.
The convention on colors follows
\code{hilight}.
The \sQuote{boxplot} function plots boxplots of the \eqn{\mu} membership values
in the MFSO.
The \sQuote{thull} function drapes a tensioned hull for variable \sQuote{var}
over the plotted mfso.
}
\value{none}
\note{The plotting and highlighting routines for mfso are designed to match the
same routines for other ordinations in package \code{labdsv}.
}
\author{
David W. Roberts
\email{droberts@montana.edu}
}
\examples{
require(labdsv) # to obtain access to data sets and dissimilarity function
data(bryceveg) # vegetation data
data(brycesite) # environmental data
dis.bc <- dsvdis(bryceveg,'bray/curtis') # produce \sQuote{dist} object
demo.mfso <- mfso(~elev+slope+av,dis.bc,data=brycesite)
plot(demo.mfso)
\dontrun{hilight(demo.mfso,brycesite$quad) # requires interaction}
}
\keyword{hplot}
\keyword{aplot}
\keyword{iplot}
|
b66cda36bddd79b347e8612df75ae1ff6e914693
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FastHCS/examples/Tablets.Rd.R
|
f66c1a7812d2af9b1751178b346275a87de51a06
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
Tablets.Rd.R
|
# Example script from the FastHCS package documentation (Tablets dataset).
library(FastHCS)
### Name: Tablets
### Title: Near-infrared (NIR) spectroscopy of a sample of 310 tablets.
### Aliases: Tablets
### Keywords: datasets
### ** Examples
data(Tablets)
alpha<-0.5  # used below to derive eps for FHCSnumStarts and passed to FastHCS
Q<-15       # value for the q argument of FHCSnumStarts()/FastHCS()
p<-ncol(Tablets[,-1])  # count of data columns; column 1 is excluded here
ns<-FHCSnumStarts(q=Q,eps=(1-alpha)*4/5)  # number of random starts
RunExample<-FALSE  # guard flag: flip to TRUE to actually run the fit below
if(RunExample){
Fit<-FastHCS(x=Tablets[,-1],q=Q,nSamp=ns,seed=1,alpha=0.5)
colvec<-rep("orange",nrow(Tablets))
colvec[Tablets[,1]==1]<-"blue"  # color observations where column 1 equals 1
plot(Fit,col=colvec,pch=16)
}
|
2d98350e345bea04a2ad73befdf064d3dfa04ac4
|
88147e2bddc2add4f51b507dbf1eed86de849495
|
/vinetests/discretebvnvine-test.r
|
31899f4b2eecc76e7ddd456a94a1ec01694d8d0f
|
[] |
no_license
|
hoanguc3m/CopulaModel
|
39906379ed88d56f579851d45f157733c42bf926
|
1522b9a6476c5550736f652f902c61b3ac5e8fd3
|
refs/heads/master
| 2020-03-27T14:29:49.345584
| 2019-11-03T22:18:14
| 2019-11-03T22:18:14
| 146,665,628
| 2
| 0
| null | 2018-08-29T22:25:56
| 2018-08-29T22:25:56
| null |
UTF-8
|
R
| false
| false
| 2,709
|
r
|
discretebvnvine-test.r
|
# checks for discrete vine with bivariate Gaussian pair-copulas
# Regression-style checks: the expected numeric results are recorded in the
# comments immediately after each print()/nlm() call.
library(CopulaModel)
d=4
A=Dvinearray(d)
out=varray2M(A); M=out$mxarray
# cut points on the uniform scale, padded with near-0/near-1 boundaries
ucuts4=matrix(c(.4,.5,.4,.3, .7,.8,.6,.6),2,4,byrow=T)
ucuts=rbind(rep(0.00001,d),ucuts4,rep(.99999,d))
parvec=c(.5,.5,.5,.1,.1,0)
pr=rep(1/81,81)
out=f90rvineKL(parvec, ucuts, A, M, pr)
print(out)
# 0.3789605
# univariate ordinal probit fit on the example data, then map to uniform scores
data(ordinalex)
xvec=c(t(ordinalex$xx))
yvec=c(t(ordinalex$yy))
out=ordprobit.univar(xvec,yvec,iprint=F)
latentdat=mord2uu(xvec,yvec,nrep=4,out$cutpts,out$beta)
uudat=latentdat$uudat
zzdat=latentdat$zzdat
# negative log-likelihoods under C-vine and D-vine arrays
C4=Cvinearray(4)
D4=Dvinearray(4)
param=c(.5,.25,.5,.125,.25,.5)
out=varray2M(C4); MC4=out$mxarray
tem1z=rvinediscbvnnllk(param,zzdat,C4)
print(tem1z)
# 847.9132
out=varray2M(D4); MD4=out$mxarray
tem2z=rvinediscbvnnllk(param,zzdat,D4)
print(tem2z)
# 853.781
mlz1=nlm(rvinediscbvnnllk,p=param,zzdat=zzdat,A=C4,hessian=T,print.level=1)
#iteration = 8
#Parameter:
#[1] 0.36887825 0.16064335 0.02171784 0.44478355 0.34654634 0.35107517
#Function Value
#[1] 819.1659
#Gradient:
#[1] 2.169145e-04 5.229595e-05 -4.222329e-04 3.387868e-05 -1.826947e-04
#[6] 3.213927e-04
# full MLE including the covariate effects
fparam=c(-0.5285182,0.5041516,0.3642926,
0.37107109,0.46115563,0.43073832,0.03254656,0.20708160,-0.10396673)
fnllk=rvinediscbvnfullnllk(fparam,D4,xvec,yvec,nrep=4,ncateg=3)
print(fnllk)
# 818.8315
fmlz=nlm(rvinediscbvnfullnllk,p=fparam,xmat=xvec,yvec=yvec,A=D4,
nrep=4,ncateg=3, hessian=T,print.level=1)
#iteration = 9
#Parameter:
#[1] -0.51766478 0.51393623 0.39621155 0.37171336 0.46012778 0.43498099
#[7] 0.03577443 0.20344139 -0.10423191
#Function Value
#[1] 818.6813
#Gradient:
#[1] 1.733724e-04 -3.198011e-04 1.608669e-04 -6.298251e-05 2.953584e-04
#[6] 4.683898e-05 -1.675744e-04 -2.332854e-04 1.938361e-04
# standard errors from the inverse Hessian
hess=fmlz$hess
acov=solve(hess)
cat("SEs\n")
print(sqrt(diag(acov)))
#[1] 0.05762959 0.05737333 0.06379935 0.08012361 0.07086124 0.07365039 0.09240526
#[8] 0.08847101 0.09207092
# test example with second random covariate
set.seed(123)
xvec2=runif(800,-1,1)
xmat=cbind(xvec,xvec2)
fparam2=c(-0.5285182,0.5041516,0.3642926,0.1,
0.37107109,0.46115563,0.43073832,0.03254656,0.20708160,-0.10396673)
fnllk2=rvinediscbvnfullnllk(fparam2,D4,xmat,yvec,nrep=4,ncateg=3)
print(fnllk2)
# 818.649
fmlz2=nlm(rvinediscbvnfullnllk,p=fparam2,xmat=xmat,yvec=yvec,A=D4,
nrep=4,ncateg=3, hessian=T,print.level=1)
#iteration = 9
#Parameter:
# [1] -0.51758844 0.51461501 0.39734373 0.05966108 0.37206112 0.45438427
# [7] 0.43613973 0.03335408 0.21310673 -0.11407821
#Function Value
#[1] 818.2311
#Gradient:
# [1] -5.309175e-05 -1.242597e-04 5.036327e-05 -1.867875e-04 9.060841e-05
# [6] 1.548415e-04 2.330580e-05 1.496119e-04 -8.014922e-05 6.730261e-05
|
2158ee8807a11f860efda066acfe95d91f070a31
|
4cbaba16e08a77312782eaa95d587e055e9fdb75
|
/R/dexcarbon_play.R
|
0aa9f7e6965cf42f2cbab3ce01c5e5026d4c778b
|
[] |
no_license
|
schattle/forestcarbonmodel
|
a51b5615037b9636e219c797a410e0a5c71571d1
|
547c9d0cbbd7865d95e072705a6b784392c23a40
|
refs/heads/master
| 2023-04-21T23:33:04.839935
| 2021-05-10T18:13:14
| 2021-05-10T18:13:14
| 364,629,478
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 702
|
r
|
dexcarbon_play.R
|
#' Forest Carbon Growth
#'
#' Rate of change of forest carbon: exponential growth below the canopy
#' closure threshold, constant (linear) growth at or above it, and zero
#' growth once carbon exceeds the carrying capacity.
#'
#' @param time period of growth (unused; kept for ODE-solver-style signatures)
#' @param C current carbon stock
#' @param parms list with components:
#'   cc - canopy closure threshold;
#'   r  - base forest growth rate;
#'   K  - carrying capacity;
#'   g  - linear growth rate
#' @return a list whose single element is the change in carbon
dexcarbon_play = function(time, C, parms) {
  # compute rate of change of forest size when C is below canopy closure threshold
  dexcarbon = parms$r*C
  # set rate of change to parms$g if C is at or above threshold canopy closure
  # (bug fix: the original referenced a free variable `g`, not parms$g)
  dexcarbon = ifelse(C >= parms$cc, parms$g, dexcarbon)
  # set rate of change to 0 once carrying capacity (K) is exceeded
  dexcarbon = ifelse(C > parms$K, 0, dexcarbon)
  return(list(dexcarbon))
}
|
4ffe5ed66e4f5840e83ec8675c95469667216558
|
c247d6e7a9c838396bd1071d7ff95262e30679cf
|
/Scripts/11_postprocess.R
|
f8402642a26a228401657bc9a2e762bd0f88f4ee
|
[] |
no_license
|
ysLeePhD/LC_multimodality
|
b9565fe0a09cd63913dd44cbed22aa2ffe9be464
|
92a7389579a5a590783c6e7b0e67b67968cded76
|
refs/heads/master
| 2020-04-08T13:09:23.847182
| 2019-01-25T20:13:00
| 2019-01-25T20:13:00
| 159,377,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,078
|
r
|
11_postprocess.R
|
# Package setup -----------------------------------------------------------
# Bug fix: "RColorBrew" is not a package name; the intended package is
# "RColorBrewer" (it is the one attached with library() below).
# NOTE(review): unconditional install.packages() calls in a script re-install
# on every run -- consider guarding with requireNamespace() checks.
install.packages("tidyverse", dependencies = TRUE)
install.packages("RColorBrewer", dependencies = TRUE)
install.packages("tableone", dependencies = TRUE)
#install.packages("Matrix", dependencies = TRUE)
install.packages("survival", dependencies = TRUE)
install.packages("survey", dependencies = TRUE)

library(tidyverse)
library(RColorBrewer)
library(tableone)
library(grid)
library(Matrix)
library(survival)
library(bit)
library(bit64)
library(blob)
library(mitools)
library(RSQLite)
library(RODBC)
library(CompQuadForm)
library(survey)
#1. read the result of the latent class mplus model
# NOTE(review): absolute M:/ paths tie this script to one machine.
# run32_results.csv carries the posterior class probabilities prob1..prob4.
stat00 <- read.csv("M:/Millennial_CA/15_MC_multimodality/33_reMplus/run32_results.csv")
data11 <- read.csv("M:/Millennial_CA/15_MC_multimodality/33_reMplus/data11.csv")
#2. create new categorical variables
# also, reverse the log-transformation of commute distance and transit quality measure
# weighted class-membership probabilities: posterior probability * survey weight
stat00$wprob1 <- stat00$prob1*stat00$wt
stat00$wprob2 <- stat00$prob2*stat00$wt
stat00$wprob3 <- stat00$prob3*stat00$wt
stat00$wprob4 <- stat00$prob4*stat00$wt
stat00$PID <- stat00$id
colnames(stat00)
# keep PID, wprob1..wprob4 and the weight by position
# NOTE(review): positional indexing breaks if the CSV layout changes
stat01 <- stat00[, c(41, 37:40, 34)]
stat02 <- left_join(stat01, data11, by="PID")
colnames(stat02)
# telecommuting frequency: 0/1/2 recoded from the TeleFreq1/TeleFreq2 dummies
stat02$TeleFreq <- 0
stat02$TeleFreq <- ifelse(stat02$TeleFreq1==1, 1, stat02$TeleFreq)
stat02$TeleFreq <- ifelse(stat02$TeleFreq2==1, 2, stat02$TeleFreq)
stat02$TeleFreq <- factor(stat02$TeleFreq, labels=c("No", "Less than once a week", "At least once a week"), ordered=TRUE)
table(stat02$TeleFreq)
#stat02$TeleFreq0 <- NULL
#stat02$TeleFreq1 <- NULL
#stat02$TeleFreq2 <- NULL
# work/study status from mutually exclusive dummies
stat02$WorkStudyStat <- 0
stat02$WorkStudyStat <- ifelse(stat02$FTwork==1, 1, stat02$WorkStudyStat)
stat02$WorkStudyStat <- ifelse(stat02$PTwork==1, 2, stat02$WorkStudyStat)
stat02$WorkStudyStat <- ifelse(stat02$FTstudy==1, 3, stat02$WorkStudyStat)
stat02$WorkStudyStat <- ifelse(stat02$PTstudy==1, 4, stat02$WorkStudyStat)
stat02$WorkStudyStat <- factor(stat02$WorkStudyStat, labels=c("Unpaid work", "Work fulltime", "Work parttime",
                                                              "Study fulltime", "Study parttime"), ordered=TRUE)
table(stat02$WorkStudyStat)
#stat02$FTwork <- NULL
#stat02$PTwork <- NULL
#stat02$FTstudy <- NULL
#stat02$PTstudy <- NULL
# education level; ednoanswer overrides the base level 1
stat02$Education <- 1
stat02$Education <- ifelse(stat02$ednoanswer==1, 0, stat02$Education)
stat02$Education <- ifelse(stat02$somecoll==1, 2, stat02$Education)
stat02$Education <- ifelse(stat02$bachelor==1, 3, stat02$Education)
stat02$Education <- ifelse(stat02$graduate==1, 4, stat02$Education)
stat02$Education <- factor(stat02$Education, labels=c("Decline to answer", "Up to highschool", "Associate's degree",
                                                      "Bachelor's degree", "Graduate degree"), ordered=TRUE)
table(stat02$Education)
#stat02$somecoll <- NULL
#stat02$bachelor <- NULL
#stat02$graduate <- NULL
# household income bracket
stat02$HHincome <- 0
stat02$HHincome <- ifelse(stat02$lowhhinc==1, 1, stat02$HHincome)
stat02$HHincome <- ifelse(stat02$midhhinc==1, 2, stat02$HHincome)
stat02$HHincome <- ifelse(stat02$highhhinc==1, 3, stat02$HHincome)
stat02$HHincome <- factor(stat02$HHincome, labels=c("Decline to answer", "~$60,000",
                                                    "$60,001~$120,000", "More than $120,000"), ordered=TRUE)
table(stat02$HHincome)
# mode-rating items recoded as ordered factors
stat02$E1car_rating <- factor(stat02$E1car_rating, labels=c("Very bad", "Bad", "Neutral", "Good", "Very good"), ordered=TRUE)
table(stat02$E1car_rating)
stat02$E2pt_rating <- factor(stat02$E2pt_rating, labels=c("Very bad", "Bad", "Neutral", "Good", "Very good"), ordered=TRUE)
table(stat02$E2pt_rating)
stat02$E3at_rating <- factor(stat02$E3at_rating, labels=c("Very bad", "Bad", "Neutral", "Good", "Very good"), ordered=TRUE)
table(stat02$E3at_rating)
# neighborhood type from the five location dummies
stat02$NHtype <- 0
stat02$NHtype <- ifelse(stat02$ccity==1, 1, stat02$NHtype)
stat02$NHtype <- ifelse(stat02$urban==1, 2, stat02$NHtype)
stat02$NHtype <- ifelse(stat02$suburb==1, 3, stat02$NHtype)
stat02$NHtype <- ifelse(stat02$rural_in_urban==1, 4, stat02$NHtype)
stat02$NHtype <- ifelse(stat02$rural==1, 5, stat02$NHtype)
stat02$NHtype <- factor(stat02$NHtype, labels=c("Central city", "Urban", "Suburban", "Rural in urban", "Rural"), ordered=TRUE)
table(stat02$NHtype)
# undo ln(x+1)-style transforms: x = exp(lnx) - 1
stat02$ComDist <- exp(stat02$lndistance)-1
stat02$TQ2 <- exp(stat02$lnTQ2)-1
#3. create a long-formed table to compute weighted summary statistics
# one record per (person, class) pair with wprob as the analysis weight
length(colnames(stat02))
stat03a <- stat02[stat02$wprob1>0, c(1, 2, 6:length(colnames(stat02)))]
stat03a$wprob <- stat03a$wprob1
stat03a$wprob1<- NULL
stat03a$class <- 1
stat03b <- stat02[stat02$wprob2>0, c(1, 3, 6:length(colnames(stat02)))]
stat03b$wprob <- stat03b$wprob2
stat03b$wprob2<- NULL
stat03b$class <- 2
stat03c <- stat02[stat02$wprob3>0, c(1, 4, 6:length(colnames(stat02)))]
stat03c$wprob <- stat03c$wprob3
stat03c$wprob3<- NULL
stat03c$class <- 3
stat03d <- stat02[stat02$wprob4>0, c(1, 5, 6:length(colnames(stat02)))]
stat03d$wprob <- stat03d$wprob4
stat03d$wprob4<- NULL
stat03d$class <- 4
stat04 <- rbind(stat03a, stat03b)
stat04 <- rbind(stat04, stat03c)
stat04 <- rbind(stat04, stat03d)
colnames(stat04)
# NOTE(review): data03 and data01 are never created in this script; they are
# assumed to be in the workspace from an earlier script -- verify before running.
stat05 <- merge(stat04, data03[, c(1, 9, 7, 10)], by="PID")
stat06 <- merge(stat05, data01[, c(1, 14, 15)], by="PID")
stat06$Millennials <- ifelse(stat06$Millennials==2, 0, stat06$Millennials)
stat06$commute_total <- stat06$commute_drv + stat06$commute_carpassenger + stat06$commute_pt + stat06$commute_bikewalk
stat06$leisure_total <- stat06$leisure_drv + stat06$leisure_carpassenger + stat06$leisure_pt + stat06$leisure_bikewalk +
  stat06$leisure_emerging
# variables summarized in the descriptive table
xvars <- c("commute_drv", "commute_carpassenger", "commute_pt", "commute_bikewalk", "commute_total",
           "leisure_drv", "leisure_carpassenger", "leisure_pt", "leisure_bikewalk", "leisure_emerging", "leisure_total",
           "cdaypw", "ComDist", "TeleFreq", "withlicense", "carpadlt",
           "HHSize", "withParent", "withPartner", "withOwnChild", "withChild", "nchild",
           "WorkStudyStat", "Education", "HHincome",
           "Zpro_suburban_18F", "Zlong_term_suburbanite_18F", "Zmust_own_car_18F", "Zcar_as_atool_18F",
           "Zmaterialism_18F", "Ztech_embracing_18F", "Zinternet_smarthphone_lover_18F", "Zpro_env_policies_18F",
           "Ztime_mode_constrained_18F", "Zpro_exercise_18F", "ZA3p_likebike", "Zadv_var_seeker_18F",
           "Zstablished_in_life_18F", "Ztraditional_shopper_18F", "Zpro_social_18F", "ZA3f_uncomfortpeople",
           "E1car_rating", "E2pt_rating", "E3at_rating",
           "Activity_intensity", "Landuse_diversity", "TQ2",
           "Age", "Millennials", "wPTpass", "VMDpw", "pctCarAvail", "NHtype")
# survey-weighted descriptive table stratified by latent class
wt.table1 <- svydesign(ids = ~1, data = stat06, weights = ~wprob)
wt.table2 <- svyCreateTableOne(vars= xvars, strata = "class", data=wt.table1)
# https://www.rdocumentation.org/packages/tableone/versions/0.9.3/topics/svyCreateTableOne
print(wt.table2, contDigits = 3, catDigits = 3)
#4. create a chart showing the shares of four classes by age
colnames(stat02)
stat11 <- merge(stat02, data01[, c(1, 14, 15)], by="PID")
colnames(stat11)
# revise age 17 -> 18 and 51 -> 50
stat11$Ager <- stat11$Age
stat11$Ager <- ifelse(stat11$Age==17, 18, stat11$Ager)
stat11$Ager <- ifelse(stat11$Age==51, 50, stat11$Ager)
# scaffold table: 34 groups x 4 classes; WtProbSum is a random placeholder
# that the two loops below overwrite
pct_class_by_group <- data.frame(
  AgeNHgroup = "certain group",
  MultimodalClass = rep(c("Monomodal driver","Carpooler", "Active traveler", "Transit rider"), 34),
  # 18-22 to 46-50 (29 groups) + NHtype (5 groups)
  WtProbSum = abs(rnorm(136, 0, 1)),
  stringsAsFactors = FALSE)
head(pct_class_by_group)
sapply(pct_class_by_group, class)
# fill rows 1..116: five-year rolling age windows
for (i in 1:29) {
  temp00 <- stat11[stat11$Age>=i+17 & stat11$Age<i+22, ]
  a <- 4*(i-1)+1
  b <- 4*i
  pct_class_by_group[a:b, 1] <- paste0(as.character(i+17), "~", as.character(i+21))
  pct_class_by_group[a, 3] <- sum(temp00[, 5])/sum(temp00[, 2:5])
  pct_class_by_group[a+1, 3] <- sum(temp00[, 3])/sum(temp00[, 2:5])
  pct_class_by_group[a+2, 3] <- sum(temp00[, 2])/sum(temp00[, 2:5])
  pct_class_by_group[a+3, 3] <- sum(temp00[, 4])/sum(temp00[, 2:5])
}
# fill rows 117..136: shares by neighborhood type
NHtypeList <- c("Central city", "Urban", "Suburban", "Rural in urban", "Rural")
for (i in 1:5) {
  temp00 <- stat11[stat11$NHtype == NHtypeList[i], ]
  a <- 4*(i+28)+1
  b <- 4*(i+29)
  pct_class_by_group[a:b, 1] <- NHtypeList[i]
  pct_class_by_group[a, 3] <- sum(temp00[, 5])/sum(temp00[, 2:5])
  pct_class_by_group[a+1, 3] <- sum(temp00[, 3])/sum(temp00[, 2:5])
  pct_class_by_group[a+2, 3] <- sum(temp00[, 2])/sum(temp00[, 2:5])
  pct_class_by_group[a+3, 3] <- sum(temp00[, 4])/sum(temp00[, 2:5])
}
# percentage labels: label1 for all but the driver class, label2 for drivers
pct_class_by_group$label1 <- paste(round(pct_class_by_group$WtProbSum*100, digits=1), "%")
pct_class_by_group$label1 <- ifelse(pct_class_by_group$MultimodalClass=="Monomodal driver", "", pct_class_by_group$label1)
pct_class_by_group
pct_class_by_group$label2 <- paste(round(pct_class_by_group$WtProbSum*100, digits=1), "%")
pct_class_by_group$label2 <- ifelse(pct_class_by_group$MultimodalClass !="Monomodal driver", "", pct_class_by_group$label2)
pct_class_by_group
# https://www.r-graph-gallery.com/48-grouped-barplot-with-ggplot2/
# https://stackoverflow.com/questions/32345923/how-to-control-ordering-of-stacked-bar-chart-using-identity-on-ggplot2
# https://stackoverflow.com/questions/3606697/how-to-set-limits-for-axes-in-ggplot2-r-plots
# https://stackoverflow.com/questions/14622421/how-to-change-legend-title-in-ggplot
# http://www.sthda.com/english/wiki/ggplot2-legend-easy-steps-to-change-the-position-and-the-appearance-of-a-graph-legend-in-r-software
# https://stackoverflow.com/questions/1330989/rotating-and-spacing-axis-labels-in-ggplot2
# http://www.sthda.com/english/wiki/ggplot2-colors-how-to-change-colors-automatically-and-manually
# https://stackoverflow.com/questions/8750871/ggplot2-reverse-order-of-scale-brewer
# https://ggplot2.tidyverse.org/reference/geom_text.html
# https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
# stacked bar chart of class shares; rows 1:116 restricts to the age groups
ggplot(pct_class_by_group[1:116, ],
       aes(
         x=factor(AgeNHgroup,
                  levels=c("18~22", "19~23", "20~24", "21~25", "22~26", "23~27", "24~28", "25~29",
                           "26~30", "27~31", "28~32", "29~33", "30~34", "31~35", "32~36", "33~37",
                           "34~38", "35~39", "36~40", "37~41", "38~42", "39~43", "40~44", "41~45",
                           "42~46", "43~47", "44~48", "45~49", "46~50",
                           "Central city", "Urban", "Suburban", "Rural in urban", "Rural")),
         y=WtProbSum,
         fill=factor(MultimodalClass,
                     levels=c("Transit rider", "Active traveler", "Carpooler","Monomodal driver")),
         color=factor(MultimodalClass,
                      levels=c("Transit rider", "Active traveler", "Carpooler","Monomodal driver"))
       )) +
  geom_bar(stat="identity", position="fill") +
  coord_cartesian(ylim=c(0.7, 1)) +
  guides(fill=guide_legend(title="", reverse = TRUE)) +
  guides(color=FALSE) +
  ylab("") +
  xlab("") +
  theme_classic() +
  theme(legend.position="bottom",
        legend.margin=margin(0,0,0,0),
        legend.box.margin=margin(-20,0,00,0)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  scale_fill_brewer(palette = "YlGnBu", direction=1) + #scale_color_brewer(palette = "YlGnBu", direction=1) +
  scale_colour_manual(values = c("white", "white", "white", "white")) +
  geom_text(aes(label=label1), color = "black", position=position_stack(vjust=0.5), size=3.5, angle=90)+
  geom_text(aes(label=label2), color = "white", position=position_stack(vjust=0.95), size=3.5, angle=90) # colour="white", , fontface = "bold") +
# https://ggplot2.tidyverse.org/reference/ggsave.html
ggsave(file="C:/Users/ylee366/Dropbox (GaTech)/3a_ResearchCEE/02_Transp_LCA_multimodal/lca_by_age.jpg",
       units=c("in"), dpi=300)
#theme_classic()+ # remove the backgroud grey
# add millennials, gen Xers, and by neighborhood type
# x/y axis labels
# reorder the legend
# check if okay in b/w
|
f1c5ef68b08f56536a99b8225a06a5e3e3631c02
|
b4164d8023348a7491b73d21261e28c61c2a7353
|
/man/fars_map_state.Rd
|
d8abe03216043cd70266c3b9d4f918baccc45fc7
|
[] |
no_license
|
pez-espada/fars2
|
5a5cef3f3af2b7aec49abead39e7a6ce404dd2f5
|
1dd121cf29f4a82a52e59d78f223398c7b1efe1f
|
refs/heads/master
| 2021-04-28T16:42:59.086143
| 2018-02-22T20:16:34
| 2018-02-22T20:16:34
| 122,020,742
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 616
|
rd
|
fars_map_state.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars2.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Function `fars_map_state`: Plot a map with geolocated observations (cars accidents)}
\usage{
fars_map_state(state_num, year)
}
\description{
This functions takes a valid state_number and a valid year
and produces a map with geolocated observations (accidents)
plotted as dots on the map.
}
\details{
@param state_num valid state_number (between 1 and 51)
@param year valid year
@return A map with geolocated accidents as dots on the map
@examples
fars_map_state(51, 2015)
}
|
bdac3a48ee83bcc23d1451b38980aefce8495aac
|
9126d2396ce4536cfd934d9c2b09bb8511fa64a9
|
/man/getweight.Rd
|
21fbe9ab738e4d8ba2887e4e1aded1ed0f86a265
|
[] |
no_license
|
cran/RobAStBase
|
97d818333ca03d01c1eb1fa904ffddf8b8db88cb
|
63a5c3d20b6440e23f3c9787c0955bb7bf571854
|
refs/heads/master
| 2022-11-24T09:42:41.138259
| 2022-11-16T02:50:25
| 2022-11-16T02:50:25
| 17,693,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,578
|
rd
|
getweight.Rd
|
\name{getweight-methods}
\docType{methods}
\alias{getweight-methods}
\alias{getweight}
\alias{getweight,HampelWeight,ContNeighborhood,BiasType-method}
\alias{getweight,HampelWeight,ContNeighborhood,onesidedBias-method}
\alias{getweight,HampelWeight,ContNeighborhood,asymmetricBias-method}
\alias{getweight,BdStWeight,TotalVarNeighborhood,BiasType-method}
\alias{minbiasweight-methods}
\alias{minbiasweight}
\alias{minbiasweight,HampelWeight,ContNeighborhood,BiasType-method}
\alias{minbiasweight,HampelWeight,ContNeighborhood,onesidedBias-method}
\alias{minbiasweight,HampelWeight,ContNeighborhood,asymmetricBias-method}
\alias{minbiasweight,BdStWeight,TotalVarNeighborhood,BiasType-method}
\title{Generating weights}
\description{
Generates weight functions of Hampel / BdSt type for different bias and norm types.
}
\usage{
getweight(Weight, neighbor, biastype, ...)
minbiasweight(Weight, neighbor, biastype, ...)
\S4method{getweight}{HampelWeight,ContNeighborhood,BiasType}(Weight, neighbor, biastype, normW)
\S4method{minbiasweight}{HampelWeight,ContNeighborhood,BiasType}(Weight, neighbor, biastype, normW)
\S4method{getweight}{HampelWeight,ContNeighborhood,onesidedBias}(Weight, neighbor, biastype, ...)
\S4method{minbiasweight}{HampelWeight,ContNeighborhood,onesidedBias}(Weight, neighbor, biastype, ...)
\S4method{getweight}{HampelWeight,ContNeighborhood,asymmetricBias}(Weight, neighbor, biastype, ...)
\S4method{minbiasweight}{HampelWeight,ContNeighborhood,asymmetricBias}(Weight, neighbor, biastype, ...)
\S4method{getweight}{BdStWeight,TotalVarNeighborhood,BiasType}(Weight, neighbor, biastype, ...)
\S4method{minbiasweight}{BdStWeight,TotalVarNeighborhood,BiasType}(Weight, neighbor, biastype, ...)
}
\arguments{
\item{Weight}{ Object of class \code{"RobWeight"}. }
\item{neighbor}{ Object of class \code{"Neighborhood"}. }
\item{biastype}{ Object of class \code{"BiasType"}. }
\item{normW}{ Object of class \code{"NormType"} --- only for signature \code{HampelWeight,ContNeighborhood,BiasType}. }
\item{\dots}{possibly additional (unused) arguments --- like in a call to the less specific methods.}
}
%\details{}
\value{Object of class \code{"HampelWeight"} resp. \code{"BdStWeight"}}
\references{
Hampel et al. (1986) \emph{Robust Statistics}.
The Approach Based on Influence Functions. New York: Wiley.
Rieder, H. (1994) \emph{Robust Asymptotic Statistics}. New York: Springer.
Kohl, M. (2005) \emph{Numerical Contributions to the Asymptotic Theory of Robustness}.
Bayreuth: Dissertation.
}
\details{These functions generate the weight function in slot \code{weight} in a corresp.
object of class \code{RobWeight} and descendants.}
\section{Methods}{\describe{
\item{getweight}{\code{signature(Weight = "HampelWeight", neighbor = "ContNeighborhood",
biastype = "BiasType")} with additional argument \code{biastype} of class
\code{"BiasType"}: produces weight slot...}
\item{minbiasweight}{\code{signature(Weight = "HampelWeight", neighbor = "ContNeighborhood",
biastype = "BiasType")} with additional argument \code{biastype} of class
\code{"BiasType"}: produces weight slot...}
\item{getweight}{\code{signature(Weight = "HampelWeight", neighbor = "ContNeighborhood",
biastype = "onesidedBias")}: produces weight slot...}
\item{minbiasweight}{\code{signature(Weight = "HampelWeight", neighbor = "ContNeighborhood",
biastype = "onesidedBias")}: produces weight slot...}
\item{getweight}{\code{signature(Weight = "HampelWeight", neighbor = "ContNeighborhood",
biastype = "asymmetricBias")}: produces weight slot...}
\item{minbiasweight}{\code{signature(Weight = "HampelWeight", neighbor = "ContNeighborhood",
biastype = "asymmetricBias")}: produces weight slot...}
\item{getweight}{\code{signature(Weight = "BdStWeight", neighbor = "TotalVarNeighborhood",
biastype = "BiasType")}: produces weight slot...}
\item{minbiasweight}{\code{signature(Weight = "BdStWeight", neighbor = "TotalVarNeighborhood",
biastype = "BiasType")}: produces weight slot...}
}}
\author{Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de}}
%\note{}
\seealso{\code{\link{BdStWeight-class}},
\code{\link{HampelWeight-class}},
\code{\link{IC-class}}}
%\examples{}
\concept{influence curve}
\keyword{robust}
|
0433018882fe64b3974b2c95bbdd6d40229e2c96
|
16f145ea44c14f50f4ebb27b549602c588301ea2
|
/si_figure_move_history.R
|
46630ef1c87d322096bf2160cf747c28fe47a91c
|
[] |
no_license
|
aterui/public-proj_sc-disp
|
c69303e2f625946f953e6e9ad8c17dc402c56778
|
bbc480e28c982bf459a5c8801071f5457f841c56
|
refs/heads/master
| 2023-04-10T11:02:51.068102
| 2021-07-05T17:45:17
| 2021-07-05T17:45:17
| 172,434,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,142
|
r
|
si_figure_move_history.R
|
# setup -------------------------------------------------------------------
# NOTE(review): rm(list = ls()) in a script only clears the global
# environment (not loaded packages or options); kept as-is to preserve
# the script's existing behaviour.
rm(list = ls(all.names = TRUE))
pacman::p_load(tidyverse)
# Capture-recapture records; map the three species codes to display names.
dat <- read_csv('data_fmt/vector_data.csv') %>%
  mutate(Species = case_when(species == "BHC" ~ "Bluehead chub",
                             species == "CRC" ~ "Creek chub",
                             species == "STJ" ~ "Striped jumprock"))
# back & forth movement ---------------------------------------------------
set.seed(123)
## select individuals with repeated catch
# After summarize() the data remain grouped by Species (dplyr drops only
# the last grouping level), so sample_n(10) draws 10 tag IDs *per species*
# among individuals recorded on more than 5 occasions.
tag_id_rep <- dat %>%
  group_by(Species, tag_id) %>%
  summarize(n_release = n()) %>%
  filter(n_release > 5) %>%
  sample_n(10) %>%
  pull(tag_id)
# Movement history: section occupied at each sampling occasion, one
# point/line series per tagged fish, panelled by species.
g <- dat %>%
  filter(tag_id %in% tag_id_rep) %>%
  ggplot() +
  geom_point(aes(x = occasion, y = section_1, color = tag_id, shape = stream),
             alpha = 0.5, size = 3) +
  geom_line(aes(x = occasion, y = section_1, color = tag_id),
            alpha = 0.5) +
  facet_wrap(facets = ~ Species) +
  ylab("Section ID") +
  xlab("Period") +
  theme_bw() +
  theme(legend.position = "none")
print(g)
|
728a032a5d13a8dbfbc9cd364ffecd349f368171
|
18cd17e606c17ecdf02ba45ebf7d1bbaddaa40b5
|
/GWAS/gwas.preproc.R
|
c06a591e3e38006d1b94420df56f0596c092eac6
|
[] |
no_license
|
derekbeaton/Misc
|
1c6e8547827a18b67badee8008fa93c56771b026
|
eb7e2a29be38385891ae618f42da9abe24bce951
|
refs/heads/master
| 2022-04-13T08:36:14.350676
| 2020-04-04T15:01:45
| 2020-04-04T15:01:45
| 110,385,729
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,544
|
r
|
gwas.preproc.R
|
gwas.preproc <- function(data.in, geno = 0.1, mind = 0.1, maf = 0.05) {
  ## QC-style preprocessing of a genotype matrix (rows = subjects,
  ## columns = SNPs, entries = two-letter genotype strings such as "AB").
  ##
  ## Args:
  ##   data.in: character matrix of genotypes (NA = missing call).
  ##   geno:    max fraction of missing calls tolerated per SNP (column).
  ##   mind:    max fraction of missing calls tolerated per subject (row).
  ##   maf:     minor-allele-frequency threshold; a SNP is removed when
  ##            exactly one of its allele frequencies falls below this.
  ## Returns: the filtered genotype matrix (always kept 2-dimensional).

  ## Zeroth: drop monomorphic SNPs (fewer than 2 distinct observed
  ## genotypes). Fixed: the original used apply() guarded by is.list();
  ## when every column had the same number of unique genotypes apply()
  ## simplified its result and the filter was silently skipped.
  uniq.counts <- vapply(seq_len(ncol(data.in)), function(j) {
    x <- data.in[, j]
    length(unique(x[!is.na(x)]))
  }, integer(1))
  single.genos <- which(uniq.counts < 2)
  if (length(single.genos) >= 1) {
    ## drop = FALSE keeps the matrix 2-D even if one column remains
    ## (the original collapsed to a vector, breaking rowSums/col() below).
    data.in <- data.in[, -single.genos, drop = FALSE]
  }

  ## First: SNP (column) missingness.
  geno.missing <- which(colSums(is.na(data.in)) > nrow(data.in) * geno)
  if (length(geno.missing) >= 1) {
    data.in <- data.in[, -geno.missing, drop = FALSE]
  }

  ## Second: subject (row) missingness.
  mind.missing <- which(rowSums(is.na(data.in)) > ncol(data.in) * mind)
  if (length(mind.missing) >= 1) {
    data.in <- data.in[-mind.missing, , drop = FALSE]
  }

  ## Third: minor allele frequency. Split each genotype string into
  ## single-character alleles and tabulate allele counts per column
  ## (table() drops NAs); drop a SNP when exactly one allele frequency
  ## is below `maf`.
  ma <- tapply(data.in, col(data.in), function(x) {
    table(unlist(strsplit(as.vector(x), "")))
  }, simplify = FALSE)
  names(ma) <- colnames(data.in)
  mafreqs <- lapply(ma, function(x) x / sum(x))
  maf.drop <- which(vapply(mafreqs, function(x) sum(x < maf), integer(1)) == 1)
  if (length(maf.drop) >= 1) {
    data.in <- data.in[, -maf.drop, drop = FALSE]
  }

  ## (A commented-out fourth step in the original combined rare genotypes;
  ## it was dead code and is intentionally omitted here.)
  return(data.in)
}
|
4289dcfc3659dd6d70146c620499a052c3a91aa0
|
b9dec7b90284521e5ba4839397d0eddb0c3b5ea9
|
/fitChosenDists.R
|
d90ba9e89a60b537ee4f904fbbd1b4a275efd056
|
[] |
no_license
|
texmiller/IPM_size_transitions
|
524b373b32699c260ed539b1ea60b55d98b5d665
|
d4e6cfa09f543d694d4885506fcdb6172879f8c4
|
refs/heads/master
| 2023-08-23T17:02:01.933300
| 2023-08-21T03:48:44
| 2023-08-21T03:48:44
| 221,753,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,365
|
r
|
fitChosenDists.R
|
###########################################################################################
# Fit a gamlss distribution family to a set of values by maximum likelihood using MaxLik.
# y is the set of values
# DIST is the name of the distribution family (e.g., DIST = "JSU")
#
# The function can accommodate 2-, 3-, and 4-parameter gamlss distribution families
# (this covers all of the continuous gamlss families except exponential)
#
# The function takes advantage of the structure of a gamlss family, to specify start
# values for parameters based on the data, and to guarantee valid parameters through the
# use of the family's link and link-inverse functions. The multiple uses of the
# .linkfun and .lininv for distribution parameters are (sadly) necessary to make
# sure that the optimizer can send any real numbers as arguments to the likelihood
# function, and they translate to valid distribution parameters (e.g., sigma > 0).
#
# Maximization uses 10-fold multistart with jittered initial parameters, plus a final fit
# starting from the best of them. It's not entirely unreasonable to consider that it might
# be somewhat reliable, though 20 or 100 would be more reassuring.
#
# RETURNED VALUE is a maxLik() fit, with parameters on the family's link-transformed
# scale (e.g, typically log(sigma) rather than sigma).
#
###########################################################################################
gamlssMaxlik <- function(y,DIST) {
  # Fit each gamlss family named in DIST to the data y by maximum
  # likelihood (maxLik, BHHH), working on the link-transformed parameter
  # scale so the optimiser is unconstrained.
  # Returns list(out = one maxLik fit per family, aics = matching AICs).
  out <- list()
  aics <- numeric(length=length(DIST))
  for(d in 1:length(DIST)){
    fam = as.gamlss.family(DIST[d])
    n_par <- fam$nopar  # number of distribution parameters (2, 3 or 4)
    ## assume at least two parameters
    # Data-driven start values from the family's own initialisation
    # expressions, mapped to the link ("eta") scale.
    mu = mean(eval(fam$mu.initial, list(y = y)))
    sigma = mean(eval(fam$sigma.initial, list(y = y, mu = mu)))
    start=c(eta.mu=fam$mu.linkfun(mu),eta.sigma=fam$sigma.linkfun(sigma))
    ## and maybe a third
    if(n_par==3){
      nu=mean(eval(fam$nu.initial, list(y = y, mu = mu, sigma = sigma)))
      start=c(start,eta.nu=fam$nu.linkfun(nu))
    }
    ## and maybe a fourth
    if(n_par==4){
      nu=mean(eval(fam$nu.initial, list(y = y, mu = mu, sigma = sigma)))
      tau=mean(eval(fam$tau.initial, list(y = y, mu = mu, sigma = sigma, nu = nu)))
      start=c(start,eta.nu=fam$nu.linkfun(nu),eta.tau=fam$tau.linkfun(tau))
    }
    # Per-observation log-likelihood on the link scale; the link-inverse
    # calls guarantee valid distribution parameters for any real `pars`.
    LogLik1=function(pars,response){
      if(n_par==2) fun_args = list(x=response, mu=fam$mu.linkinv(pars[1]), sigma=fam$sigma.linkinv(pars[2]),log=TRUE)
      if(n_par==3) fun_args = list(x=response, mu=fam$mu.linkinv(pars[1]),
                                   sigma=fam$sigma.linkinv(pars[2]),
                                   nu=fam$nu.linkinv(pars[3]),log=TRUE)
      if(n_par==4) fun_args = list(x=response, mu=fam$mu.linkinv(pars[1]),
                                   sigma=fam$sigma.linkinv(pars[2]),
                                   nu=fam$nu.linkinv(pars[3]),
                                   tau=fam$tau.linkinv(pars[4]),log=TRUE)
      # Call the family's density function (e.g. dJSU) with log = TRUE.
      val = do.call(paste("d",DIST[d],sep=""),fun_args)
      return(val);
    }
    # Multistart: 40 fits from jittered start values; keep the best.
    # (The header comment above mentions 10-fold multistart; the code
    # actually uses 40 repetitions.)
    bestPars=numeric(n_par); bestMax=-10^17;
    for(jrep in 1:40) {
      startj = start*exp(0.1*rnorm(n_par));
      fit = maxLik(logLik=LogLik1,start=startj, response=y, method="BHHH",control=list(iterlim=5000,printLevel=0),
                   finalHessian=FALSE);
      if(fit$maximum==0) fit$maximum = -10^16; # failed fit
      if(fit$code>2) fit$maximum = -10^16; # failed fit
      if(fit$maximum > bestMax) {bestPars=fit$estimate; bestMax=fit$maximum; bestFit=fit;}
      cat(DIST[d],jrep,fit$maximum,"\n");
    }
    #fit = maxLik(logLik=LogLik1,start=bestPars, response=y, method="BHHH",control=list(iterlim=5000,printLevel=0),
    # finalHessian=FALSE);
    #if(fit$maximum==0) fit$maximum = -10^16; # failed fit
    #if(fit$code>2) fit$maximum = -10^16; # failed fit
    fit=bestFit;
    aics[d] <- 2*n_par - 2*fit$maximum  # AIC = 2k - 2*logLik
    out[[d]] <- fit
  }
  return(list(out=out,aics=aics));
}
if(FALSE) {
  # Manual smoke test (never executed automatically): parameter recovery
  # and model selection on simulated normal data.
  # NOTE(review): gamlssMaxlik() returns list(out = ..., aics = ...), so
  # the `$estimate` and `$AIC` accesses below do not match its return
  # value -- confirm/adjust before running this snippet by hand.
  ## can I recover parameters of a normal distribution?
  y = rNO(5000,mu=1,sigma=1)
  out.NO = gamlssMaxlik(y=y, DIST="NO")
  out.NO$estimate[1];exp(out.NO$estimate[2])
  # yes. does it favor a normal over other candidates? -- picking a few favorites
  out.JSU = gamlssMaxlik(y=y, DIST="JSU")
  out.SHASH = gamlssMaxlik(y=y, DIST="SHASH")
  out.ST1 = gamlssMaxlik(y=y, DIST="ST1")
  # yes, normal is selected
  out.NO$AIC;out.JSU$AIC;out.SHASH$AIC;out.ST1$AIC
}
|
80572315401ed7242359c624751ec388171f6d54
|
f5faeeb7362ac4a97a4a4d61d8320ff9079e2dd0
|
/models/sipnet/man/write.restart.SIPNET.Rd
|
e20e47f69faee5d708f5d87e5086978e41cc2649
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Viskari/pecan
|
0b1793d2994afac9f312369880862a2d3a408dc3
|
7489a97ef4d3deaf5b62a10a86a5ae14f859aaa8
|
refs/heads/master
| 2021-01-17T21:16:08.800080
| 2016-10-03T20:23:28
| 2016-10-03T20:23:28
| 28,244,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
rd
|
write.restart.SIPNET.Rd
|
\name{write.restart.SIPNET}
\alias{write.restart.SIPNET}
\title{write.restart.SIPNET}
\usage{
write.restart.SIPNET(out.dir, runid, time, settings, analysis.vec,
RENAME = TRUE, variables, sample_parameters = FALSE, trait.values = NA,
met)
}
\arguments{
\item{out.dir}{output directory}
\item{runid}{run ID}
\item{time}{year that is being read}
\item{settings}{PEcAn settings object}
\item{analysis.vec}{analysis vector}
\item{RENAME}{flag to either rename output file or not}
\item{variables}{}
\item{sample_parameters}{}
\item{trait.values}{}
\item{met}{}
}
\value{
NONE
}
\description{
Write restart files for SIPNET
}
\author{
Ann Raiho \email{araiho@nd.edu}
}
|
544094bf3d49987db2316a59f9d6e11f24ed4b5c
|
308b452178b83e533c6451aff9a320c9c03fd89e
|
/app_ui.R
|
970ee19b04ca73366f46cfd83d4c34ca506fe3fc
|
[
"MIT"
] |
permissive
|
jshifman/shiny-midwest-analysis
|
c7164412ebb95286bd95341e610ac26a0d1c2b64
|
0c75a4e32b56825a14786d51912c53b3dad7c40c
|
refs/heads/master
| 2023-07-21T14:19:46.438047
| 2020-03-08T06:46:14
| 2020-03-08T06:46:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
app_ui.R
|
library(shiny)
library(ggplot2)  # supplies the `midwest` dataset used for the choices
library(plotly)

# Scatter-plot axis choices: every column of ggplot2's `midwest` data.
selected_values1 <- colnames(midwest)

# Column names for bar chart data
selected_values2 <- c(
  "avg_percwhite",
  "avg_percblack",
  "avg_percamerindan",
  "avg_percasian",
  "avg_percother",
  "avg_percbelowpoverty",
  "avg_percchildbelowpovert"
)

# Scatter-plot axis selectors.
x_input <- selectInput(
  "xaxis",
  label = "X-Axis Value",
  choices = selected_values1,
  selected = "poptotal"
)
y_input <- selectInput(
  "yaxis",
  label = "Y-Axis Value",
  choices = selected_values1,
  selected = "percwhite"
)

# Bar chart y-axis value
y_input2 <- selectInput(
  "yaxis2",
  label = "Y-Axis Value",
  choices = selected_values2,
  selected = "avg_percbelowpoverty"
)

size_input <- sliderInput(
  "size",
  label = "Size of point", min = 1, max = 10, value = 5
)

color_input <- selectInput(
  "color",
  label = "Color",
  choices = list("Red" = "red", "Blue" = "blue", "Green" = "green")
)

# Minimum-population filter for the bar chart.
# Fixed: removed a trailing comma after `step`, which passed an empty
# (missing) argument into sliderInput().
population_input <- sliderInput(
  "population",
  label = "Minimum Population",
  min = 1700,
  max = 5000000,
  value = 200000,
  step = 100000
)

# Scatter plot tab
tab1 <- tabPanel(
  "Scatter plot",
  h1("A6 Homework"),
  sidebarLayout(
    sidebarPanel(
      x_input,
      y_input,
      size_input
    ),
    mainPanel(
      plotlyOutput("scatter")
    )
  )
)

# Bar plot tab
# Fixed: removed a trailing comma after color_input -- it passed an empty
# argument into sidebarPanel()'s `...`, which errors ("argument is empty")
# when the tag list is built.
tab2 <- tabPanel(
  "Bar Graph",
  h1("A6 Homework"),
  sidebarLayout(
    sidebarPanel(
      y_input2,
      population_input,
      color_input
    ),
    mainPanel(
      plotlyOutput("bar")
    )
  )
)

# Top-level UI: navigation bar holding the two tabs.
ui <- navbarPage(
  "Graphs",
  tab1,
  tab2
)
|
008a965dc57d002737be0aa0d237506b61b24caf
|
3659f55c5add532a015324e4efb26874b34d51db
|
/NNEvaluation.R
|
b7dd6a4665d435c8530df96b636d7b29f09e0597
|
[] |
no_license
|
arifkhaan/5GWF2020
|
75c53fe6367fbbb83e928ff1c73f7a1327c6c45f
|
7984a4ed1ecd8fd7746624d4f469e0265a765d19
|
refs/heads/master
| 2023-04-23T09:51:03.038126
| 2021-05-08T10:55:51
| 2021-05-08T10:55:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,440
|
r
|
NNEvaluation.R
|
##Evaluate the neural network. Calculate the nearest-BS association coverage probability using the stochastic geometry derivations using our prior papers
##Then use simulations to evaluate the trained neural network, and the strongest omnidirectional SINR association
rm(list=ls())
library(spatstat)
library(VGAM)
library(hypergeo)
library(keras)
library(parallel)
library(matlab)
source("MiscFunctions.R")
source("ClosedFormFunctions.R")
#number of MC trials and cores to use for the calculation
MCtrials = 5000
cores=16
# alpha/beta/gamma: environment parameters forwarded to the closed-form
# coverage calculation below (presumably the LOS-probability / blockage
# model -- confirm against ClosedFormFunctions.R).
alpha = 0.5
beta = 300
gamma = 20
#building parameters
buildDens = 300/(1000^2)           # buildings per m^2 (300 per km^2)
buildWidth = 40                    # building side length (m)
buildR = buildWidth/(2*sin(pi/4))  # circumscribed radius of a square footprint
heightParam = 20                   # Rayleigh scale for building heights
#UAV velocity
velocity = 10
#number of antenna elements
Nt= 8
BSh = 30                           # base-station antenna height (m)
windowWidth = 5000                 # simulation window side length (m)
BHdensity = 5/(1000^2)             # BS density per m^2 (5 per km^2)
#UAVdensity = 25/(1000^2)#5*BSdensity
skipN = 1
UAVBHBW = pi*1/4                   # UAV antenna beamwidth (rad)
#coverage threshold
Tu = 0                             # SINR threshold(s), dB (vectorisable)
#channel parameters, noise power, BS transmit power
al = 2.1                           # LOS pathloss exponent
an = 4                             # NLOS pathloss exponent
mal = 1                            # LOS fading parameter (Nakagami m -- see ClosedFormFunctions.R)
man = 1                            # NLOS fading parameter
BStx = 40                          # BS transmit power (linear units; multiplied directly)
#carrier frequency
Freq = 2*10^9
#noise power
# Thermal noise floor: -174 dBm/Hz over 20 MHz plus a 10 dB noise figure,
# then converted from dBm to watts on the next line.
N = -174+10*log10(20*10^6)+10
N = 10^(N/10)/1000
#BS antenna downtilt
BHtilt=-10                         # likely degrees -- confirm against MCCovProbRamyNakApprox
#Raytracing function for checking LOS
# Tests line-of-sight between a point (x0, y0) at height h and a point
# (x, y) at height BSh against a set of square buildings.
#   buildings: spatstat point pattern of building centres
#   buildR:    circumscribed radius of a building footprint
#   buildH:    building heights, parallel to `buildings`
# Returns TRUE when no building blocks the direct ray, FALSE otherwise.
isLOS = function(buildings,buildR,buildH,x0,y0,x,y,h,BSh){
  angle = atan2((y-y0),(x-x0))
  dist = sqrt((x-x0)^2+(y-y0)^2)
  # Translate so (x0, y0) is the origin, then rotate so the target lies
  # on the positive x-axis; the ray becomes the segment [0, dist] there.
  build = buildings
  build = shift(build,c(-x0,-y0))
  build = rotate(build,angle=-angle)
  buildX = build$x
  buildY = build$y
  # Keep only buildings whose centre falls between the two endpoints...
  foo = which(buildX<dist)
  buildX = buildX[foo]
  buildY = buildY[foo]
  buildH = buildH[foo]
  foo = which(buildX>0)
  buildX = buildX[foo]
  buildY = buildY[foo]
  buildH = buildH[foo]
  # ...and within one building radius of the ray in the y-direction.
  foo = which(abs(buildY)<=buildR)
  buildX = buildX[foo]
  buildY = buildY[foo]
  buildH = buildH[foo]
  # A building blocks the ray when it is taller than the ray's height at
  # its x-position.
  # NOTE(review): the ray height is anchored at min(BSh, h) at the origin
  # and rises with x; this matches the straight ray only when the origin
  # end is the lower one (h <= BSh). Confirm intended behaviour for h > BSh.
  foo = buildH>((abs(h-BSh)*(buildX/dist)+min(BSh,h)))
  if(length(which(foo==TRUE))>0){
    return(FALSE)
  }
  else{
    return(TRUE)
  }
}
# Array gain of the Nt-element BS antenna towards a receiver at ground
# distance r and height h (BS antenna at height BSh), scaled by the
# transmit power BStx and free-space constant K (both script globals).
antennaGain = function(r,BSh,h,Nt){
  # Elevation of the BS antenna as seen from the receiver.
  theta <- atan2(BSh - h, r)
  u <- pi * sin(theta) / 2
  # Uniform-linear-array factor, normalised by the element count.
  pattern <- (sin(Nt * u)^2) / (Nt * sin(u)^2)
  pattern * BStx * K
}
maxSINR = 10.45007
# Normalise the neural-network input features exactly as was done when
# the network was trained: scaled received powers, interference scores,
# candidate distances, and the UAV height.
normaliseData <- function(P,Int,Dist,h){
  # Min-max scale a vector to [0, 1]; left untouched when its maximum is
  # not positive (same guard as the training-time preprocessing).
  scale01 <- function(v) {
    lo <- min(v)
    hi <- max(v)
    if (hi > 0) {
      (v - lo) / (hi - lo)
    } else {
      v
    }
  }
  # Received powers: decades above the weakest observed signal, then scaled.
  P <- log10(P / min(P))
  P <- scale01(P)
  # Interference score and candidate distances: plain min-max scaling.
  Int <- scale01(Int)
  Dist <- scale01(Dist)
  # Height normalised by 300 (matching the training pipeline).
  c(P, Int, Dist, h / 300)
}
#generate the model
# Rebuild the training architecture exactly (221 inputs -> two 500-unit
# ReLU layers -> 10-unit softmax); keras layer calls modify `model`
# in place, so the pipe's return value is not reassigned.
model <- keras_model_sequential()
model %>%
  layer_dense(units=221,input_shape = c(221)) %>%
  layer_dense(units = 500, activation = 'relu') %>%
  layer_dense(units = 500, activation = 'relu') %>%
  layer_dense(units = 10, activation = 'softmax')
model %>% compile(
  optimizer = 'adamax',
  loss = 'categorical_crossentropy',
  metrics = c('accuracy')
)
#load the weights from the training script
# The architecture above must match NNweights.h5 exactly or loading fails.
model %>% load_model_weights_hdf5("NNweights.h5")
#testing the model performance
# Result holders. Note: `h` is deliberately redefined with different step
# sizes -- by=10 (coarse grid) for the Monte-Carlo runs and by=6 (finer
# grid) for the analytical closed-form curve.
h=seq(from=20,to=200,by=10)
covProb= zeros(nrow=length(h),ncol=length(Tu))          # strongest-SINR association (MC)
covProbnearest = zeros(nrow=length(h),ncol=length(Tu))  # nearest-BS association (MC)
covProbmulti= zeros(nrow=length(h),ncol=length(Tu))
covProbML= zeros(nrow=length(h),ncol=length(Tu))        # neural-network association (MC)
covProbMLmulti= zeros(nrow=length(h),ncol=length(Tu))
h=seq(from=20,to=200,by=6)
CFcoverageProb = zeros(nrow=length(h),ncol=length(Tu))  # analytical result
CFcoverageProbMulti = zeros(nrow=length(h),ncol=length(Tu))
# nthBS[j, i]: fraction of trials where the NN picked the i-th closest BS.
# (Sized on the by=6 grid although the MC loop later uses the by=10 grid,
# so only its first rows get filled.)
nthBS = zeros(nrow=length(h),ncol=10)
h=seq(from=20,to=200,by=6)
#calculate nearest distance association using the analytical results
#we calculate it over a range of heights
for(j in 1:length(h)){
  for(l in 1:length(Tu)){ #this code will let us also calculate over a range of coverage thresholds (not used in these results)
    print(h[j])
    # UAV antenna gain from its beamwidth, and the free-space constant.
    UAVBHgain = 4*pi/((UAVBHBW/2)^2)
    K = (((3*10^8)/Freq)/(4*pi))^2
    CFcoverageProb[j,l]=MCCovProbRamyNakApprox(rxh=h[j],txh=BSh,angle=UAVBHBW,rmax=windowWidth/2,density=BHdensity,uptilt=BHtilt,T=Tu[l],gain=BStx*UAVBHgain*K,Nt=Nt,al=al,an=an,mal=mal,man=man,alpha=alpha,beta=beta,gamma=gamma,dx=1000,cores=cores)
    # Checkpoint after every height so partial results survive interruption.
    save(CFcoverageProb,covProb,covProbML,h,file="HeightEvaluation.RData")
  }
}
# Switch back to the coarser height grid for the Monte-Carlo section.
h=seq(from=20,to=200,by=10)
##UAV antenna gain
UAVBHgain = 4*pi/((UAVBHBW/2)^2)
K = (((3*10^8)/Freq)/(4*pi))^2
for(j in 1:(length(h))){
for(l in 1:length(Tu)){#this code will let us also calculate over a range of coverage thresholds (not used in these results)
print(h[j])
if(h[j]==30){h[j]=30.1} #if the UAV height is exactly equal to the BS height this causes an error in the calculation. I just offset the UAV height by a tiny amount of get around this
mcS =zeros(nrow=MCtrials,ncol=10)
mcInt =zeros(nrow=MCtrials,ncol=200)
mcDist =zeros(nrow=MCtrials,ncol=10)
mcCov =zeros(nrow=MCtrials,ncol=10)
mcbestSINR =zeros(nrow=MCtrials,ncol=1)
mcnearestSINR =zeros(nrow=MCtrials,ncol=1)
cPm= vector(length=MCtrials)
cPmn = vector(length=MCtrials)
cPmML= vector(length=MCtrials)
whichNthBS = vector(length=MCtrials)
uavx= 0
uavy=0
num = vector(length=MCtrials)
BScand = 10
BScandi = 20
#generate MC trials for the NN
iteration = function(m){
if(m%%10000==0){
print(m)
}
##window in which we simulate BS distribution
sWindow = owin(xrange=c(-windowWidth/2,windowWidth/2),yrange=c(-windowWidth/2,windowWidth/2))
gen=FALSE
while(gen==FALSE){
BHppp = rpoispp(lambda=BHdensity,win=sWindow)
if(BHppp$n>BScand){
gen=TRUE
}
}
buildings = gridcenters(window=sWindow,nx=floor(sqrt(buildDens*(5000^2))),ny=ceil(sqrt(buildDens*(5000^2))))#rpoispp(lambda=buildDens,win=windowBS)
build = rpoispp(lambda=buildDens,win=sWindow)
build$x = buildings$x
build$y = buildings$y
build$n = length(buildings$x)
buildings = build
buildH = rrayleigh(n=buildings$n,scale=heightParam)
##which network a given BS belongs to
##Note, I wrote this code for multi-network scenarios where the UAV can choose from several different operator networks
##I've decided to drop that in the paper we're writing as it gives us nothing of value. In the paper it's just one network
##Nonetheless the dataset and the resulting neural network is capable of distinguishing between different networks, we simply don't use that function in the simulations
whichNetwork=ones(nrow=BHppp$n,ncol=1)#floor(runif(n=BHppp$n,min=1,max=4))
LOS = vector(length=BHppp$n)
measuredpower = vector(length=BHppp$n)
load = round(runif(n=BScand,min=0,max=100)) ##the number of RBs that are in use for a BS (we don't use this in the current version)
connectedto=floor(runif(n=1,min=1,max=(BScand+1))) #the UAV is connected to one of the BScand closest BSs (so it then decides whether to stay connected or change)
numInterferers = zeros(nrow=BScand,ncol=BScandi)
distances = vector(length=BScand)
cov = vector(length=BScand)
#for each BS get the distance, channel type (LOS/NLOS) and the signal power received by the omnidirectional antenna
rdist = vector(length=BHppp$n)
for(i in 1:BHppp$n){
foo = runif(n=1,min=0,max=1)
if(isLOS(buildings=buildings,buildR=buildR,buildH=buildH,x0=0,y0=0,x=BHppp$x[i],y=BHppp$y[i],h=h[j],BSh=BSh)){
LOS[i]=TRUE
}
else{LOS[i]=FALSE
}
angle = (atan2(BSh-h[j],sqrt((BHppp$x[i])^2+(BHppp$y[i])^2)))
rdist[i] = (sqrt((BHppp$x[i])^2+(BHppp$y[i])^2))
g=(1/Nt)*((sin(Nt*pi*(sin(angle))/2)^2)/(sin(pi*(sin(angle))/2)^2))
g = g*BStx*K
if(LOS[i]==TRUE){
measuredpower[i]=g*(sqrt((BHppp$x[i])^2+(BHppp$y[i])^2+(BSh-h[j])^2))^(-al)
}else{measuredpower[i]=g*(sqrt((BHppp$x[i])^2+(BHppp$y[i])^2+(BSh-h[j])^2))^(-an)}
}
#now get the SINR for each BS
oSINR = vector(length=BHppp$n)
for(i in 1:BHppp$n){
foo = 1:BHppp$n
foo = foo[foo!=i]
oSINR[i] = measuredpower[i]/(sum(measuredpower[foo])+N)
}
iBSBHHeight = vector(length=BHppp$n)
iBSBHHeight[LOS==TRUE]=0
iBSBHHeight[LOS==FALSE]=Inf
order =order(rdist,decreasing=FALSE)
achieveableRate = vector(length=BScand)
SINR = vector(length=BScand)
RBsneeded = vector(length=BScand)
##get the rate that would be achieved from each BS through the directional antenna
for(i in 1:BScand){
BHdist = sqrt((BHppp$x)^2+(BHppp$y)^2)
BHBS = c(BHppp$x[order[i]],BHppp$y[order[i]])
ind = order[i]
BHint = 1:BHppp$n
BHint = BHint[BHint!=ind] #indices of interfering BSs
BSdist = BHdist[ind] #distance to serving BS
BHdist = BHdist[BHint] #distances to interfering BSs
distances[i]=BSdist/1000
hopt = h[j]
angle = atan2(hopt-BSh,BSdist)
#exclude the BSs that are outside the antenna radiation lobe
if(UAVBHBW<pi/2){
if((angle < (pi/2-(UAVBHBW/2))) && (angle > (UAVBHBW/2))){
uthreshold = (hopt-BSh)/tan(angle-(UAVBHBW/2))
}
else if(angle>(pi/2-(UAVBHBW/2))){
uthreshold = (hopt-BSh)/tan(pi/2 - UAVBHBW)
}
else{
uthreshold = windowWidth
}
}
else{uthreshold = windowWidth}
if(angle<(pi/2-UAVBHBW/2)){
lthreshold = (hopt-BSh)/tan(angle+(UAVBHBW/2))
}else{lthreshold=0}
BHint = BHint[find(BHdist<=uthreshold)]
BHdist = BHdist[BHdist<=uthreshold]
BHint = BHint[find(BHdist>=lthreshold)]
BHdist = BHdist[BHdist>=lthreshold]
BHint = getInt2(x=c(0,0),int=BHint,BHBS=BHBS,grid=BHppp,UAVBHBW=UAVBHBW)
BHH=iBSBHHeight[BHint]
BHint = cbind(BHppp$x[BHint],BHppp$y[BHint])
#store the distances of the BSi closest interfering BSs in the numInterfers matrix
if(length(BHint)>0){
iD = sqrt((BHint[,1]/1000)^2+(BHint[,2]/1000)^2)
f = order(iD,decreasing=FALSE)
for(k in 1:min(BScandi,length(f))){
numInterferers[i,k] = 1/(iD[f[k]])
}
}
# numInterferers[i] = sum(1/sqrt((BHint[,1]/1000)^2+(BHint[,2]/1000)^2))
##get get the observed omnidirectional SINR with averaged out multipath effects (as the UAV is assumed to measure the values over a period of time and cancel out the small scale fading effects)
specEff = getDualRateRamyAntenna(x=c(0,0,hopt),BHBS=BHBS,BSh=BSh,withFading=FALSE,LOS=LOS[ind],iBSBH=cbind(BHint[,1],BHint[,2]),BHtilt=BHtilt,iBSBHHeight=BHH,Nt=Nt,al=al,an=an,mal=mal,man=man,PWRgain=BStx*UAVBHgain*K,N=N,alpha=alpha,beta=beta,gamma=gamma)
RBsneeded[i] = 0#for resource blocks if we were considering throughput with RBs, can ignore
SINR[i]=2^(specEff)-1
#get the instantaneous SINR (which IS affected by multipath effects) and which determines our coverage probability
specEff = getDualRateRamyAntenna(x=c(0,0,hopt),BHBS=BHBS,BSh=BSh,withFading=TRUE,LOS=LOS[ind],iBSBH=cbind(BHint[,1],BHint[,2]),BHtilt=BHtilt,iBSBHHeight=BHH,Nt=Nt,al=al,an=an,mal=mal,man=man,PWRgain=BStx*UAVBHgain*K,N=N,alpha=alpha,beta=beta,gamma=gamma)
RBsneeded[i] = 0#
cov[i]=2^(specEff)-1
}
order =order(rdist,decreasing=FALSE)
foo = 1:BScand
##return the observations for the 5 BSs, and label of which BS has the highest directional SINR
##note that we're storing the resource block load metrics as well and the network the BS belongs to, even though we don't use those parameters
return(c(measuredpower[order[foo]],as.vector(t(numInterferers)),distances[foo],cov[foo],cov[1]))
}
#run the above function in multi-core over many iterations
X=1:MCtrials
opt = mclapply(X=X,FUN=iteration,mc.cores=cores)
#as above, but this time we're generating MC results for the strongest BS association case
iteration = function(m){
if(m%%10000==0){
print(m)
}
##window in which we simulate BS distribution
sWindow = owin(xrange=c(-windowWidth/2,windowWidth/2),yrange=c(-windowWidth/2,windowWidth/2))
gen=FALSE
while(gen==FALSE){
BHppp = rpoispp(lambda=BHdensity,win=sWindow)
if(BHppp$n>BScand){
gen=TRUE
}
}
buildings = gridcenters(window=sWindow,nx=floor(sqrt(buildDens*(5000^2))),ny=ceil(sqrt(buildDens*(5000^2))))#rpoispp(lambda=buildDens,win=windowBS)
build = rpoispp(lambda=buildDens,win=sWindow)
build$x = buildings$x
build$y = buildings$y
build$n = length(buildings$x)
buildings = build
buildH = rrayleigh(n=buildings$n,scale=heightParam)
##which network a given BS belongs to
##Note, I wrote this code for multi-network scenarios where the UAV can choose from several different operator networks
##I've decided to drop that in the paper we're writing as it gives us nothing of value. In the paper it's just one network
##Nonetheless the dataset and the resulting neural network is capable of distinguishing between different networks, we simply don't use that function in the simulations
whichNetwork=ones(nrow=BHppp$n,ncol=1)
LOS = vector(length=BHppp$n)
measuredpower = vector(length=BHppp$n)
load = round(runif(n=BScand,min=0,max=100)) ##the number of RBs that are in use for a BS (we don't use this in the current version)
connectedto=floor(runif(n=1,min=1,max=(BScand+1))) #the UAV is connected to one of the BScand closest BSs (so it then decides whether to stay connected or change)
softhandoverpenalty=runif(n=1,min=0.1,max=1) #soft handover penalty (intra-operator)
hardhandoverpenalty = runif(n=1,min=0,max=softhandoverpenalty) #penalty for inter-operator handover
numInterferers = zeros(nrow=BScand,ncol=BScand)
distances = vector(length=BScand)
cov = vector(length=BScand)
#for each BS get the distance, channel type (LOS/NLOS) and the signal power received by the omnidirectional antenna
rdist = vector(length=BHppp$n)
for(i in 1:BHppp$n){
foo = runif(n=1,min=0,max=1)
if(isLOS(buildings=buildings,buildR=buildR,buildH=buildH,x0=0,y0=0,x=BHppp$x[i],y=BHppp$y[i],h=h[j],BSh=BSh)){
LOS[i]=TRUE
}
else{LOS[i]=FALSE
}
angle = (atan2(BSh-h[j],sqrt((BHppp$x[i])^2+(BHppp$y[i])^2)))
rdist[i] = (sqrt((BHppp$x[i])^2+(BHppp$y[i])^2))
g=(1/Nt)*((sin(Nt*pi*(sin(angle))/2)^2)/(sin(pi*(sin(angle))/2)^2))
g = g*BStx*K
if(LOS[i]==TRUE){
measuredpower[i]=g*(sqrt((BHppp$x[i])^2+(BHppp$y[i])^2+(BSh-h[j])^2))^(-al)
}else{measuredpower[i]=g*(sqrt((BHppp$x[i])^2+(BHppp$y[i])^2+(BSh-h[j])^2))^(-an)}
}
#now get the SINR for each BS
oSINR = vector(length=BHppp$n)
for(i in 1:BHppp$n){
foo = 1:BHppp$n
foo = foo[foo!=i]
oSINR[i] = measuredpower[i]/(sum(measuredpower[foo])+N)
}
iBSBHHeight = vector(length=BHppp$n)
iBSBHHeight[LOS==TRUE]=0
iBSBHHeight[LOS==FALSE]=Inf
# order =order(rdist,decreasing=FALSE)
achieveableRate = vector(length=BScand)
SINR = vector(length=BScand)
RBsneeded = vector(length=BScand)
#determine which BS has the strongest omnidirectional SINR as connect UAV to that
order =which.max(oSINR)
BHdist = sqrt((BHppp$x)^2+(BHppp$y)^2)
BHBS = c(BHppp$x[order],BHppp$y[order])
ind = order
BHint = 1:BHppp$n
BHint = BHint[BHint!=ind]
BSdist = BHdist[ind]
BHdist = BHdist[BHint]
hopt = h[j]
angle = atan2(hopt-BSh,BSdist)
#exclude the BSs that are outside the antenna radiation lobe
if(UAVBHBW<pi/2){
if((angle < (pi/2-(UAVBHBW/2))) && (angle > (UAVBHBW/2))){
uthreshold = (hopt-BSh)/tan(angle-(UAVBHBW/2))
}
else if(angle>(pi/2-(UAVBHBW/2))){
uthreshold = (hopt-BSh)/tan(pi/2 - UAVBHBW)
}
else{
uthreshold = windowWidth
}
}
else{uthreshold = windowWidth}
if(angle<(pi/2-UAVBHBW/2)){
lthreshold = (hopt-BSh)/tan(angle+(UAVBHBW/2))
}else{lthreshold=0}
BHint = BHint[find(BHdist<=uthreshold)]
BHdist = BHdist[BHdist<=uthreshold]
BHint = BHint[find(BHdist>=lthreshold)]
BHdist = BHdist[BHdist>=lthreshold]
BHint = getInt2(x=c(0,0),int=BHint,BHBS=BHBS,grid=BHppp,UAVBHBW=UAVBHBW)
BHH=iBSBHHeight[BHint]
BHint = cbind(BHppp$x[BHint],BHppp$y[BHint])
##get the directional SINR of the BS with the strongest omnidirectional SINR
specEff = getDualRateRamyAntenna(x=c(0,0,hopt),BHBS=BHBS,BSh=BSh,withFading=TRUE,LOS=LOS[ind],iBSBH=cbind(BHint[,1],BHint[,2]),BHtilt=BHtilt,iBSBHHeight=BHH,Nt=Nt,al=al,an=an,mal=mal,man=man,PWRgain=BStx*UAVBHgain*K,N=N,alpha=alpha,beta=beta,gamma=gamma)
strongestSINR=2^(specEff)-1
# order =order(rdist,decreasing=FALSE)
# foo = 1:BScand
#return the strongest SINR value
return(c(strongestSINR))
}
X=1:MCtrials
opt2 = mclapply(X=X,FUN=iteration,mc.cores=cores)
mP = 1:(BScand)
Int = (mP[BScand]+1):(mP[BScand]+BScand*BScandi)
Distances = (Int[BScand*BScandi]+1):(Int[BScand*BScandi]+BScand)
C = (Distances[BScand]+1):(Distances[BScand]+BScand)
sSINR = C[BScand]+1
# nSINR = C[BScand]+2
for(k in 1:MCtrials){
mcS[k,]=opt[[k]][mP] #observed RSRP
mcInt[k,]=opt[[k]][Int] #observed int distances
mcDist[k,]=opt[[k]][Distances] #candidate BS distances
mcCov[k,] =opt[[k]][C] #the corresponding directional SINR values
mcbestSINR[k] =opt2[[k]] #the vector of the SINR values for the strongest oSINR association
mcnearestSINR[k] =opt[[k]][sSINR] #nearest SINR values (not used here, because we use the )
}
#Now we go through the iterations and find the coverage probability, for the dumb strongest SINR association, and the neural network result
associated=0
associatedML=0
pastSINRML = 0
for(m in 1:MCtrials){
if(mcbestSINR[m]>10^(Tu[l]/10)){cPm[m]=TRUE}
else{cPm[m]=FALSE}
if(mcnearestSINR[m]>10^(Tu[l]/10)){cPmn[m]=TRUE}
else{cPmn[m]=FALSE}
#get the normalised observed state for the given MC trial
test = normaliseData(P=mcS[m,],Int=mcInt[m,],Dist=mcDist[m,],h=h[j])
foo = rbind(test[1:221],vector(length=221))
#predict the best BS via the NN
newAss = model %>% predict_classes(foo)
newAss=newAss[1]+1
if(newAss>10 | newAss < 1){newAss=1}
#get the directional SINR of that chosen BS
cov = mcCov[m,(newAss)]
if(cov>10^(Tu[l]/10)){cPmML[m]=TRUE}
else{cPmML[m]=FALSE}
#which BS did UAV associate with, closest, 2nd closest...?
order = order(mcDist[k,],decreasing=FALSE)
whichNthBS[m] = which(order==newAss)
}
#get coverage over the MC trials
covProb[j,l] = sum(cPm)/(MCtrials)
covProbnearest[j,l]= sum(cPmn)/(MCtrials)
covProbML[j,l] = sum(cPmML)/(MCtrials)
for(i in 1:10){
nthBS[j,i] = sum(whichNthBS==i)/MCtrials
}
h=seq(from=20,to=200,by=6)
plot(x=h,y=CFcoverageProb[,1],type='l',lwd=3,lty=1,cex.lab=1.2,cex.axis=1.2,xlab="UAV height (m)",ylab="Coverage Probability",ylim=c(0,1))
h=seq(from=20,to=200,by=10)
lines(x=h,y=covProb[,1],lwd=3,col='red')
lines(x=h,y=covProbML[,1],lwd=3,col='blue')
save(nthBS,covProbnearest,CFcoverageProb,covProb,covProbML,covProbMLmulti,h,file="HeightEvaluation.RData")
}
}
foo = rbind(nthBS[4,],nthBS[9,],nthBS[14,])
#plot the barchart showing which BS the NN chose to associate with
#barplot(height=foo,beside=TRUE,col=c("black","red","blue"),legend=c("50m","100m","150m"),xlab=("n-th Closest BS"),ylab=("Probability of Association"),names.arg = 1:10,cex.names=1.2,cex.axis = 1.2,cex.lab=1.2)
grid(nx = NULL, ny = NULL, col = "darkgray", lty = "dotted",
lwd = par("lwd"), equilogs = TRUE)
legend("bottomleft", # x-y coordinates for location of the legend
legend=c("Nearest Association","SINR Association (omni)","Neural Network Association"), # Legend labels
col=c("black","red","blue"), # Color of points or lines
lty=c(1,1,1,1,1,1), # Line type
lwd=c(8,8,8,8,8), # Line width
# pch=c(15,16,17),
cex=1.2
)
|
26069dd4afb56ed189172a95f9fb0a7d9d84e57e
|
de7e39c2f4640396f9ae666d740d0961c0074fed
|
/man/posit8-class.Rd
|
37923b1b28101fb8b9798fbc89b0ac7108c44a60
|
[] |
no_license
|
HPCC-UNUM/SoftPosit
|
a93df940e274b6153f6908fdbd1d07a64c8d53ea
|
92a513fcf3d4b21c1f37ee78c989e7667974069b
|
refs/heads/master
| 2023-04-19T01:54:02.263279
| 2021-04-02T04:33:16
| 2021-04-02T04:33:16
| 177,122,029
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 366
|
rd
|
posit8-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rcript.R
\docType{class}
\name{posit8-class}
\alias{posit8-class}
\title{posit8}
\description{
An S4 class to represent posit8
}
\section{Slots}{
\describe{
\item{\code{v}}{An input value to be converted to posit8}
}}
\examples{
x = new ("posit8" , v = 4L)
y = new ("posit8" , v = 5)
}
|
1c440facb73f0bdbc01c980cf36c771e70a1af5a
|
407350d22d3a58e93dd9b02bdb5e2213d8c55cc8
|
/Other/custom-linear-regression.R
|
4ed65b6f2546933cd6c26364e85b3c2b5909a6a8
|
[] |
no_license
|
vonElfvin/Machine-Learning-Practice
|
e679fa2b111c8178273c9b3e375abe528ceac213
|
65334ae79d29673c7ae9e5e4833728cf309be22e
|
refs/heads/master
| 2021-05-14T16:23:43.328273
| 2018-11-23T19:46:05
| 2018-11-23T19:46:05
| 116,019,701
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,422
|
r
|
custom-linear-regression.R
|
#### Gradient Descent - Linear Regression
### Setup
# Fix the RNG seed so the simulated data set is reproducible
set.seed(12345)
# 100 predictor values drawn uniformly from [-5, 5], as a 100 x 1 matrix
X.linear = as.matrix(runif(100, -5, 5))
# Response = X + standard-normal noise + 3, so the true coefficients are
# (Intercept = 3, slope = 1)
Y.linear = X.linear + rnorm(100) + 3
# Bundle predictor and response for use with lm() below
data.linear = data.frame(X=X.linear, Y=Y.linear)
### Functions
## Returns beta_hat using closed-form solution
## Closed-form OLS estimate via the normal equations: beta = (X'X)^{-1} X'y.
## X is a predictor vector/matrix (an intercept column is added internally);
## returns a coefficient matrix whose first row is named "Intercept".
get_beta_hat = function(X, Y){
  design <- cbind(1, X)              # prepend the intercept column
  normal_lhs <- crossprod(design)    # X'X
  normal_rhs <- crossprod(design, Y) # X'y
  estimate <- solve(normal_lhs, normal_rhs)
  rownames(estimate)[1] <- "Intercept"
  estimate
}
## Returns beta_hat using gradient decent
## Ordinary-least-squares fit via batch gradient descent on 1/2 * MSE.
##
## Bug fix: the original re-assigned `learning.rate = 0.1` inside the body,
## silently ignoring whatever the caller passed for `learning.rate`.
##
## Arguments:
##   X             numeric predictor vector/matrix (intercept added internally)
##   Y             numeric response vector
##   learning.rate step size for each gradient update (default 0.1)
##   plot          if TRUE, plot the data and overlay each intermediate fit
##   n.iterations  number of gradient steps (default 200, as before)
## Returns a (k x 1) coefficient matrix with the first row named "Intercept".
get_beta_hat_GD = function(X, Y, learning.rate=0.1, plot=FALSE, n.iterations=200){
  if(plot){ plot(X, Y) }
  X = cbind(1, X)   # prepend the intercept column
  k = dim(X)[2]
  n = dim(X)[1]
  # Start with a small constant guess for every coefficient
  β.hat = rep(0.1, k)
  # Gradient Descent
  for(i in seq_len(n.iterations)){
    # Residuals: yfit - y, with yfit = X %*% beta (the "variable" in MSE)
    residuals = X %*% β.hat - Y
    # Gradient of 1/2 * MSE at the current estimate
    delta = (t(X) %*% residuals) / n
    # Step against the gradient to descend towards the least-squares minimum
    β.hat = β.hat - learning.rate * delta
    if(plot){ abline(β.hat[1], β.hat[2], col=rgb(0, 0, 1, 0.1)) }
  }
  rownames(β.hat)[1] = "Intercept"
  return(β.hat)
}
### Implementation
# Reference fit using R's built-in lm()
beta.lm = lm(Y~X, data=data.linear)$coefficients # lm
# Closed-form normal-equation solution; should match lm() up to rounding
beta.solution = get_beta_hat(data.linear$X, data.linear$Y) # solution
# Gradient-descent solution; plot=TRUE overlays each intermediate fit line
beta.GD = get_beta_hat_GD(data.linear$X, data.linear$Y, 0.1, TRUE)
|
7aa8b94e159f5de4446ba712fbf4e66645e31a08
|
3e5955fe84da5a29e24e432666f597a25b192eab
|
/Poisson-Regression.R
|
b3af59c91c9e00a83402359adb0443378c3cca26
|
[] |
no_license
|
Fabio-Vieira/Poisson_Regression
|
7eb9dab466d2b0121876038c119b3394000cf8af
|
00b9673338073f64a6269d3769c38076899eda26
|
refs/heads/master
| 2020-04-28T06:49:09.146703
| 2019-03-11T19:44:29
| 2019-03-11T19:44:29
| 175,072,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,755
|
r
|
Poisson-Regression.R
|
#Bayesian Poisson Regression, with prior Student's t and Metropolis Hastings
#with random walk log adaptive proposal
###########################################################################
####Simulating regression model
# Sample size
# NOTE(review): no set.seed() here, so the simulated data differ between runs
N <- 20
# True regression coefficients (intercept, slope)
beta <- c(0.5,2)
# Design matrix: intercept column plus one uniform(0, 1) covariate
x <- cbind(rep(1,N),runif(N,min=0,max=1))
# Poisson rate via the canonical log link: lambda = exp(x %*% beta)
lambda <- exp(x%*%beta)
# Simulated counts
y <- rpois(N,lambda = lambda)
# Inspect the simulated data (printed when run interactively)
cbind(y,x)
hist(y)
###########################################################################
#Posterior distribution of Betas with prior Student's with df = 1, as in
#http://www.stat.columbia.edu/~gelman/research/published/priors11.pdf
# Log-posterior (up to an additive constant) of the Poisson regression
# coefficients, with a multivariate Student's t prior (df = 1) on Beta.
#
# Arguments:
#   Beta  coefficient vector at which to evaluate the log-posterior
#   Y     observed counts
#   X     design matrix (rows = observations)
# Returns the unnormalised log-posterior (a 1x1 matrix).
#
# Bug fix: the prior previously used the *global* `beta` (the true simulated
# coefficients) instead of the function argument `Beta`, so the prior term
# was constant and the sampler effectively targeted the bare likelihood.
# Also removed the unused local `N`.
PostBeta <- function(Beta, Y, X){
  p <- length(Beta)
  Sigma <- solve(diag(1,p,p))  # identity prior scale matrix
  #log of the core of a multivariate student's t, with df = 1
  prior <- -(1+p)/2 * log(1 + t(Beta)%*%Sigma%*%Beta)
  #log of poisson likelihood (dropping the constant log(y!) term)
  like <- sum(-exp(X%*%Beta) + X%*%Beta * Y)
  return(prior + like) #bayes rule
}
#Metropolis Hastings algorithm with log adaptive proposal, as described in
#http://www.personal.psu.edu/bas59/papers/adaptive_techreport.pdf
# One Metropolis-Hastings step with a log-adaptive random-walk proposal
# (Shaby & Wells adaptation scheme). Arguments:
#   Y, X      data (counts and design matrix), forwarded to PostBeta
#   Beta      current state of the chain
#   Sm        current scalar proposal scale factor
#   Sigma     current proposal variance estimate
#   t         iteration number; drives the decaying adaptation rate 1/t^0.8
#   BetaMean  running mean of the chain, used for the variance update
# Returns list(new state, accepted flag 0/1, updated Sm, Sigma, BetaMean).
updateBeta <- function(Y,X,Beta,Sm,Sigma,t,BetaMean){
  accept <- NULL
  count <- 0  # set to 1 below if the proposal is accepted
  c <- .8     # adaptation decay exponent
  ggamma <- 1/t^c
  # NOTE(review): rnorm(1) perturbs every component of Beta by the *same*
  # draw, so proposals move only along the (1,...,1) direction — confirm
  # this is intended rather than rnorm(length(Beta)).
  proposal <- Beta + sqrt(Sm*Sigma) * rnorm(1)
  # Acceptance probability from the log-posterior difference
  prob <- min(1,exp(PostBeta(proposal,Y,X) - PostBeta(Beta,Y,X)))
  if(runif(1) < prob){
    accept <- proposal
    count <- 1
  } else {
    accept <- Beta
  }
  #Adapting the proposal
  # Nudge log(Sm) toward the 0.234 target acceptance rate
  lSm <- log(Sm)+ggamma*(prob-.234)
  Sm <- exp(lSm)
  # Update the running variance and mean estimates of the chain
  Sigma <- Sigma+ggamma*(((Beta-BetaMean)^2)-Sigma)
  BetaMean <- BetaMean+ggamma*(Beta-BetaMean)
  return(list(accept,count,Sm,Sigma,BetaMean))
}
#################################################################################
#MCMC
Niter <- 20000
Y <- y
X <- x
# Storage: one row of coefficients per iteration, plus acceptance flags
Beta.out <- array(NA, dim = c(Niter,dim(X)[2]))
Count <- array(0, dim = Niter)
#Initial values
# Chain starts at the true simulated coefficients
Beta.out[1,] <- beta
Sm <- 2.4^2 #Described in Shaby and Wells
Sigma <- 1
BetaMean <- 1
for(i in 2:Niter){
  Updating <- updateBeta(Y,X,Beta.out[i-1,],Sm,Sigma,i,BetaMean)
  Beta.out[i,] <- Updating[[1]]
  Count[i] <- Updating[[2]]
  Sm <- Updating[[3]]
  Sigma <- Updating[[4]]
  BetaMean <- Updating[[5]]
  print(i)  # NOTE(review): printing every iteration is noisy and slows the run
}
#Checking convergence
# Trace plot and histogram for the intercept, with 2.5%/97.5% quantile bands
# (dashed blue); red lines mark the true simulated value
plot(Beta.out[,1],type='l')
abline(h=beta[1],col='red')
hist(Beta.out[,1])
abline(v=quantile(Beta.out[,1],probs=0.025),lty=2,col='blue')
abline(v=quantile(Beta.out[,1],probs=0.975),lty=2,col='blue')
abline(v=beta[1],col='red')
mean(Beta.out[,1])
# Same diagnostics for the slope
plot(Beta.out[,2],type='l')
abline(h=beta[2],col='red')
hist(Beta.out[,2])
abline(v=quantile(Beta.out[,2],probs=0.025),lty=2,col='blue')
abline(v=quantile(Beta.out[,2],probs=0.975),lty=2,col='blue')
abline(v=beta[2],col='red')
mean(Beta.out[,2])
|
e3708f75359ff669f195221078ef1a984aa239fa
|
e80caef11a68003ad7aa47a6354cf946d251bc25
|
/man/plot.mimf.Rd
|
973d753f7cd1625f4193499f98cca18526b14307
|
[] |
no_license
|
PierreMasselot/emdr
|
6b657807742f2569371c8bc2d06a17cc1ee9fe95
|
a802208b98e506071bb4f3c7eea4d2e5ad0a16f7
|
refs/heads/master
| 2021-06-25T14:32:37.828294
| 2021-06-16T17:44:31
| 2021-06-16T17:44:31
| 179,749,987
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,856
|
rd
|
plot.mimf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mimf_description.R
\name{plot.mimf}
\alias{plot.mimf}
\title{Plot IMFs}
\usage{
\method{plot}{mimf}(
x,
tt = NULL,
select.var = NULL,
select.imf = NULL,
input = TRUE,
input.lab = "X",
imf.lab = NULL,
grid = c("zeroline", "complete", "none"),
grid.col = "lightgray",
space = 1,
add.legend = p > 1,
legend.pars = list(),
...
)
}
\arguments{
\item{x}{\code{mimf} object or array to plot.}
\item{tt}{Vector containing custom time indices for the IMFs. If NULL, looks
for the \code{tt} attribute of \code{x}.}
\item{select.var}{Character or numeric vector giving a subset of variables
for which to plot the IMFs.}
\item{select.imf}{Character or numeric vector giving a subset of IMFs to
plot.}
\item{input}{Logical. If TRUE, the top panel shows the original signal.
Only considered if \code{x} is a \code{mimf} object.}
\item{input.lab}{The label of the panel showing the input signal.}
\item{imf.lab}{Character vector giving labels for the IMFs. NULL displays the
dimnames of \code{x} and NA removes the labels.}
\item{grid}{Character giving the type of grid to plot. "zeroline"
(the default) only draws the zeroline, "complete" draws a complete grid
and "none" draws no grid at all.}
\item{grid.col}{The grid color.}
\item{space}{Numeric value giving the margin between two panels in the plot.}
\item{add.legend}{Logical value. If TRUE (the default) a legend is
automatically drawn at the top of the plot.}
\item{legend.pars}{List of optional parameters for the legend.
Can be useful to indicate custom names for the variables for instance.
See \code{\link[graphics]{legend}}.}
\item{...}{Other graphical parameters. See
\code{\link[graphics]{par}}.}
}
\description{
Method to display the (M)IMFs obtained by the function \code{\link{memd}}.
}
\details{
One panel is drawn for each IMF. In the multivariate case,
by default all IMF's variables are scaled and displayed on the same panel.
To obtain the true amplitude of each variable, they must be plotted
separately.
If noise channel are present in the signal, i.e. if the argument
\code{keep.noise = TRUE} in \code{\link[emdr]{memd}},
they are not displayed by default. To display them, the argument
\code{select.var} must be set manually. Note that, in this case, it
automatically set the argument \code{input} to \code{FALSE}.
}
\examples{
library(dlnm)
# Decompose both temperature and relative humidity with NA-MEMD
# Adding two noise variables
X <- chicagoNMMAPS[,c("temp", "rhum")]
mimfs <- memd(X, l = 2) # Takes a couple of minutes
# Plot the two variables on the same graph
plot(mimfs)
# Plot the two variables separately
plot(mimfs, select.var = "temp", col = "red")
plot(mimfs, select.var = "rhum", col = "blue")
}
|
a11dfec920cdb89e7b3be06fa80163f409900e41
|
69bfd5799ec503d4043b15e84ac3fe048535958a
|
/plot2.R
|
8e235883d2258dfc8053b7cd0dfaebcfab274482
|
[] |
no_license
|
Rajeethkumar/Exploratory_analysis_project2
|
7dcd60cb1e2178e8fbb0b2933aae3d7474a1efe3
|
54431c5cbf4fa346a1953f6217d408abccf207dc
|
refs/heads/master
| 2021-04-06T10:36:43.558266
| 2018-03-11T14:20:08
| 2018-03-11T14:20:08
| 124,761,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
plot2.R
|
# Plot 2: total PM2.5 emissions in Baltimore City, Maryland, per year.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Bug fix: restrict to Baltimore City (fips == "24510"); the previous filter
# (Emissions > 0) kept every US record, contradicting the plot title.
# Columns 4 and 6 of NEI are Emissions and year.
baltimore <- (subset(NEI, fips == "24510"))[c(4,6)]
png("plot2.png", width=800, height=700)
barplot((tapply(baltimore$Emissions, baltimore$year , FUN="sum")),xlab="Year" , ylab="Emissions of PM2.5s" ,
        main="Emissions at baltimore in various years")
dev.off()
|
f9f7999c2c7bd3ec91249ab3eb5d7e04f044002b
|
c7607eb2c7074cc35b9a20a2c1f22c3654b54d6d
|
/man/get_metadata360.Rd
|
813ccfde79f8aa6b700cfbaf71b7b9f7f11ffa77
|
[
"MIT"
] |
permissive
|
asRodelgo/data360r
|
9ab9654a8131c18c7ff2ab95d3f030cd9670110d
|
1ca85cbf9aa8cf48c0b61c356f99c3fd570c6edf
|
refs/heads/master
| 2021-01-23T17:57:54.879540
| 2017-09-07T19:25:51
| 2017-09-07T19:25:51
| 102,785,930
| 1
| 0
| null | 2017-09-07T21:24:54
| 2017-09-07T21:10:06
|
R
|
UTF-8
|
R
| false
| true
| 1,657
|
rd
|
get_metadata360.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_metadata360.R
\name{get_metadata360}
\alias{get_metadata360}
\title{Get TC/Govdata360 metadata from API}
\usage{
get_metadata360(site = "tc", metadata_type = "countries")
}
\arguments{
\item{site}{string pertaining to the data360 site to download data from.
Possible choices: 'tc' for TCdata360, 'gov' for Govdata360}
\item{metadata_type}{string pertaining to the metadata to be downloaded.
Possible choices: 'countries' == Lists metadata for all countries and regions.
'indicators' == Lists metadata for all indicators. Does not return actual data.
'datasets' == Lists metadata for all datasets.}
}
\value{
Data frame (wide) containing requested metadata
}
\description{
Downloads the requested metadata by using the TCdata360 API at \url{tcdata360.worldbank.org/docs}
or Govdata360 API at \url{govdata360.worldbank.org/docs}.
The function generates a wide dataframe.
}
\details{
Hint: Want to get other data? Helpful functions include:
\itemize{
\item See \code{\link{search_360}} to get search TC/Govdata360 indicators, countries, categories, and dataset lists.
\item See \code{\link{get_data360}} to get actual indicator/dataset/country-level data.
\item See \code{\link{get_resources360}} to get additional resource information.
}
}
\examples{
#get all indicator metadata in Govdata360
df_indicators <- get_metadata360(site="gov", metadata_type = "indicators")
#get all country metadata in TCdata360
df_countries <- get_metadata360(metadata_type = 'countries')
#get all dataset metadata in TCdata360
df_datasets <- get_metadata360(metadata_type = 'datasets')
}
|
5f916aaa207ccef8a3b518ac246540968e5a8898
|
3ec044ea23fe0a2aaf8a3a37b9d85d5fa04d69a6
|
/cachematrix.R
|
16b3b530a3f2f7a2897bc773b2d9c6e65a031cf7
|
[] |
no_license
|
ErikSeras/ProgrammingAssignment2
|
692aa7e0a7e346eed8fac92c7bc73949638e7c00
|
fe04ea0d3bef54b41adeb72513b2e5e9d2209472
|
refs/heads/master
| 2022-11-08T03:17:30.723546
| 2020-06-18T12:48:49
| 2020-06-18T12:48:49
| 273,212,408
| 0
| 0
| null | 2020-06-18T10:47:15
| 2020-06-18T10:47:14
| null |
UTF-8
|
R
| false
| false
| 3,642
|
r
|
cachematrix.R
|
################################################################################
## A pair of functions that cache the inverse of a matrix
## Un par de funciones que almacenan en caché el inverso de una matriz
## Function that creates a special matrix object that can cache its inverse
## Función que crea un objeto matriz especial que puede almacenar en caché
## su inverso
## Build a cache-aware matrix wrapper.
## Returns a list of four closures that share the matrix `m` and a cached
## inverse: set/get replace/read the matrix, setInverse/getInverse store and
## retrieve the cached inverse. Setting a new matrix clears the cache.
makeCacheMatrix <- function(m = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(matrix) {
      m <<- matrix
      cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() m,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
################################################################################
## Compute the inverse of the special array returned by "makeCacheMatrix"
## above.
## If the inverse has already been calculated (and the array has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
## Calcular el inverso de la matriz especial devuelta por "makeCacheMatrix"
## arriba.
## Si el inverso ya se ha calculado (y la matriz no ha cambiado),
## entonces "cachesolve" debería recuperar el inverso del caché.
## Return the inverse of the special matrix object created by
## makeCacheMatrix, computing it with solve() only on a cache miss and
## memoising the result in the object. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # Cache hit: reuse the previously computed inverse
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and store the result for next time
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
################################################################################
# Testing
# Prueba
# Build a 4x4 test matrix (must be invertible for solve() to succeed)
normal_matrix <- matrix(c(1,7,5,5,5,5,1,8,9,1,4,7,5,9,1,4), 4, 4)
normal_matrix
# Wrap it in the caching object, then invert; a second cacheSolve() call
# would print "getting cached data" and reuse the stored inverse
inverse_matrix<- makeCacheMatrix(normal_matrix)
cacheSolve(inverse_matrix)
|
3a3bde03c6f59f3ebef7042496e9269c3327cc1d
|
b6637bffde58d61c3f60efa3ae9e3f45190a670a
|
/adjclust.R
|
0af9ef303c8ad5f05ae9eda82ba0ab8c4a6ca15b
|
[] |
no_license
|
asmanouira/MuGLasso_GWAS
|
e4e6bd72ddd629c4a0625ee8afcdd9006e942ea2
|
ec6f119c1e99e6d665f739feac03536c4ce36f5b
|
refs/heads/main
| 2023-08-25T15:00:36.290651
| 2022-02-18T11:42:06
| 2022-02-18T11:42:06
| 390,799,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
adjclust.R
|
library("adjclust")
library("matrixStats")
library("snpStats")
## LD-based adjacency-constrained clustering of SNPs from PLINK files.
## Side effects: writes LD_plot.png, dendogram_rec.png and LD_groups.rds to
## the working directory. Requires the adjclust and snpStats packages.
## NOTE(review): the function name shadows the adjclust package name.
adjclust <- function(bed = 'data.bed', bim = 'data.bim', fam = 'data.fam')
{
  # Load the PLINK trio (bed/bim/fam) as an SnpMatrix container
  plink_data <- read.plink(bed, bim, fam)
  snp_matrix <- plink_data$genotypes
  n_snps <- ncol(snp_matrix)
  # Pairwise linkage disequilibrium (R^2) between all SNP pairs, plotted
  ld_matrix <- snpStats::ld(snp_matrix, stats = "R.squared", depth = n_snps - 1)
  png(file="LD_plot.png", width = 1000, height = 1000)
  image(ld_matrix, lwd = 0)
  dev.off()
  # Adjacency-constrained hierarchical agglomerative clustering on LD
  clustering <- snpClust(snp_matrix, stats = "R.squared")
  ld_groups <- select(clustering)
  # Persist the LD-group labels for downstream analyses
  saveRDS(ld_groups, file = "LD_groups.rds")
  # Rectangular dendrogram of the clustering
  png(file ="dendogram_rec.png", width = 1000, height = 1000)
  plot(clustering, type = "rectangle", leaflab = "perpendicular")
  dev.off()
}
|
250a0640e4656b0e592a260304604749bf0f5bf5
|
248739c10a827305b54928f07675742e273a0892
|
/Talleres/Taller1/Taller1_punto2.R
|
e43dd32aa9fa7372aa3d921a4e252842eb3ad9aa
|
[] |
no_license
|
vompaula/Analisis_numerico
|
7aa5245ce06bd5d9217d7da82d9d3c0abb3a7d3f
|
82937e5c8ad81cfe9b72f83e46d5be9a27397296
|
refs/heads/master
| 2023-01-23T03:04:48.845738
| 2020-11-26T12:57:25
| 2020-11-26T12:57:25
| 285,422,157
| 0
| 0
| null | 2020-09-05T02:10:14
| 2020-08-05T22:59:16
| null |
ISO-8859-1
|
R
| false
| false
| 1,815
|
r
|
Taller1_punto2.R
|
# *** Analisis Numerico 2020-3 ***
#--Trabajo realizado por:
# Laura Mariana Jiménez Jiménez
# Paula Valentina Sánchez Peña
# Sebastián Gutiérrez Zambrano
# Taller 1 punto 2: Raíces de una Ecuación
library(Rmpfr)
library(pracma)
library(Matrix)
#Imprimir los elementos de la diagonal superior de una matriz
# For each size n in `tam`, count the entries in the strict upper triangle of
# an n x n matrix (i.e. n*(n-1)/2, obtained via upper.tri()); prints the
# integer vector of counts and returns it.
#
# Improvements: drops the pracma::ones() dependency (a base matrix() serves
# upper.tri() just as well) and avoids growing `res` with c() inside a loop.
sumaTriangularSuperior <- function(tam)
{
  res = vapply(tam, function(n) sum(upper.tri(matrix(0, n, n))), integer(1))
  print(res)
  return(res)
}
# Evaluate the strict-upper-triangle counts for even sizes n = 2, 4, ..., 100
graficar = sumaTriangularSuperior(seq(2,100,2))
# Plot the counts against n; growth is quadratic (n*(n-1)/2)
plot(seq(2,100,2),graficar,xlab = "n",ylab = "Suma", type = 'o',col = "red")
#-------------------------------------------------------------------------------------
#Sumar los primeros n^2 números naturales
# Running sums of squares: res[k] = tam[1]^2 + ... + tam[k]^2.
# Prints the resulting vector and returns it.
#
# Improvement: the original grew two vectors with c() inside a loop and
# re-summed `num` on every pass (accidentally O(n^2)); cumsum() produces the
# identical result in O(n). (For empty input this now returns numeric(0)
# rather than NULL.)
sumaNumerosNaturales <- function(tam)
{
  res = cumsum(tam^2)
  print(res)
  return(res)
}
# Cumulative sums of squares for n = 1..100, plotted against n
graficar = sumaNumerosNaturales(seq(1,100))
plot(seq(1,100),graficar,xlab = "n",ylab = "Suma", type = 'o',col = "blue")
#------------------------------------------------------------------------------------
# Find the maximum height of the rocket flight function y(t) = 6+2.13t^2-0.0013t^4
# by locating a root of its derivative (a stationary point) near t in [20, 40]
Fx <- function(x) 6+2.13*x^2 -0.0013*x^4
# Symbolic derivative of the height function, wrapped as a callable
f <- expression( 6+2.13*x^2 -0.0013*x^4 )
dx = D(f,"x")
fdx <- function(x) eval(dx)
# Root of y'(t) via Newton-Raphson (pracma), seeded with 128-bit Rmpfr precision
maximo = newtonRaphson(fdx,x0=mpfr(20,128),maxiter = 10000,tol = 1e-8)
alturaMaxima = Fx(maximo[[1]])
cat("Newton-Raphson: La altura maxima que puede alcanzar el cohete (en metros) es de")
print(alturaMaxima)
# Same stationary point via bisection on [20, 40] as a cross-check
maximo = bisect(fdx,a=mpfr(20,128),b=mpfr(40,128),maxiter = 1000)
alturaMaxima = Fx(maximo[[1]])
cat("Bisección: La altura maxima que puede alcanzar el cohete (en metros) es de")
print(alturaMaxima)
|
cdd69bd8c11855a86ca00c9a12b8707cb2690481
|
b013908e5c154e8932cfc4d672aa09de884fdd4a
|
/man/below_percent.Rd
|
256b9268e85301a3b8f82be04c7d4fdbbd8d943f
|
[] |
no_license
|
trippsapientae/iglu
|
3127f78aafc9b51c172841d7c1b9fc9a790cd2c1
|
06bad5213a4e0eb526ae880cc74cf9eea524163d
|
refs/heads/master
| 2020-08-29T06:11:28.315321
| 2019-09-11T05:15:57
| 2019-09-11T05:15:57
| 217,951,416
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 594
|
rd
|
below_percent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/below_percent.R
\name{below_percent}
\alias{below_percent}
\title{Calculate percentage below targeted values}
\usage{
below_percent(data, targets = c(50, 80))
}
\arguments{
\item{data}{DataFrame with column names ("id", "time", and "gl"),
or vector of glucose values as integer, numeric, or double.}
\item{targets}{List of target values. Default list is (50,80).}
}
\value{
The percentage of glucose values falling below each of the specified target
values.
}
\description{
Calculate percentage below targeted values
}
\examples{
below_percent(data)
below_percent(data, targets = c(50,100, 180))
}
|
7f7b03e34d05305ae02178b050356338a524b54e
|
435ab223715d6465a5b10469b594b26c273e4609
|
/shiny/news/ui.R
|
c2ac15b7183c583faa73090a7fe5fc7caf7eb486
|
[] |
no_license
|
oleglr/forecast
|
6a5b5a9894b5dc6e74fd62f06e7ed4d637ac823b
|
560a92e2d294f71f61080911d65694e37fdf58bb
|
refs/heads/master
| 2022-04-15T20:54:39.745193
| 2016-12-06T00:05:07
| 2016-12-06T00:05:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
ui.R
|
library(shiny)
# Define UI for application that plots random distributions
# UI definition: a date picker followed by two data tables. The outputs
# "newsView" and "tdnetView" are presumably rendered by the matching
# server.R — verify against that file.
shinyUI(bootstrapPage(
  # Application title
  headerPanel("news"),
  # Date selector (default 2016-05-31)
  dateInput("date", label = h3("Date input"), value = "2016-05-31"),
  # Section heading (Japanese for "news") and the news table
  h4("ニュース"),
  dataTableOutput("newsView"),
  # Section heading and the Tdnet table
  h4("Tdnet"),
  dataTableOutput("tdnetView")
))
|
337c16fc4148245e67f67be8d29e0eb8f3560ab8
|
e478018b739a8e4b779490cf5fecc1452fe3ee4b
|
/Plot1.r
|
1baa6adc6bb5aa0aa268ddb79c824d8b2945ec56
|
[] |
no_license
|
Ravi-Pratap/Exploratory-Data-Analysis_Course-Project-2
|
d7b708500def8cff118e2834727f669c53ea73ae
|
7f26daab8f346229224f7858d916a48d22f05966
|
refs/heads/master
| 2020-04-09T09:29:50.777697
| 2018-12-03T20:47:54
| 2018-12-03T20:47:54
| 160,235,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
Plot1.r
|
#----------------------------------------------------------------
# Ravinendra Pratap
# Exploratory data Analysis
# Course Project 2
# Date: 04/12/2018
#----------------------------------------------------------------
{#---Exploratory Data Analysis : Course Project 2018
#----------------------------------------------------------------
## Load the data
getwd()
# NOTE(review): setwd() to an absolute, machine-specific path makes this
# script non-portable; prefer running it from the project directory.
setwd("D:/LND/COURSERA_DATA_SCIENCE/COURSERA_04_Exploratory Data Analysis/WEEK4- 04EDA Case Studies")
path <- getwd()
# Download and unpack the NEI/SCC data set (re-downloads on every run)
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
, destfile = paste(path, "dataFiles.zip", sep = "/"))
unzip(zipfile = "dataFiles.zip")
SCC <- readRDS(file = "Source_Classification_Code.rds") # NOTE(review): loaded but unused in this plot
NEI <- readRDS(file = "summarySCC_PM25.rds")
# Total PM2.5 emissions per year, summed over all sources
aggdata <- aggregate(Emissions~year,data = NEI,FUN = sum)
# str(NEI)
# dim(NEI)
# head(NEI)
# str(SCC)
# dim(SCC)
# head(SCC)
## Create BarPlot and Export as PNG file
# dir()
#1. Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? Using the base plotting system, make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008.
png(filename = "plot1.png",width = 480, height = 480,units = "px")
# Bar chart of yearly totals, rescaled to millions of tons
barplot(
(aggdata$Emissions)/10^6,
names.arg = aggdata$year,
col = "blue",
xlab = "Year",
ylab = "PM2.5 Emissions (10^6 Tons)",
main = "PM2.5 Emissions for all US Sources (Total)"
)
dev.off()
}
|
1c695c78d251416b6f01e06e8744f59909b5db4a
|
7bd4158d7a2b701f3d33c1335ef62635b6f8f05a
|
/tests/testthat/test-get_lai_forcing.R
|
880b4a8fade0f10c13eb7c04603d954b34685cb8
|
[] |
no_license
|
SticsRPacks/SticsRFiles
|
f7ceccf81a805cd87939021b76e1d174b5d54c52
|
ed3ef394244f3e7a44093dd3c3424ee710282020
|
refs/heads/main
| 2023-08-30T21:41:20.020763
| 2023-07-28T08:44:29
| 2023-07-28T08:44:29
| 187,986,787
| 2
| 3
| null | 2023-07-28T08:44:30
| 2019-05-22T07:32:34
|
R
|
UTF-8
|
R
| false
| false
| 386
|
r
|
test-get_lai_forcing.R
|
# Unit test: get_lai_forcing() should report FALSE (no LAI forcing) for the
# "wheat" usm in the packaged example usms.xml.
library(SticsRFiles)
stics_version <- get_stics_versions_compat()$latest_version
version_num <- get_version_num()  # NOTE(review): unused in this file
context("Test LAI Forcing")
# Locate the example usms.xml shipped with the package
workspace_path <- get_examples_path("xml", stics_version = stics_version)
xml_usms <- file.path(workspace_path, "usms.xml")
get_lai_forcing(xml_usms, "wheat")  # NOTE(review): bare call, result discarded — redundant with the expectation below
test_that("Lai Forcing", {
  expect_false(get_lai_forcing(xml_usms, "wheat"))
})
|
bf7f78c73eeb03af6c5bce81b56a75e9873f0b4c
|
0a1bc1eb634a00bc9d540ef166316f1920ec2df8
|
/man/v6_deleteFile.Rd
|
6832d8a2dbcde8321f35a9bb690fed62fc7873ce
|
[] |
no_license
|
gorcha/vision6
|
58809b277e3f8052ad348d5d1755d2a776ba5890
|
e3d2a1036bbe88d0237f5686b2de450c7bd563b9
|
refs/heads/master
| 2021-06-03T05:32:29.461543
| 2019-01-31T00:33:49
| 2019-01-31T00:33:49
| 26,483,732
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 407
|
rd
|
v6_deleteFile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/file.R
\name{v6_deleteFile}
\alias{v6_deleteFile}
\title{Deletes the specified File.}
\usage{
v6_deleteFile(file_id)
}
\arguments{
\item{file_id}{(int) The ID of the File to delete.}
}
\value{
true on success.
}
\description{
Deletes the specified File.
}
\seealso{
\url{http://developers.vision6.com.au/3.0/method/deleteFile}
}
|
d48394e6fe16efd893e99be9ec44e7450e2d0587
|
2b9cac18d5609925c6e9950c8025f4f1dc849bc4
|
/Import_Financialdata.R
|
e34c87de2f8a00a17003eb86797cfafd20f3dd45
|
[] |
no_license
|
noisefallacy/R_repository
|
6c02b990aa81436a15952d0dfbbf8803257bfbd2
|
e5189a33bf62a925fa64f20411d6bf65187e14a7
|
refs/heads/master
| 2020-04-01T12:11:10.355432
| 2018-10-23T08:07:55
| 2018-10-23T08:07:55
| 153,195,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,257
|
r
|
Import_Financialdata.R
|
#Importing and Managing Financial Data in R
# Tutorial script: downloads price/economic series from several providers
# (Yahoo!, Alpha Vantage, FRED, Quandl, Oanda) via quantmod and Quandl.
# Every step requires network access.
##Introduction and Downloading Data
# Load the quantmod package
library(quantmod)
# Import QQQ data from Yahoo! Finance
# NOTE(review): `T` is a reassignable alias; TRUE is safer (comment only)
getSymbols("QQQ", auto.assign = T)
# Look at the structure of the object getSymbols created
str(QQQ)
# Look at the first few rows of QQQ
head(QQQ)
# Import QQQ data from Alpha Vantage
getSymbols("QQQ", src="av", auto.assign = T)
# Look at the structure of QQQ
str(QQQ)
# Import GDP data from FRED
getSymbols("GDP", src="FRED",auto.assign = T)
# Look at the structure of GDP
str(GDP)
# Assign SPY data to 'spy' using auto.assign argument
spy <- getSymbols("SPY", auto.assign=F)
# Look at the structure of the 'spy' object
str(spy)
# Assign JNJ data to 'jnj' using env argument
jnj <- getSymbols('JNJ', env=NULL)
# Look at the structure of the 'jnj' object
str(jnj)
# Load the Quandl package
library(Quandl)
# SECURITY(review): hard-coded API key committed to source control — revoke
# this key and load it from an environment variable instead.
Quandl.api_key("bSmr7LzzmhMQyAGVkVzF")
# Import GDP data from FRED.
# For Quandl specify source with "code" (i.e. database/series).
gdp <- Quandl(code="FRED/GDP")
# Look at the structure of the object returned by Quandl
str(gdp)
# Import GDP data from FRED as xts
gdp_xts <- Quandl(code="FRED/GDP", type="xts")
# Look at the structure of gdp_xts
str(gdp_xts)
# Import GDP data from FRED as zoo
gdp_zoo <- Quandl(code="FRED/GDP", type="zoo")
# Look at the structure of gdp_zoo
str(gdp_zoo)
# Create an object containing the Pfizer ticker symbol
symbol <- "PFE"
# Use getSymbols to import the data
getSymbols(symbol, auto.assign=TRUE)
# Look at the first few rows of data
head(PFE)
#quantmod::oanda.currencies contains a
#list of currencies provided by Oanda
# Create a currency_pair object
currency_pair <- "GBP/CAD"
# Load British Pound to Canadian Dollar exchange rate data
getSymbols(currency_pair, src="oanda")
# Examine object using str()
str(GBPCAD)
# Try to load data from 190 days ago
getSymbols(currency_pair, from = Sys.Date() - 190, to = Sys.Date(),
src = "oanda")
# Create a series_name object
series_name <- "UNRATE"
# Load the data using getSymbols
getSymbols(series_name, src="FRED")
# Create a quandl_code object
quandl_code <- paste("FRED",series_name,sep="/")
# Load the data using Quandl
unemploy_rate <- Quandl(quandl_code)
|
2f9d0d695fd2cace647a2be2e603f16369e4bfe8
|
6782e6efbd940fcce65da21fff2ad140e87783c1
|
/shiny_rclub.R
|
2f51e1dfbcd31463e2bfd273dad22833064287d5
|
[] |
no_license
|
JWellsBio/shiny_r_club
|
ae99b2fab79c3648ff71bab9180a412c1b4bfdcd
|
af97972748d3cfdd5c87413eae653c21b767046f
|
refs/heads/master
| 2020-12-22T14:32:06.024856
| 2020-01-29T17:31:56
| 2020-01-29T17:31:56
| 236,824,338
| 1
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,525
|
r
|
shiny_rclub.R
|
## shiny app for r club Jan 29
# an app that plots incidences of baby names for the 2000s
# interface will include choosing girls name and boys name
# default colors will be purple and springgreen
# load libraries ----
if (!require ('ggplot2')) install.packages('ggplot2')
library(ggplot2) # for general plotting
if (!require ('shiny')) install.packages('shiny')
library(shiny) # for creating app
if (!require ('babynames')) install.packages('babynames')
library(babynames) # for data on baby names: it is raw counts by year from 1880-2017 https://cran.r-project.org/web/packages/babynames/babynames.pdf
if (!require ('shinythemes')) install.packages('shinythemes')
library(shinythemes) # for some custom looks if you want them https://cran.r-project.org/web/packages/shinythemes/shinythemes.pdf
# Define the user interface ---- #this is everything the user interacts with
# UI: slate theme + title, a sidebar with two name selectors, and a main
# panel holding the caption and the plot rendered by the server.
#
# Bug fix: removed the trailing comma that followed the second selectInput()
# inside sidebarPanel() — `sidebarPanel(..., )` passes an empty trailing
# argument, which errors ("argument is empty") under base-R dots handling.
ui <- fluidPage(theme = shinytheme('slate'),
  # App title ----
  titlePanel('Baby Names in the 2000\'s'),
  # Sidebar layout with input and output definitions ----
  sidebarLayout( # set the layout
    # Sidebar panel for inputs ----
    sidebarPanel( # this is the sidebar where all the inputs can be selected
      # Input: Selector for variable to indicate girl's name ----
      selectInput('girl_name', 'Girl\'s Name:', # format: 'variable_name_for_server_use', 'What the user sees'
                  c('Annie' = 'Annie', # 'Option the user sees', 'what_the_server_uses'
                    'B' = 'Bertha', # NOTE(review): label 'B' looks like a typo for 'Bertha' — confirm
                    'Chloe' = 'Chloe',
                    'Dorothy' = 'Dorothy',
                    'Effie' = 'Effie',
                    'Frankie' = 'Frankie',
                    'Gertrude' = 'Gertrude',
                    'Hilda' = 'Hilda',
                    'Irene' = 'Irene',
                    'Jenny' = 'Jenny',
                    'Kitty' = 'Kitty',
                    'Lucy' = 'Lucy',
                    'Mary' = 'Mary',
                    'Nancy' = 'Nancy',
                    'Olivia' = 'Olivia',
                    'Pearl' = 'Pearl',
                    'Queen' = 'Queen',
                    'Rachel' = 'Rachel',
                    'Sue' = 'Sue',
                    'Teresa' = 'Teresa',
                    'Una' = 'Una',
                    'Virginia' = 'Virginia',
                    'Winona' = 'Winona',
                    'Zoe' = 'Zoe')),
      # Input: Selector for variable to indicate boy's name ----
      selectInput('boy_name', 'Boy\'s Name:',
                  c('Archie' = 'Archie',
                    'Bill' = 'Bill',
                    'Christopher' = 'Christopher',
                    'Daniel' = 'Daniel',
                    'Elias' = 'Elias',
                    'Felix' = 'Felix',
                    'Garrett' = 'Garrett',
                    'Hubert' = 'Hubert',
                    'Irving' = 'Irving',
                    'Joel' = 'Joel',
                    'King' = 'King',
                    'Logan' = 'Logan',
                    'Mason' = 'Mason',
                    'Norris' = 'Norris',
                    'Orlando' = 'Orlando',
                    'Philip' = 'Philip',
                    'Ralph' = 'Ralph',
                    'Sherman' = 'Sherman',
                    'Ted' = 'Ted',
                    'Ulysses' = 'Ulysses',
                    'Virgil' = 'Virgil',
                    'Webster' = 'Webster',
                    'Zack' = 'Zack'))
    ),
    # Main panel for displaying all outputs ----
    mainPanel(
      # Output: Formatted text for caption ----
      h3(textOutput('caption')), # this comes from the server section
      # Output: Plot of the selected baby names ----
      plotOutput('baby_plot') # this comes from the server section
    )
  )
)
# Define server logic to plot selected baby names in the 2000's ----
# Server: builds the caption text and the grouped bar chart for the two
# selected names, recomputing reactively whenever an input changes.
server <- function(input, output) {
  # Compute the formula text ----
  # This is in a reactive expression since it is changed by the
  # output$caption function below
  formulaText <- reactive({
    paste('Number of Babies named', input$girl_name, 'and', input$boy_name, sep = ' ')
  })
  # Return the formula text for printing as a caption ----
  output$caption <- renderText({
    formulaText() # this is redundant if we are plotting with a title
  })
  # Generate a plot of the selected baby names ----
  # all the plotting and data manipulation is done here based on what we selected
  output$baby_plot <- renderPlot({
    #subset baby names to chosen names
    # Filter by both name AND sex, since some names appear for both sexes
    baby_girl <- babynames[babynames$name == input$girl_name & babynames$sex == 'F',] # be sure to only capture girls named girl
    baby_boy <- babynames[babynames$name == input$boy_name & babynames$sex == 'M', ] # be sure to only capture boys named boy
    #subset to 2000's
    baby_girl_2000 <- baby_girl[baby_girl$year >= 2000 & baby_girl$year <= 2009, ]
    baby_boy_2000 <- baby_boy[baby_boy$year >= 2000 & baby_boy$year <= 2009, ]
    #combine for plotting
    baby_combined <- rbind(baby_girl_2000, baby_boy_2000)
    #plot
    # Fill colors are assigned to the names in factor (alphabetical) order,
    # not by sex — see the note on scale_fill_manual below
    ggplot(baby_combined, aes(x = year, y = n, fill = name))+ #barplot number per year, filled by the names we chose
      geom_bar(stat = 'identity', position = 'dodge')+ #side-by-side plotting by year
      ggtitle(paste('Number of Babies named', input$girl_name, 'and', input$boy_name, 'during 2000\'s', sep = ' ')) + # main title, again this is redundant based on caption
      scale_fill_manual(values = c('springgreen', 'purple')) + # colors for bars, THESE DO NOT SWITCH BY BOY/GIRL!!
      scale_x_continuous(breaks = seq(2000, 2009, 1)) + # label every year
      labs(y = 'Number of Babies', x = 'Year', fill = 'Name') + theme_bw() + # axis labels and getting rid of default gray background
      theme(plot.title = element_text(face = 'bold', size = 15)) + # bolding everything and increasing sizes
      theme(axis.title = element_text(face = 'bold', size = 15)) +
      theme(axis.text = element_text(face = 'bold', colour = 'black', size = 15)) +
      theme(axis.text.x = element_text(angle = 45, hjust = 1)) + # tilt year labels
      theme(legend.text = element_text(face = 'bold', size = 15)) +
      theme(legend.title = element_text(face = 'bold', size = 15))
  })
}
# Launch the Shiny app from the ui and server objects defined above.
shinyApp(ui, server)
## TWO (SUGGESTED) THINGS TO ALTER ----
# 1. Can we introduce an option to select decade? (And graph accordingly)
# 2. Can we set the colors to always pertain to the boy/girl name?
|
2e2bd44e0ff0947ac38f0d543bf014e83925b558
|
a0a97ef59eb91053161b4258b56a938591ad92d4
|
/Scripts/8_DataTypeNameAttribute.R
|
29fa81cdc96cb10cda957f59253e0365af5a425a
|
[] |
no_license
|
sanudelhi1199/Project_1_Rstudio
|
f30486e6f8f37b367b0bea5dd632bc850d52ec25
|
49547fe6944961617cbbaa64c0937d43fa6a7454
|
refs/heads/main
| 2023-04-20T03:57:48.669358
| 2021-05-13T05:43:25
| 2021-05-13T05:43:25
| 365,517,085
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 549
|
r
|
8_DataTypeNameAttribute.R
|
# Demonstrates attaching a 'names' attribute to a vector, a list, and
# (via dimnames) to a matrix. Each object is printed before and after
# labelling so the effect of the attribute is visible.

# Named character vector: build it, then label every element.
family <- c("Gajuji","Manjulaben", "Hitesh", "Rashmika")
print(family)
names(family) <- c("Father", "Mother" , "Son", "Doughter")
print(family)
print(attributes(family))

# Lists take a names attribute the same way.
person <- list("Hitesh",22,162,"sanudelhi1199@gmail.com")
print(person)
names(person) <- c("Name","Age","Height","e-Mail")
print(person)

# Matrices are labelled per dimension via dimnames (rows, then columns).
# Naming the rbind() arguments reproduces the original row names "a"/"b".
contacts_old <- c("sanudelhi1199@gmail.com", "@isinh")
contacts_new <- c("sanudelhi2603@gmail.com", "@isanudelhi")
contact_matrix <- rbind(a = contacts_old, b = contacts_new)
print(contact_matrix)
dimnames(contact_matrix) <- list(c("old data", "New data"), c("e-mail", "Instagram"))
print(contact_matrix)
|
407d8ae4d39e051d93a9491b8313a649f4dcc948
|
d626528505226594530f3500133f3de54af4940f
|
/projekt/Praktikum Stat UNPAD/Pertemuan 11.R
|
3e59928e38d7a125fe1c81ac8f61861a6efddc6c
|
[] |
no_license
|
jakajek/R-Mighty
|
4541a87c1abcb6be7372939ba3621fd61390b9e6
|
5646cf26daf9dcca5f0cee9633d640f636aaa4a2
|
refs/heads/main
| 2023-07-13T03:10:24.273816
| 2021-08-18T13:57:09
| 2021-08-18T13:57:09
| 397,247,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,293
|
r
|
Pertemuan 11.R
|
# Two-way ANOVA practicum on the built-in ToothGrowth dataset:
# tooth length (len) by supplement type (supp) and vitamin C dose (dose).
#1 Load the data and peek at a random sample of rows.
my_data <- ToothGrowth
#install.packages("dplyr")
library(dplyr)
set.seed(1234)
dplyr::sample_n(my_data, 10)
#2 Inspect the structure.
str(my_data)
# Convert dose to a factor and recode its levels
# as "D0.5", "D1", "D2"
my_data$dose <- factor(my_data$dose,
levels = c(0.5, 1, 2),
labels = c("D0.5", "D1", "D2"))
head(my_data)
table(my_data$supp, my_data$dose)
#2.5 Per-cell summary statistics: count, mean and sd of len by supp x dose.
require("dplyr")
group_by(my_data, supp, dose) %>%
summarise(
count = n(),
mean = mean(len, na.rm = TRUE),
sd = sd(len, na.rm = TRUE)
)
#3
# Data visualisation -1- (base graphics)
# Box plot with two factor variables
boxplot(len ~ supp * dose, data=my_data, frame = FALSE,
col = c("#00AFBB", "#E7B800"), ylab="Tooth Length")
# Two-way interaction plot
interaction.plot(x.factor = my_data$dose, trace.factor = my_data$supp,
response = my_data$len, fun = mean,
type = "b", legend = TRUE,
xlab = "Dose", ylab="Tooth Length",
pch=c(1,19), col = c("#00AFBB", "#E7B800"))
# Data visualisation -2- (ggpubr)
# Install
#install.packages("ggpubr")
# Box plot with multiple groups
library("ggpubr")
ggboxplot(my_data, x = "dose", y = "len", color = "supp",
palette = c("#00AFBB", "#E7B800"))
# Line plots with multiple groups
ggline(my_data, x = "dose", y = "len", color = "supp",
add = c("mean_se", "dotplot"),
palette = c("#00AFBB", "#E7B800"))
#4 Fit the models: additive first, then with the supp:dose interaction.
res.aov2 <- aov(len ~ supp + dose, data = my_data)
summary(res.aov2)
# Two-way ANOVA with interaction effect
# These two calls are equivalent
res.aov3 <- aov(len ~ supp * dose, data = my_data)
res.aov3 <- aov(len ~ supp + dose + supp:dose, data = my_data)
summary(res.aov3)
#5 Post-hoc pairwise comparisons for the dose factor.
TukeyHSD(res.aov3, which = "dose")
pairwise.t.test(my_data$len, my_data$dose,
p.adjust.method = "BH")
#6 Assumption check: homogeneity of variance (residuals-vs-fitted plot).
plot(res.aov3, 1)
# Levene's test (car package)
library(car)
leveneTest(len ~ supp*dose, data = my_data)
#7 Assumption check: normality of residuals (Q-Q plot).
plot(res.aov3, 2)
# Shapiro-Wilk test on the model residuals
# Extract residuals
aov_residuals <- residuals(object = res.aov3 )
# Shapiro-Wilk test
shapiro.test(x = aov_residuals )
#8 Type III sums of squares via car::Anova for the interaction model.
library(car)
my_anova <- aov(len ~ supp * dose, data = my_data)
Anova(my_anova, type = "III")
|
df98955a42459145f64f1769f26aeb30a29d05ca
|
47088551d33d6fa167a2bb4b308b52b0300d1f19
|
/problem/SingleCellGrow/MethodsPerformance/slowmodel/gridsearch/adk/basic.r
|
7c51481c8b2b1583a4dbedf3d136be5998e7a439
|
[] |
no_license
|
hydrays/sspat
|
1b0f2f7d581ce1ed84d4a61afe964d28858cfd05
|
9c4bc01ec124b31679199b1abac8643fac9a6777
|
refs/heads/master
| 2021-01-21T02:31:02.743226
| 2020-12-19T03:52:34
| 2020-12-19T03:52:34
| 39,364,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,082
|
r
|
basic.r
|
# Grid search over (r2, d2) for the one-species cell-growth model: at each
# grid point, simulate Nsample cell counts with simulator1n() and compare
# them to the reference sample `mcell` using the Anderson-Darling k-sample
# test (package 'adk'); the best fit maximises the p-value.
library('adk')
dyn.load('simulator1.so')
source('simulator1n.r')
# Simulated data
# NOTE(review): T <- 8 shadows the TRUE shorthand T, so `arr.ind=T` below
# actually passes 8 (still truthy, but fragile) -- prefer explicit TRUE.
T <- 8
r1 <- 0
r2 <- 0.6
d1 <- 0
d2 <- 0.3
v <- 0
w <- 0
N <- 100
# NOTE(review): both definitions of `mcell` are commented out, yet mcell is
# used inside the loop below -- the script only runs if mcell already exists
# in the workspace. Confirm which source line is intended.
#mcell <- as.matrix(read.csv('data/simcella.csv'))
#mcell <- simulator1n(T, r1, r2, d1, d2, v, w, N)
Nsample = 10000
.lenr2 <- 101  # grid resolution along r2
.lend2 <- 101  # grid resolution along d2
x <- seq(Nsample)
pvalue1 <- matrix(0, .lenr2, .lend2)
pvalue2 <- matrix(0, .lenr2, .lend2)
# r2 and d2 are re-bound here as grid vectors (the scalars above go unused).
r2 <- seq(0, 1, length=.lenr2)
d2 <- seq(0, 1, length=.lend2)
for ( i in seq(.lenr2) ){
for ( j in seq(.lend2) ){
# Simulate a sample at this grid point and AD-test it against mcell.
x <- simulator1n(T, 0, r2[i], 0, d2[j], 0, 0, Nsample)
dis <- adk.test(mcell, x)
pvalue1[i, j] <- dis$adk[1,2]  # p-value, first AD variant (see ?adk.test for the row meanings)
pvalue2[i, j] <- dis$adk[2,2]  # p-value, second AD variant
cat(c(pvalue1[i, j], r2[i], d2[j], i, j),'\n')
}
}
# Best fit = grid point with the largest p-value (least evidence of mismatch).
res <- which(pvalue2==max(pvalue2), arr.ind=T)
r2max <- r2[res[1]]
d2max <- d2[res[2]]
cat('optimal value found at', '\n')
cat(r2max, d2max, '\n')
## ## -----------------------------------
## ## Plot the contour
## ## -----------------------------------
## filled.contour(x = seq(0, 1, length.out=101),
## y = seq(0, 1, length.out=101),
## d,
## color=terrain.colors,
## plot.title = title(main = "KS-distance between ECDFs [Good Cells]",
## xlab = "proliferation rate",
## ylab = "death rate"),
## asp = 1,
## plot.axes={ axis(1); axis(2); points(0.6,0.3,pch=17) },
## level=c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.65, 0.7))
## mtext(paste("@", date()), side=1, line=4, adj=1.04, cex=.66)
## text(0.4, 1.3, "Grid search for best fit using one-species model.")
## text(0.4, 1.25, "The best fit is located at")
## text(0.4, 1.2, "r1 = 0.84, d1 = 0.44 (triangle)")
## dev.copy(pdf,'search5.pdf')
## dev.off()
## ## -----------------------------------
## ## Plot the fit
## ## -----------------------------------
## par(mfrow=c(1,1))
## Nsample = 10000
## T = 8
## r1 = 1.0
## r2 = 0.6
## d1 = 0.35
## d2 = 0.3
## v = 0.35
## w = 0.75
## x <- seq(Nsample)
## for (i in seq(Nsample)){
## y <- Csimulator1(T, r1, r2, d1, d2, v, w)
## x[i] <- sum(y)
## }
## data7 <- read.csv('data/8dayfast.csv')
## mcell <- as.numeric(data7[8,3:ncol(data7[1,])])
## mcell[is.na(mcell)] <- 0
## Fn <- ecdf(x)
## Fe <- ecdf(mcell)
## plot(Fn, xlim=c(0,300),
## main = "ECDF: simulation vs data",
## ylab = "Cumulative probability",
## xlab = "8-day Cell number ")
## lines(Fe, col='red')
## text(200, 0.6, "Goodness of fit")
## text(200, 0.55, "Black: simulated using parameters from grid search")
## text(200, 0.5, "Red: experiment data of the mixed population")
## text(200, 0.45, "Result (r1=1, r2=0.6, d1=0.4, d2=0.3, v=0, w=0.3):")
## text(200, 0.4, "KS-distance: 0.08")
## text(200, 0.35, "p-value: 0.81")
## dis <- adk.test(mcell, x)
## cat(dis$adk[1,2])
## library('png')
## if( require(png) ) {
## img <- readPNG("bear.png")
## my.symbols( x = unit(0.8, "npc"), 0, ms.image, MoreArgs=list(img=img),
## inches=0.2, symb.plots=TRUE, add=TRUE)
## }
## mtext(paste("@", date()), side=1, line=4, adj=1.04, cex=.66)
## dev.copy(jpeg,'search6.jpg')
## dev.off()
|
fe1158615a388d6acf7230d32ba5ed46bdec3d83
|
ecc8c901094530f0954a2f98dfec4e0bd01f3dd3
|
/Listas y vectores.R
|
880373c80cd98329a16911dedf6b0260aa931d03
|
[] |
no_license
|
SergioContreras20152077/Estadistica-en-R
|
17bad1575f6ffa7d1090a6c88ca4c818bc03401c
|
59e45cbb85997f0ed478c7f67ea66839e3876c07
|
refs/heads/master
| 2020-12-14T15:43:01.704799
| 2020-01-18T22:00:39
| 2020-01-18T22:00:39
| 234,793,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
Listas y vectores.R
|
# Demo: lists vs. vectors, building a data frame, and importing SPSS/CSV data.

# 'foreign' provides read.spss(); it must be attached before the first call.
# (Bug fix: the original called read.spss() twice before library(foreign),
# which errors with "could not find function" in a fresh session.)
library(foreign)

numeroenlista <- list(1, 2, 3)
numeroenlista
vectorlista <- c(1, 2, 3)
vectorlista
# Quote character values so R recognises them as strings.
x <- c("a", "b", "c", "d")
x1 <- c(20, 32, 53, 56)
x2 <- c("hombre", "mujer", "hombre", "mujer")
Dataframe <- data.frame(casos = x, edad = x1, sexo = x2)
Dataframe
# Import data: read.spss() returns a list unless to.data.frame = TRUE.
datos <- read.spss("/Users/air/Downloads/Estadística en R/Employee data.sav")
head(datos)
summary(datos)
datos <- read.spss("/Users/air/Downloads/Estadística en R/Employee data.sav", to.data.frame = TRUE)
summary(datos)
datos2 <- read.csv("/Users/air/Downloads/survey_sample.csv")
summary(datos2)
|
8df31166182b2b0dd87f172a658c0538c2d502c3
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-132/tlc02-nonuniform-depth-132.R
|
ff793e4eb49841f1ef5205f9cf53eb2318ee25f0
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 79
|
r
|
tlc02-nonuniform-depth-132.R
|
aaab70c3ee29ca5c413c4e40723f1332 tlc02-nonuniform-depth-132.qdimacs 30458 80324
|
f3fe56b56fd7466a2c05ad5bcbf5948c5b4dd5dc
|
11a34b0073a682ffe3de8fc2e80e32d4a69d93a5
|
/F1_snpfilter/2_5Bsnp/snp_freebaysfilter.R
|
53eeb19c03d9564ba3756a494e0ddd3c3d9f13b8
|
[] |
no_license
|
xinwenzhg/yeastAse_stats
|
60ec644f5604da44f36613be9298acc90582b2a6
|
cc25a702c713c8038f6ded46032ae8ba35ee6e8d
|
refs/heads/master
| 2022-04-16T16:14:31.110385
| 2020-03-17T22:52:29
| 2020-03-17T22:52:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,928
|
r
|
snp_freebaysfilter.R
|
# Distance between two genomic positions; Inf when they lie on
# different chromosomes (so any cross-chromosome pair exceeds every
# finite distance threshold).
mydisc <- function(chr1, pos1, chr2, pos2) {
  if (chr1 != chr2) {
    return(Inf)
  }
  # Positions may arrive as character/factor values, hence the coercion.
  abs(as.numeric(pos1) - as.numeric(pos2))
}
# Convert a logical "start of new group" flag vector into running group ids:
# every TRUE starts a new group; FALSE inherits the previous group's number.
# The first element always belongs to group 1, matching the original loop,
# which initialised ans[1] to 1 regardless of x[1].
#
# @param x Logical (or 0/1) vector of group-start flags.
# @return Numeric vector of group ids, same length as x.
tf2groupName <- function(x) {
  # Vectorised form of the original carry-forward loop. Also fixes the
  # length-0/length-1 edge case, where the loop over 2:length(x) ran
  # backwards (2:1) and indexed ans[0], raising an error.
  cumsum(as.numeric(x)) - as.numeric(x[1]) + 1
}
# Filter freebayes SNP positions: keep SNPs that map reciprocally between the
# yps and rm assemblies, drop SNPs near flagged "bad" positions (within an
# indel-padded window), then group survivors into blocks separated by >700 kb
# and write the resulting position lists.
# NOTE(review): this section uses %>%/arrange (dplyr or magrittr),
# as.data.table (data.table) and the helpers mydisc()/tf2groupName() defined
# above -- confirm those packages are attached before this point.
setwd('~/cloud/project/otherRaw')
# remove mummer unmatch snps
yr_pos <- read.table(header=T,file='yr5B.posmap')
names(yr_pos)[1:6] <- c('ypsChrom','ypsPosit','ypsN','rmChrom','rmPosit','rmN')
ry_pos <- read.table(header=T,file='ryB5.posmap')
names(ry_pos)[1:6] <- c('rmChrom','rmPosit','rmN','ypsChrom','ypsPosit','ypsN')
# Reorder the reverse map's columns so both tables share the same layout.
ry_pos_exchg <- ry_pos[,c(4,5,6,1,2,3,7)]
res <- merge.data.frame(yr_pos,ry_pos_exchg,by.x = c(1,2,4,5),by.y=c(1,2,4,5),all=T,sort=F)
# Keep only positions whose mapping direction agrees in both files.
res_good <- res[which(res$drct.x == res$drct.y),] # 86351
## rm11_B_bad.pos is from nnp/1_0_0_pion_freebayes vcf file.
rm11_bad_pos <- read.table("~/cloud/project/snpfilter/2_5Bsnp/rm11_B_bad.pos", header=F, stringsAsFactors = F)
yps128_bad_pos <- read.table("~/cloud/project/snpfilter/2_5Bsnp/yps128_5_bad.pos", header=F,stringsAsFactors = F)
colnames(rm11_bad_pos) <- c("rmChrom","rmPosit")
colnames(yps128_bad_pos) <- c("ypsChrom","ypsPosit")
# rm11_B_bad_position: within 125 bp, delete them. 125 = 75 + 50,
# 50 is the longest indel in vcf file: rm11_B.vcf
# yps128_5_bad_position: within 78 bp, delete them. 78 = 75 + 3,
# 3 is the longest indel in vcf file: yps128_5.vcf
# Bracket each bad position with an upper/lower window bound; bad position i
# contributes two rows that share flag i.
yps128_bad_upper <- data.frame(ypsChrom = yps128_bad_pos$ypsChrom, ypsPosit= yps128_bad_pos$ypsPosit + 78, ypsFlag=1:nrow(yps128_bad_pos) )
yps128_bad_lower <- data.frame(ypsChrom = yps128_bad_pos$ypsChrom, ypsPosit = yps128_bad_pos$ypsPosit - 78, ypsFlag=1:nrow(yps128_bad_pos))
yps128_bad_all <- rbind(yps128_bad_upper, yps128_bad_lower)
res1 <- merge.data.frame(res_good,yps128_bad_all,all=T,by=c("ypsChrom","ypsPosit"))
decision <- vector(mode = "integer", length=nrow(res1))
# Mark every row lying between the lower and upper bound rows of flag i.
# NOTE(review): assumes each flag appears exactly twice in res1 after the
# merge and that the merged rows are position-ordered -- duplicated join keys
# would break the which(...)[1]/[2] indexing; verify.
for(i in 1:max(yps128_bad_all$ypsFlag)) {
lower_bound <- which(res1$ypsFlag == i)[1]
upper_bound <- which(res1$ypsFlag == i)[2]
decision[lower_bound:upper_bound] <- i
print(i)
}
# Drop all marked rows.
# NOTE(review): if nothing is marked, -which(decision>0) is -integer(0) and
# this drops EVERY row -- confirm at least one row is always flagged.
res2 <- res1[-which(decision>0), ]
# Same windowing for the rm11 bad positions (125 bp padding).
rm11_bad_upper <- data.frame(rmChrom = rm11_bad_pos$rmChrom, rmPosit = rm11_bad_pos$rmPosit + 125, rmFlag=1:nrow(rm11_bad_pos))
rm11_bad_lower <- data.frame(rmChrom = rm11_bad_pos$rmChrom, rmPosit = rm11_bad_pos$rmPosit - 125, rmFlag= 1 :nrow(rm11_bad_pos))
rm11_bad_all <- rbind(rm11_bad_upper, rm11_bad_lower)
res3 <- merge.data.frame(res2,rm11_bad_all,all=T,by=c("rmChrom","rmPosit"))
decision2 <- vector(mode = "integer", length=nrow(res3))
for(i in 1:max(rm11_bad_all$rmFlag)) {
lower_bound <- which(res3$rmFlag == i)[1]
upper_bound <- which(res3$rmFlag == i)[2]
decision2[lower_bound:upper_bound] <- i
print(i)
}
res4 <- res3[-which(decision2>0), ]
# Sort surviving SNPs by yps position and flag the start of a new block
# wherever consecutive SNPs are >700 kb apart or on different chromosomes
# (mydisc returns Inf in that case).
yps_block <- res4[,c(3,4,1,2)] %>% arrange(ypsChrom,ypsPosit)
yps_block_tf <- vector(length=nrow(yps_block))
yps_block_tf[1] <- TRUE
for(i in 2:nrow(yps_block)) {
yps_block_tf[i] <-
mydisc(yps_block[i-1,"ypsChrom"], yps_block[i-1,"ypsPosit"],
yps_block[i,"ypsChrom"], yps_block[i,"ypsPosit"]) > 700000
}
# Attach running block ids (tf2groupName) and write the SNP lists out.
yps_rm_84349_group <- cbind(yps_block,yps_block_tf,gN= tf2groupName(yps_block_tf))
options(scipen=999)  # avoid scientific notation in the written positions
write.table(unique(yps_rm_84349_group[,c(1,2)]),file="~/cloud/project/snpfilter/2_5Bsnp/yps128_5_snpls_bys",row.names = F,quote=F,col.names = F)
write.table(unique(yps_rm_84349_group[,c(3,4)]),file="~/cloud/project/snpfilter/2_5Bsnp/rm11_B_snpls_bys",row.names = F,quote=F,col.names = F)
write.table(unique(yps_rm_84349_group[,c(1,2,6)]),file="~/cloud/project/snpfilter/2_5Bsnp/yps128_5_snpls_bys_group",row.names = F,quote=F,col.names = F)
write.table(unique(yps_rm_84349_group[,c(3,4,6)]),file="~/cloud/project/snpfilter/2_5Bsnp/rm11_B_snpls_bys_group",row.names = F,quote=F,col.names = F)
|
3b1a3ac223e23ef333ca6740687535a9f7266545
|
79be2d33dde823dc51d0729ef17385b95ddc844f
|
/routines/update.b.R
|
6606e91a155633129dcd32d2a5b6d47536a8ecab
|
[] |
no_license
|
sungkyujung/COBS
|
cff4322dc8feb810daf749cb3ec70cb758c51eaf
|
526e47b445fa9b4600d6ba37b4a2d95a56d6ec4a
|
refs/heads/master
| 2022-11-17T22:58:30.223135
| 2020-07-10T04:21:44
| 2020-07-10T04:21:44
| 278,534,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,684
|
r
|
update.b.R
|
# Update the coefficient vector b by minimising || Y v - X b || + penalty(b).
#
# The problem is rewritten as a single-response regression y ~ x with no
# intercept (if an intercept is needed, the caller must include a constant
# column in X explicitly).
#
# @param X design matrix (n x p).
# @param Y response matrix (n x k).
# @param v current loading vector; the working response is y = Y %*% v.
# @param param list with elements:
#   alpha  in [0, 1]: 1 = lasso, 0 = group lasso, in between = sparse-group.
#   lambda overall sparsity parameter; NA means "choose by cross-validation".
# @param grp.index.X group index for the columns of X (used when alpha < 1).
# @return list(bhat = estimated coefficients, lambda = penalty actually used).
update.b <- function(X = X, Y = Y, v = v.now,
                     param = list(alpha = 1, lambda = NA),
                     grp.index.X) {
  # Rewrite as a regression problem with response y = Y v.
  x <- X
  y <- Y %*% v
  # Two options:
  # TRUE  -> lambda is chosen inside (cross-validation);
  # FALSE -> lambda was supplied by the caller.
  Lambda.choice <- is.na(param$lambda)
  # Three cases:
  # I.   No penalty (lambda = 0, or a single covariate): plain OLS.
  # II.  alpha == 1: Gaussian lasso via glmnet.
  # III. alpha < 1: sparse-group lasso via SGL.
  if (NCOL(x) == 1 || identical(param$lambda, 0)) {
    # Case I: ordinary least squares without intercept.
    bhat <- lm(y ~ x - 1)$coefficient
    # Rank-deficient fits yield NA coefficients; treat those as zero.
    bhat <- ifelse(is.na(bhat), 0, bhat)
  } else {
    if (identical(param$alpha, 1)) {
      # Case II: Gaussian lasso using glmnet.
      require(glmnet)
      if (Lambda.choice) { # option I: pick lambda by CV (1-SE rule)
        a <- cv.glmnet(x, y, alpha = param$alpha,
                       standardize = FALSE, intercept = FALSE)
        # Drop the intercept row (row 1) of the coefficient vector.
        # Bug fix: the original indexed with 2:(q+1), where q was an
        # undefined free variable resolved from the global environment.
        bhat <- coef(a, s = "lambda.1se")[2:(NCOL(x) + 1)]
        param$lambda <- a$lambda.1se
      } else { # option II: lambda supplied
        a <- glmnet(x, y, alpha = param$alpha, lambda = param$lambda,
                    standardize = FALSE, intercept = FALSE)
        bhat <- as.vector(a$beta)
      }
    } else {
      # Case III: sparse-group lasso using SGL (alpha < 1).
      require(SGL)
      if (Lambda.choice) { # option I: pick lambda by CV (1-SE rule)
        a <- cvSGL(data = list(x = x, y = y), alpha = param$alpha,
                   index = grp.index.X, type = "linear",
                   standardize = FALSE)
        # One-standard-error rule: first lambda whose CV loss falls within
        # one SD of the minimum.
        l.min.index <- which.min(a$lldiff)
        l.1se.index <- which(a$lldiff - (a$lldiff[l.min.index] + a$llSD[l.min.index]) < 0)
        bhat <- a$fit$beta[, l.1se.index[1]]
        param$lambda <- a$lambdas[l.1se.index[1]]
      } else { # option II: lambda supplied
        a <- SGL(data = list(x = x, y = y), alpha = param$alpha, lambdas = param$lambda,
                 index = grp.index.X, type = "linear",
                 standardize = FALSE)
        bhat <- a$beta
      }
    }
  }
  return(list(bhat = bhat, lambda = param$lambda))
}
|
caf6a2628d953987870f29f5bd0d63f8b4664582
|
a226f4b4cf54dd0e8164a727d24dca99e79e1354
|
/tests/testthat/test_write_spss.R
|
83f4ffd54f5675c4ea791494833e82cbe99a621b
|
[] |
no_license
|
beckerbenj/eatGADS
|
5ef0bdc3ce52b1895aaaf40349cbac4adcaa293a
|
e16b423bd085f703f5a548c5252da61703bfc9bb
|
refs/heads/master
| 2023-09-04T07:06:12.720324
| 2023-08-25T11:08:48
| 2023-08-25T11:08:48
| 150,725,511
| 0
| 1
| null | 2023-09-12T06:44:54
| 2018-09-28T10:41:21
|
R
|
UTF-8
|
R
| false
| false
| 5,632
|
r
|
test_write_spss.R
|
### load test data
### Shared fixtures for all tests below: a GADSdat object read from the
### helper SQLite database, its metadata table, and the attribute sets we
### expect haven to round-trip for variables ID1 and V2.
# df <- getGADS(filePath = "tests/testthat/helper_database.db")
df <- getGADS(filePath = "helper_dataBase.db")
# label_df <- labelsGADS(filePath = "tests/testthat/helper_database.db")
label_df <- labelsGADS(filePath = "helper_dataBase.db")
# NOTE(review): label_df == "V2" compares every cell of the data frame, so
# this keeps any row containing "V2" in any column -- confirm this is intended
# rather than label_df$varName == "V2". label_df_V2 is unused in the tests
# visible here.
label_df_V2 <- label_df[which(label_df == "V2"), ]
expected_ID1 <- list(format.spss = "F8.2")
expected_V2 <- list(label = "Variable 2",
format.spss = "F10.2",
na_values = 99,
class = c("haven_labelled_spss", "haven_labelled"),
labels = c(mis = 99))
### write SPSS
# Round trip: write the fixture GADSdat to .sav and re-import it; both the
# data and the label metadata must come back unchanged.
test_that("GADSdat correctly written to sav", {
# write_spss("c:/Benjamin_Becker/02_Repositories/packages/eatGADS/tests/testthat/helper_write_spss.sav")
sav_path <- tempfile(fileext = ".sav")
write_spss(df, filePath = sav_path)
#test_df <- export_tibble(df)
#test_df2 <- haven::read_sav("c:/Benjamin_Becker/02_Repositories/packages/eatGADS/other_code/helper_write_spss_manual.sav", user_na = TRUE)
#str(test_df)
#str(test_df2)
df2 <- import_spss(sav_path)
# df2 <- import_spss("c:/Benjamin_Becker/02_Repositories/packages/eatGADS/tests/testthat/helper_write_spss.sav")
expect_equal(df$dat, df2$dat)
# Row names differ after the round trip but carry no meaning; drop them.
rownames(df$labels) <- NULL
expect_equal(df$labels, df2$labels)
})
# A previously problematic .sav file must still import/export without error.
test_that("Full workflow with haven", {
#test_df <- import_spss("tests/testthat/helper_spss_havenbug.sav")
suppressWarnings(test_df <- import_spss("helper_spss_havenbug.sav"))
test_tbl <- export_tibble(test_df)
expect_silent(write_spss(test_df, filePath = tempfile()))
})
# Factor data with metadata copied from another variable (reuseMeta) must
# also write without error.
test_that("Full workflow with haven with factor and reuseMeta", {
f <- tempfile(fileext = ".sav")
iris_gads <- import_DF(iris)
iris_gads <- reuseMeta(iris_gads, "Sepal_Length", df, "V2")
expect_silent(write_spss(iris_gads, filePath = f))
})
### Possible Problems when writing with haven
# Documents haven::write_sav edge cases using hand-built variable attributes.
test_that("Check haven behaviour", {
raw_df <- export_tibble(df)
test3 <- test1 <- test2 <- raw_df
# Label + format only: writes silently.
attributes(test1$V2) <- list(label = "Variable - 2",
format.spss = "F10.2")
expect_silent(haven::write_sav(test1, tempfile()))
# Too many user-defined missing values for the declared format: errors.
attributes(test2$V2) <- list(na_values = c(99, 98, 97, 95),
class = c("haven_labelled_spss", "haven_labelled"),
format.spss = "F10.2")
expect_error(haven::write_sav(test2, tempfile()), "Writing failure: The number of defined missing values exceeds the format limit.")
# Third attribute combination kept for reference; the expectation below it
# is intentionally commented out (behaviour not reliably reproducible).
attributes(test3$V2) <- list(label = "Schul-ID",
display_width = 14,
format.spss = "F6.0",
class = c("haven_labelled_spss", "haven_labelled"))
#expect_error(haven::write_sav(test3, tempfile())
## Note to myself: There is still a haven bug regarding wide string variable when reading (missing codes are dropped)
# and writing (sometimes all missing codes and value labels are dropped; haven 2.2.0, 24.03.2020)
# but it is difficult to produce a minimal reprex
})
### documentation haven bug for long strings and specific variable names
# The commented lines below document how the helper_longstring.sav fixture
# (a 1000-character string variable) was generated.
#df <- data.frame(Pflub1516_c = 1, Pflub1516_d = paste(rep("a", 1000), collapse = ""), stringsAsFactors = FALSE)
#haven::write_sav(df, path = "tests/testthat/helper_longstring.sav")
# Strings longer than 255 characters must survive a write/read round trip.
test_that("Write strings longer than 255", {
#g <- import_spss("tests/testthat/helper_longstring.sav")
suppressWarnings(g <- import_spss("helper_longstring.sav"))
f <- tempfile(fileext = ".sav")
write_spss(g, filePath = f)
out <- haven::read_spss(f)
expect_equal(dim(out), c(1, 2))
})
# Missing codes must survive a pure haven round trip, for both numeric
# and character variables.
test_that("Haven and eatGADS import and export missing codes correctly", {
rawDat_missings <- haven::read_spss("helper_spss_missings.sav", user_na = TRUE)
f <- tempfile(fileext = ".sav")
haven::write_sav(rawDat_missings, f)
out <- haven::read_spss(f, user_na = TRUE)
expect_equal(attributes(rawDat_missings$VAR1), attributes(out$VAR1))
###character variables
#rawDat_missings2 <- haven::read_spss("tests/testthat/helper_spss_havenbug.sav", user_na = TRUE)
rawDat_missings2 <- haven::read_spss("helper_spss_havenbug.sav", user_na = TRUE)
f2 <- paste0(tempfile(), ".sav")
haven::write_sav(rawDat_missings2, f2)
out2 <- haven::read_spss(f2, user_na = TRUE)
expect_equal(attributes(rawDat_missings2$v2)$na_values, attributes(out2$v2)$na_values)
})
# Missing codes declared via import_raw must be written for both the numeric
# and the character variable.
test_that("Write variables with missing codes", {
g <- import_raw(df = data.frame(v1 = "abc", v2 = 1, stringsAsFactors = FALSE),
varLabels = data.frame(varName = c("v1", "v2"), varLabel = NA, stringsAsFactors = FALSE),
valLabels = data.frame(varName = c("v1", "v1", "v2"), value = c(-96, -99, -99),
valLabel = c("miss1", "miss2", "miss1"),
missings = c("miss", "miss", "miss"), stringsAsFactors = FALSE))
g <- changeSPSSformat(g, varName = "v1", format = "A3")
f <- tempfile(fileext = ".sav")
write_spss(g, filePath = f)
out <- haven::read_spss(f, user_na = TRUE)
tib <- export_tibble(g)
# Inspection only; these two calls assert nothing.
attributes(tib$v1)
attributes(tib$v2)
# numeric
expect_equal(attributes(out$v2)$labels, c(miss1 = -99))
expect_equal(attributes(out$v2)$na_values, c(-99))
# character
expect_equal(attributes(out$v1)$labels, c(miss2 = "-99", miss1 = "-96"))
# (implemented since haven 2.4.0, see https://github.com/tidyverse/haven/issues/409)
expect_equal(attributes(out$v1)$na_values, c("-99", "-96"))
})
|
d887c7750840357e6da9a204abd897073b86364b
|
a17576eece9911c1e48dab9e05734bc0a3c15702
|
/Trang-Dennis/factor-filter.R
|
957f8e5b6b72b0e6c2d703d510b5cff4d3948e75
|
[] |
no_license
|
nguyentr17/S17MAP
|
83284caeede870f1ddeab680dce3137920776076
|
1673c95607b62c19ed26bdfcb7c07bd8556d7213
|
refs/heads/master
| 2021-01-11T16:16:01.512199
| 2017-05-09T01:48:10
| 2017-05-09T01:48:10
| 80,052,109
| 0
| 5
| null | 2017-03-14T19:14:20
| 2017-01-25T19:58:33
|
HTML
|
UTF-8
|
R
| false
| false
| 3,146
|
r
|
factor-filter.R
|
### R=script : factor-filter.R
### Description : This R script is used to create solid factor columns and clean corresponding levels.
### In-the-context: It will be used for analysis and filtering in Shiny app.
### 1. Factor to be filtered: GENDER
### A set of criteria for regular expressions/ key patterns
### (case-insensitive prefixes; "^h"/"^mu" presumably cover non-English
###  labels such as "hombre"/"mujer" -- confirm against the data)
gen = c("^gen","^sex")
gen_male = c("^m","^h")
gen_female = c("^f", "^mu")
### ismatch
### @input: cond: a vector of key patterns (e.g. gen)
### x, y, z: 3 columns to look for key
### @return: 0 if no match
### index of the factor (1, 2, 3() if there is match
ismatch <- function(cond, x, y, z) {
x <- grep(paste(cond, collapse = "|"), c(x, y, z), ignore.case = T, value = FALSE)
return (ifelse(length(x), as.numeric(x), as.numeric(0)))
}
### level_gen_fun
### @input: x is the col index (given by factor_gender)
###         y is the row index
### @return: -1 if no gender factor indicated
###          -2 if gender factor indicated but level_gen key patterns not matched (require future manual check)
###          M/F
### @note: this function is specific to gender only. Needs to think about how to generalize it.
### NOTE(review): assumes the level text for factor slot x lives in column
### 2*x+3 of the global `tangram` -- verify the column layout. Because the
### nested ifelse mixes "M"/"F" with -1/-2, the result is coerced to
### character whenever a letter code occurs.
level_gen_fun <- function(x, y) {
return (ifelse(x == 0, -1,
ifelse(grepl(paste(gen_female, collapse = "|"),tangram[y,2*x+3], ignore.case = T),
"F",
ifelse(grepl(paste(gen_male, collapse = "|"),tangram[y,2*x+3], ignore.case = T),
"M",-2))))
} ## cannot be used for data table
# WRONG CODE: tangram$factor_gender <- mapply(ismatch, gen, tangram$Factor1, tangram$Factor2, tangram$Factor3, SIMPLIFY = TRUE) ## wrong: clase301
# Row-wise ismatch via data.table, then back to a data frame.
tangram <- as.data.table(tangram)
tangram[, factor_gender := ismatch(gen, Factor1, Factor2, Factor3), by = 1:nrow(tangram)]
tangram <- as.data.frame(tangram)
tangram$level_gender <- mapply(level_gen_fun, tangram$factor_gender, 1:nrow(tangram), SIMPLIFY = TRUE)
### Stem major
stem <- c("stem")
stem_Y <- c("^y")
stem_N <- c("^n")
#tangram$factor_STEM <- mapply(ismatch, stem, tangram$Factor1, tangram$Factor2, tangram$Factor3, SIMPLIFY = TRUE)
# NOTE(review): tangram was converted back to a data.frame above, but the
# next line uses data.table's := syntax again -- this likely errors unless
# tangram is re-converted with as.data.table first; confirm.
tangram[, factor_STEM := ismatch(stem, Factor1, Factor2, Factor3), by = 1:nrow(tangram)]
level_stem_fun <- function(x, y) {
return (ifelse(x == 0, -1,
ifelse(grepl(paste(stem_Y, collapse = "|"),tangram[y,2*x+3], ignore.case = T),
"Y",
ifelse(grepl(paste(stem_N, collapse = "|"),tangram[y,2*x+3], ignore.case = T),
"N",-2))))
}
tangram$level_stem <- mapply(level_stem_fun, tangram$factor_STEM, 1:nrow(tangram), SIMPLIFY = TRUE)
### 3. Age
age <- c("^age")
tangram$factor_age <- mapply(ismatch, age, tangram$Factor1, tangram$Factor2, tangram$Factor3, SIMPLIFY = TRUE)
# Age level: the numeric value itself, or -2 if the cell is not numeric,
# or -1 if no age factor column was detected.
level_age_fun <- function(x, y) {
return (ifelse(x == 0, -1,
ifelse(!is.na(as.numeric(tangram[y,2*x+3])),
as.numeric(tangram[y,2*x+3]),
-2)))
}
tangram$level_age <- mapply(level_age_fun, tangram$factor_age, 1:nrow(tangram), SIMPLIFY = TRUE)
|
b5468f8d487af9217dfb070d5267876cd14c8bb4
|
88ed4aca19097614e87ff5e2512eb5082f418e12
|
/scripts/pValues.R
|
e353604abf7ab23107f0d685c086472799d49ff5
|
[
"MIT"
] |
permissive
|
tomfaulkenberry/RworkshopTLU
|
021541477b440ac4bb963b7c830751ca27cef692
|
4c933ce0cb2135bd4e6b85c49f0d571246a8a7fb
|
refs/heads/master
| 2020-03-26T06:16:22.334931
| 2018-08-15T09:11:10
| 2018-08-15T09:11:10
| 144,597,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
pValues.R
|
# Simulate the distribution of p-values when H0 is false (true mean 109,
# tested mean 100, n = 50, SD = 15) and plot how often p < .05.

# Disable scientific notation (e.g. 1.05e10) in printed output.
options(scipen = 999)

# Simulation settings.
nSims <- 50000          # number of simulated experiments
M     <- 109            # true population mean of the simulated data
n     <- 50             # sample size per experiment
SD    <- 15             # population SD of the simulated data
p     <- numeric(nSims) # preallocated store for the simulated p-values
bars  <- 100            # number of histogram bins

# One experiment per iteration: draw a sample, t-test against mu = 100,
# record the p-value. (Call order kept so the RNG stream is unchanged.)
for (i in seq_len(nSims)) {
  x <- rnorm(n = n, mean = M, sd = SD)
  z <- t.test(x, mu = 100)
  p[i] <- z$p.value
  cat(sprintf("Simulation %d of %d -- p=%f\n", i, nSims, p[i]))
}

# Empirical power: share of experiments significant at the .05 level.
numSig <- sum(p < 0.05)
propSig <- 100 * numSig / nSims

# Plot the p-value distribution, zoomed into [0, 0.05].
op <- par(mar = c(5, 7, 4, 4)) # widen the white-space around the graph
hist(p, breaks = bars, xlab = "p-values", ylab = "number of experiments\n", axes = FALSE,
     main = paste("Proportion of significant results = ", propSig, "%"),
     col = "grey", xlim = c(0, 0.05), ylim = c(0, nSims / 10))
axis(side = 1, at = seq(0, 0.05, 0.01), labels = seq(0, 0.05, 0.01))
axis(side = 2, las = 2)
abline(h = nSims / bars, col = "red", lty = 2) # expected bin height under a uniform p distribution
abline(v = 0.05, lty = 2, col = "red")
|
f0b1b5b03bb86312ca41d8f1391c88d7e65cdfef
|
4073274c2d2b6bf0eafa9ffdef36abd1aefdb70d
|
/man/draw_isolation_delay_period.Rd
|
c6776f65925aaa47ab892ff3c90a836b610705f1
|
[
"Apache-2.0"
] |
permissive
|
bcgov/epi.branch.sim
|
4ff0d7dee7b135e409eed6881f3cd7485f96fdd9
|
4e06cd4dcca9309902c8791fdf69783e37db5e64
|
refs/heads/main
| 2023-01-29T08:55:54.471932
| 2020-12-14T22:10:19
| 2020-12-14T22:10:19
| 285,740,030
| 9
| 0
|
Apache-2.0
| 2020-12-14T22:10:21
| 2020-08-07T04:51:07
|
R
|
UTF-8
|
R
| false
| true
| 2,985
|
rd
|
draw_isolation_delay_period.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/draw_distributions.R
\name{draw_isolation_delay_period}
\alias{draw_isolation_delay_period}
\title{Draw delay to isolation periods for new cases}
\usage{
draw_isolation_delay_period(
state_df,
sim_params,
primary_state_df = NULL,
primary_case_ids = NULL
)
}
\arguments{
\item{state_df}{\code{state_df} object for the simulation}
\item{sim_params}{\code{sim_params} object (a list) containing simulation parameters, where
all of the information needed to describe the distribution is found within
\code{sim_params$iso_delay_params}. Current distributions possible are:
\itemize{
\item \code{uniform}: Delay is drawn from uniform distributions with
attributes "min" and "max" given in \code{sim_params$iso_delay_params} for
each type of case. Min/Max values to be provided are:
\itemize{
\item \code{iso_delay_traced_[min|max]}: Range for manually traced cases
\item \code{iso_delay_untraced_[min|max]}: Range for untraced cases from the
non-distancing population.
\item \code{iso_delay_untraced_pd_[min|max]}: Range for untraced cases from
the distancing population (any case with contact_rate less than 1).
}
Cases traced by the app (require both index and secondary cases to be app users
\emph{and} for the secondary case to be app-compliant) have a zero day delay.
\item \code{Hellewell}: Delay is drawn from a Weibull distribution with
attributes "shape" and "scale" given in \code{sim_params$iso_delay_params}.
Traced cases have their delay set to zero days.
}}
\item{primary_state_df}{The \code{state_df} object for the index/primary cases. Defaults to \code{NULL}.
Only required for traced cases (i.e. not needed when generating initial or
imported cases).}
\item{primary_case_ids}{A list of case_ids for the primary cases. Not required for initial or imported
cases. Defaults to \code{NULL}.}
}
\value{
A vector of length n for case delay to isolation, measured from start of symptom onset (double)
}
\description{
The number of days between symptom onset and isolation, which may be negative if isolation
occurs prior to symptom onset (as sometimes the case with traced cases).
}
\details{
Within the list object \code{sim_params$iso_delay_params}, the user can define several delay
periods based on tracing type, tracing status, and whether the case is practicing distancing.
Traced cases have their delays measured from the index case's isolation time, so traced cases
may isolate prior to their own symptom onset. Untraced cases have delays measured from the start
of their own symptom onset. The untraced timeline is always considered for traced cases, so that
if a traced case would have been isolated earlier just because of their symptom onset timeline,
they would isolate at this earlier time.
Although this function returns the number of days between symptom onset and isolation, the delay
returned by this function may be negative if isolation occurs prior to symptom onset.
}
|
b311150e70e5c408c8b0aaea8aa082dc6d4a98eb
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.end.user.computing/man/appstream_describe_image_permissions.Rd
|
a6a4d22b613cf27fafabad27bbfd950910dab32f
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,151
|
rd
|
appstream_describe_image_permissions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appstream_operations.R
\name{appstream_describe_image_permissions}
\alias{appstream_describe_image_permissions}
\title{Retrieves a list that describes the permissions for shared AWS account
IDs on a private image that you own}
\usage{
appstream_describe_image_permissions(
Name,
MaxResults = NULL,
SharedAwsAccountIds = NULL,
NextToken = NULL
)
}
\arguments{
\item{Name}{[required] The name of the private image for which to describe permissions. The
image must be one that you own.}
\item{MaxResults}{The maximum size of each page of results.}
\item{SharedAwsAccountIds}{The 12-digit identifier of one or more AWS accounts with which the image
is shared.}
\item{NextToken}{The pagination token to use to retrieve the next page of results for
this operation. If this value is null, it retrieves the first page.}
}
\description{
Retrieves a list that describes the permissions for shared AWS account IDs on a private image that you own.
See \url{https://www.paws-r-sdk.com/docs/appstream_describe_image_permissions/} for full documentation.
}
\keyword{internal}
|
de0fa4da4f9be15e8643c2f54365133424855773
|
2a249957bef6cb690405bf09ff2eda8899f537bc
|
/5/app.R
|
b02ff715add3fb2c025956687731113a83099b89
|
[] |
no_license
|
clobos/Renata_Cristian_Clarice
|
9dd216a819ded7f1ddfa1edf0f1fe5f8ddc0e53b
|
5fe6d36c722e684bcc2a3b3e5e69f932021fefcd
|
refs/heads/master
| 2020-08-07T14:33:50.567595
| 2019-10-07T21:33:40
| 2019-10-07T21:33:40
| 213,489,755
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
app.R
|
# Minimal Shiny app: a single page showing only a title panel.
library(shiny)
ui <- fluidPage(
titlePanel("Meu primeiro App em Shiny")
)
# Empty server -- no reactive logic yet; signature is required by shinyApp().
server <- function(input, output){}
shinyApp(ui = ui, server = server)
|
509039c32918d20c6d0951c3813c9a5586d17d93
|
bec75f7b755d0c79f6c973069802312740fd9e09
|
/Tarea5/Regresion_lineal.R
|
891600896e3ab5030aa392f4cd3f7fbb9ba71ef0
|
[] |
no_license
|
IMNOTAROBOT/CompuStat
|
c85b078621e759458730f8de65986024317b892f
|
87eebecfaeea34174c01726ef6df9e2a92d90e88
|
refs/heads/master
| 2021-01-23T20:21:27.984189
| 2016-01-08T13:00:24
| 2016-01-08T13:00:24
| 41,065,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,641
|
r
|
Regresion_lineal.R
|
library(Rcpp)
library(ggplot2)
# Linear regression on the iris data, fit three ways: lm(), closed-form OLS,
# and (below) a Bayesian MH sampler.
data(iris)
head(iris)
names(iris)
## Regression fit by R's lm()
mod <- lm(iris$Sepal.Length ~ iris$Sepal.Width + iris$Petal.Length + iris$Petal.Width)
summary(mod)
plot(mod)
## By hand: closed-form OLS via the normal equations (X'X) beta = X'Y
X<- as.matrix(cbind(1,iris[,2:4]))
Y<- iris[,1]
beta.hat <- solve(t(X) %*% X, t(X) %*% Y)
Y.pred <- X %*% beta.hat
errors <- Y-Y.pred
# Residual diagnostics.
hist(errors)
qqnorm(errors)
plot(errors,Y.pred)
beta.hat
##BAYES STATISTICS
# N(1, 0.2) priors for the three slope coefficients.
prior.beta1 <- function(x) dnorm(x, 1, 0.2)
prior.beta2 <- function(x) dnorm(x, 1, 0.2)
prior.beta3 <- function(x) dnorm(x, 1, 0.2)
plot(prior.beta1, col="darkblue", xlim=c(-5,18), lwd="2", main="Prior for beta1", ylab="density")
plot(prior.beta2 , col="darkred", xlim=c(-5,18), lwd="2", main="Prior for beta2", ylab="density")
plot(prior.beta3 , col="darkgreen", xlim=c(-5,18), lwd="2", main="Prior for beta3", ylab="density")
# Independent variables (design matrix; note: no intercept column here).
data <- data.frame(Sepal.Width=iris$Sepal.Width,Petal.Width=iris$Petal.Width,Petal.Length=iris$Petal.Length)
dataMat <- as.matrix(data)
# The response we want to approximate.
Y <- matrix(iris$Sepal.Length, ncol=1)
# Random-walk proposal: perturb each parameter by N(0, jump) where the jump
# size shrinks with the number of observations. (C++ source is an R string
# literal -- left byte-identical.)
cppFunction('
NumericVector proposal(NumericVector theta,NumericMatrix X){
int nparam = theta.size();
int n = X.nrow();
double jump = 0.25/sqrt(n);
NumericVector newtheta(nparam);
for (int i=0; i<nparam; i++){
newtheta[i] = R::rnorm(theta[i], jump);
}
return newtheta;
}')
proposal(c(1,1,1,1),dataMat)
# Objective function: log-posterior = Gaussian log-likelihood of the linear
# model plus N(0, 100) log-priors on the betas and a gamma log-prior on the
# error sd. theta = (beta_1..beta_{p-1}, sd).
cppFunction('
double objdens(NumericMatrix x, NumericVector theta, NumericMatrix y){
int i;
double lkh, logprior, yhat;
int m = x.nrow();
int p =x.ncol();
NumericVector beta(p-1);
double sd;
//Primer valor de las betas
for(i=0;i<p-1;i++){
beta[i]=theta[i];
}
sd = theta[p-1]; //El error
NumericVector aux(m);
// Compute loglikelihood
lkh=0;
for (int i=0; i<m; i++){
aux= x(i,_) * beta;
yhat = std::accumulate(aux.begin(),aux.end(),0.0);
lkh += -0.5*pow((y[i]-yhat)/sd,2)-log(sd);
}
// Compute logprior
logprior = 0.0;
for(int j = 0; j<p-1;j++){
logprior += R::dnorm(beta[j],0.0,100,true);
}
logprior += R::dgamma(sd, 3.0, 0.5, true);
// Log of target density
return lkh + logprior;
}')
# Smoke test with all parameters at 1.
# NOTE(review): theta has length 4 here while x has 3 columns, so beta has
# length 3 and theta[3] (0-based) is the sd -- confirm this matches MHBayes.
objdens(dataMat, c(1,1,1,1), Y)
# Run the Metropolis-Hastings sampler (MHBayes is defined in the external
# C++ file) and inspect the chain.
sourceCpp("BayesianMHlineal.cpp")
nsim <- 10000
init <- c(1, 1, 1, 1)  # starting values: beta1, beta2, beta3, error sd
mh.samp <- MHBayes(nsim, init, objdens, proposal, dataMat, Y)
estims <- mh.samp$theta
# SOME DIAGNOSTIC IMPORTANT STUFF
# Exploration graph: path of the first two parameters, labelled by iteration.
library(calibrate)
pts <- seq(1, 100, by = 5)
plot(estims[pts, ], type = "l", xlab = "mean", ylab = "sd")
textxy(estims[pts, 1], estims[pts, 2], pts)
cor(estims)
### 1) REJECTION RATES
rejections <- mh.samp$rejections[-1]
trials <- rejections + 1
rej.rate <- cumsum(rejections) / cumsum(trials)
plot(rej.rate, type = "l", ylim = c(0, 1), main = "Rejection rate")
plot(trials[-1], type = "l", main = "Number of trials")
### 2) AUTOCORRELATION
acf(estims[ , 1])
acf(estims[ , 2])
acf(estims[ , 3])
acf(estims[ , 4])
# burnin and subsampling
burnin <- 100
# BUG FIX: the original stored the burned-in chain in an unused variable
# `estim`, so the burn-in rows were never actually dropped. Drop them here.
estims <- estims[-(1:burnin), ]
thinning <- 0.75 # keep 75% of the remaining observations to reduce autocorrelation
# OBS: thinning is rarely useful!!!! check that nothing changes
# BUG FIX: the subsample size must be based on the post-burn-in length,
# not on the full nsim.
sub <- sample.int(nsim - burnin, size = round(thinning * (nsim - burnin)))
estims <- estims[sub, ]
acf(estims[ , 1])
acf(estims[ , 2])
acf(estims[ , 3])
acf(estims[ , 4])
# LET'S COMPARE PRIORS AND POSTERIORS AND DO INFERENCE
# BUG FIX: prior.sd was referenced below but never defined. Define it to
# mirror the log-prior used in objdens, R::dgamma(sd, 3.0, 0.5, true),
# which uses the shape/scale parameterization.
prior.sd <- function(x) dgamma(x, shape = 3.0, scale = 0.5)
hist(estims[ ,1], prob=TRUE, xlim=c(0.5,2.5), breaks=20, col="lightgreen",
     main="Histogram and Posterior(blue) vs Prior(red) of beta1") # posterior distribution of beta1
plot(prior.beta1, xlim=c(0.5,2.5), col="darkred", lwd="2", ylim=c(0,10), add=TRUE)
lines(density(estims[ ,1]), col="darkblue", lwd="2")
# BUG FIX: the next three panels plotted the undefined prior.sd; use the
# matching prior for each parameter, and correct the copy-pasted titles.
hist(estims[ ,2], prob=TRUE, xlim=c(0,1), breaks=40, col="yellow",
     main="Histogram and Posterior(blue) vs Prior(red) of beta2") # posterior distribution of beta2
plot(prior.beta2, xlim=c(0,1), col="darkred", lwd="2", ylim=c(0,10), add=TRUE)
lines(density(estims[ ,2]), col="darkblue", lwd="2")
hist(estims[ ,3], prob=TRUE, xlim=c(0,1), breaks=40, col="yellow",
     main="Histogram and Posterior(blue) vs Prior(red) of beta3") # posterior distribution of beta3
plot(prior.beta3, xlim=c(0,1), col="darkred", lwd="2", ylim=c(0,10), add=TRUE)
lines(density(estims[ ,3]), col="darkblue", lwd="2")
hist(estims[ ,4], prob=TRUE, xlim=c(0,1), breaks=40, col="yellow",
     main="Histogram and Posterior(blue) vs Prior(red) of sd") # posterior distribution of the error sd
plot(prior.sd, xlim=c(0,1), col="darkred", lwd="2", ylim=c(0,10), add=TRUE)
lines(density(estims[ ,4]), col="darkblue", lwd="2")
mean(estims[ ,1]) # approx. posterior mean of beta1
mean(estims[ ,2]) # approx. posterior mean of beta2
mean(estims[ ,3]) # approx. posterior mean of beta3
mean(estims[ ,4]) # approx. posterior mean of the error sd
# CERTAINTY INTERVALS (central 95% credible intervals)
alpha <- 0.05
intervals1 <- quantile(estims[ ,1], c(alpha/2, 1-alpha/2))
intervals1
intervals2 <-quantile(estims[ ,2], c(alpha/2, 1-alpha/2))
intervals2
intervals3 <-quantile(estims[ ,3], c(alpha/2, 1-alpha/2))
intervals3
intervals4 <-quantile(estims[ ,4], c(alpha/2, 1-alpha/2))
intervals4
# Comparison with the frequentist fit.
# NOTE(review): the sd estimate is placed first here -- confirm the intended
# ordering against beta.hat (whose first entry is the intercept).
estimaciones <- c(mean(estims[ ,4]),mean(estims[ ,1]),mean(estims[ ,2]),mean(estims[ ,3]))
summary(mod)
beta.hat
|
aee588bb45744d8d65d4edd590657738e99bdfde
|
3059b99e4aa4b3ddbdf19ab0570ad322c5604a88
|
/Nadi/server.R
|
8aa602ec29afabf6911009a9b0dff4e519431ed2
|
[] |
no_license
|
witusj/Quizii
|
f75f540d8b1c41c9c5b5c22043b7860c8541be18
|
ca053f1cdb576d32941c1dddbc6ea4b9ccec39d8
|
refs/heads/master
| 2020-06-04T20:05:51.638894
| 2014-09-23T12:03:43
| 2014-09-23T12:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,587
|
r
|
server.R
|
#Load all necessary supporting files
library(shiny)
library(plyr)
source('Setup.R')
#Set up server
# Shiny server for a 6-question multiple-choice quiz: samples questions from
# questionsMC (loaded via Setup.R/Globals.R), randomizes answer order, checks
# and scores the submitted answers, and appends results to results.csv.
shinyServer(function(input, output, session) {
source('Globals.R')
##Sample n MC from database. For each MC question randomly choose
##order of answers. Save correct answers in reactive value variables.
print(length(questionsMC$txt))
# Row indices of the six questions drawn for this session.
n <- sample(1:length(questionsMC$txt), size =6)
# Random orderings of columns 2:5 (the answer-option columns) -- one per
# question slot. NOTE(review): e, f, j and k are drawn but never used below.
a <- sample(c(2:5), size =4, replace=FALSE)
b <- sample(c(2:5), size =4, replace=FALSE)
d <- sample(c(2:5), size =4, replace=FALSE)
e <- sample(c(2:5), size =4, replace=FALSE)
f <- sample(c(2:5), size =4, replace=FALSE)
g <- sample(c(2:5), size =4, replace=FALSE)
h <- sample(c(2:5), size =4, replace=FALSE)
i <- sample(c(2:5), size =4, replace=FALSE)
j <- sample(c(2:5), size =4, replace=FALSE)
k <- sample(c(2:5), size =4, replace=FALSE)
# Selected questions; assigned to the global environment with <<- so helper
# functions (chkQuestion/scrQuestion) can read them.
vraagMC <<- questionsMC[n,]
print(vraagMC$txt[5])
print(vraagMC$corr[5])
# Correct answers stored per question in reactive values.
values <- reactiveValues()
values[['corr1']] <- vraagMC$corr[1]
values[['corr2']] <- vraagMC$corr[2]
values[['corr3']] <- vraagMC$corr[3]
values[['corr4']] <- vraagMC$corr[4]
values[['corr5']] <- vraagMC$corr[5]
values[['corr6']] <- vraagMC$corr[6]
# values[['corr7']] <- vraagMC$corr[7]
# values[['corr8']] <- vraagMC$corr[8]
# values[['corr9']] <- vraagMC$corr[9]
# values[['corr10']] <- vraagMC$corr[10]
##Build user interface with 6 MC. Save choices
##in reactive variables. Set initial choice of radio buttons to zero (no choice)
# First UI panel: questions 1-3, answer options shuffled via a/b/d.
output$ui1 <- renderUI({fluidPage(withMathJax(
radioButtons('answ1', paste('V1:',vraagMC$txt[1]),
c(vraagMC[1,a[1]],
vraagMC[1,a[2]],
vraagMC[1,a[3]],
vraagMC[1,a[4]]), selected = 0),
radioButtons('answ2', paste('V2:',vraagMC$txt[2]),
c(vraagMC[2,b[1]],
vraagMC[2,b[2]],
vraagMC[2,b[3]],
vraagMC[2,b[4]]), selected = 0),
radioButtons('answ3', paste('V3:',vraagMC$txt[3]),
c(vraagMC[3,d[1]],
vraagMC[3,d[2]],
vraagMC[3,d[3]],
vraagMC[3,d[4]]), selected = 0)
# radioButtons('answ4', paste('V4:',vraagMC$txt[4]),
# c(vraagMC[4,e[1]],
# vraagMC[4,e[2]],
# vraagMC[4,e[3]],
# vraagMC[4,e[4]]), selected = 0),
#
# radioButtons('answ5', paste('V5:',vraagMC$txt[5]),
# c(vraagMC[5,f[1]],
# vraagMC[5,f[2]],
# vraagMC[5,f[3]],
# vraagMC[5,f[4]]), selected = 0)
))
})
# Second UI panel: questions 4-6 (shuffled via g/h/i) plus the submit button.
output$ui2 <- renderUI({fluidPage(withMathJax(
radioButtons('answ4', paste('V4:',vraagMC$txt[4]),
c(vraagMC[4,g[1]],
vraagMC[4,g[2]],
vraagMC[4,g[3]],
vraagMC[4,g[4]]), selected = 0),
radioButtons('answ5', paste('V5:',vraagMC$txt[5]),
c(vraagMC[5,h[1]],
vraagMC[5,h[2]],
vraagMC[5,h[3]],
vraagMC[5,h[4]]), selected = 0),
radioButtons('answ6', paste('V6:',vraagMC$txt[6]),
c(vraagMC[6,i[1]],
vraagMC[6,i[2]],
vraagMC[6,i[3]],
vraagMC[6,i[4]]), selected = 0),
# radioButtons('answ9', paste('V9:',vraagMC$txt[9]),
# c(vraagMC[9,j[1]],
# vraagMC[9,j[2]],
# vraagMC[9,j[3]],
# vraagMC[9,j[4]]), selected = 0),
#
# radioButtons('answ10', paste('V10:',vraagMC$txt[10]),
# c(vraagMC[10,k[1]],
# vraagMC[10,k[2]],
# vraagMC[10,k[3]],
# vraagMC[10,k[4]]), selected = 0),
actionButton('goButton', 'Verzenden')
))
})
#Build server output
##Check questions and put results (Correct/False) in reactive variable.
##Initial choice (no choice) is neutral and equals 'Choose'
# Each rltInput re-evaluates on goButton; try_default yields 'Leeg' (empty)
# when the answer input does not exist yet.
rltInput1 <- reactive({input$goButton
isolate(try_default(chkQuestion(input$answ1, values$corr1,1),
default = 'Leeg', quiet = TRUE))
})
rltInput2 <- reactive({input$goButton
isolate(try_default(chkQuestion(input$answ2, values$corr2,2),
default = 'Leeg', quiet = TRUE))
})
rltInput3 <- reactive({input$goButton
isolate(try_default(chkQuestion(input$answ3, values$corr3,3),
default = 'Leeg', quiet = TRUE))
})
rltInput4 <- reactive({input$goButton
isolate(try_default(chkQuestion(input$answ4, values$corr4,4),
default = 'Leeg', quiet = TRUE))
})
rltInput5 <- reactive({input$goButton
isolate(try_default(chkQuestion(input$answ5, values$corr5,5),
default = 'Leeg', quiet = TRUE))
})
rltInput6 <- reactive({input$goButton
isolate(try_default(chkQuestion(input$answ6, values$corr6,6),
default = 'Leeg', quiet = TRUE))
})
#
# rltInput7 <- reactive({input$goButton
# isolate(try_default(chkQuestion(input$answ7, values$corr7),
# default = 'Leeg', quiet = TRUE))
# })
#
# rltInput8 <- reactive({input$goButton
# isolate(try_default(chkQuestion(input$answ8, values$corr8),
# default = 'Leeg', quiet = TRUE))
# })
#
# rltInput9 <- reactive({input$goButton
# isolate(try_default(chkQuestion(input$answ9, values$corr9),
# default = 'Leeg', quiet = TRUE))
# })
#
# rltInput10 <- reactive({input$goButton
# isolate(try_default(chkQuestion(input$answ10, values$corr10),
# default = 'Leeg', quiet = TRUE))
# })
##Print results
output$result1 <- renderText({paste('V1:',rltInput1())})
output$result2 <- renderText({paste('V2:',rltInput2())})
output$result3 <- renderText({paste('V3:',rltInput3())})
output$result4 <- renderText({paste('V4:',rltInput4())})
output$result5 <- renderText({paste('V5:',rltInput5())})
output$result6 <- renderText({paste('V6:',rltInput6())})
# output$result7 <- renderText({paste('V7:',rltInput7())})
# output$result8 <- renderText({paste('V8:',rltInput8())})
# output$result9 <- renderText({paste('V9:',rltInput9())})
# output$result10 <- renderText({paste('V10:',rltInput10())})
##Calculate points left per MC question and only add them when answer is correct
scrInput1 <- reactive({scrQuestion(rltInput1(),1)*(rltInput1()=='Correct')})
scrInput2 <- reactive({scrQuestion(rltInput2(),2)*(rltInput2()=='Correct')})
scrInput3 <- reactive({scrQuestion(rltInput3(),3)*(rltInput3()=='Correct')})
scrInput4 <- reactive({scrQuestion(rltInput4(),4)*(rltInput4()=='Correct')})
scrInput5 <- reactive({scrQuestion(rltInput5(),5)*(rltInput5()=='Correct')})
scrInput6 <- reactive({scrQuestion(rltInput6(),6)*(rltInput6()=='Correct')})
# scrInput7 <- reactive({scrQuestion(rltInput7(),7)*(rltInput7()=='Correct')})
# scrInput8 <- reactive({scrQuestion(rltInput8(),8)*(rltInput8()=='Correct')})
# scrInput9 <- reactive({scrQuestion(rltInput9(),9)*(rltInput9()=='Correct')})
# scrInput10 <- reactive({scrQuestion(rltInput10(),10)*(rltInput10()=='Correct')})
##Calculate total points left and print result
# NOTE(review): the percentage divides by 12 -- presumably 2 points max per
# question; confirm against scrQuestion in Globals.R.
output$scr <- renderText({paste0('Je score is: ',scrInput1() + scrInput2() + scrInput3() + scrInput4() + scrInput5() + scrInput6(),
' (of ', round((scrInput1() + scrInput2() + scrInput3() + scrInput4() + scrInput5() + scrInput6())*100/12,0),
'%)')})
##Prepare user data and print
# userData input is expected as a comma-separated "name,origin" string.
userData <- reactive({input$goButton
strsplit(isolate(input$userData),',')
})
output$user <- renderText(paste0("User name: ",userData()[[1]][1],", From: ",userData()[[1]][2]))
##Assess number of trials and print
# goButton's value doubles as the number of submission attempts.
trials <- reactive({input$goButton})
output$value <- renderText(paste0('Pogingen: ', trials()))
# Reconstruct the app's own URL from client data (used in the results log).
urlText <- renderText({
paste(sep = "",
session$clientData$url_protocol, "//",
session$clientData$url_hostname, ":",
session$clientData$url_port,"",
session$clientData$url_pathname
)
})
##Check whether number of trials variable is empty and if not save data to csv
# Appends one row per submission: timestamp, URL, query string, user info,
# attempt count, and per-question scores.
observe({if(length(trials()) != 0) {
quiz <- data.frame(Sys.time(), urlText(), session$clientData$url_search,
userData()[[1]][1], userData()[[1]][2], trials(),
scrInput1(), scrInput2(),scrInput3(), scrInput4(), scrInput5(), scrInput6())
write.table(quiz, file='results.csv', append=TRUE, sep=",", row.names=FALSE,
col.names=FALSE)
}
})
})
|
8da45d8ac4f77e27e45ebecdfb2f5333319b56ee
|
bf3edc3f036a643e185bee233055d1a0ecc47d80
|
/pvalue convergence - spinable.R
|
f4a5be077542aee22316fa2ba48347bdb4f37782
|
[] |
no_license
|
dmarx/Target-Shuffling
|
da0f88601ac38d6f89a6685241cce54b63899161
|
6b8132cbf4123d761e6711930c7066d62a1356eb
|
refs/heads/master
| 2021-01-10T19:02:49.029727
| 2016-02-19T23:00:34
| 2016-02-19T23:00:34
| 40,012,744
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,695
|
r
|
pvalue convergence - spinable.R
|
#' ---
#' title: "Demonstration of Generic Target Shuffling"
#' author: "David Marx"
#' date: "September 15, 2014"
#' ---
# /*
# #################################################################################
# # NB: This R script can be compiled into a static notebook by using #
# # RStudio's "Compile Notebook" command or by calling knitr::spin on the script. #
# #################################################################################
# */
#' Target shuffling is an extremely simple algorithm for estimating the significance of a model.
#' The idea is to use resampling to estimate the distribution of a given test statistic under the
#' null hypothesis, and then compare our observed test statistic to the eCDF of the null derived
#' from our resampling.
#'
#' This is really all there is to it:
# Generic Target shuffling function
# Generic target-shuffling significance test.
#
# Repeatedly permutes the target y, refits the model on the shuffled target,
# and records the test statistic, building an empirical null distribution.
#
# Args:
#   k: number of shuffling iterations.
#   x, y: training data and target, passed to fitModel(x, y).
#   fitModel: function(x, y) returning a fitted model object.
#   stat: function(model) returning a scalar test statistic.
#   returnValues: include the k shuffled statistics in the result?
#   estimateSignificance: include the estimated p-value in the result?
#   ...: forwarded to sample() (e.g. replace = TRUE for bootstrap resampling).
#
# Returns: a list with $values (numeric of length k) and/or $est_pval,
#   the share of shuffled statistics at or above the observed one.
tShuffle <- function(k, x, y, fitModel, stat, returnValues = TRUE, estimateSignificance = TRUE, ...) {
  # BUG FIX: rep(NULL, k) returns NULL, so the original grew the vector on
  # every iteration; preallocate instead.
  results <- numeric(k)
  for (i in seq_len(k)) {
    shuffled_y <- sample(y, length(y), ...)
    results[i] <- stat(fitModel(x, shuffled_y))
  }
  retval <- list()
  if (returnValues) retval$values <- results
  if (estimateSignificance) {
    observed <- stat(fitModel(x, y))  # fit the real model once, not per comparison
    retval$est_pval <- 1 - sum(observed > results) / k
  }
  retval
}
#' To make this function generic, I've parameterized it such that it can take any function to fit
#' a supervised model on a training dataset `x` and a target dataset `y` such that the call
#' `fitModel(x,y)` returns an object that can be operated on by the `stat` function to return a scalar
#' value.
#'
#' In addition to estimating significance, this implementation returns the raw results of our target shuffling
#' experiments so we can examine the estimated null distribution.
#'
#' To demonstrate, let's run a linear regression and target shuffle the R^2 to determine model significance.
#' In retrospect, really I should be target shuffling the F-statistic of the model, since that's what is
#' actually used to determine model significance, but for the purpose of generality I use r-squared here (which
#' can be calculated for any model) and you will see that we are able to make good estimates on the p-value
#' by target shuffling on this statistic instead of the F-statistic.
#'
#' To do this, we need to define a `fitModel` function and `stat` function. The first is just a wrapper on
#' the `lm` function, the second just extracts the r.squared attribute from calling `summary` on the model
#' object returned by `lm`. These functions will be passed to the tshuffle function. Last, I'll define a
#' function to simplify extracting the observed p-value from my model.
# Fit a simple linear regression of y on x (model builder passed to tShuffle).
fitModel = function(x,y){
lm(y~x)
}
# Test statistic extracted from a fitted lm: the model R^2.
stat = function(mod){
summary(mod)$r.squared
}
# Analytic p-value of the regression: first entry of the Pr(>F) column
# of the model's ANOVA table.
extract_pval = function(mod){
unlist(anova(mod)[5])[1]
}
#' Let's fit our models to the famous iris data set. We'll start by fitting a "good" model (i.e. one in which
#' we expect to observe significance).
# Inspect the available iris columns before choosing model variables.
names(iris)
#mod_a = lm(Petal.Length~Petal.Width, iris)
# Fit the "good" model: petal length regressed on petal width.
mod_a =fitModel(iris$Petal.Width, iris$Petal.Length)
summary(mod_a)
# Scatterplot with the fitted regression line overlaid.
plot(Petal.Length~Petal.Width, iris)
abline(mod_a, col='blue',lty=2)
stat(mod_a) # r-squared
#' Target shuffle our model
# Number of target-shuffling iterations.
n=1e3
results_a = tShuffle(n, iris$Petal.Width, iris$Petal.Length, fitModel, stat, replace=TRUE)
#' Visualize results of target shuffling
# Kernel density of the shuffled R^2 values; the observed R^2 is the red line.
plot(density(results_a$values), xlim=c(0,1), main='KDE of Target Shuffling Results')
abline(v=stat(mod_a), lty=2, col='red')
legend('top', c('Target Shuffled R2', 'Observed R2'), lty=1:2, col=1:2)
#' Compare estimated p-value with observed
print(paste("Estimated p-value:", results_a$est_pval))
print(paste("Observed p-value:",extract_pval(mod_a)))
summary(results_a$values)
#################
#' Let's contrast these results with an attempt to fit a bad model.
# Fit the "bad" model: sepal length on sepal width (a weak relationship in iris).
mod_b = lm(Sepal.Length~Sepal.Width, iris)
summary(mod_b)
plot(Sepal.Length~Sepal.Width, iris)
abline(mod_b, col='blue', lty=2)
stat(mod_b)
#' Target shuffle our model
results_b = tShuffle(n, iris$Sepal.Width, iris$Sepal.Length, fitModel, stat, replace=TRUE)
#' Visualize results of target shuffling
# Here the observed R^2 falls inside the shuffled null distribution.
plot(density(results_b$values), xlim=c(0,1), main='KDE of Target Shuffling Results')
abline(v=stat(mod_b), lty=2, col='red')
legend('top', c('Target Shuffled R2', 'Observed R2'), lty=1:2, col=1:2)
print(paste("Estimated p-value:", results_b$est_pval))
print(paste("Observed p-value:",extract_pval(mod_b)))
summary(results_b$values)
#' Let's run a little experiment to observe how quickly the target shuffled significance
#' on the r^2 statistics converges to the analytic p-value for the model, and see if incorporating
#' bootstrap resampling in our target shuffle makes any difference.
run_experiment = function(x, y, k, fitModel, stat, ...) {
  # Fit the observed model, run k target-shuffling replications, then plot how
  # the running simulated p-value converges to the analytic p-value.
  observed_model <- lm(y ~ x)
  shuffle_out <- tShuffle(k, x, y, fitModel, stat, ...)
  p_sim <- shuffle_out$est_pval
  # Running p-value estimate after each shuffling iteration.
  exceed_count <- cumsum(stat(observed_model) < shuffle_out$values)
  iteration <- 1:length(shuffle_out$values)
  running_p <- exceed_count / iteration
  plot(iteration,
       running_p,
       type = 'l',
       main = "Running simulated p-value",
       xlab = "Iteration",
       ylab = "Simulated p-value"
  )
  abline(h = p_sim, col = 'red', lty = 2)
  p_anlyt <- extract_pval(observed_model)
  abline(h = p_anlyt, col = 'blue', lty = 2)
  legend('topright', c('Running estimated p-value', 'Final estimated p-value', 'Observed analytic p-value'),
         lty = c(1, 2, 2), col = c(1:2, 'blue')
  )
  # Absolute deviation of the running estimate from the analytic p-value.
  abs_error <- abs(running_p - p_anlyt)
  plot(iteration, abs_error,
       type = 'l',
       main = "Convergence to analytic p-value",
       xlab = "Iteration",
       ylab = "Absolute Error (|p_sim - p_anlyt|)",
       ylim = c(0, .05)
  )
  list(results = shuffle_out, p_sim = p_sim, p_analytic = p_anlyt, abs_error = abs_error)
}
#' Generic Target Shuffling
# Run the convergence experiment with plain permutation shuffling (no replacement).
exp_f = run_experiment(iris$Sepal.Width, iris$Sepal.Length, n, fitModel, stat, replace=FALSE)
exp_f$p_sim; exp_f$p_analytic
#' Bootstrapped Target Shuffling
# Same experiment, but shuffling with replacement (bootstrap-style).
exp_t = run_experiment(iris$Sepal.Width, iris$Sepal.Length, n, fitModel, stat, replace=TRUE)
exp_t$p_sim; exp_t$p_analytic
#' It appears that incorporating bootstrapping with target shuffling probably doesn't really make much
#' of a difference.
#'
#' **Class Imbalance**
#'
#' Target shuffling is especially useful in situations where the analytic p-value may not be representative
#' of the true null distribution. A textbook example is in the case of significant class imbalance: a naive
#' model will be able to achieve significant accuracy by effectively predicting that any input will be of the
#' majority class.
#' To demonstrate, let's construct a dataset with two randomly distributed classes (i.e. inseparable given
#' the available features) where one class is a significant minority relative to the other.
# Simulate an imbalanced binary dataset: x is pure noise and y is 1 with
# probability p, so there is no real relationship between them.
n=1e3
p=0.05
x = runif(n)
y = rbinom(n,1,p)
table(y)/n
#' Since we know that there is no conditional relationship between `x` and `y`, we know that, theoretically,
#' trying to train a logistic regression on this data would be fruitless. But the class imbalance will give
#' us deceptive results.
fitModel_log = function(x, y) {
  # Fit a logistic regression of the binary target y on the predictor x.
  training_data <- data.frame(x, y)
  glm(y ~ x, training_data, family = 'binomial')
}
accuracy = function(mod, Y=mod$y){
  # Classification accuracy of a fitted binary glm: fraction of targets matched
  # by thresholding the linear predictor at 0 (i.e. fitted probability > 0.5).
  #
  # BUG FIX: the default used to be the *global* variable `y`, so during target
  # shuffling a model fit on shuffled targets was scored against the unshuffled
  # targets. glm stores its own response as mod$y, which is the correct default;
  # callers that passed Y explicitly, or whose model was fit on the global y,
  # get identical results.
  mean(Y == (predict(mod)>0) )
}
# Fit the logistic model and compute its (deceptively high) accuracy.
mod_c = fitModel_log(x,y)
acc = accuracy(mod_c)
print(paste("Accuracy:",acc))
#' Wow! That's an accurate model! But does it mean we're really achieving good class separation?
#' Let's target shuffle it and see how significant this result really is.
n=1e3
#+ message=F, warning=F
results_c = tShuffle(n, x, y, fitModel_log, accuracy, replace=TRUE)
#' Visualize results of target shuffling
# The observed accuracy sits well inside the shuffled null distribution.
plot(density(results_c$values), xlim=c(0,1), main='KDE of Target Shuffling Results')
abline(v=accuracy(mod_c), lty=2, col='red')
legend('topleft', c('Target Shuffled Accuracy', 'Observed Accuracy'), lty=1:2, col=1:2)
#' Our model sure doesn't look so special now.
print(paste("Estimated p-value:", results_c$est_pval))
summary(results_c$values)
#' ### Bootstrap
#'
#' Target shuffling gives us a distribution over the null hypothesis. The *bootstrap*
#' technique gives us a distribution over model solutions and/or evaluation statistics,
#' AKA distributions over the alternative hypothesis. We estimated our p-value by
#' comparing a unique model solution against our distribution of null hypotheses, but
#' we can actually infer a distribution over p-values, giving us a confidence interval
#' on our p-value (!!!)
boot_sample = function(x) {
  # Draw a bootstrap resample of the rows of x (same size, with replacement).
  resampled_rows <- sample(nrow(x), replace = TRUE)
  x[resampled_rows, ]
}
boot_stat = function(k, x, y, fitModel, stat){
  # Bootstrap distribution of a model statistic: jointly resample the rows of
  # (y, x) k times, refit the model on each resample and record stat(model).
  #
  # Args:
  #   k: number of bootstrap replications.
  #   x: predictor data (vector, matrix or data.frame of columns).
  #   y: target vector; kept aligned with x by resampling rows of cbind(y, x).
  #   fitModel: function(x, y) returning a fitted model object.
  #   stat: function(model) returning a scalar statistic.
  #
  # Returns: numeric vector of k bootstrapped statistics.
  results = numeric(k)  # preallocate; rep(NULL, k) yields NULL and grew the vector on every iteration
  full_dat = cbind(y, x)
  for(i in 1:k){
    boot_dat = boot_sample(full_dat)
    # Column 1 is the target; the remaining NCOL(x) columns are the predictors.
    xb = boot_dat[, 2:(NCOL(x)+1)]
    yb = boot_dat[, 1]
    results[i] = stat(fitModel(xb, yb))
  }
  results
}
# results_a = tShuffle(n, iris$Petal.Width, iris$Petal.Length, fitModel, stat, replace=TRUE)
# Bootstrap the R^2 of the "good" model and overlay it on the shuffled null.
boot_r2 = boot_stat(n, iris$Petal.Width, iris$Petal.Length, fitModel, stat)
plot(density(results_a$values), xlim=c(0,1), main='KDE of Target Shuffling Results')
lines(density(boot_r2), col='blue')
abline(v=stat(mod_a), lty=2, col='red')
legend('top', c('Target Shuffled R2', 'Observed R2', 'Bootstrapped R2'), lty=c(1,2,1), col=c(1,'red', 'blue'))
#' I like this. Let's wrap the whole process.
simulate_distributional_hypothesis_test = function(k, x, y, fitModel, stat, plt=TRUE){
  # Simulate both hypotheses for stat(fitModel(x, y)):
  #   - null hypothesis via target shuffling (tShuffle with replacement)
  #   - alternative hypothesis via bootstrap resampling (boot_stat)
  # and derive a distribution of p-values by scoring each bootstrapped statistic
  # against the target-shuffled null distribution.
  #
  # Args:
  #   k: number of shuffling / bootstrap iterations.
  #   x, y: predictors and target passed to fitModel(x, y).
  #   fitModel, stat: model-fitting function and scalar statistic extractor.
  #   plt: if TRUE, overlay kernel density estimates of both distributions.
  #
  # Returns: list with the two simulated distributions, the fitted model, its
  # observed statistic and analytic p-value, the p-value distribution and the
  # single target-shuffled p-value estimate.
  null_dist = tShuffle(k, x, y, fitModel, stat, replace=TRUE)
  alt_dist = boot_stat(k, x, y, fitModel, stat)
  mod = fitModel(x, y)
  obs_stat = stat(mod)
  # One p-value per bootstrapped statistic: fraction of null draws it fails to beat.
  p_dist = vapply(alt_dist, function(v) 1 - mean(v > null_dist$values), numeric(1))
  p_tshuffle = null_dist$est_pval
  if(plt){
    # BUG FIX: the x-range must span the two numeric vectors; the original called
    # range() on c(null_dist, alt_dist), where null_dist is a *list*, which errors.
    plot(density(null_dist$values), xlim=range(c(null_dist$values, alt_dist)), main="Target shuffling vs. bootstrap")
    lines(density(alt_dist), col='blue')
    abline(v=obs_stat, lty=2, col='red')
    legend('top', c('Target Shuffled stat', 'Observed stat', 'Bootstrapped stat'), lty=c(1,2,1), col=c(1,'red', 'blue'))
  }
  list(null_hypothesis=null_dist$values,
       alternative_hypothesis=alt_dist,
       fitted_stat=obs_stat,
       fitted_model=mod,
       fitted_pval=extract_pval(mod),
       pvalue_distribution=p_dist,
       tshuffle_pvalue=p_tshuffle)
}
#' Where we have well defined significance, the two distributions are well separated.
# Strong relationship: the null and alternative distributions are well separated.
sim_dist = simulate_distributional_hypothesis_test(n, iris$Petal.Width, iris$Petal.Length, fitModel, stat)
#' Where we don't have strong significance, there is overlap in the target shuffled and bootstrapped distributions, and
#' we get a range (distribution) of pvalues
sim_dist2 = simulate_distributional_hypothesis_test(n, iris$Sepal.Width, iris$Sepal.Length, fitModel, stat)
plot(density(sim_dist2$pvalue_distribution), main="p-value distribution", xlim=c(0,1))
abline(v=sim_dist2$fitted_pval, lty=2, col="red")
legend('topright', c('Estimated p-value distribution', 'Analytic p-value'), lty=c(1,2), col=c(1,'red'))
#' The median of our estimation distribution is pretty close to our analytic and target-shuffled p-values.
sim_dist2$fitted_pval
sim_dist2$tshuffle_pvalue
quantile(sim_dist2$pvalue_distribution, c(.05, .5, .95))
#' interesting, the mode and mean are not.
dens = density(sim_dist2$pvalue_distribution)
dens$x[which.max(dens$y)] # pval distr mode
mean(sim_dist2$pvalue_distribution) # pval distr mean
#' being a bit more traditional, let's look at the distribution of the difference between the null and the alternative
#' hypothesis. If this distribution contains 0 at our significance level of interest, we fail to reject the null.
# Element-wise difference pairs the i-th bootstrap draw with the i-th shuffled draw.
dist_diff = sim_dist2$alternative_hypothesis - sim_dist2$null_hypothesis
plot(density(dist_diff))
quantile(dist_diff, c(.05, .5, .95))
#' ### Discussion
#'
#' This obviously invites the question: how useful is a confidence bound about a p-value? I've certainly never seen this
#' approach used anywhere, but at face value it doesn't strike me as completely useless. In fact, having computed this,
#' this seems like a good way of doing things. Why shouldn't we calculate bounds about our p-values? How useful is a p-value,
#' or really any statistic, in complete isolation? Although this seems reasonable at first, I have to admit: I'm struggling to
#' find real value in this range I've constructed. My initial result from target shuffling against a single point estimate for
#' my statistics demonstrated that my model was not significant at a 10% threshold. Querying the p-value distribution told me that
#' there are random models that are significant (the tail of the distribution that lies outside of the range of the target shuffled
#' statistic) and there are random models that are strongly not significant (the distributional overlap). So what?
|
663de0a58b734308be66a387bd8875d10c8f0e31
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/2691/stacomiR/ref_timestep.R
|
aec26e9962f89cd142b5b5b79d2abbee89e4ddf8
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,975
|
r
|
ref_timestep.R
|
# Time-step duration constants (French names kept for backward compatibility);
# each one is a difftime whose numeric value is a number of seconds.
UNE_SECONDE = as.difftime(c("0:0:1")) ;
UNE_MINUTE = 60 * UNE_SECONDE ;
DIX_MINUTES = 10 * UNE_MINUTE ;
QUINZE_MINUTES = 15 * UNE_MINUTE ;
TRENTE_MINUTES = 30 * UNE_MINUTE ;
UNE_HEURE = 60 * UNE_MINUTE ;
DOUZE_HEURES = 12 * UNE_HEURE ;
UN_JOUR = 24 * UNE_HEURE ;
UNE_SEMAINE = 7 * UN_JOUR ;
DEUX_SEMAINES = 2 * UNE_SEMAINE ;
UN_MOIS = 30 * UN_JOUR ;
TROIS_MOIS = 91 * UN_JOUR ;
SIX_MOIS = 182 * UN_JOUR ;
UN_AN = 365 * UN_JOUR ;
# Vector of all supported step durations, in the same order as the labels below.
Valeurref_timestep=c(UNE_SECONDE,UNE_MINUTE,DIX_MINUTES,QUINZE_MINUTES,TRENTE_MINUTES,UNE_HEURE,DOUZE_HEURES,
UN_JOUR,UNE_SEMAINE,DEUX_SEMAINES,UN_MOIS,TROIS_MOIS,SIX_MOIS,UN_AN)
# Human-readable labels shown in the GUI (partly French, e.g. "1 jour" = 1 day).
Labelref_timestep=c(
"1 sec",
"1 min",
"10 min" ,
"15 min" ,
"30 min",
"1 h" ,
"12 h" ,
"1 jour" ,
"1 sem" ,
"2 sem" ,
"1 mois" ,
"3 mois" ,
"6 mois" ,
"1 an" )
# Reference lookup table mapping each duration to its label; row names are the
# constant names so a duration can be addressed symbolically.
Lesref_timestep=data.frame("Valeurref_timestep"=Valeurref_timestep)
Lesref_timestep[,"Labelref_timestep"]=Labelref_timestep
rownames(Lesref_timestep)=
c("UNE_SECONDE","UNE_MINUTE","DIX_MINUTES","QUINZE_MINUTES","TRENTE_MINUTES","UNE_HEURE","DOUZE_HEURES",
"UN_JOUR","UNE_SEMAINE","DEUX_SEMAINES","UN_MOIS","TROIS_MOIS","SIX_MOIS","UN_AN")
# Clean up the intermediate constants; only Lesref_timestep remains in scope.
rm(UNE_SECONDE,UNE_MINUTE,DIX_MINUTES,QUINZE_MINUTES,TRENTE_MINUTES,UNE_HEURE,DOUZE_HEURES,
UN_JOUR,UNE_SEMAINE,DEUX_SEMAINES,UN_MOIS,TROIS_MOIS,SIX_MOIS,UN_AN,Labelref_timestep)
validity_ref_timestep=function(object)
{
  # Validity check for ref_timestep objects: the start date must be a POSIXlt
  # and the three remaining slots must all be scalars.
  # Returns TRUE when valid, otherwise a character message listing the failures.
  retValue=NULL
  # inherits() instead of class(x)[1]==: also accepts subclasses of POSIXlt.
  rep1= inherits(object@dateDebut, "POSIXlt")
  if (!rep1) retValue="object@dateDebut is not of class POSIXlt"
  rep2=length(object@step_duration)==1
  if (!rep2) retValue=paste(retValue,"length(object@step_duration) !=1")
  rep3=length(object@nb_step)==1
  if (!rep3) retValue=paste(retValue,"length(object@nb_step) !=1")
  rep4=length(object@nocurrent_step)==1
  if (!rep4) retValue=paste(retValue,"length(object@nocurrent_step) !=1")
  # if/else instead of ifelse(): ifelse() is vectorized, not for scalar control flow.
  if (rep1 && rep2 && rep3 && rep4) TRUE else retValue
}
#' Class "ref_timestep"
#'
#' Describes a time step
#'
#'
#' @section Objects from the Class: Objects can be created by calls of the form
#' \code{new("ref_timestep",
#' dateDebut="POSIXt",step_duration=numeric(),nb_step=numeric(),nocurrent_step=integer())}.
#' \describe{
#' \item{list("dateDebut")}{Object of class \code{"POSIXt"} Starting
#' date }
#' \item{:}{Object of class \code{"POSIXt"} Starting date }
#' \item{list("step_duration")}{Object of class \code{"numeric"} Step length
#' }\item{:}{Object of class \code{"numeric"} Step length }
#' \item{list("nb_step")}{Object of class \code{"numeric"} Number of steps
#' }\item{:}{Object of class \code{"numeric"} Number of steps }
#' \item{list("nocurrent_step")}{Object of class \code{"integer"} Number of the
#' current step }\item{:}{Object of class \code{"integer"} Number of the
#' current step } }
#' @author cedric.briand"at"eptb-vilaine.fr
#' @seealso \code{\linkS4class{ref_timestep_daily}}
#' @concept report Object
# S4 class ref_timestep: a time step defined by a start date (POSIXlt), a step
# duration in seconds, a number of steps and the index of the current step.
# The prototype defaults to daily steps starting at the top of the current year.
setClass(Class="ref_timestep",representation=
representation(dateDebut="POSIXlt",step_duration="numeric",nb_step="numeric",nocurrent_step="integer"),
validity=validity_ref_timestep,
prototype=prototype(dateDebut=as.POSIXlt(Hmisc::truncPOSIXt(Sys.time(),"year")),
step_duration=as.numeric(86400),
nb_step=as.numeric(1),
nocurrent_step=as.integer(0) ) )
# timestep= new("ref_timestep")
validity_ref_timestepChar=function(object)
{
  # Validity check for ref_timestepChar objects. Returns TRUE when valid,
  # otherwise the indices (1..5) of every failed check:
  #   1: dateDebut is a POSIXlt
  #   2: step_duration has length 1
  #   3: nb_step has length 1
  #   4: nocurrent_step has length 1
  #   5: step_duration is one of the labels known to Lesref_timestep
  rep1= inherits(object@dateDebut, "POSIXlt")
  rep2=length(object@step_duration)==1
  rep3=length(object@nb_step)==1
  rep4=length(object@nocurrent_step)==1
  rep5= object@step_duration%in%Lesref_timestep[,"Labelref_timestep"]
  checks = c(rep1, rep2, rep3, rep4, rep5)
  # BUG FIX: the original used ifelse(), which on a scalar condition returns only
  # the FIRST element of the 'no' branch, so at most one failing index was reported.
  if (all(checks)) TRUE else which(!checks)
}
#' Class "ref_timestepChar"
#'
#' Character to represent a ref_timestep
#'
#'
#' @section Objects from the Class: Objects can be created by calls of the form
#' \code{new("ref_timestepChar", \dots{})}
#' @author cedric.briand"at"eptb-vilaine.fr
#' @seealso \code{\linkS4class{ref_timestep}}
#' @keywords classes
#' @examples
#'
#' showClass("ref_timestepChar")
#'
# S4 class ref_timestepChar: same shape as ref_timestep, but the step duration
# is held as a display label (e.g. "1 jour") instead of a number of seconds.
setClass(Class="ref_timestepChar",representation=
representation(dateDebut="POSIXlt",step_duration="character",nb_step="numeric",nocurrent_step="integer"),
validity=validity_ref_timestepChar,
prototype=prototype(dateDebut=as.POSIXlt(strptime("2008-01-01 00:00:00",format="%Y-%m-%d %H:%M:%S"),tz="GMT"),
step_duration=as.character("1 jour"),
nb_step=as.numeric(1),
nocurrent_step=as.integer(0) ))
# Coercion ref_timestepChar -> ref_timestep: translate the duration label back
# into its numeric value (seconds) via the Lesref_timestep lookup table.
setAs("ref_timestepChar","ref_timestep", # from to
function(from,to){
index=Lesref_timestep[,"Labelref_timestep"]%in%from@step_duration
newstep_duration=Lesref_timestep[index,"Valeurref_timestep"]
new("ref_timestep",dateDebut=from@dateDebut,
step_duration=newstep_duration,
nb_step=from@nb_step,
nocurrent_step=from@nocurrent_step)})
# timestep=as(timestepChar,"ref_timestep")
#' Generic method to get current time step
#' @param object An object
#' @param ... Additional parameters passed to the method
setGeneric("getnocurrent_step",def=function(object,...) standardGeneric("getnocurrent_step"))
#' Gets the current time step of an object of class \link{ref_timestep-class}
#' @param object An object of class \link{ref_timestep-class}
#' @return the current time step of the object
#' @keywords internal
setMethod("getnocurrent_step",signature=signature("ref_timestep"),definition=function(object) object@nocurrent_step)
#' Generic method for getting the final date
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("end_date",def=function(object,...) standardGeneric("end_date"))
#' Gets the final horodate for an object of class \link{ref_timestep-class}
#' @param object An object of class \link{ref_timestep-class}
#' @return end_date, The final date corresponding to nb_step*time duration + initial date
#' @export
#' @keywords internal
setMethod("end_date",signature=signature("ref_timestep"),definition=function(object){
end_date=object@dateDebut+ object@step_duration*(object@nb_step)
# pour les pb de changement d'heure
return(end_date)
})
#' Generic method for getting the beginning date for current time step
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("currentstart_date",def=function(object,...) standardGeneric("currentstart_date"))
#' Gets the starting date of a time step for an object of class \link{ref_timestep-class}
#' @param object An object of class \link{ref_timestep-class}
#' @return current_start_date, The starting date for the current timestep
#' @keywords internal
setMethod("currentstart_date",signature=signature("ref_timestep"),definition=function(object){
current_start_date=object@dateDebut+ object@step_duration*object@nocurrent_step
# bug cht heure
if (object@step_duration==86400) {
current_start_date=Hmisc::roundPOSIXt(current_start_date,"days")
}
return(current_start_date)
})
#' Generic method for getting the ending date for current time step
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("current_end_date",def=function(object,...) standardGeneric("current_end_date"))
#' Gets the ending date of a time step for an object of class \link{ref_timestep-class}
#' @param object An object of class \link{ref_timestep-class}
#' @return Currentend_date, The ending date for the current timestep
setMethod("current_end_date",signature=signature("ref_timestep"),definition=function(object){
the_current_end_date=object@dateDebut+ object@step_duration*(object@nocurrent_step+as.integer(1))
if (object@step_duration==86400) {
the_current_end_date=Hmisc::roundPOSIXt(the_current_end_date,"days")
}
return(the_current_end_date)
})
#' Generic method the get starting date
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("getdateDebut",def=function(object,...) standardGeneric("getdateDebut"))
#' Returns the starting date as character
#' @param object An object of class \link{ref_timestep-class}
#' @param ... Additional parameters passed to the method
#' @keywords internal
setMethod("getdateDebut",signature=signature("ref_timestep"),definition=function(object){
return ( strftime(as.POSIXlt(object@dateDebut),format="%Y-%m-%d %H:%M:%S") )
})
#' Generic method to set the starting date
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("set_starting_date",def=function(object,...) standardGeneric("set_starting_date"))
#' Sets starting date from a character
#'
#'
#' @param object An object of class \link{ref_timestep-class}
#' @param string Character string of type"\%Y-\%m-\%d \%H:\%M:\%S" or "\%Y-\%m-\%d".
#' this allows to use either horodate or date
#' @return An object of class \link{ref_timestep-class}
#' @keywords internal
setMethod("set_starting_date",signature=signature("ref_timestep"),definition=function(object,string){
object@dateDebut=if (!is.na(strptime(string,format="%Y-%m-%d %H:%M:%S"))) strptime(string,format="%Y-%m-%d %H:%M:%S") else
strptime(string,format="%Y-%m-%d")
return(object)
})
#' Generic method to get the string value of time step
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("get_step_label",def=function(object,...) standardGeneric("get_step_label"))
#' Gets the string value of time step
#'
#' @param object An object of class \link{ref_timestep-class}
#' @return A string corresponding to the value of current time step
#' @keywords internal
setMethod("get_step_label",signature=signature("ref_timestep"),definition=function(object){
ret=paste(Lesref_timestep$Labelref_timestep)
return (ret )
})
#' Generic method to get the years
#' @param object An object
#' @param ... Additional parameters passed to the method
#' @keywords internal
setGeneric("get_year",def=function(object,...) standardGeneric("get_year"))
#' Gets the year or a vector of years corresponding to the timestep ("ref_timestep") object
#' @param object An object of class \link{ref_timestep-class}
#' @keywords internal
setMethod("get_year",signature=signature("ref_timestep"),definition=function(object){
dateFin=end_date(object)
dateDebut=object@dateDebut
seq=seq.POSIXt(from=dateDebut,to=dateFin,by="day")
seq=seq[-length(seq)]
annees=unique(strftime(seq,"%Y"))
return (as.numeric(annees))
})
#' Method to select timesteps from the graphical interface
#' @param object An object of class \link{ref_timestep-class}
#' @keywords internal
setMethod("choice",signature=signature("ref_timestep"),definition=function(object) {
# Only build the widget if the reference table of time steps is populated.
if (length(Lesref_timestep$Labelref_timestep) > 0){
# OK-button handler: read the widget values into the object's slots and store
# the configured timestep in the stacomi environment.
hwinpa=function(h,...){
pas=svalue(choicepas)
nb_step=as.numeric(svalue(choicenb_step))
object@nb_step<-nb_step
object@step_duration<-as.numeric(Lesref_timestep$Valeurref_timestep[Lesref_timestep$Labelref_timestep%in%pas])
object=set_starting_date(object,svalue(datedeb))
assign("timestep",object,envir_stacomi)
}
# Change handler: recompute and display the resulting end date (in red), then
# delegate to hwinpa to persist the updated object.
hchoicepas=function(h,...){
pas=svalue(choicepas)
nb_step=as.numeric(svalue(choicenb_step))
object@step_duration<-as.numeric(Lesref_timestep$Valeurref_timestep[Lesref_timestep$Labelref_timestep%in%pas])
object@nb_step<-nb_step
object=set_starting_date(object,svalue(datedeb))
add(datedefin,strftime(as.POSIXlt(end_date(object)),format="%Y-%m-%d %H:%M:%S"),
font.attr=c(foreground.colors="red") )
hwinpa(h)
}
# Lay out the gWidgets frame: start date (text + calendar), step drop-down,
# number of steps, computed end date and the OK button.
group<-get("group",envir=envir_stacomi)
winpa=gframe(gettext("Time steps choice",domain="R-stacomiR"),container=group,horizontal=FALSE)
pg<-ggroup(horizontal=FALSE,container=winpa)
glabel(gettext("Starting date",domain="R-stacomiR"),container=pg)
datedeb<-gedit(getdateDebut(object),
container=pg,handler=hchoicepas,width=15)
datedebut2=as.character(strftime(object@dateDebut,"%Y-%m-%d"))
datedeb2<-gcalendar(datedebut2,container=pg,handler=function(h,...){
svalue(datedeb)<-as.character(strftime(
strptime(svalue(datedeb2),"%Y-%m-%d"),
"%Y-%m-%d %H:%M:%S"))
hchoicepas(h)
} )
glabel(gettext("Time steps choice",domain="R-stacomiR"),container=winpa)
pas_libelle=fun_char_spe(Lesref_timestep$Labelref_timestep)
choicepas=gdroplist(pas_libelle,selected = 8,container=winpa,handler=hchoicepas)
glabel(gettext("Number of time step choice",domain="R-stacomiR"),container=winpa)
choicenb_step=gedit("365",container=winpa,coerce.with=as.numeric,handler=hchoicepas,width=15)
datedefin<-gtext(gettext("End date",domain="R-stacomiR"),height=50,container=winpa) # end-date display widget
gbutton("OK", container=winpa,handler=hwinpa,icon="execute")
} else funout(gettext("Internal error : no entry in time steps table\n",domain="R-stacomiR"), arret=TRUE)
})
#' Graphical interface for multiple choice method for PasdeTemps (used in report_mig_mult)
#' @param object An object of class \link{ref_timestep-class}
#' @note this method differs from choice as it is called within a notebook,
#' it does not allow for multiple choice to be made
#' @author Cedric Briand \email{cedric.briand"at"eptb-vilaine.fr}
#' @keywords internal
setMethod("choicemult",signature=signature("ref_timestep"),definition=function(object) {
# Only build the widget if the reference table of time steps is populated.
if (length(Lesref_timestep$Labelref_timestep) > 0){
# OK-button handler: commit the widget values, store the timestep in the
# stacomi environment and move to the second notebook tab.
# NOTE(review): `<<-` writes `object` in the enclosing choicemult frame, while
# the sibling `choice` method uses plain `<-`; confirm the superassignment is intentional.
hwinpa=function(h,...){
pas=svalue(choicepas)
nb_step=as.numeric(svalue(choicenb_step))
object@nb_step<<-nb_step
object@step_duration<<-as.numeric(Lesref_timestep$Valeurref_timestep[Lesref_timestep$Labelref_timestep%in%pas])
object=set_starting_date(object,svalue(datedeb))
assign("timestep",object,envir_stacomi)
funout(gettext("Timesteps loaded\n",domain="R-stacomiR"))
# switch to the second tab of the notebook
svalue(notebook)<-2
}
# Change handler: recompute and display the resulting end date (in red), then
# delegate to hwinpa to persist the updated object.
hchoicepas=function(h,...){
#browser()
pas=svalue(choicepas)
nb_step=as.numeric(svalue(choicenb_step))
object@step_duration<-as.numeric(Lesref_timestep$Valeurref_timestep[Lesref_timestep$Labelref_timestep%in%pas])
object@nb_step<-nb_step
object=set_starting_date(object,svalue(datedeb))
add(datedefin,strftime(as.POSIXlt(end_date(object)),format="%Y-%m-%d %H:%M:%S"),
font.attr=c(foreground.colors="red") )
hwinpa(h)
}
# Placeholder handler for reacting to start-date edits.
hchoicedatedebut=function(h,...){
# TODO to develop
}
# Lay out the tab inside the notebook: start date (text + calendar), step
# drop-down, number of steps, computed end date and the OK button.
notebook<-get("notebook",envir=envir_stacomi)
groupdate<-ggroup(container=notebook, label="periode") ## "add" called by constructor this is a tab of the notebook
assign("groupdate",groupdate,envir=envir_stacomi)
winpa=gframe(gettext("Time steps choice",domain="R-stacomiR"),container=groupdate,horizontal=FALSE)
pg<-ggroup(horizontal=FALSE,container=winpa)
glabel(gettext("Starting date",domain="R-stacomiR"),container=pg)
datedeb<-gedit(getdateDebut(object),container=pg,handler=hchoicepas,width=15)
datedebut2=as.character(strftime(object@dateDebut,"%Y-%m-%d"))
datedeb2<-gcalendar(datedebut2,container=pg,handler=function(h,...){
svalue(datedeb)<-as.character(strftime(
strptime(svalue(datedeb2),"%Y-%m-%d"),
"%Y-%m-%d %H:%M:%S"))
hchoicepas(h)
} )
glabel(gettext("Time steps choice",domain="R-stacomiR"),container=winpa)
pas_libelle=fun_char_spe(Lesref_timestep$Labelref_timestep)
choicepas=gdroplist(pas_libelle,selected = 8,container=winpa,handler=hchoicepas)
glabel(gettext("Number of time steps choice",domain="R-stacomiR"),container=winpa)
choicenb_step=gedit("365",container=winpa,coerce.with=as.numeric,handler=hchoicepas,width=15)
datedefin<-gtext(gettext("Ending date",domain="R-stacomiR"),height=50,container=winpa) # end-date display widget
gbutton("OK", container=winpa,handler=hwinpa,icon="execute")
} else funout(gettext("Internal error : no entry in time steps table\n",domain="R-stacomiR"), arret=TRUE)
})
|
cacc31a752618bf3d0ff72dd8cb334130ea7573f
|
6f56e71fc7d220998d498971fd37ac548077f35e
|
/man/mfisher.test.Rd
|
ad44e0a631708726dedc29192ac57bf1f5d29d4f
|
[] |
no_license
|
cran/multfisher
|
0e48321cfb09dbed3b8a589126e988aabe62072a
|
a8d892b91659a9adeff0eac52b41911b7c7750e2
|
refs/heads/master
| 2021-01-20T03:59:47.855035
| 2018-02-23T09:29:06
| 2018-02-23T09:29:06
| 89,617,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 10,461
|
rd
|
mfisher.test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multBinExact_2-4.R
\name{mfisher.test}
\alias{mfisher.test}
\title{Optimal Exact Tests for Multiple Binary Endpoints}
\usage{
mfisher.test(x, y = NULL, method = c("alpha.greedy", "alpha", "number",
"power", "bonferroni.greedy"), alpha = 0.025, p1 = NULL, p0 = NULL,
max.iter = 10^5, limit = 0, show.region = FALSE, closed.test = FALSE,
consonant = FALSE)
}
\arguments{
\item{x}{a data frame of binary response vectors, or an array of numbers of failures and successes in the treatment group, or a list of marginal \emph{2 by 2} tables, see details.}
\item{y}{a vector of group allocations, or an array of numbers of failures and successes in the reference group, see details.}
\item{method}{a character variable indicating which optimization procedure to use.
This can be one of \code{"alpha.greedy"}, \code{"alpha"}, \code{"number"}, \code{"power"} or \code{"bonferroni.greedy"}, see details.}
\item{alpha}{nominal significance level, the default is 0.025. Note that the test is one-sided.}
\item{p1}{an array of assumed probabilities for failure and success in the treatment group, see details.}
\item{p0}{an array of assumed probabilities for failure and success in the reference group, see details.}
\item{max.iter}{the maximal number of iterations in the branch and bound optimization algorithm. Defaults to 10^5.}
\item{limit}{the value below which contributions to alpha are set to zero (and alpha is lowered accordingly) to speed up computation. Defaults to 0.}
\item{show.region}{logical, if \code{TRUE} a data frame indicating which possible outcome is element of the rejection region of the global test is added to the output. Defaults to \code{FALSE}.}
\item{closed.test}{logical, if \code{TRUE} adjusted p-values for the elementary null hypotheses are calculated by applying the specified test to all intersection hypotheses in a closed testing scheme. This can be
computer intensive, depending on the number of endpoints.}
\item{consonant}{logical indicating if the global test should be constrained such that the resulting closed test is consonant. This option is only available for two endpoints. Note that the
Bonferroni greedy method is always consonant by construction.}
}
\value{
A list with class \code{multfisher} containing the following components:
\describe{
\item{\code{call}}{the function call.}
\item{\code{data}}{a data frame showing the aggregated input data. If \code{p1} and \code{p0} are provided they are included in vectorized form.}
\item{\code{alpha}}{the value of \code{alpha}.}
\item{\code{method}}{the chosen method as found by argument match to \code{method}.}
\item{\code{statistic}}{the vector of test statistics, these are the marginal numbers of successes in the treatment group.}
\item{\code{p.value}}{the p-value of the global test. See reference for details on the calculation.}
\item{\code{conditional.properties}}{a list of the actual significance level, the number of elements and the power of the global test. The values are calculated from the permutation
distribution of the data and they are conditional on the observed total numbers of successes and failures. The power is calculated for the alternative defined through
\code{p1} and \code{p0}. If \code{p1} and \code{p0} are not specified, the value for power is \code{NA}.}
\item{\code{rej.region}}{Provided if \code{show.region} is \code{TRUE} and method is in \code{c("alpha","number","power","alpha.greedy")}. A data frame showing in the column rejection.region
if a multidimensional test statistic, indicated by the previous columns, is element of the rejection region (value of 1) or not (value of 0) for the global level alpha test.
The column alpha gives the probability of observing the particular vector of test statistics under the null hypothesis and conditional on the observed total numbers of
successes and failures. Values of 0 occur if a combination of test statistics is not possible in the conditional distribution. The column power shows the conditional probability
under the alternative defined through \code{p1} and \code{p0}. If \code{p1} and \code{p0} are not specified, the values for power are \code{NA}.}
\item{\code{elementary.tests}}{a data frame showing for each endpoint the marginal odds ratio, the unadjusted one-sided p-value of Fisher's exact test and the adjusted
p-value resulting from application of the optimal exact test in a closed testing procedure.}
\item{\code{closed.test}}{a data frame indicating all intersection hypotheses in the closed test and giving their p-values.}
\item{\code{consonant.constraint}}{logical indicating whether the consonance constraint was used.}
\item{\code{OPT}}{a list summarizing the optimization success, if applicable. The number of iterations of the branch and bound algorithm is given, as well as the
specified maximal iteration number and a logical variable indicating whether the optimization (in all steps of the closed test, if applicable) was finished.
The number of iterations may be 0, which indicates that the optimization problem was solved in a pre-processing step.}
}
}
\description{
Calculates global tests and multiple testing procedures to compare two groups with respect to multiple binary endpoints based on optimal rejection regions.
}
\details{
The null hypothesis for the global test is an identical multidimensional distribution of successes and failures in both groups.
The alternative hypothesis is a larger success proportion in the treatment group in at least one endpoint.
\code{x} can be a data frame with one row per subject and one column for each endpoint. Only values of 0 or 1 are allowed,
with 0 indicating failure and 1 indicating success of the subject for the particular endpoint. In that case \code{y} needs to be a vector of group assignments with values 0 and 1,
where 0 is the reference group and 1 the treatment group.
Alternatively, \code{x} and \code{y} can be contingency tables in terms of \emph{2 by 2 by ... by 2} arrays. Each dimension of the array corresponds to one endpoint, the first coordinate position
in each dimension refers to failure in that endpoint, the second coordinate position refers to success. The array contains the number of subjects that were observed
for each possible combination of failures and successes.
If \code{x} is a list of marginal \emph{2 by 2} tables, the Bonferroni greedy method is used. Matching the other input
variants, the \emph{2 by 2} tables are assumed to have the number of failures in the first row and the number of successes in the second row, and the first column to correspond to
the reference group, the second column to the treatment group.
The methods \code{"alpha.greedy"}, \code{"alpha"}, \code{"number"} and \code{"power"} are based on the multivariate permutation distribution of the data conditional
on the observed numbers of successes and failures across both groups. The method \code{"alpha.greedy"} uses a greedy algorithm aiming to exhaust the nominal significance level.
The methods \code{"alpha"}, \code{"number"} and \code{"power"} use a branch and bound algorithm to find rejection regions with, respectively,
maximal exhaustion of the nominal significance level, maximal number of elements or maximal power for the alternative given by \code{p1} and \code{p0}.
The method \code{"bonferroni.greedy"} uses a greedy algorithm aiming to exhaust the nominal significance level of a weighted Bonferroni adjustment of multiple Fisher's exact tests.
See reference for further details.
\code{p1} and \code{p0} are \emph{2 by 2 by ... by 2} arrays. Each dimension of the array corresponds to one endpoint, the first coordinate position
in each dimension refers to failure in that endpoint, the second coordinate position refers to success.
The array contains the assumed true probabilities for each possible combination of failures and successes.
}
\examples{
## Examples with two endpoints
data<-data.frame(endpoint1=c(0,0,1,1,1,0,0,0,0,1,1,1,1,1,1, 0,0,1,0,0,1,1,1,1,1,1,1,1,1,1),
endpoint2=c(0,0,0,0,0,1,1,1,1,1,1,1,1,1,1, 0,0,0,1,1,1,1,1,1,1,1,1,1,1,1),
group=rep(c(0,1),each=15))
## maximal power under a specified alternative
p1<-matrix(c(0.1,0.2,0.2,0.5),2,2)
p0<-matrix(c(0.75,0.1,0.1,0.05),2,2)
rownames(p1)<-rownames(p0)<-c("EP1_failure","EP1_success")
colnames(p1)<-colnames(p0)<-c("EP2_failure","EP2_success")
testpower<-mfisher.test(x=data[,c(1:2)],y=data$group,method="power",
p1=p1,p0=p0,closed.test=TRUE,show.region=TRUE)
print(testpower)
plot(testpower,cex=2)
str(testpower)
## maximal alpha with consonance constraint and using aggregated data as input
tab1<-table(data$endpoint1[data$group==1],data$endpoint2[data$group==1])
tab0<-table(data$endpoint1[data$group==0],data$endpoint2[data$group==0])
testalpha<-mfisher.test(x=tab1,y=tab0,method="alpha",closed.test=TRUE,
show.region=TRUE,consonant=TRUE)
print(testalpha)
plot(testalpha,cex=2)
## Examples with three endpoints
data3EP<-data.frame(endpoint1=c(0,0,0,0,0,1,1,0,0,0, 0,0,0,0,1,1,1,1,1,1),
endpoint2=c(0,0,0,0,0,1,0,1,0,0, 0,0,1,1,1,1,1,1,1,1),
endpoint3=c(0,0,0,0,0,0,0,0,1,1, 0,0,0,1,1,1,1,1,1,1),
group=rep(c(0,1),each=10))
## greedy alpha exhaustion
testgreedy3EP<-mfisher.test(x=data3EP[,1:3],y=data3EP$group,method="alpha.greedy",
show.region=TRUE,closed.test=TRUE)
print(testgreedy3EP)
par(mfrow=c(3,3))
for(i in 1:9) {
plot(testgreedy3EP,dim=c(1,2),slice=list(T3=i),show.titles=FALSE,cex=2,xlim=c(0,8),ylim=c(0,10))
title(paste("T3 =",i))
}
## Bonferroni greedy
mfisher.test(x=data3EP[,1:3],y=data3EP$group,method="bonferroni.greedy",closed.test=TRUE)
## Bonferroni greedy with alternative input of marginal tables
mfisher.test(x=list(table(data3EP$endpoint1,data3EP$group),
table(data3EP$endpoint2,data3EP$group),table(data3EP$endpoint3,data3EP$group)),
method="bonferroni.greedy",closed.test=TRUE)
}
\references{
Robin Ristl, Dong Xi, Ekkehard Glimm, Martin Posch (2018), Optimal exact tests for multiple binary endpoints.
\emph{Computational Statistics and Data Analysis}, \strong{122}, 1-17. doi: 10.1016/j.csda.2018.01.001 (open access)
}
\seealso{
\code{\link{print.multfisher}}, \code{\link{plot.multfisher}}
}
\author{
Robin Ristl, \email{robin.ristl@meduniwien.ac.at}
}
|
4d41d4d2e67e1c9e19755f4ba26d0a0ad3f6bd99
|
d5a70bccd84fec06593bdf1293dbba6b1cf3276b
|
/inst/testing/stan-time.R
|
5e89a017a0e42c2fc1f32354720801c7242b8017
|
[
"MIT"
] |
permissive
|
medewitt/pandemic
|
34c74d113e7a3eb8746f4a8503af7abca4829e8f
|
86acdf53a044119738cfc96835d56eaa5ac7c370
|
refs/heads/master
| 2022-06-06T10:51:12.601042
| 2020-05-01T18:50:47
| 2020-05-01T18:50:47
| 255,982,191
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,093
|
r
|
stan-time.R
|
library(deSolve)
library(dplyr)
library(rstan)
library(outbreaks)
library(bayesplot)

rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())

## Fit SIR-type Stan models to daily outbreak case counts and plot
## fitted/forecast infection curves. Adapted from:
## https://github.com/anastasiachtz/COMMAND_stan/blob/master/SingleStrainStan.Rmd

## NOTE(review): `dat_red` is not defined anywhere in this script -- it is
## presumably loaded interactively before running. The influenza assignment
## on the next line is immediately overwritten; confirm which data source is
## actually intended.
onset <- influenza_england_1978_school$date
onset <- dat_red$day
cases <- dat_red$cases  # Number of students in bed

N <- length(onset)      # Number of days observed throughout the outbreak
pop <- 10000000         # Population
sample_time <- 1:N

# Modify data into a form suitable for Stan
flu_data <- list(n_obs = N,
                 n_theta = 2,
                 n_difeq = 3,
                 n_pop = pop,
                 y = cases,
                 t0 = 0,
                 ts = sample_time,
                 ts_2 = 1:60,
                 N_pred = 60)

# Specify parameters to monitor
# deterministic models (Model 1, Model 2)
parameters <- c("y_hat", "y_init", "theta", "R_0")
# stochastic model (Model 3)
parameters_stoch <- c("y_hat", "y_init", "theta", "kappa",
                      "lambda", "phi", "s_sq", "sigma", "R_0")

# compile the models ------------------------------------------------------
flu_model <- stan_model("flu.stan")
flu_model_2 <- stan_model("flu-2.stan")

# run it ------------------------------------------------------------------
# FIX: n_warmups was accidentally commented out but is used in the
# sampling() call for model 1 below, which made the script fail with
# "object 'n_warmups' not found". Restored here.
n_warmups <- 500
n_iter <- 1000
n_thin <- 50
set.seed(1234)
n_chains <- 2

# Set initial values: theta = (beta, gamma) and the initial susceptible
# fraction S0, drawn fresh for every chain.
ini_1 <- function() {
  list(theta = c(runif(1, 0, 5), runif(1, 0.2, 0.4)),
       S0 = runif(1, (pop - 3) / pop, (pop - 1) / pop))
}

time.start_nuts1 <- Sys.time()
nuts_fit_1 <- sampling(flu_model, data = flu_data,
                       pars = parameters,
                       init = ini_1,
                       chains = n_chains,
                       warmup = n_warmups,
                       iter = n_iter, thin = n_thin, seed = 13219)
nuts_fit_2 <- sampling(flu_model_2, data = flu_data,
                       #pars = parameters,
                       init = ini_1,
                       chains = n_chains,
                       #warmup = n_warmups,
                       cores = 2,
                       iter = n_iter, thin = n_thin, seed = 19)
time.end_nuts1 <- Sys.time()
duration_nuts1 <- time.end_nuts1 - time.start_nuts1

nuts_fit_1_summary <- summary(nuts_fit_1,
                              pars = c("lp__", "theta[1]", "theta[2]",
                                       "y_init[1]", "R_0"))$summary
print(nuts_fit_1_summary, scientific = FALSE, digits = 2)

posts_1 <- rstan::extract(nuts_fit_1)
posts_2 <- rstan::extract(nuts_fit_2)

summary(nuts_fit_2)
# FIX: the original called summary(nuts_fit_2$y_hat_, pars = ...), but a
# stanfit is an S4 object, so `$` errors at runtime. Summarize the fit
# object itself, mirroring the model-1 summary above.
summary(nuts_fit_2,
        pars = c("lp__", "theta[1]", "theta[2]",
                 "y_init[1]", "R_0"))$summary

# diagnostics -------------------------------------------------------------
rstan::check_divergences(nuts_fit_1)
posterior_1 <- as.array(nuts_fit_1)
color_scheme_set("viridis")
# Markov chain traceplots
mcmc_trace(posterior_1, pars = "lp__")
mcmc_trace(posterior_1, pars = c("theta[1]", "theta[2]", "y_init[1]"))
# Kernel density estimates of each Markov chain separately, overlaid
mcmc_dens_overlay(posterior_1, pars = c("theta[1]", "theta[2]", "y_init[1]"))
# Central posterior uncertainty intervals
mcmc_intervals(posterior_1, pars = c("theta[1]", "theta[2]", "y_init[1]"))
mcmc_trace(posterior_1, pars = "R_0")
# NOTE(review): "lambda" is listed only among the stochastic-model
# parameters; this trace likely errors for the deterministic fit unless
# flu.stan also declares lambda -- confirm.
mcmc_trace(posterior_1, pars = "lambda")

# Model fitted values across the observed time period
fit_I_1 <- posts_1$y_hat[, , 2]  # Fitted fraction of infected
fit_SIR_1 <- fit_I_1 * pop       # Fitted number of infected
median_I_1 <- apply(fit_SIR_1, 2, median)
low_I_1 <- apply(fit_SIR_1, 2, quantile, probs = c(0.1))
high_I_1 <- apply(fit_SIR_1, 2, quantile, probs = c(0.9))
df_sample_N <- data.frame(cases, sample_time)
df_fit_I_1 <- data.frame(median_I_1, low_I_1, high_I_1, sample_time)
#save(df_sample_N,file="data.Rda")
#save(df_fit_I_1,file="df_I_det_Poiss.Rda")

# forecast ----------------------------------------------------------------
fit_forecast <- posts_2$y_hat_2[, , 2]  # Forecast fraction of infected
fit_forecast <- fit_forecast * pop
med_forcast <- apply(fit_forecast, 2, median)
low_forcast <- apply(fit_forecast, 2, quantile, probs = c(0.1))
high_forcast <- apply(fit_forecast, 2, quantile, probs = c(0.9))
forecast_out <- data.frame(med = med_forcast,
                           low = low_forcast,
                           high = high_forcast)

# plot --------------------------------------------------------------------
ggplot(df_sample_N, aes(x = sample_time, y = cases)) +
  geom_ribbon(aes(x = sample_time, ymin = low_I_1, ymax = high_I_1), fill = "orange", alpha = 0.6) +
  geom_line(data = df_fit_I_1, aes(x = sample_time, y = median_I_1, color = "Median"), size = 1.3) +
  geom_point(shape = 19, size = 3, (aes(color = "Data"))) +
  scale_colour_manual(name = '', values = c('Data' = 'black', 'Median' = 'darkorange3')) +
  guides(colour = guide_legend(override.aes = list(shape = c(16, NA), linetype = c(0, 1)))) +
  labs(x = "Time (days)", y = "Number of Infected students") +
  scale_x_continuous(limits = c(0, 60), breaks = c(0, 7, 60)) +
  #scale_y_continuous(limits=c(0,1500), breaks=c(0,100,200,300,400)) +
  theme_bw() + theme(text = element_text(size = 20)) +
  geom_ribbon(data = forecast_out,
              aes(x = 1:60, ymax = high, ymin = low),
              inherit.aes = FALSE, color = "gray90", alpha = .5) +
  geom_line(data = forecast_out,
            aes(x = 1:60, y = med),
            inherit.aes = FALSE, color = "blue") +
  scale_y_log10()

plot(posts_1$theta[, 1], type = "b")
|
f780b490991a44f77516ae0d0703879195d6b037
|
b591a9d42c1933f3bf798e2bd87ba3cb2171daba
|
/r_scripts/short_long_term_trends.R
|
352ee0c1efc915b9c806922de104417062f818c0
|
[
"MIT"
] |
permissive
|
mtaylor-semo/163
|
79e24cfe51a984feb4d5dd8f1d38dee371655466
|
84f7a0e86200a36654618e1d91fcf4a583288c15
|
refs/heads/main
| 2023-08-17T21:05:01.563216
| 2023-08-15T14:06:42
| 2023-08-15T14:06:42
| 78,750,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,121
|
r
|
short_long_term_trends.R
|
# Code based on
# https://data-steve.github.io/global-temp-pt2/
# This uses only adjusted global temps.
# Plots short-term (1997-2015) and long-term global sea-surface temperature
# anomaly trends from the HadSST3 monthly series.
library(tidyverse)
library(ggthemes)
library(RColorBrewer)
## Uncomment the following lines to get the latest data file.
# if (file.exists('HadSST.3.1.1.0_monthly_globe_ts.txt')){
#   file.rename('HadSST.3.1.1.0_monthly_globe_ts.txt','HadSST.3.1.1.0_monthly_globe_ts.BAK')
# }
# mon_gl <- "http://hadobs.metoffice.com/hadsst3/data/HadSST.3.1.1.0/diagnostics/HadSST.3.1.1.0_monthly_globe_ts.txt"
# had_m_avg <- readLines(mon_gl)
# write.table(had_m_avg, file="HadSST.3.1.1.0_monthly_globe_ts.txt", quote=FALSE, row.names=FALSE, col.names=FALSE)

# Each input line is "YYYY/MM value ..."; split on whitespace and keep the
# year-month key plus the numeric anomaly.
sst_m_avg <- readLines("HadSST.3.1.1.0_monthly_globe_ts.txt")
sst_long <- lapply(sst_m_avg, function(x) {#browser()
  v <- strsplit(x,"\\s+")[[1]]
  # NOTE(review): data_frame() is deprecated in current dplyr; tibble() is
  # the drop-in replacement (no behavior change).
  data_frame(ym=v[1], avg=as.numeric(v[2]))
}) %>% bind_rows()

one_color=brewer.pal(3,'Dark2')[2]

# Short-term window: Jan 1997 through Aug 2015, one x tick per year.
sst_short <- filter(sst_long, ym>="1997/01", ym<="2015/08")
ticks_short <- sst_short$ym[seq(1,300,by=12)]
sst_short_plot <- sst_short %>%
  mutate(ym=factor(ym,levels=ym, ordered=T)) %>%
  ggplot(aes(x=ym, y=avg)) +
  geom_point(color=one_color) +
  ylab("Anomalies (°C) from 1961-90 avg.") + xlab("Date (y/m)") +
  ggtitle("Global Sea-Surface Temperature Trends (HadSST3)") +
  scale_x_discrete(breaks = ticks_short) +
  theme_tufte() +
  geom_smooth(method='lm', aes(group=1), fill=NA, color='black') +
  theme(legend.position="none", axis.text.x = element_text(angle = 45, hjust = 1))
sst_short_plot
ggsave('sst_short_plot.png', sst_short_plot, device = 'png')

# Sign of each anomaly drives point color/shape; x ticks every 8 years.
sst_long[["sign"]] = ifelse(sst_long[["avg"]] >= 0, "positive", "negative")
# NOTE(review): seq(1,2000,by=96) assumes the series has at least 2000 rows;
# out-of-range indices yield NA tick labels -- confirm against the data.
ticks_long <- sst_long$ym[seq(1,2000,by=96)]
# Long-term, overall trend
sst_long_plot <-
  sst_long %>%
  mutate(ym=factor(ym,levels=ym, ordered=T)) %>%
  ggplot(aes(x=ym, y=avg)) +
  geom_abline(intercept = 0, slope = 0, color='lightgray',size=0.4) +
  geom_point(aes(colour = sign, shape = sign)) +
  scale_color_brewer(palette = "Dark2") +
  ylab("Anomalies (°C) from 1961-90 avg.") + xlab("Date (y/m)") +
  ggtitle("Global Sea-Surface Temperature Trends (HadSST3)") +
  scale_x_discrete(breaks = ticks_long) +
  geom_smooth(method='lm', aes(group=1), colour='black', fill=NA) +
  theme_tufte() +
  theme(legend.position="none", axis.text.x = element_text(angle = 45, hjust = 1))
# Use the ggsave based on the trendline used.
ggsave('sst_overall_trend.png', sst_long_plot, device = 'png')

# Long-term trend with 15 year trend line from sst-short added.
sst_long_plot <-
  sst_long %>%
  mutate(ym=factor(ym,levels=ym, ordered=T)) %>%
  ggplot(aes(x=ym, y=avg)) +
  geom_abline(intercept = 0, slope = 0, color='lightgray',size=0.4) +
  geom_point(aes(colour = sign, shape = sign)) +
  scale_color_brewer(palette = "Dark2") +
  ylab("Anomalies (°C) from 1961-90 avg.") + xlab("Date (y/m)") +
  ggtitle("Global Sea-Surface Temperature Trends (HadSST3)") +
  scale_x_discrete(breaks = ticks_long) +
  geom_smooth(data=sst_short, aes(group=1), method='lm', color='black', fill=NA) +
  theme_tufte() +
  theme(legend.position="none", axis.text.x = element_text(angle = 45, hjust = 1))
ggsave('sst_long_plus_15yr_trend.png', sst_long_plot, device = 'png')

# Long-term trend: Two trends, with a break 50 years ago.
# Highlights recent rapid increase.
sst_long_plot <-
  sst_long %>%
  mutate(ym=factor(ym,levels=ym, ordered=T)) %>%
  ggplot(aes(x=ym, y=avg)) +
  geom_abline(intercept = 0, slope = 0, color='lightgray',size=0.4) +
  geom_point(aes(colour = sign, shape = sign)) +
  scale_color_brewer(palette = "Dark2") +
  ylab("Anomalies (°C) from 1961-90 avg.") + xlab("Date (y/m)") +
  ggtitle("Global Sea-Surface Temperature Trends (HadSST3)") +
  scale_x_discrete(breaks = ticks_long) +
  geom_smooth(data=subset(sst_long,ym>="1967/01"), aes(group=1), method='lm', color='black', fill=NA) +
  geom_smooth(data=subset(sst_long,ym<"1967/01"), aes(group=1), method='lm', color='black', fill=NA) +
  theme_tufte() +
  theme(legend.position="none", axis.text.x = element_text(angle = 45, hjust = 1))
ggsave('sst_long_50yr_trend.png', sst_long_plot, device = 'png')
|
906af63238eae63e0e86d7f0b358a317b36a66f4
|
2509533c9af9069aa18ad4eeeb222aef0b67de6a
|
/R/countsPerPatientMonth.R
|
213a515dfbfb556d839288861296a07c97270655
|
[] |
no_license
|
aGutierrezSacristan/temporalVariabilityAnalysis
|
5f21e555d061121c989cbfa1125d45f823da116a
|
d4d398b55f8b9b26dcbe9fd60d4891a7e3399f7f
|
refs/heads/main
| 2023-04-16T05:16:39.648791
| 2021-05-05T00:23:18
| 2021-05-05T00:23:18
| 335,374,105
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,826
|
r
|
countsPerPatientMonth.R
|
# Count distinct patients per calendar month for one observation type
# ("diagnosis" -> DIAG-ICD10 codes, "medication" -> MED-CLASS codes),
# optionally obfuscating small counts.
#
# NOTE(review): the parameters `observationFile` and `clinicalFile` are
# never used -- the body reads the globals `obs_raw` and `clinical_raw`
# instead. Presumably those are loaded by the caller; confirm whether the
# parameters are vestigial or should be wired in.
countsPerPatientMonth <- function( observationFile, clinicalFile, obfuscation, type ){
  # Keep post-admission observations of the requested concept type; `pair`
  # is a "patient*day" key used to join against the clinical table.
  if(type == "diagnosis"){
    observations <- unique( obs_raw %>%
                              filter( concept_type == 'DIAG-ICD10', days_since_admission >= 0) %>%
                              mutate( pair = as.character(paste0( patient_num, "*",days_since_admission))) %>%
                              select( patient_num, pair, days_since_admission, concept_code ))
  }else if(type == "medication"){
    observations <- unique( obs_raw %>%
                              filter( concept_type == 'MED-CLASS', days_since_admission >= 0) %>%
                              mutate( pair = as.character(paste0( patient_num, "*",days_since_admission))) %>%
                              select( patient_num, pair, days_since_admission, concept_code ))
  }
  # Same "patient*day" key on the clinical side, carrying the calendar date.
  clinical <- clinical_raw %>%
    mutate( pair = as.character(paste0( patient_num, "*",days_since_admission))) %>%
    select( patient_num, pair, calendar_date, severe, deceased)
  # Join on the key; merge() suffixes the duplicated patient_num columns, so
  # keep the observation-side one (patient_num.x).
  final <- merge( observations, clinical, by="pair")
  final <- final %>%
    mutate(patient_num = patient_num.x) %>%
    select(patient_num, concept_code, calendar_date)
  # Derive "YYYY-MM" from the (assumed "YYYY-MM-DD") calendar date --
  # TODO confirm the date format upstream.
  final$year_month <- as.factor( paste0( sapply(strsplit( as.character(final$calendar_date), "[-]"), '[', 1), "-",
                                         sapply(strsplit( as.character(final$calendar_date), "[-]"), '[', 2)))
  # Distinct patients per month, largest counts first.
  counts <- final %>% dplyr::group_by( year_month ) %>%
    dplyr::summarise(distinct_patients = n_distinct(patient_num)) %>%
    dplyr::arrange( desc(distinct_patients), .by_group = FALSE)
  # Obfuscation: counts below the threshold are replaced by half the
  # threshold so small cells cannot be identified.
  if( obfuscation != FALSE){
    counts$distinct_patients <- ifelse( counts$distinct_patients < obfuscation, 0.5 * obfuscation, counts$distinct_patients)
  }
  # Returned wrapped in a one-element list (callers apparently expect this).
  counts <- list(counts)
  return( counts )
}
|
63cff455e5ce3affc252f2269af50a0773639499
|
b7854de8e5b0f97ec18dbed95559abd7ec0380d7
|
/man/visualize.Rd
|
63f5fa3ef82c15341440f5aa3bad3220ef3a770b
|
[] |
no_license
|
sahatava/MicNet
|
58615ddbb69426f3fa4468ee5b76f8250126140b
|
9c5176ed9ff8993cf02d9e3fb55872226f4efbf1
|
refs/heads/master
| 2020-06-24T23:27:47.060733
| 2019-09-30T14:29:51
| 2019-09-30T14:29:51
| 199,124,692
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 231
|
rd
|
visualize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualize.R
\name{visualize}
\alias{visualize}
\title{A function to}
\usage{
visualize(partial, threshold)
}
\value{
res
}
\description{
A function to
}
|
346fd1234eea6a416375d159711cd97dbb0334e2
|
9da5e5230522f71434aeb7ba08ed554702f5a3c7
|
/power_ecotox/ui.R
|
946f222cb7e81fdb5e6397802bfaf9c75614a320
|
[
"MIT"
] |
permissive
|
eduardszoecs/shiny_apps
|
cc728f3f7e1d7128cee0b416379d5d5ef047e2f3
|
9e956e8fdfcbe3766611bc0c469210539e72fc15
|
refs/heads/master
| 2023-02-09T17:46:43.406297
| 2016-12-30T11:58:04
| 2016-12-30T11:58:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,943
|
r
|
ui.R
|
# Shiny UI for the "shinytox" app: a navbar with an introduction page, a
# fully wired "Power" simulation tab, and placeholder tabs for community and
# population analyses (panels declared but still empty).
library(shiny)
library(shinythemes)
library(ggplot2)
# Define UI for slider demo application
shinyUI(navbarPage("shinytox (alpha)", theme = shinytheme("united"),
# Landing page: rendered from a markdown file.
tabPanel('Introduction', value = 'rm',
includeMarkdown("md/README_intro.md")
),
# Power analysis: sidebar collects simulation settings, main panel shows
# design, global-test and LOEC results.
tabPanel("Power",
sidebarLayout(
sidebarPanel(
h2('Settings'),
# MathJax is needed to render the LaTeX labels of the sliders below.
# NOTE(review): the CDN is loaded over http; if the app is served via
# https this becomes blocked mixed content -- consider https.
tags$head(
tags$script(src = "http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML-full",
type = 'text/javascript')
),
wellPanel(tags$details(tags$summary("Effects"),
sliderInput("muc", HTML("$$\\text{Abundance in Control } (\\mu_C)$$"), value = 10,
min = 0.5, max = 200, step = 1),
sliderInput('theta', '$$\\text{Dispersion} (\\kappa)$$ ',
value = 4, min = 0.1, max = 250, step = 0.1),
sliderInput('effsize', 'Reduction in treatment (r)',
value = 0.5, min = 0, max = 1, step = 0.01)
)
),
wellPanel(tags$details(tags$summary("Simulations"),
sliderInput("nsims", "Number of simulations:",
min = 50, max = 250, value = 50, step = 10),
textInput('N', 'Sample sizes (separated by comma or space; up to 5 entries): ',
'3, 6, 9')
)
),
# wellPanel(tags$details(
#   tags$summary("Models")
#   )
# ),
wellPanel(tags$details(tags$summary("Inference"),
selectInput("mct", "Multiple comparison contrasts:",
choices = c('Dunnett contrasts' = 'Dunnett')),
selectInput("alt", "Hypothesis:",
choices = c('one sided' = 'less',
'two sided' = 'two.sided'))
)
),
# Simulations run only on explicit request.
actionButton("goButton", "Run Simulation!")
),
mainPanel(
tabsetPanel(id='main', selected = 'Readme',
tabPanel('Readme', value = 'rm1',
includeMarkdown("md/README_power.md")
),
tabPanel('Simulation-Design', value='sd',
p("1000 draws from the specified design:"),
plotOutput("desplot"),
h3("Summary"),
dataTableOutput("destab")
),
tabPanel("Global test", value = 'gt',
plotOutput("powplot"),
h3("Summary"),
# textOutput("test"),
dataTableOutput("powtable"),
downloadButton('downloadpowtable', 'Download table'),
downloadButton('downloadpowplot', 'Download plot')
),
tabPanel('LOEC', value = 'loec',
plotOutput("loecplot"),
h3("Summary"),
dataTableOutput("loectable"),
downloadButton('downloadloectable', 'Download table'),
downloadButton('downloadloecplot', 'Download plot')
)
)
)
)
),
# Placeholder tab: panels declared, content not yet implemented.
tabPanel("Community - PRC",
sidebarLayout(
sidebarPanel(
tabsetPanel(
tabPanel('Data'
),
tabPanel('Transformation'
),
tabPanel('Settings'
),
tabPanel('README'
)
)
),
mainPanel(
tabsetPanel(
tabPanel('Plot'
),
tabPanel('Summary'
),
tabPanel('Tests'
),
tabPanel('Summary (per sampling)'
),
tabPanel('Tests (per sampling)'
)
)
)
)
),
# Placeholder tab: panels declared, content not yet implemented.
tabPanel("Community - GLM",
sidebarLayout(
sidebarPanel(
tabsetPanel(
tabPanel('Data'
),
tabPanel('Settings'
),
tabPanel('README'
)
)
),
mainPanel(
tabsetPanel(
tabPanel('Diagnostics'
),
tabPanel('Plot'
),
tabPanel('Summary'
),
tabPanel('Tests'
),
tabPanel('Summary (per sampling)'
),
tabPanel('Tests (per sampling)'
)
)
)
)
),
# Placeholder tab: panels declared, content not yet implemented.
tabPanel("Population-GLM",
sidebarLayout(
sidebarPanel(
tabsetPanel(
tabPanel('Data'
),
tabPanel('Settings'
),
tabPanel('README'
)
)
),
mainPanel(
tabsetPanel(
tabPanel('Diagnostics'
),
tabPanel('Summary'
),
tabPanel('Effect Class'
)
)
)
)
)
# ,tabPanel("About"
#
# )
))
|
30dddfdee35715fb18919d4e14c1ee4c70e2bac8
|
2691c3c0a93abc48ce909f76fc6d1304576426e5
|
/man/rfe.rmse.plot.Rd
|
41324cece28fc5fcf73500f160876da80b92b233
|
[
"MIT"
] |
permissive
|
liuyanguu/plotthis
|
f1857b5d41bc3da3929e15de122df868f0613da5
|
0fdd13d62bd1c7eade7975314a813d01ff390f50
|
refs/heads/master
| 2020-06-30T16:54:02.825891
| 2019-08-10T20:42:58
| 2019-08-10T20:42:58
| 200,889,961
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 488
|
rd
|
rfe.rmse.plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv_rfe.R
\name{rfe.rmse.plot}
\alias{rfe.rmse.plot}
\title{Show RFE results: make plots of rmse for new RFE.
updated on (19.04.06). only one line is plotted.}
\usage{
rfe.rmse.plot(rmse_rfe = rferesults$rmse_rfe)
}
\arguments{
\item{rmse_rfe}{use rferesults$rmse_rfe}
}
\value{
ggplot2 object
}
\description{
Show RFE results: make plots of rmse for new RFE.
updated on (19.04.06). only one line is plotted.
}
|
1378a3eab51f5559e6e425b5701164f5bb61e06e
|
cdf173e0189074feed208d86b1fb59fffd987405
|
/Randoms/processFiles.R
|
972ea9f2060f3933b20f650f623fe57820a6a6de
|
[] |
no_license
|
apineda90/Metodos
|
168c665fdcc903537da7fb265075a0d7a99a57c6
|
d1eda601c54f2aef08e801f1b0c27e1c169707e9
|
refs/heads/master
| 2021-01-10T13:56:09.977217
| 2016-01-27T04:51:21
| 2016-01-27T04:51:21
| 50,320,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,448
|
r
|
processFiles.R
|
library(sqldf)
library(tkrplot)
library(car)
## levene test http://www.cookbook-r.com/Statistical_analysis/Homogeneity_of_variance/
# Run Levene's test for homogeneity of variance of read_count across the
# groups stored in a CSV file (requires the `car` package for leveneTest).
levenetest <- function(dataset) {
  grouped <- read.csv(dataset, header = TRUE, sep = ",")
  leveneTest(read_count ~ as.factor(group), data = grouped)
}
## Generate just 1 file with 2 different groups
# Read two CSV files, tag the rows of the first with group 1 and the second
# with group 2, stack them, and write the combined table to `outputfile`.
mergeFiles <- function(dataset, dataset2, outputfile) {
  groups <- list(read.csv(dataset, header = TRUE, sep = ","),
                 read.csv(dataset2, header = TRUE, sep = ","))
  # Append the group id column to each table (1 for the first, 2 for the second).
  for (i in seq_along(groups)) {
    groups[[i]][["group"]] <- i
  }
  write.csv(do.call(rbind, groups), file = outputfile, row.names = FALSE)
}
## Given the name of a file
# Drop exact duplicate rows from a CSV file and save the de-duplicated
# table to `outputfile`. unique() on a data frame is equivalent to
# x[!duplicated(x), ].
deleterepeated <- function(dataset, outputfile) {
  rows <- read.csv(dataset, header = TRUE, sep = ",")
  write.csv(unique(rows), file = outputfile, row.names = FALSE)
}
## Test run ("prueba"). NOTE(review): paths are hard-coded to one user's
## Desktop -- parameterize before reuse.
## deleterepeated("~/Desktop/supervisor_pacemaker.csv","~/Desktop/supervisor_final_pacemaker.csv")
## mergeFiles("~/Desktop/supervisor_final.csv","~/Desktop/supervisor_final_pacemaker.csv","~/Desktop/supervisor_total.csv")
levenetest("~/Desktop/supervisor_total.csv")
file1<- read.csv("~/Desktop/supervisor_total.csv",header=TRUE,sep = ",")
# Boxplot of write_count by group, saved as a PNG.
png("~/Desktop/test.png")
plot(write_count~group, data = file1)
dev.off()
|
b8c168123c450ad0cf571228215d0e0472d73e38
|
c29c7b4b9e3d1e1e7b1ea8d888bbb38719ceec89
|
/RPM.R
|
796c222ebb461a977b205025cfb233a6bd213696
|
[] |
no_license
|
bweasels/SequencingUtilities
|
1732c53b9a1b602fe50f387af44d4de8108822e5
|
45255e18b56b620e00803c725dd56d18bae1051b
|
refs/heads/master
| 2021-07-06T16:55:05.734785
| 2020-10-15T17:43:36
| 2020-10-15T17:43:36
| 194,119,328
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
RPM.R
|
# Reads-per-million normalization: rescale every column of `counts` so that
# its entries sum to one million. Dimensions and dimnames are preserved.
RPM <- function(counts) {
  per_million <- function(x) x * 1e6 / sum(x)
  apply(counts, 2, per_million)
}
|
0d652670671f0d7eff71bd4469e3bae9a179ca29
|
548cf427d3dae97f216bdae6accfa95bf02bbf2e
|
/src/select.R
|
a715fb98091dd1d7c529316b2d05ce430a351333
|
[] |
no_license
|
mlcenzer/Code-for-dispersal-evolution-and-LA
|
27eb0818d47d0a021dff298c670ff2fe868493ae
|
a2eae9a0eb2f043506701fb46d32f09394cd4c8a
|
refs/heads/master
| 2021-04-06T14:06:28.848888
| 2018-10-14T16:18:14
| 2018-10-14T16:18:14
| 125,384,625
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
r
|
select.R
|
## selection
# Apply viability selection: for every cell of `pop`, draw the number of
# survivors as Binomial(pop, prms$real.hab.fit); dim(pop) is preserved.
#
# FIX: the original brace-less body assigned the result to a dangling local
# `pop1`, so the value was only returned invisibly and the assignment was
# dead code. Return the array directly instead.
select <- function(prms, pop) {
  array(rbinom(length(pop), pop, prms$real.hab.fit),
        dim = dim(pop))
}
|
5f3e27461bc06cd042dd881656e98fda585c88c2
|
317360cc997480b99ee0b5f29d4f682d5c5493e5
|
/man/fit_model.Rd
|
6e05e9062ded1b9fe5bf793332410ddbd5250ba1
|
[] |
no_license
|
StevenBoys/Ensemblelearn
|
408fcd88b533bbcf4e57de6518a10dd8cf5bee7c
|
db4c1da875020970597d38f607f5af0e34a70f8b
|
refs/heads/master
| 2020-09-05T12:10:11.829363
| 2019-12-08T04:26:06
| 2019-12-08T04:26:06
| 220,099,511
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 780
|
rd
|
fit_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EnsembleFuns.R
\name{fit_model}
\alias{fit_model}
\title{Function that gets results from weak model}
\usage{
fit_model(fweak, parallel, data)
}
\arguments{
\item{fweak}{- function that generates estimate from weak model based on input}
\item{parallel}{- logical value; \code{TRUE} if the results will be combined in parallel}
\item{data}{- list of data that fweak need}
}
\value{
The new trained results based on the weak model.
}
\description{
Function that gets results from weak model
}
\examples{
fweak <- function(x, y){
lm(y ~ -1 + x)$coefficients
}
data <- list(x = matrix(rnorm(1000), 200, 5))
parallel <- TRUE
data$y <- data$x \%*\% rnorm(5) + rnorm(200, 0, 3)
fit_model(fweak, parallel, data)
}
|
36743b1bf462696ee53d6bba0c7278ef6eda74a9
|
8a2148c79a23f8fa878c309a40f21f721ffa188c
|
/Code/NewCode/plot.R
|
7c776dccc0337d2c1abfde31b94a1f849b24b04d
|
[] |
no_license
|
LiXiang618/Scenario-Analysis-On-Portfolio-Rebalancing
|
8738f0a4363d92969b9879f558a7b975d346e20c
|
edbd1f085fef95a79fa37f1dd8a1584080d0c3a1
|
refs/heads/master
| 2021-05-02T08:25:58.071553
| 2018-02-24T02:35:46
| 2018-02-24T02:35:46
| 120,800,673
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
plot.R
|
# Diagnostic plots for the portfolio-rebalancing simulation.
# NOTE(review): this script assumes S, B, V, V_bh, V_daily, V_mon, V_con,
# V_new and the lowercase v_* vectors already exist in the workspace
# (created by an earlier script) -- it cannot run stand-alone.
# Panel 1: stock, bond and portfolio value paths.
par(mfrow=c(2,2))
plot(S,type = "l",col = "red")
plot(B,type = "l",col = "green")
plot(V_bh,type = "l",col = "purple")
plot(V,type = "l",col = "purple")
# Panel 2: value paths under the different rebalancing strategies.
par(mfrow=c(2,2))
plot(V_bh,type = "l",col = "green")
plot(V_daily,type = "l",col = "pink")
plot(V_mon,type = "l",col = "red")
plot(V_con,type = "l",col = "purple")
# Panel 3: terminal-value histograms per strategy (100 bins).
par(mfrow=c(2,2))
hist(v_bh,100)
hist(v_daily,100)
hist(v_mon,100)
hist(v_con,100)
# Panel 4: only three histograms -- the fourth cell stays empty.
par(mfrow=c(2,2))
hist(v_bh,100)
hist(v_con,100)
hist(v_new,100)
# Panel 5: three value paths -- the fourth cell stays empty.
par(mfrow=c(2,2))
plot(V_bh,type = "l",col = "green")
plot(V_con,type = "l",col = "purple")
plot(V_new,type = "l",col = "blue")
|
ff9e001e1f484f116d24fca573dc8a896ffd3695
|
fdc6fe3e44626a8d8a7a23004b2919ea949affc6
|
/plot3.R
|
b72277502abfb66f3bab7e072ff92a0115c0ef8e
|
[] |
no_license
|
mkerikss/ExData_Plotting1
|
a2ac64f3383e71fa39cb1284f14fe73a50b1509d
|
ebb2ac10275717175d7a23bd602d72712e2bdf12
|
refs/heads/master
| 2021-01-15T18:36:13.793041
| 2015-03-08T20:43:46
| 2015-03-08T20:43:46
| 31,847,447
| 0
| 0
| null | 2015-03-08T11:30:40
| 2015-03-08T11:30:40
| null |
UTF-8
|
R
| false
| false
| 731
|
r
|
plot3.R
|
## Reading data
# Household power consumption file: semicolon-separated, "?" marks NA.
file<-"household_power_consumption.txt"
dat<-read.table(file,header=TRUE,sep=";",na.strings="?")
## Only two days investigated
dat<-dat[which(dat$Date=="1/2/2007" | dat$Date=="2/2/2007"),]
## Variable for dates (Date-format)
# Combine the Date and Time columns into one POSIXlt timestamp per row.
datetimeStr<-paste(dat$Date,dat$Time,sep=" ")
datetime<-strptime(datetimeStr, "%d/%m/%Y %H:%M:%S")
## Plot
# Overlay the three sub-metering series against time and save as plot3.png.
png("plot3.png",width=480,height=480,units="px",bg="transparent")
plot(datetime,dat$Sub_metering_1,"l",ylab="Energy sub metering",xlab="")
points(datetime,dat$Sub_metering_2,"l",col="red")
points(datetime,dat$Sub_metering_3,"l",col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty="solid",col=c("black","red","blue"))
dev.off()
|
a73b8479bd8e7d48833e4647fc3839d436e6882d
|
a6ac32e43c91a3e4594685a585455ebe89c9a04e
|
/R/findRoot.R
|
b4564a09575458015e71e2feeb3d832fd6c7ecef
|
[] |
no_license
|
heibl/megaptera
|
6aeb20bc83126c98603d9271a3f1ae87311eedc1
|
5e8b548b01c40b767bd3bb3eb73d89c33b0bc379
|
refs/heads/master
| 2021-07-08T16:44:30.106073
| 2021-01-11T13:58:04
| 2021-01-11T13:58:04
| 55,764,237
| 5
| 0
| null | 2019-02-13T13:50:43
| 2016-04-08T08:44:44
|
R
|
UTF-8
|
R
| false
| false
| 2,572
|
r
|
findRoot.R
|
## This code is part of the megaptera package
## © C. Heibl 2017 (last update 2018-12-12)
#' @title Lineage Down to the Root
#' @description Finds the lineage from one taxon, or the most recent common
#' ancestor of several taxa, down to the root of the Tree of Life.
#' @param x An object of class \code{\link{megapteraProj}}.
#' @param what A character string, either \code{"ingroup"}, \code{"outgroup"},
#' or \code{"both"}.
#' @return A data frame with three columns: \item{id}{the unique identifier of
#' the taxon} \item{taxon}{the scientific name of the taxon} \item{rank}{the
#' rank of the taxon} The row order is from lower to higher ranked taxa, i.e.
#' backwards into evolutionary time.
#' @importFrom methods slot
#' @export
findRoot <- function(x, what){
  ## CHECKS
  ## ------
  if (!inherits(x, "megapteraProj"))
    stop("'x' is not of class 'megapteraProj'")
  ## Determine scope
  ## ---------------
  # Collect the taxa whose shared lineage is requested.
  what <- match.arg(what, c("ingroup", "outgroup", "both"))
  if (what == "both"){
    tax <- c(x@taxon@ingroup, x@taxon@outgroup)
  } else {
    tax <- unlist(slot(x@taxon, what))
  }
  ## CASE 1: species list
  ## --------------------
  # All names look like Linnean binomials: walk the taxonomy table from the
  # root (id 1) downward while the path is unbranched.
  if (all(is.Linnean(unlist(tax)))){
    ## Read taxonomy from database
    tax <- dbReadTaxonomy(x, subset = tax)
    id <- all_ids <- 1
    # Children of the root, excluding the root's self-reference.
    id <- setdiff(tax[tax$parent_id == id, "id"], id)
    # Follow the chain as long as exactly one child exists; it forks at the
    # MRCA of the subset.
    while (length(id) == 1){
      all_ids <- c(all_ids, id)
      id <- unique(tax$id[tax$parent_id == id])
    }
    # rev() orders the result from lower ranks back toward the root.
    # NOTE(review): this branch returns parent_id and status columns in
    # addition to id/taxon/rank, unlike CASE 2 below -- confirm callers
    # tolerate the differing column sets.
    r <- tax[match(rev(all_ids), tax$id), c("parent_id", "id", "taxon", "rank", "status")]
  } else {
    ## CASE 2: higher rank taxa
    ## ------------------------
    # Resolve each higher taxon to its full lineage, dropping unresolved ones.
    r <- dbReadTaxonomy(x)
    r <- lapply(tax, taxdumpLineage, tax = r)
    r <- r[!sapply(r, is.null)]
    if (length(r) > 1){
      ## find common set of nodes
      ## ------------------------
      # Successively intersect the lineages' node ids.
      rr <- lapply(r, function(x) x$id)
      for (i in 2:length(rr)){
        rr[[i]] <- intersect(rr[[i - 1]], rr[[i]])
      }
      ## find lowest rank in common set of nodes
      ## ---------------------------------------
      # Rank each shared node by its mean position across the lineages; the
      # lowest-ranked shared node is the MRCA.
      obj <- data.frame(common = rr[[i]], rank = NA)
      for (j in 1:nrow(obj)){
        obj[j, "rank"] <- mean(sapply(r, function(z, cc) which(z$id == cc),
                                      cc = obj$common[j]))
      }
      r <- unique(do.call(rbind, r))
      mrca.id <- obj$common[order(obj$rank)]
      r <- r[match(mrca.id, r$id), c("id", "taxon", "rank")]
    } else {
      # Single lineage: return it directly.
      r <- r[[1]][, c("id", "taxon", "rank")]
    }
  }
  r
}
|
c7fa2f6a364ee6718be1780251a3920feb8b99c4
|
f33a2a4d9f7fce3045407e17234cc7a97507d05e
|
/R/fars_function.R
|
b823c658e0566db86a875a5daad3b5c2b689752d
|
[
"MIT"
] |
permissive
|
Karol-sandoval/fars_package
|
08c7a1d12fa49fe6e4324a28dd4ba631be3dc69d
|
b7558a1ccf3352bac541c6d74826665c8b5266c6
|
refs/heads/main
| 2023-03-29T01:34:05.995980
| 2021-03-30T15:27:31
| 2021-03-30T15:27:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,403
|
r
|
fars_function.R
|
##############################################################################
############################## FarsKevin Package #############################
##############################################################################
# fars_read function ------------------------------------------------------
#' fars_read
#'
#' This function shows a quick view of your data in an object of class tibble,
#' according to a specified CSV filename.
#'
#' @param filename A character string giving the filename the function will
#' visualize.
#'
#' @return This function returns a data frame of class tibble with the data
#' from the specified filename. If the filename does not exist, there will
#' be a message informing about it.
#'
#' @note There will be errors if the filename is absent, there are no
#' quotation marks in the filename argument or the file does not exist.
#'
#' @importFrom readr read_csv
#' @importFrom dplyr tbl_df
#'
#' @examples
#' fars_read("accident_2013.csv.bz2")
#'
#' @export
# Read a FARS CSV file into a tibble-like data frame, stopping with an
# informative error when the file does not exist. Parser messages from
# readr are suppressed.
fars_read <- function(filename) {
  if (!file.exists(filename)) {
    stop("file '", filename, "' does not exist")
  }
  csv_data <- suppressMessages(readr::read_csv(filename, progress = FALSE))
  # NOTE(review): dplyr::tbl_df() is deprecated upstream in favor of
  # tibble::as_tibble(); kept here to preserve behavior exactly.
  dplyr::tbl_df(csv_data)
}
# make_filename function --------------------------------------------------
#' make_filename
#'
#' Build the canonical filename of the US National Highway Traffic Safety
#' Administration's Fatality Analysis Reporting System data archive for a
#' given year.
#'
#' @param year A numeric (or numeric-coercible) value giving the year.
#'
#' @return A character string of the form "accident_<year>.csv.bz2".
#'
#' @note A value that cannot be coerced to integer produces an NA year in
#'   the filename (with a coercion warning).
#'
#' @examples
#' make_filename(2015)
#'
#' @export
make_filename <- function(year) {
  yr <- as.integer(year)
  sprintf("accident_%d.csv.bz2", yr)
}
# fars_read_years function ------------------------------------------------
#' fars_read_years
#'
#' This function selects the month and year from each record in the files
#' associated to the US National Highway Traffic Safety Administration's
#' Fatality Analysis Reporting System, given one or more years.
#'
#' @param years A numeric vector giving the year or years from which to select.
#'
#' @return This function returns a list with data frames of class tibble with
#' the months and years from each record of the file. Elements for invalid
#' years are NULL.
#'
#' @note There will be errors if the year is absent or if the year is
#' referenced to a non-existing object in the environment. There are warnings
#' when the year is invalid (no existing filename with that year).
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr select
#' @importFrom magrittr %>%
#'
#' @examples
#' fars_read_years(c(2013,2014))
#'
#' @export
fars_read_years <- function(years) {
lapply(years, function(year) {
# Build the expected filename for this year and try to read it
file <- make_filename(year)
tryCatch({
dat <- fars_read(file)
# `year` on the right-hand side is the lapply iteration variable;
# it becomes a constant column named `year` in the result
dplyr::mutate(dat, year = year) %>%
dplyr::select(MONTH, year)
}, error = function(e) {
# A missing/unreadable file for this year is downgraded to a warning
warning("invalid year: ", year)
return(NULL)
})
})
}
# fars_summarize_years function -------------------------------------------
#' fars_summarize_years
#'
#' This function summarizes the number of times the months appear for a
#' specified vector of years.
#'
#' @param years A numeric vector giving the year or years from which to select
#'
#' @return This function returns a data frame with the amount of times every
#' month appears per year (one column per year, one row per month).
#'
#' @note There will be errors if the year is absent or if the year is
#' referenced to a non-existing object in the environment. There are warnings
#' when the year or years are invalid (no existing filename with that year).
#'
#' @importFrom dplyr bind_rows
#' @importFrom dplyr group_by
#' @importFrom dplyr summarize
#' @importFrom dplyr n
#' @importFrom magrittr %>%
#' @importFrom tidyr spread
#'
#' @examples
#' fars_summarize_years(c(2013,2014,2015))
#'
#' @export
fars_summarize_years <- function(years) {
# NULL list elements from invalid years are silently dropped by bind_rows()
dat_list <- fars_read_years(years)
dplyr::bind_rows(dat_list) %>%
dplyr::group_by(year, MONTH) %>%
dplyr::summarize(n = n()) %>%
tidyr::spread(year, n)
}
# fars_map_state -------------------------------------------------------------------------
#' fars_map_state
#'
#' Draw a map of the fatal traffic accidents recorded for one state of the
#' United States in a given year.
#'
#' @param state.num A numeric value identifying the US state to plot.
#'
#' @param year A numeric value giving the year to analyze.
#'
#' @return Called for its side effect: a geographic plot of the state with
#'   one point per accident location. Returns NULL invisibly when the state
#'   has no accidents for that year.
#'
#' @note There will be errors if any of the arguments are absent, if they are
#'   not numeric, or if the state number does not occur in the data.
#'
#' @importFrom dplyr filter
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @examples
#' fars_map_state(39, 2014)
#'
#' @export
fars_map_state <- function(state.num, year) {
  fname <- make_filename(year)
  accidents <- fars_read(fname)
  state.num <- as.integer(state.num)

  if (!(state.num %in% unique(accidents$STATE)))
    stop("invalid STATE number: ", state.num)

  state.data <- dplyr::filter(accidents, STATE == state.num)
  if (nrow(state.data) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }

  # Coordinates outside plausible ranges are marked missing so that the
  # plotting range below ignores them
  is.na(state.data$LONGITUD) <- state.data$LONGITUD > 900
  is.na(state.data$LATITUDE) <- state.data$LATITUDE > 90

  with(state.data, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}
|
bdc17d3d445ad56dce206283e3f640478271c5be
|
90eed8ec232b53d6be041f43238129c523f429a1
|
/R/aggregate_OCN.R
|
addd70bc3e6b86fef9dae9b312bec9154b95d2de
|
[] |
no_license
|
jacquetclaire/OCNet
|
f3f991ee3ebca6cb6c1e4d30bcb2ec723ca93df0
|
6220561edabaca3838bfae2092d9b946a0da3125
|
refs/heads/master
| 2023-01-08T00:28:27.280536
| 2020-07-23T05:54:18
| 2020-07-23T05:54:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,361
|
r
|
aggregate_OCN.R
|
# aggregate_OCN ---------------------------------------------------------------
# Aggregate an OCN (as returned by landscape_OCN) into three coarser levels:
#   RN (river network): the FD pixels whose drained area A >= thrA;
#   AG (aggregated network): one node per reach between sources/confluences,
#       with reaches optionally split so that none exceeds maxReachLength;
#   SC (subcatchments): the FD pixels draining directly into each AG reach.
# Arguments:
#   OCN             - list produced by landscape_OCN (must contain FD$slope)
#   thrA            - drained-area threshold defining river-network pixels
#   streamOrderType - "Strahler" or "Shreve" ordering at the AG level
#   maxReachLength  - maximum allowed reach length before a reach is split
# Returns (invisibly) OCN with the $RN, $AG and $SC sublists filled in.
# NOTE(review): W matrices use the `spam` sparse-matrix class (@rowpointers /
# @colindices slots) -- presumably spam is attached by the package; verify.
aggregate_OCN <- function(OCN,
thrA=0.002*OCN$dimX*OCN$dimY*OCN$cellsize^2,
streamOrderType="Strahler",
maxReachLength=Inf){
if (!("slope" %in% names(OCN$FD))){
stop('Missing fields in OCN. You should run landscape_OCN prior to aggregate_OCN.')
}
if (maxReachLength < OCN$cellsize*sqrt(2)){
stop("maxReachLength cannot be smaller than OCN$cellsize*sqrt(2).")
}
#t1 <- Sys.time()
###############################
## BUILD NETWORK AT RN LEVEL ##
###############################
#print('Crop data at FD level to RN level...',quote=FALSE);
RN_mask <- as.vector(OCN$FD$A >= thrA)# RN_mask allows to sample RN-level values from matrices/vectors at FD level
RN_to_FD <- which(RN_mask) # RN_to_FD[i] is the pixel ID at the FD level of the pixel whose ID at the RN level is i
FD_to_RN <- RN_mask*cumsum(as.numeric(RN_mask)) # FD_to_RN[i] is the pixel ID at the RN level of the pixel whose ID at the FD level is i
# if pixel i at FD level doesn't belong to RN, then FD_to_RN[i]=0
Nnodes_RN <- length(RN_to_FD)
# restrict the FD adjacency matrix to river-network pixels (rows then cols)
W_RN <- OCN$FD$W[RN_mask,,drop=FALSE]
W_RN <- W_RN[,RN_mask,drop=FALSE]
Outlet_RN <- FD_to_RN[OCN$FD$outlet]
Outlet_RN <- Outlet_RN[Outlet_RN!=0] # remove outlets if the corresponding catchment size is lower than threshold
DownNode_RN <- numeric(Nnodes_RN)
# for (i in 1:Nnodes_RN){
#   if (!(i %in% Outlet_RN)){
#     DownNode_RN[i] <- which(W_RN[i,]==1)
#   }}
# vectorized replacement for the loop above: a row of W_RN has exactly one
# nonzero entry (its downstream node) unless it is an outlet (zero entries),
# so consecutive rowpointers differing by 1 identify the non-outlet rows
tmp <- W_RN@rowpointers
NotOutlet <- which((tmp[-1] - tmp[-length(tmp)])==1)
DownNode_RN[NotOutlet] <- W_RN@colindices
A_RN <- OCN$FD$A[RN_mask]
X_RN <- OCN$FD$X[RN_mask]
Y_RN <- OCN$FD$Y[RN_mask]
Z_RN <- OCN$FD$Z[RN_mask]
Length_RN <- OCN$FD$leng[RN_mask]
# Drainage density
DrainageDensity_RN <- sum(Length_RN)/(OCN$dimX*OCN$dimY*OCN$cellsize^2)
# Connectivity indices at pixel level
DegreeIn <- colSums(W_RN)
DegreeOut <- rowSums(W_RN)
Confluence <- DegreeIn>1
Source <- DegreeIn==0
SourceOrConfluence <- Source|Confluence
ConfluenceNotOutlet <- Confluence&(DownNode_RN!=0)
ChannelHeads <- SourceOrConfluence #Source|ConfluenceNotOutlet
OutletNotChannelHead <- (DownNode_RN==0)&(!ChannelHeads)
IsNodeAG <- SourceOrConfluence|OutletNotChannelHead
whichNodeAG <- which(IsNodeAG)
# Calculate slope for each pixel of the river network
Slope_RN <- OCN$FD$slope[RN_mask]
#print(sprintf('Elapsed time %.2f s',difftime(Sys.time(),t1,units='secs')),quote=FALSE)
#t1 <- Sys.time()
# Upstream_RN : list containing IDs of all nodes upstream of each node (plus node itself)
Upstream_RN <- vector("list",Nnodes_RN)
Nupstream_RN <- numeric(Nnodes_RN)
for (i in 1:Nnodes_RN){
UpOneLevel <- which(DownNode_RN==i) # find reaches at one level upstream
Upstream_RN[[i]] <- UpOneLevel # add them to the list
while (length(UpOneLevel)!=0) { # continue until there are no more reaches upstream
ContinuePath <- UpOneLevel # jump 1 level above
UpOneLevel <- which(DownNode_RN %in% ContinuePath) # find reaches at one level upstream
Upstream_RN[[i]] <- c(Upstream_RN[[i]],UpOneLevel) # add them to the list
}
Upstream_RN[[i]] <- c(Upstream_RN[[i]],i)
Nupstream_RN[i] <- length(Upstream_RN[[i]])
}
# RN_to_CM[i] indicates outlet to which reach i drains
RN_to_CM <- numeric(Nnodes_RN)
for (i in 1:OCN$nOutlet){
RN_to_CM[Upstream_RN[[Outlet_RN[i]]]] <- i
}
###############################
## BUILD NETWORK AT AG LEVEL ##
###############################
# Vector that attributes reach ID to all river network pixels
#print('Define nodes of aggregated network...',quote=FALSE);
Nnodes_AG <- sum(IsNodeAG)
Length_AG <- numeric(Nnodes_AG)
RN_to_AG <- numeric(Nnodes_RN)
reachID <- 1
X_AG <- NaN*numeric(Nnodes_AG)
Y_AG <- NaN*numeric(Nnodes_AG)
Z_AG <- NaN*numeric(Nnodes_AG)
A_AG <- NaN*numeric(Nnodes_AG)
# walk downstream from every AG node (channel head), assigning all pixels
# up to (but excluding) the next AG node to the current reach; reaches
# longer than maxReachLength are split by pushing a new channel head onto
# the whichNodeAG work queue
while (length(whichNodeAG) != 0){ # explore all AG Nodes
i <- whichNodeAG[1] # select the first
RN_to_AG[i] <- reachID
j <- DownNode_RN[i]
X_AG[reachID] <- X_RN[i]
Y_AG[reachID] <- Y_RN[i]
Z_AG[reachID] <- Z_RN[i]
A_AG[reachID] <- A_RN[i]
Length_AG[reachID] <- Length_RN[i]
tmp_length <- Length_RN[i]
tmp <- NULL
j0 <- j
while (!IsNodeAG[j] && j!=0) {
tmp <- c(tmp, j)
tmp_length <- tmp_length + Length_RN[j]
j_old <- j
j <- DownNode_RN[j]}
if (tmp_length > maxReachLength){
n_splits <- ceiling(tmp_length/maxReachLength)
new_maxLength <- tmp_length/n_splits
j <- j0
while (!IsNodeAG[j] && j!=0 && Length_AG[reachID] <= new_maxLength) {
RN_to_AG[j] <- reachID
Length_AG[reachID] <- Length_AG[reachID] + Length_RN[j]
j_old <- j
j <- DownNode_RN[j]}
if (Length_AG[reachID] > new_maxLength){
j <- j_old
Length_AG[reachID] <- Length_AG[reachID] - Length_RN[j]
ChannelHeads[j] <- 1
whichNodeAG <- c(whichNodeAG,j)}
} else {
RN_to_AG[tmp] <- reachID
Length_AG[reachID] <- tmp_length
}
reachID <- reachID + 1
whichNodeAG <- whichNodeAG[-1]
}
Nnodes_AG <- length(X_AG)
#Nnodes_AG <- length(X_AG) + sum(OutletNotChannelHead) # recalculate number of nodes to account for new channel heads
# if (sum(OutletNotChannelHead)>0){
#   Length_AG[reachID:Nnodes_AG] <- 0
# }
# if thrA=0, do not perform aggregation. Every pixel is a node
# (note that with thrA=1, reaches with more than one pixel can exist)
if (thrA==0){
Nnodes_AG <- Nnodes_RN
RN_to_AG <- 1:Nnodes_RN
}
# FD_to_SC: vector of length OCN$FD$nNodes containing subcatchmentID for every pixel of the catchment
# AG_to_FD: list containing FD indices of pixels belonging to a given reach
# SC_to_FD: list containing FD indices of pixels belonging to a given subcatchment
FD_to_SC <- NaN*numeric(OCN$FD$nNodes)
# initialize FD_to_SC by attributing SC values to pixels belonging to AG level
FD_to_SC[RN_mask] <- RN_to_AG
# attribute new SC values to pixels corresponding to outlets of catchments without reaches (because the drained area of the catchment is < thrA)
Nnodes_SC <- Nnodes_AG + sum(OCN$FD$A[OCN$FD$outlet]<thrA)
FD_to_SC[OCN$FD$outlet[OCN$FD$A[OCN$FD$outlet] < thrA]] <- (Nnodes_AG+1):Nnodes_SC
IndexHeadpixel <- which(OCN$FD$A==OCN$cellsize^2) # find FD pixels corresponding to headwaters
AG_to_FD <- vector("list", Nnodes_AG)
AG_to_RN <- vector("list", Nnodes_AG)
for(i in 1:Nnodes_AG) { # attribute river network pixels to fields of the AG_to_FD list
AG_to_FD[[i]] <- RN_to_FD[which(RN_to_AG==i)]
AG_to_RN[[i]] <- which(RN_to_AG==i)
}
SC_to_FD <- AG_to_FD[1:Nnodes_AG] # initialize SC_to_FD by attributing the pixels that belong to reaches
# add pixels corresponding to outlets of catchments without reaches
if (Nnodes_SC > Nnodes_AG){
for (i in (Nnodes_AG+1):Nnodes_SC){
SC_to_FD[[i]] <- OCN$FD$outlet[OCN$FD$A[OCN$FD$outlet]<thrA][i-Nnodes_AG]
}}
# flood-fill: from every headwater pixel, walk downstream until a pixel with
# an assigned subcatchment is met, then attribute that SC to the whole path
for (i in 1:length(IndexHeadpixel)){ # i: index that spans all headwater pixels
p <- IndexHeadpixel[i] # p: ID of headwater pixel
pNew <- p; # pNew: pixel downstream of p
k <- NaN; # k: SC value of pixel pNew
sub_p <- integer(0) # sub_p is the subset of pixels downstream of pixel p
while (is.nan(k)){ # continue downstream movement until a pixel to which the SC has already been attributed is found
k <- FD_to_SC[pNew]
if (is.nan(k)){
sub_p <- c(sub_p,pNew)
pNew <- OCN$FD$downNode[pNew]
}}
FD_to_SC[sub_p] <- k
SC_to_FD[[k]] <- c(SC_to_FD[[k]],sub_p)
}
######################################
## CALCULATE PROPERTIES AT AG LEVEL ##
######################################
#print('W matrix at AG level...',quote=FALSE);
# Adjacency matrix at reach level
DownNode_AG <- numeric(Nnodes_AG)
# W_AG <- sparseMatrix(i=1,j=1,x=0,dims=c(Nnodes_AG,Nnodes_AG))
W_AG <- spam(0,Nnodes_AG,Nnodes_AG)
ind <- matrix(0,Nnodes_AG,2)
reachID <- sum(ChannelHeads) + 1
for (i in 1:Nnodes_RN){
if (DownNode_RN[i] != 0 && RN_to_AG[DownNode_RN[i]] != RN_to_AG[i]) {
DownNode_AG[RN_to_AG[i]] <- RN_to_AG[DownNode_RN[i]]
#W_AG[RN_to_AG[i],DownNode_AG[RN_to_AG[i]]] <- 1
ind[RN_to_AG[i],] <- c(RN_to_AG[i],DownNode_AG[RN_to_AG[i]])
}
# contributing area of nodes at AG level
# if (ChannelHeads[i]){
#   A_AG[RN_to_AG[i]] <- A_RN[i]
# }
}
# NOTE(review): if no row of ind has first column 0 (i.e. every reach has a
# downstream link), which() returns integer(0) and ind[-integer(0),] drops
# ALL rows; also if exactly one row remains, ind collapses to a vector.
# TODO confirm at least one outlet reach (zero row) always exists here.
ind <- ind[-which(ind[,1]==0),]
W_AG[ind] <- 1
Outlet_AG <- RN_to_AG[Outlet_RN]
# Upstream_AG : list containing IDs of all reaches upstream of each reach (plus reach itself)
Upstream_AG <- vector("list",Nnodes_AG)
Nupstream_AG <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG){
UpOneLevel <- which(DownNode_AG==i) # find reaches at one level upstream
Upstream_AG[[i]] <- UpOneLevel # add them to the list
while (length(UpOneLevel)!=0) { # continue until there are no more reaches upstream
ContinuePath <- UpOneLevel # jump 1 level above
UpOneLevel <- which(DownNode_AG %in% ContinuePath) # find reaches at one level upstream
Upstream_AG[[i]] <- c(Upstream_AG[[i]],UpOneLevel) # add them to the list
}
Upstream_AG[[i]] <- c(Upstream_AG[[i]],i)
Nupstream_AG[i] <- length(Upstream_AG[[i]])
}
# AG_to_CM[i] indicates outlet to which reach i drains
AG_to_CM <- numeric(Nnodes_AG)
for (i in 1:OCN$nOutlet){
AG_to_CM[Upstream_AG[[Outlet_AG[i]]]] <- i
}
#print(sprintf('Elapsed time %.2f s',difftime(Sys.time(),t1,units='secs')),quote=FALSE)
#t1 <- Sys.time()
#print('Stream order at AG level...',quote=FALSE)
if (streamOrderType=="Strahler"){
# calculate Strahler stream order
StreamOrder_AG <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG){
j <- order(Nupstream_AG)[i] # index that explores reaches in a downstream direction
tmp <- which(DownNode_AG==j) # set of reaches draining into j
if (length(tmp)>0){
IncreaseOrder <- sum(StreamOrder_AG[tmp]==max(StreamOrder_AG[tmp])) # check whether tmp reaches have the same stream order
if (IncreaseOrder > 1) {
StreamOrder_AG[j] <- 1 + max(StreamOrder_AG[tmp]) # if so, increase stream order
} else {StreamOrder_AG[j] <- max(StreamOrder_AG[tmp])} # otherwise, keep previous stream order
} else {StreamOrder_AG[j] <- 1} # if j is an headwater, impose StreamOrder = 1
}
} else if (streamOrderType=="Shreve"){
# calculate Shreve stream order
StreamOrder_AG <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG){
j <- order(Nupstream_AG)[i] # index that explores reaches in a downstream direction
tmp <- which(DownNode_AG==j) # set of reaches draining into j
if (length(tmp)>0){
StreamOrder_AG[j] <- sum(StreamOrder_AG[tmp])
} else {StreamOrder_AG[j] <- 1} # if j is an headwater, impose StreamOrder = 1
}
}
#print(sprintf('Elapsed time %.2f s',difftime(Sys.time(),t1,units='secs')),quote=FALSE);
#t1 <- Sys.time()
#print('Length and slope at AG level...',quote=FALSE)
# Calculate length and slopes of reaches
#Length_AG <- rep(0,Nnodes_AG)
Slope_AG <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG){
#Length_AG[i] <- sum(OCN$FD$leng[AG_to_FD[[i]]])
Slope_AG[i] <- (Slope_RN[RN_to_AG==i] %*% Length_RN[RN_to_AG==i])/Length_AG[i] # scalar product between vector of slopes and lengths of nodes at RN level belonging to reach i
}
######################################
## CALCULATE PROPERTIES AT SC LEVEL ##
######################################
#print(sprintf('Elapsed time %.2f s',difftime(Sys.time(),t1,units='secs')),quote=FALSE)
#t1 <- Sys.time()
#print('Subcatchment properties...',quote=FALSE)
# calculate subcatchment properties: Local Elevation, Local Drained Area, Upstream Area
Z_SC <- numeric(Nnodes_SC)
Alocal_SC <- numeric(Nnodes_SC)
for (i in 1:Nnodes_SC) {
Z_SC[i] <- mean(OCN$FD$Z[SC_to_FD[[i]]])
Alocal_SC[i] <- length(SC_to_FD[[i]])*OCN$cellsize^2
}
# drained area at AG level: note that the first Nnodes_AG elements of Alocal_SC correspond to subcatchments with reaches
# Areach_AG: includes the areas drained by the reaches
Areach_AG <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG) {
Areach_AG[i] <- sum(Alocal_SC[Upstream_AG[[i]]])
}
# coordinates of AG nodes considered at the downstream end of the respective edge
XReach <- numeric(Nnodes_AG)
YReach <- numeric(Nnodes_AG)
ZReach <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG){
tmp <- AG_to_RN[[i]]
ind <- which(A_RN[tmp]==max(A_RN[tmp]))
node <- tmp[ind]
XReach[i] <- X_RN[node]
YReach[i] <- Y_RN[node]
ZReach[i] <- Z_RN[node]
}
XReach[Outlet_AG] <- NaN
YReach[Outlet_AG] <- NaN
ZReach[Outlet_AG] <- NaN
# build neighbouring nodes at FD level
# find list of possible neighbouring pixels
# movement enumerates the 8 (row, col) offsets of the Moore neighbourhood
movement <- matrix(c(0,-1,-1,-1,0,1,1,1,1,1,0,-1,-1,-1,0,1),nrow=2,byrow=TRUE)
NeighbouringNodes <- vector("list", OCN$FD$nNodes)
cont_node <- 0
for (cc in 1:OCN$dimX) {
for (rr in 1:OCN$dimY) {
cont_node <- cont_node + 1
neigh_r <- rep(rr,8)+movement[1,]
neigh_c <- rep(cc,8)+movement[2,]
if (OCN$periodicBoundaries == TRUE){
neigh_r[neigh_r==0] <- OCN$dimY
neigh_c[neigh_c==0] <- OCN$dimX
neigh_r[neigh_r>OCN$dimY] <- 1
neigh_c[neigh_c>OCN$dimX] <- 1
}
NotAboundary <- neigh_r>0 & neigh_r<=OCN$dimY & neigh_c>0 & neigh_c<=OCN$dimX # only effective when periodicBoundaries=FALSE
NeighbouringNodes[[cont_node]] <- neigh_r[NotAboundary] + (neigh_c[NotAboundary]-1)*OCN$dimY
}
}
# Subcatchment adjacency matrix: find which subcatchments have borders in common
#W_SC <- sparseMatrix(i=1,j=1,x=0,dims=c(Nnodes_SC,Nnodes_SC))
W_SC <- spam(0,Nnodes_SC,Nnodes_SC)
indices <- matrix(0,Nnodes_SC,2)
for (i in 1:Nnodes_SC){
for (k in 1:length(SC_to_FD[[i]])){
ind <- SC_to_FD[[i]][k]
if (length(ind)>0) {
set <- NeighbouringNodes[[ind]]
NeighSubcatch <- FD_to_SC[set]
NeighSubcatch <- NeighSubcatch[!is.nan(NeighSubcatch)]
Border <- which(NeighSubcatch!=i)
if (length(Border)>0) {
W_SC[i,unique(NeighSubcatch[Border])] <- 1
}}
}
}
# X,Y of subcatchment centroids
X_SC <- numeric(Nnodes_SC)
Y_SC <- numeric(Nnodes_SC)
for (i in 1:Nnodes_SC){
X_SC[i] <- mean(OCN$FD$X[SC_to_FD[[i]]])
Y_SC[i] <- mean(OCN$FD$Y[SC_to_FD[[i]]])
}
######################
## EXPORT VARIABLES ##
######################
#FD level
OCN$FD[["toRN"]] <- FD_to_RN
OCN$FD[["toSC"]] <- FD_to_SC
# RN level
OCN$RN[["A"]] <- A_RN
OCN$RN[["W"]] <- W_RN
OCN$RN[["downNode"]] <- DownNode_RN
OCN$RN[["drainageDensity"]] <- DrainageDensity_RN
OCN$RN[["leng"]] <- Length_RN
OCN$RN[["nNodes"]] <- Nnodes_RN
OCN$RN[["nUpstream"]] <- Nupstream_RN
OCN$RN[["outlet"]] <- Outlet_RN
OCN$RN[["slope"]] <- Slope_RN
OCN$RN[["toFD"]] <- RN_to_FD
OCN$RN[["toAGReach"]] <- RN_to_AG
OCN$RN[["toCM"]] <- RN_to_CM
OCN$RN[["upstream"]] <- Upstream_RN
OCN$RN[["X"]] <- X_RN
OCN$RN[["Y"]] <- Y_RN
OCN$RN[["Z"]] <- Z_RN
# AG level
OCN$AG[["A"]] <- A_AG
OCN$AG[["AReach"]] <- Areach_AG
OCN$AG[["W"]] <- W_AG
OCN$AG[["downNode"]] <- DownNode_AG
OCN$AG[["leng"]] <- Length_AG
OCN$AG[["nNodes"]] <- Nnodes_AG
OCN$AG[["nUpstream"]] <- Nupstream_AG
OCN$AG[["outlet"]] <- Outlet_AG
OCN$AG[["slope"]] <- Slope_AG
OCN$AG[["streamOrder"]] <- StreamOrder_AG
OCN$AG[["ReachToFD"]] <- AG_to_FD
OCN$AG[["ReachToRN"]] <- AG_to_RN
OCN$AG[["toCM"]] <- AG_to_CM
OCN$AG[["upstream"]] <- Upstream_AG
OCN$AG[["X"]] <- X_AG
OCN$AG[["XReach"]] <- XReach
OCN$AG[["Y"]] <- Y_AG
OCN$AG[["YReach"]] <- YReach
OCN$AG[["Z"]] <- Z_AG
OCN$AG[["ZReach"]] <- ZReach
# SC level
OCN$SC[["ALocal"]] <- Alocal_SC
OCN$SC[["W"]] <- W_SC
OCN$SC[["nNodes"]] <- Nnodes_SC
OCN$SC[["toFD"]] <- SC_to_FD
OCN$SC[["X"]] <- X_SC
OCN$SC[["Y"]] <- Y_SC
OCN$SC[["Z"]] <- Z_SC
# other
OCN$thrA <- thrA
OCN$streamOrderType <- streamOrderType
OCN$maxReachLength <- maxReachLength
# re-define AG_to_RN, AG_to_FD, RN_to_AG considering AG nodes as pixels and not reaches
# (the representative pixel of a reach is the one with minimum drained area,
# i.e. the most upstream pixel of the reach)
# NOTE(review): if two pixels of a reach tie for the minimum drained area,
# the right-hand side has length > 1 and this assignment errors -- TODO
# confirm ties cannot occur along a single reach.
AG_to_FDnode <- numeric(Nnodes_AG)
AG_to_RNnode <- numeric(Nnodes_AG)
for (i in 1:Nnodes_AG){
tmpFD <- AG_to_FD[[i]]
AG_to_FDnode[i] <- tmpFD[OCN$FD$A[tmpFD]==min(OCN$FD$A[tmpFD])]
tmpRN <- AG_to_RN[[i]]
AG_to_RNnode[i] <- tmpRN[OCN$RN$A[tmpRN]==min(OCN$RN$A[tmpRN])]
}
RN_to_AGnode <- numeric(Nnodes_RN)
for (i in 1:Nnodes_AG){
RN_to_AGnode[AG_to_RNnode[i]] <- i
}
OCN$RN[["toAG"]] <- RN_to_AGnode
OCN$AG[["toFD"]] <- AG_to_FDnode
OCN$AG[["toRN"]] <- AG_to_RNnode
invisible(OCN)
}
|
73dfb32287ae1e5216ff4bd6390ad6972c90922d
|
7da718dc45c69be0dbf0409fe423f32f28151dff
|
/inst/shiny/server_5_prettyPlot/server_5_prettyPlot_toplot.R
|
7cafc99f101cd86b7ed343cd1a49ef42e27f2da8
|
[] |
no_license
|
cran/eSDM
|
ac865dd1a35268c31a17be3e20d964b1882bb850
|
35c58df0a1d89e5c501ecd55cb3608c5ebda5101
|
refs/heads/master
| 2021-06-16T10:59:20.678147
| 2021-05-04T03:50:08
| 2021-05-04T03:50:08
| 199,010,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,331
|
r
|
server_5_prettyPlot_toplot.R
|
### Code for the 'High Quality Maps' tab: adds plotting parameters to the
# reactive values. 'toplot' refers to saved maps throughout these names.
###############################################################################
### Add data to pretty plot reactive variables
# Validates the user's selections, assembles all plotting parameters for the
# selected predictions, and appends them (plus an optional SE map) to
# vals$pretty.params.toplot and vals$pretty.toplot.idx. Returns the status
# string displayed to the user.
pretty_toplot_add <- eventReactive(input$pretty_toplot_add_execute, {
  validate(
    need(pretty_models_idx_count() > 0,
         paste("Error: Please select at least one set of",
               "predictions to map"))
  )

  # Each saved map must have an ID not already in use
  if (isTruthy(vals$pretty.toplot.idx)) {
    id.used <- pretty_toplot_table()$ID
    validate(
      need(!(input$pretty_toplot_add_id %in% id.used),
           "Error: Each map must have a unique ID")
    )
  }

  validate( #!is.null() in case values are NA
    need(!is.null(input$pretty_range_xmin) &&
           !is.null(input$pretty_tick_lon_start),
         paste("Error: Please wait until the parameter inputs below",
               "have finished loading"))
  )

  if (input$pretty_addobj) { # to get check earlier
    validate(
      need(vals$pretty.addobj,
           paste("Error: Please either load additional objects or uncheck",
                 "the 'Include additional objects' box"))
    )
  }

  validate(
    need(is.numeric(pretty_map_range()) && !anyNA(pretty_map_range()),
         "Error: All map range entries must be numbers")
  )

  # Get/set plotting variables
  withProgress(message = "Processing map parameters", value = 0.3, {
    #--------------------------------------------------------------------------
    # Simpler operations; happen first in case user clicks around
    map.range <- pretty_map_range()
    background.color <- input$pretty_background_color
    pretty.id <- input$pretty_toplot_add_id

    list.legend <- pretty_legend_list()
    list.titlelab <- pretty_titlelab_list()
    list.margin <- pretty_margin_list()
    list.tick <- pretty_tick_list()
    list.idx <- list(pretty_models_idx_list())
    incProgress(0.1)

    #------------------------------------------------------
    model.toplot <- pretty_model_toplot()
    range.poly <- pretty_range_poly_func(
      pretty_map_range(), st_crs(model.toplot)
    )
    incProgress(0.1)

    if (pretty_range_360()) {
      # This is here so incProgress() can be called
      # Don't need check_preview360() because check was already done above
      incProgress(0, detail = "Processing predictions that span dateline")
      model.toplot <- pretty_model_toplot360()
      incProgress(0, detail = "")
    }

    # Only used as a validity check (contains validate())
    model.int <- pretty_int_func(
      model.toplot, range.poly, "selected predictions"
    )
    rm(model.int)
    incProgress(0.2)

    #------------------------------------------------------
    # Can be more complex operations
    incProgress(0, detail = "Processing additional objects")
    list.colorscheme <- pretty_colorscheme_list()
    list.addobj <- if (input$pretty_addobj) pretty_addobj_list() else NULL
    incProgress(0.2, detail = "")

    #--------------------------------------------------------------------------
    # Save plot parameters to reactive values
    vals$pretty.params.toplot <- c(
      vals$pretty.params.toplot,
      list(list(
        model.toplot = model.toplot, map.range = map.range,
        background.color = background.color,
        list.titlelab = list.titlelab, list.margin = list.margin,
        list.tick = list.tick,
        list.colorscheme = list.colorscheme, list.legend = list.legend,
        list.addobj = list.addobj,
        id = pretty.id, se.flag = TRUE
      ))
    )
    vals$pretty.toplot.idx <- c(vals$pretty.toplot.idx, list.idx)

    #--------------------------------------------------------------------------
    # If applicable, save a second plot of the associated SE.
    # `&&` (not `&`): this is a scalar if() condition and short-circuits
    # when the SE checkbox is not truthy.
    if (isTruthy(input$pretty_toplot_se) && any(!is.na(model.toplot$SE))) {
      #input$pretty_toplot_se is NULL if ensemble isn't selected
      incProgress(0, detail = "Creating map of the associated SE")
      se.sf <- model.toplot
      # preview360_split() is what is used in pretty_model_toplot360()
      # if (check_360(se.sf)) se.sf <- preview360_split(se.sf)
      # Seems like don't need ^ because model.toplot is already split

      validate(
        need(identical(st_geometry(model.toplot), st_geometry(se.sf)),
             paste("Error creating map of associated SE;",
                   "please report this as an issue")
        )
      )
      incProgress(0.1)

      #--------------------------------------------------
      # Update parameters as necessary
      ### Color scheme: plot the SE (or ensemble SE) column instead
      list.colorscheme.var <- list.colorscheme
      list.colorscheme.var$data.name <- ifelse(
        isTruthy(pretty_models_idx_list()[[3]]), "SE_ens", "SE"
      )
      se.vals <- st_set_geometry(se.sf, NULL)[, list.colorscheme.var$data.name]

      if (list.colorscheme.var$perc) {
        list.colorscheme.var$data.breaks <- breaks_calc(se.vals)
      } else {
        # Update min and max so the break range covers the SE values
        tmp <- list.colorscheme.var$data.breaks
        list.colorscheme.var$data.breaks[1] <- min(
          c(se.vals, tmp), na.rm = TRUE
        )
        list.colorscheme.var$data.breaks[length(tmp)] <- max(
          c(se.vals, tmp), na.rm = TRUE
        )

        # Sanity check: every SE value must fall within the break range
        tmp.check <- dplyr::between(
          se.vals, min(list.colorscheme.var$data.breaks),
          max(list.colorscheme.var$data.breaks)
        )
        validate(
          need(all(tmp.check, na.rm = TRUE),
               paste("Error creating map of associated SE;",
                     "please report this as an issue"))
        )
        rm(tmp, tmp.check)
      }

      ### Title
      list.titlelab.var <- list.titlelab
      if (list.titlelab.var$title != "") {
        list.titlelab.var$title <- paste(list.titlelab.var$title, "SE")
      }

      ### Map ID
      pretty.id.se <- paste0(pretty.id, "_SE")

      #--------------------------------------------------
      # Save SE map
      vals$pretty.params.toplot <- c(
        vals$pretty.params.toplot,
        list(list(
          model.toplot = se.sf, map.range = map.range,
          background.color = background.color,
          list.titlelab = list.titlelab.var, list.margin = list.margin,
          list.tick = list.tick,
          list.colorscheme = list.colorscheme.var, list.legend = list.legend,
          list.addobj = list.addobj,
          id = pretty.id.se, se.flag = TRUE
        ))
      )
      vals$pretty.toplot.idx <- c(vals$pretty.toplot.idx, list.idx)

    } else {
      incProgress(0.1, detail = "")
    }
  })

  # pretty.id.se only exists when the SE map was saved above;
  # inherits = FALSE so a stray global of the same name cannot interfere
  if (exists("pretty.id.se", inherits = FALSE)) {
    paste0("Saved maps '", pretty.id, "' and '", pretty.id.se, "'")
  } else {
    paste0("Saved map '", pretty.id, "'")
  }
})
###############################################################################
### Table of saved maps: one row per saved map (predictions name + user ID)
pretty_toplot_table <- reactive({
  validate(
    need(vals$pretty.toplot.idx, "No maps have been saved"),
    errorClass = "validation2"
  )

  # For each saved map, recover the display name of its source predictions;
  # exactly one of the three index slots (orig/overlaid/ensemble) is non-NULL
  pred.names <- sapply(vals$pretty.toplot.idx, function(idx.list) {
    switch(
      which(!sapply(idx.list, is.null)),
      row.names(table_orig())[idx.list[[1]]],
      row.names(table_overlaid())[idx.list[[2]]],
      row.names(table_ensembles())[idx.list[[3]]]
    )
  })
  map.ids <- sapply(vals$pretty.params.toplot, function(params) params$id)

  data.frame(
    Predictions = pred.names,
    ID = map.ids,
    stringsAsFactors = FALSE
  )
})
###############################################################################
### Remove the saved map(s) selected in the table
pretty_toplot_remove <- eventReactive(input$pretty_toplot_remove_execute, {
  req(vals$pretty.params.toplot)

  row.sel <- input$pretty_update_table_out_rows_selected
  validate(
    need(row.sel, "Error: You must select a saved map to remove")
  )

  # Drop the selected entries from both parallel lists
  vals$pretty.params.toplot <- vals$pretty.params.toplot[-row.sel]
  vals$pretty.toplot.idx <- vals$pretty.toplot.idx[-row.sel]

  # Reset to NULL when empty so downstream validate() checks fire again
  if (length(vals$pretty.params.toplot) == 0) vals$pretty.params.toplot <- NULL
  if (length(vals$pretty.toplot.idx) == 0) vals$pretty.toplot.idx <- NULL

  ""
})
###############################################################################
|
9bcc05a308328d9e85a862788e9faf7a7f8b63b7
|
15e8eddb196c8d977d138511bf96ea9503c7d071
|
/man/choice_text_from_question.Rd
|
46c0759d9a676afa13e6b9da9654d604a003c64a
|
[] |
no_license
|
leettran/QualtricsTools
|
41f83cf6e9b907152e38e1c7cd34236bc2debb20
|
3523f91a54e3bd31b9b1d8ac3456399027754c6c
|
refs/heads/master
| 2021-05-06T09:19:54.204181
| 2017-12-11T17:30:38
| 2017-12-11T17:30:38
| 114,054,095
| 1
| 0
| null | 2017-12-13T00:33:26
| 2017-12-13T00:33:26
| null |
UTF-8
|
R
| false
| true
| 1,282
|
rd
|
choice_text_from_question.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{choice_text_from_question}
\alias{choice_text_from_question}
\title{Get the Choice Text based on the Choice from a Question}
\usage{
choice_text_from_question(question, choice)
}
\arguments{
\item{question}{This is a list object representing an individual question
from a Qualtrics Survey File. The question must have a paired
response column placed into the question
under [['Responses']]. The insertion of the responses into questions is
handled by link_responses_to_questions.}
\item{choice}{A numeric value representing the choice made in a response
to the question provided. This choice can be a choice in a cell in the
response columns associated with the given question, but it can also be a
choice which was not chosen by any respondents in the responses dataframe
as long as it is a choice built into the question's construction.}
}
\description{
Input a question and a variable corresponding to a choice,
and this function returns the choice text. The function
works by determining the question type and some question properties
and then using a combination of the question's list of choices
and related values. The text is then cleaned of any HTML before
returned.
}
|
5a1bb004d97b2f6cb7ffc30d801703954959a93f
|
dec6fcd5a0329f808e2e8d980f94dd3cf6e80c55
|
/svd.R
|
fe8f457d7b94e40e3bf195b74e0fb822b14fa48a
|
[] |
no_license
|
IggyZhao/NLP-and-Time-Series-Notes-by-Iggy
|
5711707c04ef71a3a249d3590d0172d965e42366
|
cba89102bc73ae6fd6241f4cbf0bcb08a765ba03
|
refs/heads/master
| 2022-12-08T04:50:52.979765
| 2020-08-28T02:44:39
| 2020-08-28T02:44:39
| 286,923,306
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 567
|
r
|
svd.R
|
# Demonstrate the equivalence between the singular value decomposition (SVD)
# and principal component analysis (PCA) when no centering/scaling is applied.
options(digits = 3)

# Block-structured 7 x 5 example matrix with two groups of correlated columns.
A <- cbind(c(1, 3, 4, 5, 0, 0, 0), c(1, 3, 4, 5, 2, 0, 1), c(1, 3, 4, 5, 0, 0, 0),
           c(0, 0, 0, 0, 4, 5, 2), c(0, 0, 0, 0, 4, 5, 2))

# Full SVD: A = U D V', so reconstructing from all singular values recovers A.
s1 <- svd(A)
A1 <- s1$u %*% diag(s1$d) %*% t(s1$v)
s1
A1

# Lower-rank decomposition (thin SVD): keeping only the k largest singular
# values gives a rank-k approximation of A.
k <- 2
s2 <- svd(A, k, k)
D <- diag(k)
diag(D) <- s2$d[seq_len(k)]
A2 <- s2$u %*% D %*% t(s2$v)
A2

# PCA on the same matrix (no centering, no scaling).
p2 <- prcomp(A, center = FALSE, scale. = FALSE, retx = TRUE)
summary(p2)

# PCA scores equal U %*% D from the SVD.
p2$x
s1$u %*% diag(s1$d)

# PCA axes (loadings) equal V from the SVD.
p2$rotation
s1$v

# V is orthogonal: V'V = I.
t(s1$v) %*% s1$v
|
31fc30304bf1e14d158d23764051607319a623bf
|
c1a59d6d38c52662ae5c28ee653d369effc8a96d
|
/OLD-OBSOLETE-FILES/R-testing-PB.R
|
d4729337fccbedf2417abae3a0697c83794b63f1
|
[] |
no_license
|
DataScienceDuo/COMPAS
|
95f5137d4d6d5d14cad49ad4466bb57b3f1959a3
|
9a7a213ac606293939e5ae0340ad1c631309691b
|
refs/heads/master
| 2022-12-06T17:32:13.791455
| 2020-09-01T18:54:26
| 2020-09-01T18:54:26
| 268,357,852
| 0
| 1
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 2,017
|
r
|
R-testing-PB.R
|
##############################################################################################
#### OBSOLETE ALL CODE MOVED TO FILE "logit_ref_testing_pb.R" ################################
##############################################################################################
library(readr)
library(IDPmisc)
# Raw COMPAS recidivism score data (local Windows path; adjust per machine).
compas_scores_raw <- read_csv("C:/Users/pablo/OneDrive/Escritorio/Proyecto Final WozU Git/COMPAS/compas-scores-raw.csv")
View(compas_scores_raw)
compas_scores2 <- na.omit(compas_scores_raw)
# Remove NA fields with IDPmisc::NaRV.omit as well.
compas_scores3 <- NaRV.omit(compas_scores_raw)
# No change observed relative to na.omit.
# Remove rows for persons with implausible ("weird") birthdates.
compas_scores4 <-compas_scores3[!(compas_scores3$Person_ID=="51157" | compas_scores3$Person_ID=="57823"),]
compas_scores5 <-compas_scores4[!(compas_scores4$Person_ID=="62384" | compas_scores4$Person_ID=="54272"),]
# Examples of removing rows using a list/vector (kept for reference):
# install.packages("Hmisc")
# library("Hmisc")
# datawithoutVF = data[which(rownames(data) %nin% remove), ]
# datawithoutVF = data[!row.names(data)%in%remove,]
# datawithoutVF = data[ !(row.names(data) %in% remove), ]
# Libraries needed for regression (not in use YET; kept just in case for next steps).
install.packages("caret")
install.packages("e1071")
install.packages("predictmeans")
install.packages("gvlma")
install.packages("popbio")
library("car")
library("caret")
library("gvlma")
library("predictmeans")
library("e1071")
library("magrittr")
library("dplyr")
library("tidyr")
library("lmtest")
library("popbio")
# We remove columns to keep the ones that we will use as IV (independent variables).
# TODO: decide which columns to keep and whether/how to link this data with rows in other files.
##############################################################################################
#### OBSOLETE ALL CODE MOVED TO FILE "logit_ref_testing_pb.R" ################################
##############################################################################################
|
bc5bef8d5da23dacb36c4c8c85a0be9b906bbc40
|
8b9b6f22f29652151485c9f573ba09077847018a
|
/workout2/code/functions/count-vowels.R
|
d06c15d8edbf8aef48f3355dc7e2522a1ba522bd
|
[] |
no_license
|
richardjmin/stat133-hws-fall18
|
c8cdc84648fee933c149b326e9b79b6d59e59dd1
|
53e43de8468d558004e9a474452015f4089504d0
|
refs/heads/master
| 2020-03-30T14:36:45.508603
| 2018-12-03T07:38:17
| 2018-12-03T07:38:17
| 151,326,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 752
|
r
|
count-vowels.R
|
#' @title Count vowels
#' @description Counts the occurrences of each vowel ('a', 'e', 'i', 'o', 'u')
#'   in a character string, case-insensitively.
#' @param x A character string to scan (only the first element is used).
#' @return A named numeric vector of length 5 with the number of occurrences
#'   of each vowel, named "a", "e", "i", "o", "u".
count_vowels <- function(x) {
  if (!is.character(x)) {
    stop("invalid input; a string was expected")
  }
  vowels <- c('a', 'e', 'i', 'o', 'u')
  # Lowercase and split into single characters; "" pattern splits every char.
  chars <- strsplit(tolower(x), '')[[1]]
  # Vectorized tally (replaces five manual scalar counters): one comparison
  # pass per vowel; vapply names the result after the vowels themselves.
  counts <- vapply(vowels, function(v) sum(chars == v), numeric(1))
  return(counts)
}
|
47f958269ea5df2388afd26c38dc4bee85db4e4e
|
aa2b0a69212c29a03238b9b9e17a7919251e7fe6
|
/R/idx2Timeseries.R
|
ab5da82c229c3f045f8b04170f0fd6c7ec949891
|
[] |
no_license
|
vicelab/apexsensun
|
793b389ecbee02fd2aa408a6e94c3ea6d2109b51
|
4a8b789d4dcfa7f01dd02c8ae4b60a2ddefdea61
|
refs/heads/master
| 2023-03-15T23:50:47.227652
| 2018-11-15T20:06:20
| 2018-11-15T20:06:20
| 532,413,139
| 0
| 0
| null | 2022-09-04T01:42:14
| 2022-09-04T01:42:10
| null |
UTF-8
|
R
| false
| false
| 3,133
|
r
|
idx2Timeseries.R
|
#' idx2Timeseries
#' @description Extracts output time series corresponding to accepted simulation indices with a selected time window.
#' @param dwsFolderPath Folder path for .DWS output folder.
#' @param startDate Start date with format YYYY-MM-DD (e.g. "2002-01-01").
#' @param endDate End date with format YYYY-MM-DD (e.g. "2003-01-01").
#' @param captionDwsVar Variable caption as it appears in the .DWS output file.
#' @param TW Time averaging window: "day", "week", "month", "year", or any multiple of them, e.g. "3 month".
#' @param acceptedIndices An integer vector containing accepted simulation numbers, or "all"
#'   (the default) to use every simulation found in the folder.
#'
#' @return A dataframe containing aggregated time series for the selected TW
#'   (one "Date" column plus one "Sim<i>" column per accepted simulation).
#' @export
#'
#' @examples
#' \dontrun{
#' # Creating a copy of tutorial folder:
#' getExampleFolder()
#' # Creating a list for setting inputs:
#' globalInput <- inputGen()
#' # Setting parameter bounds:
#' globalInput$apexPARM$Root_growth_soil[1] = 0.15
#' globalInput$apexPARM$Root_growth_soil[2] = 0.2
#' globalInput$apexPARM$Soil_evap_plant_cover[1] = 0
#' globalInput$apexPARM$Soil_evap_plant_cover[2] = 0.5
#' # Performing Monte Carlo simulation:
#' input4SA <- APEXSENSUN::mc4APEX(globalInput)
#' # Extracting time series
#' extractedTimeseries <- idx2Timeseries(dwsFolderPath = "Example/Calculated_Outputs/DWS",
#'                                       startDate="2002-01-01", endDate="2003-01-01",
#'                                       captionDwsVar = "ET",TW="month",
#'                                       acceptedIndices=c(1,15,10,8))
#'}
#'
idx2Timeseries <- function(dwsFolderPath, startDate, endDate,
                           captionDwsVar, TW = "day", acceptedIndices = "all") {
  # Detect the root name shared by the .DWS output files.  Files are named
  # <root><simulation number>.DWS, so the shortest name matching "1.DWS",
  # minus its one-digit suffix and ".DWS" extension, gives the root.
  namesList <- list.files(path = dwsFolderPath, pattern = ".DWS")
  dwsCaptions <- list.files(path = dwsFolderPath, pattern = "1.DWS")
  shortestCharLength <- min(nchar(dwsCaptions))
  rootName <- substr(dwsCaptions[1], start = 1, stop = shortestCharLength - 5)

  # A character value (e.g. the default "all") means: use every simulation
  # found in the folder.  seq_along() is empty-safe, unlike 1:length().
  if (is.character(acceptedIndices)) {
    acceptedIndices <- seq_along(namesList)
  }

  # Use the first simulation to establish the aggregated date axis.
  originalDws <- dws2timeseries(paste0(dwsFolderPath, "/", rootName, "1", ".DWS"),
                                captionDwsVar, startDate, endDate)
  originalAggDws <- agg4timeseries(originalDws, startDate, endDate, TW)

  # Preallocate: one "Date" column plus one column per accepted simulation.
  ensembleMatrix <- array(data = NaN, dim = c(nrow(originalAggDws), length(acceptedIndices) + 1))
  colnames(ensembleMatrix) <- c("Date", paste0("Sim", acceptedIndices))
  ensembleMatrix <- as.data.frame(ensembleMatrix)
  ensembleMatrix["Date"] <- originalAggDws["Date"]

  # Read, then aggregate, each accepted simulation's time series.
  for (i in seq_along(acceptedIndices)) {
    temp_file_name <- paste0(dwsFolderPath, "/", rootName, acceptedIndices[i], ".DWS")
    temp_time_series <- dws2timeseries(temp_file_name, captionDwsVar, startDate, endDate)
    ensembleMatrix[paste0("Sim", acceptedIndices[i])] <-
      (agg4timeseries(temp_time_series, startDate, endDate, TW))[captionDwsVar]
  }
  return(ensembleMatrix)
}
|
a394013a90ef54f847b7cadac1ed093d8104e4b8
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/sourceR/R/sim_SA.R
|
2aa42358ac576b23aee781892bcf5fe946444e00
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,361
|
r
|
sim_SA.R
|
#' Simulated data: Human cases of campylobacteriosis and numbers of source samples positive for \emph{Campylobacter}.
#'
#' A simulated dataset containing the number of human cases of campylobacteriosis, the numbers of source samples
#' positive for \emph{Campylobacter} for each bacterial subtype, and the overall source prevalence.
#'
#' @format A list containing the human cases (`cases'), source samples (`sources'), prevalences (`prev') and true values (`truevals').
#'
#' \strong{cases:} data frame with 364 rows and 4 variables:
#' \describe{
#' \item{Human}{number of human cases of campylobacteriosis}
#' \item{Time}{Time id for the samples}
#' \item{Location}{Location id for the samples}
#' \item{Type}{MLST type id for the samples}
#' }
#'
#' \strong{sources:} data frame with 1092 rows and 4 variables
#' \describe{
#' \item{Count}{number of source samples positive for campylobacteriosis}
#' \item{Time}{Time id for the samples}
#' \item{Source}{Source id for the samples}
#' \item{Type}{MLST type id for the samples}
#' }
#'
#' \strong{prev:} data frame with 12 rows and 3 variables
#' \describe{
#' \item{Value}{Prevalence value (number of positive samples divided by total number of samples)}
#' \item{Time}{Time id for the samples}
#' \item{Source}{Source id for the samples}
#' }
#'
#' \strong{truevals:} list containing a long format data frame for each model parameter giving the true value of that parameter.
#' \describe{
#'   \item{alpha}{A data frame with 24 rows and 4 variables: Value contains the true alpha values,
#'   Time, Location and Source contain the time, location and source id's respectively.}
#'   \item{q}{A data frame with 91 rows and 2 variables: Value contains the true q values, and
#'   Type contains the type id's.}
#'   \item{lambda_i}{A data frame with 364 rows and 4 variables: Value contains the true lambda_i values,
#'   Time, Location and Type contain the time, location and type id's respectively.}
#'   \item{xi}{A data frame with 24 rows and 4 variables: Value contains the true xi values,
#'   Time, Location and Source contain the time, location and source id's respectively.}
#'   \item{r}{A data frame with 2184 rows and 5 variables: Value contains the true r values,
#'   Time, Type, Location and Source contain the time, type, location and source id's respectively.}
#' }
"sim_SA"
|
b7195f97c3ea1d605b827034699a495f7169e8e8
|
18f784df016eda75bdf7f8e479d97666689abe07
|
/Functions/dropdownHeaderUser.R
|
d897231d023100a92a78da189ad98c36825937cf
|
[] |
no_license
|
OpenSourceMindshare/MindshareShiny
|
46729ed12a8f871e4aee8678468ad5a79dffcd30
|
f2994ced8872aa088304ab7f630b2131e78182b0
|
refs/heads/master
| 2020-06-15T10:36:55.784273
| 2019-09-09T15:17:40
| 2019-09-09T15:17:40
| 195,274,704
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 790
|
r
|
dropdownHeaderUser.R
|
# Builds a Bootstrap-style user dropdown menu item for a dashboard header.
# `title` is shown on the toggle link; `body` fills the menu body; an
# optional `footer` adds a footer section to the menu.
dropdownHeaderUser <- function(title, body, footer = NULL) {
  # Toggle link displaying the menu title.
  toggle_link <- tags$a(
    href = "#", class = "dropdown-toggle", `data-toggle` = "dropdown",
    span(class = "user-name", title)
  )
  # The menu always contains the body; the footer item is optional.
  menu_items <- list(tags$li(class = "user-body", body))
  if (!is.null(footer)) {
    menu_items[[length(menu_items) + 1]] <- tags$li(class = "user-footer", footer)
  }
  tags$li(
    class = "dropdown user user-menu",
    toggle_link,
    do.call(tags$ul, c(list(class = "dropdown-menu"), menu_items))
  )
}
|
573d5960b112aca6e826d6575c774ddda6ec7d55
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/CARrampsOcl/R/setup2.R
|
11b4b830aaabc9859c9f6b75fa47009ef8871985
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,173
|
r
|
setup2.R
|
# Builds and compiles the "krony3" OpenCL kernel on device `dev` (kronVect, 3 factors).
# The kernel applies a three-factor Kronecker product to a vector:
#   result = kron(A, B, C) %*% y
# without ever materializing the Kronecker product.  Each work-item computes
# one output element, decomposing its linear index into per-factor row
# indices and looping over all acols*bcols*ccols combined columns.
# NOTE(review): matrices are indexed column-major (a[arow + acol * arows]),
# which matches R's storage order -- confirm against the calling code.
setup2 <- function(dev)
{
# kronVect 3Q: OpenCL C source, one string per line.
code2 <- c(
"#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
"__kernel void krony3(\n",
"__global double *result,\n",
"const unsigned int count,\n",
"__global double *a,\n",
"__global double *b,\n",
"__global double *c,\n",
"__global double *y,\n",
"const unsigned int acols,\n",
"const unsigned int arows,\n",
"const unsigned int bcols,\n",
"const unsigned int brows,\n",
"const unsigned int ccols,\n",
"const unsigned int crows)\n",
"{\n",
"// Vector element index\n",
"int nIndex = get_global_id(0);\n",
"result[nIndex] = 0;\n",
"double Csub = 0.0;\n",
"int arow = (int)(nIndex / (brows*crows) );\n",
"int brow = (int) (nIndex - arow * brows * crows) / crows;\n",
"int crow = nIndex % crows;\n",
"int acol = 0;\n",
"int bcol = 0;\n",
"int ccol = 0;\n",
"for (int i = 0; i < acols * bcols * ccols; i++) {\n",
"acol = (int)(i / (bcols*ccols));\n",
"bcol = (int) (i - acol * bcols * ccols) / ccols;\n",
"ccol = i % ccols;\n",
"Csub += ((a[arow + acol * arows] * b[brow + bcol * brows] * c[crow + ccol*crows]) * y[i]);\n",
"}\n",
"result[nIndex] = Csub ;\n",
"};\n"
)
# Compile the kernel for double precision and return the kernel handle.
oclSimpleKernel(dev[[1]], "krony3", code2, "double")
}
|
c75b371d35e7bc695dd019674c15c3000e764867
|
c094316cfa2cdac816265e40060e3081df9da781
|
/USDA_study_area.R
|
1871c09c16e04bc2ee6df0d54ea31fd856d4281e
|
[] |
no_license
|
daniellegrogan/uncategorized
|
4c6195b44e0fc850d3c95f807551d6420cf1a9fd
|
11be75a8119ffed6b2a9161808807ed0e5b48fb8
|
refs/heads/master
| 2020-12-11T15:06:25.454636
| 2020-06-02T14:32:13
| 2020-06-02T14:32:13
| 233,880,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,353
|
r
|
USDA_study_area.R
|
# Map river networks for USDA 2020 proposal
library(raster)
library(rgdal)
library(rgeos)
source("~/git_repos/WBMr/Flowlines.R")
source("~/git_repos/WBMr/mouth_ts.R")
# load network ids
# Load the flow-direction network and basin-ID rasters for the Minneapolis
# metro study area, and assign a WGS84 lon/lat CRS to the basin-ID raster.
network = raster("/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/MinnMetro.asc")
network.id = raster("/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/Minneapolis_upstream_v1_IDs.asc")
crs(network.id) <- "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84"
# Dissolve the basin-ID raster into a polygon outline of the study basin.
network.id.poly = rasterToPolygons(network.id, dissolve=T)
# Make flowlines (already generated once; kept for reference):
# flowlines(flowdir = network,
# basinID=network.id,
# uparea=raster("/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/MNRiver_v1_upstrArea.asc"),
# region=NA,
# out.loc = "/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020",
# out.name = "MNRiver_v1_flowlines")
# Read in the pre-computed flowlines shapefile.
netwk.flow = readOGR("/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/Minneapolis_upstream_v1_flowlines/", "Minneapolis_upstream_v1_flowlines")
# plot(netwk.mn.flow, add=T, col='blue')
# Identify the downstream (mouth) point of basin 1 from the upstream-area raster.
up.area=raster("/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/Minneapolis_upstream_v1_upstrArea.asc")
mouth = id_mouth(network.id, 1, up.area)
# plot(mouth.mn, pch=20, col='red', add=T)
# Binary mask of cells with upstream area > 10000 (units per the raster -- confirm).
up.area.b = up.area > 10000
up.area.b[up.area.b==0] = NA
pts.mouth = data.frame(matrix(nr=5, nc=3))  # NOTE(review): appears unused below
plot(netwk.flow)
plot(up.area.b, add=T, legend=F, col='blue')
plot(mouth, add=T, pch=19, col='black')
# Characterize network: cell count and total area (km^2 via raster::area).
n.cells = sum(as.matrix(network > -1), na.rm=T)
cell.sz = raster::area(network)
total.area = sum(as.matrix(cell.sz), na.rm=T)
# NOTE(review): metro.counties is only defined further below -- this line
# fails if the script is run top-to-bottom; verify intended run order.
test = mask(cell.sz, metro.counties)
# Minneapolis city-center point (lon/lat).
minn.city = as.data.frame(cbind(-93.2650, 44.9778))
colnames(minn.city) = c("lon", "lat")
coordinates(minn.city) <- ~ lon + lat
# Seven-county Twin Cities metro area (Minnesota is STATEFP 27).
counties = readOGR("/net/nfs/squam/raid/userdata/dgrogan/data/map_data/cb_2015_us_county_500k/", "cb_2015_us_county_500k")
counties.MN = counties[counties$STATEFP == 27, ]
metro.counties = counties.MN[counties.MN$NAME == "Anoka" |
counties.MN$NAME == "Hennepin" |
counties.MN$NAME == "Carver" |
counties.MN$NAME == "Scott" |
counties.MN$NAME == "Dakota" |
counties.MN$NAME == "Washington" |
counties.MN$NAME == "Ramsey",]
metro.counties = spTransform(metro.counties, "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84")
# Agricultural area -- wheat: irrigated + rainfed, spring + winter (2017 CDL-derived rasters).
spr.wheat.irr = raster("/net/nfs/yukon/raid0/data/CDL/National/CDL_MC_crop_frac/2017/IrrCrop1_sub2.tif")
win.wheat.irr = raster("/net/nfs/yukon/raid0/data/CDL/National/CDL_MC_crop_frac/2017/IrrCrop1_sub1.tif")
spr.wheat.rf = raster("/net/nfs/yukon/raid0/data/CDL/National/CDL_MC_crop_frac/2017/RfdCrop27_sub2.tif")
win.wheat.rf = raster("/net/nfs/yukon/raid0/data/CDL/National/CDL_MC_crop_frac/2017/RfdCrop27_sub1.tif")
spr.wheat.tot = (spr.wheat.irr + spr.wheat.rf)
win.wheat.tot = (win.wheat.irr + win.wheat.rf)
wheat.tot = (spr.wheat.tot + win.wheat.tot)
# Clip the wheat layer to the study basin, then to the metro counties,
# dropping zero cells after each clip so they do not render.
wheat.tot = mask(wheat.tot, network.id.poly)
wheat.tot = crop(wheat.tot, extent(network.id.poly))
wheat.tot[wheat.tot==0] = NA
wheat.tot = mask(wheat.tot, metro.counties)
wheat.tot = crop(wheat.tot, extent(metro.counties))
wheat.tot[wheat.tot==0] = NA
# All crops: layer 8 of the CDL cropland brick (crop fraction, per the filename).
cropland = brick("/net/nfs/yukon/raid0/data/CDL/National/CDL_crop_frac/cdl_cropland_US.nc")
cropland = subset(cropland, 8)
#cropland = crop(cropland, extent(network.id.poly))
#cropland = mask(cropland, network.id.poly)
cropland.metro = mask(cropland, metro.counties)
# Draft base-graphics map (kept for reference):
# plot(cropland.metro, legend=F, bty='n', box=F)
# plot(metro.counties, add=T)
# plot(network.id.poly, add=T)
# plot(wheat.tot>0, add=T, col='darkgreen', legend=F)
# plot(netwk.flow, add=T, col='darkblue', lwd=0.2)
# plot(mouth, add=T, col='darkblue', pch=19)
# plot(minn.city, add=T, col='Red', pch=8)
#
# plot(basin.id.poly)
# plot(netwk.flow, add=T, col='darkblue', lwd=0.2)
# plot(minn.city, add=T, col='Red', pch=8)
# Read geodatabase of WWTP (wastewater treatment plant) summaries.
fgdb <- "/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/FRS_Wastewater/CWA_summaries_060314.gdb"
# List all feature classes in a file geodatabase (also checks GDB driver support).
subset(ogrDrivers(), grepl("GDB", name))
fc_list <- ogrListLayers(fgdb)
print(fc_list)
# Read the feature class and reproject everything to WGS84 lon/lat.
fc <- readOGR(dsn=fgdb,layer="D_ILRTSP_CWA_SUMMARIES")
fc = spTransform(fc, CRSobj = "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84")
crs(network) = "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84"
# Crop to study area, then keep only publicly owned treatment works (POTW).
wwtp = crop(fc, extent(metro.counties))
#wwtp = spTransform(wwtp, "+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84")
wwtp = wwtp[metro.counties,]
wwtp.public = wwtp[wwtp$CWP_FACILITY_TYPE_INDICATOR == "POTW",]
##################################################################################################
# try mapview
#install.packages("mapview")
#library(mapview)
## try ggmap
#install.packages("ggmap")
library(ggmap)
library(broom)
library(dplyr)
# Data frame of public WWTP attributes plus their coordinates (coords.x1/coords.x2).
wwtp.public.df = data.frame(cbind(wwtp.public@data, coordinates(wwtp.public)))
# Bounding box around the metro counties, expanded by fraction f = 0.1.
sbbox <- make_bbox(lon = c(extent(metro.counties)[1], extent(metro.counties)[2]),
lat = c(extent(metro.counties)[3], extent(metro.counties)[4]),
f = .1)
test_map = get_map(location = sbbox, maptype = "watercolor", source = "stamen")
ggmap(test_map) + geom_point(data = wwtp.public.df, mapping = aes(x = coords.x1, y = coords.x2), color = "red")
# Fortify each county polygon separately so each can be drawn as its own layer.
metro.counties_df.1 <- fortify(metro.counties[1,])
metro.counties_df.2 <- fortify(metro.counties[2,])
metro.counties_df.3 <- fortify(metro.counties[3,])
metro.counties_df.4 <- fortify(metro.counties[4,])
metro.counties_df.5 <- fortify(metro.counties[5,])
metro.counties_df.6 <- fortify(metro.counties[6,])
metro.counties_df.7 <- fortify(metro.counties[7,])
###
# Convert a Raster* object to a long-format tibble of (x, y, value, variable)
# suitable for plotting with ggplot2::geom_tile().
#
# @param x A Raster* object.
# @param maxpixels Maximum number of cells to keep: the raster is regularly
#   resampled down to at most this many pixels so large rasters stay plottable.
# @return A tibble with columns x, y, value, variable (one row per cell and
#   layer); if the raster has a factor level table, its columns are joined on.
gplot_data <- function(x, maxpixels = 50000) {
  x <- raster::sampleRegular(x, maxpixels, asRaster = TRUE)
  coords <- raster::xyFromCell(x, seq_len(raster::ncell(x)))
  ## Extract values; stack() reshapes the per-layer columns to long format.
  dat <- utils::stack(as.data.frame(raster::getValues(x)))
  names(dat) <- c('value', 'variable')
  # as.tbl() is deprecated in dplyr; as_tibble() is the supported replacement.
  dat <- dplyr::as_tibble(data.frame(coords, dat))
  if (!is.null(levels(x))) {
    # Categorical raster: attach the attribute table via its ID column.
    dat <- dplyr::left_join(dat, levels(x)[[1]],
                            by = c("value" = "ID"))
  }
  dat
}
###
# World rivers layer, cropped to the bounding box and metro counties.
riv = readOGR("/net/nfs/merrimack/raid2/data/localdb/biophys/vectormaps/network/global/arcworld/shape", "arc_world_rivers")
rivers = crop(riv, extent(sbbox[1], sbbox[3], sbbox[2], sbbox[4]))
rivers = rivers[rivers$TYPE == 1,] # remove lakes
rivers = spTransform(rivers, crs(metro.counties))
rivers = rivers[metro.counties,]
# Convert to a ggplot-ready data frame, re-attaching the attribute table by id.
rivers_df = tidy(rivers, FNODE_ = "id")
rivers$id <- rownames(rivers@data)
rivers_df <- left_join(rivers_df,
rivers@data,
by = "id")
# Cropland fraction (layer 8), masked and cropped to the metro counties.
cropland = brick("/net/nfs/yukon/raid0/data/CDL/National/CDL_crop_frac/cdl_cropland_US.nc")
cropland = subset(cropland, 8)
cropland.metro = mask(cropland, metro.counties)
cropland.metro = crop(cropland.metro, extent(metro.counties))
# NOTE(review): cropland.metro was cropped but is indexed below with the
# uncropped cropland raster -- confirm the extents still line up.
cropland.metro[cropland==0] = NA
cropland.metro[cropland<0.01] = NA
cropland_data <- gplot_data(cropland.metro, maxpixels = 100000) # Choose number of pixels
cropland_data = na.omit(cropland_data)
# Minneapolis city-center point, rebuilt with an id column for plotting.
minn.city = as.data.frame(cbind(-93.2650, 44.9778))
colnames(minn.city) = c("lon", "lat")
coordinates(minn.city) <- ~ lon + lat
minn.city$id = 1
minn.city.df = data.frame(cbind(minn.city@data, coordinates(minn.city)))
# Base terrain tiles plus the layered study-area annotations.
study_area_map = get_stamenmap(bbox = sbbox, maptype = "terrain")
out.map =
ggmap(study_area_map) + # base map
geom_tile(data = cropland_data, aes(x, y, fill = value), alpha = 0.3) + # cropland raster
scale_fill_gradient(low = "greenyellow", high = "green4") + # color cropland raster
geom_path(data = rivers_df, aes(x = long, y = lat, group = group), # river lines
color='royalblue3', alpha = 0.4, lwd=1.1) +
geom_polygon(data = metro.counties_df.1, aes(x=long, y = lat), fill=NA, color='slategrey') + # county borders
geom_polygon(data = metro.counties_df.2, aes(x=long, y = lat), fill=NA, color='slategrey') +
geom_polygon(data = metro.counties_df.3, aes(x=long, y = lat), fill=NA, color='slategrey') +
geom_polygon(data = metro.counties_df.4, aes(x=long, y = lat), fill=NA, color='slategrey') +
geom_polygon(data = metro.counties_df.5, aes(x=long, y = lat), fill=NA, color='slategrey') +
geom_polygon(data = metro.counties_df.6, aes(x=long, y = lat), fill=NA, color='slategrey') +
geom_polygon(data = metro.counties_df.7, aes(x=long, y = lat), fill=NA, color='slategrey') +
geom_point(data = wwtp.public.df, mapping = aes(x = coords.x1, y = coords.x2), # WWTP points
color = "firebrick2", bg="yellow", shape = 24, cex=2) +
geom_point(data = minn.city.df, mapping = aes(x = lon, y = lat), # Minneapolis
color = "black", cex=2.1)
# Write the final study-area map to disk at 300 dpi.
ggsave(filename = "/net/nfs/squam/raid/userdata/dgrogan/Proposals/USDA_2020/study_area_riv.png",
device = "png",
plot = out.map,
dpi = 300)
#
#
#
# sq_map <- get_map(location = sbbox, maptype = "toner-lines", source = "osm")
# ggmap(sq_map) +
# inset_raster(as.raster(cropland.metro), xmin = cropland.metro@extent[1], xmax = cropland.metro@extent[2],
# ymin = cropland.metro@extent[3], ymax = cropland.metro@extent[4]) +
# geom_point(data = wwtp.public.df, mapping = aes(x = coords.x1, y = coords.x2), color = "red") +
# geom_polygon(data = metro.counties_df.1, aes(x=long, y = lat), fill=NA, color='darkblue') +
# geom_polygon(data = metro.counties_df.2, aes(x=long, y = lat), fill=NA, color='darkblue') +
# geom_polygon(data = metro.counties_df.3, aes(x=long, y = lat), fill=NA, color='darkblue') +
# geom_polygon(data = metro.counties_df.4, aes(x=long, y = lat), fill=NA, color='darkblue') +
# geom_polygon(data = metro.counties_df.5, aes(x=long, y = lat), fill=NA, color='darkblue') +
# geom_polygon(data = metro.counties_df.6, aes(x=long, y = lat), fill=NA, color='darkblue') +
# geom_polygon(data = metro.counties_df.7, aes(x=long, y = lat), fill=NA, color='darkblue')
#
#
# # leaflet
# library(leaflet)
# library(RColorBrewer)
#
#
# ## create map
# m <- leaflet() %>% addTiles()
#
# ## save html to png
# saveWidget(m, "temp.html", selfcontained = FALSE)
# webshot("temp.html", file = "Rplot.png",
# cliprect = "viewport")
#
# cropland[cropland==0] = NA
# cropland[cropland<0] = NA
#
# pal <- colorNumeric(c("#FFFFCC", "#0C2C84", "#41B6C4"), values(cropland),
# na.color = "transparent")
# pal = colorNumeric(brewer.pal(9, "Greens"), values(cropland),
# na.color = "transparent")
#
#
# # point
# minn.city = as.data.frame(cbind(-93.2650, 44.9778))
# colnames(minn.city) = c("lon", "lat")
# coordinates(minn.city) <- ~ lon + lat
#
# g1 = as.data.frame(cbind(-92.8514, 44.7443))
# colnames(g1) = c("lon", "lat")
# coordinates(g1) <- ~ lon + lat
#
#
# leaflet() %>%
# addTiles() %>% # Add default OpenStreetMap map tiles
# addRasterImage(cropland.metro, colors = pal, opacity = 0.6) %>%
# addPolygons(data = metro.counties, weight = 2, color='darkred',fillColor = "transparent") %>%
# #addPolygons(data = network.id.poly, weight = 3, fillColor = "grey") %>%
# addCircleMarkers(lng = coordinates(wwtp.public)[,1], lat = coordinates(wwtp.public)[,2], radius = 0.1, color='darkblue') %>%
# addCircleMarkers(lng = coordinates(mouth)[,1], lat = coordinates(mouth)[,2], radius = 2, color='darkred') %>%
# addLegend(pal = pal, values = values(cropland.metro),
# title = "Cropland Area")
#
#
# cropland.cell.area = raster::area(cropland.metro)
# cropland.area = sum(as.matrix(cropland.cell.area * cropland.metro), na.rm=T)
# cropland.area/total.area
#
# wheat.cell.area = raster::area(wheat.tot)
# wheat.area = sum(as.matrix(wheat.cell.area * wheat.tot), na.rm=T)
# wheat.area/total.area
# wheat.area/cropland.area
|
8004b85785fad3cc20cd218dc4cc2a879692aedf
|
4611ebd8786c13281421fb3b5383784d030f1fc1
|
/man/getDatasetFields.Rd
|
04b97d90a0b129df2a8957ac22254e829fcabbdf
|
[
"MIT"
] |
permissive
|
jimmyg3g/domorrr
|
fe2c47b6d470c4991dd8786f4e269f372b5d46d4
|
7ece5b5770eb7954bb5c810c4ea6e14ebaa272d6
|
refs/heads/master
| 2023-02-05T16:11:20.228142
| 2020-12-24T02:41:14
| 2020-12-24T02:41:14
| 318,340,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 457
|
rd
|
getDatasetFields.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\name{getDatasetFields}
\alias{getDatasetFields}
\title{Get Dataset Fields}
\usage{
getDatasetFields(dataset_id)
}
\arguments{
\item{dataset_id}{Dataset ID to query}
}
\value{
A \code{data.frame} containing the dataset metadata
}
\description{
Get the fields of a dataset by dataset ID
}
\seealso{
\url{https://developer.domo.com/docs/dataset-api-reference/dataset}
}
|
aebe00158d1f85af708818be471267f30232061c
|
372e94189bd746b981723016c9e084f0eb8d4352
|
/ml_knn2.R
|
ce626461b6185136c959e5db298bfe58139ee41f
|
[] |
no_license
|
iverni/harvardx
|
ddc73113994ec25eb04fb111e592e0e474be8eb1
|
38db3651e0c58825aadd05ef3000d2fa4884179e
|
refs/heads/master
| 2020-03-23T18:38:23.624438
| 2019-01-14T16:47:13
| 2019-01-14T16:47:13
| 141,921,916
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,158
|
r
|
ml_knn2.R
|
library(dplyr)
library(purrr)
library(rvest)
library(tidyverse)
library(stringr)
library(dslabs)
library(caret)
library(matrixStats)
# Load the tissue gene expression data (from dslabs).
data("tissue_gene_expression")

# Odd values of k to evaluate for the kNN classifier.
ks <- seq(1, 11, 2)
x <- tissue_gene_expression$x
y <- tissue_gene_expression$y

# Estimate held-out accuracy for each k.
# map_df() requires each iteration to return a list (or data frame) so the
# rows can be bound together; see https://github.com/tidyverse/purrr/issues/179
accuracy <- map_df(ks, function(k) {
  # Re-seed per iteration so every k uses the identical train/test split.
  set.seed(1)
  train_index <- createDataPartition(y, times = 1, p = 0.5, list = FALSE)
  train_x <- x[train_index,]
  test_x <- x[-train_index,]
  train_y <- y[train_index]
  test_y <- y[-train_index]
  knn_fit <- knn3(train_x, train_y, k = k)
  y_hat <- predict(knn_fit, test_x, type = "class")
  # Overall accuracy on the held-out half (the original code misleadingly
  # named this value "test_error" even though it holds accuracy).
  test_accuracy <- confusionMatrix(data = y_hat, reference = test_y, mode = "everything")$overall["Accuracy"]
  list(k = k, test_accuracy = test_accuracy)
})

plot(accuracy$k, accuracy$test_accuracy)
# Position of the k that maximizes held-out accuracy.
pos_max <- which.max(accuracy$test_accuracy)
accuracy
|
7e6208f246a8ba537b4bf989ac13350532e81925
|
f79cd4e052c5cbb24e7ef3e4bec1c39f9ce4e413
|
/BEMTOOL-ver2.5-2018_0901/src/biol/bmtALADYM/ALADYM-ver12.3-2017_0501/src/saveKobePlot.r
|
a77f34d0996926f051f3f010fe4ea21502a6410e
|
[] |
no_license
|
gresci/BEMTOOL2.5
|
4caf3dca3c67423af327a8ecb1e6ba6eacc8ae14
|
619664981b2863675bde582763c5abf1f8daf34f
|
refs/heads/master
| 2023-01-12T15:04:09.093864
| 2020-06-23T07:00:40
| 2020-06-23T07:00:40
| 282,134,041
| 0
| 0
| null | 2020-07-24T05:47:24
| 2020-07-24T05:47:23
| null |
UTF-8
|
R
| false
| false
| 7,262
|
r
|
saveKobePlot.r
|
# ALADYM Age length based dynamic model - version 12.3
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# ALADYM is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
saveKobePlot <- function(all_the_years) {
if (!IN_BEMTOOL) {
BMT_SPECIES = new_aldPopulation@scientific_name
# all_the_years = c(years, years_forecast )
}
symb_kobe <- c(1)
col_kobe <- c("blue", "black", "grey", "dark green", "purple", "blue", "black", "grey", "dark green", "purple")
plot_title <- paste("KOBE plot -", BMT_SPECIES)
plot_path <- KOBE_graph
#plot_title <- paste("[", casestudy_name, "] KOBE plot SIM", sep="")
plot_title_sub <- paste( "[years ", all_the_years[1], "-", all_the_years[length(all_the_years)],"]", sep="")
referencepoints_tbl <- read.csv(REFERENCEPOINTS_table, sep=";")
mortalities_tbl <- read.csv(MORTALITY_table, sep=";")
population_tbl <- read.csv(POPULATION_table, sep=";")
if (length(FLEETSEGMENTS_names) == 1) {
F_ <- as.numeric(as.character(mortalities_tbl[,14]) )
} else {
F_ <- as.numeric(as.character(mortalities_tbl[,(14+(length(FLEETSEGMENTS_names)*3))]) )
}
SSB_ <- as.numeric(as.character(population_tbl[,6] ))
if ((!is.na(is.na(referencepoints_tbl[3,2]) & (referencepoints_tbl[3,2]!=0) )) | (!is.na(is.na(referencepoints_tbl[3,2]) & (referencepoints_tbl[3,2]!=0) ))) {
if (!is.na(referencepoints_tbl[3,2])) {
F_Fref <- F_ / referencepoints_tbl[3,2]
} else {
F_Fref <- F_ / referencepoints_tbl[6,2]
}
if (!is.na(referencepoints_tbl[3,4])) {
SSBref <- referencepoints_tbl[3,4]*referencepoints_tbl[3,6]
} else {
SSBref <- referencepoints_tbl[6,4]*referencepoints_tbl[6,6]
}
SSB_SSBref <- SSB_ / SSBref
# read F and B ratios
#Ratios_bio = read.table(path_biological,sep=";",header=T)
#Ratios_bio <- Ratios_bio[as.character(Ratios_bio$Year) != "ALL", ]
#
#Ratios_press = read.table(path_pressure,sep=";",header=T)
#Ratios_press <- Ratios_press[as.character(Ratios_press$Year) != "ALL", ]
#
nb_species <- length(BMT_SPECIES)
vec_spe <- BMT_SPECIES
# Preparation plot before plotting species ratios
# Ylim_max=max(Ratios$F_Fref)+1
Ylim_max=max(F_Fref, na.rm=T)+1
Ylim_max <- ifelse(Ylim_max < 1,2, Ylim_max)
Ylim_min=min(F_Fref, na.rm=T)
Ylim_min <- ifelse(Ylim_min< -1,Ylim_min, -1)
Ylim =c(Ylim_min,Ylim_max)
Xlim_max = max(SSB_SSBref, na.rm=T)
Xlim_max <- ifelse(Xlim_max > 2, Xlim_max, 2)
#Xlim_min = min(as.numeric(as.character(Ratios_bio$Value[as.character(Ratios_bio$Variable) == "SSB_SSBref"])))
Xlim =c(0 ,Xlim_max)
# read population table
CI_Population <- suppressWarnings(try(read.table(paste(POPULATION_table_CI, " quantiles.csv", sep=""),header=TRUE,sep=";") ))
if (class(CI_Population) != "try-error") {
plot_title <- paste("RISK EVALUATION plot -", BMT_SPECIES)
# } else {
# plot_path <- paste(casestudy_path, "/MEY calculation/",harvest_rule_id,"/", casestudy_name, " - KOBE plot ", harvest_rule_id,".jpg", sep="")
# }
}
perc_SSB_greater_than_ref_allyears <- data.frame(matrix(0, nrow=length(all_the_years), ncol=length(BMT_SPECIES)+1))
colnames(perc_SSB_greater_than_ref_allyears) <- c("year", BMT_SPECIES)
perc_SSB_greater_than_ref_allyears$year <- all_the_years
CI_Population <- try(read.csv(paste(POPULATION_table_CI, " ", INP$nruns, " runs.csv", sep=""), sep=";") )
if (class(CI_Population) != "try-error") {
# plot_title <- "RISK EVALUATION plot"
annualSSB <- data.frame(matrix(0, nrow=INP$nruns, ncol=length(all_the_years)))
for (runi in c(1:INP$nruns)) {
annualSSB[runi,] <- CI_Population$SSB_exploited_pop[CI_Population$run==runi]
}
} else {
CI_Population <- NULL
}
if (!is.null(CI_Population)) {
annualSSB <- annualSSB/SSBref
annualSSB_TF <- data.frame(matrix(0, nrow=INP$nruns, ncol=length(all_the_years)))
annualSSB_TF[] <- ifelse(annualSSB[] < 1, 1, 0)
annualSSB_TF_year <- colSums(annualSSB_TF)
annualSSB_TF_year_percent <- annualSSB_TF_year/INP$nruns *100
perc_SSB_greater_than_ref_allyears[,2] <- annualSSB_TF_year_percent
last_year <- annualSSB_TF_year_percent[length(annualSSB_TF_year_percent)]
# perc_SSB_greater_than_ref[i] <- last_year
legend_string <- paste(" SSB < SSBref ", round(perc_SSB_greater_than_ref_allyears[nrow(perc_SSB_greater_than_ref_allyears), 2],2) , "%", sep="")
} else {
legend_string <- ""
}
#windows()
jpeg(file=plot_path, width=21, height=21, bg="white", units="cm",res=200)
par(mar=c(5, 5, 6, 10.5), xpd=T) # , sub=plot_title_sub
plot(1,0,xlim=Xlim,ylim=Ylim,type="n",xlab="SSB/SSBref",ylab="F/Fref", cex.axis=1.5, cex.lab=1.5, cex.main=1.8, main=plot_title, axes=F) # empty plot , main=plot_title
rect(0, 1, 1, Ylim_max, col="red") #xleft, ybottom, xright, ytop
rect(1, Ylim_min, Xlim_max, 1, col="green")
rect(0, Ylim_min, 1, 1, col="yellow") #overfished (bottom-left)
rect(1, 1, Xlim_max, Ylim_max, col="orange") #overfishing (top-right)
axis(1,pos=Ylim_min)
axis(2,pos=0)
#abline(v=1,lwd=1.5)
#abline(h=0,lwd=1.5)
mtext(plot_title_sub, 3, line=1, cex=1.3)
#box()
for (i in 1:nb_species) {
lines(SSB_SSBref, F_Fref,col=col_kobe[i],lwd=2.8)
points(SSB_SSBref, F_Fref,col=col_kobe[i], pch=symb_kobe[i], cex=1.3)
x_values <- SSB_SSBref
y_values <- F_Fref
str_start <- as.character(all_the_years[1]) #substring(as.character(all_the_years[1]), 3,4)
str_end <- as.character(all_the_years[length(all_the_years)]) #substring(as.character(all_the_years[length(all_the_years)]), 3,4)
text(x_values[1],y_values[1], labels=str_start,col=col_kobe[i], cex=0.8, font=2, pos=4)
text(x_values[length(all_the_years)],y_values[length(all_the_years)], labels=str_end,col=col_kobe[i], cex=0.8, font=2, pos=4)
}
#text(0.3,-1,"overfished",cex=1.2)
#text(1.3,-1,"healthy",cex=1.2)
#text(0.5,Ylim_max-0.2,"risky",cex=1.2)
#text(1.5,Ylim_max-0.2,"overfishing",cex=1.2) #,
# legend("topright", substring(BMT_SPECIES,1,17), col=col_kobe[1:length(BMT_SPECIES)], cex=1.3, pch=symb_kobe, bg="white", horiz=F, inset=c(-0.4,0), bty="n", y.intersp=1.1)
legend("top", legend_string, col=col_kobe[1:length(BMT_SPECIES)],
cex=1.1, xpd = TRUE, bg="white", horiz=F, bty="n", inset=c(0,-0.045*length(BMT_SPECIES))) #, y.intersp=1.1
dev.off()
if (!is.null(CI_Population)) {
write.table(perc_SSB_greater_than_ref_allyears, file=Uncertainty_table, sep=";", row.names=F)
}
}
}
# saveKobePlot(years, name_bioind, name_pressind)
|
7aaf9ee9467e753a856a24fbe4d1fbc951969fa1
|
f3fc4d3fa2e8e7baa5f2c6aa7bd3190e5e7a08cc
|
/enrichment.r
|
ecf8ea1384db784b86948398983f15617fdcef06
|
[] |
no_license
|
rameying/Thesis_Files
|
916aad74266988ced46614edd6368121c49ca0f3
|
ce229e2538a901081187f9b2755dc7744c4a6f32
|
refs/heads/master
| 2021-12-07T18:10:19.426914
| 2015-12-27T22:37:07
| 2015-12-27T22:37:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,463
|
r
|
enrichment.r
|
#download gene sets
# Read every file in `directory` as a tab-separated table and return them as
# an unnamed list (one data.frame per file, in list.files() order).
# `directory` must end with the path separator (e.g. "msigdb/") because the
# file name is concatenated onto it directly.
# Ragged rows are padded with fill=TRUE; each table gets as many columns as
# its widest row (determined via count.fields).
datasets <- function(directory) {
  file_list <- list.files(directory)
  # lapply replaces the original grow-a-list-with-a-manual-counter loop.
  geneSetMatrix <- lapply(file_list, function(file) {
    path <- paste0(directory, file)
    # Widest row in the file fixes the column count read.table must allocate.
    maxCol <- max(count.fields(path, sep = "\t"))
    read.table(path, header = FALSE, sep = "\t", fill = TRUE,
               col.names = seq_len(maxCol))
  })
  return(geneSetMatrix)
}
#format gene sets
#geneSets=datasets("msigdb/")
# Strip the two leading descriptor columns from each gene-set table and move
# the first column (the set name) into the row names.
# Returns an unnamed list parallel to `geneSets`.
formatGeneSets <- function(geneSets) {
  reformatted <- vector("list", length(geneSets))
  for (idx in seq_along(geneSets)) {
    current <- geneSets[[idx]]
    # Drop columns 1-2 (set name + description), keep only the gene columns.
    trimmed <- current[, -c(1:2)]
    rownames(trimmed) <- current[, 1]
    reformatted[[idx]] <- trimmed
  }
  return(reformatted)
}
#prepare rank ordered corrolations
#source("GSEAcalculater.r")
#source("DEAnalyzer.r")
#geneLogratio=orderedLogRatioOfHighOverLowExpressedGeneIn5pcOfQuartiles
#lowExpressionSamplesForGenes=quartileSampleGroupsLowExpression
#highExpressionSamplesForGenes=quartileSampleGroupsHighExpression
#dataSet=gbmGeneExpressionMatrixDESeq2Normalized
#rankedCorrolation=phynotipicCorolation(dataSet,geneLogratio,lowExpressionSamplesForGenes,highExpressionSamplesForGenes)
#calculate enrichment score
#genes=as.list(names(rankedCorrolat
#entresGeneSymbol=function(gene){
#library('biomaRt')
#mart <- useDataset("hsapiens_gene_ensembl", useMart("ensembl"))
#G_list=getBM(filters= "ensembl_gene_id", attributes= c("ensembl_gene_id","entrezgene","hgnc_symbol"),values=gene,mart= mart)
#return(G_list)
#}
#ensembolGeneIds=names(rankedCorrolation)
#entresGeneSymbolList=lapply(ensembolGeneIds,entresGeneSymbol)
#entresGeneSymbols=do.call(rbind, lapply(entresGeneSymbolList, rbind))
#entresGeneSymbolDataframe=lapply(entresGeneSymbolList,rbind.fill)
# Map Ensembl gene ids to Entrez ids via the org.Hs.eg.db annotation package.
# Returns a list subset of the ENSEMBL->EG map keyed by `ensembolIds`; ids
# with no mapping come back as NULL elements.
ensembolToEntrezId <- function(ensembolIds) {
  # requireNamespace() replaces the slow rownames(installed.packages()) scan.
  # NOTE(review): the biocLite installer is the legacy Bioconductor path,
  # superseded by BiocManager::install() on Bioconductor >= 3.8 -- kept here
  # to avoid introducing a new dependency.
  if (!requireNamespace("org.Hs.eg.db", quietly = TRUE)) {
    source("http://bioconductor.org/biocLite.R")
    biocLite("org.Hs.eg.db")
  }
  library("org.Hs.eg.db")
  entreazConversion <- as.list(org.Hs.egENSEMBL2EG)
  return(entreazConversion[ensembolIds])
}
# get the unmapped ensembole ids and chnage it to gene symbole/id then try to find entrreze id for it
#rankedEntrezIds=ensembolToEntrezId(ensembolGeneIds)
#rankedEntrezIdsNULLreplacedWithZero=replace(rankedEntrezIds, sapply(rankedEntrezIds, is.null), 0)
#names(rankedEntrezIdsNULLreplacedWithZero)=ensembolGeneIds
# Map Ensembl gene ids to HGNC symbols via biomaRt, preserving the order of
# `ensembolIds`. Ids that biomaRt does not return come back as NA.
ensembolToGeneSymbol <- function(ensembolIds) {
  # requireNamespace() replaces the slow rownames(installed.packages()) scan.
  # NOTE(review): biocLite is the legacy Bioconductor installer, superseded
  # by BiocManager::install() -- kept to avoid a new dependency.
  if (!requireNamespace("biomaRt", quietly = TRUE)) {
    source("http://bioconductor.org/biocLite.R")
    biocLite("biomaRt")
  }
  library("biomaRt")
  # Single mart handle; the original's duplicate useMart() call and unused
  # listFilters() result were dead code and have been removed.
  ensembl <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
  idToSymbol <- getBM(attributes = c("ensembl_gene_id", "hgnc_symbol"),
                      filters = "ensembl_gene_id",
                      values = ensembolIds, mart = ensembl)
  # Build a symbol vector named by Ensembl id, then reorder it to match the
  # input (getBM does not preserve the order of `values`).
  symbols <- idToSymbol[, "hgnc_symbol"]
  names(symbols) <- idToSymbol[, "ensembl_gene_id"]
  orderedSymbols <- symbols[match(ensembolIds, names(symbols))]
  names(orderedSymbols) <- ensembolIds
  return(orderedSymbols)
}
#ensembolToGeneSymbolConverted=ensembolToGeneSymbol(ensembolGeneIds)
source("GSEAcalculater.r")
#get gene sets
#allGeneSets=formatGeneSets((datasets("msigdb/")))
#singleGeneSets=allGeneSets[[1]]
#mainES=c()
#mainPermituationScore=list()
#for(i in 1:length(singleGeneSets)){
#for(i in 1:nrow(singleGeneSets)){
#mainES[i]=ES(rankedCorrolation,rankedEntrezIdsNULLreplacedWithZero,as.numeric(singleGeneSets[i,]))[[2]]
# mainPermituationScore[i]=ESsmallpermituation(rankedCorrolation,rankedEntrezIdsNULLreplacedWithZero,as.numeric(singleGeneSets[i,]),10)
#
# now calculate p-value
#for(i in 1:nrow(singleGeneSets)){
#for(i in 1:nrow(singleGeneSets)){
#mainES[i]=ES(rankedCorrolation,rankedEntrezIdsNULLreplacedWithZero,as.numeric(singleGeneSets[i,]))[[2]]
# mainPermituationScore[i]=ESsmallpermituation(rankedCorrolation,rankedEntrezIdsNULLreplacedWithZero,as.numeric(singleGeneSets[i,]),2500)
#}
# For each gene-set collection in `TotalGenesets`, run EnrichmentFisherTest
# on every set (one row per set) and collect the names of the sets whose
# p-value falls below `pvalueThreshold`.
#
# Returns a list parallel to TotalGenesets; each element is a character
# vector of enriched set names, or NULL when nothing passes the threshold.
#
# BUGFIX: the original compared against a hard-coded .05 and silently
# ignored the `pvalueThreshold` argument; it is now honoured (default 0.05
# preserves the old behaviour).
fisherEnrichmentAnalysis <- function(rankedGeneListsWithEntrezIds, lengthOfTopDEgenes,
                                     TotalGenesets, pvalueThreshold = 0.05) {
  enrichment <- vector("list", length(TotalGenesets))
  for (j in seq_along(TotalGenesets)) {
    singleGeneSets <- TotalGenesets[[j]]
    enriched <- c()
    for (i in seq_len(nrow(singleGeneSets))) {
      pval <- EnrichmentFisherTest(rankedGeneListsWithEntrezIds,
                                   lengthOfTopDEgenes,
                                   as.numeric(singleGeneSets[i, ]))
      if (pval < pvalueThreshold) {
        enriched <- c(enriched, rownames(singleGeneSets[i, ]))
      }
    }
    # Single-bracket assignment keeps a NULL `enriched` as a NULL element
    # without shortening the list.
    enrichment[j] <- list(enriched)
  }
  return(enrichment)
}
#EnrichmentResult=data.frame(KEGG_enrichment=c(enrichment[[1]],rep(NA, max.row-length(enrichment[[1]]))),miRNA_enrichment=c(enrichment[[2]],rep(NA, max.row-length(enrichment[[2]]))),tf_enrichment=c(enrichment[[3]],rep(NA, max.row-length(enrichment[[3]]))),GO_bpEnrichment=c(enrichment[[4]],rep(NA, max.row-length(enrichment[[4]]))),GOCC_enrichment=c(enrichment[[5]],rep(NA, max.row-length(enrichment[[5]]))),GOMF_enrichment=c(enrichment[[6]],rep(NA, max.row-length(enrichment[[6]]))))
#write.table(EnrichmentResult,file="enrichmentResultTable.xsl", col.names="TRUE", sep="\t")
|
ae1a152a3d899ee3025f6d6711e3998f0ccfe055
|
fa1fb90937c883315496ee982a69b29e1f59b485
|
/001_ReadRawData.r
|
cdafb5600115d2dbd22047b67c5bf77b933a1717
|
[] |
no_license
|
bishopsqueeze/k_meg
|
9a7c60cb7238d8fdca74123685cc39818c6bd71e
|
5885cec4904ad949e4a3786f564aa85e8c92a09e
|
refs/heads/master
| 2020-05-01T06:04:54.095492
| 2014-04-27T12:39:29
| 2014-04-27T12:39:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,338
|
r
|
001_ReadRawData.r
|
##------------------------------------------------------------------
## The purpose of this script is to:
## 1. Read the raw .mat data files and save them as .Rdata
##------------------------------------------------------------------

##------------------------------------------------------------------
## Load libraries
##------------------------------------------------------------------
library(R.matlab)

##------------------------------------------------------------------
## Set the working directory (save() below writes relative to it)
##------------------------------------------------------------------
matlab.path <- "/Users/alexstephens/Development/kaggle/meg/data"
setwd(matlab.path)

##------------------------------------------------------------------
## Read the raw data by converting from .mat to .Rdata
##------------------------------------------------------------------
## BUGFIX: the previous pattern ".mat" matched any character followed by
## "mat" anywhere in a file name; escape the dot and anchor at the end.
matlab.files <- dir(matlab.path, pattern = "\\.mat$")

## First file to (re)process. Kept at 13 to preserve the original
## resume-in-the-middle behaviour -- set to 1 to convert everything.
## NOTE(review): presumably files 1-12 were converted in an earlier run;
## confirm before relying on this default.
start.index <- 13

## Guard against short listings: 13:length(x) would count DOWN when fewer
## than 13 files are present.
todo <- seq_along(matlab.files)
todo <- todo[todo >= start.index]

for (i in todo) {
    tmp.file <- matlab.files[i]
    ## Same anchored pattern for the output name (gsub replaces all matches,
    ## so the unanchored ".mat" could corrupt names like "submat.mat").
    tmp.out  <- gsub("\\.mat$", ".Rdata", tmp.file)
    pathname <- file.path(matlab.path, tmp.file)

    rawdata  <- readMat(pathname)
    save(rawdata, file = tmp.out)
}
|
aa6a9cf8e06600cfdeec9fe6d73f143ac9af39e0
|
29cfeb524040f75634db376141b069ffbeb9f893
|
/Run.R
|
c3cfafb63acb95c781852cba4f3ce8037ca88128
|
[] |
no_license
|
skywalkerzwj/local-causal-discovery
|
04084e88f04e55f9f2de1e105f182e107837ea5a
|
b94073e59e4c5a9a8ea2c0db37f51073c6e48b25
|
refs/heads/master
| 2016-08-05T04:06:13.741744
| 2015-08-06T00:42:31
| 2015-08-06T00:42:31
| 39,878,729
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,430
|
r
|
Run.R
|
## Driver script for local causal discovery around the miR-200 family.
## All worker functions (prepareData, readEMT, local_discovery, ...) live in
## the sourced files below; this script is meant to be stepped through
## interactively rather than run top to bottom.
source("function.R")
source("compareResult.R")
# library(foreach)
# library(doParallel)

## Expression data with (data_lnc) and without (data) lncRNAs, scaled.
data_lnc<-prepareData("all",scaling=T,lnc=T)
data<-prepareData("all",scaling=T,lnc=F)

## Epithelial-mesenchymal transition gene list.
EMT <- readEMT("EMT-35-translated.csv")
EMT <- prepareEMT(EMT,T)

## Local structure discovery around hsa-mir-200a, comparing the MMPC and
## IAMB algorithms on both data sets (alpha = 0.01 throughout).
mmpc_local<-local_discovery(data,target="hsa-mir-200a",alpha=0.01,method = "mmpc")
mmpc_local_lnc<-local_discovery(data_lnc,target="hsa-mir-200a",alpha=0.01,method = "mmpc")
iamb_local<-local_discovery(data,target="hsa-mir-200a",alpha=0.01,method = "iamb")
iamb_local_lnc<-local_discovery(data_lnc,target="hsa-mir-200a",alpha=0.01,method = "iamb")

result <- discovery_twice(data,target="hsa-mir-200a",alpha=0.01)
result_lnc <- discovery_twice(data_lnc,target="hsa-mir-200a",alpha=0.01)

## Parallel variant, timed.
## NOTE(review): makeCluster()/registerDoParallel() require the parallel and
## doParallel packages to be attached -- presumably loaded inside
## function.R, since the library() calls above are commented out; otherwise
## uncomment them before running this section. TODO confirm.
cl<-makeCluster(2)
registerDoParallel(cl)
time<- proc.time()
twice<-discovery_twice2(data_lnc,target="hsa-mir-200c",method="mmpc",alpha=0.01)
proc.time()-time
stopCluster(cl)

## Detach the foreach parallel backend registered above (reaches into
## foreach's internal globals, so it may break across foreach versions).
unregister <- function() {
  env <- foreach:::.foreachGlobals
  rm(list=ls(name=env), pos=env)
}

# list[a,b,c]<-local_discovery(alpha=0.01,condition="all")
#
# all.001<-local_discovery(alpha=0.01,condition="all")
# all.005<-local_discovery(alpha=0.05,condition="all")
#
# cancer.005<-local_discovery(alpha=0.05,condition="Cancer")
# cancer.001<-local_discovery(alpha=0.01,condition="Cancer")
#
# normal.005<-local_discovery(alpha=0.05,condition="Normal")
# normal.001<-local_discovery(alpha=0.01,condition="Normal")
|
f271af5cabcb90838b0955389493f252a89622bd
|
65419bf941e696d9be37e7be6e8b5ed93cde2707
|
/R/print.R0.R.R
|
7d225bdba745d8e016272987775b00ad3cfb104c
|
[] |
no_license
|
tobadia/R0
|
52a777a8efb7bdfde386a241dc42fc4ef5d1508e
|
14b28a6e94afb68b6cca19f14a494a4c8beabd63
|
refs/heads/master
| 2023-04-29T01:15:22.248797
| 2023-03-13T12:01:25
| 2023-03-13T12:01:25
| 250,542,462
| 3
| 2
| null | 2023-04-13T08:22:49
| 2020-03-27T13:32:00
|
R
|
UTF-8
|
R
| false
| false
| 1,738
|
r
|
print.R0.R.R
|
# Name : print.R0.R
# Desc : A tweaked "print" function designed to print useful data on R objects
# from any of the supported estimation methods.
# Date : 2011/11/09
# Update : 2023/03/03
# Author : Boelle, Obadia
###############################################################################
#' @title
#' Print method for objects of class `R0.R`
#'
#' @description
#' Prints summary statistics for an estimated reproduction ratio.
#'
#' @details
#' For internal use.
#'
#' Displays the estimated reproduction ratio along with its confidence interval.
#' For the TD method, the time-series of \eqn{R(t)} is printed.
#'
#' @param x An estimated object, output from `est.R0.xx()` routines (class `R0.R`).
#' @param ... Parameters passed to inner functions.
#'
#' @return
#' This function does not return any data.
#' Called for side effects.
#'
#' @keywords internal
#'
#' @author Pierre-Yves Boelle, Thomas Obadia
# Function declaration
print.R0.R <- function(
    x,
    ...
)
{
  # Refuse anything that was not produced by the est.R0.xx routines.
  if (!inherits(x, "R0.R")) {
    stop("'x' must be of class 'R0.R'")
  }

  cat("Reproduction number estimate using ", x$method, " method.\n")

  estimation.code <- x$method.code
  if (estimation.code %in% c("EG", "ML", "AR")) {
    # Single point estimate; append the confidence interval when available.
    cat("R : ", x$R)
    if (!any(is.na(x$conf.int))) {
      cat("[", x$conf.int[1], ",", x$conf.int[2], "]\n")
    }
  } else if (estimation.code %in% c("TD", "SB")) {
    # Time-dependent estimates: show at most the first 10 values.
    n.shown <- min(length(x$R), 10)
    cat(x$R[1:n.shown], "...\n")
  }

  cat("\n")
}
|
f0c8617d5f53530210c030b34ab7480854b0c23c
|
e37c7764ce9056a2577f02c8bb4c987cd9ac20e6
|
/R/plotHeatmap.R
|
acaac6e1ca5a9b0d908aa205bba07fc6a4067619
|
[] |
no_license
|
mimi3421/scater
|
bb8c9c8f9d8dfe94812214471b652d6d52975d97
|
e665819cd3964f2c00433ca51ea53b977dfcc72e
|
refs/heads/master
| 2020-06-26T12:53:08.854953
| 2019-07-30T11:46:08
| 2019-07-30T11:46:08
| 199,637,381
| 0
| 0
| null | 2019-07-30T11:20:03
| 2019-07-30T11:20:02
| null |
UTF-8
|
R
| false
| false
| 6,803
|
r
|
plotHeatmap.R
|
#' Plot heatmap of gene expression values
#'
#' Create a heatmap of expression values for each cell and specified features in a SingleCellExperiment object.
#'
#' @param object A SingleCellExperiment object.
#' @param features A character vector of row names, a logical vector of integer vector of indices specifying rows of \code{object} to show in the heatmap.
#' @param columns A vector specifying the subset of columns in \code{object} to show as columns in the heatmp.
#' By default, all columns are used in their original order.
#' @param exprs_values A string or integer scalar indicating which assay of \code{object} should be used as expression values for colouring in the heatmap.
#' @param center A logical scalar indicating whether each row should have its mean expression centered at zero prior to plotting.
#' @param zlim A numeric vector of length 2, specifying the upper and lower bounds for the expression values.
#' This winsorizes the expression matrix prior to plotting (but after centering, if \code{center=TRUE}).
#' If \code{NULL}, it defaults to the range of the expression matrix.
#' @param symmetric A logical scalar specifying whether the default \code{zlim} should be symmetric around zero.
#' If \code{TRUE}, the maximum absolute value of \code{zlim} will be computed and multiplied by \code{c(-1, 1)} to redefine \code{zlim}.
#' @param color A vector of colours specifying the palette to use for mapping expression values to colours.
#' This defaults to the default setting in \code{\link[pheatmap]{pheatmap}}.
#' @param colour_columns_by A list of values specifying how the columns should be annotated with colours.
#' Each entry of the list can be of the form described by \code{?"\link{scater-vis-var}"}.
#' A character vector can also be supplied and will be treated as a list of strings.
#' @param by_exprs_values A string or integer scalar specifying which assay to obtain expression values from,
#' for colouring of column-level data - see \code{?"\link{scater-vis-var}"} for details.
#' @param by_show_single Logical scalar specifying whether single-level factors should be used for column-level colouring, see \code{?"\link{scater-vis-var}"} for details.
#' @param show_colnames Logical scalar specifying whether column names should be shown, if available in \code{object}.
#' @param ... Additional arguments to pass to \code{\link[pheatmap]{pheatmap}}.
#'
#' @details Setting \code{center=TRUE} is useful for examining log-fold changes of each cell's expression profile from the average across all cells.
#' This avoids issues with the entire row appearing a certain colour because the gene is highly/lowly expressed across all cells.
#'
#' Setting \code{zlim} preserves the dynamic range of colours in the presence of outliers.
#' Otherwise, the plot may be dominated by a few genes, which will \dQuote{flatten} the observed colours for the rest of the heatmap.
#'
#' @return A heatmap is produced on the current graphics device.
#' The output of \code{\link[pheatmap]{pheatmap}} is invisibly returned.
#'
#' @seealso \code{\link[pheatmap]{pheatmap}}
#'
#' @author Aaron Lun
#'
#' @examples
#' example(normalizeSCE) # borrowing the example objects in here.
#' plotHeatmap(example_sce, features=rownames(example_sce)[1:10])
#' plotHeatmap(example_sce, features=rownames(example_sce)[1:10],
#' center=TRUE, symmetric=TRUE)
#'
#' plotHeatmap(example_sce, features=rownames(example_sce)[1:10],
#' colour_columns_by=c("Mutation_Status", "Cell_Cycle"))
#'
#' @export
#' @importFrom DelayedArray DelayedArray
#' @importFrom DelayedMatrixStats rowMeans2
#' @importFrom viridis viridis
#' @importFrom SummarizedExperiment assay
plotHeatmap <- function(object, features, columns=NULL, exprs_values="logcounts",
    center=FALSE, zlim=NULL, symmetric=FALSE, color=NULL,
    colour_columns_by=NULL, by_exprs_values = exprs_values, by_show_single = FALSE,
    show_colnames = TRUE, ...)
{
    # Resolve `features` (names / logical / integer) to row indices, then
    # extract the corresponding assay rows.
    features_to_use <- .subset2index(features, object, byrow=TRUE)
    heat.vals <- assay(object, exprs_values, withDimnames=FALSE)[features_to_use,,drop=FALSE]
    rownames(heat.vals) <- rownames(object)[features_to_use]

    # pheatmap matches column annotations by name; fabricate index names when
    # the object has none, and suppress their display in that case.
    if (is.null(colnames(object))) {
        colnames(heat.vals) <- seq_len(ncol(object)) # otherwise downstream colouring fails.
        show_colnames <- FALSE
    } else {
        colnames(heat.vals) <- colnames(object)
    }

    # Optional column subset, applied after the naming above.
    if (!is.null(columns)) {
        heat.vals <- heat.vals[,columns,drop=FALSE]
    }

    # Centre each row at zero so colours show deviation from the per-row mean.
    if (center) {
        heat.vals <- heat.vals - rowMeans2(DelayedArray(heat.vals))
    }

    # Winsorizing to preserve the dynamic range: clamp values outside zlim so
    # a few outliers cannot flatten the colour scale.
    if (is.null(zlim)) {
        zlim <- range(heat.vals)
    }
    if (symmetric) {
        extreme <- max(abs(zlim))
        zlim <- c(-extreme, extreme)
    }
    heat.vals[heat.vals < zlim[1]] <- zlim[1]
    heat.vals[heat.vals > zlim[2]] <- zlim[2]

    # Default palette: evaluate pheatmap's own `color` default inside its
    # namespace, so this function stays in sync with pheatmap's defaults.
    if (is.null(color)) {
        color <- eval(formals(pheatmap::pheatmap)$color, envir=environment(pheatmap::pheatmap))
    }
    color.breaks <- seq(zlim[1], zlim[2], length.out=length(color)+1L)

    # Collecting variables to colour_by: build one factor + one named colour
    # scale per requested annotation field.
    if (length(colour_columns_by)) {
        column_variables <- column_colorings <- list()
        for (field in colour_columns_by) {
            colour_by_out <- .choose_vis_values(object, field, mode = "column", search = "any",
                exprs_values = by_exprs_values, discard_solo = !by_show_single)

            # Skip fields that resolve to nothing; continuous values are
            # binned into 25 intervals so they can be coloured discretely.
            if (is.null(colour_by_out$val)) {
                next
            } else if (is.numeric(colour_by_out$val)) {
                colour_fac <- cut(colour_by_out$val, 25)
            } else {
                colour_fac <- as.factor(colour_by_out$val)
            }

            # Palette choice scales with the number of levels: tableau
            # palettes up to 20 levels, viridis beyond that.
            nlevs_colour_by <- nlevels(colour_fac)
            if (nlevs_colour_by <= 10) {
                col_scale <- .get_palette("tableau10medium")
            } else if (nlevs_colour_by > 10 && nlevs_colour_by <= 20) {
                col_scale <- .get_palette("tableau20")
            } else {
                col_scale <- viridis(nlevs_colour_by)
            }
            col_scale <- col_scale[seq_len(nlevs_colour_by)]
            names(col_scale) <- levels(colour_fac)
            column_variables[[colour_by_out$name]] <- colour_fac
            column_colorings[[colour_by_out$name]] <- col_scale
        }

        # NOTE(review): annotation row names come from the FULL object even
        # when `columns` subsets the heatmap; presumably pheatmap matches by
        # name and ignores the extras -- confirm.
        column_variables <- do.call(data.frame, c(column_variables, list(row.names=colnames(object))))
    } else {
        column_variables <- column_colorings <- NULL
    }

    # Creating the heatmap as specified.
    pheatmap::pheatmap(heat.vals, color=color, breaks=color.breaks,
        annotation_col=column_variables, annotation_colors=column_colorings,
        show_colnames=show_colnames, ...)
}
|
4abe2676a7642e8dcb04095c2c094b23f014438e
|
cbeba1052ca315b741882eeb5cbfb04b35d21c14
|
/pK184_directional_integration_count_circos_for_publication.R
|
8f6bf7ccf625fb45e7b3cf2fdf2cf4f32873f476
|
[] |
no_license
|
immunoviraldesign/HIV_in_vitro_integration
|
55ee99d997e71d3f397c51aba054e0e871a4e941
|
cc7069989fc9b49affb1e5915d99145de3705941
|
refs/heads/master
| 2022-11-25T01:08:35.388178
| 2020-07-29T16:15:30
| 2020-07-29T16:15:30
| 282,979,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,135
|
r
|
pK184_directional_integration_count_circos_for_publication.R
|
## Circos-style plots of HIV integration counts around the pK184 plasmid.
## Interactive script: most sections offer ALTERNATIVE commands (marked -OR-)
## and are meant to be run one block at a time, not sourced top to bottom.
# Upload library
library(circlize)
library(grid)

# clear variables
rm(list = ls())

#Set as working directory -- Rename as appropriate for file location
setwd("~/Desktop/HIV_integration/Tables/")

#read in data table
#note that excel tables need to be converted to .txt files and with the headings removed for the first three files shown below for these commands to work
# NOTE(review): each read.table() below OVERWRITES `df`; run exactly one of
# these lines for the data set you want to plot -- they are alternatives.
df = read.table("046_insertions_directional_new_calc_complex_CIGAR.txt", header = FALSE)
df = read.table("047_insertions_directional_new_calc_complex_CIGAR.txt", header = FALSE)
df = read.table("048_insertions_directional_new_calc_complex_CIGAR.txt", header = FALSE)
df = read.table("all_samples_insertions_directional_scaled_complex_CIGAR.txt", header = TRUE)
df = read.table("concensus_integration_sites.txt", header = TRUE)

# if plotting plasmid tracks, label columns for scaled counts from single sample df: eg. 046_insertions
colnames(df) = c("vector", "position", "scaling_position_F", "scaling_position_R","F_scalar", "R_scalar",
"integration_counts_F", "shifted_counts_F", "merge_counts_F", "scaled_counts_F",
"log2_merge_counts_F", "log2_merge_scaled_F", "integration_counts_R", "shifted_counts_R",
"merge_counts_R", "scaled_counts_R", "log2_merge_counts_R", "log2_merge_scaled_R",
"added_counts", "log2_added_counts", "log2_added_scaled", "plasmid_sectors",
"sectors", "sector_position", "log2_F_minus_R", "log2_F_minus_R_scaled")
# no need to label columns from "all_samples_insertions_directional_scaled_complex_CIGAR.txt" or "concensus_integration_sites.txt"
# NOTE(review): the columns plotted further down (ln_added_both, sum, Fcount,
# Rcount, ln_added_F, ln_added_R, ln_F_minus_R) are NOT in the colnames list
# above -- presumably they come from the two header=TRUE tables; confirm the
# right df was loaded before running those sections.

# Step 1: Initialise the chart giving factor and x-axis.
circos.clear()

# Step 2: Set track guidelines for main plasmid tracks
circos.par("track.height" = 2, "start.degree" = 90, "gap.degree" = 0)
# -OR- Set track guidelines for plasmid features
circos.par("track.height" = 2, "start.degree" = 90, "gap.degree" = 0, "cell.padding" = c(0, 0, 0, 0))

# Step 3: Initialize
# for main plasmid tracks
circos.initialize(factors=df$vector, x=df$position)
# -OR- for main plotting plasmid features
circos.initialize(factors=df$sectors, x=df$sector_position)

# Step 4: Build the plot regions.
# Use these commands if building plots for plasmid features. It's easiest to plot all three rings
# then delete the ones you don't need in illustrator
circos.trackPlotRegion(factors = df$sectors, y = df$merge_counts_F, panel.fun = function(x, y) {
}, track.height = 0.05)
circos.trackPlotRegion(factors = df$sectors, y = df$merge_counts_F, panel.fun = function(x, y) {
}, track.height = 0.05)
circos.trackPlotRegion(factors = df$sectors, y = df$merge_counts_F, panel.fun = function(x, y) {
}, track.height = 0.05)

# Label the plasmid feature sectors (sector indices "b".."l" correspond to
# the `sectors` factor levels in the feature table).
circos.text(30, 1, "NEO/KAN prom.", sector.index = "b", track.index = 1, facing = "bending.outside", niceFacing = TRUE, cex = 1)
circos.text(400, 1, "NEO/KAN res.", sector.index = "d", track.index = 1, facing = "bending.outside", niceFacing = TRUE, cex = 1)
circos.text(350, 1, "origin", sector.index = "f", track.index = 1, facing = "bending.outside", niceFacing = TRUE, cex = 1)
circos.text(1, 1, "lac prom.", sector.index = "h", track.index = 1, facing = "bending.outside", niceFacing = TRUE, cex = 1)
circos.text(30, 1, "MCS", sector.index = "j", track.index = 1, facing = "bending.outside", niceFacing = TRUE, cex = 1)
circos.text(70, 1, "lacZ_a", sector.index = "l", track.index = 1, facing = "bending.outside", niceFacing = TRUE, cex = 1)

# -OR- Use these commands for building main plasmid tracks.
# Use them one at a time in the following sequence
#1. choose one command beginning with circos.trackPlotRegion
#2. choose the corresponding circos.trackLines (from step 5)
#3. repeat 1 & 2 for each step as needed if plotting multitrack circles

# for all samples composite ln-scaled values with y-axis scale
circos.trackPlotRegion(factors = df$vector, y = df$ln_added_both, panel.fun = function(x, y) {
  circos.axis(major.at = c(0, 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500))
  circos.yaxis(side = "left")
}, track.height = .6)

# for integration concensus sites with y-axis scale plotted on single track
circos.trackPlotRegion(factors = df$vector, y = df$sum, panel.fun = function(x, y) {
  circos.axis(major.at = c(0, 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500))
  circos.yaxis(side = "left")
}, track.height = .6)

# for integration concensus sites with y-axis scale plotted on multi track
circos.trackPlotRegion(factors = df$vector, y = df$sum, panel.fun = function(x, y) {
  circos.axis(major.at = c(0, 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500))
  circos.yaxis(side = "left")
}, track.height = .25)
circos.trackPlotRegion(factors = df$vector, y = df$Fcount, panel.fun = function(x, y) {
  circos.yaxis(side = "left")
}, track.height = .25)
circos.trackPlotRegion(factors = df$vector, y = df$Rcount, ylim = c(0,7), panel.fun = function(x, y) {
  circos.yaxis(side = "left")
}, track.height = .25)

# for final multitrack LN scaled values with y-axis scale
circos.trackPlotRegion(factors = df$vector, y = df$ln_added_F, panel.fun = function(x, y) {
  circos.axis(major.at = c(0, 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500))
  circos.yaxis(side = "left")
}, track.height = .25)
circos.trackPlotRegion(factors = df$vector, y = df$ln_added_R, panel.fun = function(x, y) {
  circos.yaxis(side = "left")
}, track.height = .25)
circos.trackPlotRegion(factors = df$vector, y = df$ln_F_minus_R, panel.fun = function(x, y) {
  circos.yaxis(side = "left")
}, track.height = .25)

# Step 5: Add points
# Each circos.trackLines() fills the most recently created track region
# (positions sorted so the trace runs around the circle in order).
# for all samples composite ln-scaled values with y-axis scale
circos.trackLines(df$vector, df$position[order(df$position)], df$ln_added_both[order(df$position)], col = rgb(0.1,0.5,0.8,0.3), lwd=2, type="h")
# for integration concensus sites with y-axis scale plotted on multitrack
circos.trackLines(df$vector, df$position[order(df$position)], df$sum[order(df$position)], col = rgb(0.1,0.5,0.8,0.3), lwd=2, type="h")
# for integration concensus sites with y-axis scale plotted on multitrack
circos.trackLines(df$vector, df$position[order(df$position)], df$sum[order(df$position)], col = rgb(0.8,0,0,0.3), lwd=2, type="h", baseline = 0)
circos.trackLines(df$vector, df$position[order(df$position)], df$Fcount[order(df$position)], col = rgb(0.1,0.5,0.8,0.3), lwd=2, type="h")
circos.trackLines(df$vector, df$position[order(df$position)], df$Rcount[order(df$position)], col = rgb(0.7,0.3,0.9,0.3), lwd=2, type="h")
# for final multitrack LN scaled values with axis -- plotted in blue and purple and red
circos.trackLines(df$vector, df$position[order(df$position)], df$ln_added_F[order(df$position)], col = rgb(0.1,0.5,0.8,0.3), lwd=2, type="h")
circos.trackLines(df$vector, df$position[order(df$position)], df$ln_added_R[order(df$position)], col = rgb(0.7,0.3,0.9,0.3), lwd=2, type="h")
circos.trackLines(df$vector, df$position[order(df$position)], df$ln_F_minus_R[order(df$position)], col = rgb(0.8,0,0,0.3), lwd=2, type="h", baseline = 0)
|
78214ab79d1d40451967c99423c7503e35120244
|
c47619fd21966e935458b5c24c312194d8b825d0
|
/get_GDC_project_ids.r
|
15f491cf0ef51543a619a666a4aafb82443ab53e
|
[] |
no_license
|
DrOppenheimer/Kevin_R_scripts
|
4543137d343399f1a156d634cd4ab8442a0fd168
|
d121284914a98d50308e46d426b0417ef70d40ee
|
refs/heads/master
| 2020-04-12T02:30:27.517905
| 2017-02-28T23:01:30
| 2017-02-28T23:01:30
| 47,999,492
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
get_GDC_project_ids.r
|
# Fetch the project_id of every project exposed by the NCI GDC REST API.
# Returns a character vector (possibly empty).
# NOTE(review): relies on fromJSON()/getURL() (RJSONIO/RCurl) being attached
# by the surrounding project; also the gdc-api.nci.nih.gov host is the legacy
# endpoint, since superseded by api.gdc.cancer.gov -- confirm before use.
get_GDC_project_ids <- function(){
  # First call: discover the total project count from the pagination block.
  first_call <- "https://gdc-api.nci.nih.gov/projects"
  first_call.json <- fromJSON(getURL(first_call))
  number_projects <- first_call.json$data$pagination$total

  # Second call: request all projects in a single page of that size.
  second_call <- paste0("https://gdc-api.nci.nih.gov/projects?size=", number_projects)
  second_call.json <- fromJSON(getURL(second_call))

  # One id per hit. vapply replaces the original grow-by-c() loop and, unlike
  # 1:length(hits), is safe when the hit list is empty.
  my_projects <- vapply(second_call.json$data$hits,
                        function(hit) hit$project_id,
                        character(1))
  return(my_projects)
}
|
8e78022e635f3bc6aac2ed75b59b6991ba0d454d
|
6824412cf427de215c59d743b970a803587b3ed0
|
/SynthETIC/man/get_Weibull_parameters.Rd
|
28a4ed728211b0e4a03fe7a847fba97716b103b4
|
[] |
no_license
|
minghao2016/SynthETIC
|
292969a398ac4541761049cfea22fc07a405989e
|
ad1692601909d54c0bdb00aef8a361bb5acfefba
|
refs/heads/master
| 2023-04-04T07:11:46.310061
| 2021-04-02T10:20:18
| 2021-04-02T10:20:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 683
|
rd
|
get_Weibull_parameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{get_Weibull_parameters}
\alias{get_Weibull_parameters}
\title{Estimating Weibull Parameters}
\usage{
get_Weibull_parameters(target_mean, target_cv)
}
\arguments{
\item{target_mean}{mean of the target Weibull distribution.}
\item{target_cv}{CoV of the target Weibull distribution.}
}
\description{
Returns the Weibull shape and scale parameters given the mean and the CoV
of the target Weibull distribution.
}
\examples{
get_Weibull_parameters(target_mean = 100000, target_cv = 0.60)
get_Weibull_parameters(target_mean = c(100000, 200000, 300000),
target_cv = 0.60)
}
|
5bd08479421bc921daab7eb021fada8f815bc98c
|
2ec82dd20d0b86e9b37158579ea71495d1e9fb63
|
/man/adjacencyMatFromDF.Rd
|
5c6981c0a1bf3e5e9e8fa4ae7e4ce3775d6a0cb0
|
[
"MIT"
] |
permissive
|
sverchkov/CommunityInference
|
92619dfd036c20ec3e9e2d4ce998299fc4212f70
|
1382351bde3597b01516dfde2cabc1323b75f4e9
|
refs/heads/master
| 2020-03-25T16:19:14.212537
| 2018-08-13T14:40:53
| 2018-08-13T14:40:53
| 143,925,184
| 0
| 0
|
MIT
| 2018-08-07T20:53:43
| 2018-08-07T20:46:14
| null |
UTF-8
|
R
| false
| true
| 576
|
rd
|
adjacencyMatFromDF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adjacencyMatFromDF.R
\name{adjacencyMatFromDF}
\alias{adjacencyMatFromDF}
\title{Get an adjacency matrix from an edge data frame}
\usage{
adjacencyMatFromDF(edges, nodes = NULL)
}
\arguments{
\item{edges}{a data frame with columns a, b, and (optionally) weight}
\item{nodes}{an array of the unique node IDs used in a, b (inferred if not provided)}
}
\value{
an adjacency matrix
}
\description{
Get an adjacency matrix from an edge data frame.
If no weights are provided, all weights are set to 1.
}
|
a2f91b6d8618bc42d3ddfcca9292610eb6c0efc8
|
3e19165859b69351301f683292135cba75549db6
|
/Harvard/Stats221/pset3/ykuang_ps3_task2_part2.R
|
ed6bda5d4e52cc50a637e100d5eb7d7996dde602
|
[] |
no_license
|
k-ye/OpenCourses
|
b084638e212920a831a6baf74d740dd704b9447f
|
7ac57b6fbfe1ae574f60378cf15d308e191be3eb
|
refs/heads/master
| 2021-07-04T19:50:34.040105
| 2020-04-05T09:02:14
| 2020-04-05T09:02:14
| 99,991,859
| 27
| 9
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
ykuang_ps3_task2_part2.R
|
# @pre Should create subfiles of "data/" before using
#
# Driver script: simulates a dataset and runs one of four estimation
# procedures (batch / SGD / ASGD / implicit SGD), saving the result to a
# .rda file under data/. The estimators and data.gen() come from the
# sourced util file.
#
# Command-line args: <numData> <job.id>
#   numData -- number of observations to simulate
#   job.id  -- 1 = batch, 2 = SGD, 3 = ASGD, 4 = implicit
# read args (defaults to 1e5 observations and the batch job when none given)
args <- as.numeric(commandArgs(trailingOnly = TRUE))
if (length(args) == 0) {
  args <- c(1e5, 1)
} else if (length(args) != 2) {
  stop("Not correct no. of args")
} else {
  print("args fine")
}
numData <- args[1]
job.id <- args[2]
source("ykuang_ps3_task2_part2_util.R")  # defines data.gen, batch, SGD, ASGD, Implicit
set.seed(39)  # fixed seed so every job sees the same simulated data
data <- data.gen(t=numData)
# Dispatch table: job id -> estimator / output file. Replaces the original
# four-way copy-pasted if/else "run then save" boilerplate.
if (job.id %in% 1:4) {
  estimators <- list(batch, SGD, ASGD, Implicit)
  out_files <- c("data/ps3_task2_part2_batch.rda",
                 "data/ps3_task2_part2_SGD.rda",
                 "data/ps3_task2_part2_ASGD.rda",
                 "data/ps3_task2_part2_implicit.rda")
  # The saved object must keep the name `result`; downstream load() calls
  # look it up by that name.
  result <- estimators[[job.id]](data)
  save(result, file = out_files[[job.id]])
} else {
  # Unknown job id: preserve the original behavior of echoing it.
  print(job.id)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.