blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7208201a4b9309d78b6ffe2f2c477fe59ff7c5f9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DescTools/examples/Large.Rd.R
|
931cfdfc638eeb1fabd64af20bb68e7cecc97bd9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
Large.Rd.R
|
library(DescTools)
### Name: Extremes
### Title: Kth Smallest/Largest Values
### Aliases: Large Small HighLow
### Keywords: arith
### ** Examples
# Draw a small sample with replacement and inspect the largest entries.
v <- sample(1:10, 1000, rep = TRUE)
Large(v, 3)
Large(v, k = 3, unique = TRUE)
# Scales comfortably to vectors of around one million values.
v <- runif(1000000)
Small(v, 3, unique = TRUE)
Small(v, 3, unique = FALSE)
# Report both extremes of a variable in one call.
cat(HighLow(d.pizza$temperature, na.last = NA))
|
4d7ea47ea887d718ca169aef21ff6066c75193b5
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/mDAG/R/main.R
|
4b72e70b9fa0f18e50bdfdb6b348ada32a8f8388
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,362
|
r
|
main.R
|
# Convert a (possibly weighted) adjacency matrix into an undirected graph list:
#   $arcs     : two-column character matrix ('from', 'to'), one row per
#               non-zero entry of `skeleton`
#   $nodes    : per-variable list holding each node's neighbourhood ($nbr),
#               i.e. the sources of arcs pointing into the node
#   $skeleton : the input matrix with row/column names set to the data columns
#
# Args:
#   skeleton: numeric p x p matrix; a non-zero [i, j] entry denotes arc i -> j
#   data:     data frame whose column names label the p variables
MatrixtoGraph_undirected=function(skeleton,data){
  vars <- colnames(data)
  graph <- list()
  idx <- which(skeleton != 0, arr.ind = TRUE)
  # BUG FIX: the original t(apply(idx, 1, ...)) errors when the skeleton has
  # no edges (apply over zero rows). vars[idx] linearises the n x 2 index
  # matrix column-major, so matrix(ncol = 2) rebuilds the arc list safely
  # for zero, one, or many arcs.
  arcs <- matrix(vars[idx], ncol = 2, dimnames = list(NULL, c('from', 'to')))
  graph$arcs <- arcs
  for (i in seq_len(ncol(data))) {
    # Neighbours of node i: the 'from' side of every arc ending at node i.
    nbr <- arcs[arcs[, 2] == vars[i], 1]
    graph$nodes[[vars[i]]]$nbr <- nbr
  }
  colnames(skeleton) <- rownames(skeleton) <- vars
  graph$skeleton <- skeleton
  return(graph)
}
# Convert a (possibly weighted) adjacency matrix into a directed graph list:
#   $arcs     : two-column character matrix ('from', 'to')
#   $nodes    : per-variable list with $parents (arcs into the node),
#               $children (arcs out of the node) and $nbr (union of both)
#   $skeleton : the input matrix with row/column names set to the data columns
#
# Args:
#   skeleton: numeric p x p matrix; a non-zero [i, j] entry denotes arc i -> j
#   data:     data frame whose column names label the p variables
MatrixtoGraph_directed=function(skeleton,data){
  vars <- colnames(data)
  graph <- list()
  idx <- which(skeleton != 0, arr.ind = TRUE)
  # BUG FIX: the original t(apply(idx, 1, ...)) errors on an edge-free
  # skeleton; rebuilding via matrix indexing handles 0..n arcs uniformly.
  arcs <- matrix(vars[idx], ncol = 2, dimnames = list(NULL, c('from', 'to')))
  graph$arcs <- arcs
  for (i in seq_len(ncol(data))) {
    parents <- arcs[arcs[, 2] == vars[i], 1]
    children <- arcs[arcs[, 1] == vars[i], 2]
    graph$nodes[[vars[i]]]$parents <- parents
    graph$nodes[[vars[i]]]$children <- children
    # A node's neighbourhood is its parents and children, de-duplicated.
    graph$nodes[[vars[i]]]$nbr <- union(parents, children)
  }
  colnames(skeleton) <- rownames(skeleton) <- vars
  graph$skeleton <- skeleton
  return(graph)
}
# Estimate an undirected skeleton via a mixed graphical model (mgm), binarise
# the returned edge weights, forbid SNP-SNP edges, and wrap the result as a
# graph list.
#
# Args:
#   data, type, level, lambdaGam, lambdaSel, ruleReg, alphaSel, threshold:
#     passed straight through to pair_fast_mgm()
#   SNP:     0/1 indicator vector marking which columns of `data` are SNPs
#   weights: accepted for interface compatibility; not used here
# Returns: an undirected graph list (see MatrixtoGraph_undirected).
mgm_skeleton=function(data = data, type = type, level = level, SNP = SNP ,lambdaGam = lambdaGam,
                      lambdaSel = lambdaSel, ruleReg=ruleReg, alphaSel = alphaSel, threshold=threshold, weights){
  edgeWeights=pair_fast_mgm(data = data,
                            type = type,
                            level = level,lambdaGam = lambdaGam,
                            lambdaSel = lambdaSel,ruleReg=ruleReg,alphaSel = alphaSel,threshold=threshold )
  # Binarise in one step: any positive weight becomes 1, otherwise 0.
  # (Replaces the original T/F comparisons plus two-pass reassignment.)
  edgeWeights <- (edgeWeights > 0) + 0
  # We don't allow edges among SNPs.
  SNP_ind <- which(SNP == 1)
  edgeWeights[SNP_ind, SNP_ind] <- 0
  return(MatrixtoGraph_undirected(edgeWeights, data))
}
# Run the (modified) PEN-stable PC skeleton search and return both the
# estimated graph (in list form) and the raw skeleton object for inspection.
penpc_skeleton=function(data,type,level,edgeWeights,indepTest=ConditionalTestPermute,nperm=nperm,alpha=alpha){
  # Sufficient statistics handed to the conditional independence test.
  suff <- list(
    'dat_in' = data,
    'type'   = type,
    'level'  = level,
    C        = cor(data),
    n        = nrow(data),
    'nperm'  = nperm
  )
  skel_fit <- skeletonPENstable_modified(suff, indepTest, p = as.integer(ncol(data)), alpha = alpha,
                                         edgeWeights, verbose = FALSE)
  # Extract the adjacency matrix from the pcalg-style fit object.
  adj <- wgtMatrix(skel_fit@graph)
  return(list("graph" = MatrixtoGraph_undirected(adj, data), "penpcskel" = skel_fit))
}
# Orient the edges of an estimated undirected skeleton via greedy search.
#
# Args:
#   data, type, level, SNP: as elsewhere in this package
#   result:  p x p matrix; non-zero entries are the undirected skeleton edges
#   weights: per-observation weights forwarded to GreedySearch()
# Returns: a directed graph list (see MatrixtoGraph_directed).
greedysearch_orientation=function(data,type,level,SNP,result,weights=rep(1, nrow(data))){
  vars <- colnames(data)
  estimateundirect <- which(result != 0, arr.ind = TRUE)
  # Arc list as a 'from'/'to' character matrix (safe for 0..n arcs).
  arcs <- matrix(vars[estimateundirect], ncol = 2, dimnames = list(NULL, c('from', 'to')))
  # BUG FIX: the original used sapply(), which silently simplifies to a
  # matrix whenever every node happens to have the same number of
  # neighbours; find_nbr[[i]] then returned a single name instead of the
  # node's full neighbour vector. lapply() always yields a list of vectors.
  find_nbr <- lapply(seq_len(ncol(data)), function(i)
    vars[(estimateundirect[which(estimateundirect[, 1] == i), 2])])
  # Scaffold in the bnlearn-like shape expected by GreedySearch().
  rst <- list('learning' = NULL, 'nodes' = list(), 'arcs' = arcs)
  rst$learning <- list('whitelist' = NULL, 'blacklist' = NULL, 'test' = 'cor', 'optimized' = FALSE, 'ntests' = NULL, 'algo' = 'mgmPenPC')
  for (i in seq_len(ncol(data))) {
    mb <- find_nbr[[i]]
    nbr <- find_nbr[[i]]
    nbr_index <- which(vars %in% nbr)
    # Parents/children start empty; greedy search fills in orientations.
    parents <- character(0)
    children <- character(0)
    rst$nodes[[vars[i]]] <- list('mb' = mb, 'nbr' = nbr, 'nbr_index' = nbr_index, 'parents' = parents, 'children' = children)
  }
  rst$skeleton <- result
  skeleton <- GreedySearch(data, type, level, SNP, rst, weights)
  return(MatrixtoGraph_directed(skeleton, data))
}
|
db425e355f3825bb59c0ad7a7cf531df53ef9b84
|
4e50d2345a2cfeb3c9ecb02187f88e753d1ed83c
|
/bin/02.taxonomy/pca.R
|
0a471acfa3628136d58a38a671fe75370796de1b
|
[] |
no_license
|
ms201420201029/real_metagenome_pipeline
|
7c7b54e5e8a798933387f960256ebb849e9c2200
|
e8f0b188f21975305565d06e4fefe4f4c9adc1f7
|
refs/heads/master
| 2020-04-05T06:41:34.349662
| 2018-12-04T05:48:52
| 2018-12-04T05:48:52
| 156,646,750
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,345
|
r
|
pca.R
|
# pca.R — PCA of an abundance/profile table, with a group-coloured sample
# scatter plot plus marginal boxplots of the first two components.
# Usage: Rscript pca.R <profile.tsv> <group.tsv> <out.pdf>
#
# NOTE(review): commandArgs("T") passes the string "T" as `trailingOnly`,
# which R happens to coerce to TRUE; commandArgs(trailingOnly = TRUE)
# would be the explicit, conventional form.
args <- commandArgs("T")
# Profile table: features as rows, samples as columns.
profile <- read.table(args[1], head = T, check.names= F)
# Group file: column 1 = sample name (becomes row names), column 2 = group label.
group <- read.table(args[2], head = F, check.names = F, row.names = 1, col.names = c("", "group.name"))
library(ade4)
# PCA on samples (transpose so rows = samples); keep the first two axes.
profile.pca = dudi.pca(t(profile), scannf = F, nf = 2)
pdf(args[3]);
# Panel layout: main PCA scatter (1), PC1 boxplot below (2), PC2 boxplot right (3).
layout(matrix(c(1,1,1,3,
1,1,1,3,
1,1,1,3,
2,2,2,0), byrow = T, ncol = 4));
#col.sample <- topo.colors(length(attr(sample.list,"levels")));
# Fixed palette — supports at most 5 groups; extend if more are expected.
col.sample <- c("orange", "royalblue", "seagreen", "orchid", "lightpink")
par(mar = c(5, 5, 5, 5));
# Proportion of variance explained per axis, used in the axis labels.
pca.eig <- signif(profile.pca$eig / sum(profile.pca$eig), digits = 4);
# Point size shrinks as the number of samples grows.
if (ncol(profile) <= 10) {
pca.cex <- 4
}else if (ncol(profile) <= 30){
pca.cex <- 2
}else {
pca.cex <- 1
}
# Main PCA scatter, coloured by group membership.
plot(profile.pca$li, pch = 20, col = col.sample[as.numeric(group[, 1])], cex = pca.cex,
main = paste("PCA"),
xlab = paste("PCA1: ", pca.eig[1] * 100, "%", sep = ""),
ylab = paste("PCA2: ", pca.eig[2] * 100, "%", sep = ""))
# For small data sets, additionally label each sample point by name.
if (ncol(profile) <= 10) {
for (i in 1 : ncol(profile)){
text(x = profile.pca$li[i, 1], y = profile.pca$li[i, 2], labels = colnames(profile)[i], xpd = T)
}
}
par(mar = c(3, 5, 2, 5));
# Bottom panel: horizontal boxplot of PC1 scores by group.
boxplot(profile.pca$li[,1] ~ group[, 1], pch = 20, col = col.sample, notch = F, horizontal = T);
par(mar = c(5, 2, 5, 3));
# Right panel: vertical boxplot of PC2 scores by group.
boxplot(profile.pca$li[,2] ~ group[, 1], pch = 20, col = col.sample, notch = F);
dev.off();
|
72c58fc59d33389ce7ab33ef6a57dbb27f732fb1
|
bee8ce64fbb1714b18a80ed1d8deda2a006aef29
|
/code/fmd-vnt-only.R
|
f117f902eea66197ea95571dfd5d08c17f3c7408
|
[] |
no_license
|
GustafRydevik/hindcasting-fmd
|
722fed853f202045df9aa24af5b246ff70d06361
|
dbad4798d72088b12285037234999b5ddedb08b7
|
refs/heads/master
| 2021-01-17T06:12:36.580948
| 2017-12-07T06:57:32
| 2017-12-07T06:57:32
| 53,070,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,403
|
r
|
fmd-vnt-only.R
|
# fmd-vnt-only.R — Fit a Stan hindcasting model for FMD using VNT titres,
# then compare predicted time-since-outbreak (monlast) for held-out herds
# against the observed values.
#
# NOTE(review): this script assumes an upstream script has already loaded
# bronsvoort_training_data_clean, herd_monlast, and the dplyr/rstan/ggplot2
# packages — confirm against the project's run order.
output.path<-"./output"
code.path<-"./code"
function.path<-"./code/functions"
textdata.path<-"./textdata"
binarydata.path<-"./binarydata"
#bronsvoort_training_data
# Drop animals with a zero cELISA reading or any missing VNT serotype titre.
bronsvoort_training_data_clean<-bronsvoort_training_data_clean[bronsvoort_training_data_clean$FMD_cELISA!=0,]
bronsvoort_training_data_clean<-bronsvoort_training_data_clean[!(is.na(bronsvoort_training_data_clean$FMD_VNT_O)|is.na(bronsvoort_training_data_clean$FMD_VNT_SAT2)|is.na(bronsvoort_training_data_clean$FMD_VNT_A)),]
###Splitting up the data into prediction and estimation
set.seed(1337)
# Hold out 15 herds for prediction; the remainder is used for estimation.
herd_pred<-sort(sample(herd_monlast$hcode,15))
herd_est<-sort(setdiff(herd_monlast$hcode,herd_pred))
ind_df_est<-subset(bronsvoort_training_data_clean,hcode%in%herd_est)
herd_df_est<-subset(herd_monlast,hcode%in%herd_est)
# Re-index estimation herds 1..He for Stan.
herd_df_est$hcode_stan_est<-1:nrow(herd_df_est)
ind_df_est<-left_join(ind_df_est,herd_df_est,by="hcode")
ind_df_pred<-subset(bronsvoort_training_data_clean,hcode%in%herd_pred)
herd_df_pred<-subset(herd_monlast,hcode%in%herd_pred)
# Re-index prediction herds 1..Hp for Stan.
herd_df_pred$hcode_stan_pred<-1:nrow(herd_df_pred)
ind_df_pred<-left_join(ind_df_pred,herd_df_pred,by="hcode")
###Creating predictive and estimating data lists
# NOTE(review): `<<-` inside with() deliberately writes dat_est into the
# global environment; a plain assignment outside with() would be clearer.
with(ind_df_est,
dat_est <<- list(
probang = Probang,
age = age,
vnt = VNTAny,
elisa_obs=FMD_cELISA,
hcode = hcode_stan_est,
# Use the maximum titre across the three serotypes as the VNT response.
vnt_obs=pmax(FMD_VNT_SAT2,FMD_VNT_O,FMD_VNT_A)
))
# Suffix every element so estimation/prediction data can share one list.
names(dat_est)<-paste(names(dat_est),"_est",sep="")
herd_list_est<-list(
He=nrow(herd_df_est),
herd_est=herd_df_est$hcode_stan_est,
monlast_est = herd_df_est$monlast
)
dat_est<-c(dat_est,
herd_list_est,
list(Ne=nrow(ind_df_est)))
# Same construction for the held-out (prediction) herds.
with(
ind_df_pred,
dat_pred <<- list(
probang = Probang,
age = age,
vnt = VNTAny,
elisa_obs=FMD_cELISA,
hcode = hcode_stan_pred,
vnt_obs=pmax(FMD_VNT_SAT2,FMD_VNT_O,FMD_VNT_A)
)
)
names(dat_pred)<-paste(names(dat_pred),"_pred",sep="")
herd_list_pred<-list(
Hp=nrow(herd_df_pred),
herd_pred=herd_df_pred$hcode_stan_pred
)
dat_pred<-c(dat_pred,
herd_list_pred,
list(Np=nrow(ind_df_pred)))
##Creating separate
# Combined data list for Stan; p = 5 appears to be a model dimension —
# TODO confirm against fmd-vnt-only.stan.
dat_model<-c(dat_est,dat_pred,p=5)
fileName <- file.path(code.path,"fmd-vnt-only.stan")
# Fit: 5 chains x 10k iterations (5k warmup), thinned by 10.
resStan <- stan(fileName, data = dat_model,
chains =5,cores=5 ,iter = 10000, warmup = 5000, thin = 10,control = list(adapt_delta = 0.8))
# Mixing/convergence diagnostics for the titre-decay parameters.
pairs(resStan,pars=c("decay_start","decay_scale","decay_asym","sigma"))
traceplot(resStan,pars=c("decay_start","decay_scale","decay_asym","sigma"))
traceplot(resStan, pars = c("monlast_pred"), inc_warmup = TRUE)
# Summarise observed VNT per held-out herd for comparison with predictions.
herd_df_pred<-left_join(herd_df_pred,ind_df_pred%>%group_by(hcode)%>%summarise(vnt_mean=mean(FMD_VNT_SAT2),vnt_sd=sd(FMD_VNT_SAT2)),by="hcode")
#plot(herd_df_pred$vnt_mean,summary(resStan,"monlast_pred")$summary[,"mean"])
# Posterior summary of monlast for each held-out herd, joined with the
# observed values and probang incidence.
monlast_pred_stan<-as.data.frame(summary(resStan,pars="monlast_pred")$summary)
monlast_pred_stan$hcode<-herd_df_pred$hcode
monlast_pred_stan$monlast<-herd_df_pred$monlast
monlast_pred_stan<-left_join(monlast_pred_stan,
(ind_df_pred%>%group_by(hcode)%>%summarise(probang_incidence=mean(Probang)))[,c("hcode","probang_incidence")],by="hcode")
# Predicted vs observed monlast with 95% credible intervals; the blue
# identity line marks perfect prediction.
ggplot(data.frame(monlast_pred_stan),aes(x=monlast,y=mean))+
geom_errorbar(aes(ymin=X2.5.,ymax=X97.5.))+geom_point(col="red")+geom_abline(slope=1,intercept=0,col="blue")
|
45130c861fa6e3a8340ec0f79afb9ad623dc2fb9
|
154109aaf7ca07fd67145fd094ca9be21c2b1d62
|
/man/boot.Rd
|
c7db30fb406632a7b3547bef0a3c7696d7420765
|
[] |
no_license
|
emvolz/treedater
|
6e5c81919e3d6e218c6608b133da03506d3bdef6
|
7b8a72aa0ea71ded4cde6cd287529aa8ae679c68
|
refs/heads/master
| 2022-05-10T12:01:51.266204
| 2022-03-11T15:36:18
| 2022-03-11T15:36:18
| 75,198,675
| 20
| 10
| null | 2021-10-07T18:19:39
| 2016-11-30T15:12:18
|
R
|
UTF-8
|
R
| false
| true
| 3,127
|
rd
|
boot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boot.R
\name{boot}
\alias{boot}
\title{Estimate of confidence intervals of molecular clock parameters with user-supplied set of bootstrap trees}
\usage{
boot(
td,
tres,
ncpu = 1,
searchRoot = 1,
overrideTempConstraint = TRUE,
overrideClock = NULL,
quiet = TRUE,
normalApproxTMRCA = FALSE,
parallel_foreach = FALSE
)
}
\arguments{
\item{td}{A fitted treedater object}
\item{tres}{A list or multiPhylo with bootstrap trees with branches in units of substitutions per site}
\item{ncpu}{Number of threads to use for parallel computation. Recommended.}
\item{searchRoot}{See *dater*}
\item{overrideTempConstraint}{If TRUE (default) will not enforce positive branch lengths in simulation replicates. Will speed up execution.}
\item{overrideClock}{May be 'strict' or 'additive' or 'relaxed' in which case will force simulations to fit the corresponding model. If omitted, will inherit the clock model from td}
\item{quiet}{If TRUE will minimize output printed to screen}
\item{normalApproxTMRCA}{If TRUE will use the estimated standard deviation from simulation replicates and report a confidence interval based on the normal distribution}
\item{parallel_foreach}{If TRUE will use the foreach package for parallelization. May work better on HPC systems.}
}
\value{
A list with elements
\itemize{
\item trees: The fitted treedater objects corresponding to each simulation
\item meanRates: Vector of estimated rates for each simulation
\item meanRate_CI: Confidence interval for substitution rate
\item coef_of_variation_CI: Confidence interval for rate variation
\item timeOfMRCA_CI: Confidence interval for time of common ancestor
}
}
\description{
If the original treedater fit estimated the root position, root
position will also be estimated for each simulation, so the
returned trees may have different root positions. Some replicates
may converge to a strict clock or a relaxed clock, so the
parameter estimates in each replicate may not be directly
comparable. It is possible to compute confidence intervals for the
times of particular nodes or for estimated sample times by
inspecting the output from each fitted treedater object, which is
contained in the $trees attribute.
}
\examples{
# simulate a tree
tre <- ape::rtree(25)
# sample times based on distance from root to tip:
sts <- setNames( ape::node.depth.edgelength( tre )[1:ape::Ntip(tre)], tre$tip.label)
# make a list of trees that simulate outcome of bootstrap using nonparametric phylogeny estimation
# also modify edge length to represent evolutionary distance with rate 1e-3:
bootTrees <- lapply( 1:25, function(i) {
.tre <- tre
.tre$edge.length <- tre$edge.length * pmax(rnorm( length(tre$edge.length), 1e-3, 1e-4 ), 0 )
.tre
})
tre$edge.length <- tre$edge.length * 1e-3
# run treedater
td <- dater( tre, sts, s= 1000, clock='strict', omega0=.0015 )
# bootstrap:
( tdboot <- boot( td, bootTrees ) )
# plot lineages through time :
plot( tdboot )
}
\seealso{
dater
parboot
}
\author{
Erik M Volz <erik.volz@gmail.com>
}
|
b14b7dffeb3a9a70ae03f7284d630e9f42921028
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phenofit/examples/R2_sign.Rd.R
|
ef56dc997b5e54b45bd8f858347842f1747a6d3e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 196
|
r
|
R2_sign.Rd.R
|
library(phenofit)
### Name: R2_sign
### Title: Critical value of determined correlation
### Aliases: R2_sign
### ** Examples
# Critical R^2 for 30 observations and two predictors at the 5% level.
r2_crit <- R2_sign(30, NumberOfPredictor = 2, alpha = 0.05)
|
eb2c0f827a35af39431e47a88797b0876c8ff3ac
|
ce9ddae103c7b3cead05ed65c824292e82ce42b6
|
/R/data.R
|
d850ac7ed5556af7cb6a8fe502b03f288c13c2c7
|
[] |
no_license
|
guhjy/MIMOmicsData
|
48100658d00bd9217f04bb29a355715591cc1f44
|
3e9bc06aecc40f6b783ce46f2c2261dea7d40eb1
|
refs/heads/master
| 2020-04-13T11:48:22.226015
| 2017-10-24T09:35:30
| 2017-10-24T09:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 757
|
r
|
data.R
|
# data.R — roxygen documentation for the data objects shipped with this
# package: allele-proportion matrices (Probs, Probs2) and Spearman
# correlation matrices (Cors, Cors2) for the X1 and X2 genotype datasets.
#' Observed allele proportions in X1 dataset
#'
#' Observed allele proportions in X1 dataset coded as 0, 1 and 2
#'
#'
#' @format A matrix with SNPs as columns and genotype as rows
#'
#' @source Korcula and Vis data
"Probs"
#' Observed allele proportions in X2 dataset
#'
#' Observed allele proportions in X2 dataset coded as 0, 1 and 2
#'
#'
#' @format A matrix with SNPs as columns and genotype as rows
#'
#' @source Korcula and Vis data
"Probs2"
#' Observed spearman correlations in X1 dataset
#'
#'
#' @format A positive (semi)definite correlation matrix
#'
#' @source Korcula and Vis data
"Cors"
#' Observed spearman correlations in X2 dataset
#'
#'
#' @format A positive (semi)definite correlation matrix
#'
#' @source Korcula and Vis data
"Cors2"
|
d55082f14352da1f06d9e41875805324a6f8334f
|
d51d8b4ada0e926491ed7c5561baee2bd7c56b98
|
/R/timepicker.R
|
c77b5ad9404dd235d85d4405fc1aaa45b9ee96ce
|
[] |
no_license
|
PythonJournalist/dash-more-components
|
25242305c27288bb401b46f6afec13c451dbf6d6
|
976f9e3476fec69667525adbfa3c0dec3c420ec2
|
refs/heads/master
| 2023-03-14T09:55:25.225700
| 2021-02-28T22:01:31
| 2021-02-28T22:01:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
timepicker.R
|
# AUTO GENERATED FILE - DO NOT EDIT
# Dash wrapper for the Timepicker component: collects the declared props,
# drops the ones left as NULL, and returns a 'dash_component' structure.
timepicker <- function(id=NULL, value=NULL, format=NULL, maxDetail=NULL, maxTime=NULL, minTime=NULL, disabled=NULL, disableClock=NULL, locale=NULL) {
    # All declared props, in declaration order; unset ones are NULL.
    supplied <- list(id=id, value=value, format=format, maxDetail=maxDetail, maxTime=maxTime, minTime=minTime, disabled=disabled, disableClock=disableClock, locale=locale)
    # Keep only the props the caller actually supplied.
    supplied <- Filter(Negate(is.null), supplied)
    structure(
        list(
            props = supplied,
            type = 'Timepicker',
            namespace = 'dash_more_components',
            propNames = c('id', 'value', 'format', 'maxDetail', 'maxTime', 'minTime', 'disabled', 'disableClock', 'locale'),
            package = 'dashMoreComponents'
        ),
        class = c('dash_component', 'list')
    )
}
|
86b725cb8ff2cc4f20de124465531d4867dc5d2d
|
3053a557531d328b430b69fb7851dcb2dde22c93
|
/dataone/man/listQueryEngines.Rd
|
0343d63b866007003b604f9cb19476a349e14c30
|
[
"Apache-2.0"
] |
permissive
|
KillEdision/rdataone
|
e3bfe188ed1eba1f01d6e256f3a98a64104125ef
|
3ec0efb67cc3ba951d44ce13e5750bfec8caaac4
|
refs/heads/master
| 2021-01-15T20:24:17.028477
| 2015-07-29T01:16:47
| 2015-07-29T01:16:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
rd
|
listQueryEngines.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/D1Node.R
\name{listQueryEngines}
\alias{listQueryEngines}
\title{List the query engines available for a DataONE member node or coordinating node}
\usage{
listQueryEngines(node, ...)
}
\arguments{
\item{node}{The CNode or MNode to list the query engines for.}
}
\value{
list The list of query engines.
}
\description{
List the query engines available for a DataONE member node or coordinating node
}
|
12b57fac30969031525f83c98a733564a3d749da
|
155a862d1e3de6ac5993c3d47b5b97552d9d66e5
|
/Projects/Project4.R
|
1ed0adbdcd18379b65247a1cfd2cf314cb211b64
|
[] |
no_license
|
dsmilo/DATA643
|
e11b45e6ae4d4778391b7141c8937cabb7d32722
|
ff11869d3fa28603cfed2149855b6234a8c679f8
|
refs/heads/master
| 2020-12-25T15:08:16.558349
| 2016-10-05T16:38:14
| 2016-10-05T16:38:14
| 61,250,691
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,686
|
r
|
Project4.R
|
# Project4.R — recommenderlab experiments on MovieLense:
#  (1) IBCF recommendations re-weighted by movie recency,
#  (2) a UBCF model built from the raw MovieLens 100k files.
library(recommenderlab)
# BUG FIX: dplyr was originally attached *after* its first use (the
# %>%/group_by pipeline building recc_df2 below), so the script failed in a
# clean session. Load it up front.
library(dplyr)
# items & time ####
data(MovieLense)
# Keep users with > 20 ratings and movies rated by > 50 users.
ratings_movies <- MovieLense[rowCounts(MovieLense) > 20, colCounts(MovieLense) > 50]
# Assign each user to one of 5 folds.
# NOTE(review): no set.seed here, so the split is not reproducible.
which_set <- sample(x = 1:5,
                    size = nrow(ratings_movies),
                    replace = TRUE)
# NOTE(review): this loop overwrites the split each iteration, so only
# fold 5 is ever used downstream; kept as-is to preserve behaviour.
for(i_model in 1:5) {
  which_train <- which_set == i_model
  recc_data_train <- ratings_movies[which_train, ]
  recc_data_test <- ratings_movies[!which_train, ]
}
# Item-based collaborative filtering; top-20 recommendations per user.
recc_model <- Recommender(data = recc_data_train, method = "IBCF")
recc_predicted <- predict(object = recc_model, newdata = recc_data_test, n = 20)
# Recency weight: newer movies get weights closer to 1; missing years get 0.
year_ref <- max(MovieLenseMeta$year, na.rm = TRUE)
year_diff <- year_ref - MovieLenseMeta$year
year_wt <- 1 / log(year_diff^2 + exp(1))
year_wt[is.na(year_wt)] <- 0
weights <- data.frame(title = MovieLenseMeta$title, wt = year_wt, stringsAsFactors = FALSE)
# Flatten the prediction object into user/title/rating rows.
recc_df <- data.frame(user = sort(rep(1:length(recc_predicted@items), recc_predicted@n)), rating = unlist(recc_predicted@ratings), index = unlist(recc_predicted@items))
recc_df$title <- recc_predicted@itemLabels[recc_df$index]
# Top-5 unweighted recommendations per user.
recc_df2 <- recc_df %>% group_by(user) %>% arrange(desc(rating)) %>% top_n(5) %>% select(user, title, rating)
head(recc_df2, 10)
# Top-5 recency-weighted recommendations per user.
recc_wt <- inner_join(recc_df, weights, by = "title")
recc_wt <- recc_wt %>% mutate(wt_rating = rating * wt) %>% select(user, title, wt_rating) %>% group_by(user) %>% top_n(5)
head(recc_wt, 10)
# users & location ####
#http://files.grouplens.org/datasets/movielens/ml-100k-README.txt
movielens_ratings <- read.table('http://files.grouplens.org/datasets/movielens/ml-100k/u.data', header=FALSE, sep="\t")
names(movielens_ratings) <- c("user_id", "item_id", "rating", "timestamp")
# Build the user x item rating matrix from the raw triples.
movielens_matrix <- matrix(nrow = length(levels(as.factor(movielens_ratings$user_id))), ncol = length(levels(as.factor(movielens_ratings$item_id))))
for (n in 1:nrow(movielens_ratings)) {
  movielens_matrix[movielens_ratings[n, 1], movielens_ratings[n, 2]] <- movielens_ratings[n, 3]
}
movielens <- as(movielens_matrix, "realRatingMatrix")
users <- read.table('http://files.grouplens.org/datasets/movielens/ml-100k/u.user', header=FALSE, sep = "|", stringsAsFactors = FALSE)
names(users) <- c("user_id", "age", "gender", "occupation", "zip_code")
movielens_keep <- movielens[rowCounts(movielens) > 20, colCounts(movielens) > 50]
sets <- sample(x = 1:5,
               size = nrow(movielens_keep),
               replace = TRUE)
# NOTE(review): same fold-overwriting pattern as above.
for(m in 1:5) {
  which_train <- sets == m
  data_train <- movielens_keep[which_train, ]
  data_test <- movielens_keep[!which_train, ]
}
# User-based collaborative filtering model; inspect the similarity matrix.
user_model <- Recommender(data = data_train, method = "UBCF")
model_details <- getModel(user_model)
dim(model_details$sim)
|
812b7669f52d8ab1f6177440f3b758c7d370061e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tsDyn/examples/lstar.Rd.R
|
9aea59c5603b350e3381c75e0375870a582000f7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 823
|
r
|
lstar.Rd.R
|
library(tsDyn)
### Name: LSTAR
### Title: Logistic Smooth Transition AutoRegressive model
### Aliases: LSTAR lstar
### Keywords: ts
### ** Examples
# Fit an LSTAR model to the log lynx series; raise 'maxit' because
# convergence is slow for this specification.
fit_basic <- lstar(log10(lynx), m=2, mTh=c(0,1), control=list(maxit=3000))
fit_basic
# Same model class, but with no constant term in either regime.
fit_noconst <- lstar(log10(lynx), m=1, include="none")
fit_noconst
# The default initial grid search can be too narrow; widen the gamma
# interval and evaluate more candidate values (slow!).
grid_ctrl <- list(gammaInt=c(1,2000), nGamma=50)
fit_widegrid <- lstar(log10(lynx), m=1, include="none", starting.control=grid_ctrl)
fit_widegrid
# A few of the standard methods available for lstar fits:
summary(fit_basic)
residuals(fit_basic)
AIC(fit_basic)
BIC(fit_basic)
plot(fit_basic)
predict(fit_basic, n.ahead=5)
|
9229c72e187036417ce447573a9af7890c601bc1
|
20d92a0b85ac3b0eb7713087f3fa8d324c8d3cb1
|
/plot4.R
|
1e4ce247d1cb5557e3a4b1abecadd2c26f422eaa
|
[] |
no_license
|
caioaf/ExData_Plotting1
|
4d81d7cc58fd9033445b454565c4acd244067ef2
|
1f955de9da5224c1a12a78e95ae4071c8332c90a
|
refs/heads/master
| 2020-03-25T00:50:34.082227
| 2018-08-01T21:08:31
| 2018-08-01T21:08:31
| 143,208,656
| 0
| 0
| null | 2018-08-01T21:05:26
| 2018-08-01T21:05:25
| null |
UTF-8
|
R
| false
| false
| 1,073
|
r
|
plot4.R
|
# plot4.R — Exploratory Data Analysis course, plot 4: a 2x2 panel of
# household power consumption charts for 2007-02-01..2007-02-02, saved
# to plot4.png. Expects household_power_consumption.txt in the working
# directory ('?' marks missing values).
# Load data
library(dplyr)
fullData <- read.table("household_power_consumption.txt", na.strings = "?", sep = ";", header = TRUE)
fullData$Date <- as.Date(fullData$Date, "%d/%m/%Y")
# Restrict to the two days of interest.
data <- filter(fullData, Date >= "2007-02-01", Date <= "2007-02-02")
# Combine the separate Date and Time columns into a single POSIXct axis.
datetime <- paste(data$Date, data$Time)
data$datetime <- as.POSIXct(datetime)
# Plot chart 4
par(mfrow = c(2, 2))
# top left
with(data, plot(Global_active_power ~ datetime, type="l", xlab = "", ylab = "Global Active Power (kilowatts)"))
# top right
with(data, plot(Voltage ~ datetime, type="l"))
# bottom left
# NOTE(review): this panel plots the three sub-metering series, but the
# y-axis label says "Global Active Power" — likely a copy-paste slip.
with(data, plot(Sub_metering_1 ~ datetime, type="l", xlab = "", ylab = "Global Active Power (kilowatts)"))
with(data, lines(Sub_metering_2 ~ datetime, col='red') )
with(data, lines(Sub_metering_3 ~ datetime, col='blue') )
legend("topright", col=c("black", "red", "blue"), lty=1,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# bottom right
with(data, plot(Global_reactive_power ~ datetime, type="l"))
# Save pictures
# NOTE(review): dev.copy from the screen device can clip if the window
# size differs from 480x480; plotting directly to png() is more robust.
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
42446f4065f2c739eba52944f0eaaa0e2700abc3
|
990baad326e1d000ed592fa2f0535432d3babd5c
|
/WeightMedian_example.R
|
7ab347147b37dd641e4118871a85f31e7497d6d3
|
[] |
no_license
|
jm2ds/RStudioTest
|
0797a839951e6a421ac4de65804b14a7d8de8b6f
|
58bd1c91a6ebebf1f49f4d4ecaf7bebda13d9815
|
refs/heads/master
| 2016-09-05T16:02:35.706605
| 2014-05-23T17:25:53
| 2014-05-23T17:25:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
WeightMedian_example.R
|
# Median of the Weight column across all CSV files in `directory`,
# restricted to rows whose Day column equals `day`. NAs are ignored.
#
# Args:
#   directory: path to a folder of CSV files, each with Day and Weight columns
#   day:       the Day value to filter on
# Returns: the median weight as a numeric scalar (NA if no matching rows).
weightmedian <- function(directory, day) {
  files_list <- dir(directory, full.names = TRUE)  # every file in the folder
  # BUG FIX: the original looped over exactly five files (for i in 1:5),
  # crashing for fewer files and silently dropping data for more. Read
  # every file and bind once (avoids the grow-by-rbind loop as well).
  dat <- do.call(rbind, lapply(files_list, read.csv))
  dat_subset <- subset(dat, dat$Day == day)  # rows matching the 'day' argument
  median(dat_subset$Weight, na.rm = TRUE)    # median, stripping out the NAs
}
|
3825ca45dc5fbc19232bf8c7eb8576466cbd5680
|
a23f02eb2cf26250d1ca69f4da78f764bf8f0c48
|
/cathChiSquare.R
|
6b36740ea39ca54007626fc0e9618d16a1f738fe
|
[] |
no_license
|
mihirt41/ChiSquareCodeCVI
|
82c11b427c70252aefbec7229b01e733c1568184
|
892cc78a9c93c9b9b1710a8183cc72589ccc9f37
|
refs/heads/master
| 2021-05-14T17:18:40.068175
| 2018-01-02T18:34:34
| 2018-01-02T18:34:34
| 116,044,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,589
|
r
|
cathChiSquare.R
|
##Script Written By Mihir Trivedi
#################################
## Chi-square tests of independence between cath case status and a set of
## demographic / comorbidity covariates, producing p-values for tables.
##loading data
library(readr)
cathData <- read_csv("F:/cath/cathCasesRaceCorrected.csv")
View(cathData)
##removing weird line in data
cathData<-cathData[-1,]
##reconfirming correct dataset/numbers correspond with those in the abstract
table(cathData$caseindicator, cathData$indicatorforexclusion)
##Creating Chi-Square Tests of Independence to Generate Some P-Values For Tables
library(MASS)
# Cross-tabulate two categorical vectors and run a chi-square test of
# independence. `nameTable` is kept (with a default) for backward
# compatibility with the original three-argument signature; it is unused.
chiSquareTestGen<-function(categoryOne, categoryTwo, nameTable = NULL){
  chisq.test(table(categoryOne, categoryTwo))
}
# Covariates to test against case status.
covariateNames<-c("AGECAT","SEX","RACE_RECODE","INSURANCE","Mor_Teach","county2","ANEMIA","CERVASD","CANCER","COPD","AFIB","RENAL","DIABETES","HYPERTENSION")
# BUG FIX / idiom: the original built strings such as "cathData$AGECAT" and
# evaluated them via eval(parse(text = ...)). Indexing the data frame
# directly with [[ ]] is safer, clearer, and avoids code injection risks.
chiSquares<-lapply(covariateNames, function(nm){
  chiSquareTestGen(cathData$caseindicator, cathData[[nm]])
})
names(chiSquares)<-covariateNames
##extracting pvalues from chisquares may be of use for tables
cstPvalues<-vapply(chiSquares, function(ct) ct$p.value, numeric(1))
##variables of interest
chiSquares ## chisquare Tests
cstPvalues ## Pvalues
covariateNames ##Variable Names
|
83043b76140534193e520e6946c5b08b0ae38e0f
|
6eb0741293bbcaeadd68c2475596c809d6f582da
|
/fin.r
|
38a36175e37769484d7c85558e2c0d786bd3d0cf
|
[
"MIT"
] |
permissive
|
Sivatharshen/Diabetes-prediction-using-ML-and-comparing-the-accuracy-on-different-algorithms
|
9718bd2239b082ec213e7657b50878c044781caa
|
4ef67dd7d0b67a6f9c96fb2eee302452ee1537e0
|
refs/heads/master
| 2022-05-11T16:28:47.962667
| 2020-04-23T18:04:53
| 2020-04-23T18:04:53
| 258,289,269
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,814
|
r
|
fin.r
|
# fin.r — Diabetes prediction on the Pima dataset: fit logistic regression,
# a decision tree, a random forest and an SVM, then compare test accuracies.
# Expects diabetes.csv in the working directory.
library(corrplot)
library(caret)
# Read the data with descriptive column names; outcome column is 'Diabetes'.
pima <- read.csv("diabetes.csv", col.names=c("Pregnant","Plasma_Glucose","Dias_BP","Triceps_Skin","Serum_Insulin","BMI","DPF","Age","Diabetes"))
head(pima) # # visualize the header of Pima data
str(pima)
# Count missing values per column.
sapply(pima, function(x) sum(is.na(x)))
pairs(pima, panel = panel.smooth)
# Pairwise correlations among predictors (outcome column 9 excluded).
corrplot(cor(pima[, -9]), type = "lower", method = "number")
# Preparing the DataSet — 70/30 train/test split by row indices.
set.seed(1000)
n <- nrow(pima)
train <- sample(n, trunc(0.70*n))
pima_training <- pima[train, ]
pima_testing <- pima[-train, ]
# Training The Model — logistic regression on all predictors.
glm_fm1 <- glm(Diabetes ~., data = pima_training, family = binomial)
summary(glm_fm1)
# Refit without the predictors that were not significant in the full model.
glm_fm2 <- update(glm_fm1, ~. - Triceps_Skin - Serum_Insulin - Age )
summary(glm_fm2)
par(mfrow = c(2,2))
plot(glm_fm2)
# Testing the Model — classify at a 0.5 probability threshold.
glm_probs <- predict(glm_fm2, newdata = pima_testing, type = "response")
glm_pred <- ifelse(glm_probs > 0.5, 1, 0)
test_tab1 <-table(Predicted = glm_pred, Actual = pima_testing$Diabetes)
test_tab1
# Accuracy = proportion of correct predictions (diagonal of the table).
accura1 <- round(sum(diag(test_tab1))/sum(test_tab1),2)
accura1
#confusionMatrix(glm_pred, pima_testing$Diabetes )
#acc_glm_fit <- confusionMatrix(glm_pred, pima_testing$Diabetes )$overall['Accuracy']
# Preparing the DataSet: re-read so the outcome can be treated as a factor
# (required for the classification tree and SVM below).
pima <- read.csv("diabetes.csv", col.names=c("Pregnant","Plasma_Glucose","Dias_BP","Triceps_Skin","Serum_Insulin","BMI","DPF","Age","Diabetes"))
pima$Diabetes <- as.factor(pima$Diabetes)
library(caret)
library(tree)
library(e1071)
set.seed(1000)
# Stratified 70/30 split on the factor outcome.
intrain <- createDataPartition(y = pima$Diabetes, p = 0.7, list = FALSE)
train <- pima[intrain, ]
test <- pima[-intrain, ]
# Training The Model — classification tree.
treemod <- tree(Diabetes ~ ., data = train)
summary(treemod)
treemod # get a detailed text output.
plot(treemod)
text(treemod, pretty = 0)
# Testing the Model
tree_pred <- predict(treemod, newdata = test, type = "class" )
confusionMatrix(tree_pred, test$Diabetes)
acc_treemod <- confusionMatrix(tree_pred, test$Diabetes)$overall['Accuracy']
# Training The Model — random forest.
# NOTE(review): pima_training still holds the numeric (non-factor) outcome
# from the first section, so randomForest fits a regression forest here and
# predictions are numeric, thresholded at 0.5 below — confirm intended.
set.seed(123)
library(randomForest)
rf_pima <- randomForest(Diabetes ~., data = pima_training, mtry = 8, ntree=50, importance = TRUE)
# Testing the Model
rf_probs <- predict(rf_pima, newdata = pima_testing)
rf_pred <- ifelse(rf_probs > 0.5, 1, 0)
test_tab3 <-table(Predicted = rf_pred, Actual = pima_testing$Diabetes)
test_tab3
accura3 <- round(sum(diag(test_tab3))/sum(test_tab3),2)
accura3
#confusionMatrix(rf_pred, pima_testing$Diabetes )
#acc_rf_pima <- confusionMatrix(rf_pred, pima_testing$Diabetes)$overall['Accuracy']
# Variable-importance diagnostics for the forest.
importance(rf_pima)
par(mfrow = c(1, 2))
varImpPlot(rf_pima, type = 2, main = "Variable Importance",col = 'black')
plot(rf_pima, main = "Error vs no. of trees grown")
#Load the DataSet
pima <- read.csv("diabetes.csv", col.names=c("Pregnant","Plasma_Glucose","Dias_BP","Triceps_Skin","Serum_Insulin","BMI","DPF","Age","Diabetes"))
pima$Diabetes <- as.factor(pima$Diabetes)
library(e1071)
#Preparing the DataSet:
set.seed(1000)
intrain <- createDataPartition(y = pima$Diabetes, p = 0.7, list = FALSE)
train <- pima[intrain, ]
test <- pima[-intrain, ]
# Grid-search the SVM hyperparameters (gamma, cost) by cross-validation.
tuned <- tune.svm(Diabetes ~., data = train, gamma = 10^(-6:-1), cost = 10^(-1:1))
summary(tuned) # to show the results
# Final radial-kernel SVM with the chosen hyperparameters.
svm_model <- svm(Diabetes ~., data = train, kernel = "radial", gamma = 0.01, cost = 10)
summary(svm_model)
svm_pred <- predict(svm_model, newdata = test)
confusionMatrix(svm_pred, test$Diabetes)
acc_svm_model <- confusionMatrix(svm_pred, test$Diabetes)$overall['Accuracy']
# Collect the four accuracies and plot a comparison bar chart.
# NOTE(review): ggplot() is used without library(ggplot2); this relies on
# caret attaching ggplot2 — confirm, or load it explicitly.
accuracy <- data.frame(Model=c("Logistic Regression","Decision Tree","Random Forest", "Support Vector Machine (SVM)"), Accuracy=c(accura1, acc_treemod, accura3, acc_svm_model ))
ggplot(accuracy,aes(x=Model,y=Accuracy)) + geom_bar(stat='identity') + theme_bw() + ggtitle('Comparison of Model Accuracy')
|
197c58e0aa72c14b04a792e026e1dbd3fe438a6d
|
18dfced9b24ab6ab91d9e08292a4907c58110928
|
/advent2018/03a.r
|
44fa600b3e6d5b243058a72f7bbe39be01cad0e9
|
[] |
no_license
|
Spacejoker/problem
|
84c7eeef4c8bcd1d074b67faad09cf7a7511bcfd
|
37fade5619205e5e8cc2ac700dee89555721952e
|
refs/heads/master
| 2022-12-22T13:23:35.548050
| 2022-12-19T07:02:46
| 2022-12-19T07:02:46
| 729,643
| 2
| 1
| null | 2022-12-18T05:22:23
| 2010-06-19T19:44:41
|
Java
|
UTF-8
|
R
| false
| false
| 668
|
r
|
03a.r
|
# Advent of Code 2018, day 3 (part A): count fabric cells covered by more
# than one claim.  Input lines (stdin) look like "#id @ x,y: WxH"; the
# answer is written to stderr.
# (Fix: removed a leftover "Hello, World!" template print that polluted
# stdout, and vectorized the per-claim fill and the final count.)
f <- file("stdin")
open(f)
xdim = 5000
ydim = xdim
# One counter per fabric cell, flattened into a single numeric vector.
pelt = numeric(xdim * ydim)
while(length(line <- readLines(f,n=1)) > 0) {
  fields <- unlist(strsplit(line, " "))
  # Offset field, e.g. "1,3:" -> two 1-based coordinates (trailing ':' stripped).
  coords <- unlist(strsplit(fields[3], ","))
  y <- strtoi(coords[1])+1
  x <- strtoi(substr(coords[2],1,nchar(coords[2])-1))+1
  # Size field, e.g. "4x4".
  dims <- unlist(strsplit(fields[4], "x"))
  h <- strtoi(dims[1])
  w <- strtoi(dims[2])
  # Flattened indices of every cell this claim covers; within one claim the
  # cells are all distinct, so a single vectorized increment is safe and
  # replaces the original nested for-loops (same idx = xdim*yy + xx formula).
  cells <- as.vector(outer(xdim * (y:(y+h-1)), x:(x+w-1), `+`))
  pelt[cells] <- pelt[cells] + 1
}
# Cells claimed more than once.
cnt <- sum(pelt > 1)
write(cnt, stderr())
close(f)
|
229099c62399dac7d5438947f6d846d6a02405db
|
5f8ae737ef12df3871dcd274a9ef4b550529d592
|
/GenerateData.R
|
a98b518eae6e29eef7a4ef9b59a0b003a1795f75
|
[] |
no_license
|
jclaramunt/LinkageData
|
b523a7a169cc03b52dd747b2e01403834ef3b7a1
|
98cf13b3b5eb887f34eacdfdeaccab8b5ba0e02e
|
refs/heads/master
| 2020-12-05T10:46:57.513961
| 2020-01-07T07:48:21
| 2020-01-07T07:48:21
| 232,085,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,841
|
r
|
GenerateData.R
|
# Simulated record-linkage datasets: three examples of paired datasets (A, B)
# sharing a date-of-birth key plus height/weight, with increasing realism.
# NOTE: each example calls set.seed(11), so every example restarts from the
# same RNG state; statement order must not change or the data changes.
# Example1 #
# Not unique key
dimA<-20
#Dataset A
set.seed(11)
# Random date-of-birth components; day limited to 1:28 so any month is valid.
day<-sample(x=1:28, size = dimA, replace=TRUE)
month<-sample(x=1:12, size = dimA, replace=TRUE)
year<-sample(x=1920:2020, size = dimA, replace=TRUE)
datebirth<-paste(day,month,year,sep = '/')
hair<-sample(x=c("Blond","Brown","Black","Red","White","Bold","Other"),size = dimA, replace=TRUE)
eye<-sample(x=c("Blue","Brown","Black","Green","Grey","Other"),size = dimA, replace=TRUE)
height<-sample(x=140:210, size = dimA, replace=TRUE)
weight<-sample(x=40:120, size = dimA, replace=TRUE)
DatasetAE1<-data.frame(datebirth,hair,eye,height,weight)
#Dataset B
# Same individuals as A but shuffled, with extra lifestyle/outcome columns.
reord<-sample(1:dimA, size = dimA, replace=FALSE)
datebirthB<-datebirth[reord]
smoke<-sample(x=0:1, size = dimA, replace=TRUE)
drink<-sample(x=0:1, size = dimA, replace=TRUE)
sportWeek<-sample(x=0:7, size = dimA, replace=TRUE)
strokes<-sample(x=0:5, prob = c(0.8,0.185,0.011,0.0025,0.001,0.0005), size = dimA, replace=TRUE)
heightB<-height[reord]
weightB<-weight[reord]
DatasetBE1<-data.frame(datebirthB,smoke,drink,sportWeek,strokes,heightB,weightB)
# Example2 a #
#Not unique key with random errors.
dimA<-20
#Dataset A
set.seed(11)
# ~10% of A's records get a single +/-1 perturbation in one field.
error<-sample(x=c(0,1), prob = c(0.9,0.1), size = dimA, replace = TRUE)
day<-sample(x=1:28, size = dimA, replace=TRUE)
month<-sample(x=1:12, size = dimA, replace=TRUE)
year<-sample(x=1920:2020, size = dimA, replace=TRUE)
datebirth<-paste(day,month,year,sep = '/')
hair<-sample(x=c("Blond","Brown","Black","Red","White","Bold","Other"),size = dimA, replace=TRUE)
eye<-sample(x=c("Blue","Brown","Black","Green","Grey","Other"),size = dimA, replace=TRUE)
height<-sample(x=140:210, size = dimA, replace=TRUE)
weight<-sample(x=40:120, size = dimA, replace=TRUE)
heightA<-height
weightA<-weight
for (i in 1:dimA) {
if(error[i]==1){
# Pick one of five fields to corrupt by +/-1.
where<-sample(x=1:5, size = 1)
if(where==1){
# NOTE(review): day can become 0 or 29 here (sampled range was 1:28).
day[i]<-day[i]+sample(x=c(-1,1),size=1)
}else if(where==2){
# December is always mapped to November (not +/-1 like the other months).
month[i]<-ifelse(month[i]==12, 11, month[i]+sample(x=c(-1,1),size=1))
}else if(where==3){
year[i]<-year[i]+sample(x=c(-1,1),size=1)
}else if(where==4){
heightA[i]<-height[i]+sample(x=c(-1,1),size=1)
}else if(where==5){
weightA[i]<-weight[i]+sample(x=c(-1,1),size=1)
}
}
}
datebirthA<-paste(day,month,year,sep = '/')
DatasetAE2a<-data.frame(datebirthA,hair,eye,heightA,weightA)
#Dataset B. No errors. Errors only in A
reord<-sample(1:dimA, size = dimA, replace=FALSE)
datebirthB<-datebirth[reord]
smoke<-sample(x=0:1, size = dimA, replace=TRUE)
drink<-sample(x=0:1, size = dimA, replace=TRUE)
sportWeek<-sample(x=0:7, size = dimA, replace=TRUE)
strokes<-sample(x=0:5, prob = c(0.8,0.185,0.011,0.0025,0.001,0.0005), size = dimA, replace=TRUE)
heightB<-height[reord]
weightB<-weight[reord]
DatasetBE2a<-data.frame(datebirthB,smoke,drink,sportWeek,strokes,heightB,weightB)
# Example 2b #
#Not unique key with random errors. Larger datasets
# Same construction as Example 2a but 2000 records and a 20% error rate.
dimA<-2000
#Dataset A
set.seed(11)
error<-sample(x=c(0,1), prob = c(0.8,0.2), size = dimA, replace = TRUE)
day<-sample(x=1:28, size = dimA, replace=TRUE)
month<-sample(x=1:12, size = dimA, replace=TRUE)
year<-sample(x=1920:2020, size = dimA, replace=TRUE)
datebirth<-paste(day,month,year,sep = '/')
hair<-sample(x=c("Blond","Brown","Black","Red","White","Bold","Other"),size = dimA, replace=TRUE)
eye<-sample(x=c("Blue","Brown","Black","Green","Grey","Other"),size = dimA, replace=TRUE)
height<-sample(x=140:210, size = dimA, replace=TRUE)
weight<-sample(x=40:120, size = dimA, replace=TRUE)
heightA<-height
weightA<-weight
for (i in 1:dimA) {
if(error[i]==1){
where<-sample(x=1:5, size = 1)
if(where==1){
day[i]<-day[i]+sample(x=c(-1,1),size=1)
}else if(where==2){
month[i]<-ifelse(month[i]==12, 11, month[i]+sample(x=c(-1,1),size=1))
}else if(where==3){
year[i]<-year[i]+sample(x=c(-1,1),size=1)
}else if(where==4){
heightA[i]<-height[i]+sample(x=c(-1,1),size=1)
}else if(where==5){
weightA[i]<-weight[i]+sample(x=c(-1,1),size=1)
}
}
}
datebirthA<-paste(day,month,year,sep = '/')
DatasetAE2b<-data.frame(datebirthA,hair,eye,heightA,weightA)
#Dataset B. No errors. Errors only in A
reord<-sample(1:dimA, size = dimA, replace=FALSE)
datebirthB<-datebirth[reord]
smoke<-sample(x=0:1, size = dimA, replace=TRUE)
drink<-sample(x=0:1, size = dimA, replace=TRUE)
sportWeek<-sample(x=0:7, size = dimA, replace=TRUE)
strokes<-sample(x=0:5, prob = c(0.8,0.185,0.011,0.0025,0.001,0.0005), size = dimA, replace=TRUE)
heightB<-height[reord]
weightB<-weight[reord]
DatasetBE2b<-data.frame(datebirthB,smoke,drink,sportWeek,strokes,heightB,weightB)
|
dc9e356f137ed6fe9962ef2e1abd4c7688ff0b44
|
09fede3ddb1fe90d486ead390e557275514ebcd7
|
/man-roxygen/example.R
|
6f899c3a6d4858cded8203361f3e1efee1108cb2
|
[] |
no_license
|
Mrlirhan/mlr3extralearners
|
05723212d650c623a8bcf5ba4eb809eb7cc6707e
|
3655ccecb43f18837bc6a2078672636a399bab5d
|
refs/heads/main
| 2023-06-29T09:31:58.113359
| 2021-08-04T17:42:26
| 2021-08-04T17:42:26
| 392,993,757
| 1
| 0
| null | 2021-08-05T10:05:51
| 2021-08-05T10:05:50
| null |
UTF-8
|
R
| false
| false
| 232
|
r
|
example.R
|
<%
lrn = mlr3::lrn(id)
%>
#' @examples
#' # stop example failing with warning if package not installed
#' learner = suppressWarnings(mlr3::lrn("<%= id %>"))
#' print(learner)
#'
#' # available parameters:
#' learner$param_set$ids()
|
ba6baf6833cba388a695bcc87c93fb680bd100bd
|
11a2bffc556f663f912cab24ed3b07f461e6665d
|
/man/PCARaster.Rd
|
989fff1a65af89dfa5f7176deb8d59c16f637c7a
|
[] |
no_license
|
vijaybarve/ENMGadgets
|
dd94ab1ff1a1e7067f44178b17db057abadcdaca
|
70d0ce36f84e94a523d4be01b0464e103bc25d4d
|
refs/heads/master
| 2021-01-25T12:30:19.119139
| 2018-06-26T17:06:21
| 2018-06-26T17:06:21
| 14,593,644
| 1
| 2
| null | 2016-09-14T22:32:19
| 2013-11-21T16:52:17
|
R
|
UTF-8
|
R
| false
| true
| 1,236
|
rd
|
PCARaster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PCARaster.R
\name{PCARaster}
\alias{PCARaster}
\title{PCARaster - PCA of Raster files}
\usage{
PCARaster(BioStackFiles = NA, LoadingFile = NA, CompImpFile = NA,
OPfolder = NA)
}
\arguments{
\item{BioStackFiles}{- ESRI ASCII grid files of predictor variables}
\item{LoadingFile}{- name of output file which stores loadings for components}
\item{CompImpFile}{- name of output file of the PCA summaries}
\item{OPfolder}{- name of output folder to save PCA component, loading and summary file}
}
\value{
a summary of the PCA is returned as a structure
}
\description{
Performs Principle Component Analysis of Raster objects and returns summary
and loadings for the components. For the interactive version check \link{iPCARaster}
}
\details{
Main function to generate PCA for the selected bioclimatic layer and then save
the pca components in ASCII format.
This function will accept the bioclimatic ASCII files. PCA components are
stored as Comp1.asc, Comp2.asc, ... and so on.
This function retuns the pca of the ascii data supplied to do further processing like checking for eigen values, broken stick etc.
}
\examples{
\dontrun{
pcaop = PCARaster()
}
}
|
1f1f6bb31f336abe96f69e51439f737a0d041517
|
9b81b5fa72d6d123e65ac8ccff0c7830476d03a0
|
/ui.R
|
f2dff10e3f1372c3a26724af6568a2c26c2aa058
|
[] |
no_license
|
pg-environmental-stats/SSD-Analysis-Internal
|
73c6495310ecfda5e4153c818a822aea2a9cff14
|
2ed3276b6370b4707a712f2c17043f1ac2e715b3
|
refs/heads/main
| 2023-06-20T02:03:26.199371
| 2021-07-20T17:46:42
| 2021-07-20T17:46:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,725
|
r
|
ui.R
|
# Startup dependency check: verify that every package needed by the SSD and
# LCx tools is installed, install any that are missing from CRAN, then attach
# them all.  (Fix: the "missing packages" messages used
# names(packageTests[...]), but packageTests is an *unnamed* logical vector
# produced by %in%, so names() was always NULL and no package names were ever
# printed; they now come from packages[!packageTests].)
installedPackages <- rownames(installed.packages())
SSDpackages <- c("devtools","shiny","shinyjs","shinyalert","shinyWidgets",# only if for table output
                 "htmlwidgets","magrittr","parallel","formattable","DT",
                 "RColorBrewer","multcomp","openxlsx","ADGofTest",
                 "eha","evd", "flexsurv")
LCxPackages <- c("shiny", "shinydashboardPlus","isotone","grid", "gridExtra", "openxlsx", "optimx", "plotrix","rstudioapi","colourpicker")
packages <- unique(c(SSDpackages,LCxPackages))
# TRUE for each required package that is already installed.
packageTests <- packages %in% installedPackages
if(all(packageTests)){
  cat("\n",paste(rep("#",100),collapse = ""),
      "\n All required packages are present.",
      "\n",paste(rep("#",100),collapse = ""),"\n")
}
if(sum(!packageTests)>0){
  cat("\n",paste(rep("#",100),collapse = ""),
      "\n Please wait while these required packages and their dependencies are installed:",
      "\n ",paste(packages[!packageTests],collapse = " "),
      "\n Requires internet access and sufficient rights to install R packages on your system.",
      "\n",paste(rep("#",100),collapse = ""),"\n")
  install.packages(packages[!packageTests], repos = "https://cran.rstudio.com/", dependencies=TRUE)
  ### In one case, needed to add this to a users install.packages call: INSTALL_opts = c('--no-lock')
  # recheck for packages
  installedPackages <- rownames(installed.packages())
  packageTests <- packages %in% installedPackages
  if(all(packageTests)){
    cat("\n",paste(rep("#",100),collapse = ""),
        "\n All required packages were successfully installed.",
        "\n",paste(rep("#",100),collapse = ""),"\n")
  }
  if(!all(packageTests)){
    cat("\n",paste(rep("#",100),collapse = ""),
        "\n Not all packages were successfully installed:",
        "\n ",paste(packages[!packageTests],collapse = " "),
        "\n",paste(rep("#",100),collapse = ""),"\n")
  }
}
#load all packages -- a few masking issues arise, but none that affect this code AFAIK
invisible(sapply(packages,FUN=library,character.only=TRUE))
jsResetCode <- "shinyjs.reset = function() {history.go(0)}" # Define the js method that resets the page
### just in case, purge files that might be left over from a previous run
### the code attempts to prevent this by resetting things, but it's all limits of the
### imagination for the order things are entered, changed, etc. The user
### must assume the ultimate responsibility. Any critical analysis should be run
### from a reset tool, in the proper order.
# Remove any output files left over from a previous run.  unlink() is
# already a no-op for files that do not exist, so the per-file
# file.exists() guards of the original version are unnecessary.
stale_outputs <- c(
  "SSDplotOutput.pdf", "SSD Analysis.pdf", "SSDoutput.xlsx", "SSD Analysis.xlsx",
  "LCxplotOutput.pdf", "LCx Analysis.pdf", "LCxoutput.xlsx", "LCx Analysis.xlsx",
  "BVplotOutput.pdf",  "BV Analysis.pdf",  "BVoutput.xlsx",  "BV Analysis.xlsx"
)
unlink(stale_outputs)
# UI definition for the "ES + BMD Tools" shiny app: a sidebar with the
# analysis-type selector, data paste box, variable/plot options and download
# buttons, plus a main panel with Output / Help / Contacts tabs.
shinyUI(
fluidPage(
shinyalert::useShinyalert(), # Sets up shinyalert
titlePanel("ES + BMD Tools"),
sidebarLayout(
sidebarPanel(
# Reset button (reloads the page via the js handler registered below)
# next to a server-rendered example-data download control.
splitLayout(
actionBttn(
inputId="reset_button",
label = "Reset Tool",
icon = icon("redo"),
style = "pill",
color = "default",
size = "md",
block = FALSE,
no_outline = TRUE
),
uiOutput("ExampleDownload")
),
#actionButton("reset_button", "Reset Page",icon = icon("redo")),
### before, we assigned default vars but current version does not work on that idea
# Only SSD is currently exposed: the [5] subscripts keep just the fifth
# choice of each list, so the radio group has a single option.
radioButtons("analysisType",label = "Select Analysis",selected = "SSD",
choiceValues = list("Count","BMD","SK","Continuous","SSD")[5],
choiceNames=list("LCx / Spearman-Karber","Binary BMD","Spearman-Karber","BV","SSD")[5]
),
### splitLayout(
###   radioButtons("analysisType",label = "Select Analysis",selected = "SSD",
###                choiceValues = list("Count","BMD","SK","Continuous","SSD"),
###                choiceNames=list("LCx","Binary BMD","Spearman-Karber","BV","SSD")
###   ),
###   fluidPage(
###     wellPanel(uiOutput("defaultVars"))
###   )
###),
# in the server, these SSD inputs are NULLed out if the analysis is not SSD
# Server-rendered option controls; which ones appear depends on the
# selected analysis type.
uiOutput("optshead"),
uiOutput("analysisOpts"),
#splitLayout(uiOutput("SSD.2.1"),uiOutput("SSD.2.2"),
#            cellArgs = list(style = c("align: left","align: right"))),
#splitLayout(uiOutput("SSD.2.3"),uiOutput("SSD.2.6"),#cellWidths = "33%",
#            cellArgs = list(style = c("align: left","align: right"))),
uiOutput("conflevelSelects"),
uiOutput("effectSelects"),
# Free-text area the user pastes tabular data into (e.g. from Excel).
textAreaInput("pasteData",label="Data with column labels:",rows=3,
placeholder = "Click inside this box and paste data (copied from Excel or similar)."),
### always need a response variable
### all of these will initally be set to None
uiOutput("varSelects"),
#uiOutput("responsesVar"),
#uiOutput("sizesVar"),
#uiOutput("dosesVar"),
#uiOutput("speciesVar"),
#sliderInput("ECXvalue", "Effect Level", 0.05, .95, 0.50, step = 0.05),
uiOutput("scaleSelect"),
uiOutput("varLabels"),
uiOutput("respLabels"),
#uiOutput("xLabBox"),
#uiOutput("yLabBox"),
#textInput("xLab",label="Exposure label",value="Exposure Concentration"),
#textInput("yLab",label="Response label",value="Mortality Rate"),
uiOutput("graphOpts"),
# this puts out the species customizations only if SSD is chosen.
# otherwise, NULLed out
uiOutput("speciesOpts"),
### gray-scale and legend always asked
uiOutput("plotOpts"),
uiOutput("setupButton"),
uiOutput("runButton"),
h3("Results:"),
### idea is only to offer output when an analysis is complete.
### otherwise, old files could get posted. Another option
### is to use a different output file (xls and pdf) for
### each analysis, but that's not implemented yet.
splitLayout(
uiOutput("Excelbutton"),
uiOutput("PDFbutton")
),
# https://stackoverflow.com/questions/25062422/restart-shiny-session
shinyjs::useShinyjs(), # Include shinyjs in the UI
shinyjs::extendShinyjs(text = jsResetCode, functions = "reset") # Add the js code to the page
),
mainPanel(
### I think this should work for any analysis: a view of the input data before selections,
### after selections, and a preview plot, and that's it for now.
tabsetPanel(type="tabs",id = "outputTabActive",
tabPanel("Output",
h3("Input data:"),
DTOutput("DTtableRaw",width = "75%"),
h3("Analysis data:"),
DTOutput("DTtable"),
h3("Preview plot (Nonparametric fit):"),
# Plot is shown only once the server flags setup as complete.
conditionalPanel(condition="output.setupComplete",plotOutput("basePlot"))#,
),
tabPanel("Help",uiOutput("helpPanel")),
tabPanel("Contacts",
strong("Contacts:"),
tags$ul(
tags$li("Domain expert: Kristin Connors (connors.ka@pg.com)"),
tags$li("Stats expert: Gregory Carr (carr.gj@pg.com)"),
tags$li("Stats expert: Christian Geneus (geneus.cj@pg.com)"),
tags$li("Alternate: Joel Chaney (chaney.jg@pg.com)")
),
img(src = "PG-DMS.jpg",
#as a part of the webpage display, by default this file is looked for in www
height = 100)
))
### only for BMD, and that should probably be
### simplified since my old version somehow works
### outside of shiny, but not inside it. Go figure.
#uiOutput("markdown")
)
)
)
)
|
171424677c4fdeb24d69752e07ac580bdc4fa8b2
|
4f20b35b8bb927979f2bf50567de446fc4fceacd
|
/run_analysis.R
|
1fab8e2239c7584ad162c25714f33b159217a3da
|
[] |
no_license
|
markg1965/Coursera_Cleaning_Data
|
f6f4a318f8bf1b0173ac9c21aeee02ee6fab84e1
|
c4aa00a71629734a982bb4370898428dfd4d1c49
|
refs/heads/master
| 2021-01-10T20:06:56.538140
| 2015-09-23T13:00:04
| 2015-09-23T13:00:04
| 42,933,580
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,495
|
r
|
run_analysis.R
|
# Builds the tidy UCI HAR summary: merges the test and train sets, attaches
# subject IDs and activity names, keeps mean/std measurements, and returns
# the per-subject/activity/measurement mean.
# Returns: a grouped tibble with columns Subject_ID, Activity_Name,
#   Measurement_Type_Adjusted and the mean of Mean_Value for each group.
# BUG FIX: the original body was not wrapped in braces, so only the first
# statement (library(digest)) belonged to the function and every other
# statement executed at source time.
run_analysis <- function() {
  library(digest)
  library(dplyr)
  library(data.table)
  library(tidyr)
  # File pathways (hard-coded to the original author's machine layout).
  X_Test_Data_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/test/X_test.txt"
  Y_Test_Data_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/test/Y_test.txt"
  Subject_Test_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/test/subject_test.txt"
  features_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/features.txt"
  X_Train_Data_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/train/X_train.txt"
  Y_Train_Data_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/train/Y_train.txt"
  Subject_Train_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/train/subject_train.txt"
  Activity_Labels_File <- "c:/Users/Mark/Documents/UCI HAR Dataset/activity_labels.txt"
  Tidy_Data_Measurement_Names <- "c:/Users/Mark/Documents/UCI HAR Dataset/Tidy_Column_Names.csv"
  ## Load raw data into R (Tidy_Data_Measurement_Names holds the path here and
  ## is overwritten with the loaded data frame a few lines below).
  X_Test_Data <- read.table(X_Test_Data_File) ##Test set
  Y_Test_Data <- read.table(Y_Test_Data_File) ##Test labels
  Subject_Test_Data <- read.table(Subject_Test_File)
  X_Train_Data <- read.table(X_Train_Data_File) ##Training set
  Y_Train_Data <- read.table(Y_Train_Data_File) ##Training labels
  Subject_Train_Data <- read.table(Subject_Train_File)
  features_Data <- read.table(features_File, as.is = TRUE) ##Shows information about the variables used on the feature vector
  Activity_Labels_Data <- read.table(Activity_Labels_File) ##Links the class labels with their activity name
  Tidy_Data_Measurement_Names <- read.csv(Tidy_Data_Measurement_Names, header = FALSE)
  ## Transpose feature names from a 561-row column to a 561-column row so they
  ## can be bound as column headers onto the test/train data sets.
  features_Data$V1 <- NULL
  features_data_transposed <- t(features_Data)
  ## Add column names to the test and train data sets.
  Full_Test_data_set <- setNames(X_Test_Data, features_data_transposed)
  Full_Train_data_set <- setNames(X_Train_Data, features_data_transposed)
  ## Add Subject ID as the first column and match activity names with the
  ## "Y" activity codes via activity_labels.txt.
  Subject_Test_wname <- setNames(Subject_Test_Data, "Subject_ID")
  Subject_Train_wname <- setNames(Subject_Train_Data, "Subject_ID")
  Activity_Test_Data <- left_join(Y_Test_Data, Activity_Labels_Data, by = "V1")
  Activity_Train_Data <- left_join(Y_Train_Data, Activity_Labels_Data, by = "V1")
  Activity_Test_Data$V1 <- NULL
  Activity_Train_Data$V1 <- NULL
  Activity_Test_Data_wname <- setNames(Activity_Test_Data, "Activity_Name")
  Activity_Train_Data_wname <- setNames(Activity_Train_Data, "Activity_Name")
  Full_Test_data_set <- bind_cols(Subject_Test_wname, Activity_Test_Data_wname, Full_Test_data_set)
  Full_Train_data_set <- bind_cols(Subject_Train_wname, Activity_Train_Data_wname, Full_Train_data_set)
  ## Merge the test and train sets into one data set.
  Full_Data_Set <- rbind(Full_Test_data_set, Full_Train_data_set)
  ## Remove unneeded columns: the first three lines drop ranges of columns
  ## with duplicate names; the select() keeps mean/std measurements and
  ## drops "angle" columns.
  Full_Data_Set <- Full_Data_Set[,-c(463:504)]
  Full_Data_Set <- Full_Data_Set[,-c(384:425)]
  Full_Data_Set <- Full_Data_Set[,-c(305:346)]
  Tidy_dataset_Step1 <- select(Full_Data_Set, contains("subject", ignore.case = TRUE), contains("activity", ignore.case = TRUE),
                               contains("mean", ignore.case = TRUE), contains("std", ignore.case = TRUE),
                               -contains("angle", ignore.case = TRUE))
  ## Reshape to long form and attach the human-readable measurement names.
  Tidy_dataset_Step2 <- gather(Tidy_dataset_Step1, Subject_ID, Activity_Name)
  Tidy_dataset_Step2 <- bind_cols(Tidy_dataset_Step2, Tidy_Data_Measurement_Names)
  colnames(Tidy_dataset_Step2) <- c("Subject_ID", "Activity_Name", "Measurement_Type", "Mean_Value", "Measurement_Type_Adjusted")
  ## Drop the unadjusted measurement-type names (e.g. the "()-X" forms) and
  ## compute the mean per subject, activity and measurement type.
  ## (summarise_each is deprecated in current dplyr but kept for behavior.)
  Tidy_dataset_Step2$Measurement_Type <- NULL
  Tidy_dataset_Step2 <- group_by(Tidy_dataset_Step2, Subject_ID, Activity_Name, Measurement_Type_Adjusted) %>% summarise_each(c("mean"))
  ## Return the tidy summary data set.
  Tidy_dataset_Step2
}
ad61d81f8746b2cac1dafee46485bc60da7c870e
|
18e3cca024a121995bbe8068f0709593d716f378
|
/man/place_spec_on_tree.Rd
|
6cbcd262b3ff6d44dc70c7deb41616e98ca976d4
|
[] |
no_license
|
annakat/evolspec
|
56febd447fde3cafe7a51c4d552a14afc62a66ee
|
ad74616e56c8a5be7272ea0b136a9f4c2d189010
|
refs/heads/master
| 2021-01-12T17:00:58.586504
| 2016-10-16T15:07:47
| 2016-10-16T15:07:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 610
|
rd
|
place_spec_on_tree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/place_spec_on_tree.R
\name{place_spec_on_tree}
\alias{place_spec_on_tree}
\title{Places an unlabeled spectrum on a tree}
\usage{
place_spec_on_tree(tree, spec, model = "BM", method = "ml")
}
\arguments{
\item{tree}{Tree of class "phylo".}
\item{spec}{spectral dataset, where the first column contains species names and the remaining columns are data. The spectra with unknown placement must be included in this dataset.}
\item{model}{Only "BM" available for now.}
\item{method}{Choose "ml" or "bayes"}
}
\description{
Places an unlabeled spectrum on a tree
}
|
9429fdfbfd54c27c51560b57973db4c0a8724bec
|
ff47639ba6d38d47b5224276022c200563e639ac
|
/main.R
|
d17434a8161342b153db27309f74588540093025
|
[] |
no_license
|
markvregel/greenest_city
|
36d9bba7b048e1b46b665b67473b6229347fba29
|
27ad8767e98ab8b626e33735c3020bdbcd6f9315
|
refs/heads/master
| 2021-01-10T07:14:23.395572
| 2016-01-13T08:56:09
| 2016-01-13T08:56:09
| 49,502,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,363
|
r
|
main.R
|
# Hakken en zagen
# Mark ten Vregelaar and Jos Goris
# 12 January 2016
# Greenest city: find the city with the highest NDVI
# Start with empty environment
# NOTE(review): rm(list=ls()) in a script is generally discouraged - it
# silently wipes the caller's workspace.
rm(list=ls())
# Get required libraries
library(raster)
library(rgdal)
library(rasterVis)
# Read R Code from function in map R
source("R/NDVIextract.R")
#set input and output folder
ifolder <- "./data/"
ofolder <- "./output/"
dir.create(ifolder, showWarnings = FALSE)
dir.create(ofolder, showWarnings = FALSE)
# Read R Code from function in map R
# Download data -----------------------------------------------------------------
# Download NDVI data
NDVIURL <- "https://github.com/GeoScripting-WUR/VectorRaster/raw/gh-pages/data/MODIS.zip"
inputZip <- list.files(path=ifolder, pattern= '^.*\\.zip$')
# NOTE(review): method='wget' requires wget on the system PATH - confirm
# this is intended rather than the default download method.
if (length(inputZip) == 0){ ##only download when not alrady downloaded
download.file(url = NDVIURL, destfile = 'data/NDVI_data.zip', method = 'wget')
}
# Download municipality boundaries
nlCity <- raster::getData('GADM',country='NLD', level=2,path=ifolder)
# Data pre-processing -----------------------------------------------------------
unzip('data/NDVI_data.zip', exdir=ifolder) # unzip NDVI data
NDVIlist <- list.files(path=ifolder,pattern = '+.grd$', full.names=TRUE) # list NDVI raster
NDVI_12 <- stack(NDVIlist) # NDVI rasters
nlCity@data <- nlCity@data[!is.na(nlCity$NAME_2),] # remove rows with NA
nlCity_sinu <- spTransform(nlCity, CRS(proj4string(NDVI_12))) # change projection municipality data
NDVI_12 <- mask(NDVI_12,nlCity_sinu)# mask the NDVI stack to municipality data
# Select and calculate NDVI January, August and the mean of the year
NDVI_Jan <- NDVI_12[['January']]
NDVI_Aug <- NDVI_12[['August']]
NDVI_mean <- calc(NDVI_12,mean)
# Calculate NDVI for the municipalitys and find greenest cities--------------------
# NDVIextract() comes from the sourced R/NDVIextract.R (not visible here);
# its second column is used as the per-municipality NDVI value below.
Jan <- NDVIextract(NDVI_Jan,nlCity_sinu)
Aug <- NDVIextract(NDVI_Aug,nlCity_sinu)
Mean <- NDVIextract(NDVI_mean,nlCity_sinu)
# Combine result into one dataframe
cities <- cbind(nlCity_sinu$NAME_2, Jan[2],Aug[2],Mean[2])
colnames(cities)[1]<- 'City'
colnames(cities)[4]<- 'Mean'
# find the greenest cities
maxJan_spatial<- nlCity_sinu[cities$January==max(cities$January),]
maxAug_spatial<- nlCity_sinu[cities$August==max(cities$August),]
maxMean_spatial<- nlCity_sinu[cities$Mean==max(cities$Mean),]
# add NDVI data to spatial of the data municipalities
nlCity_sinu$Jan<-cities$Jan
nlCity_sinu$Aug<-cities$Aug
nlCity_sinu$Mean<-cities$Mean
# Visualization-----------------------------------------------------------------
# Shared colour ramp: red (low NDVI) through dark green (high NDVI).
# (Fix: corrected the "Greensest" typo in all three plot titles and hoisted
# the duplicated palette construction.)
ndvi_palette <- colorRampPalette(c("red","yellow","green","darkgreen"))(255)
# One choropleth per period; the greenest municipality is outlined in red.
spplot(nlCity_sinu, zcol = "Jan",col.regions= ndvi_palette
       ,main=paste('NDVI January per municipality\n Greenest City:', maxJan_spatial$NAME_2),
       sp.layout = list('sp.polygons', maxJan_spatial,col="red",first=FALSE,lwd=3))
spplot(nlCity_sinu, zcol = "Aug",col.regions= ndvi_palette
       ,main=paste('NDVI August per municipality\n Greenest City:', maxAug_spatial$NAME_2),
       sp.layout = list('sp.polygons', maxAug_spatial,col="red",first=FALSE,lwd=3))
spplot(nlCity_sinu, zcol = "Mean",col.regions= ndvi_palette
       ,main=paste('Mean NDVI per municipality\n Greenest City:', maxMean_spatial$NAME_2),
       sp.layout = list('sp.polygons', maxMean_spatial,col="red",first=FALSE,lwd=3))
|
a0a453a5ed6ea76b8e1f3e7b41fe26993fb226d6
|
00a092422e4a8ae30fe356324c2be1e26ab0e1c1
|
/R/compress_space.R
|
beb9bfacf2f6725eb3955a1a721a0ec9e6dc2a75
|
[] |
no_license
|
cran/shattering
|
103c1f844ba0c1cae5fa529325135a696f492842
|
0ba348d25ac129d4884fe4696f506b55195bbadd
|
refs/heads/master
| 2023-07-15T15:38:16.930545
| 2021-08-21T12:50:02
| 2021-08-21T12:50:02
| 298,777,328
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,644
|
r
|
compress_space.R
|
#' Function to compress the space based on the equivalence relations.
#'
#' This function compresses the input space according to the equivalence relations, i.e., it compresses whenever an example has other elements inside its open ball but having the same class label as the ball-centered instance.
#' @param M sparse matrix representing all equivalence relations
#' @param Y numerical vector indentifying the output space of variables
#' @return A list containing sparse vectors (from package slam) identifying the equivalence relations
#' @keywords compress space
#' @export
compress_space <- function(M, Y) {
	# Repeat full passes over M until one pass makes no merge (fixed point).
	flag = TRUE
	while (flag) {
		flag = FALSE
		row = 1
		# NOTE(review): the condition is `row < length(M)`, so the last element
		# of M is never used as a merge seed - confirm this is intentional.
		while (row < length(M)) {
			# Only rows with at least one neighbor besides themselves trigger a merge.
			if (!is.null(M[[row]]) && length(M[[row]]$i) > 1) {
				# row will represent the element and its neighbors
				connect.to = setdiff(M[[row]]$i, row) # [ 2 3 ]

				# Matrix reduction: rebuild M without the merged elements,
				# remapping every surviving index past a removed one.
				reduced = list()
				counter = 1
				for (i in 1:length(M)) {
					if (!is.null(M[[i]])) {
						# Drop references to the elements being merged away.
						ids = setdiff(M[[i]]$i, connect.to)
						# Keep this row if it still references something after removal.
						if ((length(ids) > 0 && !(i %in% connect.to)) || (length(ids) > 1 && (i %in% connect.to))) {
							# Shift each surviving index down by the number of removed
							# indices that precede it, so indices stay contiguous.
							red = as.numeric(lapply(ids, function(el) { return (el - sum(el > connect.to)) }))
							len = M[[i]]$dim - length(connect.to)
							reduced[[counter]] = slam::simple_sparse_array(i=red, v=rep(1, length(red)), dim=len)
							counter = counter + 1
						}
					}
				}

				# Copying list
				M = reduced

				# Setting this flag to carry on operating
				flag = TRUE

				# Getting back to avoid loosing elements
				row = row - length(connect.to)
				if (row < 1) row = 1
			}
			row = row + 1
		}
	}

	return (M)
}
|
cb07f958d32904a13c14a75496af0d2a1f7f685b
|
27127fa3729cca6769185a3468a1311de123d260
|
/Git.code_dHIT.upload.R
|
bc6970c377dba4bfc84addcef61b5768a0a845c1
|
[] |
no_license
|
alexachivu/dHITpaper_2021
|
4f71b681e1442d3aaa323962d79e43be2b993446
|
1539452c509a5f6ccc0197cf9308d308e11ee43c
|
refs/heads/main
| 2023-04-17T19:15:01.033368
| 2021-11-21T03:58:59
| 2021-11-21T03:58:59
| 429,227,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,220
|
r
|
Git.code_dHIT.upload.R
|
#Code to upload on git: --> dHIT paper
### Packages used: rtracklayer, GenomicRanges, BRGenomics, DEseq2, ggplot.
#1 .Function: Imports BigWigs into R as GRanges object
#Parameters:
# BW_plus.path = path to positive/plus strand PROseq
# BW_minus.path = path to negative/minus strand PROseq
# coord.list = GRanges object, list of coordinates to read PROseq within
#Output: Granges of PROseq reads within the given coordinates list
Import.BigWigs = function(BW_plus.path, BW_minus.path, coord.list = c() ){
#Import PROseq BigWig files
PRO_plus = import.bw(BW_plus.path, which = coord.list);
PRO_minus = import.bw(BW_minus.path, which = coord.list);
#Set the strand
strand(PRO_plus) = "+";
strand(PRO_minus) = "-";
#Make all PROseq score positive
PRO_minus$score = (abs(PRO_minus$score));
PRO_plus$score = (abs(PRO_plus$score));
#Merge PROseq plus and minus strands & convert to BRG
PROseq = makeGRangesBRG( append( PRO_plus, PRO_minus ) );
return(PROseq)
}
#2 .Function: Counts reads that fall within a given set of coordinates
#Parameters:
#   BW_plus.path = path to positive/plus strand PROseq
#   BW_minus.path = path to negative/minus strand PROseq
#   coord.list = GRanges object, list of coordinates to read PROseq within (all entries need to be resized to the same width)
#   binsize = integer, bin size to count signal within coord.list
#   field = field corresponding to signal/score in PROseq GRanges (column in the PROseq GRanges object imported from paths)
#   FUN = function to use for binning the reads
#Output: GRanges of PROseq reads within the given coordinates list, binned in bins of width equal to the binsize
Count.reads = function( BW_plus.path, BW_minus.path, coord.list, binsize = 10, field = "score", FUN = "sum"){
  # Silence warnings for this call only and restore the previous setting on
  # exit (the original disabled warnings globally and never restored them).
  old_opts = options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  # BUG FIX: the original read the undefined global `gene.list` here (and in
  # getCountsByPositions below) instead of the `coord.list` argument.
  #Import PROseq BigWig files
  PRO_plus = import.bw(BW_plus.path, which = coord.list)
  PRO_minus = import.bw(BW_minus.path, which = coord.list)
  #Set the strand
  strand(PRO_plus) = "+"
  strand(PRO_minus) = "-"
  #Merge PROseq plus and minus strands & convert to BRG
  PROseq = makeGRangesBRG( append( PRO_plus, PRO_minus ) )
  # Keep a non-negative copy of the score for use via the `field` argument.
  PROseq$Norm.score = abs(PROseq$score)
  #Count signal within each entry of coord.list, binned by `binsize`.
  PROseq.cts = getCountsByPositions(PROseq, coord.list, binsize = binsize, FUN = FUN, field = field)
  return(PROseq.cts)
}
#3 .Function: Runs DESeq2 on a list of PROseq samples
#Parameters:
#   PROseq.list = GRangesList of PROseq samples (e.g. replicates named A_rep1, A_rep2, B_rep1, ...)
#   ranges = GRanges of regions to run DESeq2 on (must carry a Gn.name column)
#   spikeins.list = optional spike-in / scaling size factors; if empty, DESeq2
#                   applies its own normalization
#   ncores = number of cores to use
#   field = metadata field holding the PROseq signal
#Output: list of two elements - the raw DESeq2 results and the results merged
#        with the range/gene annotation (rows containing NAs dropped)
Run.DEseq2 = function( PROseq.list, ranges, spikeins.list = c(), ncores = 1, field = "score" ){
  # Assemble the DESeq2 dataset from the PROseq samples over `ranges`.
  deseq_input = getDESeqDataSet(PROseq.list, ranges, gene_names = ranges$Gn.name,
                                ncores = ncores, field = field)
  # Contrast condition "B" against "A" at alpha = 0.05, with optional
  # spike-in size factors.
  deseq_res = getDESeqResults(deseq_input, "B", "A",
                              sizeFactors = spikeins.list, alpha = 0.05)
  # Attach the range/gene annotation to the result table by row name,
  # then drop any rows containing NAs.
  annotated = merge(as.data.frame(deseq_res), as.data.frame(ranges),
                    by = "row.names", all = TRUE)
  annotated = na.omit(as.data.frame(annotated))
  # Return both the raw results and the annotated, NA-free table.
  return(list(deseq_res, annotated))
}
#4 .Function: Makes custom MA plot for given DEseq output data
#Parameters:
# output_Run.DEseq2 = DEseq2 output (can be used with the Run.DEseq2 output)
# Fig.type = figure type to write the plot to (can be png, svg, bit, etc)
# fig.width = figure width
# fig.height = figure height
#Output: MA plot with density map
#Draw MA plots for a DESeq2 result.
# output_Run.DEseq2 : list as returned by Run.DEseq2; element [[1]] must be
#                     the raw DESeq2 results object
# file.type         : Jupyter plot MIME type (e.g. "image/png")
# pl.height, pl.width : figure size
#Side effects: the plotMA() call draws a generic MA plot to the active
#device, and options() changes global Jupyter plotting state without
#restoring it (NOTE(review): consider saving/restoring the old options).
#Returns a ggplot MA plot with point-density coloring (the last expression).
Plot.MAs = function(output_Run.DEseq2, file.type = "image/png", pl.height = 4, pl.width = 4){
options(jupyter.plot_mimetypes = file.type, repr.plot.width = pl.width, repr.plot.height = pl.height);
#Plot a generic MA graph
plotMA( na.omit(output_Run.DEseq2[[1]]), alpha = 0.05 )
#Plot a more complex MA, with heatmap
#(this ggplot object is the function's return value)
ggplot( as.data.frame( na.omit(output_Run.DEseq2[[1]]) ), mapping = aes(x=baseMean, y=log2FoldChange) ) +
geom_point() +
scale_x_log10() +
scale_y_continuous() +
geom_pointdensity(adjust = 0.8, size = 1.5) +
scale_color_viridis()+
xlab("baseMean") +
ylab("log2FC") +
theme_classic() +
geom_hline(yintercept = 0, alpha=0.5, linetype="dashed", col = "red", size = 1)
}
#5. Heatmap function
#Collapse a count matrix to `nRows` rows for plotting: consecutive chunks
#of floor(nrow/nRows) input rows are summarized column-wise with FUN, and
#any chunks beyond the first nRows are discarded (same semantics as the
#original foreach-based version).
# CountMatrix : numeric matrix (rows = regions, columns = positions)
# nRows       : number of output rows
# FUN         : per-column summary (function or function name), default "mean"
# ...         : forwarded to FUN (e.g. na.rm = TRUE)
#FIX: removed a no-op self-assignment and the undeclared foreach/iterators
#dependency; the chunking and summarization logic are unchanged.
LinearHeatmap = function( CountMatrix, nRows, FUN='mean', ... ) {
  cksz <- floor(nrow(CountMatrix) / nRows)
  #Chunk id for every input row: 1,1,...,2,2,... (last chunk may be short)
  grp <- ceiling(seq_len(nrow(CountMatrix)) / cksz)
  #Column-wise summary of each chunk, producing one result row per chunk
  chunk.rows <- lapply(
    split(seq_len(nrow(CountMatrix)), grp),
    function(rows) apply(CountMatrix[rows, , drop = FALSE], 2, FUN, ...)
  )
  linMat <- do.call(rbind, chunk.rows)
  rownames(linMat) <- NULL
  return( linMat[1:nRows, ] )
}
#6 .Function: Takes in a matrix of counts and plots a heatmap
#Parameters:
# Signal.cts = matrix of counts (can use output from Count.reads)
# n.lines = number of lines to plot in the heatmap (the smaller the number, the lower the resolution)
# color = heatmap desireg max color
# Fig.type = figure type to write the plot to (can be png, svg, bit, etc)
# fig.width = figure width
# fig.height = figure height
# Fig.title = figure title
# data.scale = adjust outliers in data (all values > data.scale will be reset to data.scale)
# l.max = color limits for scale_fill_gradient2
#Output: Granges of PROseq reads within the given coordinates list, binned in bins of width equal to the binsize
#Plot a row-normalized signal heatmap around TSSs.
# Signal.cts : matrix of counts (e.g. from Count.reads); rows = regions
# n.lines    : number of heatmap rows after collapsing with LinearHeatmap
# color      : NOTE(review): currently unused -- it only appeared in the
#              commented-out low/high gradient below
# Fig.type, fig.width, fig.height : Jupyter figure options
# Fig.title  : plot title
# data.scale : values above this are clipped to it (outlier control)
# l.max      : upper limit of the fill color scale
#Side effects: options() and theme_set() change global plotting state
#without restoring it.
#Returns a ggplot heatmap object.
Plot.heatmap = function(Signal.cts, n.lines = 100, color = "#FF0000", Fig.type = "image/png", fig.width = 4, fig.height = 3, Fig.title = "", data.scale = 0.1, l.max = 0.1){
#Figure characteristics
options(jupyter.plot_mimetypes = Fig.type, repr.plot.width = fig.width, repr.plot.height = fig.height );
theme_set(
theme_classic() +
theme(
plot.title = element_text(color="black", size=12, face="bold"),
axis.title.x = element_text(color="black", size=10, face="bold"),
axis.title.y = element_text(color="black", size=10, face="bold")
) +
theme(axis.title.y = element_text(margin = margin(t = 0, r = 30, b = 0, l = 0))) +
theme(axis.title.x = element_text(margin = margin(t = 30, r = 0, b = 0, l = 0))) +
theme(axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x = element_text(face="bold", color="black", size=14),
axis.line = element_line(colour = "white")) +
theme(plot.title = element_text(hjust = 0.5))
);
#############################################################################################################
#Working with the data
#Remove rows with no counts
hmap = Signal.cts[rowMeans(Signal.cts)>0,];
#Row-normalize signals
#(rows whose max is 0 would yield NaN; handled by the "NaN" reset below)
hmap = hmap / apply(hmap, 1, max);
linmap = LinearHeatmap(hmap, n.lines);
#Set limits for the data
#NOTE(review): the string comparison `== "NaN"` works via coercion but
#is.nan() would be the conventional test
linmap[linmap == "NaN"] = 0;
linmap[linmap > data.scale] = data.scale;
rownames(linmap) = NULL;
#Long format for ggplot: one row per (TSS row, position bin) cell
df.0.BW <- reshape2::melt(linmap, varnames = c("TSS", "position"))
#gg_plot = ggplot(df.0.BW, aes(x = position, y = TSS, fill = log10(1+10*value))) +
gg_plot = ggplot(df.0.BW, aes(x = position, y = TSS, fill = log10(1+value))) +
stat_density2d() +
geom_tile() +
ggtitle(Fig.title) +
xlab("Distance from TSS (kb)") +
ylab(" ") +
scale_fill_gradient2(
low = muted("red"),
mid = "white",
high = muted("blue"),
#midpoint = 0.01,
#low="white", high = color,
na.value = "grey50", guide = "colourbar",
aesthetics = "fill", limits = c(0, l.max) )
return(gg_plot)
}
#7. Compute linear regression
#Define regions that background regions for the ChIP of interest (example for H3K27ac below).
# The rationale for this analysis follows the mathematical models presented in: Bonhoure et al., 2014
#("Quantifying ChIP-seq data: a spiking method providing an internal reference for sample-to-sample normalization")
## I. `Remove regions that may contain H3K27ac signal` :
To achieve this, I masked all coordinates corresponding to H3K27ac ENCODE broad peaks from hg19 (+ the adjacent regions)
bedtools complement -i wgEncodeBroadHistoneK562H3k27acStdPk.broadPeak.sorted -g ../hg19/hg19.sort.chromInfo > hg19.noENCODEpeaks.K27ac.bed
## II. `Split the untranscribed chromatin (hg19) into 5kb non-overlapping windows`:
*bedtools makewindows -b hg19.noENCODEpeaks.K27ac.bed -w 5000 > hg19.noENCODE.5kb.nonOverlspWin.bed
The output "hg19.noENCODE.5kb.nonOverlspWin.bed" can be found in:
../ChIPseq_Trp/NormUsing.ReadsIn.Over.ReadsOutsidePeaks_April2020/K27ac
<br>
- Output looks like:
chr1 0 5000
chr1 5000 10000
chr1 10000 11869 #--> Certain windows are < 5kb.
chr1 31109 34554 # --> To address this, I will import this file into R and remove all windows < 5kb
chr1 36081 41081 # --> A total of 284216 windows were removed
chr1 41081 46081
chr1 46081 51081
chr1 51081 52473
chr1 54936 59936
chr1 59936 62948
<br>
The name of the file containing only untranscribed chromatin (w/o H3K27ac) binned in 5kb non-overlapping windows is: hg19.noENCODE.5kb.nonOverlspWin.fixed.bed
<br>
## III. `Count number of ChIP-seq reads falling in each 5kb window`:
I took H3K27ac ChIP-seq as example and overlapped each ChIPseq BAM with hg19.5kb.nonOverlspWin.bed, counting signal in each bin.
ex: bedtools coverage -a hg19.noENCODE.5kb.nonOverlspWin.fixed.bed -b K27ac.bam* > K27ac.coverage5kb.nonOverlapWin.bed
bedtools coverage -a hg19.noENCODE.5kb.nonOverlspWin.fixed.bed -b ./mergedMasked_K27ac_0h_br1.bam > K27ac_0h_br1.coverage5kb.noENCODE.bed &
<br>
<br>
- Output looks like:
[agc94@cbsudanko WCE]$ head K27ac_0h_br1.coverage5kb.noENCODE.bed
chr start end width strand depth nrBases_at_depth width.of.window proportion.Of.B.at.depth
chr1 0 5000 5001 * 0 0 5000 0.0000000
chr1 5000 10000 5001 * 0 0 5000 0.0000000
chr1 36081 41081 5001 * 0 0 5000 0.0000000
chr1 41081 46081 5001 * 0 0 5000 0.0000000
chr1 46081 51081 5001 * 0 0 5000 0.0000000
chr1 54936 59936 5001 * 12 456 5000 0.0912000
chr1 63887 68887 5001 * 12 407 5000 0.0814000
chr1 70008 75008 5001 * 8 252 5000 0.0504000,
where:
A feture = bed file of coordinates
B feature = BAM files
depth = nr reads in each window
nrBases_at_depth = nr bases in each window
width.of.window = width of each window
proportion.Of.B.at.depth = nrBases_at_depth/5000
<br>
<br>
# ***Plot ChIPseq as a function of WCE signal in these background regions. Get linear regression coefficients to use for ChIPseq normalization.***
#8. Function: Count reads in binned windows (used for CUT&RUN)
#Parameters:
# mark.BAM = GRanges object, Rdata of BAM file (each entry contains start and end coordinates for each read in the BAM)
# regions = regions of interest to count reads within
# spike = spike-in/ scaling factor (total number of spike-ins in a given sample)
# binsize = integer, bin size to count signal within regions
# field = field corresponding to signal/score in PROseq GRanges (column in the PROseq GRanges object imported prom paths)
#Output: matrix of spike-in normalized reads within given regions
#Count reads in binned windows (used for CUT&RUN).
# mark.BAM : GRanges of aligned reads (one range per read)
# regions  : GRanges of windows to count within
# spike    : spike-in scaling factor; the count matrix is divided by it
# binsize  : bin width (bp)
# field    : NOTE(review): this parameter is currently ignored -- the
#            getCountsByPositions() call below hard-codes field="score",
#            which matches the column produced by coverage(); confirm
#            intent before honoring the argument.
#Returns a matrix of spike-in-normalized counts (rows = regions, cols = bins).
Count.reads.InBins = function(mark.BAM, regions, spike = 1, binsize = 1, field = "score"){
#Resize each read from BAM to 1bp (center position of each read)
mark.BAM.res = resize(mark.BAM, width = 1, fix = "center");
#Compute coverage of the read centers.
#NOTE(review): weight="count" assumes mark.BAM carries a `count` metadata
#column -- verify against the Rdata objects this is called with.
mark.BAM.res = GRanges(coverage(mark.BAM.res, weight="count"));
#Keep covered positions only and convert to basepair resolution
mark.BAM.res = makeGRangesBRG(mark.BAM.res[mark.BAM.res$score > 0]);
#Count DHS signal within each CTCF peak
mark.BAM.score = getCountsByPositions(mark.BAM.res, regions, binsize = binsize, field="score");
return(mark.BAM.score/spike)
}
#9. Function: Count and normalize reads in binned windows (used for ChIPseq)
#Parameters:
# ChIP.Rdata.path = path to the ChIPseq Rdata containing GRanges object of BAM file (each entry contains start and end coordinates for each read in the BAM)
# WCE.Rdata.path = path to the WCE Rdata containing GRanges object of BAM file (each entry contains start and end coordinates for each read in the BAM)
# coord.path = path to regions of interest to count reads within
# up = bases upstream from the TSS of coord.path
# down = bases downstream from the TSS of coord.path
# binsize = integer, bin size to count signal within regions
# field = field corresponding to signal/score in PROseq GRanges (column in the PROseq GRanges object imported prom paths)
# a,b = linear regression coeficients (obtained as decribed in 7.)
# spike = spike-in/ scaling factor (total number of spike-ins in a given sample)
#Output: matrix of normalized ChIPseq reads within given regions (coord.path)
#Count ChIP-seq reads around TSSs and normalize them against whole-cell
#extract (WCE) using a precomputed background linear regression
#(Bonhoure et al., 2014 rationale; see notes above).
# ChIP.Rdata.path : Rdata file whose `reads` object is a GRanges of ChIP reads
# WCE.Rdata.path  : Rdata file whose `reads` object is a GRanges of WCE reads
# coord.path      : table of TSS coordinates (header required)
# up, down        : bases upstream/downstream of each TSS to include
# binsize         : bin width (bp)
# field           : signal column name forwarded to Count.reads.InBins
# a, b            : slope/intercept of the background regression (section 7)
# spike           : spike-in scaling factor
#Returns the subsampled matrix from metaSubsampleMatrix() of
#spike-in-normalized, background-subtracted signal.
Norm.ChIP = function(ChIP.Rdata.path, WCE.Rdata.path, coord.path, up = 2000, down = 2000, binsize = 100, field = "score",
                     a = 0.87326, b = 6.87326, spike = 3002){
  #Read coordinates of interest (one row per max TSS).
  #FIX: the original passed an undefined object `promoters.maxTSS` to
  #makeGRangesFromDataFrame(); it now uses the table just read.
  coord.df <- read.table(coord.path, header = TRUE)
  coord.gr <- makeGRangesFromDataFrame(coord.df)
  #Expand each base-pair TSS to a window of `up` + `down` bases.
  #A distinct local name avoids shadowing GenomicRanges::promoters().
  prom.gr <- promoters(coord.gr, upstream = up, downstream = down)
  #load() injects a `reads` object into this environment
  load(ChIP.Rdata.path)
  ChIP <- reads
  rm(reads)
  #Whole cell extract (WCE) reads
  load(WCE.Rdata.path)
  WCE <- reads
  rm(reads)
  #Size-select mono-nucleosome fragments (100-200 bp)
  ChIP <- ChIP[width(ChIP) >= 100 & width(ChIP) <= 200]
  WCE <- WCE[width(WCE) >= 100 & width(WCE) <= 200]
  #Raw (unscaled) binned counts around each TSS window
  ChIP.perBin <- Count.reads.InBins(ChIP, prom.gr, spike = 1, binsize = binsize, field = field)
  WCE.perBin <- Count.reads.InBins(WCE, prom.gr, spike = 1, binsize = binsize, field = field)
  #Theoretical ChIP signal predicted from WCE by the background regression
  ChIP.theoretic.perBin <- (a * WCE.perBin) + b
  #Residual = enrichment above background; negative residuals clipped to 0
  res <- ChIP.perBin - ChIP.theoretic.perBin
  res[res < 0] <- 0
  #Spike-in normalize (divide by total spike-in reads) and subsample to
  #compute confidence intervals
  res.subs <- metaSubsampleMatrix(res / spike)
  return(res.subs)
}
#10. Function: Find ChIP-seq reads that fall within peaks
#Parameters:
# peaks.GRanges = GRanges object containing coordinated of ChIPseq peaks called with macs2
# *This function requires the file encoding peaks coordinated (peaks.GRanges) to include a unique identified column with the name of each peak
# ChIP.GRanges = GRanges object containing ChIPseq signal (signal needs to have already been adjusted for differences in background)
# spike = spike-in/ scaling factor (total number of spike-ins in a given sample)
# sumReads.colName = column name to append with the number of reads in each peak
#Output: matrix of normalized reads
#Sum spike-in-normalized ChIP-seq signal within each called peak.
# peaks.GRanges    : GRanges of peaks; must carry a unique `transcript_id`
#                    metadata column naming each peak
# ChIP.GRanges     : GRanges of ChIP signal with a `score` column
#                    (already background-adjusted)
# spike            : spike-in scaling factor (scores are divided by it)
# sumReads.colName : name given to the peak-identifier column of the output
#Returns a data.frame with one row per peak (plus one "nothing" row for
#signal outside every peak, as in the original full join): peak metadata
#plus the total normalized signal (`score`); peaks with no overlapping
#reads get score 0.
GetSignal.inPeaks = function(peaks.GRanges, ChIP.GRanges, spike = 1, sumReads.colName="New.column"){
  #Spike-in normalize the signal
  ChIP.GRanges$score <- ChIP.GRanges$score / spike
  #Label every signal range with the peak it overlaps; ranges overlapping
  #no peak keep the placeholder label "nothing"
  hits <- findOverlaps(ChIP.GRanges, peaks.GRanges)
  ChIP.GRanges$transcript_id <- "nothing"
  #FIX: use the documented accessors instead of reaching into @from/@to slots
  ChIP.GRanges$transcript_id[queryHits(hits)] <-
    as.character(peaks.GRanges$transcript_id[subjectHits(hits)])
  #Total normalized signal per label
  reads.df <- data.frame(transcript_id = ChIP.GRanges$transcript_id,
                         score = ChIP.GRanges$score)
  aggregated.peaks <- aggregate(score ~ transcript_id, data = reads.df, FUN = sum)
  #Full join onto the peak table so peaks with no reads are retained...
  mrg.data2b <- merge(aggregated.peaks, as.data.frame(peaks.GRanges),
                      by = "transcript_id", all = TRUE)
  #...and give those peaks a score of 0 instead of NA
  #(vector-index assignment is safe even when no score is NA)
  mrg.data2b$score[is.na(mrg.data2b$score)] <- 0
  #Rename the identifier column as requested by the caller
  names(mrg.data2b)[1] <- sumReads.colName
  return(mrg.data2b)
}
|
10af9480f4a212ac9c045aadf0cb683db8b69de1
|
7f3f07533ba8d565b076a24a7fff3d4d05e3d417
|
/rankhospital.R
|
6da4a014f427716122e4c1d4b50f8d59538a1483
|
[] |
no_license
|
brazeiro63/ProgrammingAssignment3
|
bf2d07e91e656d7f1a609a6e3f5a639a34afe930
|
dd87558af90c75f0a3fb04b9a210876acf255183
|
refs/heads/master
| 2021-01-02T09:15:00.830019
| 2015-07-25T17:04:01
| 2015-07-25T17:04:01
| 39,693,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
rankhospital.R
|
## Return the name of the hospital with a given 30-day mortality rank in a
## state.
##
## state   : two-letter state abbreviation (case-insensitive)
## outcome : outcome name, e.g. "heart attack", "heart failure", "pneumonia"
##           (case-insensitive; matched against the CSV column names)
## num     : rank to return - an integer, "best", or "worst".  A rank past
##           the number of ranked hospitals yields NA.
##
## Reads "outcome-of-care-measures.csv" from the working directory.
rankhospital <- function(state, outcome, num = "best"){
    ## Read outcome data
    outcomeData <- read.csv("outcome-of-care-measures.csv",
                            colClasses = "character")
    ## Normalize the outcome string to the Title.Case.Dotted form used in
    ## the CSV column names (e.g. "heart attack" -> "Heart.Attack")
    outcomeVec <- strsplit(outcome, split = " ")[[1]]
    outcomeCap <- paste(toupper(substring(outcomeVec, 1, 1)),
                        substring(outcomeVec, 2), sep = "", collapse = " ")
    outcomeIn <- gsub(" ", ".", outcomeCap)
    stateIn <- toupper(state)
    ## Check that state and outcome are valid
    if (!any(outcomeData$State == stateIn)){
        stop("invalid state")
    } else if (!any(grepl(outcomeIn, colnames(outcomeData),
                          ignore.case = TRUE))){
        stop("invalid outcome", call. = TRUE)
    }
    ## Column holding the 30-day death rate for the requested outcome
    colName <- paste0("Hospital.30.Day.Death..Mortality..Rates.from.", outcomeIn)
    inState <- outcomeData$State == stateIn
    hospitalList <- outcomeData[inState, c("Hospital.Name", colName)]
    ## "Not Available" entries become NA and are dropped from the ranking
    hospitalList[, colName] <- suppressWarnings(as.numeric(hospitalList[, colName]))
    hospitalClean <- hospitalList[!is.na(hospitalList[, colName]), ]
    ## Rank ascending by mortality rate, breaking ties alphabetically.
    ## FIX: the original passed decreasing = c(FALSE, FALSE); a length > 1
    ## `decreasing` is only legal for method = "radix", and order() picks
    ## shell sort for character inputs, so the call errored.  A scalar
    ## FALSE (the default ascending order) gives the intended ranking.
    ranking <- hospitalClean[order(hospitalClean[, 2], hospitalClean[, 1],
                                   decreasing = FALSE), ]
    if (num == "best"){
        ind <- 1
    } else if (num == "worst"){
        ind <- nrow(ranking)
    } else {
        ind <- num
    }
    ## Out-of-range ranks index past the data frame and return NA
    return(ranking[ind, "Hospital.Name"])
}
|
c3ee75600a12138104c79f013a9d9170a1658dc3
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query55_query04_1344n/query55_query04_1344n.R
|
486ee3706eb37216d22fb93b75c3919c1fe8172c
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query55_query04_1344n.R
|
7c0d7227c72298ccca274f5958638700 query55_query04_1344n.qdimacs 331 555
|
5140d4171f164008230dd2a99becf2a37741bc69
|
b5904ad0771f8ab6f49ad4af85f64241b51f8396
|
/code (2).r
|
e5a83580bb5760bc6c7de2e95aa4e5501f151a5c
|
[] |
no_license
|
jmbuecken/macroeconometrics
|
8b4ec76fbc7dd27000ae943dc971642e5a7cfcc0
|
3590c9bf3a21722ee8211aa33f95b819543cdfc3
|
refs/heads/main
| 2023-06-02T14:11:29.904925
| 2021-06-25T14:43:40
| 2021-06-25T14:43:40
| 373,765,742
| 0
| 2
| null | 2021-06-15T15:39:40
| 2021-06-04T07:59:16
|
R
|
UTF-8
|
R
| false
| false
| 1,226
|
r
|
code (2).r
|
### Macroeconomics Project ###
## Brent crude oil: daily FRED series -> monthly averages, with level,
## first-difference, and lag plots.
## Cleanup: removed `rm(list = ls())` and attach()/detach() -- clearing the
## caller's workspace and attaching data frames are side effects a script
## should avoid; columns are referenced explicitly instead.
library(tidyverse)  # library() fails loudly on a missing package; require() only returns FALSE
# read data
# Data: https://fred.stlouisfed.org/series/DCOILBRENTEU
# choose greatest timeframe!
oil.df <- read_csv("DCOILBRENTEU.csv")
colnames(oil.df) <- c("Date", "Price")
oil.df$Price <- as.numeric(oil.df$Price)  # non-numeric placeholders become NA
all(!is.na(oil.df)) # Table has NAs
dim(oil.df)
oil.df <- na.omit(oil.df)
all(!is.na(oil.df)) # Table has no NAs
dim(oil.df)
glimpse(oil.df)
# monthly averages instead of daily data
oil.df$Month <- months(oil.df$Date, abbreviate = TRUE)
oil.df$Year <- format(oil.df$Date, format = "%Y")
oil.monthly <- aggregate(Price ~ Month + Year, oil.df, mean)
glimpse(oil.monthly)
# rebuild a proper Date (first day of each month) from the Month/Year labels
oil.monthly$Date <- as.Date(paste(oil.monthly$Month, oil.monthly$Year, "1"),
                            format = "%b %Y %d")
oil.monthly <- select(oil.monthly, "Price", "Date")
glimpse(oil.monthly)
oil.monthly <- arrange(oil.monthly, Date)
# price level over time
plot(oil.monthly$Date, oil.monthly$Price, type = "l")
# First Order Differences -- diff() replaces the manual shifted-index
# subtraction (which relied on `1:n-1` parsing as `(1:n)-1`)
n.obs <- nrow(oil.monthly)
diff_val <- diff(oil.monthly$Price)
plot(oil.monthly$Date[seq_len(n.obs - 1)], diff_val, type = "h"); abline(h = 0)
# lag plot: price at t against price at t+1
plot(oil.monthly$Price[seq_len(n.obs - 1)], oil.monthly$Price[2:n.obs])
abline(a = 0, b = 1)
|
4fbde4bd939322515f31b1877ec5914d2ea417f9
|
ae3919b76ab9025a661fc1e2f323ba1950095375
|
/man/repYpost.Rd
|
9b6d91564ea3e6d7db782fa73016a24ed94361f1
|
[] |
no_license
|
cran/geoCount
|
2d9a86fe7ef2d29dcee2cf94a5a30f052d0972c5
|
86880c19678c46bb245eae3215126cf1f4199ab5
|
refs/heads/master
| 2021-01-20T05:52:39.771026
| 2015-01-20T00:00:00
| 2015-01-20T00:00:00
| 17,696,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 947
|
rd
|
repYpost.Rd
|
\name{repYpost}
\alias{repYpost}
\title{Generate Replicated Data with Posterior Samples of Latent Variables}
\usage{
repYpost(res.m, L, Y.family="Poisson")
}
\arguments{
\item{res.m}{a list with elements containing the posterior samples of latent variables and parameters for observed locations}
\item{L}{a vector of length n; it indicates the time duration during which the Poisson counts are accumulated, or the total number of trials for Binomial response}
\item{Y.family}{take the value of \code{"Poisson"} or \code{"Binomial"} which indicates Poisson or Binomial distribution for response variables}
}
\description{
This function generates replicated data sets based on posterior samples of latent variables.}
\value{
A matrix of replicated data sets.
}
\author{
Liang Jing \email{ljing918@gmail.com}
}
\examples{
\dontrun{
Yrep.post <- repYpost(res.m, L) }
}
\seealso{
\code{\link{repYeb}}
}
\keyword{Data Simulation}
|
e04327756cffdedb01c682d59bf613b7d2277f35
|
8e45eafc3ab0d1f65c0b811fd4f313598a9618c9
|
/R/data.R
|
064f816bb61962fe5a4c6678a11b4016b88bc874
|
[] |
no_license
|
SydneyBioX/CiteFuse
|
9e5c53a238ddb6265c6dd30038239392cd54c551
|
b6075864d8930967946022fdfbbf57a6a120061f
|
refs/heads/master
| 2023-02-16T07:41:24.133860
| 2023-02-07T04:19:48
| 2023-02-07T04:19:48
| 223,888,651
| 28
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,627
|
r
|
data.R
|
#' A subset of ECCITE-seq data (control)
#'
#' Data from Mimitou et al. ECCITE-seq PBMC control sample data, which is a list
#' of three matrices of RNA, ADT and HTO
#'
#' @usage data(CITEseq_example, package = 'CiteFuse')
#'
#' @references Mimitou, E. P., Cheng, A., Montalbano, A., et al. (2019).
#' Multiplexed detection of proteins, transcriptomes, clonotypes and
#' CRISPR perturbations in single cells.
#' Nature Methods, 16(5), 409–412.
#'
#' @source Gene Expression Omnibus with the accession code GSE126310.
"CITEseq_example"
#' A subset of Ligand Receptor Pairs
#'
#' @usage data(lr_pair_subset, package = 'CiteFuse')
"lr_pair_subset"
#' A SingleCellExperiment of ECCITE-seq data
#'
#' Data from Mimitou et al. ECCITE-seq PBMC CTCL sample data
#'
#'
#' @references Mimitou, E. P., Cheng, A., Montalbano, A., et al. (2019).
#' Multiplexed detection of proteins, transcriptomes, clonotypes and
#' CRISPR perturbations in single cells.
#' Nature Methods, 16(5), 409–412.
#'
#' @usage data(sce_ctcl_subset, package = 'CiteFuse')
#'
#' @source Gene Expression Omnibus with the accession code GSE126310.
"sce_ctcl_subset"
#' A SingleCellExperiment of ECCITE-seq data
#'
#' Data from Mimitou et al. ECCITE-seq PBMC Control sample data
#'
#' @usage data(sce_control_subset, package = 'CiteFuse')
#'
#' @references Mimitou, E. P., Cheng, A., Montalbano, A., et al. (2019).
#' Multiplexed detection of proteins, transcriptomes, clonotypes and
#' CRISPR perturbations in single cells.
#' Nature Methods, 16(5), 409–412.
#'
#' @source Gene Expression Omnibus with the accession code GSE126310.
"sce_control_subset"
|
e099e27289e08009904a0a05ae9ca90afc5309cc
|
b8edae4a4880310b701eddade612e88d13df9cee
|
/cachematrix.R
|
08378d78724b495931e6b72fa4268a6cec107298
|
[] |
no_license
|
Fluctuzz/ProgrammingAssignment2
|
f34c282e83ab7063f1963c498fd4d83b8a02ee6d
|
9be26460fc8b4e194167fa70da3357e73bb2923a
|
refs/heads/master
| 2020-05-01T19:52:23.481707
| 2019-03-25T20:50:52
| 2019-03-25T20:50:52
| 177,658,852
| 0
| 0
| null | 2019-03-25T20:24:47
| 2019-03-25T20:24:46
| null |
UTF-8
|
R
| false
| false
| 1,065
|
r
|
cachematrix.R
|
## The following functions can create a "special" matrix and to calculate the inverse
## of that "special" matrix. The inverse of the "special" matrix is cached.
## Creates a "special" matrix, which can cache its inverse. Returns a list of functions
## Cache-aware matrix wrapper: stores a matrix together with its lazily
## computed inverse so repeated inversions cost nothing.

## Build the wrapper around matrix `x`.  Returns a list of accessor
## closures (set/get/set_inverse/get_inverse); setting a new matrix
## invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    set_inverse = function(new_inverse) cached <<- new_inverse,
    get_inverse = function() cached
  )
}

## Return the inverse of a makeCacheMatrix() object, computing it with
## solve() only on the first call; later calls announce and reuse the
## cached copy.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$get_inverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$set_inverse(inv)
  } else {
    message("getting cached inverse of matrix")
  }
  inv
}
|
c09c09c1d22af21aa0a64490a6d015ec6160ec6a
|
8e20060c5475f00e9a513f76725bcf6e54f2068a
|
/man/delete_vertex_attr.Rd
|
b3ccb7652743d72eb5c84e7fb82b1cad915c1079
|
[] |
no_license
|
DavisVaughan/rigraph
|
8cc1b6c694ec03c1716d8b471d8f910e08c80751
|
a28ac7fe7b45323a38ffe1f13843bb83bdb4278f
|
refs/heads/master
| 2023-07-18T20:34:16.631540
| 2021-09-20T22:55:53
| 2021-09-20T22:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,191
|
rd
|
delete_vertex_attr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attributes.R
\name{delete_vertex_attr}
\alias{delete_vertex_attr}
\alias{remove.vertex.attribute}
\title{Delete a vertex attribute}
\usage{
delete_vertex_attr(graph, name)
}
\arguments{
\item{graph}{The graph}
\item{name}{The name of the vertex attribute to delete.}
}
\value{
The graph, with the specified vertex attribute removed.
}
\description{
Delete a vertex attribute
}
\examples{
g <- make_ring(10) \%>\%
set_vertex_attr("name", value = LETTERS[1:10])
vertex_attr_names(g)
g2 <- delete_vertex_attr(g, "name")
vertex_attr_names(g2)
}
\seealso{
Other graph attributes:
\code{\link{delete_edge_attr}()},
\code{\link{delete_graph_attr}()},
\code{\link{edge_attr<-}()},
\code{\link{edge_attr_names}()},
\code{\link{edge_attr}()},
\code{\link{graph_attr<-}()},
\code{\link{graph_attr_names}()},
\code{\link{graph_attr}()},
\code{\link{igraph-dollar}},
\code{\link{igraph-vs-attributes}},
\code{\link{set_edge_attr}()},
\code{\link{set_graph_attr}()},
\code{\link{set_vertex_attr}()},
\code{\link{vertex_attr<-}()},
\code{\link{vertex_attr_names}()},
\code{\link{vertex_attr}()}
}
\concept{graph attributes}
|
4f3aabdfa475f3b5556bc19c70e2d7e5e633eb60
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.analytics/man/kinesisanalyticsv2_add_application_input_processing_configuration.Rd
|
64c5a58a49b6f7d10cd8de14220790c309ca53cb
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 2,357
|
rd
|
kinesisanalyticsv2_add_application_input_processing_configuration.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesisanalyticsv2_operations.R
\name{kinesisanalyticsv2_add_application_input_processing_configuration}
\alias{kinesisanalyticsv2_add_application_input_processing_configuration}
\title{Adds an InputProcessingConfiguration to a SQL-based Kinesis Data
Analytics application}
\usage{
kinesisanalyticsv2_add_application_input_processing_configuration(
ApplicationName, CurrentApplicationVersionId, InputId,
InputProcessingConfiguration)
}
\arguments{
\item{ApplicationName}{[required] The name of the application to which you want to add the input
processing configuration.}
\item{CurrentApplicationVersionId}{[required] The version of the application to which you want to add the input
processing configuration. You can use the
\code{\link[=kinesisanalyticsv2_describe_application]{describe_application}}
operation to get the current application version. If the version
specified is not the current version, the
\code{ConcurrentModificationException} is returned.}
\item{InputId}{[required] The ID of the input configuration to add the input processing
configuration to. You can get a list of the input IDs for an application
using the
\code{\link[=kinesisanalyticsv2_describe_application]{describe_application}}
operation.}
\item{InputProcessingConfiguration}{[required] The InputProcessingConfiguration to add to the application.}
}
\value{
A list with the following syntax:\preformatted{list(
ApplicationARN = "string",
ApplicationVersionId = 123,
InputId = "string",
InputProcessingConfigurationDescription = list(
InputLambdaProcessorDescription = list(
ResourceARN = "string",
RoleARN = "string"
)
)
)
}
}
\description{
Adds an InputProcessingConfiguration to a SQL-based Kinesis Data
Analytics application. An input processor pre-processes records on the
input stream before the application's SQL code executes. Currently, the
only input processor available is \href{https://docs.aws.amazon.com/lambda/}{AWS Lambda}.
}
\section{Request syntax}{
\preformatted{svc$add_application_input_processing_configuration(
ApplicationName = "string",
CurrentApplicationVersionId = 123,
InputId = "string",
InputProcessingConfiguration = list(
InputLambdaProcessor = list(
ResourceARN = "string"
)
)
)
}
}
\keyword{internal}
|
df90ad2fac7f25b56c1cb8376b6e4076b1a34287
|
d90b947d2703d4b3b9e6f83d11be703ea02fef60
|
/man/missing_s3.Rd
|
f6c81ac006134cea3eda7f22aacfdb6c9a1130dd
|
[] |
no_license
|
andrie/devtools
|
b0276d804cb1f8f552743ba72ad46f6015610073
|
f297de4debe1b02cec281f6387be4a2c2f430d1c
|
refs/heads/master
| 2021-01-15T19:22:30.648803
| 2012-07-23T09:17:48
| 2012-07-23T09:17:48
| 2,048,076
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
rd
|
missing_s3.Rd
|
\name{missing_s3}
\alias{missing_s3}
\title{Find missing s3 exports.}
\usage{
missing_s3(pkg = NULL)
}
\arguments{
\item{pkg}{package description, can be path or package
name. See \code{\link{as.package}} for more information}
}
\description{
The method is heuristic - looking for objs with a period
in their name.
}
|
8453a9fd9ab2285c2c86c66942ed27c5ef1db633
|
62cfdb440c9f81b63514c9e545add414dc4d5f63
|
/R/qat_plot_block_distribution_1d.R
|
80d2592da9cec21e67cf0b201eae2e6f036e080b
|
[] |
no_license
|
cran/qat
|
7155052a40947f6e45ba216e8fd64a9da2926be4
|
92975a7e642997eac7b514210423eba2e099680c
|
refs/heads/master
| 2020-04-15T16:53:45.041112
| 2016-07-24T01:26:59
| 2016-07-24T01:26:59
| 17,698,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,239
|
r
|
qat_plot_block_distribution_1d.R
|
qat_plot_block_distribution_1d <-
function(resultlist, filename, blocksize=-1, measurement_name="", directoryname="", plotstyle=NULL) {
## functionality: plot statistical parameters of a blockwise scan of a measurement-vector
## author: André Düsterhus
## date: 23.02.2010
## version: A0.1
## input: resultlist from qat_analyse_block_distribution, directoryname, filename, plotstylelist
## output: plot
if (is.null(plotstyle)) {
# if no plotstyle available, use standard plotstyle
plotstyle<-qat_style_plot()
}
# library("gplots")
# set up savepath of the plot
path <- paste(directoryname,filename,"_1.png", sep="")
png(filename=path,width=800,height=600, pointsize=12, bg=plotstyle$basecolor)
par(font.lab=2, mfrow=c(2,2),oma=c(0,0,2,0))
if (length(resultlist$first_moment)!=0) {
if(sum(is.nan(resultlist$first_moment)) != length(resultlist$first_moment)) {
plot(resultlist$first_moment, col=plotstyle$plotcolorminor, main="1st moment", font=2, xlab="", ylab="", , type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$first_moment),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$second_moment)!=0) {
if(sum(is.nan(resultlist$second_moment)) != length(resultlist$second_moment)) {
plot(resultlist$second_moment, col=plotstyle$plotcolorminor, main="2nd moment", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$second_moment),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$third_moment)!=0) {
if (sum(is.nan(resultlist$third_moment)) != length(resultlist$third_moment)) {
plot(resultlist$third_moment, col=plotstyle$plotcolorminor, main="3rd moment", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$third_moment),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$fourth_moment)!=0) {
if(sum(is.nan(resultlist$fourth_moment)) != length(resultlist$fourth_moment)) {
plot(resultlist$fourth_moment, col=plotstyle$plotcolorminor, main="4th moment", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor,pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$fourth_moment),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
mtext("Distribution of a blockwise shift (1)", side=3, line=1 ,font=2 ,col=plotstyle$fontcolor, outer=TRUE)
if(blocksize != -1) {
bordertext2<-paste("Blocksize: ",blocksize, sep="")
mtext(bordertext2, side=3, line=0, font=2, col=plotstyle$fontcolor, outer=TRUE)
}
if(measurement_name != "") {
bordertext3<-paste("Data: ",measurement_name, sep="")
mtext(bordertext3, side=3, line=-1, font=2, col=plotstyle$fontcolor, outer=TRUE)
}
dev.off()
path <- paste(directoryname,filename,"_2.png", sep="")
png(filename=path,width=800,height=600, pointsize=12, bg=plotstyle$basecolor)
par(font.lab=2, mfrow=c(2,2),oma=c(0,0,2,0))
if (length(resultlist$standard_deviation)!=0) {
if(sum(is.nan(resultlist$standard_deviation)) != length(resultlist$standard_deviation)) {
plot(resultlist$standard_deviation, col=plotstyle$plotcolorminor, main="standard deviation", font=2, xlab="", ylab="", , type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$standard_deviation),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$skewness)!=0) {
if(sum(is.nan(resultlist$skewness)) != length(resultlist$skewness)) {
plot(resultlist$skewness, col=plotstyle$plotcolorminor, main="skewness", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$skewness),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$kurtosis)!=0) {
if(sum(is.nan(resultlist$kurtosis)) != length(resultlist$kurtosis)) {
plot(resultlist$kurtosis, col=plotstyle$plotcolorminor, main="kurtosis", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$kurtosis),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$median)!=0) {
if(sum(is.nan(resultlist$median)) != length(resultlist$median)) {
plot(resultlist$median, col=plotstyle$plotcolorminor, main="median", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor,pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$median),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
mtext("Distribution of a blockwise shift (2)", side=3, line=1 ,font=2 ,col=plotstyle$fontcolor, outer=TRUE)
if(blocksize != -1) {
bordertext2<-paste("Blocksize: ",blocksize, sep="")
mtext(bordertext2, side=3, line=0, font=2, col=plotstyle$fontcolor, outer=TRUE)
}
if(measurement_name != "") {
bordertext3<-paste("Data: ",measurement_name, sep="")
mtext(bordertext3, side=3, line=-1, font=2, col=plotstyle$fontcolor, outer=TRUE)
}
dev.off()
path <- paste(directoryname,filename,"_3.png", sep="")
png(filename=path,width=800,height=600, pointsize=12, bg=plotstyle$basecolor)
par(font.lab=2, mfrow=c(2,2),oma=c(0,0,2,0))
if (length(resultlist$p5_quantile)!=0) {
if(sum(is.nan(resultlist$p5_quantile)) != length(resultlist$p5_quantile)) {
plot(resultlist$p5_quantile, col=plotstyle$plotcolorminor, main="5% percentile", font=2, xlab="", ylab="", , type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$p5_quantile),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$p95_quantile)!=0) {
if(sum(is.nan(resultlist$p95_quantile)) != length(resultlist$p95_quantile)) {
plot(resultlist$p95_quantile, col=plotstyle$plotcolorminor, main="95% percentile", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$p95_quantile),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$p25_quantile)!=0) {
if(sum(is.nan(resultlist$p25_quantile)) != length(resultlist$p25_quantile)) {
plot(resultlist$p25_quantile, col=plotstyle$plotcolorminor, main="25% percentile", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor, pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$p25_quantile),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
if (length(resultlist$p75_quantile)!=0) {
if(sum(is.nan(resultlist$p75_quantile)) != length(resultlist$p75_quantile)) {
plot(resultlist$p75_quantile, col=plotstyle$plotcolorminor, main="75% percentile", font=2, xlab="", ylab="", type="p", col.lab=plotstyle$fontcolor, col.main=plotstyle$fontcolor, col.sub=plotstyle$fontcolor,fg=plotstyle$frontcolor, col.axis=plotstyle$fontcolor,pch=plotstyle$plotpointmain)
abline(h=mean(resultlist$p75_quantile),col=plotstyle$plotcolormain)
} else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
}
else {
textplot("no data", valign="top",col=plotstyle$fontcolor)
}
mtext("Distribution of a blockwise shift (3)", side=3, line=1 ,font=2 ,col=plotstyle$fontcolor, outer=TRUE)
if(blocksize != -1) {
bordertext2<-paste("Blocksize: ",blocksize, sep="")
mtext(bordertext2, side=3, line=0, font=2, col=plotstyle$fontcolor, outer=TRUE)
}
if(measurement_name != "") {
bordertext3<-paste("Data: ",measurement_name, sep="")
mtext(bordertext3, side=3, line=-1, font=2, col=plotstyle$fontcolor, outer=TRUE)
}
dev.off()
}
|
44de70740c6785655c620c8e740e07b8e84ab304
|
b4c1bc98a83dc8f4ad492911faca4c709c146288
|
/R/population_attributable_fraction.R
|
39720cdbca2fbb35fb165aa5d633f498e094aff0
|
[] |
no_license
|
danielgils/ITHIM-R
|
facd3e74e3b4cdea279245372e3f2ec3bcb35cc0
|
7e306f0aea3e6ea21521104206a0281e5882af13
|
refs/heads/master
| 2023-09-05T05:33:00.779268
| 2021-11-10T10:07:42
| 2021-11-10T10:07:42
| 277,875,917
| 0
| 0
| null | 2020-07-07T17:04:21
| 2020-07-07T17:04:20
| null |
UTF-8
|
R
| false
| false
| 389
|
r
|
population_attributable_fraction.R
|
#' Calculate population attributable fraction
#'
#' Sums the column \code{cn} of \code{pop} within each demographic group
#' listed in the rows of \code{mat}.
#'
#' @param pop data frame (or list of equal-length columns) whose first column
#'   is sex and second column is age (or age category), and which contains a
#'   column named \code{cn} holding the values to be summed.
#' @param cn name of the column in \code{pop} to sum over.
#' @param mat matrix with one row per demographic group; column 1 is matched
#'   against the first column of \code{pop}, column 2 against the second.
#'
#' @return population attributable fractions by demographic group
#'
#' @export
population_attributable_fraction <- function(pop, cn, mat){
  ##!! hard coding of indices: 1=sex, 2=age or age_cat
  sum_for_group <- function(grp) {
    in_group <- pop[[1]] == grp[1] & pop[[2]] == grp[2]
    sum(pop[[cn]][in_group])
  }
  apply(mat, 1, sum_for_group)
}
|
6f07277b44bb3845015b4b94b1a7b317bfc0f0b7
|
93c8032caa615b4d59d9a6d5f48e415cfde02874
|
/Interface/cmdstan-2.16.0/data/data.data.R
|
4e826ad0733e6dd5d76aa2839acbdfa717908ea2
|
[
"BSD-3-Clause"
] |
permissive
|
paolariva2/PART_BayesPACS
|
1e13c49c22929bc08e05d523a24040617250bd79
|
c773a4cec41b4b798ebc3a7ff3ea87f5b23457da
|
refs/heads/master
| 2021-01-15T23:59:28.951925
| 2017-08-11T10:52:28
| 2017-08-11T10:52:28
| 99,946,790
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,395
|
r
|
data.data.R
|
# Data file (R dump format, as read by Stan/BUGS).
# N: number of observations.
N <- 66
# Q: number of predictor columns (matches the first dimension of X below).
Q <- 8
# Y: binary outcome vector, length 66 (= N).
Y <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0)
# X: 0/1 design matrix stored column-major with .Dim = c(8, 66),
# i.e. Q = 8 rows by N = 66 columns (one column of indicator codes per
# observation).
X <-
structure(c(0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0,
0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,
0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1,
0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0,
0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0,
0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1,
0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1,
0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0,
1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1,
0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0,
0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1,
0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1,
0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0),
.Dim = c(8, 66))
# f: numeric vector of length 66 (= N); mostly 0/1 with a few fractional
# entries. NOTE(review): semantics not stated in this file — presumably a
# per-observation frequency/fraction; confirm against the Stan model.
f <-
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0.428571428571429, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0)
# L0: 8 x 8 identity matrix (ones at positions 1, 10, 19, ... of the
# column-major vector). NOTE(review): presumably a prior scale/precision
# matrix for the Q = 8 coefficients — confirm against the Stan model.
L0 <-
structure(c(1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
1),
.Dim = c(8, 8))
# Scalar L1, zero mean vector m0 (length 8 = Q), and scalar m1.
# NOTE(review): roles (prior hyperparameters?) are not stated here — confirm
# against the Stan model that reads this file.
L1 <- 100
m0 <-
c(0, 0, 0, 0, 0, 0, 0, 0)
m1 <- 0
|
4762a858aba0806d0e7a93cf8e8f94ab414380cf
|
13c395755f0ba62cd4d6abdc981c964d6fe559d2
|
/Python/Py_projects/project/R/전력량_지도데이터.R
|
a1f83f238352bb27d31f9629d98151ef875a6eef
|
[] |
no_license
|
db3124/bigdata_maestro
|
c18d081f6c5d4900747466185bcb1ffce1387bb3
|
6edcf7316c157056b2ec6d4ce0f20239e8d8f524
|
refs/heads/master
| 2022-11-28T11:05:13.483421
| 2020-08-13T09:30:59
| 2020-08-13T09:30:59
| 263,229,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,547
|
r
|
전력량_지도데이터.R
|
# Install required packages (one-time setup; kormaps2014 comes from GitHub).
install.packages('stringi')
install.packages('devtools')
devtools::install_github('cardiomoon/kormaps2014')
install.packages('mapproj')
install.packages('ggiraphExtra')
install.packages('ggthemes')
# Load libraries.
library(kormaps2014)
library(dplyr)
library(ggplot2)
library(readxl)
library(mapproj)
library('ggiraphExtra')
library(ggthemes)
# Prepare the district-level map polygons (kormap2): coerce id to numeric,
# keep districts with id < 25, and re-encode names from UTF-8 to EUC-KR.
str(changeCode(korpop2))
df_kormap2 <- kormaps2014::kormap2
df_kormap2$id <- as.double(df_kormap2$id)
df_kormap2 <- df_kormap2[df_kormap2$id < 25,]
df_kormap2$name <- iconv(df_kormap2$name, "UTF-8", "EUC-KR")
# Load the files needed to draw the map:
# district latitude/longitude codes, and household electricity usage.
df_sec <- read_excel("./res/위도_경도_코드.xlsx")
elec <- read_excel('./res/세대밀도_전력량_가정용.xlsx')
# Re-encode the name column of the lat/lon code table to match the map data.
df_sec$name <- iconv(df_sec$name, "UTF-8", "EUC-KR")
# Keep only name/total from the usage table and join onto the code table.
elec <- elec %>%
  select(name, total)
df_sec2 <- left_join(df_sec, elec, by = 'name')
# Static (interactive = F) choropleth of total electricity usage, keyed by
# district code, green-blue palette.
map <- ggChoropleth(data = df_sec2,
                    aes(fill = total,
                        map_id = code,
                        tooltip = name),
                    map = df_kormap2,
                    palette = 'GnBu',
                    color = 'black',
                    interactive = F)
# Minimal theme with enlarged legend; the fill legend title is the Korean
# for "electricity usage".
map <- map +
  theme_void()+
  labs(fill="전력 사용량")+
  theme(legend.title=element_text(size=20, face = "bold"))+
  theme(legend.text=element_text(size=20, face = "bold"))
map
|
54cbd218053cc3d74f50812ce16ad65816e22472
|
2c71a5672f9dcdcd584cf467a9b5ca4e249ab72a
|
/man/condpowcure.Rd
|
f18b59b2a739ccbde71132c336ddcc16ed04723d
|
[] |
no_license
|
raredd/desmon
|
2eb3b8d2b2254dc689aba53b20a211f677f2fc93
|
1d92579b7f946e50cf9184b056ce9c4742a60f24
|
refs/heads/master
| 2022-06-21T10:16:28.224741
| 2022-05-26T00:09:41
| 2022-05-26T00:09:41
| 210,757,161
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,452
|
rd
|
condpowcure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/condpowcure.R
\name{condpowcure}
\alias{condpowcure}
\title{Perform Simulation to Calculate Conditional Power of a Logrank Test}
\usage{
condpowcure(
time,
status,
rx,
nsamp = 500,
crit.val = 1.96,
control = 0,
control.cure,
control.rate,
test.cure,
test.rate,
inf.time = 1,
total.inf,
add.acc = 0,
add.acc.period,
p.con = 0.5
)
}
\arguments{
\item{time}{Failure/censoring times in the current data}
\item{status}{failure indicator for current data (1=failure, 0=censored)}
\item{rx}{treatment variable in current data}
\item{nsamp}{number of samples to generate in the simulation}
\item{crit.val}{critical values to be used at \emph{future} analyses}
\item{control}{the code of \code{rx} for the control group}
\item{control.cure}{the cure fraction in the control group}
\item{control.rate}{the failure rate in the conditional exponential
distribution for non-cured subjects in the control group}
\item{test.cure}{the cure fraction in the experimental treatment group}
\item{test.rate}{the failure rate in the conditional exponential
distribution for non-cured subjects in the experimental treatment group}
\item{inf.time}{information times of future analyses}
\item{total.inf}{Number of failures at full information}
\item{add.acc}{Additional number of cases that will be accrued}
\item{add.acc.period}{Duration of time over which the \code{add.acc} cases
will be accrued}
\item{p.con}{The proportion randomized to the control group}
}
\value{
Returns the proportion of samples where the upper boundary was
crossed, and its simulation standard error. Also prints the value of the
logrank statistic for the current data.
}
\description{
Performs a simulation to calculate conditional power of a logrank test in a
group sequential experiment with failure times generated from a cure rate
model.
}
\details{
Adds \code{add.acc} cases and generates additional follow-up for subjects
already entered but censored in the current data. Failure times are
generated from exponential cure rate models, with fractions
\code{control.cure} and \code{test.cure} that will never fail and
exponential distributions for the failure times of those who will eventually
fail.
Additional interim analyses are performed when \code{inf.time * total.inf}
failures are observed. The critical values used for rejecting
the null at these additional interim analyses are specified in
\code{crit.val}. The number of additional analyses is the length of
\code{inf.time} and \code{crit.val}. The information of the current data
should not be included. Full information (\code{inf.time = 1}) must be
explicitly included. Information times should be in increasing order.
After generating the additional data, the function performs the interim
analyses, and determines whether the upper boundary is crossed. This is
repeated \code{nsamp} times, giving an estimate of the probability of
eventually rejecting the null under the specified distributions, conditional
on the current data. Low conditional power under the target alternative for
the study might be justification for stopping early.
It is very important that consistent time units be used for all arguments
and for the current data.
}
\examples{
## current data
set.seed(3)
ft <- c(ifelse(runif(100) < 0.6, rexp(100), 100), ifelse(runif(100) < 0.55,
rexp(100) / 0.95, 100))
ct <- runif(200) * 4
fi <- ifelse(ct < ft, 0, 1)
ft <- pmin(ft, ct)
rx <- c(rep(0, 100), rep(1, 100))
## currently at 0.375 information -- assume no prior interim analyses
critv <- sequse(c(0.375, 0.7, 1))[-1]
condpowcure(ft, fi, rx, nsamp = 10, crit.val = critv, control.cure = 0.4,
control.rate = 1, test.cure = 0.6, test.rate = 0.75,
inf.time = c(0.7, 1), total.inf = 200, add.acc = 200, add.acc.period = 1)
\dontrun{
## use larger nsamp in practice:
condpowcure(ft, fi, rx, nsamp = 1000, crit.val = critv, control.cure = 0.4,
control.rate = 1, test.cure = 0.6, test.rate = 0.75,
inf.time = c(0.7, 1), total.inf = 200, add.acc = 200, add.acc.period = 1)
## Observed Z = 1.43
## [1] 0.958
}
}
\references{
Jennison and Turnbull (1990). \emph{Statistical Science} \strong{5}:299-317.
Betensky (1997). \emph{Biometrics}, \strong{53}:794-806.
}
\seealso{
\code{\link{condpow}}; \code{\link{sequse}}; \code{\link{acondpow}}
}
\keyword{design}
\keyword{survival}
|
7603109215378abdfbc74c01c7d4a9a45e9adff4
|
3eeb2c22f4c378914b6594823d976fddf7653965
|
/man/min_bic_2020_11_26_death.Rd
|
0c3bcaf40a1497c0147b296183b7fd420bc24205
|
[
"MIT"
] |
permissive
|
vjcitn/sars2death
|
943a262e71c9c5896d627fe54eaa3e052ee132bb
|
6ae6e9f04c8cf5f4515ab0b03fdf3908e17d54fe
|
refs/heads/main
| 2023-01-15T18:19:34.986340
| 2020-11-27T12:21:20
| 2020-11-27T12:21:20
| 316,250,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 404
|
rd
|
min_bic_2020_11_26_death.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{min_bic_2020_11_26_death}
\alias{min_bic_2020_11_26_death}
\title{a statewide collection of best AR/MA model orders, 11/26, death only}
\format{
S3 instance
}
\usage{
min_bic_2020_11_26_death
}
\description{
a statewide collection of best AR/MA model orders, 11/26, death only
}
\keyword{datasets}
|
06ca28eed7324cb44ab9d5a84da66c8e53242da6
|
dc284fe45eea59ade9e1a75095af6285be51af3c
|
/C/make_figure_simulation_sub.R
|
ed9cbb0a10a5cabc1e98def22ce4d60f7f8ea2f2
|
[] |
no_license
|
gui11aume/analytic_combinatorics_seeding
|
c6d47ecd4a7df4428e0d0a24a578bc276bb01004
|
f6dabd7cf074f7069bcf12bca996fe35530b19f0
|
refs/heads/master
| 2020-05-26T00:29:23.183443
| 2017-10-18T16:09:52
| 2017-10-18T16:09:52
| 84,981,428
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,343
|
r
|
make_figure_simulation_sub.R
|
# f(z) = 1 - ((1 - p) * z)^(d + 1); vectorized over z.
f <- function(z, p, d) {
  q <- (1 - p) * z
  1 - q^(d + 1)
}
# g(z) = 1 - p*z * (1 - q^d) / (1 - q) with q = (1 - p) * z; vectorized over z.
g <- function(z, p, d) {
  q <- (1 - p) * z
  1 - p * z * (1 - q^d) / (1 - q)
}
# Derivative of g with respect to z (closed form used by the Newton solver).
dg <- function(z, p, d) {
  num <- ((d + 1) * p + (d * p^2 - d * p) * z) * (-(p - 1) * z)^d - p
  den <- (p^2 - 2 * p + 1) * z^2 + 2 * (p - 1) * z + 1
  num / den
}
# Newton iteration for the root of g(., p, d) near (and at or above) z = 1.
# Starts at z = 1, which is close to the solution, and halves any step that
# would take the iterate below 1.
Newton <- function(p, d) {
  z <- 1
  repeat {
    z_new <- z - g(z, p, d) / dg(z, p, d)
    step <- z_new - z
    # Converged once the Newton step is negligible.
    if (abs(step) < 1e-15) break
    if (z_new < 1) {
      # Backtrack: shrink the step until the new iterate stays >= 1.
      while (z + step < 1) step <- step / 2
      z_new <- z + step
    }
    z <- z_new
  }
  z_new
}
# Bisection search on the bracket [1, 1.2] for the point where g(., p, d)
# becomes negative; stops when the bracket is narrower than 1e-12 and
# returns its midpoint.
bisect <- function(p, d) {
  lower <- 1.0
  upper <- 1.2
  repeat {
    mid <- (lower + upper) / 2
    if (g(mid, p, d) < 0) {
      upper <- mid
    } else {
      lower <- mid
    }
    if (upper - lower < 1e-12) break
  }
  (upper + lower) / 2
}
# Constant factor of the asymptotic expansion: with q = (1 - p) * z this is
# (1 - q)^2 / (p^2 * (1 + d * q^(d+1) - (d+1) * q^d)).
cst_term <- function(z, p, d) {
  q <- (1 - p) * z
  (1 - q)^2 / p^2 / (1 + d * q^(d + 1) - (d + 1) * q^d)
}
# Figure: analytic seeding-probability estimate vs. simulation counts.
d = 17
# Read sizes at which the analytic estimate is evaluated.
k = 1:204
L = list()
# For each error probability p: find the singularity z via Newton's method
# and build the estimate 1 - C / z^(k+2).
for (p in c(0.08, 0.1, 0.12)) {
  z = Newton(p,d)
  C = cst_term(z,p,d)
  L[[as.character(p)]] = 1-C/z^(k+2)
}
# Simulation results: column 1 = read size, column 2 = failure count out of
# 1e7 trials (inferred from the 1 - count/1e7 transformation below).
S = list(
  read.table("out-.08.txt"),
  read.table("out-.10.txt"),
  read.table("out-.12.txt")
)
pdf("simulp.pdf", width=11, height=5.5, useDingbats=FALSE)
par(mfrow=c(1,2))
# Left panel: every other simulated point over the low read-size range.
subs = seq(1,51,2)
# NOTE(review): `plot.first=` is not a recognized plot() argument —
# `panel.first=grid()` is presumably intended; confirm the grid renders
# as expected.
plot(S[[1]][subs,1], 1-S[[1]][subs,2]/1e7, pch=19, cex=.5, ylim=c(0.5,1),
     plot.first=grid(),
     xlab="Read size", ylab="Seeding probability")
lines(k[1:154], L[[1]][1:154])
points(S[[2]][subs,1], 1-S[[2]][subs,2]/1e7, pch=19, cex=.5)
lines(k[1:154], L[[2]][1:154])
points(S[[3]][subs,1], 1-S[[3]][subs,2]/1e7, pch=19, cex=.5)
lines(k[1:154], L[[3]][1:154])
legend(x="bottomright", inset=0.05, legend=c("Simulation", "Estimate"),
       pch=c(19, NA), lty=c(NA, 1), pt.cex=.6, bg="white", box.col=NA)
# Right panel: zoom on the high read-size range.
subs = seq(51,76)
plot(S[[1]][subs,1], 1-S[[1]][subs,2]/1e7, pch=19, cex=.5, ylim=c(0.9,1),
     plot.first=grid(),
     xlab="Read size", ylab="Seeding probability")
lines(k[146:204], L[[1]][146:204])
points(S[[2]][subs,1], 1-S[[2]][subs,2]/1e7, pch=19, cex=.5)
lines(k[146:204], L[[2]][146:204])
points(S[[3]][subs,1], 1-S[[3]][subs,2]/1e7, pch=19, cex=.5)
lines(k[146:204], L[[3]][146:204])
dev.off()
# Maximum absolute deviation between simulation and estimate per p
# (printed when the script is run non-interactively).
max(abs(1-S[[1]][,2]/1e7 - L[[1]][S[[1]][,1]]))
max(abs(1-S[[2]][,2]/1e7 - L[[2]][S[[2]][,1]]))
max(abs(1-S[[3]][,2]/1e7 - L[[3]][S[[3]][,1]]))
|
0b687702bddd98719a828ece716a0d0551a260e6
|
201a8a213e2993159f3ee1e2923af2d54b0546d2
|
/R/job_create.R
|
6350bcea5269442e2f9e557e771699c3b84d15f1
|
[
"MIT"
] |
permissive
|
djnavarro/workbch
|
b66bd43b39e3c42bd0eef0d089fa4ec9d3698cb7
|
6cc7a28e92a24acee1f61ad60c124f701108a96a
|
refs/heads/master
| 2020-06-12T23:38:07.160985
| 2020-04-23T02:24:58
| 2020-04-23T02:24:58
| 194,461,800
| 41
| 3
|
NOASSERTION
| 2019-08-09T04:47:23
| 2019-06-30T01:05:15
|
R
|
UTF-8
|
R
| false
| false
| 4,740
|
r
|
job_create.R
|
#' Create a new job
#'
#' @param jobname name of the job to create
#' @param description brief description of the job
#' @param status should be "active", "inactive", "complete", "abandoned", "masked"
#' @param owner should be a name or a nickname
#' @param priority numeric
#' @param tags a string containing comma separated list of tags
#' @param path path to the job home directory
#'
#' @details The role of the \code{job_create()} function is to create a new workbch job.
#' It can be called in two ways, interactively or programmatically. To call the
#' function interactively, R must be in interactive mode and the function should
#' be called with no arguments specified. When called in this fashion the user
#' will be presented with a sequence of prompts, asking them to specify each
#' of the parameters that define a job (e.g., a character string for \code{jobname},
#' a number for \code{priority}). When used interactively, you do not need to include
#' quote marks when entering a string: \code{job_create()} will coerce the input to
#' the appropriate format, and then append the created job to the job file.
#'
#' When called programmatically, the user must specify the arguments in the
#' call to \code{job_create()}. The \code{jobname}, \code{description} and
#' \code{owner} arguments should be character strings of length 1, and all three
#' are mandatory. The \code{status} for a job should be one of the following
#' values: \code{"active"}, \code{"inactive"}, \code{"complete"}, \code{"abandoned"}
#' or \code{"masked"}. The \code{priority} for a job should be a positive integer:
#' the intent is that priority 1 is the highest priority, followed by priority 2,
#' and so on. The \code{tags} for a job can be specified as a single string, using
#' \code{|} as a separator character (e.g., \code{tags = "research | statistics"}
#' would create two tags for the job). Finally, the \code{path} should specify the
#' location of a folder containing the project files.
#'
#' For non-mandatory arguments, if the user does not specify a value, the
#' \code{job_create()} function applies the following defaults: \code{priority = 1},
#' \code{status = "active"}, \code{tags = character(0)} and \code{path = NA}.
#'
#' Note that, although jobs can also be associated with URLs (e.g., link to a
#' GitHub repository or a document on Overleaf), the \code{job_create()} function
#' does not (at this time) allow you to specify URLs. These can be added using
#' \code{job_modify()}.
#'
#' @return Invisibly returns a list containing the parameters for the job
#' @export
job_create <- function(jobname = NULL, description = NULL, owner = NULL,
                       status = NULL, priority = NULL, tags = NULL,
                       path = NULL) {
  # Prompt interactively only when *no* argument was supplied and the session
  # is interactive; otherwise all inputs must come from the arguments.
  use_prompt <- is.null(jobname) && is.null(description) && is.null(owner) &&
    is.null(status) && is.null(priority) && is.null(tags) && is.null(path) &&
    interactive()
  if (use_prompt) {
    cat("\nDetails of the new job:\n")
    cat("(Press enter to skip or use default values)\n\n")
    # elicit responses from user
    jobname     <- readline(" Job name............ ")
    description <- readline(" Description......... ")
    owner       <- readline(" Owner............... ")
    status      <- readline(" Status.............. ")
    priority    <- as.numeric(readline(" Priority............ "))
    tags        <- readline(" Tags (comma separated) ........... ")
    path        <- readline(" Path................ ")
    # Treat no response as "use the default value".
    # BUG FIX: readline() always returns a length-1 character vector, so the
    # old test `length(tags) == 0` was never TRUE and an empty tags response
    # was kept as "" instead of falling back to the default. Compare against
    # "" like every other prompt.
    if (jobname == "") jobname <- NULL
    if (description == "") description <- NULL
    if (owner == "") owner <- NULL
    if (status == "") status <- NULL
    if (is.na(priority)) priority <- NULL
    if (tags == "") tags <- NULL
    if (path == "") path <- NULL
  }
  # The three mandatory fields must be present regardless of input mode.
  if (is.null(jobname)) stop("'jobname' cannot be empty", call. = FALSE)
  if (is.null(description)) stop("'description' cannot be empty", call. = FALSE)
  if (is.null(owner)) stop("'owner' cannot be empty", call. = FALSE)
  # Read the jobs file and make sure no job already exists with this name.
  jobs <- job_read()
  verify_jobname(jobname)
  verify_jobmissing(jobname, jobs)
  # Split the tag string into individual tags if any were supplied.
  if (!is.null(tags)) {
    tags <- split_tags(tags)
  }
  # Build the new job record and append it to the job list.
  jb <- new_job(
    jobname = jobname,
    description = description,
    owner = owner,
    status = status,
    priority = priority,
    tags = tags,
    path = path,
    urls = empty_url()
  )
  jobs[[jobname]] <- jb
  # Persist the updated job list.
  job_write(jobs)
  # Invisibly return the created job.
  invisible(jb)
}
|
045ec8a6e06e73bde73078c67ce57cd855560e43
|
99fd4277aa21e4a702c73b26a95a201489d9b415
|
/scripts/21_extractStations/hbm_extract_stations/hbm_extract_stationsI.R
|
29f03b696be62489ced929981568b6e6b7070708
|
[] |
no_license
|
neumannd/HBM_tools
|
774bc67afdaaa25281ed1e64b4a478ca9746d3c7
|
e287f2876fc3ae549c99263fb6f2e7aba70d5a8a
|
refs/heads/master
| 2020-03-10T14:41:52.339594
| 2018-06-19T18:00:18
| 2018-06-19T18:00:18
| 129,432,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,065
|
r
|
hbm_extract_stationsI.R
|
# Extract station time series from HBM/ERGOM netCDF model output.
# Load the netCDF interface and all project helper functions; the 'support/'
# paths are relative to the directory this script is run from.
library('ncdf4')
source('support/hbm_constants.R')
source('support/read_varlist.R')
source('support/read_varmapping_L0L1.R')
source('support/generate_varmapping_L2L2.R')
source('support/createL1file.R')
source('support/createL2file.R')
source('support/get_basic_grid_data.R')
source('support/latlon2cell.R')
source('support/read_stations.R')
source('support/calculate_variable.R')
source('support/generate_filesnames.R')
source('support/open_files.R')
source('support/close_files.R')
source('support/fill_grid_data_base.R')
source('support/get_varmapping_L0Fi.R')
source('support/remap_variables_L0L1.R')
source('support/read_data_L0.R')
source('support/readL1file.R')
source('support/extract_tag_data.R')
# Pipeline stage switches: each stage (read raw L0 data, compute/write the
# remapped L1 data, compute/write the derived L2 data, ship/river splits)
# can be enabled or disabled independently.
readL0=TRUE
calcL1=TRUE
writeL1=TRUE
readL1=FALSE
calcL2=TRUE
writeL2=TRUE
doShip=TRUE
doRiver=TRUE
# Model run identifier; selects the variable-mapping file and I/O paths below.
# WORK='B'
# WORK='I'
WORK='I'
# When TRUE, intermediate results are saved as .RData snapshots.
saveData=TRUE
# Prefix for the snapshot file names.
# savePrefix=''
# savePrefix='testing_'
# savePrefix='test_'
savePrefix='hlrn_'
# Station subset to process; selects the '<sea>_stations.csv' control file.
# sea='northsea'
# sea='balticsea'
sea='all'
# load(paste0(savePrefix, 'dataL0_', sea, '_work', WORK, '.RData'))
# load(paste0(savePrefix, 'dataL1_', sea, '_work', WORK, '.RData'))
## load('')
## save('dataL0', 'dataL1', 'dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0('data_work', WORK, '.RData'))
### save('dataL0', 'dataL1', 'dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0('data_northsea_work', WORK, '.RData'))
### save('dataL0', 'dataL1', 'dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0('data_balticsea_work', WORK, '.RData'))
### save('dataL0', 'dataL1', 'dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0('data_', sea, '_work', WORK, '.RData'))
## save('dataL0', 'dataL1', 'dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0(savePrefix, 'data_', sea, '_work', WORK, '.RData'))
# directories ----
# Control files, station list, and input/output directories.
varmappingFileName = paste0('../control_files/variablemapping_work', WORK,'.csv')
varlistFileName = '../control_files/variablelist_neumannd3.csv'
# stationsFileName = '/media/neumannd/work_dell/88_MeRamo/64_validator_data_preparation/test_stations.csv'
# stationsFileName = '../control_files/all_stations.csv'
# stationsFileName = '/media/neumannd/work_dell/88_MeRamo/65_validation_data_BSH/northsea_stations.csv'
stationsFileName = paste0('../control_files/',sea,'_stations.csv')
# outdir = paste0('../output/STATIONS_WORK', WORK)
outdir = paste0('/gfs1/work/mvkdneum/HBM/RESULTS/EVALUATION/stations_annual/WORK', WORK)
# inDir = '/silod4/dneumann/HBM/RESULTS/EVALUATION'
# NOTE(review): inDir is not referenced anywhere in this script chunk —
# confirm whether it is still needed (possibly used inside a sourced helper).
inDir = '/gfs1/work/mvkdneum/HBM/RESULTS/EVALUATION/grid_annual'
# get grid infos ----
# Basic grid geometry; only fetched when raw (L0) data will be read.
# NOTE(review): `grids` is used by generate_filesnames() below regardless of
# readL0 — confirm the script is never run with readL0=FALSE without a
# snapshot load providing `grids`.
if (readL0) {
  grids = get_basic_grid_data()
}
# generate files names and open files ----
# Expected netCDF file names per model (ERGOM/HBM) and per file type.
ncFiles = list()
filenamePrefix = list('ERGOM'='p', 'HBM'='p')
filenameSuffix = list('ERGOM'=paste0('WORK', WORK, '_merge_mean_2012.nc'),
                      'HBM'='WORKE_merge_mean_2012.nc')
fileTypes = list('ERGOM'=c('biodat', 'bendat', 'chldat', 'light', 'secchidat'),
                 'HBM' = c('t_file', 'z_file', 'c_file'))
ncFileNames = generate_filesnames(grids, fileTypes, filenamePrefix, filenameSuffix)
# read variable and station data ----
# Assemble variable-name mappings between processing levels:
# L0 = raw model output, L1 = remapped variables, L2 = variables derived
# from L1 via the formulas listed in `varlist`.
varmapping = list()
varnames = list()
varmapping = append(varmapping, get_varmapping_L0Fi(WORK))
varmapping = append(varmapping, read_varmapping_L0L1(varmappingFileName))
varlist = read_varlist(varlistFileName)
varnames$L0 = names(varmapping$L0Fi)
varnames$L1 = names(varmapping$L1L0)
varnames$L2 = names(varlist)
varmapping = append(varmapping, generate_varmapping_L2L2(varnames$L2))
# read L0 data ----
# Open the netCDF files, read station locations, and extract the raw (L0)
# data at each station.
if (readL0) {
  print('read data L0')
  ncFiles$L0 = open_files(ncFileNames)
  grids = fill_grid_data_base(grids, ncFiles$L0$biodat)
  stations = read_stations(stationsFileName, grids)
  dataL0 = read_data_L0(stations, ncFiles$L0, varmapping, grids)
}
# Snapshot each stage so later stages can be rerun from the .RData files.
if (saveData && readL0) save('dataL0', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0(savePrefix, 'dataL0_', sea, '_work', WORK, '.RData'))
# remap L0 to L1 ----
if (calcL1) {
  print('convert data L0 -> L1')
  dataL1 = remap_variables_L0L1(names(stations), varnames, varmapping, dataL0)
}
if (saveData && calcL1) save('dataL1', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0(savePrefix, 'dataL1_', sea, '_work', WORK, '.RData'))
# write L1 data ----
if (writeL1) {
  print('write data L1')
  ncFiles$L1 = createL1file(stations, varnames, varmapping, dataL1, outdir, grids)
}
# read L1 data ----
# Placeholder: reading back previously written L1 files is not implemented.
if (readL1) {
  print('read data L1')
  print('Empty')
}
# calculate L2 data ----
# Derive each L2 variable by evaluating its formula on the L1 data.
if (calcL2) {
  print('convert data L1 -> L2')
  dataL2 = list()
  for(iVar in names(varlist)) {
    dataL2[[iVar]] = list()
    dataL2[[iVar]]$data = calculate_variable(varlist[[iVar]]$formula, dataL1)
  }
}
if (saveData) save('dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0(savePrefix, 'dataL2_', sea, '_work', WORK, '.RData'))
if (saveData) save('dataL0', 'dataL1', 'dataL2', 'grids', 'varnames', 'varmapping', 'varlist', 'stations', 'ncFileNames', file = paste0(savePrefix, 'data_', sea, '_work', WORK, '.RData'))
# Drop 'secchi' from the L2 variable set before writing the output files.
# NOTE(review): dataL2 was computed *with* secchi above and already saved in
# the snapshots; only the written netCDF output excludes it — confirm intended.
varnames$L2 = varnames$L2[!(varnames$L2=="secchi")]
varlist$secchi = NULL
# write L2 data ----
if (writeL2) {
  print('write data L2')
  ncFiles$L2 = createL2file(stations, varnames, varlist, varmapping, dataL2, outdir, grids)
}
# do ship data ----
# For runs 'B' and 'I': split out the L1 variables tagged 'ship' and write
# separate L1/L2 output files with a '_ship' suffix.
if (doShip && (WORK=='B' || WORK=='I')) {
  print('do ship: extract L1')
  dataShipL1 = extract_tag_data(dataL1, 'ship', c('ship', 'river'), sep = '_')
  print('do ship: write L1')
  tmpVarnames = list('L1' = intersect(names(dataShipL1), varnames$L1))
  ncFiles$L1 = createL1file(stations, tmpVarnames, varmapping, dataShipL1, outdir, grids, '_ship')
  print('do ship: extract L2')
  dataShipL2 = list()
  for(iVar in names(varlist)) {
    dataShipL2[[iVar]] = list()
    dataShipL2[[iVar]]$data = calculate_variable(varlist[[iVar]]$formula, dataShipL1)
  }
  print('do ship: write L2')
  ncFiles$L2 = createL2file(stations, varnames, varlist, varmapping, dataShipL2, outdir, grids, '_ship')
}
# do river data ----
# Same as the ship branch, but for the variables tagged 'river'.
if (doRiver && (WORK=='B' || WORK=='I')) {
  print('do river: extract L1')
  dataRiverL1 = extract_tag_data(dataL1, 'river', c('ship', 'river'), sep = '_')
  print('do river: write L1')
  tmpVarnames = list('L1' = intersect(names(dataRiverL1), varnames$L1))
  ncFiles$L1 = createL1file(stations, tmpVarnames, varmapping, dataRiverL1, outdir, grids, '_river')
  print('do river: extract L2')
  dataRiverL2 = list()
  for(iVar in names(varlist)) {
    dataRiverL2[[iVar]] = list()
    dataRiverL2[[iVar]]$data = calculate_variable(varlist[[iVar]]$formula, dataRiverL1)
  }
  print('do river: write L2')
  ncFiles$L2 = createL2file(stations, varnames, varlist, varmapping, dataRiverL2, outdir, grids, '_river')
}
# close all netCDF files ----
# close_files(ncFiles)
|
c0a26ccb0e69c4030c63e29a2281beb8c400b02d
|
a59b0019cd455e5c8c59263d5248b388eb235257
|
/tests/testthat/test-family-utils.R
|
f1c9d75ea2ba475f867da0454d3005c81f48d522
|
[
"MIT"
] |
permissive
|
dill/gratia
|
4df529f5e636a0139f5c355b52a2924bebf7aca4
|
26c3ece0e6a6298ab002b02019b0ea482d21dace
|
refs/heads/master
| 2023-04-08T18:35:18.730888
| 2023-03-20T12:52:33
| 2023-03-20T12:52:33
| 160,169,115
| 0
| 0
|
NOASSERTION
| 2018-12-03T09:54:30
| 2018-12-03T09:54:30
| null |
UTF-8
|
R
| false
| false
| 46,276
|
r
|
test-family-utils.R
|
## Test family and link utilities
##
## Exercises gratia's link(), inv_link() and extract_link() against fitted
## model fixtures (m_glm, m_gam, ... — presumably created in the testthat
## setup files; not visible here) and against family objects directly.
## load packages
library("testthat")
library("gratia")
library("mgcv")
library("gamm4")
## value at which link / inverse-link functions are evaluated in the tests
val <- 1
## a plain list that merely *looks* like a gamm4 fit (has $mer and $gam
## components); used to check the list methods reject non-gamm4 lists
l <- list(mer = 1:3, gam = 1:3)
## link() / inv_link() on fitted models ------------------------------------
## The model fixtures are assumed to be Gaussian fits created in the test
## setup (not visible here), so the extracted (inverse) link must match
## the one from gaussian().
test_that("link() works with a glm() model", {
    f <- link(m_glm)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkfun)
})

test_that("link() works with a gam() model", {
    f <- link(m_gam)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkfun)
})

test_that("link() works with a gamm() model", {
    f <- link(m_gamm)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkfun)
})

test_that("link() works with a gamm4() model", {
    f <- link(m_gamm4)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkfun)
})

## the list method must reject lists that are not gamm4 fits
test_that("link.list() fails with a list that isn't a gamm4", {
    expect_error(link(l),
                 regexp = "`object` does not appear to a `gamm4` model object",
                 fixed = TRUE)
})

test_that("link() works with a bam() model", {
    f <- link(m_bam)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkfun)
})

## distributional (gaulss) model: default extracted link is the location
## (identity) link
test_that("link() works with a gam() gaulss model", {
    f <- link(m_gaulss)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkfun)
})

test_that("inv_link() works with a gam() model", {
    f <- inv_link(m_gam)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkinv)
})

test_that("inv_link() works with a glm() model", {
    f <- inv_link(m_glm)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkinv)
})

test_that("inv_link() works with a gamm() model", {
    f <- inv_link(m_gamm)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkinv)
})

test_that("inv_link() works with a gamm4() model", {
    f <- inv_link(m_gamm4)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkinv)
})

test_that("inv_link() works with a bam() model", {
    f <- inv_link(m_bam)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkinv)
})

test_that("inv_link.list() fails with a list that isn't a gamm4", {
    expect_error(inv_link(l),
                 regexp = "`object` does not appear to a `gamm4` model object",
                 fixed = TRUE)
})

test_that("inv_link() works with a gam() gaulss model", {
    f <- inv_link(m_gaulss)
    expect_type(f, "closure")
    expect_identical(f, gaussian()$linkinv)
})
## link --------------------------------------------------------------------
## link() applied directly to family objects: the returned closure must be
## the family's own $linkfun, both by value (at `val`) and identically.
test_that("link() works for gaussian() family objects", {
    f <- link(gaussian())
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, gaussian()$linkfun)
})

test_that("link() works for poisson() family objects", {
    f <- link(poisson())
    expect_type(f, "closure")
    expect_identical(f(val), log(val))
    expect_identical(f, poisson()$linkfun)
})

test_that("link() works for binomial() family objects", {
    f <- link(binomial())
    expect_type(f, "closure")
    expect_identical(f(val), binomial()$linkfun(val))
    expect_identical(f, binomial()$linkfun)
})

test_that("link() works for Gamma() family objects", {
    f <- link(Gamma())
    expect_type(f, "closure")
    expect_identical(f(val), Gamma()$linkfun(val))
    expect_identical(f, Gamma()$linkfun)
})

test_that("link() works for inverse.gaussian() family objects", {
    f <- link(inverse.gaussian())
    expect_type(f, "closure")
    expect_identical(f(val), inverse.gaussian()$linkfun(val))
    expect_identical(f, inverse.gaussian()$linkfun)
})

test_that("link() works for quasi() family objects", {
    f <- link(quasi())
    expect_type(f, "closure")
    expect_identical(f(val), quasi()$linkfun(val))
    expect_identical(f, quasi()$linkfun)
})

test_that("link() works for quasibinomial() family objects", {
    f <- link(quasibinomial())
    expect_type(f, "closure")
    expect_identical(f(val), quasibinomial()$linkfun(val))
    expect_identical(f, quasibinomial()$linkfun)
})

test_that("link() works for quasipoisson() family objects", {
    f <- link(quasipoisson())
    expect_type(f, "closure")
    expect_identical(f(val), quasipoisson()$linkfun(val))
    expect_identical(f, quasipoisson()$linkfun)
})

## negbin() requires theta to be supplied
test_that("link() works for negbin() family objects", {
    theta <- 1.1
    f <- link(negbin(theta = theta))
    expect_type(f, "closure")
    expect_identical(f(val), negbin(theta = theta)$linkfun(val))
    expect_identical(f, negbin(theta = theta)$linkfun)
})

test_that("link() works for nb() family objects", {
    f <- link(nb())
    expect_type(f, "closure")
    expect_identical(f(val), nb()$linkfun(val))
    expect_identical(f, nb()$linkfun)
})

## Tweedie() requires the power parameter p
test_that("link() works for Tweedie() family objects", {
    p <- 1.1
    f <- link(Tweedie(p = p))
    expect_type(f, "closure")
    expect_identical(f(val), Tweedie(p = p)$linkfun(val))
    expect_identical(f, Tweedie(p = p)$linkfun)
})

test_that("link() works for tw() family objects", {
    f <- link(tw())
    expect_type(f, "closure")
    expect_identical(f(val), tw()$linkfun(val))
    expect_identical(f, tw()$linkfun)
})

test_that("link() works for scat() family objects", {
    f <- link(scat())
    expect_type(f, "closure")
    expect_identical(f(val), scat()$linkfun(val))
    expect_identical(f, scat()$linkfun)
})
## NOTE: this test exercises link() on the fitted scat() model fixture
## (m_scat), not on a family object, so it follows the file's
## "works with a ... model" naming pattern; the previous description was a
## copy-paste duplicate of the family-object test immediately above.
test_that("link() works with a scat() model", {
    f <- link(m_scat)
    expect_type(f, "closure")
    expect_identical(f(val), scat()$linkfun(val))
    expect_identical(f, scat()$linkfun)
})
## link() on further mgcv extended-family constructors
test_that("link() works for betar() family objects", {
    f <- link(betar())
    expect_type(f, "closure")
    expect_identical(f(val), betar()$linkfun(val))
    expect_identical(f, betar()$linkfun)
})

## ocat() requires a theta (cut point) value
test_that("link() works for ocat() family objects", {
    theta <- 1.1
    f <- link(ocat(theta = theta))
    expect_type(f, "closure")
    expect_identical(f(val), ocat(theta = theta)$linkfun(val))
    expect_identical(f, ocat(theta = theta)$linkfun)
})

test_that("link() works for ziP() family objects", {
    f <- link(ziP())
    expect_type(f, "closure")
    expect_identical(f(val), ziP()$linkfun(val))
    expect_identical(f, ziP()$linkfun)
})

test_that("link() works for cox.ph() family objects", {
    f <- link(cox.ph())
    expect_type(f, "closure")
    expect_identical(f(val), cox.ph()$linkfun(val))
    expect_identical(f, cox.ph()$linkfun)
})

## cnorm() uses the identity link, so f(val) is just val
test_that("link() works for cnorm() family objects", {
    f <- link(cnorm())
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, cnorm()$linkfun)
})
## inv_link ----------------------------------------------------------------
## inv_link() applied directly to family objects: mirrors the link() tests
## above but for the family's $linkinv.
test_that("inv_link() works for gaussian() family objects", {
    f <- inv_link(gaussian())
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, gaussian()$linkinv)
})

test_that("inv_link() works for poisson() family objects", {
    f <- inv_link(poisson())
    expect_type(f, "closure")
    expect_identical(f(val), exp(val))
    expect_identical(f, poisson()$linkinv)
})

test_that("inv_link() works for binomial() family objects", {
    f <- inv_link(binomial())
    expect_type(f, "closure")
    expect_identical(f(val), binomial()$linkinv(val))
    expect_identical(f, binomial()$linkinv)
})

test_that("inv_link() works for Gamma() family objects", {
    f <- inv_link(Gamma())
    expect_type(f, "closure")
    expect_identical(f(val), Gamma()$linkinv(val))
    expect_identical(f, Gamma()$linkinv)
})

test_that("inv_link() works for inverse.gaussian() family objects", {
    f <- inv_link(inverse.gaussian())
    expect_type(f, "closure")
    expect_identical(f(val), inverse.gaussian()$linkinv(val))
    expect_identical(f, inverse.gaussian()$linkinv)
})

test_that("inv_link() works for quasi() family objects", {
    f <- inv_link(quasi())
    expect_type(f, "closure")
    expect_identical(f(val), quasi()$linkinv(val))
    expect_identical(f, quasi()$linkinv)
})

test_that("inv_link() works for quasibinomial() family objects", {
    f <- inv_link(quasibinomial())
    expect_type(f, "closure")
    expect_identical(f(val), quasibinomial()$linkinv(val))
    expect_identical(f, quasibinomial()$linkinv)
})

test_that("inv_link() works for quasipoisson() family objects", {
    f <- inv_link(quasipoisson())
    expect_type(f, "closure")
    expect_identical(f(val), quasipoisson()$linkinv(val))
    expect_identical(f, quasipoisson()$linkinv)
})

test_that("inv_link() works for negbin() family objects", {
    theta <- 1.1
    f <- inv_link(negbin(theta = theta))
    expect_type(f, "closure")
    expect_identical(f(val), negbin(theta = theta)$linkinv(val))
    expect_identical(f, negbin(theta = theta)$linkinv)
})

test_that("inv_link() works for nb() family objects", {
    f <- inv_link(nb())
    expect_type(f, "closure")
    expect_identical(f(val), nb()$linkinv(val))
    expect_identical(f, nb()$linkinv)
})

test_that("inv_link() works for Tweedie() family objects", {
    p <- 1.1
    f <- inv_link(Tweedie(p = p))
    expect_type(f, "closure")
    expect_identical(f(val), Tweedie(p = p)$linkinv(val))
    expect_identical(f, Tweedie(p = p)$linkinv)
})

test_that("inv_link() works for tw() family objects", {
    f <- inv_link(tw())
    expect_type(f, "closure")
    expect_identical(f(val), tw()$linkinv(val))
    expect_identical(f, tw()$linkinv)
})

test_that("inv_link() works for scat() family objects", {
    f <- inv_link(scat())
    expect_type(f, "closure")
    expect_identical(f(val), scat()$linkinv(val))
    expect_identical(f, scat()$linkinv)
})

test_that("inv_link() works for betar() family objects", {
    f <- inv_link(betar())
    expect_type(f, "closure")
    expect_identical(f(val), betar()$linkinv(val))
    expect_identical(f, betar()$linkinv)
})

test_that("inv_link() works for ocat() family objects", {
    theta <- 1.1
    f <- inv_link(ocat(theta = theta))
    expect_type(f, "closure")
    expect_identical(f(val), ocat(theta = theta)$linkinv(val))
    expect_identical(f, ocat(theta = theta)$linkinv)
})

test_that("inv_link() works for ziP() family objects", {
    f <- inv_link(ziP())
    expect_type(f, "closure")
    expect_identical(f(val), ziP()$linkinv(val))
    expect_identical(f, ziP()$linkinv)
})

test_that("inv_link() works for cox.ph() family objects", {
    f <- inv_link(cox.ph())
    expect_type(f, "closure")
    expect_identical(f(val), cox.ph()$linkinv(val))
    expect_identical(f, cox.ph()$linkinv)
})

test_that("inv_link() works for cnorm() family objects", {
    f <- inv_link(cnorm())
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, cnorm()$linkinv)
})
## extract_link() ----------------------------------------------------------
## The lower-level extract_link() is tested in both directions per family:
## default (link) and with `inverse = TRUE` (inverse link).
test_that("extract_link() works on gaussian() family objects", {
    ## link
    f <- extract_link(gaussian())
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, gaussian()$linkfun)
    ## inverse
    f <- extract_link(gaussian(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, gaussian()$linkinv)
})

test_that("extract_link() works on poisson() family objects", {
    ## link
    f <- extract_link(poisson())
    expect_type(f, "closure")
    expect_identical(f(val), log(val))
    expect_identical(f, poisson()$linkfun)
    ## inverse
    f <- extract_link(poisson(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), exp(val))
    expect_identical(f, poisson()$linkinv)
})

test_that("extract_link() works on binomial() family objects", {
    ## link
    f <- extract_link(binomial())
    expect_type(f, "closure")
    expect_identical(f(val), binomial()$linkfun(val))
    expect_identical(f, binomial()$linkfun)
    ## inverse
    f <- extract_link(binomial(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), binomial()$linkinv(val))
    expect_identical(f, binomial()$linkinv)
})

test_that("extract_link() works on Gamma() family objects", {
    ## link
    f <- extract_link(Gamma())
    expect_type(f, "closure")
    expect_identical(f(val), Gamma()$linkfun(val))
    expect_identical(f, Gamma()$linkfun)
    ## inverse
    f <- extract_link(Gamma(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), Gamma()$linkinv(val))
    expect_identical(f, Gamma()$linkinv)
})

test_that("extract_link() works on inverse.gaussian() family objects", {
    ## link
    f <- extract_link(inverse.gaussian())
    expect_type(f, "closure")
    expect_identical(f(val), inverse.gaussian()$linkfun(val))
    expect_identical(f, inverse.gaussian()$linkfun)
    ## inverse
    f <- extract_link(inverse.gaussian(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), inverse.gaussian()$linkinv(val))
    expect_identical(f, inverse.gaussian()$linkinv)
})

test_that("extract_link() works on quasi() family objects", {
    ## link
    f <- extract_link(quasi())
    expect_type(f, "closure")
    expect_identical(f(val), quasi()$linkfun(val))
    expect_identical(f, quasi()$linkfun)
    ## inverse
    f <- extract_link(quasi(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), quasi()$linkinv(val))
    expect_identical(f, quasi()$linkinv)
})

test_that("extract_link() works on quasibinomial() family objects", {
    ## link
    f <- extract_link(quasibinomial())
    expect_type(f, "closure")
    expect_identical(f(val), quasibinomial()$linkfun(val))
    expect_identical(f, quasibinomial()$linkfun)
    ## inverse
    f <- extract_link(quasibinomial(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), quasibinomial()$linkinv(val))
    expect_identical(f, quasibinomial()$linkinv)
})

test_that("extract_link() works on quasipoisson() family objects", {
    ## link
    f <- extract_link(quasipoisson())
    expect_type(f, "closure")
    expect_identical(f(val), quasipoisson()$linkfun(val))
    expect_identical(f, quasipoisson()$linkfun)
    ## inverse
    f <- extract_link(quasipoisson(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), quasipoisson()$linkinv(val))
    expect_identical(f, quasipoisson()$linkinv)
})
## extract_link() on negbin(): theta must be supplied to the constructor.
## (Uses `<-` for assignment, consistent with the other tests in this file;
## the original used `=`.)
test_that("extract_link() works on negbin() family objects", {
    ## link
    theta <- 1.1
    f <- extract_link(negbin(theta = theta))
    expect_type(f, "closure")
    expect_identical(f(val), negbin(theta = theta)$linkfun(val))
    expect_identical(f, negbin(theta = theta)$linkfun)
    ## inverse
    f <- extract_link(negbin(theta = theta), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), negbin(theta = theta)$linkinv(val))
    expect_identical(f, negbin(theta = theta)$linkinv)
})
## extract_link() on nb() (negative binomial with estimated theta)
test_that("extract_link() works on nb() family objects", {
    ## link
    f <- extract_link(nb())
    expect_type(f, "closure")
    expect_identical(f(val), nb()$linkfun(val))
    expect_identical(f, nb()$linkfun)
    ## inverse
    f <- extract_link(nb(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), nb()$linkinv(val))
    expect_identical(f, nb()$linkinv)
})
## extract_link() on Tweedie(): the power parameter p must be supplied.
## (Uses `<-` for assignment, consistent with the other tests in this file;
## the original used `=`.)
test_that("extract_link() works on Tweedie() family objects", {
    ## link
    p <- 1.1
    f <- extract_link(Tweedie(p = p))
    expect_type(f, "closure")
    expect_identical(f(val), Tweedie(p = p)$linkfun(val))
    expect_identical(f, Tweedie(p = p)$linkfun)
    ## inverse
    f <- extract_link(Tweedie(p = p), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), Tweedie(p = p)$linkinv(val))
    expect_identical(f, Tweedie(p = p)$linkinv)
})
## extract_link() on further mgcv extended families
test_that("extract_link() works on tw() family objects", {
    ## link
    f <- extract_link(tw())
    expect_type(f, "closure")
    expect_identical(f(val), tw()$linkfun(val))
    expect_identical(f, tw()$linkfun)
    ## inverse
    f <- extract_link(tw(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), tw()$linkinv(val))
    expect_identical(f, tw()$linkinv)
})

test_that("extract_link() works on scat() family objects", {
    ## link
    f <- extract_link(scat())
    expect_type(f, "closure")
    expect_identical(f(val), scat()$linkfun(val))
    expect_identical(f, scat()$linkfun)
    ## inverse
    f <- extract_link(scat(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), scat()$linkinv(val))
    expect_identical(f, scat()$linkinv)
})

test_that("extract_link() works on betar() family objects", {
    ## link
    f <- extract_link(betar())
    expect_type(f, "closure")
    expect_identical(f(val), betar()$linkfun(val))
    expect_identical(f, betar()$linkfun)
    ## inverse
    f <- extract_link(betar(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), betar()$linkinv(val))
    expect_identical(f, betar()$linkinv)
})

test_that("extract_link() works on ziP() family objects", {
    ## link
    f <- extract_link(ziP())
    expect_type(f, "closure")
    expect_identical(f(val), ziP()$linkfun(val))
    expect_identical(f, ziP()$linkfun)
    ## inverse
    f <- extract_link(ziP(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), ziP()$linkinv(val))
    expect_identical(f, ziP()$linkinv)
})

## ocat() requires a theta (cut point) value
test_that("extract_link() works on ocat() family objects", {
    theta <- 1.1
    ## link
    f <- extract_link(ocat(theta = theta))
    expect_type(f, "closure")
    expect_identical(f(val), ocat(theta = theta)$linkfun(val))
    expect_identical(f, ocat(theta = theta)$linkfun)
    ## inverse
    f <- extract_link(ocat(theta = theta), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), ocat(theta = theta)$linkinv(val))
    expect_identical(f, ocat(theta = theta)$linkinv)
})

test_that("extract_link() works on cox.ph() family objects", {
    ## link
    f <- extract_link(cox.ph())
    expect_type(f, "closure")
    expect_identical(f(val), cox.ph()$linkfun(val))
    expect_identical(f, cox.ph()$linkfun)
    ## inverse
    f <- extract_link(cox.ph(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), cox.ph()$linkinv(val))
    expect_identical(f, cox.ph()$linkinv)
})
## extract_link() on location-scale (LSS) families -------------------------
## For multi-linear-predictor families each distributional parameter has
## its own link, stored in the family's $linfo list; `parameter =` accepts
## both the generic name ("location", "scale", ...) and the family-specific
## alias ("mu", "sigma", "theta", ...).
test_that("extract_link() works on gaulss() family objects", {
    fam <- gaulss()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    f <- extract_link(fam, parameter = "sigma")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "sigma", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
})

## gammals: scale parameter's family-specific alias is "theta"
test_that("extract_link() works on gammals() family objects", {
    fam <- gammals()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    f <- extract_link(fam, parameter = "theta")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "theta", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
})

## gumbls: only tested via the generic parameter names for scale
test_that("extract_link() works on gumbls() family objects", {
    fam <- gumbls()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
})

## twlss has three linear predictors: location, scale, and Tweedie power
test_that("extract_link() works on twlss() family objects", {
    fam <- twlss()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    f <- extract_link(fam, parameter = "sigma")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "sigma", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    ## power parameter
    ## link
    f <- extract_link(fam, parameter = "power")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[3L]]$linkfun(val))
    expect_identical(f, fam$linfo[[3L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "power", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[3L]]$linkinv(val))
    expect_identical(f, fam$linfo[[3L]]$linkinv)
})
## gevlss: three predictors — location, scale, and shape (alias "xi");
## the shape link is only defined on (0, 1), hence the separate xi_val
test_that("extract_link() works on gevlss() family objects", {
    fam <- gevlss()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    f <- extract_link(fam, parameter = "sigma")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "sigma", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    ## shape parameter, also xi
    ## link
    xi_val <- 0.5 # must be in range 0-1
    f <- extract_link(fam, parameter = "shape")
    expect_type(f, "closure")
    expect_identical(f(xi_val), fam$linfo[[3L]]$linkfun(xi_val))
    expect_identical(f, fam$linfo[[3L]]$linkfun)
    f <- extract_link(fam, parameter = "xi")
    expect_type(f, "closure")
    expect_identical(f(xi_val), fam$linfo[[3L]]$linkfun(xi_val))
    expect_identical(f, fam$linfo[[3L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "shape", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(xi_val), fam$linfo[[3L]]$linkinv(xi_val))
    expect_identical(f, fam$linfo[[3L]]$linkinv)
    f <- extract_link(fam, parameter = "xi", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(xi_val), fam$linfo[[3L]]$linkinv(xi_val))
    expect_identical(f, fam$linfo[[3L]]$linkinv)
})

## ziplss: the second predictor is the zero-inflation probability (alias
## "pi"), accessed here via the generic "scale" name as well
test_that("extract_link() works on ziplss() family objects", {
    fam <- ziplss()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter - really the zero-inflation bit
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    f <- extract_link(fam, parameter = "pi")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "pi", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
})
## Families with several identical linear predictors (mvn, multinom):
## the predictor must be selected with `which_eta`, and omitting it is an
## error
test_that("extract_link() works on mvn() family objects", {
    fam <- mvn(d = 2)
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location", which_eta = 1L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu", which_eta = 1L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## error if no `which_eta`
    expect_error(extract_link(fam, parameter = "mu"),
                 "Which linear predictor not specified; see 'which_eta'",
                 fixed = TRUE)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE,
                      which_eta = 2L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE,
                      which_eta = 2L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
})

test_that("extract_link() works on multinom() family objects", {
    fam <- multinom(K = 2)
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location", which_eta = 1L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu", which_eta = 1L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## error if no `which_eta`
    expect_error(extract_link(fam, parameter = "mu"),
                 "Which linear predictor not specified; see 'which_eta'",
                 fixed = TRUE)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE,
                      which_eta = 2L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE,
                      which_eta = 2L)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
})
## shash has four parameters: location (mu), scale (sigma), skewness
## (epsilon) and kurtosis (delta); each has its own link in $linfo
test_that("extract_link() works on shash() family objects", {
    fam <- shash()
    ## location parameter
    ## link
    f <- extract_link(fam, parameter = "location")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    f <- extract_link(fam, parameter = "mu")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
    expect_identical(f, fam$linfo[[1L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "location", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    f <- extract_link(fam, parameter = "mu", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[1L]]$linkinv(val))
    expect_identical(f, fam$linfo[[1L]]$linkinv)
    ## scale parameter
    ## link
    f <- extract_link(fam, parameter = "scale")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    f <- extract_link(fam, parameter = "sigma")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkfun(val))
    expect_identical(f, fam$linfo[[2L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "scale", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    f <- extract_link(fam, parameter = "sigma", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[2L]]$linkinv(val))
    expect_identical(f, fam$linfo[[2L]]$linkinv)
    ## skewness parameter
    ## link
    f <- extract_link(fam, parameter = "skewness")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[3L]]$linkfun(val))
    expect_identical(f, fam$linfo[[3L]]$linkfun)
    f <- extract_link(fam, parameter = "epsilon")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[3L]]$linkfun(val))
    expect_identical(f, fam$linfo[[3L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "skewness", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[3L]]$linkinv(val))
    expect_identical(f, fam$linfo[[3L]]$linkinv)
    f <- extract_link(fam, parameter = "epsilon", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[3L]]$linkinv(val))
    expect_identical(f, fam$linfo[[3L]]$linkinv)
    ## kurtosis parameter (4th linear predictor, alias "delta")
    ## link
    f <- extract_link(fam, parameter = "kurtosis")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[4L]]$linkfun(val))
    expect_identical(f, fam$linfo[[4L]]$linkfun)
    f <- extract_link(fam, parameter = "delta")
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[4L]]$linkfun(val))
    expect_identical(f, fam$linfo[[4L]]$linkfun)
    ## inverse
    f <- extract_link(fam, parameter = "kurtosis", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[4L]]$linkinv(val))
    expect_identical(f, fam$linfo[[4L]]$linkinv)
    f <- extract_link(fam, parameter = "delta", inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), fam$linfo[[4L]]$linkinv(val))
    expect_identical(f, fam$linfo[[4L]]$linkinv)
})

## cnorm() uses the identity link, so f(val) is just val
test_that("extract_link() works on cnorm() family objects", {
    ## link
    f <- extract_link(cnorm())
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, cnorm()$linkfun)
    ## inverse
    f <- extract_link(cnorm(), inverse = TRUE)
    expect_type(f, "closure")
    expect_identical(f(val), val)
    expect_identical(f, cnorm()$linkinv)
})
## tests some specific extract functions
test_that("twlss_link() can extract a link function", {
fam <- twlss()
expect_silent(f <- twlss_link(fam, parameter = "mu"))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("gevlss_link() can extract a link function", {
fam <- gevlss()
expect_silent(f <- gevlss_link(fam, parameter = "mu"))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("gumbls_link() can extract a link function", {
fam <- gumbls()
expect_silent(f <- gumbls_link(fam, parameter = "mu"))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("gammals_link() can extract a link function", {
fam <- gammals()
expect_silent(f <- gammals_link(fam, parameter = "mu"))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("ziplss_link() can extract a link function", {
fam <- ziplss()
expect_silent(f <- ziplss_link(fam, parameter = "mu"))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("mvn_link() can extract a link function", {
fam <- mvn()
expect_silent(f <- mvn_link(fam, parameter = "location",
which_eta = 1))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("multinom_link() can extract a link function", {
fam <- multinom()
expect_silent(f <- multinom_link(fam, parameter = "location",
which_eta = 1))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## tests some specific extract functions
test_that("shash_link() can extract a link function", {
fam <- shash()
expect_silent(f <- shash_link(fam, parameter = "mu"))
expect_type(f, "closure")
expect_identical(f(val), fam$linfo[[1L]]$linkfun(val))
expect_identical(f, fam$linfo[[1L]]$linkfun)
})
## Input validation for the internal family-specific link extractors.
## Every *_link() helper must fail gracefully twice: once for input that
## is not a family object at all, and once for a family object of the
## wrong type (the expected type string is embedded verbatim in the error
## message). nb_link() is probed with tw() -- everything else with nb() --
## so the supplied family is always of the wrong type.
bad_family_cases <- list(
  list(fn = gaussian_link,         name = "gaussian_link",         type = "gaussian",              wrong = nb),
  list(fn = poisson_link,          name = "poisson_link",          type = "poisson",               wrong = nb),
  list(fn = binomial_link,         name = "binomial_link",         type = "binomial",              wrong = nb),
  list(fn = gamma_link,            name = "gamma_link",            type = "Gamma",                 wrong = nb),
  list(fn = inverse_gaussian_link, name = "inverse_gaussian_link", type = "inverse.gaussian",      wrong = nb),
  list(fn = quasi_link,            name = "quasi_link",            type = "quasi",                 wrong = nb),
  list(fn = quasi_poisson_link,    name = "quasi_poisson_link",    type = "quasipoisson",          wrong = nb),
  list(fn = quasi_binomial_link,   name = "quasi_binomial_link",   type = "quasibinomial",         wrong = nb),
  list(fn = nb_link,               name = "nb_link",               type = "Negative Binomial",     wrong = tw),
  list(fn = tw_link,               name = "tw_link",               type = "Tweedie",               wrong = nb),
  list(fn = beta_link,             name = "beta_link",             type = "Beta regression",       wrong = nb),
  list(fn = scaled_t_link,         name = "scaled_t_link",         type = "scaled t",              wrong = nb),
  list(fn = ocat_link,             name = "ocat_link",             type = "Ordered Categorical",   wrong = nb),
  list(fn = zip_link,              name = "zip_link",              type = "zero inflated Poisson", wrong = nb),
  list(fn = cox_ph_link,           name = "cox_ph_link",           type = "Cox PH",                wrong = nb),
  list(fn = gaulss_link,           name = "gaulss_link",           type = "gaulss",                wrong = nb),
  list(fn = twlss_link,            name = "twlss_link",            type = "twlss",                 wrong = nb),
  list(fn = gevlss_link,           name = "gevlss_link",           type = "gevlss",                wrong = nb),
  list(fn = gammals_link,          name = "gammals_link",          type = "gammals",               wrong = nb),
  list(fn = ziplss_link,           name = "ziplss_link",           type = "ziplss",                wrong = nb),
  list(fn = mvn_link,              name = "mvn_link",              type = "Multivariate normal",   wrong = nb),
  list(fn = multinom_link,         name = "multinom_link",         type = "multinom",              wrong = nb)
)
for (case in bad_family_cases) {
  test_that(paste0(case$name, "() fails gracefully"), {
    expect_error(case$fn(1), "'family' is not a family object")
    expect_error(case$fn(case$wrong()),
                 paste0("'family' is not of type '\"", case$type, "\"'"))
  })
}
## Other gamm4 family utilities
## family() method for "gamm4" fits; the m_gamm4 test model was fitted
## with gaussian(), so that family should be recovered (function
## environments are ignored in the comparison).
test_that("family.gamm4 works for a gamm4 object", {
fam <- family(m_gamm4)
expect_s3_class(fam, class = "family")
expect_equal(fam, gaussian(), ignore_function_env = TRUE)
})
## A plain list (`l`) is not a gamm4 fit, so the method must error with
## the package's (verbatim) message.
test_that("family.gamm4 throws an error when passed a non-gamm4 object", {
expect_error(family(l),
regexp = "`object` does not appear to a `gamm4` model object",
fixed = TRUE)
})
## family() method for "gamm" fits: same contract as the gamm4 method.
test_that("family.gamm works for a gamm object", {
fam <- family(m_gamm)
expect_s3_class(fam, class = "family")
expect_equal(fam, gaussian(), ignore_function_env = TRUE)
})
## family_name() should return the character name of the error family for
## every supported model class; all of these fitted test models use
## gaussian(), so each test is identical apart from the model object.
gaussian_models <- list(gam = m_gam, glm = m_glm, gamm = m_gamm,
                        gamm4 = m_gamm4, bam = m_bam)
for (model_class in names(gaussian_models)) {
  test_that(paste0("family_name() works with a ", model_class, "() model"), {
    f <- family_name(gaussian_models[[model_class]])
    expect_type(f, "character")
    expect_identical(f, "gaussian")
  })
}
## family_name() dispatches on lists only when they are gamm4 fits; a
## plain list must trigger the informative error.
test_that("family_name.list() fails with a list that isn't a gamm4", {
expect_error(family_name(l),
regexp = "`object` does not appear to a `gamm4` model object",
fixed = TRUE)
})
## Distributional (LSS) families report the mgcv family name.
test_that("family_name() works with a gam() gaulss model", {
f <- family_name(m_gaulss)
expect_type(f, "character")
expect_identical(f, "gaulss")
})
## family_name() also accepts a bare family object directly.
test_that("family_name() works with a family() object", {
f <- family_name(gaussian())
expect_type(f, "character")
expect_identical(f, "gaussian")
})
# special cnorm tests
## cnorm() families extracted from a fitted model behave like those
## created directly: identity link/inverse, and a family name of the
## form "cnorm(<number>)" (the number is presumably the censoring
## threshold -- confirm against cnorm()'s definition).
test_that("family utils work on cnorm() family objects from a model", {
## link
f <- extract_link(family(m_censor))
expect_type(f, "closure")
expect_identical(f(val), val)
expect_identical(f, cnorm()$linkfun)
## inverse
f <- extract_link(family(m_censor), inverse = TRUE)
expect_type(f, "closure")
expect_identical(f(val), val)
expect_identical(f, cnorm()$linkinv)
f <- family_name(m_censor)
expect_type(f, "character")
expect_match(f, "^cnorm\\(\\d+\\.?\\d+\\)$")
})
|
34d17c894c9b1e2996959ad892f4dd73e40f861a
|
112b7f027320cf8ccffefc19b552f7427886eb50
|
/run_analysis.R
|
f83a33faa7b1e46d2f37a5700dab156d7afb0f2c
|
[] |
no_license
|
hiicharles/getdata-008
|
f70769860e9e49bf2fb79c0d67a0c38e07aa522d
|
9f3a9f617b575fe07d1c98d8dcecaee6692f78a7
|
refs/heads/master
| 2021-01-15T13:01:44.771228
| 2014-10-26T18:03:30
| 2014-10-26T18:03:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,009
|
r
|
run_analysis.R
|
## Getting and Cleaning Data Course Project
## Course ID: getdata-008
## Submission login (email): hiicharles@gmail.com
## Date: 2014-10-26
## Instructions
## You should create one R script called run_analysis.R that does the following.
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
## Load library dplyr and tidyr
library(dplyr)
library(tidyr)
## Create the working directory "~/data" if it does not already exist.
workdir <- "~/data"
if (!file.exists(workdir)) {
dir.create(workdir)
}
## Change working directory to "~/data".
## NOTE(review): setwd() in a script changes global state for the whole
## session; building absolute paths from `workdir` would be safer.
setwd(workdir)
## Download the raw zip archive only if it is not already present.
## NOTE(review): method = "wget" requires the wget binary and is not
## portable (e.g. Windows); the default download method would be safer
## -- confirm before changing.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
filePath <- paste(workdir,"getdata-projectfiles-UCI HAR Dataset.zip",sep="/")
if (!file.exists(filePath)) {
download.file(fileUrl, destfile = filePath, method = "wget")
}
## Unzip the archive if the extracted folder or any required input file
## is missing.
extPath = paste(workdir, "UCI HAR Dataset", sep="/")
## Paths to every input file used below.
activity_filepath <- paste(extPath,"activity_labels.txt",sep="/")
features_filepath <- paste(extPath, "features.txt", sep="/")
x_test_filepath <- paste(extPath,"test/X_test.txt",sep="/")
y_test_filepath <- paste(extPath,"test/y_test.txt",sep="/")
subject_test_filepath <- paste(extPath,"test/subject_test.txt",sep="/")
x_train_filepath <- paste(extPath,"train/X_train.txt",sep="/")
y_train_filepath <- paste(extPath,"train/y_train.txt",sep="/")
subject_train_filepath <- paste(extPath,"train/subject_train.txt",sep="/")
if (!file.exists(extPath) ||
!file.exists(activity_filepath) ||
!file.exists(features_filepath) ||
!file.exists(x_test_filepath) ||
!file.exists(y_test_filepath) ||
!file.exists(subject_test_filepath) ||
!file.exists(x_train_filepath) ||
!file.exists(y_train_filepath) ||
!file.exists(subject_train_filepath)) {
# Unzip into the working directory (overwriting any partial extraction).
unzip(zipfile = filePath, unzip = "internal", overwrite = TRUE)
}
## tbl_activity (tbl_df): activity id -> description lookup table.
tbl_activity <- read.table(file=activity_filepath, header=FALSE, sep="", col.names = c("id", "description")) %>% tbl_df()
## tbl_features (tbl_df): feature id, raw description, and description2,
## a cleaned-up version of the name that is safe to use as a column name.
tbl_features <- read.table(file=features_filepath, header=FALSE, sep="", col.names = c("id", "description")) %>% tbl_df()
tbl_features <- tbl_features %>%
mutate(description2 = gsub("-", ".", description)) %>%
mutate(description2 = gsub("\\(", "", description2)) %>%
mutate(description2 = gsub("\\)", "", description2)) %>%
mutate(description2 = gsub(",", ".", description2))
## The cleaning above replaces "-" and "," with "." and strips "(" and
## ")", e.g. "tBodyAcc-mean()-X" becomes "tBodyAcc.mean.X".
## x_test (tbl_df): test-set feature measurements, one column per feature
## (named from tbl_features$description2).
x_test <- read.table(file=x_test_filepath, header=FALSE, sep = "", col.names = as.character(tbl_features$description2)) %>% tbl_df()
## y_test (tbl_df): activity id (read as character) per test observation.
y_test <- read.table(file=y_test_filepath, header=FALSE, sep = "", col.names = c("activity"), colClasses = "character") %>% tbl_df()
## subject_test (tbl_df): subject id per test observation.
subject_test <- read.table(file=subject_test_filepath, header=FALSE, sep = "", col.names = c("subject")) %>% tbl_df()
## x_train (tbl_df): training-set feature measurements.
x_train <- read.table(file=x_train_filepath, header=FALSE, sep = "", col.names = as.character(tbl_features$description2)) %>% tbl_df()
## y_train (tbl_df): activity id (read as character) per training
## observation.
y_train <- read.table(file=y_train_filepath, header=FALSE, sep = "", col.names = c("activity"), colClasses = "character") %>% tbl_df()
## subject_train (tbl_df): subject id per training observation.
subject_train <- read.table(file=subject_train_filepath, header=FALSE, sep = "", col.names = c("subject")) %>% tbl_df()
## 1. Merges the training and the test sets to create one data set.
## tbl_test: activity, subject, category ("test") plus all feature columns.
tbl_test <-
cbind(y_test, subject_test, category = c("test")) %>%
cbind(x_test) %>%
tbl_df()
## tbl_train: activity, subject, category ("train") plus all feature
## columns.
tbl_train <-
cbind(y_train, subject_train, category = c("train")) %>%
cbind(x_train) %>%
tbl_df()
## tbl_data_1: test and training observations stacked together.
tbl_data_1 <-
rbind(tbl_test, tbl_train) %>%
tbl_df()
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## Keeps the 66 features whose cleaned name contains ".mean."/".std." or
## ends in ".mean"/".std"; this deliberately excludes meanFreq and the
## angle()-style "...Mean" variables (see feature-name cleaning above).
tbl_data_2 <- select(tbl_data_1, activity, subject, category, matches("\\.mean\\.|\\.mean$|\\.std\\.|\\.std$"))
## 3. Uses descriptive activity names to name the activities in the data set
## Join the activity lookup and overwrite the numeric activity id with
## its description.
tbl_data_3 <-
merge(tbl_data_2, tbl_activity, by.x = "activity", by.y = "id") %>%
tbl_df() %>%
mutate(activity = description)
## Drop the helper "description" column (the last column).
## NOTE(review): the hard-coded 1:69 here (and 4:69 below) silently
## breaks if the number of selected features changes; select(-description)
## would be more robust -- confirm before changing.
tbl_data_3 <- tbl_data_3[, 1:69]
## 4. Appropriately labels the data set with descriptive variable names.
## Already being assigned with proper descriptive variables during read.table.
## 5. From the data set in step 4, creates a second, independent tidy tbl_data set with
## the average of each variable for each activity and each subject.
## tbl_data_4: mean of each retained feature per (activity, subject).
tbl_data_4 <- aggregate(tbl_data_3[,4:69],
by = list(activity=tbl_data_3$activity, subject=tbl_data_3$subject),
FUN=mean) %>%
tbl_df()
## Write the final tidy data to file "~/data/output.txt".
write.table(tbl_data_4, file = paste(workdir, "output.txt", sep = "/"), row.names = FALSE)
|
fb71a840be2dac05f1acfc03309f26084224bd1b
|
e470b224ede4a63bec073def3511184d904f9702
|
/scripts/parse_picard_complexity.R
|
23d9182a695ad26c2013b484d86507d0a9bf88e9
|
[] |
no_license
|
mardzix/cutandtag-standard
|
091a136a850f777d0d93ec2db7d2426caa9df225
|
48b1d9b63b826798e9858ef949bf17461cfeff61
|
refs/heads/master
| 2023-05-27T19:54:14.113583
| 2021-06-17T08:36:49
| 2021-06-17T08:36:49
| 377,759,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 759
|
r
|
parse_picard_complexity.R
|
## Command-line usage: the last argument is the output CSV path; all
## preceding arguments are Picard duplication-metrics report files.
args <- commandArgs(trailingOnly=TRUE)
out.file <- args[length(args)]
reports <- args[-length(args)]
## Read one Picard metrics report: the first tab-separated line holds the
## metric names, the second the values. Returns a one-row data frame of
## character values with a SAMPLE column naming the source file.
parse_picard <- function(report) {
  ## Announce which report is being processed.
  cat(paste0("*** Reading file ", basename(report), "\n"))
  raw <- read.table(file = report,
                    nrows = 2,
                    stringsAsFactors = FALSE,
                    sep = "\t")
  ## Promote the first row to column names, then drop it.
  colnames(raw) <- raw[1, ]
  metrics <- raw[-1, ]
  metrics$SAMPLE <- basename(report)
  return(metrics)
}
## Parse every report, keep only the duplication metrics of interest, and
## stack the per-sample rows into a single table.
keep_cols <- c("SAMPLE", "PERCENT_DUPLICATION",
               "UNPAIRED_READS_EXAMINED", "READ_PAIRS_EXAMINED")
parsed <- lapply(reports, parse_picard)
duplication_rows <- lapply(parsed, function(report_df) report_df[, keep_cols])
duplication_table <- do.call(rbind, duplication_rows)
## Write the combined table as unquoted CSV.
write.csv(x = duplication_table, file = out.file, quote = FALSE)
|
bed5b6697f6675a93936febba4e25df3cb8f2bfe
|
876801a26a4c31dd95e7bbab466729ea4619feae
|
/cachematrix.R
|
becbf3c12f953652cb916310062af2a52da4cfa7
|
[] |
no_license
|
spbachu/ProgrammingAssignment2
|
79ba236e7b29c8b464ea16eb5a7447330b96eacd
|
bc414f6cb85c16287aad7c0b203997f55a31490d
|
refs/heads/master
| 2020-12-01T09:28:46.284336
| 2014-05-17T21:52:55
| 2014-05-17T21:52:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 940
|
r
|
cachematrix.R
|
## Caches the inverse of a matrix computation.
## makeCacheMatrix() builds a special "matrix": a list of four accessor
## functions (set/get for the matrix itself, setmatrix/getmatrix for the
## cached inverse) that share state via the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # replacing the matrix invalidates any cached inverse
  }
  get <- function() x
  setmatrix <- function(matrix) inv <<- matrix
  getmatrix <- function() inv
  list(set = set, get = get,
       setmatrix = setmatrix, getmatrix = getmatrix)
}
## Returns the inverse of the special "matrix" created by
## makeCacheMatrix(). A previously cached inverse is reused (with a
## message); otherwise the inverse is computed with solve() and stored
## back into the cache before being returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setmatrix(inv)
  inv
}
|
8e43b5e623bea306e8300ca15f694e575b5d7585
|
8ff11b361633b2c805b8fc2ea415c51cca22929f
|
/R/formatting.R
|
fb90615d4bcd340c3b0e3e430c20920d360264f6
|
[] |
no_license
|
Pascal-Schmidt/tblGoat
|
689da114ba6b147b30bf696921a0301dd7e4a686
|
ad5964a8eaaec322889a48093031909a88eda587
|
refs/heads/master
| 2022-04-29T02:09:40.178624
| 2020-04-27T20:03:57
| 2020-04-27T20:03:57
| 258,673,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,101
|
r
|
formatting.R
|
#' Format a summary table for markdown printing
#'
#' Bolds variable-name rows, optionally builds column headers containing
#' per-group sizes, appends "(sd)" / "(Q1 - Q3)" suffixes to Mean/Median
#' rows of numeric summaries, and indents the remaining (level) rows.
#'
#' @param df Data frame of the raw data; used to count group sizes for
#'   the header (assumes it contains the columns named in
#'   \code{grouping_var} -- TODO confirm).
#' @param df_mode Summary table with a \code{name} column holding
#'   variable names and level labels.
#' @param grouping_var Character vector of grouping column names; may be
#'   empty.
#' @param header Logical; if \code{TRUE} and \code{grouping_var} is
#'   non-empty, column headers gain "N = ... (%)" group sizes.
#' @param mode_tbl Type of summary; when \code{"numeric"}, Mean/Median
#'   rows are suffixed with "(sd)" / "(Q1 - Q3)".
#'
#' @return The formatted tibble.
#'
#' @examples
formatting <- function(df, df_mode, grouping_var, header = TRUE, mode_tbl) {
df_mode %>%
# make variable name rows bold (rows whose name matches a df column)
dplyr::mutate(name = ifelse(name %in% colnames(df),
paste0("**", name, "**"),
name
)) -> df_mode
if (header & length(grouping_var) > 0) {
# build header labels: per-group N and percentage, plus a "Total" row
df %>%
dplyr::filter_at(vars(grouping_var), ~ !is.na(.)) %>%
dplyr::count(!!!syms(grouping_var)) %>%
dplyr::mutate(prop = paste0(round(n / sum(n), 4) * 100, "%")) %>%
{
dplyr::bind_rows(
mutate_all(., as.character),
purrr::set_names(dplyr::as_tibble(t(c(rep("Total", length(grouping_var)), sum(.$n), "100%"))), colnames(.))
)
} %>%
tidyr::unite(col = "group", grouping_var, sep = "_") %>%
# reorder rows to match the column order of df_mode (no column switching)
.[match(dplyr::pull(., group)[order(match(dplyr::pull(., group), colnames(df_mode)))], .$group), ] %>%
dplyr::mutate(
group = ifelse(stringr::str_detect(group, "Total"), "Total", group),
col_name = paste0(group, " N = ", n, " (", prop, ")")
) %>%
dplyr::pull(col_name) -> table_names
}
# add sd / quantile suffixes to Mean and Median rows, and indent level rows
df_mode %>%
purrr::when(
mode_tbl == "numeric" ~ dplyr::mutate(., name = ifelse(stringr::str_detect(name, "Mean"),
paste0(name, " (sd)"),
name
)) %>%
dplyr::mutate(., name = ifelse(stringr::str_detect(name, "Median"),
paste0(name, " (Q1 - Q3)"),
name
)),
~.
) %>%
# rows not bolded above (no "*") are level rows: indent them 3 spaces
dplyr::mutate(name = ifelse(stringr::str_detect(df_mode$name, "\\*"),
name,
paste0(strrep(" ", 3), name)
)) -> formatted_tbl
if (length(grouping_var) > 0) {
# apply the computed header names (or keep existing ones when header=FALSE)
formatted_tbl %>%
purrr::when(
header ~ purrr::set_names(., c(" ", table_names)),
~ purrr::set_names(., c(" ", colnames(.)[-1]))
) -> formatted_tbl
}
return(formatted_tbl)
}
|
99f3f51243f6fbd77db456913b71f8c6061d7be1
|
6345226fb321ac2719b99c2c6e2d5693bc74ecec
|
/inst/doc/polychrome.R
|
7c127369c53fe2e167ce42e44870e405b0f08e3e
|
[] |
no_license
|
cran/Polychrome
|
0d8b9a387573e82d4b96212409fb3d1051a4708c
|
51e403e6e32306e3b0d272e2818e2e1b1c7aa099
|
refs/heads/master
| 2022-05-31T04:34:19.681591
| 2022-05-03T06:20:12
| 2022-05-03T06:20:12
| 85,283,338
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,409
|
r
|
polychrome.R
|
## Code chunks extracted (purled) from the Polychrome package vignette;
## each "## ----" line is a knitr chunk separator.
## -----------------------------------------------------------------------------
library(Polychrome)
## -----------------------------------------------------------------------------
## Kelly's 22-colour maximum-contrast palette.
mypal <- kelly.colors(22)
## ----fig.width=7, fig.height=5------------------------------------------------
## Several ways of displaying a palette.
swatch(mypal)
## ----fig.width=7, fig.height=5------------------------------------------------
swatchHue(mypal)
## ----fig.width=7, fig.height=5------------------------------------------------
swatchLuminance(mypal)
## ----fig.width=7, fig.height=5------------------------------------------------
ranswatch(mypal)
## ----fig.width=7, fig.height=5------------------------------------------------
## A 26-colour palette, shown on randomly scattered points.
pal2 <- alphabet.colors(26)
ranpoints(pal2, 14)
## ----fig.width=7, fig.height=5------------------------------------------------
## Diagnostic plots of the palette in colour space.
uvscatter(pal2)
## ----fig.width=7, fig.height=5------------------------------------------------
luminance(pal2)
## ----fig.width=7, fig.height=5------------------------------------------------
rancurves(pal2)
## ----fig.width=7, fig.height=5------------------------------------------------
plothc(pal2)
## ----fig.width=7, fig.height=5------------------------------------------------
plotpc(pal2)
## ----fig.width=7, fig.height=5------------------------------------------------
## Check how the palette survives conversion to grayscale.
grayed <- colorspace::desaturate(pal2)
swatchLuminance(grayed)
|
e9352936154761c40ac8adbdc9f9c2411de3c4d3
|
d75a1e1e95ae70ce048a0c26fb0f9c283fd5dd70
|
/man/NOAH_2B.Rd
|
926e08ddc2a44780e55d42b843dea3742ca81179
|
[] |
no_license
|
Owain-S/kmdata
|
49d65b279e7e84e170550f7d1fbdc8573f28784c
|
22569373a88f64ef480ea895c8ef7b7b5ced260e
|
refs/heads/master
| 2023-05-25T22:58:06.758825
| 2021-06-01T19:36:49
| 2021-06-01T19:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 970
|
rd
|
NOAH_2B.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{NOAH_2B}
\alias{NOAH_2B}
\title{NOAH, figure 2B}
\format{
A data frame of 235 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab OS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (chemo, chemotherapy_trastuzumab) \cr
}
}
\source{
Gianni L, Eiermann W, Semiglazov V, et al. Neoadjuvant and adjuvant
trastuzumab in patients with HER2-positive locally advanced breast
cancer (NOAH): follow-up of a randomised controlled superiority trial
with a parallel HER2-negative cohort. Lancet Oncol 2014; 15: 640–7.
}
\usage{
NOAH_2B
}
\description{
Kaplan-Meier digitized data from NOAH, figure 2B (PMID 24657003). A reported sample size of 235 for a primary endpoint of EFS in breast cancer.
}
\examples{
summary(NOAH_2B)
kmplot(NOAH_2B)
}
\keyword{datasets}
|
73c023d5df7967d3a2428bf7f5784acea6e449b4
|
af9e48f7a5f4a2ff9547122d866ba7f5dc63a89b
|
/tidy-data.R
|
d53aaebf7d3c1f6c3170d7b6ad574dbb06d7bd16
|
[
"MIT"
] |
permissive
|
joethorley/bioRxiv-028274
|
3844d9461755f6d2afd9d2647b7efcf52b8007c2
|
37002f17a9ec7732b25cf91ecf4560caa3d5adeb
|
refs/heads/master
| 2021-01-02T08:36:01.044493
| 2018-07-04T00:57:46
| 2018-07-04T00:57:46
| 99,027,969
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,422
|
r
|
tidy-data.R
|
source("header.R")

## Cleaned inputs produced by the upstream cleaning step.
pdo <- readRDS("output/clean/pdo.rds")
leks <- readRDS("output/clean/leks.rds")
counts <- readRDS("output/clean/counts.rds")
wells <- readRDS("output/clean/wells.rds")

## Buffer each well by 60 m.
wells <- st_buffer(wells, set_units(60, "m"))

## One sf object per year, holding every well present up to and
## including that year (cumulative well footprint).
years <- min(wells$Year):max(wells$Year)
wells_years <- lapply(years, function(year) filter(wells, Year <= year))
names(wells_years) <- years
## For each buffer distance (`dists` is presumably defined in header.R --
## confirm): intersect each year's cumulative well layer with the lek
## buffers, express the intersected area as a fraction of the circular
## buffer (pi * dist^2), join lek counts and PDO, and save the analysis
## data set for that distance.
for (dist in dists) {
print(str_c("dist: ", dist))
data <- st_buffer(leks, set_units(dist, "m")) %>%
# st_intersection_switch is a project helper (header.R) -- TODO confirm
lapply(wells_years, st_intersection_switch, .) %>%
lapply(function(y) aggregate(y, by = list(Lek = y$Lek), FUN = identity)) %>%
lapply(select, Lek) %>%
lapply(function(x) tibble(Lek = x$Lek, Area = st_area(x))) %>%
bind_rows(.id = "Year") %>%
# normalise by the area of the circular buffer around each lek
mutate(Area = Area / (pi * dist^2)) %>%
right_join(st_fortify(leks), by = "Lek") %>%
select(-x, -y) %>%
# leks with no intersecting wells get Area = 0 from the first year on
replace_na(replace = list(Year = min(wells$Year), Area = 0)) %>%
complete(Year, nesting(Lek, Group), fill = list(Area = 0)) %>%
mutate(Year = as.integer(Year)) %>%
left_join(counts, by = c("Lek", "Year")) %>%
inner_join(pdo, by = "Year") %>%
select(Lek, Group, Year, Males, Area, PDO, Dayte)
# select required to ensure locational information not made publicly available
saveRDS(data, str_c("data/analysis/data_", dist, ".rds"))
}
|
b4774d5cc90125c87d270d0410eb54f9c838fcf8
|
18fe92d34e448d7f20043384100454c4a96fbfd6
|
/R/plot.R
|
7691de4b8dd02f01faed7eb06792866b1597c963
|
[] |
no_license
|
cran/EpiModel
|
7564726177fa86a6a7f60c07c6e2c86147919180
|
bee708b011b790a5518c7cbf9ea52a3fe0752ee7
|
refs/heads/master
| 2023-06-22T02:25:35.668350
| 2023-06-20T17:20:05
| 2023-06-20T17:20:05
| 17,679,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82,209
|
r
|
plot.R
|
# Main Exported Methods ---------------------------------------------------
#' @title Plot Data from a Deterministic Compartmental Epidemic Model
#'
#' @description Plots epidemiological data from a deterministic compartment
#' epidemic model solved with \code{\link{dcm}}.
#'
#' @param x An \code{EpiModel} object of class \code{dcm}.
#' @param y Output compartments or flows from \code{dcm} object to plot.
#' @param popfrac If \code{TRUE}, plot prevalence of values rather than numbers
#' (see details).
#' @param run Run number to plot, for models with multiple runs
#' (default is run 1).
#' @param col Color for lines, either specified as a single color in a standard
#' R color format, or alternatively as a color palette from
#' \code{\link{RColorBrewer}} (see details).
#' @param lwd Line width for output lines.
#' @param lty Line type for output lines.
#' @param alpha Transparency level for lines, where 0 = transparent and
#' 1 = opaque (see \code{adjustcolor} function).
#' @param legend Type of legend to plot. Values are \code{"n"} for no legend,
#' \code{"full"} for full legend, and \code{"lim"} for limited legend
#' (see details).
#' @param leg.name Character string to use for legend, with the default
#' determined automatically based on the \code{y} input.
#' @param leg.cex Legend scale size.
#' @param axs Plot axis type (see \code{\link{par}} for details), with default
#' of "r".
#' @param grid If \code{TRUE}, a grid is added to the background of plot
#' (see \code{\link{grid}} for details), with default of nx by ny.
#' @param add If \code{TRUE}, new plot window is not called and lines are added
#' to existing plot window.
#' @param ... Additional arguments to pass to main plot window (see
#' \code{\link{plot.default}}).
#'
#' @details
#' This function plots epidemiological outcomes from a deterministic
#' compartmental model solved with \code{\link{dcm}}. Depending on the number of
#' model runs (sensitivity analyses) and number of groups, the default plot is
#' the fractional proportion of each compartment in the model over time. The
#' specific compartments or flows to plot may be set using the \code{y}
#' parameter, and in multiple run models the specific run may also be specified.
#'
#' @section The \code{popfrac} Argument:
#' Compartment prevalence is the size of a compartment over some denominator.
#' To plot the raw numbers from any compartment, use \code{popfrac=FALSE}; this
#' is the default. The \code{popfrac} parameter calculates
#' and plots the denominators of all specified compartments using these rules:
#' 1) for one-group models, the prevalence of any compartment is the compartment
#' size divided by the total population size; 2) for two-group models, the
#' prevalence of any compartment is the compartment size divided by the group
#' size.
#'
#' @section Color Palettes:
#' Since \code{\link{dcm}} supports multiple run sensitivity models, plotting
#' the results of such models uses a complex color scheme for distinguishing
#' runs. This is accomplished using the \code{\link{RColorBrewer}} color
#' palettes, which include a range of linked colors using named palettes. For
#' \code{plot.dcm}, one may either specify a brewer color palette listed in
#' \code{\link{brewer.pal.info}}, or, alternatively, a vector of standard R
#' colors (named, hexidecimal, or positive integers; see \code{\link{col2rgb}}).
#'
#' @section Plot Legends:
#' There are three automatic legend types available, and the legend is
#' added by default for plots. To turn off the legend, use \code{legend="n"}. To
#' plot a legend with values for every line in a sensitivity analysis, use
#' \code{legend="full"}. With models with many runs, this may be visually
#' overwhelming. In those cases, use \code{legend="lim"} to plot a legend
#' limited to the highest and lowest values of the varying parameter in the
#' model. In cases where the default legend names are not helpful, one may
#' override those names with the \code{leg.name} argument.
#'
#' @method plot dcm
#' @export
#'
#' @keywords plot
#' @seealso \code{\link{dcm}}, \code{\link{brewer.pal.info}}
#'
#' @examples
#' # Deterministic SIR model with varying act rate
#' param <- param.dcm(inf.prob = 0.2, act.rate = 1:10,
#' rec.rate = 1/3, a.rate = 0.011, ds.rate = 0.01,
#' di.rate = 0.03, dr.rate = 0.01)
#' init <- init.dcm(s.num = 1000, i.num = 1, r.num = 0)
#' control <- control.dcm(type = "SIR", nsteps = 100, dt = 0.25)
#' mod <- dcm(param, init, control)
#'
#' # Plot disease prevalence by default
#' plot(mod)
#'
#' # Plot prevalence of susceptibles
#' plot(mod, y = "s.num", popfrac = TRUE, col = "Greys")
#'
#' # Plot number of susceptibles
#' plot(mod, y = "s.num", popfrac = FALSE, col = "Greys", grid = TRUE)
#'
#' # Plot multiple runs of multiple compartments together
#' plot(mod, y = c("s.num", "i.num"),
#' run = 5, xlim = c(0, 50), grid = TRUE)
#' plot(mod, y = c("s.num", "i.num"),
#' run = 10, lty = 2, legend = "n", add = TRUE)
#'
plot.dcm <- function(x, y, popfrac = FALSE, run, col, lwd, lty, alpha = 0.9,
                     legend, leg.name, leg.cex = 0.8, axs = "r", grid = FALSE,
                     add = FALSE, ...) {

  ## Set missing flags
  noy <- ifelse(missing(y), TRUE, FALSE)
  norun <- ifelse(missing(run), TRUE, FALSE)
  nocol <- ifelse(missing(col), TRUE, FALSE)
  nolwd <- ifelse(missing(lwd), TRUE, FALSE)
  nolty <- ifelse(missing(lty), TRUE, FALSE)
  noleg <- ifelse(missing(legend), TRUE, FALSE)

  ## Dot args
  da <- list(...)

  ## Model dimensions
  nsteps <- x$control$nsteps
  nruns <- x$control$nruns
  if (norun == FALSE && any(run > nruns)) {
    # Fix: trailing space so the run count is not fused to "and" in the message
    stop("Specify run between 1 and ", nruns,
         call. = FALSE)
  }
  if (!is.null(x$control$new.mod) && noy == TRUE) {
    stop("Specify y when simulating a new model type in dcm",
         call. = FALSE)
  }

  groups <- x$param$groups
  dis.type <- x$control$type

  ## Main title default
  if (is.null(da$main)) {
    main <- ""
  } else {
    main <- da$main
  }

  ## Defaults for missing y
  # Single run: plot all compartments; multiple runs: plot infected only
  if (noy == TRUE && nruns == 1) {
    y <- grep(".num", names(x$epi), value = TRUE)
  }
  if (noy == TRUE && nruns > 1) {
    y <- grep("i.num", names(x$epi), value = TRUE)
  }
  if (all(y %in% names(x$epi)) == FALSE) {
    stop("Specified y is unavailable", call. = FALSE)
  }
  lcomp <- length(y)

  ## Prevalence calculations
  # denom() (package helper) rescales counts to prevalences when popfrac = TRUE
  x <- denom(x, y, popfrac)

  ## Compartment ymax calculations
  if (popfrac == FALSE) {
    allmax <- sapply(1:lcomp, function(i) max(x$epi[[y[i]]], na.rm = TRUE))
    ymax <- ceiling(max(allmax))
  } else {
    ymax <- 1
  }

  ## Defaults for ylim, xlim
  if (is.null(da$ylim)) {
    ylim <- c(0, ymax)
  } else {
    ylim <- da$ylim
  }
  if (is.null(da$xlim)) {
    xlim <- c(0, nsteps)
  } else {
    xlim <- da$xlim
  }

  ## Defaults for lwd
  # Recycle user-supplied lwd across compartments (multi-y) or runs (single-y)
  if (nolwd == FALSE && lcomp > 1 && length(lwd) < lcomp) {
    lwd <- rep(lwd, lcomp)
  }
  if (nolwd == FALSE && lcomp == 1 && length(lwd) < nruns) {
    lwd <- rep(lwd, nruns)
  }
  if (nolwd == TRUE) {
    lwd <- rep(2.5, lcomp * nruns)
  }

  ## Defaults for lty
  if (nolty == FALSE && lcomp > 1 && length(lty) < lcomp) {
    lty <- rep(lty, lcomp)
  }
  if (nolty == FALSE && lcomp == 1 && length(lty) < nruns) {
    lty <- rep(lty, nruns)
  }
  if (nolty == TRUE) {
    lty <- rep(1, lcomp * nruns)
    # Two-group default y: distinguish group 2 compartments with dashed lines
    if (groups == 2 && noy == TRUE) {
      lty <- rep(1:2, each = lcomp / 2)
    }
  }

  ## Defaults for xlab and ylab
  if (is.null(da$xlab)) {
    xlab <- "Time"
  } else {
    xlab <- da$xlab
  }
  if (is.null(da$ylab)) {
    if (popfrac == FALSE) {
      ylab <- "Number"
    } else {
      ylab <- "Prevalence"
    }
  } else {
    ylab <- da$ylab
  }

  ## Main plot window
  if (add == FALSE) {
    plot(1, 1, type = "n", bty = "n",
         xaxs = axs, yaxs = axs, xlim = xlim, ylim = ylim,
         xlab = xlab, ylab = ylab, main = main)
  }

  ## Default line colors
  pal <- NULL
  # Missing col
  if (nocol == TRUE) {
    if (lcomp == 1) {
      if (nruns == 1) {
        col <- "black"
      }
      if (nruns > 1) {
        col <- "Set1"
      }
      if (nruns > 5) {
        col <- "Spectral"
      }
      if (norun == FALSE && length(run) == 1) {
        col <- "black"
      }
    }
    if (lcomp > 1) {
      col <- "Set1"
    }
  }

  # Test if using a RColorBrewer palette
  if (length(col) == 1 && col %in% row.names(brewer.pal.info)) {
    use.brewer <- TRUE
  } else {
    use.brewer <- FALSE
  }

  # Set color palette
  if (is.null(pal)) {
    if (lcomp == 1) {
      if (use.brewer == TRUE) {
        if (nruns < 6) {
          pal <- adjustcolor(brewer.pal(5, col)[1:nruns], alpha)
        } else {
          # brewer_ramp (package helper) interpolates beyond the palette size
          pal <- adjustcolor(brewer_ramp(nruns, col), alpha)
        }
      }
      if (use.brewer == FALSE) {
        pal <- adjustcolor(rep(col, nruns), alpha)
      }
    }
    if (lcomp > 1) {
      if (use.brewer == TRUE) {
        if (lcomp > 4) {
          pal <- adjustcolor(brewer_ramp(lcomp, col), alpha)
        } else {
          pal <- adjustcolor(brewer.pal(max(c(lcomp, 4)), col), alpha)
          # Swap the first two brewer colors to reorder the default palette
          fixpal <- pal
          fixpal[1] <- pal[2]
          fixpal[2] <- pal[1]
          pal <- fixpal
        }
        if (groups == 2 && noy == TRUE) {
          pal <- adjustcolor(brewer.pal(3, col), alpha)
          fixpal <- pal
          fixpal[1] <- pal[2]
          fixpal[2] <- pal[1]
          pal <- fixpal
          if (dis.type != "SIR") {
            pal <- pal[1:2]
          }
          # Reuse the same colors for group 2 (distinguished by lty above)
          pal <- rep(pal, times = lcomp / 2)
        }
      }
      if (use.brewer == FALSE) {
        pal <- adjustcolor(rep(col, lcomp), alpha)
        if (groups == 2 && noy == TRUE) {
          pal <- adjustcolor(rep(col, times = 2), alpha)
        }
      }
    }
  }

  ## Plot lines
  if (lcomp == 1) {
    if (nruns == 1) {
      lines(x$control$timesteps, x$epi[[y]][, 1],
            lwd = lwd[1], lty = lty[1], col = pal[1])
    }
    if (nruns > 1) {
      if (norun == TRUE) {
        for (i in 1:nruns) {
          lines(x$control$timesteps, x$epi[[y]][, i],
                lwd = lwd[i], lty = lty[i], col = pal[i])
        }
      } else {
        if (length(run) == 1) {
          lines(x$control$timesteps, x$epi[[y]][, run],
                lwd = lwd[1], lty = lty[1], col = pal[1])
        }
        if (length(run) > 1) {
          for (i in seq_along(run)) {
            lines(x$control$timesteps, x$epi[[y]][, run[i]],
                  lwd = lwd[i], lty = lty[i], col = pal[i])
          }
        }
      }
    }
  }
  if (lcomp > 1) {
    if (nruns == 1) {
      for (i in 1:lcomp) {
        lines(x$control$timesteps, x$epi[[y[i]]][, 1],
              lwd = lwd, lty = lty[i], col = pal[i])
      }
    }
    if (nruns > 1) {
      if (norun == TRUE) {
        # Default to the first run when plotting multiple y without a run given
        for (i in 1:lcomp) {
          run <- 1
          lines(x$control$timesteps, x$epi[[y[i]]][, run],
                lwd = lwd[i], lty = lty[i], col = pal[i])
        }
      }
      if (norun == FALSE) {
        if (length(run) > 1) {
          stop("Plotting multiple runs of multiple y is not supported",
               call. = FALSE)
        }
        for (i in 1:lcomp) {
          lines(x$control$timesteps, x$epi[[y[i]]][, run],
                lwd = lwd[i], lty = lty[i], col = pal[i])
        }
      }
    }
  }

  ## Grid
  if (grid == TRUE) {
    grid()
  }

  ## Legend
  # Default legend type
  if (noleg == TRUE) {
    legend <- "n"
    if (lcomp == 1 && nruns < 3) {
      legend <- "full"
    }
    if (lcomp == 1 && nruns >= 3) {
      legend <- "lim"
    }
    if (lcomp > 1) {
      legend <- "full"
    }
    if (noy == FALSE) {
      legend <- "n"
    }
  } else {
    # "lim" needs at least 3 runs to abbreviate; fall back to "full"
    if (legend == "lim" && nruns < 3) {
      legend <- "full"
    }
    if (legend == "lim" && lcomp == 2) {
      legend <- "full"
    }
  }

  # Default legend names
  if (missing(leg.name)) {
    if (nruns == 1) {
      leg.names <- y
    }
    if (nruns > 1) {
      if (norun == TRUE && lcomp == 1) {
        leg.names <- names(x$epi[[y[1]]])
      }
      if (norun == FALSE && lcomp == 1) {
        if (length(run) == 1) {
          leg.names <- y
        }
        if (length(run) > 1) {
          leg.names <- names(x$epi[[y[1]]][run])
        }
      }
      if (lcomp > 1) {
        leg.names <- y
      }
    }
  } else {
    if (lcomp == 1) {
      leg.names <- paste(leg.name, 1:nruns)
    }
    if (lcomp > 1) {
      leg.names <- y
      # Fix: message on one line (the original string carried an embedded
      # newline plus source indentation into the warning text)
      warning("Legend names ignored for multiple y plots of multiple run models",
              call. = FALSE)
    }
  }

  # Legend
  if (norun == TRUE) {
    if (legend == "full") {
      legend("topright", legend = leg.names,
             bg = "white", lty = lty, lwd = lwd,
             col = pal, cex = leg.cex)
    }
    if (legend == "lim") {
      # Abbreviated legend: first run, ellipsis, last run
      legend("topright",
             legend = c(leg.names[1], "...", leg.names[nruns]),
             bg = "white",
             lty = c(lty[1], 1, lty[nruns]), lwd = lwd + 1,
             col = c(pal[1], "white", pal[nruns]), cex = leg.cex)
    }
  }
  if (norun == FALSE && legend != "n") {
    if (lcomp == 1) {
      legend("topright", legend = leg.names,
             bg = "white", lty = lty[seq_along(run)],
             lwd = lwd[seq_along(run)],
             col = pal[seq_along(run)], cex = leg.cex)
    }
    if (lcomp > 1) {
      legend("topright", legend = leg.names,
             bg = "white", lty = lty, lwd = lwd,
             col = pal, cex = leg.cex)
    }
  }
}
#' @title Plot Data from a Stochastic Individual Contact Epidemic Model
#'
#' @description Plots epidemiological data from a stochastic individual contact
#' model simulated with \code{\link{icm}}.
#'
#' @param x An \code{EpiModel} model object of class \code{icm}.
#' @param y Output compartments or flows from \code{icm} object to plot.
#' @param sims A vector of simulation numbers to plot.
#' @inheritParams plot.netsim
#'
#' @details
#' This plotting function will extract the epidemiological output from a model
#' object of class \code{icm} and plot the time series data of disease
#' prevalence and other results. The summary statistics that the function
#' calculates and plots are individual simulation lines, means of the individual
#' simulation lines, and quantiles of those individual simulation lines. The
#' mean line, toggled on with \code{mean.line=TRUE}, is calculated as the row
#' mean across simulations at each time step.
#'
#' Compartment prevalences are the size of a compartment over some denominator.
#' To plot the raw numbers from any compartment, use \code{popfrac=FALSE}; this
#' is the default for any plots of flows. The \code{popfrac} parameter
#' calculates and plots the denominators of all specified compartments using
#' these rules: 1) for one-group models, the prevalence of any compartment is
#' the compartment size divided by the total population size; 2) for two-group
#' models, the prevalence of any compartment is the compartment size divided by
#' the group population size. For any prevalences that are not automatically
#' calculated, the \code{\link{mutate_epi}} function may be used to add new
#' variables to the \code{icm} object to plot or analyze.
#'
#' The quantiles show the range of outcome values within a certain specified
#' quantile range. By default, the interquartile range is shown: that is the
#' middle 50\% of the data. This is specified by \code{qnts=0.5}. To show the
#' middle 95\% of the data, specify \code{qnts=0.95}. To toggle off the polygons
#' where they are plotted by default, specify \code{qnts=FALSE}.
#'
#' @method plot icm
#' @export
#'
#' @keywords plot
#' @seealso \code{\link{icm}}
#'
#' @examples
#' ## Example 1: Plotting multiple compartment values from SIR model
#' param <- param.icm(inf.prob = 0.5, act.rate = 0.5, rec.rate = 0.02)
#' init <- init.icm(s.num = 500, i.num = 1, r.num = 0)
#' control <- control.icm(type = "SIR", nsteps = 100,
#' nsims = 3, verbose = FALSE)
#' mod <- icm(param, init, control)
#' plot(mod, grid = TRUE)
#'
#' ## Example 2: Plot only infected with specific output from SI model
#' param <- param.icm(inf.prob = 0.25, act.rate = 0.25)
#' init <- init.icm(s.num = 500, i.num = 10)
#' control <- control.icm(type = "SI", nsteps = 100,
#' nsims = 3, verbose = FALSE)
#' mod2 <- icm(param, init, control)
#'
#' # Plot prevalence
#' plot(mod2, y = "i.num", mean.line = FALSE, sim.lines = TRUE)
#'
#' # Plot incidence
#' par(mfrow = c(1, 2))
#' plot(mod2, y = "si.flow", mean.smooth = TRUE, grid = TRUE)
#' plot(mod2, y = "si.flow", qnts.smooth = FALSE, qnts = 1)
#'
plot.icm <- function(x, y, popfrac = FALSE, sim.lines = FALSE, sims, sim.col,
                     sim.lwd, sim.alpha, mean.line = TRUE, mean.smooth = TRUE,
                     mean.col, mean.lwd = 2, mean.lty = 1, qnts = 0.5, qnts.col,
                     qnts.alpha, qnts.smooth = TRUE, legend, leg.cex = 0.8,
                     axs = "r", grid = FALSE, add = FALSE, ...) {
  ## Model dimensions and class ##
  nsteps <- x$control$nsteps
  nsims <- x$control$nsims
  if (missing(sims)) {
    sims <- 1:nsims
  }
  if (max(sims) > nsims) {
    stop("Set sim to between 1 and ", nsims, call. = FALSE)
  }
  # Treat a missing or non-numeric groups parameter as a one-group model
  if (is.null(x$param$groups) || !is.numeric(x$param$groups)) {
    groups <- 1
    x$param$groups <- 1
  } else {
    groups <- x$param$groups
  }
  # dotargs
  da <- list(...)
  ## Compartments ##
  # With no y specified, default to all prevalence (.num) outcomes
  nocomp <- ifelse(missing(y), TRUE, FALSE)
  if (nocomp == TRUE) {
    if (groups == 1) {
      y <- grep(".num$", names(x$epi), value = TRUE)
    }
    if (groups == 2) {
      if (inherits(x, "icm")) {
        y <- c(grep(".num$", names(x$epi), value = TRUE),
               grep(".num.g2$", names(x$epi), value = TRUE))
      }
    }
    if (missing(legend)) {
      legend <- TRUE
    }
  }
  if (nocomp == FALSE) {
    if (any(y %in% names(x$epi) == FALSE)) {
      stop("Specified y is not available in object", call. = FALSE)
    }
  }
  lcomp <- length(y)
  ## Color palettes ##
  # Main color palette (integer codes into the default base palette)
  bpal <- c(4, 2, 3)
  # Mean line
  if (missing(mean.col)) {
    mean.col <- bpal
  }
  mean.pal <- adjustcolor(mean.col, 0.9)
  # Quantile bands
  if (missing(qnts.col)) {
    qnts.col <- bpal
  }
  if (missing(qnts.alpha)) {
    qnts.alpha <- 0.5
  }
  qnts.pal <- adjustcolor(qnts.col, qnts.alpha)
  # Sim lines
  if (missing(sim.lwd)) {
    sim.lwd <- rep(0.75, lcomp)
  } else {
    if (length(sim.lwd) < lcomp) {
      sim.lwd <- rep(sim.lwd, lcomp)
    }
  }
  if (missing(sim.col)) {
    sim.col <- bpal
  } else {
    if (length(sim.col) < lcomp) {
      sim.col <- rep(sim.col, lcomp)
    }
  }
  # More simulations -> more transparent individual sim lines (floor at 0.05)
  if (missing(sim.alpha) && nsims == 1) {
    sim.alpha <- 0.9
  }
  if (missing(sim.alpha) && nsims > 1) {
    sim.alpha <- max(c(0.05, 1 - log10(nsims) / 3))
  }
  sim.pal <- adjustcolor(sim.col, sim.alpha)
  ## Prevalence calculations ##
  # denom() (package helper) rescales counts to prevalences when popfrac = TRUE
  x <- denom(x, y, popfrac)
  # Compartment max
  if (popfrac == FALSE) {
    if (lcomp == 1) {
      min.prev <- min(x$epi[[y]], na.rm = TRUE)
      max.prev <- max(x$epi[[y]], na.rm = TRUE)
    } else {
      min.prev <- min(sapply(y, function(comps) min(x$epi[[comps]], na.rm = TRUE)))
      max.prev <- max(sapply(y, function(comps) max(x$epi[[comps]], na.rm = TRUE)))
    }
  } else {
    min.prev <- 0
    max.prev <- 1
  }
  ## Missing args ##
  if (is.null(da$xlim)) {
    xlim <- c(0, nsteps)
  } else {
    xlim <- da$xlim
  }
  # Initialize ylim min/max sentinels; overwritten below if bands/means are measured
  qnt.min <- 1E10
  qnt.max <- -1E10
  mean.min <- 1E10
  mean.max <- -1E10
  ## Quantiles - ylim min max ##
  if (missing(qnts) || qnts == FALSE) {
    disp.qnts <- FALSE
  } else {
    disp.qnts <- TRUE
  }
  if (nsims == 1) {
    disp.qnts <- FALSE
  }
  if (groups == 1 && missing(qnts)) {
    disp.qnts <- TRUE
    qnts <- 0.5
  }
  if (disp.qnts == TRUE) {
    if (qnts > 1 || qnts < 0) {
      stop("qnts must be between 0 and 1", call. = FALSE)
    }
    # plot.qnts = 0: draw_qnts only measures the band extremes, no drawing
    qnt.min <- draw_qnts(x, y, qnts, qnts.pal, qnts.smooth, "epi", 0, "min")
    qnt.max <- draw_qnts(x, y, qnts, qnts.pal, qnts.smooth, "epi", 0, "max")
  }
  ## Mean lines - ylim max ##
  if (mean.line == TRUE) {
    if (!missing(mean.lwd) && length(mean.lwd) < lcomp) {
      mean.lwd <- rep(mean.lwd, lcomp)
    }
    if (missing(mean.lwd)) {
      mean.lwd <- rep(2.5, lcomp)
    }
    if (!missing(mean.lty) && length(mean.lty) < lcomp) {
      mean.lty <- rep(mean.lty, lcomp)
    }
    if (missing(mean.lty)) {
      mean.lty <- rep(1, lcomp)
    }
    # plot.means = 0: draw_means only measures the mean-line extremes
    mean.min <- draw_means(x, y, mean.smooth, mean.lwd,
                           mean.pal, mean.lty, "epi", 0, "min")
    mean.max <- draw_means(x, y, mean.smooth, mean.lwd,
                           mean.pal, mean.lty, "epi", 0, "max")
  }
  # Dynamic scaling based on sim.lines and mean lines and quantile bands
  # NOTE(review): for numeric qnts (default 0.5), qnts == TRUE is FALSE, so the
  # dynamic branch is reached only via mean.line (or qnts = 1); possibly
  # disp.qnts was intended here -- confirm before changing.
  if (!is.null(da$ylim)) {
    ylim <- da$ylim
  } else if (is.null(da$ylim) && sim.lines == FALSE &&
               (mean.line == TRUE || qnts == TRUE)) {
    ylim <- c(min(qnt.min * 0.9, mean.min * 0.9),
              max(qnt.max * 1.1, mean.max * 1.1))
  } else {
    ylim <- c(min.prev, max.prev)
  }
  if (is.null(da$main)) {
    main <- ""
  } else {
    main <- da$main
  }
  if (is.null(da$xlab)) {
    xlab <- "Time"
  } else {
    xlab <- da$xlab
  }
  if (is.null(da$ylab)) {
    if (popfrac == FALSE) {
      ylab <- "Number"
    } else {
      ylab <- "Prevalence"
    }
  } else {
    ylab <- da$ylab
  }
  ## Main plot window ##
  if (add == FALSE) {
    plot(1, 1, type = "n", bty = "n",
         xaxs = axs, yaxs = axs, xlim = xlim, ylim = ylim,
         xlab = xlab, ylab = ylab, main = main)
  }
  ## Quantiles - Plotting ##
  # The display flag is re-derived here exactly as in the ylim pass above
  if (missing(qnts) || qnts == FALSE) {
    disp.qnts <- FALSE
  } else {
    disp.qnts <- TRUE
  }
  if (nsims == 1) {
    disp.qnts <- FALSE
  }
  if (groups == 1 && missing(qnts)) {
    disp.qnts <- TRUE
    qnts <- 0.5
  }
  if (disp.qnts == TRUE) {
    if (qnts > 1 || qnts < 0) {
      stop("qnts must be between 0 and 1", call. = FALSE)
    }
    draw_qnts(x, y, qnts, qnts.pal, qnts.smooth)
  }
  ## Simulation lines ##
  if (sim.lines == TRUE) {
    for (j in seq_len(lcomp)) {
      for (i in sims) {
        lines(x$epi[[y[j]]][, i], lwd = sim.lwd[j], col = sim.pal[j])
      }
    }
  }
  ## Mean lines - plotting ##
  if (mean.line == TRUE) {
    if (!missing(mean.lwd) && length(mean.lwd) < lcomp) {
      mean.lwd <- rep(mean.lwd, lcomp)
    }
    if (missing(mean.lwd)) {
      mean.lwd <- rep(2.5, lcomp)
    }
    if (!missing(mean.lty) && length(mean.lty) < lcomp) {
      mean.lty <- rep(mean.lty, lcomp)
    }
    if (missing(mean.lty)) {
      # Two-group default y: dashed mean lines for the group 2 compartments
      if (nocomp == FALSE || (nocomp == TRUE && groups == 1)) {
        mean.lty <- rep(1, lcomp)
      } else {
        mean.lty <- rep(1:2, each = lcomp / 2)
      }
    }
    draw_means(x, y, mean.smooth, mean.lwd, mean.pal, mean.lty)
  }
  ## Grid
  if (grid == TRUE) {
    grid()
  }
  ## Legends ##
  if (!missing(legend) && legend == TRUE) {
    if (groups == 2 && nocomp == TRUE) {
      leg.lty <- mean.lty
    } else {
      leg.lty <- 1
    }
    legend("topright", legend = y, lty = leg.lty, lwd = 2,
           col = mean.pal, cex = leg.cex, bg = "white")
  }
}
## Helper utilities
draw_qnts <- function(x, y, qnts, qnts.pal, qnts.smooth,
                      loc = "epi", plot.qnts = 1, qnts.min_max = "max") {
  # Draw (plot.qnts == 1) or measure (plot.qnts == 0) the central quantile
  # band, taken across simulations at each time step, for each outcome in y.
  # When measuring, returns the overall max or min of the band values (per
  # qnts.min_max) so callers can size the y-axis; otherwise adds polygons to
  # the current plot.
  band.lo <- 1E10
  band.hi <- -1E10
  probs <- c((1 - qnts) / 2, 1 - ((1 - qnts) / 2))
  for (k in seq_along(y)) {
    # Row-wise (per time step) quantiles across simulation columns
    qmat <- apply(x[[loc]][[y[k]]], 1,
                  function(v) {
                    quantile(v, c(probs[1], probs[2]), na.rm = TRUE)
                  })
    # Drop time steps where either band edge is undefined
    qmat <- qmat[, complete.cases(t(qmat))]
    steps <- seq_len(ncol(qmat))
    xx <- c(steps, rev(steps))
    if (qnts.smooth == FALSE) {
      yy <- c(qmat[1, ], rev(qmat[2, ]))
    } else {
      lo.sm <- suppressWarnings(supsmu(x = steps, y = qmat[1, ]))$y
      hi.sm <- suppressWarnings(supsmu(x = steps, y = qmat[2, ]))$y
      yy <- c(lo.sm, rev(hi.sm))
    }
    if (plot.qnts == 1) {
      polygon(xx, yy, col = qnts.pal[k], border = NA)
    } else {
      band.hi[k] <- max(yy)
      band.lo[k] <- min(yy)
    }
  }
  if (plot.qnts == 0 && qnts.min_max == "max") {
    return(max(band.hi))
  } else if (plot.qnts == 0 && qnts.min_max == "min") {
    return(min(band.lo))
  }
}
draw_means <- function(x, y, mean.smooth, mean.lwd,
                       mean.pal, mean.lty, loc = "epi",
                       plot.means = 1, mean.min_max = "max") {
  # Draw (plot.means == 1) or measure (plot.means == 0) the mean trajectory
  # across simulations for each outcome in y. When measuring, returns the
  # overall max or min (per mean.min_max) of the optionally smoothed means so
  # callers can size the y-axis; otherwise adds the lines to the current plot.
  lo.vals <- 1E10
  hi.vals <- -1E10
  sim.count <- x$control$nsims
  for (k in seq_along(y)) {
    # Single simulation: plot the run itself; otherwise the row-wise mean
    traj <- if (sim.count == 1) {
      x[[loc]][[y[k]]][, 1]
    } else {
      rowMeans(x[[loc]][[y[k]]], na.rm = TRUE)
    }
    if (mean.smooth == TRUE) {
      traj <- suppressWarnings(supsmu(x = seq_along(traj), y = traj))$y
    }
    if (plot.means == 1) {
      lines(traj, lwd = mean.lwd[k],
            col = mean.pal[k], lty = mean.lty[k])
    } else {
      hi.vals[k] <- max(traj, na.rm = TRUE)
      lo.vals[k] <- min(traj, na.rm = TRUE)
    }
  }
  if (plot.means == 0 && mean.min_max == "max") {
    return(max(hi.vals))
  } else if (plot.means == 0 && mean.min_max == "min") {
    return(min(lo.vals))
  }
}
get_qnts <- function(data, qnts, qnts.smooth) {
  # Compute polygon coordinates for the central quantile band of a statistic
  # across simulations.
  #
  # data        - matrix of values (time steps in rows, simulations in columns)
  # qnts        - width of the central band in [0, 1] (0.5 = interquartile)
  # qnts.smooth - if TRUE, smooth each band edge with stats::supsmu()
  #
  # Returns list(x, y) tracing the lower edge left-to-right followed by the
  # upper edge right-to-left, suitable for graphics::polygon().
  if (qnts < 0 || qnts > 1) {
    stop("qnts must be between 0 and 1", call. = FALSE)
  }
  quants <- c((1 - qnts) / 2, 1 - ((1 - qnts) / 2))
  qnt.prev <- apply(data, 1, function(x) {
    quantile(x, c(quants[1], quants[2]), na.rm = TRUE)
  })
  if (isFALSE(qnts.smooth)) {
    xx <- c(seq_len(ncol(qnt.prev)), rev(seq_len(ncol(qnt.prev))))
    yy <- c(qnt.prev[1, ], rev(qnt.prev[2, ]))
    xx <- xx[!is.na(yy)]
    yy <- yy[!is.na(yy)]
  } else {
    # Smooth both edges along the forward time axis, then reverse the upper
    # edge for the polygon's return path. (Fix: the upper edge was previously
    # smoothed against a reversed x vector; supsmu() sorts by x, which
    # horizontally mirrored that edge of the band.)
    ss1 <- suppressWarnings(supsmu(x = seq_len(ncol(qnt.prev)),
                                   y = qnt.prev[1, ]))
    ss2 <- suppressWarnings(supsmu(x = seq_len(ncol(qnt.prev)),
                                   y = qnt.prev[2, ]))
    xx <- c(ss1$x, rev(ss2$x))
    yy <- c(ss1$y, rev(ss2$y))
  }
  list(x = xx, y = yy)
}
get_means <- function(data, mean.smooth) {
  # Compute the cross-simulation mean trajectory of a statistic.
  # data is a matrix (time steps in rows, simulations in columns); when
  # mean.smooth is TRUE the means are smoothed with stats::supsmu(),
  # otherwise NA time steps are dropped. Returns list(x, y) ready for lines().
  avg <- rowMeans(data, na.rm = TRUE)
  if (isFALSE(mean.smooth)) {
    keep <- !is.na(avg)
    out.x <- seq_along(avg)[keep]
    out.y <- avg[keep]
  } else {
    sm <- suppressWarnings(supsmu(x = seq_along(avg), y = avg))
    out.x <- sm$x
    out.y <- sm$y
  }
  list(x = out.x, y = out.y)
}
plot_stats_table <- function(data,
                             nmstats,
                             method,
                             duration.imputed,
                             sim.lines,
                             sim.col,
                             sim.lwd,
                             mean.line,
                             mean.smooth,
                             mean.col,
                             mean.lwd,
                             mean.lty,
                             qnts,
                             qnts.col,
                             qnts.alpha,
                             qnts.smooth,
                             targ.line,
                             targ.col,
                             targ.lwd,
                             targ.lty,
                             plots.joined,
                             draw_legend,
                             grid,
                             targets,
                             dynamic,
                             da,
                             ...) {
  # Internal workhorse for plotting target statistics (called from
  # plot.netdx). `data` is a 3-d array (time step x statistic x simulation).
  # method "l" draws line plots (sim lines, quantile bands, mean lines,
  # target lines); method "b" draws boxplots with target points overlaid.
  nstats <- length(nmstats)
  if (missing(plots.joined)) {
    plots.joined <- ifelse(nstats > 3, FALSE, TRUE)
  }
  if (nstats == 1) {
    plots.joined <- TRUE
    sim.col <- "dodgerblue3"
  }
  # NVL (package helper): null-coalescing default
  xlim <- NVL(da$xlim, c(1, dim(data)[1]))
  xlab <- if (isFALSE(plots.joined)) "" else NVL(da$xlab, if (isTRUE(dynamic)) "time" else "simulation number")
  ylab <- if (isFALSE(plots.joined)) "" else NVL(da$ylab, if (nstats == 1) nmstats else "Statistic")
  if (missing(sim.lwd)) {
    # Thinner sim lines as the number of simulations grows (floor at 0.5)
    if (dim(data)[3] > 1) {
      sim.lwd <- max(c(1 - (dim(data)[3] * 0.05), 0.5))
    } else {
      sim.lwd <- 1
    }
  }
  ## Color Vector Validation
  # 1. Sim.col, mean.col, qnts.col, targ.col must be missing or a vector of
  #    length 1 or nstats
  # 2. If sim.col, mean.col, qnts.col, or targ.col is not missing
  #    but is a vector of length 1 and nstats is greater than 1,
  #    then replicate the color vector nstats times to achieve a vector of
  #    size nstats.
  check_len_rep <- function(object, default, name) {
    if (!missing(object)) {
      if (length(object) %in% c(1, nstats)) {
        rep(object, length.out = nstats)
      } else {
        stop(paste0(name, " must be either missing or a vector of length 1 or nstats (", nstats, ")"))
      }
    } else {
      rep(default, length.out = nstats)
    }
  }
  sim.col <- check_len_rep(sim.col,
                           if (isFALSE(plots.joined)) "dodgerblue3" else seq_len(nstats + 1L)[-1L],
                           "sim.col")
  mean.col <- check_len_rep(mean.col,
                            if (isFALSE(plots.joined)) "black" else sim.col,
                            "mean.col")
  qnts.col <- check_len_rep(qnts.col,
                            sim.col,
                            "qnts.col")
  qnts.col <- adjustcolor(qnts.col, qnts.alpha)
  targ.col <- check_len_rep(targ.col,
                            if (isFALSE(plots.joined) || nstats == 1) "black" else sim.col,
                            "targ.col")
  # Legend only makes sense on a joined panel; default on for a single stat
  draw_legend <- isTRUE(plots.joined) &&
    ((!missing(draw_legend) && isTRUE(draw_legend)) ||
       (missing(draw_legend) && nstats == 1))
  # Local flag (shadows the draw_qnts() helper defined in this file)
  draw_qnts <- isTRUE(dynamic) && is.numeric(qnts)
  mains <- if (isTRUE(plots.joined)) character(nstats) else nmstats
  if (method == "l") {
    ## Pre-compute bands/means so axis limits can be derived before plotting
    qnts_list <- list()
    means <- list()
    ylims <- list()
    for (j in seq_len(nstats)) {
      dataj <- matrix(data[, j, ], nrow = dim(data)[1])
      if (isTRUE(draw_qnts)) {
        qnts_list[[j]] <- get_qnts(dataj, qnts, qnts.smooth)
      }
      if (isTRUE(mean.line)) {
        means[[j]] <- get_means(dataj, mean.smooth)
      }
    }
    for (j in seq_len(nstats)) {
      if (!is.null(da$ylim)) {
        ylims[[j]] <- da$ylim
      } else {
        # Collect every value that will actually be drawn on this panel
        limdat <- c(if (isTRUE(plots.joined) && isTRUE(sim.lines)) data,
                    if (isFALSE(plots.joined) && isTRUE(sim.lines)) data[, j, ],
                    if (isTRUE(plots.joined) && isTRUE(mean.line)) unlist(lapply(means, `[[`, "y")),
                    if (isFALSE(plots.joined) && isTRUE(mean.line)) means[[j]]$y,
                    if (isTRUE(plots.joined) && isTRUE(draw_qnts)) unlist(lapply(qnts_list, `[[`, "y")),
                    if (isFALSE(plots.joined) && isTRUE(draw_qnts)) qnts_list[[j]]$y,
                    if (isTRUE(plots.joined) && isTRUE(targ.line)) targets,
                    if (isFALSE(plots.joined) && isTRUE(targ.line)) targets[j])
        ylimsj <- suppressWarnings(c(min(limdat, na.rm = TRUE), max(limdat, na.rm = TRUE)))
        if (any(is.infinite(ylimsj))) {
          ## should both be infinite in this case, indicating no non-missing data to plot;
          ## set both limits to 0 simply to avoid errors when calling plot below
          ylimsj <- c(0, 0)
        } else {
          ## give +/- 10% buffer in a way that works for signed statistics
          ylimsj[1] <- ylimsj[1] - 0.1 * abs(ylimsj[1])
          ylimsj[2] <- ylimsj[2] + 0.1 * abs(ylimsj[2])
        }
        ylims[[j]] <- ylimsj
      }
    }
    if (isFALSE(plots.joined)) {
      # Panel grid layout keyed to the number of statistics
      if (nstats == 1) dimens <- c(1, 1)
      if (nstats == 2) dimens <- c(1, 2)
      if (nstats == 3) dimens <- c(1, 3)
      if (nstats == 4) dimens <- c(2, 2)
      if (nstats == 5) dimens <- c(2, 3)
      if (nstats == 6) dimens <- c(2, 3)
      if (nstats %in% 7:9) dimens <- c(3, 3)
      if (nstats %in% 10:12) dimens <- c(4, 3)
      if (nstats %in% 13:16) dimens <- c(4, 4)
      if (nstats > 16) dimens <- rep(ceiling(sqrt(nstats)), 2)
      # Pull graphical parameters and register their restoration immediately,
      # so par is reset even if plotting fails partway through
      # (fix: on.exit was previously registered only at the end of this branch)
      ops <- list(mar = par()$mar, mfrow = par()$mfrow, mgp = par()$mgp)
      par(mar = c(2.5, 2.5, 2, 1), mgp = c(2, 1, 0), mfrow = dimens)
      on.exit(par(ops), add = TRUE)
    }
    ## do actual plotting
    for (j in seq_len(nstats)) {
      # New plot region for each panel (or once for a joined plot)
      if (j == 1 || isFALSE(plots.joined)) {
        plot(NULL,
             xlim = xlim,
             ylim = ylims[[j]],
             type = "n",
             xlab = xlab,
             ylab = ylab,
             main = mains[j])
      }
      dataj <- matrix(data[, j, ], nrow = dim(data)[1])
      if (isTRUE(draw_qnts)) {
        polygon(qnts_list[[j]]$x, qnts_list[[j]]$y, col = qnts.col[j], border = NA)
      }
      if (isTRUE(sim.lines)) {
        apply(dataj,
              2,
              function(y) lines(which(!is.na(y)), y[!is.na(y)], lwd = sim.lwd, col = sim.col[j]))
      }
      if (isTRUE(mean.line)) {
        lines(means[[j]]$x,
              means[[j]]$y,
              lwd = mean.lwd,
              col = mean.col[j],
              lty = mean.lty)
      }
      if (isTRUE(targ.line)) {
        abline(h = targets[j],
               lty = targ.lty,
               lwd = targ.lwd,
               col = targ.col[j])
      }
      if (isTRUE(grid) && isFALSE(plots.joined)) {
        grid()
      }
    }
    if (isTRUE(grid) && isTRUE(plots.joined)) {
      grid()
    }
    if (isTRUE(draw_legend)) {
      legend("topleft", legend = nmstats, lwd = 2,
             col = sim.col[1:nstats], cex = 0.75, bg = "white")
    }
  }
  if (method == "b") {
    # Collapse (time x stat x sim) to (time*sim rows x stat cols) for boxplot()
    data <- matrix(aperm(data, c(1, 3, 2)), nrow = dim(data)[1] * dim(data)[3])
    colnames(data) <- nmstats
    boxplot(data, ...)
    for (j in seq_len(nstats)) {
      points(x = j, y = targets[j],
             pch = 16, cex = 1.5, col = "blue")
    }
    ## Grid (fix: drawn once after all target points, not once per statistic;
    ## final layering is unchanged since the last grid() call already topped
    ## every point)
    if (isTRUE(grid)) {
      grid()
    }
  }
}
#' @title Plot Dynamic Network Model Diagnostics
#'
#' @description Plots dynamic network model diagnostics calculated in
#' \code{\link{netdx}}.
#'
#' @param x An \code{EpiModel} object of class \code{netdx}.
#' @param type Plot type, with options of \code{"formation"} for network
#' model formation statistics, \code{"duration"} for dissolution model
#' statistics for average edge duration, or \code{"dissolution"} for
#' dissolution model statistics for proportion of ties dissolved per time
#' step.
#' @param method Plot method, with options of \code{"l"} for line plots and
#' \code{"b"} for box plots.
#' @param sims A vector of simulation numbers to plot.
#' @param stats Statistics to plot. For \code{type = "formation"}, \code{stats}
#' are among those specified in the call to \code{\link{netdx}};
#' for \code{type = "duration", "dissolution"}, \code{stats} are among
#' those of the dissolution model (without \code{offset()}). The default
#' is to plot all statistics.
#' @param plots.joined If \code{TRUE}, combine all statistics in one plot,
#' versus one plot per statistic if \code{FALSE}.
#' @inheritParams plot.netsim
#'
#' @details
#' The plot function for \code{netdx} objects will generate plots of two types
#' of model diagnostic statistics that run as part of the diagnostic tools
#' within that function. The \code{formation} plot shows the summary statistics
#' requested in \code{nwstats.formula}, where the default includes those
#' statistics in the network model formation formula specified in the original
#' call to \code{\link{netest}}.
#'
#' The \code{duration} plot shows the average age of existing edges at each time
#' step, up until the maximum time step requested. The age is used as an
#' estimator of the average duration of edges in the equilibrium state. When
#' \code{duration.imputed = FALSE}, edges that exist at the beginning of the
#' simulation are assumed to start with an age of 1, yielding a burn-in period
#' before the observed mean approaches its target. When
#' \code{duration.imputed = TRUE}, expected ages prior to the start of the
#' simulation are calculated from the dissolution model, typically eliminating
#' the need for a burn-in period.
#'
#' The \code{dissolution} plot shows the proportion of the extant ties that are
#' dissolved at each time step, up until the maximum time step requested.
#' Typically, the proportion of ties that are dissolved is the reciprocal of the
#' mean relational duration. This plot thus contains similar information to that
#' in the duration plot, but should reach its expected value more quickly, since
#' it is not subject to censoring.
#'
#' The \code{plots.joined} argument will control whether the statistics
#' are joined in one plot or plotted separately, assuming there are multiple
#' statistics in the model. The default is based on the number of network
#' statistics requested. The layout of the separate plots within the larger plot
#' window is also based on the number of statistics.
#'
#' @method plot netdx
#' @export
#'
#' @keywords plot
#' @seealso \code{\link{netdx}}
#'
#' @examples
#' \dontrun{
#' # Network initialization and model parameterization
#' nw <- network_initialize(n = 500)
#' nw <- set_vertex_attribute(nw, "sex", rbinom(500, 1, 0.5))
#' formation <- ~edges + nodematch("sex")
#' target.stats <- c(500, 300)
#' coef.diss <- dissolution_coefs(dissolution = ~offset(edges) +
#' offset(nodematch("sex")), duration = c(50, 40))
#'
#' # Estimate the model
#' est <- netest(nw, formation, target.stats, coef.diss, verbose = FALSE)
#'
#' # Static diagnostics
#' dx1 <- netdx(est, nsims = 1e4, dynamic = FALSE,
#' nwstats.formula = ~edges + meandeg + concurrent +
#' nodefactor("sex", levels = NULL) +
#' nodematch("sex"))
#' dx1
#'
#' # Plot diagnostics
#' plot(dx1)
#' plot(dx1, stats = c("edges", "concurrent"), mean.col = "black",
#' sim.lines = TRUE, plots.joined = FALSE)
#' plot(dx1, stats = "edges", method = "b",
#' col = "seagreen3", grid = TRUE)
#'
#' # Dynamic diagnostics
#' dx2 <- netdx(est, nsims = 10, nsteps = 500,
#' nwstats.formula = ~edges + meandeg + concurrent +
#' nodefactor("sex", levels = NULL) +
#' nodematch("sex"))
#' dx2
#'
#' # Formation statistics plots, joined and separate
#' plot(dx2, grid = TRUE)
#' plot(dx2, type = "formation", plots.joined = TRUE)
#' plot(dx2, type = "formation", sims = 1, plots.joined = TRUE,
#' qnts = FALSE, sim.lines = TRUE, mean.line = FALSE)
#' plot(dx2, type = "formation", plots.joined = FALSE,
#' stats = c("edges", "concurrent"), grid = TRUE)
#'
#' plot(dx2, method = "b", col = "bisque", grid = TRUE)
#' plot(dx2, method = "b", stats = "meandeg", col = "dodgerblue")
#'
#' # Duration statistics plot
#' par(mfrow = c(1, 2))
#' # With duration imputed
#' plot(dx2, type = "duration", sim.lines = TRUE, sim.lwd = 0.3,
#' targ.lty = 1, targ.lwd = 0.5)
#' # Without duration imputed
#' plot(dx2, type = "duration", sim.lines = TRUE, sim.lwd = 0.3,
#' targ.lty = 1, targ.lwd = 0.5, duration.imputed = FALSE)
#'
#' # Dissolution statistics plot
#' plot(dx2, type = "dissolution", qnts = 0.25, grid = TRUE)
#' plot(dx2, type = "dissolution", method = "b", col = "pink1")
#' }
#'
plot.netdx <- function(x, type = "formation", method = "l", sims, stats,
                       duration.imputed = TRUE, sim.lines = FALSE, sim.col, sim.lwd,
                       mean.line = TRUE, mean.smooth = TRUE, mean.col,
                       mean.lwd = 2, mean.lty = 1, qnts = 0.5, qnts.col,
                       qnts.alpha = 0.5, qnts.smooth = TRUE, targ.line = TRUE,
                       targ.col, targ.lwd = 2, targ.lty = 2,
                       plots.joined, legend, grid = FALSE, ...) {
  # Checks and Variables ----------------------------------------------------
  ## Check Object
  if (!inherits(x, "netdx")) {
    stop("x must be an object of class netdx", call. = FALSE)
  }
  # Duration/dissolution statistics are only collected when netdx was run
  # with dynamic network simulation and without skipping dissolution stats
  if (x$dynamic == FALSE && type %in% c("duration", "dissolution")) {
    stop("Plots of type duration and dissolution only available if netdx ",
         "run with dynamic = TRUE", call. = FALSE)
  }
  if (is.null(x$stats.table.dissolution) && type %in% c("duration",
                                                        "dissolution")) {
    stop("Plots of type duration and dissolution only available if netdx ",
         "run with skip.dissolution = FALSE", call. = FALSE)
  }
  ## Check sims
  nsims <- x$nsims
  if (missing(sims)) {
    sims <- 1:nsims
  }
  if (max(sims) > nsims) {
    # BUG FIX: trailing space so the message reads "... is <n>" rather than
    # "... is<n>" (stop() concatenates its arguments without a separator)
    stop("Maximum sim number is ", nsims, call. = FALSE)
  }
  dynamic <- x$dynamic
  # Get dotargs
  da <- list(...)
  type <- match.arg(type, c("formation", "duration", "dissolution"))
  # Formation Plot ----------------------------------------------------------
  if (type == "formation") {
    stats_table <- x$stats.table.formation
    # Bind per-sim stat matrices column-wise, then reshape to a 3-D array:
    # time step x statistic x simulation (static diagnostics have one slice)
    data <- do.call("cbind", args = x$stats)
    dim3 <- if (isTRUE(dynamic)) nsims else 1L
    data <- array(data, dim = c(dim(data)[1], dim(data)[2] / dim3, dim3))
  } else { # duration/dissolution case
    if (x$anyNA == TRUE) {
      cat("\nNOTE: Duration & dissolution data contains undefined values due to zero edges of some dissolution
      dyad type(s) on some time step; these undefined values will be set to 0 when processing the data.")
    }
    if (type == "duration") {
      if (is.logical(duration.imputed) == FALSE) {
        stop("For plots of type duration, duration.imputed must
             be a logical value (TRUE/FALSE)", call. = FALSE)
      }
      # Mean age of extant edges, with or without imputed onset times for
      # edges already present at the start of the simulation
      if (isTRUE(duration.imputed)) {
        data <- x$pages_imptd
      } else {
        data <- x$pages
      }
      stats_table <- x$stats.table.duration
    } else { # if type is "dissolution"
      # Proportion of extant edges dissolving at each time step
      data <- x$prop.diss
      stats_table <- x$stats.table.dissolution
    }
  }
  ## Find available stats (rows of the table with a computed simulation mean)
  sts <- which(!is.na(stats_table[, "Sim Mean"]))
  nmstats <- rownames(stats_table)[sts]
  ## Pull and check stat argument
  if (missing(stats)) {
    stats <- nmstats
  }
  if (any(stats %in% nmstats == FALSE)) {
    stop("One or more requested stats not contained in netdx object",
         call. = FALSE)
  }
  outsts <- which(nmstats %in% stats)
  nmstats <- nmstats[outsts]
  ## Subset data to the requested statistics (keep 3-D structure)
  data <- data[, outsts, , drop = FALSE]
  if (isTRUE(dynamic)) {
    # sims only used to subset data in dynamic case
    data <- data[, , sims, drop = FALSE]
  }
  ## Pull target stats aligned with the selected statistics
  targets <- stats_table$Target[sts][outsts]
  # Delegate all drawing (lines, quantile bands, targets, legends) to the
  # shared internal plotting routine used by both netdx and netsim plots
  plot_stats_table(data = data,
                   nmstats = nmstats,
                   method = method,
                   duration.imputed = duration.imputed,
                   sim.lines = sim.lines,
                   sim.col = sim.col,
                   sim.lwd = sim.lwd,
                   mean.line = mean.line,
                   mean.smooth = mean.smooth,
                   mean.col = mean.col,
                   mean.lwd = mean.lwd,
                   mean.lty = mean.lty,
                   qnts = qnts,
                   qnts.col = qnts.col,
                   qnts.alpha = qnts.alpha,
                   qnts.smooth = qnts.smooth,
                   targ.line = targ.line,
                   targ.col = targ.col,
                   targ.lwd = targ.lwd,
                   targ.lty = targ.lty,
                   plots.joined = plots.joined,
                   draw_legend = legend,
                   grid = grid,
                   targets = targets,
                   dynamic = dynamic,
                   da = da,
                   ...)
}
#' @title Plot Data from a Stochastic Network Epidemic Model
#'
#' @description Plots epidemiological and network data from a stochastic network
#' model simulated with \code{\link{netsim}}.
#'
#' @param x An \code{EpiModel} model object of class \code{netsim}.
#' @param type Type of plot: \code{"epi"} for epidemic model results,
#' \code{"network"} for a static network plot (\code{plot.network}),
#' or \code{"formation"}, \code{"duration"}, or \code{"dissolution"} for
#' network formation, duration, or dissolution statistics.
#' @param y Output compartments or flows from \code{netsim} object to plot.
#' @param popfrac If \code{TRUE}, plot prevalence of values rather than numbers
#' (see details).
#' @param sim.lines If \code{TRUE}, plot individual simulation lines. Default is
#' to plot lines for one-group models but not for two-group models.
#' @param sims If \code{type="epi"} or \code{"formation"}, a vector of
#' simulation numbers to plot. If \code{type="network"}, a single
#' simulation number for which to plot the network, or else \code{"min"}
#' to plot the simulation number with the lowest disease prevalence,
#' \code{"max"} for the simulation with the highest disease prevalence,
#' or \code{"mean"} for the simulation with the prevalence closest to the
#' mean across simulations at the specified time step.
#' @param sim.col Vector of any standard R color format for simulation lines.
#' @param sim.lwd Line width for simulation lines.
#' @param sim.alpha Transparency level for simulation lines, where
#' 0 = transparent and 1 = opaque (see \code{adjustcolor} function).
#' @param mean.line If \code{TRUE}, plot mean of simulations across time.
#' @param mean.smooth If \code{TRUE}, use a loess smoother on the mean line.
#' @param mean.col Vector of any standard R color format for mean lines.
#' @param mean.lwd Line width for mean lines.
#' @param mean.lty Line type for mean lines.
#' @param qnts If numeric, plot polygon of simulation quantiles based on the
#' range implied by the argument (see details). If \code{FALSE}, suppress
#' polygon from plot.
#' @param qnts.col Vector of any standard R color format for polygons.
#' @param qnts.alpha Transparency level for quantile polygons, where 0 =
#' transparent and 1 = opaque (see \code{adjustcolor} function).
#' @param qnts.smooth If \code{TRUE}, use a loess smoother on quantile polygons.
#' @param legend If \code{TRUE}, plot default legend.
#' @param leg.cex Legend scale size.
#' @param axs Plot axis type (see \code{\link{par}} for details), with default
#' of \code{"r"}.
#' @param grid If \code{TRUE}, a grid is added to the background of plot
#' (see \code{\link{grid}} for details), with default of nx by ny.
#' @param add If \code{TRUE}, new plot window is not called and lines are added
#' to existing plot window.
#' @param network Network number, for simulations with multiple networks
#' representing the population.
#' @param at If \code{type = "network"}, time step for network graph.
#' @param col.status If \code{TRUE} and \code{type="network"}, automatic disease
#' status colors (blue = susceptible, red = infected, green = recovered).
#' @param shp.g2 If \code{type = "network"} and \code{x} is for a two-group model,
#' shapes for the Group 2 vertices, with acceptable inputs of "triangle"
#' and "square". Group 1 vertices will remain circles.
#' @param vertex.cex Relative size of plotted vertices if \code{type="network"},
#' with implicit default of 1.
#' @param stats If \code{type="formation","duration","dissolution"}, statistics
#' to plot. For \code{type = "formation"}, \code{stats} are among those
#' specified in \code{nwstats.formula} of \code{\link{control.net}}; for
#' \code{type = "duration", "dissolution"}, \code{stats} are among those
#' of the dissolution model (without \code{offset()}). The default is
#' to plot all statistics.
#' @param targ.line If \code{TRUE}, plot target or expected value line for
#' the statistic of interest.
#' @param targ.col Vector of standard R colors for target statistic lines, with
#' default colors based on \code{RColorBrewer} color palettes.
#' @param targ.lwd Line width for the line showing the target statistic values.
#' @param targ.lty Line type for the line showing the target statistic values.
#' @param plots.joined If \code{TRUE} and
#' \code{type="formation","duration","dissolution"}, combine all
#' statistics in one plot, versus one plot per statistic if
#' \code{FALSE}.
#' @param method Plot method for \code{type="formation", "duration", "dissolution"},
#' with options of \code{"l"} for line plots and \code{"b"} for box plots.
#' @param duration.imputed If \code{type = "duration"}, a logical indicating
#' whether or not to impute starting times for relationships extant at
#' the start of the simulation. Defaults to \code{TRUE} when
#' \code{type = "duration"}.
#' @param ... Additional arguments to pass.
#'
#' @details
#' This plot function can produce three types of plots with a stochastic network
#' model simulated through \code{\link{netsim}}:
#' \enumerate{
#' \item \strong{\code{type="epi"}}: epidemic model results (e.g., disease
#' prevalence and incidence) may be plotted.
#' \item \strong{\code{type="network"}}: a static network plot will be
#' generated. A static network plot of a dynamic network is a
#' cross-sectional extraction of that dynamic network at a specific
#' time point. This plotting function wraps the
#' \code{\link{plot.network}} function in the \code{network} package.
#' Consult the help page for \code{plot.network} for all of the plotting
#' parameters. In addition, four plotting parameters specific to
#' \code{netsim} plots are available: \code{sim}, \code{at},
#' \code{col.status}, and \code{shp.g2}.
#' \item \strong{\code{type="formation"}}: summary network statistics related
#' to the network model formation are plotted. These plots are similar
#' to the formation plots for \code{netdx} objects. When running a
#' \code{netsim} simulation, one must specify there that
#' \code{save.nwstats=TRUE}; the plot here will then show the network
#' statistics requested explicitly in \code{nwstats.formula}, or will use
#' the formation formula set in \code{netest} otherwise.
#' \item \strong{\code{type="duration","dissolution"}}: as in
#' \code{\link{plot.netdx}}; supported in \code{plot.netsim} only when
#' the dissolution model is \code{~offset(edges)}, \code{tergmLite} is
#' \code{FALSE}, and \code{save.network} is \code{TRUE}.
#' }
#'
#' @details
#' When \code{type="epi"}, this plotting function will extract the
#' epidemiological output from a model object of class \code{netsim} and plot
#' the time series data of disease prevalence and other results. The summary
#' statistics that the function calculates and plots are individual simulation
#' lines, means of the individual simulation lines, and quantiles of those
#' individual simulation lines. The mean line, toggled on with
#' \code{mean.line=TRUE}, is calculated as the row mean across simulations at
#' each time step.
#'
#' Compartment prevalences are the size of a compartment over some denominator.
#' To plot the raw numbers from any compartment, use \code{popfrac=FALSE}; this
#' is the default for any plots of flows. The \code{popfrac} parameter
#' calculates and plots the denominators of all specified compartments using
#' these rules: 1) for one-group models, the prevalence of any compartment is
#' the compartment size divided by the total population size; 2) for two-group
#' models, the prevalence of any compartment is the compartment size divided by
#' the group population size. For any prevalences that are not automatically
#' calculated, the \code{\link{mutate_epi}} function may be used to add new
#' variables to the \code{netsim} object to plot or analyze.
#'
#' The quantiles show the range of outcome values within a certain specified
#' quantile range. By default, the interquartile range is shown: that is the
#' middle 50\% of the data. This is specified by \code{qnts=0.5}. To show the
#' middle 95\% of the data, specify \code{qnts=0.95}. To toggle off the polygons
#' where they are plotted by default, specify \code{qnts=FALSE}.
#'
#' When \code{type="network"}, this function will plot cross sections of the
#' simulated networks at specified time steps. Because it is only possible to
#' plot one time step from one simulation at a time, it is necessary to enter
#' these in the \code{at} and \code{sims} parameters. To aid in visualizing
#' representative and extreme simulations at specific time steps, the
#' \code{sims} parameter may be set to \code{"mean"} to plot the simulation in
#' which the disease prevalence is closest to the average across all
#' simulations, \code{"min"} to plot the simulation in which the prevalence is
#' lowest, and \code{"max"} to plot the simulation in which the prevalence is
#' highest.
#'
#' @method plot netsim
#' @export
#'
#' @keywords plot
#' @seealso \code{\link{plot.network}}, \code{\link{mutate_epi}}
#'
#' @examples
#' ## SI Model without Network Feedback
#' # Initialize network and set network model parameters
#' nw <- network_initialize(n = 100)
#' nw <- set_vertex_attribute(nw, "group", rep(1:2, each = 50))
#' formation <- ~edges
#' target.stats <- 50
#' coef.diss <- dissolution_coefs(dissolution = ~offset(edges), duration = 20)
#'
#' # Estimate the network model
#' est <- netest(nw, formation, target.stats, coef.diss, verbose = FALSE)
#'
#' # Simulate the epidemic model
#' param <- param.net(inf.prob = 0.3, inf.prob.g2 = 0.15)
#' init <- init.net(i.num = 10, i.num.g2 = 10)
#' control <- control.net(type = "SI", nsteps = 20, nsims = 3,
#' verbose = FALSE, save.nwstats = TRUE,
#' nwstats.formula = ~edges + meandeg + concurrent)
#' mod <- netsim(est, param, init, control)
#'
#' # Plot epidemic trajectory
#' plot(mod)
#' plot(mod, type = "epi", grid = TRUE)
#' plot(mod, type = "epi", popfrac = TRUE)
#' plot(mod, type = "epi", y = "si.flow", qnts = 1, ylim = c(0, 4))
#'
#' # Plot static networks
#' par(mar = c(0, 0, 0, 0))
#' plot(mod, type = "network", vertex.cex = 1.5)
#'
#' # Automatic coloring of infected nodes as red
#' par(mfrow = c(1, 2), mar = c(0, 0, 2, 0))
#' plot(mod, type = "network", main = "Min Prev | Time 20",
#'      col.status = TRUE, at = 20, sims = "min", vertex.cex = 1.25)
#' plot(mod, type = "network", main = "Max Prev | Time 20",
#'      col.status = TRUE, at = 20, sims = "max", vertex.cex = 1.25)
#'
#' # Automatic shape by group number (circle = group 1)
#' par(mar = c(0, 0, 0, 0))
#' plot(mod, type = "network", at = 20, col.status = TRUE,
#' shp.g2 = "square")
#' plot(mod, type = "network", at = 20, col.status = TRUE,
#' shp.g2 = "triangle", vertex.cex = 2)
#'
#' # Plot formation statistics
#' par(mfrow = c(1,1), mar = c(3,3,1,1), mgp = c(2,1,0))
#' plot(mod, type = "formation", grid = TRUE)
#' plot(mod, type = "formation", plots.joined = FALSE)
#' plot(mod, type = "formation", sims = 2:3)
#' plot(mod, type = "formation", plots.joined = FALSE,
#' stats = c("edges", "concurrent"))
#' plot(mod, type = "formation", stats = "meandeg",
#' mean.lwd = 1, qnts.col = "seagreen", mean.col = "black")
#'
plot.netsim <- function(x, type = "epi", y, popfrac = FALSE, sim.lines = FALSE,
                        sims, sim.col, sim.lwd, sim.alpha, mean.line = TRUE,
                        mean.smooth = TRUE, mean.col, mean.lwd = 2,
                        mean.lty = 1, qnts = 0.5, qnts.col, qnts.alpha = 0.5,
                        qnts.smooth = TRUE, legend, leg.cex = 0.8, axs = "r",
                        grid = FALSE, add = FALSE, network = 1, at = 1,
                        col.status = FALSE, shp.g2 = NULL, vertex.cex, stats,
                        targ.line = TRUE, targ.col, targ.lwd = 2, targ.lty = 2,
                        plots.joined, duration.imputed = TRUE, method = "l", ...) {
  type <- match.arg(type, c("epi", "network", "formation", "duration", "dissolution"))
  if (type == "network") {
    # Network plot ------------------------------------------------------------
    if (x$control$tergmLite == TRUE) {
      # BUG FIX: corrected typo "networkDyanmic" -> "networkDynamic"
      stop("networkDynamic object is not saved in tergmLite netsim simulation.
         Check control setting tergmLite", call. = FALSE)
    }
    nsteps <- x$control$nsteps
    if (at > x$control$nsteps) {
      stop("Specify a time step between 1 and ", nsteps, call. = FALSE)
    }
    nsims <- x$control$nsims
    if (missing(sims)) {
      sims <- 1
    }
    if (length(sims) > 1 || (!is.numeric(sims) &&
                             !(sims %in% c("mean", "max", "min")))) {
      # BUG FIX: trailing space after "number" so the two stop() arguments do
      # not run together ("numberor ...")
      stop("sims argument must be single simulation number ",
           "or \"mean\", \"max\", or \"min\" ", call. = FALSE)
    }
    # Resolve the "mean"/"max"/"min" keywords to a concrete simulation number
    # based on infected prevalence at time `at`
    sims.arg <- sims
    if (sims == "mean") {
      sims <- which.min(abs(as.numeric(x$epi$i.num[at, ]) -
                              mean(as.numeric(x$epi$i.num[at, ]))))
      sims.val <- as.numeric(x$epi$i.num[at, sims])
    }
    if (sims == "max") {
      sims <- as.numeric(which.max(x$epi$i.num[at, ]))
      sims.val <- x$epi$i.num[at, sims]
    }
    if (sims == "min") {
      sims <- as.numeric(which.min(x$epi$i.num[at, ]))
      sims.val <- x$epi$i.num[at, sims]
    }
    # Extract the cross-sectional network at time `at` from simulation `sims`
    obj <- get_network(x, sims, network, collapse = TRUE, at = at)
    tergmLite <- x$control$tergmLite
    miss_vertex.cex <- missing(vertex.cex)
    # Vertex shapes: group 1 is always a circle (50 sides); group 2 may be
    # drawn as a square or triangle via shp.g2
    if (!is.null(shp.g2)) {
      if (all(shp.g2 != c("square", "triangle"))) {
        stop("shp.g2 accepts inputs of either \"square\" or \"triangle\" ",
             call. = FALSE)
      }
      grp.flag <- length(unique(get_vertex_attribute(obj, "group")))
      if (is.numeric(grp.flag)) {
        mids <- idgroup(obj)
        if (shp.g2 == "square") {
          vertex.sides <- ifelse(mids == 1, 50, 4)
          vertex.rot <- 45
          if (miss_vertex.cex == TRUE) {
            vertex.cex <- 1
          }
        }
        if (shp.g2 == "triangle") {
          vertex.sides <- ifelse(mids == 1, 50, 3)
          vertex.rot <- 90
          if (miss_vertex.cex == TRUE) {
            vertex.cex <- 1
          }
        }
      } else {
        warning("shp.g2 applies to two-group networks only, so ignoring.")
        vertex.sides <- 50
        vertex.rot <- 0
        if (miss_vertex.cex == TRUE) {
          vertex.cex <- 1
        }
      }
    } else {
      vertex.sides <- 50
      vertex.rot <- 0
      if (miss_vertex.cex == TRUE) {
        vertex.cex <- 1
      }
    }
    if (col.status == TRUE) {
      if (tergmLite == TRUE) {
        stop("Plotting status colors requires tergmLite=FALSE in netsim
           control settings.", call. = FALSE)
      }
      # Status colors: blue = susceptible, red = infected, green = recovered
      pal <- adjustcolor(c(4, 2, 3), 0.75)
      if (tergmLite == FALSE) {
        testatus <- get.vertex.attribute.active(obj, "testatus", at = at)
        cols <- rep(pal[1], length(testatus))
        cols[testatus == "i"] <- pal[2]
        cols[testatus == "r"] <- pal[3]
      }
      plot.network(obj, vertex.col = cols, vertex.border = "grey60",
                   edge.col = "grey40", vertex.sides = vertex.sides,
                   vertex.rot = vertex.rot, vertex.cex = vertex.cex,
                   displaylabels = FALSE, ...)
      if (sims.arg %in% c("mean", "max", "min")) {
        mtext(side = 1, text = paste("Sim =", sims, " | Prev =", sims.val))
      }
    } else {
      plot.network(obj, vertex.sides = vertex.sides, vertex.rot = vertex.rot,
                   vertex.cex = vertex.cex, displaylabels = FALSE, ...)
    }
  } else if (type == "epi") {
    # Epidemic plot -----------------------------------------------------------
    ## Model dimensions and class ##
    nsteps <- x$control$nsteps
    nsims <- x$control$nsims
    if (missing(sims)) {
      sims <- 1:nsims
    }
    if (max(sims) > nsims) {
      stop("Set sim to between 1 and ", nsims, call. = FALSE)
    }
    if (is.null(x$param$groups) || !is.numeric(x$param$groups)) {
      groups <- 1
      x$param$groups <- 1
    } else {
      groups <- x$param$groups
    }
    # dotargs
    da <- list(...)
    ## Compartments ##
    # If y is not supplied, default to all prevalence compartments (*.num,
    # plus *.num.g2 for two-group models)
    nocomp <- missing(y)
    if (nocomp == TRUE) {
      if (groups == 1) {
        y <- grep(".num$", names(x$epi), value = TRUE)
      }
      if (groups == 2) {
        if (inherits(x, "icm")) {
          y <- c(grep(".num$", names(x$epi), value = TRUE),
                 grep(".num.g2$", names(x$epi), value = TRUE))
        }
        if (inherits(x, "netsim")) {
          y <- c(grep(".num$", names(x$epi), value = TRUE),
                 grep(".num.g2$", names(x$epi), value = TRUE))
        }
      }
      if (missing(legend)) {
        legend <- TRUE
      }
    }
    if (nocomp == FALSE) {
      if (any(y %in% names(x$epi) == FALSE)) {
        stop("Specified y is not available in object", call. = FALSE)
      }
    }
    lcomp <- length(y)
    ## Color palettes ##
    # Main color palette
    bpal <- c(4, 2, 3, 5:100)
    # Mean line
    if (missing(mean.col)) {
      mean.col <- bpal
    }
    mean.pal <- adjustcolor(mean.col, 0.9)
    # Quantile bands
    if (missing(qnts.col)) {
      qnts.col <- bpal
    }
    qnts.pal <- adjustcolor(qnts.col, qnts.alpha)
    # Sim lines: recycle widths/colors to the number of compartments
    if (missing(sim.lwd)) {
      sim.lwd <- rep(0.75, lcomp)
    } else {
      if (length(sim.lwd) < lcomp) {
        sim.lwd <- rep(sim.lwd, lcomp)
      }
    }
    if (missing(sim.col)) {
      sim.col <- bpal
    } else {
      if (length(sim.col) < lcomp) {
        sim.col <- rep(sim.col, lcomp)
      }
    }
    # More simulations -> more transparent individual lines
    if (missing(sim.alpha) && nsims == 1) {
      sim.alpha <- 0.9
    }
    if (missing(sim.alpha) && nsims > 1) {
      sim.alpha <- max(c(0.05, 1 - log10(nsims) / 3))
    }
    sim.pal <- adjustcolor(sim.col, sim.alpha)
    ## Prevalence calculations ##
    nopopfrac <- missing(popfrac)
    if (nopopfrac == TRUE) {
      popfrac <- FALSE
    }
    # Flows and absolute-count compartments are never converted to fractions
    if (nopopfrac == TRUE) {
      if (any(grepl(".flow", y)) ||
            (groups == 1 && all(grepl(".num$", y)) == FALSE) ||
            (groups == 2 && all(c(grepl(".num$", y), grepl(".g2$", y)) == FALSE)) ||
            # BUG FIX: removed duplicated "num.g2" entry in the match set
            any(y %in% c("num", "num.g2"))) {
        popfrac <- FALSE
      }
    }
    x <- denom(x, y, popfrac)
    # Compartment max
    if (popfrac == FALSE) {
      if (lcomp == 1) {
        min.prev <- min(x$epi[[y]], na.rm = TRUE)
        max.prev <- max(x$epi[[y]], na.rm = TRUE)
      } else {
        min.prev <- min(sapply(y, function(comps) min(x$epi[[comps]], na.rm = TRUE)))
        max.prev <- max(sapply(y, function(comps) max(x$epi[[comps]], na.rm = TRUE)))
      }
    } else {
      min.prev <- 0
      max.prev <- 1
    }
    # Initialize ylim max values
    qnt.min <- 1E10
    qnt.max <- -1E10
    mean.min <- 1E10
    mean.max <- -1E10
    ## Quantiles - ylim max ##
    # First pass over quantiles/means is only to compute the y-axis limits;
    # the actual drawing happens after the plot window is opened below
    if (qnts == FALSE) {
      disp.qnts <- FALSE
    } else {
      disp.qnts <- TRUE
    }
    if (nsims == 1) {
      disp.qnts <- FALSE
    }
    if (disp.qnts == TRUE) {
      if (qnts > 1 || qnts < 0) {
        stop("qnts must be between 0 and 1", call. = FALSE)
      }
      qnt.max <- draw_qnts(x, y, qnts, qnts.pal, qnts.smooth, "epi", 0, "max")
      qnt.min <- draw_qnts(x, y, qnts, qnts.pal, qnts.smooth, "epi", 0, "min")
    }
    ## Mean lines - ylim max ##
    if (mean.line == TRUE) {
      if (!missing(mean.lwd) && length(mean.lwd) < lcomp) {
        mean.lwd <- rep(mean.lwd, lcomp)
      }
      if (missing(mean.lwd)) {
        mean.lwd <- rep(1.5, lcomp)
      }
      if (!missing(mean.lty) && length(mean.lty) < lcomp) {
        mean.lty <- rep(mean.lty, lcomp)
      }
      if (missing(mean.lty)) {
        mean.lty <- rep(1, lcomp)
      }
      mean.max <- draw_means(x, y, mean.smooth, mean.lwd, mean.pal,
                             mean.lty, "epi", 0, "max")
      mean.min <- draw_means(x, y, mean.smooth, mean.lwd, mean.pal,
                             mean.lty, "epi", 0, "min")
    }
    ## Missing args ##
    if (is.null(da$xlim)) {
      da$xlim <- c(0, nsteps)
    }
    if (is.null(da$ylim) && (popfrac == TRUE || sim.lines == TRUE)) {
      da$ylim <- c(min.prev, max.prev)
    } else if (is.null(da$ylim) && popfrac == FALSE && sim.lines == FALSE &&
                 (mean.line == TRUE || qnts == TRUE)) {
      da$ylim <- c(min(qnt.min * 0.9, mean.min * 0.9), max(qnt.max * 1.1, mean.max * 1.1))
    }
    if (is.null(da$main)) {
      da$main <- ""
    }
    if (is.null(da$xlab)) {
      da$xlab <- "Time"
    }
    if (is.null(da$ylab)) {
      if (popfrac == FALSE) {
        da$ylab <- "Number"
      } else {
        da$ylab <- "Prevalence"
      }
    }
    ## Main plot window ##
    if (add == FALSE) {
      da$x <- 1
      da$y <- 1
      da$type <- "n"
      da$bty <- "n"
      do.call(plot, da)
    }
    ## Quantiles ##
    # Second pass: this time draw_qnts() actually draws the polygons into the
    # now-open plot window (the earlier pass only computed ylim bounds)
    if (qnts == FALSE) {
      disp.qnts <- FALSE
    } else {
      disp.qnts <- TRUE
    }
    if (nsims == 1) {
      disp.qnts <- FALSE
    }
    if (disp.qnts == TRUE) {
      if (qnts > 1 || qnts < 0) {
        stop("qnts must be between 0 and 1", call. = FALSE)
      }
      y.l <- length(y)
      qnts.pal <- qnts.pal[1:y.l]
      draw_qnts(x, y, qnts, qnts.pal, qnts.smooth)
    }
    ## Simulation lines ##
    if (sim.lines == TRUE) {
      for (j in seq_len(lcomp)) {
        for (i in sims) {
          lines(x$epi[[y[j]]][, i], lwd = sim.lwd[j], col = sim.pal[j])
        }
      }
    }
    ## Mean lines ##
    if (mean.line == TRUE) {
      if (!missing(mean.lwd) && length(mean.lwd) < lcomp) {
        mean.lwd <- rep(mean.lwd, lcomp)
      }
      if (missing(mean.lwd)) {
        mean.lwd <- rep(2.5, lcomp)
      }
      if (!missing(mean.lty) && length(mean.lty) < lcomp) {
        mean.lty <- rep(mean.lty, lcomp)
      }
      if (missing(mean.lty)) {
        if (nocomp == FALSE) {
          mean.lty <- rep(1, lcomp)
        }
      }
      y.n <- length(y)
      mean.pal <- mean.pal[1:y.n]
      draw_means(x, y, mean.smooth, mean.lwd, mean.pal, mean.lty)
    }
    ## Grid
    if (grid == TRUE) {
      grid()
    }
    ## Legends ##
    if (!missing(legend) && legend == TRUE) {
      if (groups == 2 && nocomp == TRUE) {
        leg.lty <- mean.lty
      } else {
        leg.lty <- 1
      }
      legend("topright", legend = y, lty = leg.lty, lwd = 2,
             col = mean.pal, cex = leg.cex, bg = "white")
    }
  } else {
    # Network statistics plots (formation/duration/dissolution) --------------
    ## Stats
    nsims <- x$control$nsims
    if (missing(sims)) {
      sims <- 1:nsims
    }
    if (max(sims) > nsims) {
      stop("Maximum sims for this object is ", nsims, call. = FALSE)
    }
    nsims <- length(sims)
    nsteps <- x$control$nsteps
    if (type == "formation") {
      # Formation plot ----------------------------------------------------------
      ## get nw stats
      data <- get_nwstats(x, sims, network, mode = "list")
      ## target stats
      nwparam <- get_nwparam(x, network)
      ts <- nwparam$target.stats
      tsn <- nwparam$target.stats.names
      names(ts) <- tsn
    } else {
      ## duration/dissolution plot: only supported for edges-only dissolution
      ## models with the full networkDynamic object saved
      if (isTRUE(x$control$save.diss.stats) &&
            isTRUE(x$control$save.network) &&
            isFALSE(x$control$tergmLite) &&
            isFALSE(is.null(x$diss.stats)) &&
            isTRUE(x$nwparam[[network]]$coef.diss$diss.model.type == "edgesonly")) {
        if (any(unlist(lapply(x$diss.stats, `[[`, "anyNA")))) {
          cat("\nNOTE: Duration & dissolution data contains undefined values due to zero edges of some dissolution
          dyad type(s) on some time step; these undefined values will be set to 0 when processing the data.")
        }
        if (type == "duration") {
          if (isTRUE(duration.imputed)) {
            data <- lapply(sims, function(sim) x$diss.stats[[sim]][[network]][["meanageimputed"]])
          } else {
            data <- lapply(sims, function(sim) x$diss.stats[[sim]][[network]][["meanage"]])
          }
          ts <- x$nwparam[[network]]$coef.diss$duration
        } else { # if type is "dissolution"
          data <- lapply(sims, function(sim) x$diss.stats[[sim]][[network]][["propdiss"]])
          # Target dissolution proportion is the reciprocal of mean duration
          ts <- 1 / x$nwparam[[network]]$coef.diss$duration
        }
      } else {
        stop("cannot produce duration/dissolution plot from `netsim` object ",
             "unless `save.diss.stats` is `TRUE`, `save.network` is `TRUE`, ",
             "`tergmLite` is `FALSE`, `keep.diss.stats` is `TRUE` (if ",
             "merging), and dissolution model is edges-only")
      }
    }
    stats_table <- make_stats_table(data, ts)
    # Stack the per-sim matrices into a 3-D array: time x statistic x sim
    data <- array(unlist(data), dim = c(dim(data[[1]]), nsims))
    ## Find available stats
    sts <- which(!is.na(stats_table[, "Sim Mean"]))
    nmstats <- rownames(stats_table)[sts]
    ## Pull and check stat argument
    if (missing(stats)) {
      stats <- nmstats
    }
    if (any(stats %in% nmstats == FALSE)) {
      stop("One or more requested stats not contained in netsim object",
           call. = FALSE)
    }
    outsts <- which(nmstats %in% stats)
    nmstats <- nmstats[outsts]
    ## Subset data
    data <- data[, outsts, , drop = FALSE]
    ## we've already subset the data to `sims`
    ## Pull target stats
    targets <- stats_table$Target[sts][outsts]
    da <- list(...)
    # Delegate drawing to the shared routine also used by plot.netdx
    plot_stats_table(data = data,
                     nmstats = nmstats,
                     method = method,
                     duration.imputed = duration.imputed,
                     sim.lines = sim.lines,
                     sim.col = sim.col,
                     sim.lwd = sim.lwd,
                     mean.line = mean.line,
                     mean.smooth = mean.smooth,
                     mean.col = mean.col,
                     mean.lwd = mean.lwd,
                     mean.lty = mean.lty,
                     qnts = qnts,
                     qnts.col = qnts.col,
                     qnts.alpha = qnts.alpha,
                     qnts.smooth = qnts.smooth,
                     targ.line = targ.line,
                     targ.col = targ.col,
                     targ.lwd = targ.lwd,
                     targ.lty = targ.lty,
                     plots.joined = plots.joined,
                     draw_legend = legend,
                     grid = grid,
                     targets = targets,
                     dynamic = TRUE, # always dynamic in netsim
                     da = da,
                     ...)
  }
}
#' @title Plot Compartment Diagram for Epidemic Models
#'
#' @description Plots a compartment flow diagram for deterministic compartmental
#' models, stochastic individual contact models, and stochastic
#' network models.
#'
#' @param x An \code{EpiModel} object of class \code{dcm}, \code{icm}, or
#' \code{netsim}.
#' @param at Time step for model statistics.
#' @param digits Number of significant digits to print.
#' @param ... Additional arguments passed to plot (not currently used).
#'
#' @details
#' The \code{comp_plot} function provides a visual summary of an epidemic model
#' at a specific time step. The information contained in \code{comp_plot} is the
#' same as in the \code{summary} functions for a model, but presented
#' graphically as a compartment flow diagram.
#'
#' For \code{dcm} class plots, specify the model run number if the model
#' contains multiple runs, as in a sensitivity analysis. For \code{icm} and
#' \code{netsim} class plots, the \code{run} argument is not used; the plots
#' show the means and standard deviations across simulations at the specified
#' time step.
#'
#' These plots are currently limited to one-group models for each of the three
#' model classes. That functionality may be expanded in future software
#' releases.
#'
#' @export
#' @keywords plot
#'
#' @examples
#' ## Example 1: DCM SIR model with varying act.rate
#' param <- param.dcm(inf.prob = 0.2, act.rate = 5:7,
#' rec.rate = 1/3, a.rate = 1/90, ds.rate = 1/100,
#' di.rate = 1/35, dr.rate = 1/100)
#' init <- init.dcm(s.num = 1000, i.num = 1, r.num = 0)
#' control <- control.dcm(type = "SIR", nsteps = 25, verbose = FALSE)
#' mod1 <- dcm(param, init, control)
#' comp_plot(mod1, at = 25, run = 3)
#'
#' ## Example 2: ICM SIR model with 3 simulations
#' param <- param.icm(inf.prob = 0.2, act.rate = 3, rec.rate = 1/50,
#' a.rate = 1/100, ds.rate = 1/100,
#' di.rate = 1/90, dr.rate = 1/100)
#' init <- init.icm(s.num = 500, i.num = 1, r.num = 0)
#' control <- control.icm(type = "SIR", nsteps = 25,
#' nsims = 3, verbose = FALSE)
#' mod2 <- icm(param, init, control)
#' comp_plot(mod2, at = 25, digits = 1)
#'
comp_plot <- function(x, at, digits, ...) {
  # S3 generic: dispatches on the class of x to comp_plot.dcm,
  # comp_plot.icm, or comp_plot.netsim.
  UseMethod("comp_plot")
}
#' @param run Model run number, for \code{dcm} class models with multiple runs
#' (sensitivity analyses).
#' @method comp_plot dcm
#' @rdname comp_plot
#' @export
comp_plot.dcm <- function(x, at = 1, digits = 3, run = 1, ...) {
  ## Model dimensions and metadata
  nsteps <- x$control$nsteps
  dis.type <- x$control$type
  groups <- x$param$groups
  vital <- x$param$vital
  ## Errors
  if (groups != 1) {
    stop("Only 1-group dcm models currently supported",
         call. = FALSE)
  }
  ## Time
  if (at > nsteps || at < 1) {
    stop("Specify a time step between 1 and ", nsteps)
  }
  intime <- at
  # Translate the requested time into a row index of the solution output
  at <- which(x$control$timesteps == intime)
  ## Dataframe subsets
  df <- as.data.frame(x, run = run)
  df <- round(df[at, ], digits)
  ## Change graphical parameters
  # BUG FIX: register restoration handlers immediately after modifying global
  # state so par() and options() are reverted even if an error interrupts
  # plotting; previously on.exit() was registered only at the end of the
  # function and options(scipen = 10) was never restored at all.
  ops <- list(mar = par()$mar, mfrow = par()$mfrow, mgp = par()$mgp)
  on.exit(par(ops), add = TRUE)
  par(mar = c(0, 0, 2, 0))
  old.opts <- options(scipen = 10)
  on.exit(options(old.opts), add = TRUE)
  ## Main Plot: draw boxes and flow arrows on an invisible 0-100 canvas
  plot(0:100, 0:100, type = "n", axes = FALSE)
  title(main = paste(dis.type, "Model Diagram"))
  mtext(paste0("time=", intime, " | run=", run),
        side = 3, cex = 0.8, line = -1)
  ## 1. SI Model
  if (dis.type == "SI") {
    mbox(22, 40, "Susceptible", df$s.num)
    mbox(57, 40, "Infected", df$i.num)
    harrow(22, 40, "si.flow", df$si.flow, dir = "right")
    if (vital == TRUE) {
      varrow(22, 40, "ds.flow", df$ds.flow, dir = "out")
      varrow(57, 40, "di.flow", df$di.flow, dir = "out")
      varrow(22, 40, "a.flow", df$a.flow, dir = "in")
    }
  }
  ## 2. SIR Model
  if (dis.type == "SIR") {
    mbox(5, 40, "Susceptible", df$s.num)
    mbox(40, 40, "Infected", df$i.num)
    mbox(75, 40, "Recovered", df$r.num)
    harrow(5, 40, "si.flow", df$si.flow, dir = "right")
    harrow(40, 40, "ir.flow", df$ir.flow, dir = "right")
    if (vital == TRUE) {
      varrow(5, 40, "ds.flow", df$ds.flow, dir = "out")
      varrow(40, 40, "di.flow", df$di.flow, dir = "out")
      varrow(75, 40, "dr.flow", df$dr.flow, dir = "out")
      varrow(5, 40, "a.flow", df$a.flow, dir = "in")
    }
  }
  ## 3. SIS Model
  if (dis.type == "SIS") {
    mbox(22, 40, "Susceptible", df$s.num)
    mbox(57, 40, "Infected", df$i.num)
    harrow(22, 40, "si.flow", df$si.flow, dir = "right")
    harrow(22, 40, "is.flow", df$is.flow, dir = "left")
    if (vital == TRUE) {
      varrow(22, 40, "ds.flow", df$ds.flow, dir = "out")
      varrow(57, 40, "di.flow", df$di.flow, dir = "out")
      varrow(22, 40, "a.flow", df$a.flow, dir = "in")
    }
  }
}
#' @method comp_plot icm
#' @rdname comp_plot
#' @export
comp_plot.icm <- function(x, at = 1, digits = 3, ...) {
  # Variables
  nsteps <- x$control$nsteps
  dis.type <- x$control$type
  vital <- x$param$vital
  # Both icm and netsim objects store the group count in param (this method
  # also serves as the workhorse for comp_plot.netsim)
  groups <- x$param$groups
  if (groups != 1) {
    stop("Only 1-group models currently supported",
         call. = FALSE)
  }
  # Time
  if (at > nsteps || at < 1) {
    stop("Specify a timestep between 1 and ", nsteps,
         call. = FALSE)
  }
  ## Dataframe subsets for plots: cross-simulation means and SDs at time `at`
  df.mn <- as.data.frame(x, out = "mean")
  df.mn <- round(df.mn[at == df.mn$time, ], digits)
  df.sd <- as.data.frame(x, out = "sd")
  df.sd <- round(df.sd[at == df.sd$time, ], digits)
  ## Change graphical parameters
  # BUG FIX: register restoration handlers immediately after modifying global
  # state so par() and options() are reverted even if an error interrupts
  # plotting; previously on.exit() was registered only at the end of the
  # function and options(scipen = 10) was never restored at all.
  ops <- list(mar = par()$mar, mfrow = par()$mfrow, mgp = par()$mgp)
  on.exit(par(ops), add = TRUE)
  par(mar = c(0, 0, 2, 0))
  old.opts <- options(scipen = 10)
  on.exit(options(old.opts), add = TRUE)
  ## Main Plot: draw boxes and flow arrows on an invisible 0-100 canvas
  plot(0:100, 0:100, type = "n", axes = FALSE)
  title(main = paste(dis.type, "Model Diagram"))
  mtext(paste0("Simulation means(sd) | time=", at),
        side = 3, cex = 0.8, line = -1)
  ## 1. SI Model
  if (dis.type == "SI" && groups == 1) {
    mbox(22, 40, "Susceptible", paste0(df.mn$s.num, "(", df.sd$s.num, ")"))
    mbox(57, 40, "Infected", paste0(df.mn$i.num, "(", df.sd$i.num, ")"))
    harrow(22, 40, "si.flow", df.mn$si.flow, dir = "right")
    if (vital == TRUE) {
      varrow(22, 40, "ds.flow", df.mn$ds.flow, dir = "out")
      varrow(57, 40, "di.flow", df.mn$di.flow, dir = "out")
      varrow(22, 40, "a.flow", df.mn$a.flow, dir = "in")
    }
  }
  ## 2. SIR Model
  if (dis.type == "SIR" && groups == 1) {
    mbox(5, 40, "Susceptible", paste0(df.mn$s.num, "(", df.sd$s.num, ")"))
    mbox(40, 40, "Infected", paste0(df.mn$i.num, "(", df.sd$i.num, ")"))
    mbox(75, 40, "Recovered", paste0(df.mn$r.num, "(", df.sd$r.num, ")"))
    harrow(5, 40, "si.flow", df.mn$si.flow, dir = "right")
    harrow(40, 40, "ir.flow", df.mn$ir.flow, dir = "right")
    if (vital == TRUE) {
      varrow(5, 40, "ds.flow", df.mn$ds.flow, dir = "out")
      varrow(40, 40, "di.flow", df.mn$di.flow, dir = "out")
      varrow(75, 40, "dr.flow", df.mn$dr.flow, dir = "out")
      varrow(5, 40, "a.flow", df.mn$a.flow, dir = "in")
    }
  }
  ## 3. SIS Model
  if (dis.type == "SIS" && groups == 1) {
    mbox(22, 40, "Susceptible", paste0(df.mn$s.num, "(", df.sd$s.num, ")"))
    mbox(57, 40, "Infected", paste0(df.mn$i.num, "(", df.sd$i.num, ")"))
    harrow(22, 40, "si.flow", df.mn$si.flow, dir = "right")
    harrow(22, 40, "is.flow", df.mn$is.flow, dir = "left")
    if (vital == TRUE) {
      varrow(22, 40, "ds.flow", df.mn$ds.flow, dir = "out")
      varrow(57, 40, "di.flow", df.mn$di.flow, dir = "out")
      varrow(22, 40, "a.flow", df.mn$a.flow, dir = "in")
    }
  }
}
#' @method comp_plot netsim
#' @rdname comp_plot
#' @export
comp_plot.netsim <- function(x, at = 1, digits = 3, ...) {
  # netsim objects share the icm epi structure, so the icm diagram applies.
  comp_plot.icm(x, at = at, digits = digits, ...)
}
# ggplot ------------------------------------------------------------------

#' @title ggplot2 Geom for Quantile Bands
#'
#' @description Plots quantile bands given a data.frame with stochastic model
#' results from \code{\link{icm}} or \code{\link{netsim}}.
#'
#' @param mapping Standard aesthetic mapping \code{aes()} input for ggplot2.
#' @param lower Lower quantile for the time series.
#' @param upper Upper quantile for the time series.
#' @param alpha Transparency of the ribbon fill.
#' @param ... Additional arguments passed to \code{stat_summary}.
#'
#' @details
#' This is a wrapper around \code{ggplot::stat_summary} with a ribbon geom as
#' aesthetic output.
#'
#' @export
#' @keywords plot
#'
#' @examples
#' param <- param.icm(inf.prob = 0.2, act.rate = 0.25)
#' init <- init.icm(s.num = 500, i.num = 1)
#' control <- control.icm(type = "SI", nsteps = 250, nsims = 5)
#' mod1 <- icm(param, init, control)
#' df <- as.data.frame(mod1)
#' df.mean <- as.data.frame(mod1, out = "mean")
#'
#' library(ggplot2)
#' ggplot() +
#'   geom_line(data = df, mapping = aes(time, i.num, group = sim),
#'   alpha = 0.25, lwd = 0.25, color = "firebrick") +
#'   geom_bands(data = df, mapping = aes(time, i.num),
#'              lower = 0.1, upper = 0.9, fill = "firebrick") +
#'   geom_line(data = df.mean, mapping = aes(time, i.num)) +
#'   theme_minimal()
#'
geom_bands <- function(mapping, lower = 0.25, upper = 0.75, alpha = 0.25, ...) {
  # Closures over lower/upper supply the ribbon's band boundary functions.
  band_lo <- function(vals) quantile(vals, lower)
  band_hi <- function(vals) quantile(vals, upper)
  stat_summary(
    mapping,
    geom = "ribbon",
    fun.min = band_lo,
    fun.max = band_hi,
    alpha = alpha,
    ...
  )
}
# Helper Functions --------------------------------------------------------

# Calculate denominators.
#
# Optionally converts absolute compartment counts in x$epi to prevalences
# (fractions of the matching denominator compartment).
#
# x       : model object (dcm/icm/netsim-like) with $epi (list of per-output
#           data frames) and $control ($nruns for dcm, $nsims otherwise).
# y       : character vector of epi output names, e.g. "i.num" or "i.num.g2";
#           the denominator name drops the first dot-component
#           ("i.num" -> "num", "i.num.g2" -> "num.g2").
# popfrac : if TRUE, divide each y element by its denominator in place.
#
# Returns x, possibly with rescaled $epi elements; when popfrac is FALSE and
# there is a single run/sim, each selected element is coerced to a data.frame
# so downstream code can rely on data-frame semantics.
denom <- function(x, y, popfrac) {
  # FIX: was ifelse(class(x) == "dcm", ...). class() can return a vector, in
  # which case both the comparison and ifelse() return vectors and the later
  # x$control[[cont.val]] lookup breaks; inherits() + if/else is the correct
  # scalar dispatch.
  cont.val <- if (inherits(x, "dcm")) "nruns" else "nsims"
  if (isTRUE(popfrac)) {
    for (i in seq_along(y)) {
      dname <- paste(strsplit(y[i], "[.]")[[1]][-1], collapse = ".")
      x$epi[[y[i]]] <- x$epi[[y[i]]] / x$epi[[dname]]
    }
  }
  if (!isTRUE(popfrac) && x$control[[cont.val]] == 1) {
    for (j in seq_along(y)) {
      x$epi[[y[j]]] <- data.frame(x$epi[[y[j]]])
    }
  }
  return(x)
}
## comp_plot helper utilities ##

# Draw a labeled compartment box: a 20x20 square whose lower-left corner is
# at (x, y), with a centered "title \n n=val" label.
mbox <- function(x, y, title, val) {
  xs <- c(x, x + 20, x + 20, x)
  ys <- c(y, y, y + 20, y + 20)
  polygon(xs, ys)
  lbl <- paste0(title, "\n n=", val)
  text(x + 10, y + 10, lbl, cex = 0.9)
}
# Horizontal arrow
# Draw a horizontal flow arrow between two compartment boxes, labeled
# "title=val". (xbox, ybox) is the lower-left corner of the left box; the
# y1 argument of arrows() defaults to y0, which keeps the arrow horizontal.
harrow <- function(xbox, ybox, title, val, dir) {
  lbl <- paste(title, val, sep = "=")
  midx <- xbox + 27.5
  if (dir == "right") {
    # Upper arrow pointing right, label above it.
    arrows(xbox + 20, ybox + 12, xbox + 35, lwd = 2, length = 0.15)
    text(midx, ybox + 17, lbl, cex = 0.8)
  }
  if (dir == "left") {
    # Lower arrow pointing left, label below it.
    arrows(xbox + 35, ybox + 5, xbox + 20, lwd = 2, length = 0.15)
    text(midx, ybox + 2, lbl, cex = 0.8)
  }
}
# Vertical arrow
# Draw a vertical flow arrow into ("in") or out of ("out") a compartment box,
# labeled "title=val" beside it. (xbox, ybox) is the box's lower-left corner.
varrow <- function(xbox, ybox, title, val, dir) {
  cx <- xbox + 10
  lbl <- paste(title, val, sep = "=")
  if (dir == "out") {
    arrows(cx, ybox, cx, ybox - 25, lwd = 2, length = 0.15)
    text(cx, ybox - 12.5, lbl, cex = 0.8, pos = 4)
  }
  if (dir == "in") {
    arrows(cx, ybox + 45, cx, ybox + 20, lwd = 2, length = 0.15)
    text(cx, ybox + 32.5, lbl, cex = 0.8, pos = 4)
  }
}
|
2010c4e2cd7de476b47b990255aded82e22472d3
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/nnf_conv_tbc.Rd
|
0315c055b7de93f41a23c74db87f61c49d586778
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 699
|
rd
|
nnf_conv_tbc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nnf-conv.R
\name{nnf_conv_tbc}
\alias{nnf_conv_tbc}
\title{Conv_tbc}
\usage{
nnf_conv_tbc(input, weight, bias, pad = 0)
}
\arguments{
\item{input}{input tensor of shape \eqn{(\mbox{sequence length} \times
batch \times \mbox{in\_channels})}}
\item{weight}{filter of shape (\eqn{\mbox{kernel width} \times \mbox{in\_channels}
\times \mbox{out\_channels}})}
\item{bias}{bias of shape (\eqn{\mbox{out\_channels}})}
\item{pad}{number of timesteps to pad. Default: 0}
}
\description{
Applies a 1-dimensional sequence convolution over an input sequence.
Input and output dimensions are (Time, Batch, Channels) - hence TBC.
}
|
93db0fa39ed6db26a3eea920fdaf8cec2df5b985
|
852db404bb02ebb5a9508d46e17da13f61eee7bb
|
/plot3.R
|
e219208cbcf7f0a44b4547d9abcffc72adcc9a24
|
[] |
no_license
|
manjunathyelipeta/ExData_Plotting1
|
48b6a20246ed21d055e35cc02f8782c46ef5b8a8
|
e50fcba509575b710aef39fd0a697ac607ec92a0
|
refs/heads/master
| 2021-01-15T18:41:46.509700
| 2015-09-13T13:39:55
| 2015-09-13T13:39:55
| 42,116,736
| 0
| 0
| null | 2015-09-08T14:17:12
| 2015-09-08T14:17:12
| null |
UTF-8
|
R
| false
| false
| 1,023
|
r
|
plot3.R
|
# plot3.png: energy sub-metering for 2007-02-01 .. 2007-02-02, all three
# sub-meters as line series on a single set of axes.

# Read the full power-consumption file; '?' marks missing values.
ass_eda1 <- read.table("household_power_consumption.txt", header = TRUE, sep = ';',
                       stringsAsFactors = FALSE,
                       colClasses = c(rep("character", 2), rep("numeric", 7)),
                       na.strings = "?")
ass_eda1$Date <- as.Date(ass_eda1$Date, "%d/%m/%Y")
# Keep only the two target days.
filtered_df <- ass_eda1[ass_eda1$Date >= as.Date("2007-02-01") & ass_eda1$Date <= as.Date("2007-02-02"), ]
# Combine date and time into one timestamp for the x axis.
filtered_df$timestamp <- strptime(paste(filtered_df$Date, filtered_df$Time), "%Y-%m-%d %H:%M:%S")
dropnames <- c("Date", "Time")
filtered_df <- filtered_df[, !(names(filtered_df) %in% dropnames)]
# BUG FIX: 'mar' is a par() setting; passing it to plot() only produced a
# warning and was ignored. Set it via par() so the margins actually apply.
par(mar = c(4, 3, 2, 4))
plot(filtered_df$timestamp, filtered_df$Sub_metering_1, type = "n",
     ylab = "Energy Sub Metering", xlab = "")
lines(filtered_df$timestamp, filtered_df$Sub_metering_2, col = "red")
lines(filtered_df$timestamp, filtered_df$Sub_metering_3, col = "blue")
lines(filtered_df$timestamp, filtered_df$Sub_metering_1, col = "black")
legend("topright", col = c("black", "red", "blue"),
       legend = c("Sub_metering1", "Sub_metering2", "Sub_metering3"), lty = c(1, 1))
# Copy the on-screen plot to a 480x480 (default) PNG and close the device.
dev.copy(png, file = "plot3.png")
dev.off()
|
f51d0ef891edcb6c3bb79a0bb38bf7032bf6b9ce
|
e55183e9cb2effdb3ee1613237c2964b22460510
|
/app.R
|
cc92f7386ced30cf60f5b278f0d28d87d094e71e
|
[] |
no_license
|
peiqingzhang/movie_predictor
|
9df2e553a87ba4fc1b4b20db4db60044a11c2985
|
c3960b369627ea9d2da3ecf1a9924c4c2e551e8d
|
refs/heads/main
| 2023-03-04T05:14:49.234816
| 2021-02-10T20:42:36
| 2021-02-10T20:42:36
| 333,903,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,490
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(httr)
library(tidyverse)
library(shiny)
# Global reactive store used by the server handlers below.
# NOTE(review): defined at the top level, so this state is shared across all
# concurrent user sessions of the app - confirm that is intended.
values <- reactiveValues()
values$review_text <- ""
# FIX: to_show was initialized twice; the duplicate assignment is removed.
values$to_show <- ""
values$num_button <- 0
# UI: a navbar with two tabs - free-text review entry with a Predict button,
# and a txt-file upload whose predicted score appears in the sidebar.
ui <- fluidPage(
  navbarPage(
    theme = shinythemes::shinytheme("cerulean"),
    "Movie Review Predictor",
    # Tab 1: type a review directly.
    # NOTE(review): tabPanel() has no formal `id` argument; `id =` here is
    # passed through `...` - confirm it has the intended effect.
    tabPanel(id = "write_review", "Write down your review",
             textAreaInput("review", "Write your review here", "I love this movie!",
                           rows = 10,
                           width = '800px'),
             actionButton("do", "Predict"),
             # Prediction result for the typed review.
             textOutput("score_written")
    ),
    # Tab 2: upload a plain-text review file.
    tabPanel(id = "upload_review", "Upload your review",
             sidebarLayout(
               sidebarPanel(
                 fileInput("file1", "Choose a txt file to upload the review",
                           multiple = FALSE,
                           accept = c(
                             ".txt")),
                 # Prediction result for the uploaded review.
                 textOutput("score_uploaded")
               ),
               mainPanel(
                 # Echo of the uploaded file's contents.
                 fluidRow(textOutput("review"))
               )
             )
    )
  )
)
# Server: sends review text to a remote prediction service via HTTP POST and
# renders the returned score.
# NOTE(review): the service endpoint IP is hard-coded below - consider making
# it configurable.
server <- function(input, output, session) {

  # Echo the uploaded file's contents in the main panel.
  output$review <- renderText({
    req(input$file1)
    tryCatch(
      {
        text <- paste(read_lines(input$file1$datapath, skip = 0, n_max = -1L), collapse = '\n')
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(text)
  })

  ### write in
  # On "Predict": if the typed review is long enough, POST it to the service
  # and strip JSON punctuation from the response for display.
  socre_text <- eventReactive(input$do, {
    tryCatch(
      {
        if(nchar(input$review) >= 10){
          values$r <- POST("http://13.48.45.7:1080/predict", body = list(review = input$review))
          values$str = content(values$r, "text", encoding = "ISO-8859-1")
          # NOTE(review): '[\n|\"|{|}]' is a character class, so the '|'
          # characters are matched literally (pipes in the response are also
          # stripped) - confirm that is intended.
          values$to_show_written = str_replace_all(values$str,'[\n|\"|{|}]',"")}
        else{
          values$to_show_written = "Please write a little bit more...."
        }
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(values$to_show_written)
  })
  output$score_written <- renderText({
    socre_text()
  })

  ### upload
  # Same flow for the uploaded file: read, POST, clean, display.
  output$score_uploaded <- renderText({
    req(input$file1)
    tryCatch(
      {
        values$review_text <- paste(read_lines(input$file1$datapath, skip = 0, n_max = -1L), collapse = ' ')
        values$r <- POST("http://13.48.45.7:1080/predict", body = list(review = values$review_text))
        values$str = content(values$r, "text", encoding = "ISO-8859-1")
        values$to_show = str_replace_all(values$str,'[\n|\"|{|}]',"")
        isolate(values$to_show)
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(values$to_show)
  })
}
# Launch the Shiny app (standard entry point).
shinyApp(ui = ui, server = server)
|
b11fad52981119b510334d774b60eb02f14d89f2
|
eefdf8ef5b585f11fa348fef725c59a6c10d5d53
|
/R/mes.R
|
b64b3dfe87accdda605e6b341c6b4d0714b066c7
|
[] |
no_license
|
bupianlizhugui/mes
|
a1fb89375141945cc415ece98d4fddd928014b4f
|
12ed2eb75d639aa83867462997f3e8b3507f7992
|
refs/heads/master
| 2021-01-22T21:37:12.027728
| 2016-03-30T15:48:08
| 2016-03-30T15:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,540
|
r
|
mes.R
|
# Shannon entropy (base 2) of a vector of counts.
#
# X : numeric vector of stratum counts. Non-positive counts carry no
#     information (0 * log2(0) is taken as 0 for entropy) and are dropped.
#
# Returns the entropy in bits of the proportion vector X / sum(X).
H.se <- function(X) {
  # FIX: the original guard `if(any(X==0)) X <- X[X>0]` only filtered when a
  # zero was present, so negative values slipped through when no zeros
  # existed, producing NaN from log2(); filter non-positive unconditionally.
  X <- X[X > 0]
  p <- X / sum(X)
  -sum(p * log2(p))
}
# Maximum-entropy stratified sampling: allocate a sample of size N across the
# strata in `dat` so that per-stratum counts are as even as possible
# (maximizing the Shannon entropy of the sampled distribution).
#
# dat  : either raw group labels (a vector) or a precomputed table of counts.
# N    : total sample size; must be strictly less than the population size.
# seed : RNG seed so the tie-breaking random draws are reproducible.
# ix   : if TRUE (only valid when `dat` is raw labels), also return the
#        sampled observation indices as attribute "ix".
# hx   : if TRUE, attach the per-iteration allocation history as "hx".
#
# Returns a data.frame with columns strata/freq/mes plus attributes "totals"
# and "entropy" (max vs. raw vs. mes entropy), and optionally "ix"/"hx".
mes <- function(dat, N, seed=1, ix=FALSE, hx=FALSE) {
  dat.table <- is.table(dat)
  counts <- dat
  # Raw labels are tabulated; empty/invalid strata are dropped.
  if(!dat.table) counts <- table(counts)
  if(any(counts<=0)) counts <- counts[counts>0]
  # Observation indices can only be recovered from raw labels, not counts.
  if(ix & dat.table) {stop('Cannot return indicies when supplying counts; Check ix')}
  if(N>=sum(counts)) {stop('Sample size needs to be less than population size; Check N')}
  set.seed(seed)
  n_grp <- length(counts)
  target <- floor(N/n_grp)
  iter <- 1
  # Step 1: Create target vector = floor(N/n_grp)
  sample_obs <- rep(target, n_grp)
  # Strata smaller than the target contribute everything they have.
  count_update <- counts<sample_obs
  sample_obs[count_update] <- counts[count_update]
  remaining2target <- target-sample_obs
  # hx2 accumulates the allocation history: iteration, per-stratum counts, total.
  hx2 <- c(iter, sample_obs, sum(sample_obs))
  # Step 2: Check to see if all strata = target
  # NOTE(review): `all(remaining2target==0 & sum(sample_obs)!=N)` applies the
  # scalar condition elementwise inside all(); the likely intent is
  # `all(remaining2target==0) && sum(sample_obs)!=N` - confirm.
  if(all(remaining2target==0 & sum(sample_obs)!=N)) {
    # Due to rounding, we may need to randomly select strata s.t. final N is obtained.
    # Not unique, but reproducible using seed argument.
    remaining <- N - sum(sample_obs)
    remainingcounts <- counts-sample_obs
    remainingcounts <- remainingcounts[remainingcounts>0]
    update_i <- sample(seq(length(remainingcounts)),remaining, replace=FALSE)
    sample_obs[update_i] <- sample_obs[update_i]+1
    remaining_obs <- counts-sample_obs
    hx2 <- rbind(hx2, c(iter+1, sample_obs, sum(sample_obs)))
  } else {
    remaining_obs <- counts - sample_obs
    # NOTE(review): add_i is computed but never used.
    add_i <- which(remaining2target>0)
    remaining2target <- target - sample_obs
    sumSampled <- sum(sample_obs)
    # Add one observation per eligible stratum per iteration until N is reached.
    while( sumSampled<N ) {
      iter <- iter+1
      ## Which strata have subjects left to sample?
      remaining <- remaining_obs
      remaining_ii <- which(remaining>0)
      # Scarcer strata are processed first so they are not starved by the cap.
      remaining_ii <- remaining_ii[order(remaining[remaining_ii])]
      remaining_01 <- (remaining_obs[remaining_ii]>0)*1 # add 1 at a time (update!)
      # How many can we add at this iteration?
      # Since we are adding 1 at each iteration, we just check # of strata with at least 1
      # and compare that to the number needed to be sampled.
      if(sum(remaining_01) > N-sumSampled) {
        remaining_ii <- sample(remaining_ii, N-sumSampled, replace=FALSE)
      }
      # NOTE(review): remaining_01 has length(remaining_ii) elements but is
      # indexed by the stratum indices in remaining_ii below; if any index
      # exceeds that length this yields NA - confirm this cannot occur.
      sample_obs[remaining_ii] = sample_obs[remaining_ii] + remaining_01[remaining_ii]
      remaining_obs[remaining_ii] = remaining_obs[remaining_ii]-remaining_01[remaining_ii]
      sumSampled = sum(sample_obs)
      hx2 <- rbind(hx2, c(iter, sample_obs, sum(sample_obs)))
    }
  }
  # Assemble the output table plus summary attributes.
  out <- data.frame('original'=counts, 'sampled'=sample_obs)
  colnames(out) <- c('strata','freq','mes')
  attr(out, 'totals') <- data.frame('total'=sum(counts),'N'=N, 'mes'= sum(sample_obs))
  attr(out,'entropy') <- data.frame('max_entropy'=log2(sum(counts>0)), 'rs_entropy'=H.se(counts), 'mes_entropy'=H.se(sample_obs))
  if(ix) {
    # Draw concrete observation indices per stratum matching the allocation.
    dat2 <- data.frame(seq(length(dat)), dat)
    colnames(dat2) <- c('ix','grp')
    dat2_s <- split(dat2$ix, dat2$grp)
    tmp <- vector('list',length(dat2_s))
    for(jx in seq(tmp)) {
      # sample() on a length-1 numeric vector samples from 1:x; duplicating the
      # single index sidesteps that behavior for singleton strata.
      if(length(dat2_s[[jx]])==1) dat2_s[[jx]] <- rep(dat2_s[[jx]],2)
      tmp[[jx]] <- data.frame(out$strata[jx], sort(sample(dat2_s[[jx]], out$mes[jx], replace=FALSE)))
    }
    samp_ix <- do.call(rbind, tmp)
    colnames(samp_ix) <- c('strata','ix')
    attr(out,'ix') <- samp_ix
  }
  if(hx) {
    colnames(hx2) <- c('iteration',names(counts),'total')
    attr(out,'hx') <- hx2
  }
  out
}
|
44eda255273059f235546e857020474c3b0ac536
|
d4bb4f725018c3c727976bd87d69e21582f0894e
|
/loadExcel.R
|
3731c559b9601b6e96847a5b72be527d7a507d18
|
[] |
no_license
|
prayaggordy/HiMCM
|
3bd30b53db99db909508c3c0f286cab9ab9eecad
|
17b90ed17426cdbfa2e4592e8465ca72314b2d47
|
refs/heads/master
| 2020-04-05T16:28:34.961860
| 2018-11-19T23:37:18
| 2018-11-19T23:37:18
| 157,014,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
loadExcel.R
|
# library() errors on a missing package; require() only warns and returns FALSE.
library(tidyverse)

# Given data: read the COMAP 2018 roller coaster sheet and normalize column
# names (lower case; parentheses removed; punctuation/whitespace runs -> "_").
# FIX: the original piped into magrittr::set_colnames(), which library(tidyverse)
# does not attach, so the pipeline failed; rename_all() does the same job.
excel <- suppressWarnings(suppressMessages(read_csv("COMAP_RollerCoasterData_2018.csv"))) %>%
  rename_all(tolower) %>%
  rename_all(~ gsub("([[:punct:]])|\\s+", "_", gsub("[()]", "", .)))

# Clean and derive columns, then keep only rows with a coaster name.
excel <- excel %>%
  mutate(height_feet = suppressWarnings(as.numeric(height_feet)), # make the character column numeric
         # Convert the "aa:bb:cc" duration string into total seconds.
         # NOTE(review): the first component is multiplied by 3600 (hours)
         # even though the column is named duration_min_sec - confirm.
         duration_sec = 60*60*as.numeric(substr(duration_min_sec, 1, 2)) + 60*as.numeric(substr(duration_min_sec, 4, 5)) + as.numeric(substr(duration_min_sec, 7, 8))) %>%
  select(-x20, -status, -duration_min_sec, -inversions_yes_or_no, city = city_region, state = city_state_region, country = country_region, region = geographic_region, year_opened = year_date_opened, inversions = number_of_inversions, height = height_feet, length = length_feet, speed = speed_mph, angle = vertical_angle_degrees) %>% # remove and rename
  filter(!is.na(name))

write_csv(excel, "cleaned_COMAP_data.csv") # write as a new CSV
|
2f11d9b9bb3a329755ade176ef2d1622de6ef24a
|
d3d2fbf3eaf7075f4679997c422fefaa5d524f67
|
/Dimensionality_Reduction.R
|
1954e21b82096709696a359149d76ce12e6f06f8
|
[] |
no_license
|
harshitsaini/Business-Analytics-Data-Mining
|
eaca89f755d15ecbb580be57e2ff95a725086a33
|
9a0613906c2a7f945cbd2a2855d07dc9b2e98778
|
refs/heads/master
| 2021-04-03T09:12:41.303022
| 2018-04-23T12:22:37
| 2018-04-23T12:22:37
| 125,231,756
| 15
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,164
|
r
|
Dimensionality_Reduction.R
|
# Toyota used-car data: load, derive Age, and compute summary statistics and
# the lower-triangular correlation matrix of the numeric columns.
# NOTE(review): file.choose() makes this script interactive-only.
library(xlsx)
df1= read.xlsx(file.choose(),1, header= T)
# Drop columns that are entirely NA.
df1= df1[,!apply(is.na(df1), 2,all)]
Age= 2017- df1$Mfg_Year
df1= cbind(df1,Age)
# Keep a backup of the full frame before dropping identifier columns.
dfb= df1
df1= df1[,-c(1,2,3)]
head(df1)
str(df1)
#Summary Statistics
# NOTE(review): countblank compares with "" on (mostly numeric) columns, so it
# counts empty strings, not NAs - confirm that is intended.
countblank= function(x) sum(x=="")
dfsum= data.frame(Average= sapply(df1[,-1],mean),Median= sapply(df1[,-1],median),
                  Min= sapply(df1[,-1],min),Max= sapply(df1[,-1],max),
                  Std= sapply(df1[,-1],sd),Count= sapply(df1[,-1],length),
                  Countblank=sapply(df1[,-1],countblank))
round(dfsum,digits = 2)
# Correlation matrix excluding non-numeric columns; blank out the upper
# triangle so only one copy of each pairwise correlation prints.
M= cor(df1[,-c(1,5,8)]);M
M[upper.tri(M)]=NA;M
print(round(M,digits = 2),na.print = "")
symnum(M)
#Reducing Categories
# For each distinct Age, compute the percentage of cars in each C_Price group
# and show the split as a stacked bar chart.
Age_groups= levels(as.factor(df1$Age))
Age_groups2= as.numeric(Age_groups)
C_PricebyAge1= NULL
C_PricebyAge2= NULL
#Group1 has less than Rs400000 cost
# Rest lies in Group2
# NOTE(review): growing vectors with c() in a loop is O(n^2); fine at this
# size but tapply()/prop.table() would do this directly.
for(x in Age_groups2) {
  C_PricebyAge1= c(C_PricebyAge1,
                   100* sum(df1$Age==x & df1$C_Price==0)/sum(df1$Age==x))
  C_PricebyAge2= c(C_PricebyAge2,
                   100* sum(df1$Age==x & df1$C_Price==1)/sum(df1$Age==x))
}
# One row per C_Price group, one column per Age level.
C_PricebyAge= matrix(c(C_PricebyAge1, C_PricebyAge2),nrow = 2,
                     ncol= length(Age_groups), byrow= T)
#palette(c("purple","green"))
barplot(C_PricebyAge, names.arg = Age_groups, xlab= "Age",
        legend.text = c("0","1"), args.legend = list(x="topright"),
        main= "Distribution of C_Price by Age",col = c("blue","green"),
        ylim = c(0,100), xlim = c(0,12))
# Separate example: quarterly sales as a time series, 2012 Q1 - 2015 Q4.
Sales= c(45,50, 55,100,51,56,61,125,60,65,70,145,68,74,79,165)
tsv = ts(Sales, start = c(2012,1),end= c(2015,4), frequency = 4)
plot(tsv, xlab= "Quarter", ylab= "Sales(in crores)", las=2 , ylim=c(0,180))
#BreakfastCereals.xlsx
# Breakfast-cereal data: normalize nutrient columns per serving weight, then
# run a two-variable PCA on potassium and fibre.
df2= read.xlsx(file.choose(),1,header = T)
df2= df2[, !apply(is.na(df2), 2,all)]
# NOTE(review): this overwrites the frame just read with an object `cereal`
# that is not defined in this script (it would error unless `cereal` exists
# in the session) - looks like a leftover debug line; confirm.
df2=cereal
dim(df2)
# Encode the categorical columns as numeric factor codes.
df2$vitamins= as.factor(df2$vitamins)
df2$vitamins= as.numeric(df2$vitamins)
df2$mfr= as.factor(df2$mfr)
df2$mfr= as.numeric(df2$mfr)
# NOTE(review): the two lines below repeat the mfr conversion; once mfr is
# numeric, re-factoring and re-converting is a no-op - likely duplication.
df2$mfr= as.factor(df2$mfr)
df2$mfr= as.numeric(df2$mfr)
# Row-wise total of the nutrient columns (excluding id/categorical columns).
# NOTE(review): `sum` here is a variable masking the name of base::sum for
# reading (function calls still resolve to the base function).
sum=NULL
for(x in 1:dim(df2)[1]) {
  csum=0
  for(y in df2[x,-c(1,9,11)]) {
    csum=csum+y
  }
  sum=c(sum,csum)
}
df2$weight= sum
# Express each nutrient as a percentage of the row total.
df3= as.data.frame(lapply(df2[,-c(1,9,11,12)], function(x){x=100*(x/df2$weight)}))
df3= cbind(df3,df2[,c(1,9,11)])
range(df3$potassium)
range(df3$fibre)
plot(df3$potassium, df3$fibre, xlab="POTASSIUM", ylab="FIBRE")
# Variance/covariance of the two variables and their total variance share.
v1= var(df3$potassium)
v2= var(df3$fibre)
c12= cov(df3$potassium,df3$fibre)
matrix(c(v1,c12,c12,v2),2,2,T)
cor(df3$potassium,df3$fibre)
v1+v2
100*v1/(v1+v2)
100*v2/(v1+v2)
#Principal Component Analysis
dfpca= df3[,c(8,5)]
mod= prcomp(dfpca)
#adding PC directions to the plot
slp= with(mod, rotation[2,1]/rotation[1,1])
int= with(mod,center[2]-slp*center[1])
#First principal component
abline(coef= c(int,slp))
mod$rotation
# The second PC is perpendicular to the first.
slp1= -1/slp
int1= with(mod,center[2]-slp1*center[1])
#Second principal component
abline(coef= c(int1,slp1))
mod$rotation
head(mod$x)
dfpca[1,]
# Recompute the first observation's PC1 score by hand as a sanity check.
First= mod$rotation[1,1]*(dfpca[1,1]-mean(dfpca[,1]))+
  mod$rotation[1,2]*(dfpca[1,2]-mean(dfpca[,2])); First
# Variance explained by each principal component.
vz1= var(mod$x[,1])
vz2= var(mod$x[,2])
vz1+vz2
100*vz1/(vz1+vz2)
100*vz2/(vz1+vz2)
|
4c09367b3e390883297b29db37cdb4003ccc52cd
|
8a1d46bd149a192ff81493a50a48c4bf544784ba
|
/single cell plassのコピー.R
|
f7d051b494918ab15e85e3c90d8294778e148333
|
[] |
no_license
|
kaede1021/datascience_traning
|
7155b502c5ef0f9041b01caf1e62e0f5ff77b9c3
|
2d1ae6d82dfc9025a31181f82a098d9517a76026
|
refs/heads/master
| 2022-12-04T13:04:40.374255
| 2020-08-18T13:59:29
| 2020-08-18T13:59:29
| 288,460,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,858
|
r
|
single cell plassのコピー.R
|
# Single-cell RNA-seq analysis (planarian "Plass" dataset) with Seurat:
# load the expression matrix, QC, normalize, find variable genes, PCA,
# clustering, UMAP/tSNE, and marker-gene detection.
# NOTE(review): setwd() in a script is fragile; prefer running from the
# project directory.
getwd()
setwd("./Desktop/single")
library(Seurat)
library(dplyr)
library(patchwork)
# For datasets that are already normalized, NormalizeData is skipped.
# This dataset is already normalized.
Data = read.table(gzfile("dge.txt.gz"),sep="\t") # read the gzipped file
Data[1:5,1:5]
Plass <- CreateSeuratObject(counts = Data, project = "Plass", min.cells = 3, min.features = 200)
GetAssayData(object = Plass, slot = "counts")[1:10,1:30] # counts = raw data
Plass[["RNA"]]@counts[1:10,1:10] # this accessor works too
GetAssayData(object = Plass, slot = "data")[1:10,1:30] # populated when the object was created; data = normalized data
GetAssayData(object = Plass, slot = "scale.data") # 0 x 0 matrix; scale.data = scaled data (not computed yet)
head(Plass$nCount_RNA)
head(Plass$nFeature_RNA)
# QC violin plots of counts and detected features per cell.
VlnPlot(Plass,features = c("nCount_RNA","nFeature_RNA"), ncol = 2)
# Manually log-normalize and push the result into the "data" slot.
Norm_Data = log(x = Data + 1)
Norm_Data[1:10,1:10]
Norm_Data <- as.matrix(Norm_Data)
Plass <- SetAssayData(object = Plass, slot = "data", new.data = Norm_Data)
Plass[["RNA"]]@data[1:10,1:10]
# Filter out likely doublets / high-complexity cells.
Plass <- subset(Plass, subset = nFeature_RNA < 2500 )
# FindVariableFeatures picks high-dispersion genes from the normalized data.
Plass <- FindVariableFeatures(object = Plass, mean.function = ExpMean ,
                              selection.method = "mvp",
                              dispersion.function = LogVMR,
                              mean.cutoff = c(0.01, 3),
                              dispersion.cutoff = c(0.4, Inf)) #, nfeatures = 2000) # no feature cap here? the default would cut at 2000
# NOTE(review): an out-of-memory message was observed at this step.
top10 <- head(VariableFeatures(Plass),10)
plot1 <- VariableFeaturePlot(Plass)
plot2 <- LabelPoints(plot = plot1, points = top10,repel = TRUE)
CombinePlots(plot = list(plot1,plot2)) # hard to render well on a small screen
# Scale all genes, then run PCA on the variable features.
all.genes <- rownames(Plass)
Plass <- ScaleData(Plass, features = all.genes)
GetAssayData(object = Plass, slot = "scale.data")[1:10,1:10]
# PCA on the genes selected by FindVariableFeatures.
Plass <- RunPCA(Plass, features = VariableFeatures(object = Plass),npcs = 50)
print(Plass[["pca"]], dims = 1:5, nfeatures = 5)
VizDimLoadings(Plass,dims = 1:3, reduction = "pca") # separation not very clean??
DimPlot(Plass, reduction = "pca")
DimHeatmap(Plass, dims=1, cells=500, balanced =TRUE)
DimHeatmap(Plass, dims=1:15, cells=500, balanced =TRUE)
# Assess PC significance (default: dims = 20).
Plass <- JackStraw(Plass, dims = 50, num.replicate = 100)
Plass <- ScoreJackStraw(Plass, dims = 1:50, score.thresh = 1e-05)
JackStrawPlot(Plass,dims = 1:50)
ElbowPlot(Plass,ndims = 50)
# Graph-based clustering on the top 50 PCs.
Plass <- FindNeighbors(Plass, dims = 1:50)
Plass <- FindClusters(Plass, resolution = 6) # the resolution parameter controls the number of clusters
# Embed with UMAP (and tSNE) based on the same PCs.
Plass <- RunUMAP(Plass, dims = 1:50)
DimPlot(Plass, reduction = "umap")
Plass <- RunTSNE(Plass,dims = 1:50)
DimPlot(Plass, reduction = "tsne")
# Marker genes for individual clusters.
cluster1.markers <- FindMarkers(Plass, ident.1 = 1, min.pct = 0.25)
head(cluster1.markers, n = 5)
cluster5.markers <- FindMarkers(Plass, ident.1 = 5, ident.2 = c(0, 3), min.pct = 0.25)
head(cluster5.markers, n = 5)
# Markers for every cluster at once.
plass.markers <- FindAllMarkers(Plass, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
plass.markers %>% group_by(cluster) %>% top_n(n = 2, wt = avg_logFC)
VlnPlot(Plass, features = c("dd-Smed-v6-6208-0", "dd-Smed-v6-11968-0"))
FeaturePlot(Plass, features = c("dd-Smed-v6-6208-0", "dd-Smed-v6-11968-0","dd-Smed-v6-432-0","dd-Smed-v6-19336-0","dd-Smed-v6-74478-0"))
# NOTE(review): stray scratch expression left in the script.
1*2
|
818f384176b9eb50aee9b44e9b2b4565da9cdf32
|
aac3d25b50bf5fcd9e78c8ad1d412c580503d665
|
/R/EC_gui.R
|
a9f7ce3fb36f7c7a0fba312b3563e0bd31eae77a
|
[] |
no_license
|
yusriy/fluxMPOB
|
e232eba9b30627d570636630288931a889e41948
|
98d3a7755f819d113527b965e75c91d022a4cd2c
|
refs/heads/master
| 2021-01-09T21:48:09.798017
| 2016-10-16T03:16:02
| 2016-10-16T03:16:02
| 48,431,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
r
|
EC_gui.R
|
## GUI to import eddy covariance data

#### Preliminaries ####
library(gWidgets)
library(gWidgetsRGtk2)

# Top-level window.
# NOTE(review): the window title and filter label say "Tab delimited" but the
# handler reads csv files - confirm which format is intended.
win <- gwindow("Tab delimited file upload")
# Group holding the destination-variable widgets.
grp_name <- ggroup(container = win)
# Label for the entry box below.
lbl_data_frame_name <- glabel("Data frame to save data to: ", container = grp_name)
# Edit box holding the name of the global variable the data is assigned to.
txt_data_frame_name <- gedit("dfr", container = grp_name)
# Separate group for the upload button.
grp_upload <- ggroup(container = win)
# Button handler: prompt for a file, read it with read.csv, and assign the
# result to the user-chosen variable in the global environment.
btn_upload <- gbutton(
  text = "Upload csv file", container = grp_upload,
  handler = function(h, ...) {
    gfile(
      text = "Upload csv file",
      type = "open",
      # BUG FIX: was `action = ifelse("read.csv")`, which errors when the
      # button is pressed because ifelse() requires test/yes/no arguments;
      # gfile() expects the reader function's name here.
      action = "read.csv",
      handler = function(h, ...)
      {
        tryCatch(
          {
            # Sanitize the requested name so assign() always gets a valid one.
            data_frame_name <- make.names(svalue(txt_data_frame_name))
            the_data <- do.call(h$action, list(h$file))
            assign(data_frame_name, the_data, envir = globalenv())
            svalue(status_bar) <-
              paste(nrow(the_data), "records saved to variable", data_frame_name)
          },
          error = function(e) svalue(status_bar) <- "Could not upload data"
        )
      },
      filter = list(
        "Tab delimited" = list(patterns = c("*.csv")),
        "All files" = list(patterns = c("*"))
      )
    )
  })

# If we want to create a checkbox to configure file settings
#use_comma_for_decimal <- function() {
#  unname(Sys.localeconv()["decimal_point"] == ",")
#}
#chk_eurostyle <- gcheckbox(
#  text = "Use comma for decimal place",
#  checked = use_comma_for_decimal(),
#  container = grp_upload
#)

# Status bar for user feedback.
status_bar <- gstatusbar("", container = win)
# Auto-generated widget for lm(), demonstrating ggenericwidget().
lmwidget <- ggenericwidget(lm)
|
2f516be312fa240e8c05fb63831ff01deb56c9cb
|
df1d2b978c77d0934b02fbaf88c1730a8a82ed3e
|
/R/mysd2_250918singleChi.R
|
4a3fdb83aba4b8d43e018522d82c0d0b9120effe
|
[] |
no_license
|
portokalh/adforesight
|
3dc0f5f692014cbb6cc373634fb6caca9ab191fa
|
d43c1e4c35459122e2ae55169a7a77a4943f8e4d
|
refs/heads/master
| 2020-04-05T13:03:34.866181
| 2018-12-20T16:14:47
| 2018-12-20T16:14:47
| 95,047,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,066
|
r
|
mysd2_250918singleChi.R
|
#load libs
library( knitr )
library(ANTsR)
library(visreg)
library(robustbase)
library(groupdata2)
library(ggplot2)
library(caret)
require(broom)
#Manganese Enhanced MRI predicts cognitive performnace Alex Badea and Natalie Delpratt
#reuses results of sparsedecom2 for best performing fold Alex Badea 7 dept 2018
#uses RMSE as prediction error, rather than goodness of fit error 30 August 2018
#legacy from Natalie to rememebr paths
#setwd( '/Users/omega/alex/adforesight/' )
#output.path <- '/Users/omega/alex/adforesight/mydata/outdata/sd2_projall_noscale/' #ND
mypath<-'/Volumes/CivmUsers/omega/alex/GitHub/adforesight/'
mypath <- '/Users/alex/GitHub/adforesight/' #flavors of serifos
mypath <- '/Users/alex/Documents/GitHub/adforesight/' #flavors of ithaka
setwd(mypath)
source(paste(mypath, '/R/myr2score.R',sep=''))
mysp <- 0.05 #0.05 # 0.01 # 0.05 #0.2 #0.05 # was 0.005 sparseness
#if mynvecs is set to one eig1 neetds to be transposed
mynvecs <- 2 # 5 vecs is better # 10 shows ventral thalamic nuclei 10 # 50 #put back to 50 alex #tested with 10
myell1 <- 1 # make this smaller 0.5, Brian says it is not what i think just switch between l0 and l1
myits<-15 #15 #5
mysmooth<-0.1 #0.1 # 0.1 #0.01 #0 # was 0.01
myclus<-250 #was 250
# Load in Behavior and Imaging Data
behavior <- read.csv('./mydata/All_Behavior.csv')
labled.set <-read.csv('./mydata/legendsCHASS2symmetric.csv')
labeled.brain.img <- antsImageRead('./mydata/MDT_labels_chass_symmetric.nii.gz')
mask <- antsImageRead('./mydata/MDT_mask_e3.nii')
mask <- thresholdImage( mask, 0.1, Inf )
#read all 3 contrast files
mang_files <- list.files(path = "./mydata/imdata/", pattern = "T2_to_MDT",full.names = T,recursive = T)
jac_files <- list.files(path = "./mydata/imdata/", pattern = "jac_to_MDT",full.names = T,recursive = T)
chi_files <- list.files(path = "./mydata/imdata/", pattern = "X_to_MDT",full.names = T,recursive = T)
########################################
#build a place to save results
extension<-paste('sd2SINGLECHI', 'sp', toString(mysp), 'vecs', toString(mynvecs), 's', toString(mysmooth),'clus', toString(myclus), sep='') # 'JACsp0p005s0'
output.path <- paste(mypath,'/mydata/outdata_sd2/',extension, '/', sep='') #sd2_projall_noscale/'
if (dir.exists(output.path)){ 1} else {dir.create(output.path, recursive=TRUE)}
#pick yourcontrast
mang_mat <- imagesToMatrix(chi_files,mask)
#######################################
#let things flow from here
mygroup <- behavior$genotype[1:24]
myindex <- c(1:24)
mydfb <- data.frame("mysubject_index" = factor(as.integer(myindex)),"mygenotype"=mygroup)
kable(mydfb, align = 'c')
set.seed(1)
k<-4
performances <- c()
myBICs <- c()
myR2score<-c()
myps<-c()
gfit<-c()
###build k models and retain the best performing one in terms of RMSE2
#considet using LOOCV to replace folds, but results may be unstable
#k<-length(rows.train)-1
k<-4
set.seed(1)
res_train<-createFolds(behavior$genotype,k, list = TRUE, returnTrain = TRUE)
set.seed(1)
res_test<-createFolds(behavior$genotype,k)
for (myfold in 1:k){
# for (myfold in 3){
gc(verbose = TRUE, reset = FALSE)
print('myfold:',myfold)
print(myfold)
rows.train<-as.integer(unlist(res_train[myfold]))
rows.test<-as.integer(unlist(res_test[myfold]))
mang.train <- mang_mat[rows.train, ]
mang.test <- mang_mat[rows.test, ]
behav.train <- behavior[rows.train, ]
behav.test <- behavior[rows.test, ]
dist4.train <- behav.train[,'d4']
dist4.test <- behav.test[,'d4']
start_time <- Sys.time()
#negative sparseness is what? allows for negative weights!
myeig2_mang<-sparseDecom2(inmatrix = list(mang.train,as.matrix(behav.train$d4)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
#myeig2_mang<-sparseDecom(inmatrix = mang.train,its = myits, cthresh=c(myclus), smooth = mysmooth, mycoption = 0, sparseness = c(mysp), nvecs = mynvecs, verbose=1, statdir=paste(output.path2))
end_time <- Sys.time()
t1time<-end_time - start_time
print(t1time)
imgpredtrain_mang<-mang.train %*% (myeig2_mang$eig1)
imgpredtest_mang<-mang.test %*% (myeig2_mang$eig1)
####start do single alex
ncolcombo<-ncol( imgpredtrain_mang)
projs.train <- data.frame(dist4.train, imgpredtrain_mang) # column combind the behavior wth the projections
colnames(projs.train) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
projs.test <- data.frame(dist4.test, imgpredtest_mang ) # column combind the behavior wth the projections
colnames(projs.test) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
###end do single alex
mylm <- lm('Dist_4 ~ .', data=projs.train) # behavior correlation with projections
summylm<-summary(mylm)
summanovalm<-anova(mylm)
rSquared <- summary(mylm)$r.squared
pVal <- anova(mylm)$'Pr(>F)'[1]
mylmsummary<-glance(mylm)
pval1<-mylmsummary$p.value
e2i_mang<-matrixToImages((t(myeig2_mang$eig1)),mask = mask)
for (i in 1:mynvecs){
antsImageWrite(e2i_mang[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_Mn.nii.gz',sep=''))
# antsImageWrite(e2i_jac[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_jac.nii.gz',sep=''))
# antsImageWrite(e2i_chi[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_chi.nii.gz',sep=''))
}
distpred4 <- predict.lm(mylm, newdata=projs.test) # based on the linear model predict the distances for the same day
glance(cor.test(projs.test$Dist_4,(distpred4)))
glance(cor.test(distpred4,dist4.test))
#remove next lines for LOOCV
mymodel<-lm(distpred4~dist4.test)
modsum <-summary(mymodel)
r2 <- modsum$r.squared #modsum$adj.r.squared
# my.p <- modsum$coefficients[2,4]
RMSE2<-sqrt(mean((distpred4 - dist4.test)^2))
performances[myfold]<-RMSE2
myR2score[myfold]<-myr2score(distpred4,dist4.test)
myps[myfold]<-pval1<-mylmsummary$p.value #my.p
myBICs[myfold] <- BIC(mylm)
###
mytheme <- theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "white"))
myplot<- visreg(mymodel, gg=TRUE)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
#xaxs="i", yaxs="i",
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid')) +
ggtitle(paste("RMSE=",formatC(RMSE2,digits=2, format="f"), "p=",formatC(myps[myfold],digits=4, format="f"), " BIC=", formatC(BIC(mymodel),digits=2, format="f")))
ggsave(paste(output.path,extension,'Mnfold',toString(myfold),'.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
save(mylm, file=paste(output.path , "model2", toString(myfold), ".Rdata", sep=''))
save(mymodel, file=paste(output.path , "behavmodelsd2", toString(myfold), ".Rdata", sep=''))
myperf<-data.frame(rbind(distpred4,dist4.test),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'distances4_pv_fold' , toString(myfold), '.csv',sep=''))
myperf<-data.frame(c(RMSE2,myR2score[myfold],myps[myfold],myBICs[myfold], r2),row.names=c("RMSE2","R2score","p","BIC", "R2"))
write.csv(myperf, file = paste(output.path ,extension,'distances4_stats_fold' , toString(myfold), '.csv',sep=''))
gc(verbose = TRUE, reset = FALSE)
}
###################################
##### for validation now ####
###################################
# Select the fold with the lowest cross-validation error and reload its model.
# NOTE(review): assumes `performances`, `mang_mat`, `behavior`, `mask`,
# `myr2score`, `output.path`, `extension` and the my* tuning parameters are
# defined earlier in this script -- confirm before running standalone.
myminfold<-which(performances == min(performances), arr.ind = TRUE)
myfold<-myminfold
load(file=paste(output.path , "model2", toString(myfold), ".Rdata", sep='')) # loads mylm
ncolcombo<-mynvecs
# Validation set: the first 24 rows of the angle matrix / behavior table.
rows.valid <- c(1:24)
mang.valid <- mang_mat[rows.valid, ]
#read eigenregions for best myfold
#paste(output.path,extension,'sd2eig' ,as.character(i), 'fold', toString(myfold), '.nii.gz',sep='')
eig_files_Mn <- list.files(path = paste(output.path,sep=''), pattern=paste('*', 'fold', toString(myfold), '_Mn.nii.gz', sep=''),full.names = T,recursive = T)
eig_mat_Mn <- imagesToMatrix(eig_files_Mn,mask)
# Project validation images onto the selected fold's eigenregions.
imgmat_mang_valid <- mang.valid %*% t(eig_mat_Mn) # [24,numvox] [nvecsx3,numvox]
dist4.valid <- behavior[rows.valid, 'd4']
projs.valid <- data.frame(cbind(dist4.valid,imgmat_mang_valid))
colnames(projs.valid) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
# Predict held-out distances with the reloaded model, then regress
# predicted on observed to summarize validation performance.
distpred <- predict.lm(mylm, newdata=projs.valid)
mymodel<-lm(distpred~dist4.valid)
RSS <- c(crossprod(mymodel$residuals))
MSE <- RSS / length(mymodel$residuals)
RMSE <- sqrt(MSE)
RMSE2<-sqrt(mean((distpred - dist4.valid)^2))
mysummary <-summary(mymodel)
r2pred <- mysummary$adj.r.squared
ppred <- mysummary$coefficients[2,4]
max(behavior$d4[1:24])
# Validation-set performance metrics.
RMSE_valid<-RMSE2
BIC_valid <- BIC(mymodel)
R2score_valid<-myr2score(distpred,dist4.valid)
res_cor<-cor.test(dist4.valid,distpred)
# Plain and annotated regression plots, saved as PDFs.
myplot<- visreg(mymodel, gg=TRUE, scale='linear', plot=TRUE, xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
myplot2<-plot(myplot,xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
ggsave(paste(output.path,extension,'MnValidationSet',toString(myfold),'sd2plainjane.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))+
#xlim(0,1200)+ylim(0,1200)+coord_cartesian(xlim = c(1200, 1200),ylim = c(1200, 1200)) + coord_equal()+
ggtitle(paste("RMSE=",formatC(RMSE_valid,digits=2, format="f"),
# "R2score=",formatC(R2score_valid,digits=2, format="f"),
# " R2=", formatC(r2pred,digits=2, format="f"),
" p= ", formatC(ppred,digits=4, format="f"),
" BIC=", formatC(BIC_valid,digits=2, format="f")))
ggsave(paste(output.path,extension,'MnValidationSet',toString(myfold),'sd2.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
# Per-projection correlations of each eigenregion projection with the
# observed validation distances; column 1 is overwritten with the
# predicted-vs-observed correlation.
numcols<-dim(projs.valid)[2]
rd4 <- t(t(dist4.valid)[rep(1,c(3*mynvecs)),])
pcor<-c(numcols)
corval<-c(numcols)
for (i in 1:numcols) {
mypcor<-cor.test(t(dist4.valid),t(projs.valid[,i]))
pcor[i]<-mypcor$p.value
corval[i]<-mypcor$estimate
}
rt<-glance(cor.test(dist4.valid,distpred))
corval[1]<-rt$estimate
pcor[1]<-rt$p.value
mycorsdf_eig2d4<-data.frame(rbind(pcor,corval),row.names=c("pcor","cor"))
colnames(mycorsdf_eig2d4)<-c('total', paste0('Proj', c(1:ncolcombo)))
write.csv(mycorsdf_eig2d4, file = paste(output.path ,extension,'fold', toString(myfold), 'd4corsvalidsd2.csv',sep=''))
# Save predicted vs. observed distances for the validation set.
myperf<-data.frame(rbind(distpred,dist4.valid),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'fold', toString(myfold), 'distances4_validsd2.csv',sep=''))
# Run the sparse decomposition on the validation set and write each resulting
# eigenvector out as a NIfTI image.
myeig2_mang_valid<-sparseDecom2(inmatrix = list(mang.valid,as.matrix(dist4.valid)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
imgmat_mang_valid<-mang.valid %*% (myeig2_mang_valid$eig1)
# BUG FIX: the original converted `myeig2_mang$eig1` to images, but that object
# comes from a commented-out decomposition and is not defined here; use the
# validation decomposition computed just above.
e2i_mang_valid<-matrixToImages(t(myeig2_mang_valid$eig1),mask = mask)
for (i in seq_len(mynvecs)){
antsImageWrite(e2i_mang_valid[[i]],paste(output.path,extension,'full_eig' ,as.character(i), '_Mn.nii.gz',sep=''))
}
#redo fold min or save models
# #imgmat_mang_valid <- mang.valid %*% t(e2i_mang_valid) # [24,numvox] [nvecsx3,numvox]
# projs.valid <- data.frame(cbind(dist4.valid,imgmat_mang_valid))
# colnames(projs.valid) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
# distpred <- predict.lm(mylm, newdata=projs.valid)
# mymodel<-lm(distpred~dist4.valid)
# RSS <- c(crossprod(mymodel$residuals))
# MSE <- RSS / length(mymodel$residuals)
# RMSE <- sqrt(MSE)
# RMSE2<-sqrt(mean((distpred - dist4.valid)^2))
# mysummary <-summary(mymodel)
# r2pred <- mysummary$adj.r.squared
# ppred <- mysummary$coefficients[2,4]
#
#
# max(behavior$d4[1:24])
# RMSE_valid<-RMSE2
# BIC_valid <- BIC(mymodel)
# R2score_valid<-myr2score(distpred,dist4.valid)
# res_cor<-cor.test(dist4.valid,distpred)
#
# myplot<- visreg(mymodel, gg=TRUE, scale='linear', plot=TRUE, xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
# myplot2<-plot(myplot,xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
# ggsave(paste(output.path,extension,'newMnValidationSetFULL',toString(myfold),'sd2plainjane.pdf',sep=''), plot = last_plot(), device = 'pdf',
# scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
# myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
# panel.background = element_rect(fill = "transparent", colour = NA),
# axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
# axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))+
# #xlim(0,1200)+ylim(0,1200)+coord_cartesian(xlim = c(1200, 1200),ylim = c(1200, 1200)) + coord_equal()+
# ggtitle(paste("RMSE=",formatC(RMSE_valid,digits=2, format="f"),
# # "R2score=",formatC(R2score_valid,digits=2, format="f"),
# # " R2=", formatC(r2pred,digits=2, format="f"),
# " p= ", formatC(ppred,digits=4, format="f"),
# " BIC=", formatC(BIC_valid,digits=2, format="f")))
#
# ggsave(paste(output.path,extension,'newMnValidationSetFULL',toString(myfold),'sd2.pdf',sep=''), plot = last_plot(), device = 'pdf',
# scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
# numcols<-dim(projs.valid)[2]
# rd4 <- t(t(dist4.valid)[rep(1,c(3*mynvecs)),])
# pcor<-c(numcols)
# corval<-c(numcols)
#
#
# for (i in 1:numcols) {
# mypcor<-cor.test(t(dist4.valid),t(projs.valid[,i]))
# pcor[i]<-mypcor$p.value
# corval[i]<-mypcor$estimate
# }
#
# rt<-glance(cor.test(dist4.valid,distpred))
# corval[1]<-rt$estimate
# pcor[1]<-rt$p.value
#
#
# mycorsdf_eig2d4<-data.frame(rbind(pcor,corval),row.names=c("pcor","cor"))
# colnames(mycorsdf_eig2d4)<-c('total', paste0('Proj', c(1:ncolcombo)))
# write.csv(mycorsdf_eig2d4, file = paste(output.path ,extension,'FULL', 'd4corsvalidsd2.csv',sep=''))
#
# myperf<-data.frame(rbind(distpred,dist4.valid),row.names=c("d_predicted","d_valid"))
# write.csv(myperf, file = paste(output.path ,extension,'FULL', 'distances4_validsd2.csv',sep=''))
|
4a338bb00d46381519a5622e17b5b99ff62858ad
|
40e8b14246b5cc4b587f4d88121474720f9f1c39
|
/tests/testthat.R
|
48c57c7d8a6e48e91bd54d3d2b2bbf3e10ed2814
|
[
"MIT"
] |
permissive
|
coolbutuseless/snowcrash
|
644bbfaf74f101df6f919572653b3150e1ded9a7
|
9b1472784c03d907b552e9c4172a2b7137df3402
|
refs/heads/master
| 2022-12-19T16:03:44.755180
| 2020-09-27T03:35:32
| 2020-09-27T03:35:32
| 294,376,185
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test under
# tests/testthat for the snowcrash package.
library(testthat)
library(snowcrash)
test_check("snowcrash")
|
5b463a0efc5a74d4683cba3a9e591f024cbc7ef3
|
7e3ce11bc22c009a399a36490ed962836d6aaff6
|
/signals/hk_stock/hk_stock_signal_20day_Function_multil.R
|
8810675b5fa0733212cea200d01f1ad145a52099
|
[] |
no_license
|
tedddy/Learn_R
|
2340a1f73e0ab4a7e07b5aa97181bc42e7acd22f
|
a04cd823fb382b5457e3b043ec346b1ee5ab1724
|
refs/heads/master
| 2021-01-17T13:33:36.685310
| 2016-10-17T23:26:46
| 2016-10-17T23:26:46
| 25,558,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,972
|
r
|
hk_stock_signal_20day_Function_multil.R
|
# Load required packages
library(quantmod)
# Compute 20-day breakout buy/sell signals for one Hong Kong stock code.
# Downloads the last ~30 calendar days of prices from Yahoo and returns a
# one-row data frame: ticker, buy level (20-day high), latest close, sell
# level (20-day low), and the percentage gap of buy/sell levels vs. the close.
signal_BuySell <- function (code) {
tckr = paste(code,"HK",sep=".")
# Set the start and end dates
Sys.Date()-30 -> start
Sys.Date()-1 -> end
# Download data from Yahoo
hk <- getSymbols(tckr, from = start, to = end, auto.assign = FALSE)
# Extract the Close column
hk.C <- hk[,4]
# Latest day's closing price
hk_close <- tail(hk.C, n=1)
hk_close_numeric <- as.numeric(hk_close[1])
# Extract the High column
hk.H <- hk[,2]
# Rolling 20-day maximum time series
hk_high <- runMax(hk.H, n = 20, cumulative = FALSE)
# Buy point = most recent 20-day high
hk_buy <- tail(hk_high, n=1)
# Show the buy point
# View(hk_buy)
hk_buy_numeric <- as.numeric(hk_buy[1])
# Extract the Low column
hk.L <- hk[,3]
# Rolling 20-day minimum time series
hk_low <- runMin(hk.L, n = 20, cumulative = FALSE)
# Sell point = most recent 20-day low
hk_sell <- tail(hk_low, n=1)
# Show the sell point
# View(hk_sell)
hk_sell_numeric <- as.numeric(hk_sell[1])
# hk_signals <- merge(hk_buy,hk_sell)
# Show the buy and sell points
# View(hk_signals)
# NOTE(review): 30 calendar days yields only ~20 trading days, so the 20-day
# rolling window may be NA if the market was closed often -- confirm intended.
Lst <- data.frame(code = tckr, buy = hk_buy_numeric, close = hk_close_numeric, sell = hk_sell_numeric, buy_diff = 100*(hk_buy_numeric-hk_close_numeric)/hk_close_numeric, sell_diff = 100*(hk_sell_numeric-hk_close_numeric)/hk_close_numeric)
}
# Fetch buy/sell signals for a vector of ticker codes and display two sorted
# views of the result: one ordered by sell_diff (column 6) then buy_diff
# (column 5), and one ordered by buy_diff then sell_diff.
view_results <- function (tickers) {
# Build all rows at once instead of growing the data frame with rbind()
# inside a loop (the original was O(n^2) in the number of tickers).
signal <- do.call(rbind, lapply(tickers, signal_BuySell))
# View(signal_current)
signal_ordered_sell <- signal[ order(signal[,6], signal[,5]), ]
View(signal_ordered_sell)
signal_ordered_buy <- signal[ order(signal[,5], signal[,6]), ]
View(signal_ordered_buy)
}
# Ticker watch lists. The first code in each vector is quoted so the whole
# vector is character, preserving leading zeros in HK stock codes.
current <- c("0111", "0218", "0388", "0665", "0998", "1359", "1375", "1788", "3818", "6818", "6837")
sold <- c("0323", "0338", "0493", "0588", "0670", "0981", "1065", "1157", "1266", "1618", "2357", "2727","2866" )
bank <- c("0939", "0998", "1288", "1393", "1988", "3328", "3618", "3968", "3988", "6818")
bank_2 <- c("0023", "0011", "1111", "2356")
financial <- c("0111", "0188", "0218", "0227", "0290", "0388", "0665", "0717", "0812", "0821", "0851", "0952", "1359", "1375", "1788", "6030", "6837", "6881")
insurance <- c("1336", "2318", "2601", "2628", "6837")
watch_RealEstate <- c("0119", "0123", "0152", "0270", "0272", "0283", "0291", "0363", "0392", "0410", "0588", "0604", "0813", "0817", "0917", "0960", "1109", "1138", "1668", "1813", "2007", "3333", "3377", "3380", "6837") ##, "3383", "3900": these two cannot be fetched.
# BUG FIXES: the original called view_results() before any of the ticker
# vectors were defined, and referenced the undefined names `RealEstate` and
# `watch_financial` (the vectors are `watch_RealEstate` and `financial`).
# The redundant `require(quantmod)` was dropped -- quantmod is loaded with
# library() at the top of this file.
view_results(current)
view_results(bank)
view_results(insurance)
view_results(watch_RealEstate)
view_results(financial)
21d7f1c9158911d8ea7b5ed2aa0762c2167136b8
|
9c15de6799c592361701427ee10d128ebc499e3d
|
/C03/W04/quiz.R
|
6ed783f239e3a933cc50a3b1f7abeb6bb20839a5
|
[] |
no_license
|
mahmoudjahanshahi/datasciencecoursera
|
2136d5eea723c2f09b82069131ccd23dbc2f0687
|
350c40de3616aa25775bb27582ae04d0d8a630c6
|
refs/heads/master
| 2023-03-02T04:41:58.646229
| 2021-02-11T11:57:52
| 2021-02-11T11:57:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,806
|
r
|
quiz.R
|
#Q01
# Download the 2006 ACS housing survey and split the variable names on "wgtp";
# the quiz asks for element 123 of the resulting list.
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
filePath <- "./C03/W04/src/communities.csv"
download.file(fileURL, destfile = filePath ,method = "curl")
df <- read.csv(filePath)
strsplit(names(df), "wgtp")[123]
#Q02
# GDP table: skip 3 header rows, keep rank/country/GDP, then average the GDP
# values after stripping the thousands separators.
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
filePath <- "./C03/W04/src/gdp.csv"
download.file(fileURL, destfile = filePath ,method = "curl")
library(dplyr)
library(readr)
gdp <- read.csv(filePath, skip = 3)
gdp <- gdp %>% select(X, Ranking, Economy, US.dollars.) %>%
rename(code = X, country = Economy, gdp = US.dollars.)
gdp <- gdp[2:191,] %>% mutate(Ranking = parse_number(as.character(Ranking)))
mean(parse_number(gsub(",", "", gdp$gdp)))
#Q03
# Countries whose name begins with "United".
grep("^United",gdp$country)
#Q04
# Merge GDP and education data on country code, then find countries whose
# fiscal year ends in June.
file1URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
file1Path <- "./C03/W04/src/gdp.csv"
file2URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
file2Path <- "./C03/W04/src/educational.csv"
download.file(file1URL, destfile = file1Path ,method = "curl")
download.file(file2URL, destfile = file2Path ,method = "curl")
edu <- read.csv(file2Path)
gdp <- read.csv(file1Path, skip = 3)
gdp <- gdp %>% select(X, Ranking, Economy) %>%
rename(code = X, country = Economy)
gdp <- gdp[2:191,] %>% mutate(Ranking = parse_number(as.character(Ranking)))
merged <- merge(gdp, edu, by.x = "code", by.y = "CountryCode", all = FALSE) %>%
select(code, Ranking, country, Special.Notes) %>% arrange(desc(Ranking))
grep("Fiscal year end: June",merged$Special.Notes, value = TRUE)
#Q05
# Amazon stock: number of trading days in 2012, and how many fell on a Monday
# (lubridate wday() == 2).
library(quantmod)
amzn = getSymbols("AMZN",auto.assign=FALSE)
sampleTimes = index(amzn)
sample2012 <- sampleTimes[year(sampleTimes) == 2012]
length(sample2012)
length(sample2012[wday(sample2012) == 2])
|
6b2922179e2017ce586549648468e214d3b12f0e
|
6b629e8bc4bb0b1c93bb217cb218af5ae5e587c8
|
/MR/mibiogenOct2020/mibiogen-ukbiobank.R
|
795584edb627b5ff5a366469ca87a051ce490a8c
|
[] |
no_license
|
DashaZhernakova/umcg_scripts
|
91b9cbffea06b179c72683145236c39f5ab7f8c2
|
1846b5fc4ae613bec67b2a4dd914733094efdb23
|
refs/heads/master
| 2023-08-31T10:45:17.057703
| 2023-08-23T14:47:43
| 2023-08-23T14:47:43
| 237,212,133
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,807
|
r
|
mibiogen-ukbiobank.R
|
# Command-line argument: path to one MiBioGen per-bacterium summary-stats file.
args <- commandArgs(trailingOnly = TRUE)
library(TwoSampleMR)
library(MRInstruments)
library(MRPRESSO)
source("/groups/umcg-lld/tmp03/umcg-dzhernakova/umcg_scripts/MR/run_MR.R")
mibiogen_path <- args[1]
bact <- basename(mibiogen_path)
# Genome-wide significance threshold for selecting mbQTL instruments.
pval_thres = 5e-08
#pval_thres = 1e-05
out_filebase <- paste0("/groups/umcg-lld/tmp03/umcg-dzhernakova/MR/results/mibiogenOct2020/per_bact/", pval_thres)
print(paste0("Taking mbQTLs from: ", mibiogen_path))
print(paste0("p-value threshold for mbQTLs when used as exposure: ",pval_thres))
print(paste0("Writing to this folder:",out_filebase))
res_table <- data.frame()
# Pre-formatted UK Biobank phenotype GWAS files (Neale lab release).
file.names <- dir("/groups/umcg-lld/tmp03/umcg-dzhernakova/MR/data/UKB/original_files_Neale/UKB_formatted/", pattern =".for_MR.txt.gz")
mibiogen_table <- read.table(gzfile(mibiogen_path), header = T, sep = "\t", as.is = T, check.names = F)
# Exposure instruments: mbQTLs passing the significance threshold.
exp_table <- mibiogen_table[mibiogen_table$pval < as.numeric(pval_thres),]
#for(i in 1:length(file.names)){
# print(file.names[i])
# pheno_table <- read.table(gzfile(paste0("/groups/umcg-lld/tmp03/umcg-dzhernakova/MR/data/UKB/",file.names[i])), header = T, sep = "\t", as.is = T, check.names = F)
# #
# # mibiogen -> GWAS
# #
# if (nrow(exp_table) > 0){
# out_dat = NULL
# exp_dat = NULL
# tryCatch({
# out_dat <- format_data(pheno_table, snps = exp_table$SNP, type = "outcome")
# exp_dat <- clump_data(format_data(exp_table, snps = out_dat$SNP, type = "exposure"))
# },error=function(e) NULL)
# if (!is.null(out_dat) & !is.null(exp_dat)){
# res <- run_mr(exp_dat, out_dat)
# if (!is.null(res)){
# res_table <- rbind(res_table, res)
# }
# }
# }
#}
#res_table$filter_before_BH <- (res_table$nsnp > 2 | "cis" %in% res_table$type) & res_table$egger_intercept_pval > 0.05 & res_table$heterogeneity_Q_pval > 0.05
#write.table(res_table, file = paste0(out_filebase, "/mibiogen-UKB.Fstat.", pval_thres, ".", bact), sep = "\t", quote = F, col.names = NA)
res_table <- data.frame()
#
# GWAS - mibiogen
#
# MR in the reverse direction: each UKB phenotype GWAS is the exposure, the
# microbiome trait is the outcome. Errors in formatting/clumping leave
# out_dat/exp_dat as NULL and the phenotype is silently skipped.
print ("GWAS -> mibiogen")
for(i in 1:length(file.names)){
print(file.names[i])
pheno_table <- read.table(gzfile(paste0("/groups/umcg-lld/tmp03/umcg-dzhernakova/MR/data/UKB/original_files_Neale/UKB_formatted/",file.names[i])), header = T, sep = "\t", as.is = T, check.names = F)
# Instruments: genome-wide significant SNPs of the phenotype GWAS.
pheno_table_subs <- pheno_table[pheno_table$pval < 5e-08,]
out_dat = NULL
exp_dat = NULL
tryCatch({
out_dat <- format_data(mibiogen_table, snps = pheno_table_subs$SNP, type = "outcome")
exp_dat <- clump_data(format_data(pheno_table_subs, snps = out_dat$SNP, type = "exposure"))
},error=function(e) NULL)
if (!is.null(out_dat) & !is.null(exp_dat)){
res <- run_mr(exp_dat, out_dat)
if (!is.null(res)){
res_table <- rbind(res_table, res)
}
}
}
#res_table$BH_qval <- p.adjust(res_table$pval, method = "BH")
#res_table$filter_before_BH <- (res_table$nsnp > 2 | "cis" %in% res_table$type) & res_table$egger_intercept_pval > 0.05 & res_table$heterogeneity_Q_pval > 0.05
write.table(res_table, file = paste0(out_filebase, "/UKB-mibiogen.", pval_thres, ".", bact) , sep = "\t", quote = F, col.names = NA)
#
# mibiogen -> GWAS with a less stringent threshold 1e-5
#
# Forward-direction MR rerun with a relaxed instrument threshold; results go
# to a separate per-threshold output folder.
pval_thres = 1e-5
out_filebase <- paste0("/groups/umcg-lld/tmp03/umcg-dzhernakova/MR/results/mibiogenOct2020/per_bact/", pval_thres)
print(paste0("Taking mbQTLs from: ", mibiogen_path))
print(paste0("p-value threshold for mbQTLs when used as exposure: ",pval_thres))
res_table <- data.frame()
exp_table <- mibiogen_table[mibiogen_table$pval < as.numeric(pval_thres),]
for(i in 1:length(file.names)){
print(file.names[i])
pheno_table <- read.table(gzfile(paste0("/groups/umcg-lld/tmp03/umcg-dzhernakova/MR/data/UKB/original_files_Neale/UKB_formatted/",file.names[i])), header = T, sep = "\t", as.is = T, check.names = F)
#
# mibiogen -> GWAS
#
if (nrow(exp_table) > 0){
out_dat = NULL
exp_dat = NULL
tryCatch({
out_dat <- format_data(pheno_table, snps = exp_table$SNP, type = "outcome")
exp_dat <- clump_data(format_data(exp_table, snps = out_dat$SNP, type = "exposure"))
},error=function(e) NULL)
if (!is.null(out_dat) & !is.null(exp_dat)){
res <- run_mr(exp_dat, out_dat)
if (!is.null(res)){
res_table <- rbind(res_table, res)
}
}
}
}
#res_table$filter_before_BH <- (res_table$nsnp > 2 | "cis" %in% res_table$type) & res_table$egger_intercept_pval > 0.05 & res_table$heterogeneity_Q_pval > 0.05
write.table(res_table, file = paste0(out_filebase, "/mibiogen-UKB.", pval_thres, ".", bact), sep = "\t", quote = F, col.names = NA)
|
e8d11d1fc93e2bb584537477e97bd9a8e5c91a5f
|
1e1939479e8014f48e7362a27be9dfc68719c6e8
|
/R_packages/quantify/pkg/R/qDSMAStatus.R
|
6536b722766389b39682156444157368e881bf80
|
[
"MIT"
] |
permissive
|
wotuzu17/tronador
|
bffec07586340bc5320d3baf092ba6388a6ee98c
|
8d55d26ab1accd0499e6264674408304a70d5e1b
|
refs/heads/master
| 2021-01-10T00:59:45.599781
| 2015-04-25T07:50:22
| 2015-04-25T07:50:22
| 32,209,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
r
|
qDSMAStatus.R
|
# function calculates quantile status of first derivative of SMA of
# a rollingwindow
# currently, only 3 levels are supported.
# Returns, per bar, 0/1/2 depending on whether the SMA slope is below the
# rolling 25th percentile, between the quartiles, or above the rolling 75th
# percentile. Output column is named "FSMA<n>".
# Depends on quantmod/TTR (SMA, Cl) and zoo (rollapply).
qDSMAStatus <- function (TS, n, rollingwindow) {
# n-period simple moving average of the close, then its first difference
# (the "slope" of the SMA).
SMA <- SMA(Cl(TS), n=n)
FD_SMA <- diff(SMA)
# Rolling 25th/75th percentile bands of the slope over `rollingwindow` bars.
FD_SMA_Q <- cbind(
rollapply(FD_SMA, rollingwindow, quantile, probs=.25, na.rm=TRUE),
rollapply(FD_SMA, rollingwindow, quantile, probs=.75, na.rm=TRUE)
)
# Score each bar by how many quantile bands its slope exceeds (0..2).
# NOTE(review): the width-1 rollapply compares each value against the whole
# FD_SMA_Q columns; this appears to rely on zoo's index alignment -- confirm
# the elementwise comparison behaves as intended.
FD_SMA_S <- rollapply(FD_SMA, 1, function(x) {
as.numeric(x > FD_SMA_Q[,1]) +
as.numeric(x > FD_SMA_Q[,2])})
colnames(FD_SMA_S) <- c(paste0("FSMA", n))
return(FD_SMA_S)
}
|
f1114fcd4a9a841751d09a91721cae9ec30d88f3
|
9277e549802eb213f90c7d61624aace84003e820
|
/man/update_recessions.Rd
|
d649cbfccd4af760d9e1333533f0f197050b4f4e
|
[
"MIT"
] |
permissive
|
CMAP-REPOS/cmapplot
|
5f479a7e217666e03c86132054915b23475ceb5a
|
13563f06e2fdb226500ee3f28dc0ab2c61d76360
|
refs/heads/master
| 2023-03-16T03:25:59.202177
| 2023-03-08T04:28:04
| 2023-03-08T04:28:04
| 227,153,492
| 9
| 1
|
NOASSERTION
| 2023-03-08T04:28:06
| 2019-12-10T15:25:18
|
R
|
UTF-8
|
R
| false
| true
| 1,741
|
rd
|
update_recessions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_recessions.R
\name{update_recessions}
\alias{update_recessions}
\title{Update recessions table}
\source{
\url{https://www.nber.org/data/cycles/cycle dates pasted.csv}
}
\usage{
update_recessions(url = NULL, quietly = FALSE)
}
\arguments{
\item{url}{Char, the web location of the NBER machine-readable CSV file. The
default, \code{NULL}, uses the most recently identified URL known to the
package development team, which appears to be the most stable location for
updates over time.}
\item{quietly}{Logical, suppresses messages produced by
\code{utils::download.file}.}
}
\value{
A data frame with the following variables: \itemize{ \item
\code{start_char, end_char}: Chr. Easily readable labels for the beginning
and end of the recession. \item \code{start_date, end_date}: Date. Dates
expressed in R datetime format, using the first day of the specified month.
\item \code{ongoing}: Logical. Whether or not the recession is ongoing as of
the latest available NBER data. }
}
\description{
The cmapplot package contains an internal dataset \code{recessions} of all
recessions in American history as recorded by the National Bureau of Economic
Research (NBER). However, users may need to replace the built-in data, such as
in the event of new recessions and/or changes to the NBER consensus on
recession dates. This function fetches and interprets this data from the NBER
website.
}
\examples{
recessions <- update_recessions()
# package maintainers can update the internal dataset from within
# package by running the following code:
\dontrun{
recessions <- update_recessions()
usethis::use_data(recessions, internal = TRUE, overwrite = TRUE)
}
}
|
a1e39ef2bb17de15fee5d1447cab784876508412
|
1478cc4003f4e402c612c54da5f4e0f3f5edd52f
|
/R/species.R
|
8bfb89c7ae2f400751cc16bf5808f18f18665b44
|
[
"MIT"
] |
permissive
|
msleckman/ESM262ClimatePackage
|
10eb6fc18af8e7d41e770710218e2d1dcc2a18a0
|
8e087a3925f884ebc3002ecc525ed138055dfeb4
|
refs/heads/master
| 2020-06-01T05:52:53.696683
| 2019-06-13T03:48:08
| 2019-06-13T03:48:08
| 190,666,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,469
|
r
|
species.R
|
#' Sample Plant Characteristic Data
#'
#' @description
#' Sample dataset for various Santa Barbara plant species providing climate and growth-based plant characteristics.
#'
#' @details
#' Species information given by species identifier, scientific name, and common name.
#' Dataset provides drought tolerance capability and the number of days species can tolerate high heat stress days per year.
#' Data also reports minimum and maximum amount of precipitation the species can tolerate.
#' URLs provide source for using >86 degF (>30 degC) as the baseline temperature for stressful temperature levels
#' and for USDA plant characteristic information.
#' Species heat stress days derived from drought tolerance level.
#'
#' @format A data frame with 3 rows and 7 variables:
#' \itemize{
#' \item Species is the species four letter identifier, based on scientific name
#' \item Scientific is the species scientific name
#' \item Common is the species common name
#' \item DroughtTolerance is the drought tolerance capability (low, medium, high)
#' \item StressDays is the number of days species can tolerate high temperatures per year before becoming stressed
#' \item PrecipMin is the minimum precipitation tolerated by the species
#' \item PrecipMax is the maximum precipitation tolerated by the species
#' }
#'
#' @source \url{http://agron-www.agron.iastate.edu/courses/Agron541/classes/541/lesson04a/4a.2.html}
#' @source \url{https://plants.usda.gov/}
#'
"species"
|
578bd3d8041e783639dd49bac020311401e7caf7
|
37ce38ba0eff95451aebea810a1e2ab119f89a85
|
/R/TN_Combine.R
|
8854f4700438c2af1f2149cf7efbf826a6e1458c
|
[
"MIT"
] |
permissive
|
SwampThingPaul/AnalystHelper
|
39fdd58dc4c7300b6e72ff2713316809793236ce
|
eb570b69d7ea798facaf146d80bc40269a3d5028
|
refs/heads/master
| 2023-07-21T00:19:21.162374
| 2023-07-11T17:24:36
| 2023-07-11T17:24:36
| 179,672,539
| 1
| 0
|
MIT
| 2020-03-21T20:05:31
| 2019-04-05T11:53:19
|
R
|
UTF-8
|
R
| false
| false
| 870
|
r
|
TN_Combine.R
|
#' Nitrogen concentration data handling
#'
#' Combines nitrogen measurements into a single Total Nitrogen (TN) value.
#' A directly measured TN value is used when available; otherwise TN is
#' calculated as NOx + TKN. If either NOx or TKN is missing, the calculated
#' value is NA.
#'
#' @param NOx Nitrate-Nitrite (NOx) concentration (numeric)
#' @param TKN Total Kjeldahl Nitrogen (TKN) concentration (numeric)
#' @param TN Direct measure Total Nitrogen (TN) concentration (numeric)
#' @keywords "water quality" nitrogen
#' @export
#' @return A numeric vector of TN concentrations, the same length as the inputs
#' @examples
#' NOX=c(0.001,0.002,0.05,NA,NA)
#' TKN=c(0.5,0.05,0.4,NA,NA)
#' TN=c(NA,NA,NA,1.2,1.3)
#' TN_Combine(NOX,TKN,TN)
TN_Combine=function(NOx,TKN,TN){
  # Calculated TN = NOx + TKN, but only when both components are present.
  calc=ifelse(is.na(NOx)|is.na(TKN),NA,NOx+TKN)
  # Prefer the directly measured TN when available.
  # (The original line `TN=ifelse(is.na(TN)==T,NA,TN)` was a no-op and was removed.)
  final=ifelse(is.na(TN),calc,TN)
  return(final)
}
# Deprecated alias for TN_Combine(), retained for backward compatibility.
# Emits a warning pointing callers at the replacement.
SFWMD.TN.Combine=function(NOx,TKN,TN){
  out <- TN_Combine(NOx, TKN, TN)
  warning("This function is being phased out. Consider using TN_Combine in the future.")
  out
}
|
84595a4b93d50f33f0224b7c932b428f08915d1c
|
ba86155005777258d2b08ddb5c1b407beb86fc41
|
/R/print.contrast.R
|
f639761262e893b7135fd6835e5d6556b6f68d83
|
[] |
no_license
|
cran/contrast
|
6640d7f7dc932585249f4c9ea4b6d3d650a8da97
|
d8d17b9d09ca977c05b605be131fabfef6c89c86
|
refs/heads/master
| 2022-10-28T19:10:13.808567
| 2022-10-05T16:20:09
| 2022-10-05T16:20:09
| 17,695,238
| 0
| 1
| null | 2017-02-22T19:06:34
| 2014-03-13T04:19:34
|
R
|
UTF-8
|
R
| false
| false
| 2,113
|
r
|
print.contrast.R
|
# This method is used for printing the objects returned by the contrast methods.
# It was copied from the rms package, written by Frank Harrell.
#' Print a Contrast Object
#' @param x Result of `contrast()`.
#' @param X A logical: set `TRUE` to print design matrix used in computing the
#'   contrasts (or the average contrast).
#' @param fun A function to transform the contrast, SE, and lower and upper
#'   confidence limits before printing. For example, specify `fun = exp` to
#'   anti-log them for logistic models.
#' @param ... Not used.
#' @export
print.contrast <- function(x, X = FALSE, fun = function(u) u, ...) {
  # Test-statistic column labels depend on the model type: t-based for
  # lm/glm/lme/gls (fall-through cases), Z-based for geese.
  testLabels <- switch(x$model,
    lm = ,
    glm = ,
    lme = ,
    gls = c("t", "Pr(>|t|)"),
    geese = c("Z", "Pr(>|Z|)")
  )
  # Pull the display columns from the contrast object.
  w <- x[c(
    "Contrast",
    "SE",
    "Lower",
    "Upper",
    "testStat",
    "df",
    "Pvalue"
  )]
  w$testStat <- round(w$testStat, 2)
  w$Pvalue <- round(w$Pvalue, 4)
  # Rename generic columns to their model-specific display labels.
  no <- names(w)
  no[no == "SE"] <- "S.E."
  no[no == "testStat"] <- testLabels[1]
  no[no == "Pvalue"] <- testLabels[2]
  names(w) <- no
  cat(x$model, "model parameter contrast\n\n")
  # Row labels: use supplied contrast names, else blanks (varying contrasts)
  # or running integers.
  cnames <- x$cnames
  if (length(cnames) == 0) {
    cnames <- if (x$nvary) {
      rep("", length(x[[1]]))
    } else {
      as.character(1:length(x[[1]]))
    }
  }
  attr(w, "row.names") <- cnames
  attr(w, "class") <- "data.frame"
  # Apply the user transform to the estimate, SE and confidence limits
  # (e.g. exp for logistic models).
  w$Contrast <- fun(w$Contrast)
  w$S.E. <- fun(w$S.E.)
  if (x$model != "geese") {
    w$df <- x$df
  }
  w$Lower <- fun(w$Lower)
  w$Upper <- fun(w$Upper)
  print(as.matrix(w), quote = FALSE, ...)
  # Optionally print the design (contrast coefficient) matrix.
  if (X) {
    attr(x$X, "contrasts") <- NULL
    attr(x$X, "assign") <- NULL
    cat("\nContrast coefficients:\n")
    if (is.matrix(x$X)) {
      dimnames(x$X) <- list(cnames, dimnames(x$X)[[2]])
    }
    print(x$X, ...)
  }
  # For lm models, report any non-default covariance estimator that was used.
  if (x$model == "lm") {
    if (x$covType != "const") {
      cat("\nThe", x$covType, "covariance estimator was used.\n")
    }
  }
  invisible()
}
|
5dd428315c54f359d59e929f699f88674558673d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/etasFLP/examples/italycatalog.Rd.R
|
566d32302b921de29471c2882671cac427728ee5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
italycatalog.Rd.R
|
# Extracted example for the `italycatalog` dataset shipped with etasFLP.
library(etasFLP)
### Name: italycatalog
### Title: Small sample catalog of italian earthquakes
### Aliases: italycatalog
### Keywords: datasets earthquake
### ** Examples
# Load the bundled catalog and inspect its structure.
data(italycatalog)
str(italycatalog)
|
ac89bb17337fc1641d41a67db233e996f6e124f4
|
3b9a9525adbcdaad1e1ce56936500ec073ac77cc
|
/ui.R
|
8ae3bdad3a5ea1569578572ec0b15b94ee085b5b
|
[] |
no_license
|
wx-chen/IrisShinyApp
|
61522ffe54feaf502e7c3600a1bbfb228a049934
|
aefff296a37e9ac873e363a455751a5e028c3d43
|
refs/heads/master
| 2016-09-06T06:33:35.901726
| 2014-07-18T22:05:08
| 2014-07-18T22:05:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,537
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# Layout: sidebar with a species selector and an image toggle; main panel with
# the Petal/Sepal dimension plot and an iris image. Server side must provide
# the outputs `irisplot` and `setose`.
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Interesting Facts about Iris"),
# Sidebar with selection of iris species, and a check box of iris images
sidebarLayout(
sidebarPanel(
# Input `irisspecies`: which species to plot ("All" plots every species).
selectInput('irisspecies', label='What species do you want to see?',
choices=c("All","setosa","versicolor","virginica"),
selected="All"),
# Input `imgs`: whether to display the iris image in the main panel.
checkboxInput('imgs', 'Image of Iris:', value=TRUE),
h4("Simple Instructions:"),
h6("This app use the Iris dataset and gives you some interesting facts about different species of iris :)"),
helpText("You probably don't need the instructions, but in case you do,"),
helpText("1. You can use the checkbox (Image of iris) to determine whether you want to see the images of iris displayed"),
helpText("2. You can use the dropdown manu to select the species of iris you want to plot on the right"),
helpText("3. The plot on the right displays the length and width of the Petal and Sepal, respectively"),
h5("That's about it... Enjoy! :D")
),
# Show a plot of the Petal/Sepal dimentions of iris
mainPanel(
plotOutput("irisplot"),
h3("Image of iris:"),
imageOutput("setose")
)
)
))
|
cc19f636ffa3f40fb4d0f3e11f75acbf092b6e9d
|
5444935089dd69c6a53118902f52dda84634f884
|
/capital one/evaluate.R
|
f6303fe5bb6f942a7fb8a1ead9d3ea58aa00227e
|
[] |
no_license
|
kusakewang/Machine-Learning
|
96c5acbc9597785728169ca3729634dae83b891f
|
203d87e9651a6fb3c575451bf26dd6f4a7b4591c
|
refs/heads/master
| 2021-01-19T20:15:16.378520
| 2016-05-23T15:10:43
| 2016-05-23T15:10:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,113
|
r
|
evaluate.R
|
EvaluateAUC <- function(dfEvaluate) {
require(xgboost)
require(Metrics)
require(pROC)
CVs <- 5
cvDivider <- floor(nrow(dfEvaluate) / (CVs+1))
indexCount <- 1
outcomeName <- c('cluster')
predictors <- names(dfEvaluate)[!names(dfEvaluate) %in% outcomeName]
lsErr <- c()
lsAUC <- c()
for (cv in seq(1:CVs)) {
# print(paste('cv',cv))
dataTestIndex <- c((cv * cvDivider):(cv * cvDivider + cvDivider))
dataTest <- dfEvaluate[dataTestIndex,]
dataTrain <- dfEvaluate[-dataTestIndex,]
bst <- xgboost(data = as.matrix(dataTrain[,predictors]),
label = dataTrain[,outcomeName],
max.depth=6, eta = 1, verbose=0,
nround=5, nthread=4,
objective = "reg:linear")
predictions <- predict(bst, as.matrix(dataTest[,predictors]), outputmargin=TRUE)
err <- rmse(dataTest[,outcomeName], predictions)
auc <- auc(dataTest[,outcomeName],predictions)
lsErr <- c(lsErr, err)
lsAUC <- c(lsAUC, auc)
gc()
}
print(paste('Mean Error:',mean(lsErr)))
print(paste('Mean AUC:',mean(lsAUC)))
}
|
6d75e2900c399425601aa672d9ae39fe81187840
|
e2de741b6608cab0db43a71e486be123049f2dfb
|
/R_Analysis/_Statistical_Analysis/explore_diff_in_EPA_risk.R
|
d7e7b9af7736e14ab21bd73b1580d2a94e702d52
|
[] |
no_license
|
jpf1282/tribal_lands
|
1b07d410c734faf4848b2ffe3602e124a2f61836
|
ecd397f1c8e8b7fd8f9306907ebde66785ac35c6
|
refs/heads/master
| 2021-06-23T21:39:58.754161
| 2020-12-22T15:59:19
| 2020-12-22T15:59:19
| 161,838,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,032
|
r
|
explore_diff_in_EPA_risk.R
|
library(readr)
library(tidyr)
library(knitr)
library(DT)
library(ggbeeswarm)
library(dplyr)
library(stringr)
library(scales)
library(broom)
library(ggplot2)
# Import variables to be joined to tribes ---------------------------------
# Import variables to be joined (climate and precipitation)
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific.
setwd("/Users/kathrynmcconnell/Documents/GitHub/tribal_lands2/R_Analysis/_Variables")
climate <- read_csv("EPA_CSRI_Index_Final.csv") %>% select(FIPS, FIPS_manual, Risk, CRSI)
precip <- read_csv("MeanAnnualPrecipitation.csv") %>% select(FIPS, MeanAnnualPrecipitation_in)
# For counties that were missing automatic FIPS designations use manual FIPS designation instead
climate[1:126,]$FIPS <- climate[1:126,]$FIPS_manual
# Change FIPS from character to integer for future joins
climate$FIPS <- as.numeric(climate$FIPS)
precip$FIPS <- as.numeric(precip$FIPS)
# Processing Justin’s tribal data and completing the join -----------------
# Data comes from an .RData file that Justin created (documentation available in 'data wrangle change score centroid and map data.R`)
load("/Users/kathrynmcconnell/Documents/GitHub/tribal_lands2/R_Analysis/processed_data.RData")
# Then select distinct records only
data_long <- distinct(merged_data_record_all_long, tribe, time, FIPS, .keep_all = TRUE)
# Change FIPS from character to integer
data_long$FIPS <- as.numeric(data_long$FIPS)
# Join new variables to all tribe records (for some reason adds around ten records) - update this in github
# Adds nine more rows, not sure which
main_join1 <- left_join(data_long, precip, c("FIPS" = "FIPS")) %>% # Join precip data
left_join(climate, c("FIPS" = "FIPS")) %>% # Join climate data
mutate(Precip = MeanAnnualPrecipitation_in) %>% # Shorten variable names
select(-(FIPS_manual)) %>% # Remove any extraneous columns from added datasets that we don't want
select(-(MeanAnnualPrecipitation_in))
# write to .csv
#setwd("/Users/kathrynmcconnell/Documents/GitHub/tribal_lands2/Clean_Tribe_Files_for_Analysis")
#write.csv(main_join1, "Tribes_merged_2.24.19.csv")
# Select only tribes with time 1 and time 2 data
data_t1and2_long <- filter(main_join1, tribe %in% tribes_time1and2_lst$tribe)
# Change from character to numeric
data_t1and2_long$FIPS <- as.numeric(data_t1and2_long$FIPS)
# Confirm that there are only two records per tribe, result should be zero
t1and2_lst <- group_by(data_t1and2_long, tribe, time) %>%
count() %>%
ungroup() %>%
group_by(tribe) %>%
count() %>%
filter(nn < 2)
nrow(t1and2_lst)
# Exploratory Analysis ----------------------------------------------------
### All tribes, including those missing values at t1 or t2
# Precipitation differences between t1 and t2
ggplot(main_join1, aes(Precip, fill = time)) +
geom_histogram(position = "dodge", binwidth = 200) +
theme_minimal() +
ylab("Count of Counties with Tribes Present") +
xlab("Mean Annual Precipitation (in)") +
facet_wrap(~ time, nrow = 2)
ggplot(main_join1, aes(time, Precip, colour = time)) +
geom_boxplot() +
geom_quasirandom(alpha = 1/10, varwidth = TRUE) +
theme_minimal() +
ylab("Mean Annual Precipitation (in)") +
ggtitle("Counties with Tribes Present at Time 1 & 2", "All Tribes")
# EPA Risk
ggplot(main_join1, aes(Risk, fill = time)) +
geom_histogram(position = "dodge", binwidth = .10) +
theme_minimal() +
ylab("Count of Counties with Tribes Present") +
xlab("EPA Risk Scale") +
facet_wrap(~ time, nrow = 2)
ggplot(main_join1, aes(time, Risk, colour = time)) +
geom_boxplot() +
geom_quasirandom(alpha = 1/10, varwidth = TRUE) +
theme_minimal() +
ylab("EPA Risk Scale") +
ggtitle("Counties with Tribes Present at Time 1 & 2", "All Tribes")
### Only tribes with t1 and t2 values
ggplot(data_t1and2_long, aes(Precip, fill = time)) +
geom_histogram(position = "dodge", binwidth = 200) +
theme_minimal() +
ylab("Count of Counties with Tribes Present") +
xlab("Mean Precipitation (in)") +
facet_wrap(~ time, nrow = 2)
ggplot(data_t1and2_long, aes(time, Precip, colour = time)) +
geom_boxplot() +
geom_quasirandom(alpha = 1/10, varwidth = TRUE) +
theme_minimal() +
ylab("Mean Annual Precipitation (in)") +
ggtitle("Counties with Tribes Present", "Only Tribes with T1 & T2")
# EPA Risk
ggplot(data_t1and2_long, aes(Risk, fill = time)) +
geom_histogram(position = "dodge", binwidth = .05) +
theme_minimal() +
ylab("Count of Counties with Tribes Present") +
xlab("EPA Risk Scale") +
facet_wrap(~ time, nrow = 2)
ggplot(data_t1and2_long, aes(time, Risk, colour = time)) +
geom_boxplot() +
geom_quasirandom(alpha = 1/10, varwidth = TRUE) +
theme_minimal() +
ylab("EPA Risk Scale") +
ggtitle("Counties with Tribes Present at Time 1 & 2", "Only Tribes with T1 & T2")
# Analysis ----------------------------------------------------------------
# Are data normally distributed? Doesn't immediately look like it
library(ggpubr)
# QQ plots plot sample observations against a normal mean (theoretical)
# None look normal, but for precipitation the main differences are in the tails
ggqqplot(main_join1$Risk) + ggtitle("EPA Risk, All Tribes")
ggqqplot(data_t1and2_long$Risk) + ggtitle("EPA Risk, Only Tribes with T1 and T2")
ggqqplot(main_join1$Precip) + ggtitle("Precipitation, All Tribes")
ggqqplot(data_t1and2_long$Precip) + ggtitle("Precipitation, Only Tribes with T1 and T2")
# Density of precipitation for all tribes
ggdensity(main_join1$Precip,
main = "Density plot of precipitation (all tribes)",
xlab = "Precipitation (in)")
# Density of precipitation for tribes with only t1 and t2
ggdensity(data_t1and2_long$Precip,
main = "Density plot of precipitation (tribes with t1 and t2)",
xlab = "Precipitation (in)")
# Density of EPA risk for all tribes
ggdensity(main_join1$Risk,
main = "Density plot of EPA risk (all tribes)",
xlab = "Risk")
# Density of EPA risk for tribes with t1 and t2
ggdensity(data_t1and2_long$Risk,
main = "Density plot of EPA risk (tribes with t1 and t2)",
xlab = "Risk")
### T-tests for differences between time 1 and time 2
# Welch two-sample t-test: precipitation, all tribe-counties
precip_all_ttest <- t.test(main_join1$Precip[main_join1$time == "time 1"],
                           main_join1$Precip[main_join1$time == "time 2"])
precip_all_ttest
# Welch two-sample t-test: precipitation, only tribes observed at both t1 and t2
precip_t1t2_ttest <- t.test(data_t1and2_long$Precip[data_t1and2_long$time == "time 1"],
                            data_t1and2_long$Precip[data_t1and2_long$time == "time 2"])
# Bug fix: this previously printed precip_all_ttest a second time instead of
# the result for the t1/t2-only subset computed just above.
precip_t1t2_ttest
# Welch two-sample t-test: EPA risk, all tribe-counties
risk_all_ttest <- t.test(main_join1$Risk[main_join1$time == "time 1"],
                         main_join1$Risk[main_join1$time == "time 2"])
risk_all_ttest
# Welch two-sample t-test: EPA risk, only tribes observed at both t1 and t2
risk_t1t2_ttest <- t.test(data_t1and2_long$Risk[data_t1and2_long$time == "time 1"],
                          data_t1and2_long$Risk[data_t1and2_long$time == "time 2"])
risk_t1t2_ttest
|
4a9fa9fff69b3c1b0a22b1386ea0e09a0a0cd484
|
5553da94973b7022872ea6215bdb122710f51a15
|
/matrixInversionTests.R
|
5272907480a8cf8e5b8398eb0d58dc1e0d53bc58
|
[] |
no_license
|
SamBuckberry/ProgrammingAssignment2
|
6493045ad96cc7bc2f3b43d10d8ba72f6583ed9a
|
d0bf1e1fa83efd514af73e341c02a4846371138d
|
refs/heads/master
| 2021-01-17T20:39:12.745463
| 2014-04-23T15:10:32
| 2014-04-23T15:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
matrixInversionTests.R
|
# Timing/accuracy checks for the cached-inverse matrix functions.
# makeCacheMatrix() and cacheSolve() must already be sourced into the session.
# Create a 100 x 100 test matrix of N(100, 25) draws
myMatrix <- matrix(rnorm(n=10000, mean=100, sd=25), nrow=100, ncol=100)
# Get the inverse matrix directly, timing it for reference
system.time(myInverse <- solve(myMatrix))
# Setup the caching wrapper around the matrix
system.time(a <- makeCacheMatrix(myMatrix))
# Solve (and cache) the inverse through the wrapper
system.time(b <- cacheSolve(a))
# Check the results: the cached inverse must match the direct solve
all(myInverse==b)
|
52b29299baaf3d7ef186fd4d70347a7b15ab81c6
|
0d7f4c22f0fcb7a448de4b5a5f199b87bef4ef47
|
/Figures/example1.9.R
|
d7126363a9f8d9d5c1997c468a5bb7ddce7713cd
|
[] |
no_license
|
gjhunt/drew_thesis
|
c86d473d6fa5a022f707b442acddcb136a75cda5
|
1b83987fcd9f19ece1e83e9df18d78504ed4a859
|
refs/heads/master
| 2021-01-17T17:26:12.332048
| 2017-02-23T15:14:45
| 2017-02-23T15:14:45
| 82,938,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
example1.9.R
|
# Least-squares line through three points, drawn over the scatter plot.
plot.new()
par(pty="s")
par(mfrow = c(1,1))
# Plot the data points (4,0), (0,2), (1,1) as crosses on a square [0,5]^2 panel
plot(c(4,0,1),c(0,2,1), col="midnightblue",main="",xlab="",ylab="",asp=1,xlim=c(0,5),ylim=c(0,5) , pch=4,lwd=4)
grid(col="black")
abline(h=0,col="black")
abline(v=0,col="black")
# Design matrix with an intercept column: each row is (1, x_i)
# (byrow=TRUE spelled out instead of the reassignable shorthand T)
X<-matrix(c(1,4,1,0,1,1),byrow=TRUE,nrow=3)
# Least-squares coefficients via the Moore-Penrose pseudoinverse.
# Bug fix: ginv() lives in the MASS package, which was never loaded here,
# so the original script failed with "could not find function ginv".
b<-MASS::ginv(X)%*%c(0,2,1)
abline(coef=b,col="Red",lwd=2)
|
75d6219dcbbadbf1b44fc7eacfa7135f62fe4e98
|
8987dcc442aeb76d5663fb6385d7a9196d3bbba4
|
/cachematrix.R
|
e9b7ba28d19f42014c6ac7104b5f8eb5e581667d
|
[] |
no_license
|
megs161195/ProgrammingAssignment2
|
3662bb218258d1473fbbbfc55bc7f621f6b0ad60
|
a30a07f99becb673366366805676dc2f29def71e
|
refs/heads/master
| 2021-01-22T22:20:45.449492
| 2017-05-31T15:25:14
| 2017-05-31T15:25:14
| 92,769,886
| 0
| 0
| null | 2017-05-29T19:37:47
| 2017-05-29T19:37:46
| null |
UTF-8
|
R
| false
| false
| 694
|
r
|
cachematrix.R
|
## Wrap a matrix in a caching container.
##
## Returns a list of four accessor closures:
##   set(m)     -- replace the stored matrix and drop any cached inverse
##   get()      -- return the stored matrix
##   setinv(i)  -- store a computed inverse in the cache
##   getinv()   -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    # A new matrix invalidates any previously cached inverse.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## If an inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), stored in the cache, and returned.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("showing cached inverse matrix")
    return(cached)
  }
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinv(inverse)
  inverse
}
|
39a32a24d10ec682c8aa009f39db427e1c688299
|
c9e02a75abbd1d5048446a65aa23b10f79492b2f
|
/scripts/attractor_xpower4.2.R
|
f20414d21b34738cdb1ae9859826b815212b3693
|
[] |
no_license
|
somasushma/R-code
|
f8290d3ecd8ea87ef778b1deb0b7222e84b811be
|
2e1f0e05ae56ebe87354caeb374aebb19bf00080
|
refs/heads/master
| 2021-10-27T14:42:26.847193
| 2021-10-25T22:02:51
| 2021-10-25T22:02:51
| 137,162,116
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,000
|
r
|
attractor_xpower4.2.R
|
windowsFonts(f1 = windowsFont("Constantia"),
f2 = windowsFont("Book Antiqua"),
f3 = windowsFont("Cambria Math"))
#e
e=exp(1)
# function: midpoint ------------
# Centre of the bounding box of the points (x, y): the midpoint of the
# x-range paired with the midpoint of the y-range.
midpoint <- function(x, y) {
  c(mean(range(x)), mean(range(y)))
}
# function: distance ------------
# Euclidean distance of each point (x, y) from the centre of the point
# cloud's bounding box, as computed by midpoint().
distance <- function(x, y) {
  ctr <- midpoint(x, y)
  sqrt((ctr[1] - x)^2 + (ctr[2] - y)^2)
}
# Candidate iteration maps f1-f4: rational functions of x whose shape is
# controlled by the global parameters a and b (and c for f4). The globals
# are looked up at call time, so a/b/c must be set before f() is called.
f1=function(x) (1+a*(x+x^2+x^3+x^4+x^5))/(1+b*x^4)
f2=function(x) (1+a*(x+x^3+e^(-x^2)))/(1+b*x^2)
f3=function(x) (1+a*(x+x^2+x^3+e^(-x^2)))/(1+b*x^2)
# NOTE(review): "x^3++x^5" contains a stray unary '+'; harmless, but
# probably a typo for "x^3+x^5".
f4=function(x) (1+a*(x^2+x^4)+c*(x+x^3++x^5))/(1+b*x^4)
# Map used for the attractor below; an m x m grid of panels is drawn later.
f=f4
m=4
#parameter search step: sample random (a, b, c) triples and keep the first
#m^2 whose short test orbit neither blows up nor collapses.
pbox=array(data = NA, dim = c(m^2, 3))
k=1
while( k<= (m^2)){
  n=300
  # Draw candidate parameters; triples with |a| > b are rejected outright
  # (author's pre-filter before iterating the map).
  a=runif(n=1, min = -4, max = 4)
  c=runif(n=1, min = -4, max = 4)
  b=runif(n=1, min = 0, max = 10)
  if(abs(a)>b) next
  # Iterate the 2-D map n times from (0.1, 0.1), recording the orbit in pm.
  pm=array(data=NA, dim = c(n,2))
  x=0.1
  y=0.1
  for (j in 1:n) {
    x1=y+f(x)
    y1=-x+f(x1)
    pm[j,]=c(x1, y1)
    x=x1
    y=y1
  }
  # Spread of distances from the orbit's bounding-box centre; a small spread
  # presumably indicates a near-circular/degenerate orbit worth rejecting.
  te=distance(pm[,1],pm[,2])
  # Reject diverging orbits (coordinates beyond +-150), near-circular orbits
  # (distance spread < 0.66), and orbits that stay too small (< 2.5).
  if(any(abs(pm[,1]) > 150) || any(abs(pm[,2]) > 150)) { next
  } else if(max(te)-min(te) < .66 || max(abs(pm)) < 2.5){
    next
  } else {
    pbox[k,]=c(a,b,c)
    k=k+1
  }
}
#plot step------------
# Draw an m x m grid of attractor panels, one per accepted parameter
# triple in pbox, iterating the selected map f for 50000 steps each.
par(pty="s", mfrow=c(m,m))
for(k in 1:(m^2)){
  n=50000
  a=pbox[k,1]
  c=pbox[k,3]
  b=pbox[k,2]
  pm=array(data=NA, dim = c(n,3))
  x=0.1
  y=0.1
  for (j in 1:n) {
    x1=y+f(x)
    y1=-x+f(x1)
    # Third column: colour index that changes every 1000 iterations.
    pm[j,]=c(x1, y1, (round(j/1000)+1))
    x=x1
    y=y1
  }
  par(mar=c(2,2,2,2), mgp=c(1.1, .4, 0))
  plot(pm[,1], pm[,2], pch=16, col=pm[,3], cex=.1, asp=1, xlab="X", ylab = "Y", family="f3")
  # Panel header: the parameter values used for this panel.
  mtext(do.call(expression, list(bquote("a="~.(a)~"; b="~.(b)),bquote("c="~.(c)))),side=3,line=1:0, cex = .75)
}
# Bug fix: 'pic' was only initialised by a commented-out line, so the first
# run failed with "object 'pic' not found". Initialise it once and keep
# incrementing across runs as before (file names stay sequential).
if (!exists("pic")) pic <- 1
naman=paste("~/R/Figures/Figures1/x4.2_attractor_",pic,".png", sep = "")
pic=pic+1
dev.copy(png, file=naman, width=8, height=8, res=300, units="in")
dev.off()
#rotational symmetry calculation: table of cos(2*pi*k/n) for k = 1..n,
#i.e. the x-coordinates of the n-th roots of unity, rounded to 5 places.
n=6
cbind(c(1:n), round(cos(2*pi/n*(1:n)), 5))
|
2e382bed7975133a204ebd28593d31e8d7f31cfe
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/billboarder/examples/bb_piechart.Rd.R
|
48f0e508dc4d2b406a37cfbf980d68012984c81c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
bb_piechart.Rd.R
|
library(billboarder)
### Name: bb_piechart
### Title: Helper for creating a pie chart
### Aliases: bb_piechart
### ** Examples
# Example data: GitHub star counts per package
stars <- data.frame(
  package = c("billboarder", "ggiraph", "officer", "shinyWidgets", "visNetwork"),
  stars = c(9, 177, 43, 44, 169)
)
# Default: no explicit aesthetic mapping supplied
billboarder() %>%
  bb_piechart(data = stars)
# Explicit mapping via bbaes() passed alongside the data
billboarder() %>%
  bb_piechart(data = stars, bbaes(package, stars))
# Other way to specify mapping: bb_aes() applied to the chart object
billboarder(data = stars) %>%
  bb_aes(package, stars) %>%
  bb_piechart()
|
bf3ff2995c563b6d7abb776b8af2c49d7dd67c6f
|
9e2296d74051d725efcc28cab16ca7703c8a6c1b
|
/man/add_ui_sidebar_basic.Rd
|
2062143a05bf370916902bd1c927ae66b55d9031
|
[] |
no_license
|
neuhausi/periscope
|
59f5d74cc7e399a9a9e03e19199409a6438a4a91
|
e0364b0db9b9bbcbc4b6c295bbbb6fa1d1d65fd4
|
refs/heads/master
| 2023-07-06T05:44:50.295396
| 2023-07-03T21:39:01
| 2023-07-03T21:39:01
| 171,934,957
| 27
| 1
| null | 2023-07-03T21:39:02
| 2019-02-21T19:49:03
|
R
|
UTF-8
|
R
| false
| true
| 1,337
|
rd
|
add_ui_sidebar_basic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ui_helpers.R
\name{add_ui_sidebar_basic}
\alias{add_ui_sidebar_basic}
\title{Add UI Elements to the Sidebar (Basic Tab)}
\usage{
add_ui_sidebar_basic(elementlist = NULL, append = FALSE, tabname = "Basic")
}
\arguments{
\item{elementlist}{list of UI elements to add to the sidebar tab}
\item{append}{whether to append the \code{elementlist} to currently
registered elements or replace the currently registered elements.}
\item{tabname}{change the label on the UI tab (default = "Basic")}
}
\description{
This function registers UI elements to the primary (front-most) tab
on the dashboard sidebar. The default name of the tab is \strong{Basic} but
can be renamed using the tabname argument. This tab will be active on the
sidebar when the user first opens the shiny application.
}
\section{Shiny Usage}{
Call this function after creating elements in \code{ui_sidebar.R} to register
them to the application framework and show them on the Basic tab in the
dashboard sidebar
}
\examples{
require(shiny)
s1 <- selectInput("sample1", "A Select", c("A", "B", "C"))
s2 <- radioButtons("sample2", NULL, c("A", "B", "C"))
add_ui_sidebar_basic(list(s1, s2), append = FALSE)
}
\seealso{
\link[periscope]{add_ui_sidebar_advanced}
\link[periscope]{add_ui_body}
}
|
74aa232d397785dc681742aacfa21f827b2630c8
|
5bf589d943c0dcf7e9f6a331d25cc3dfef8d8d48
|
/src/clase_1/ej1.R
|
23eeaf63b9f8123547e3775498c6a709ef46dbfa
|
[] |
no_license
|
joagonzalez/ditella-data-mining
|
8b161df85c56f95b85007997f09302a84b969596
|
d124319b1d63ef8a54cf8c29d6f08f740badf788
|
refs/heads/master
| 2023-04-17T01:18:53.616889
| 2021-05-09T00:36:10
| 2021-05-09T00:36:10
| 352,432,877
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,997
|
r
|
ej1.R
|
# Naive Bayes classifier on the bankruptcy dataset.
# install.packages('e1071')
# install.packages('mlbench')
library(e1071)
library(mlbench)
# Read dataset (semicolon-separated) and drop rows with any NA
setwd('C:/Users/a310005/Desktop/DiTella/Data Mining/Clase_1')
getwd()
data <- read.csv('bankruptcy_data_red.csv', sep=';')
data <- na.omit(data)
head(data)
summary(data)
nrow(data)
ncol(data)
# Split dataset in training and test (300 random rows held out for test)
test_indexes <- sample(c(1:nrow(data)), 300)
train_data <- data[-test_indexes,]
test_data <- data[test_indexes,]
# Naive Bayes training model (no Laplace smoothing)
nb_classifier <- naiveBayes(class ~ ., data = train_data,
                            laplace=0)
# Predictions on the test set: confusion matrix and accuracy
preds_test <- predict(nb_classifier, newdata = test_data)
table(predicted = preds_test, actual = test_data$class)
print(mean(preds_test == test_data$class))
# Predictions on the training set: confusion matrix and accuracy
preds_train <- predict(nb_classifier, newdata = train_data)
table(predicted = preds_train, actual = train_data$class)
print(mean(preds_train == train_data$class))
# Plot model accuracy on train vs. test
plot_data <- c(mean(preds_train == train_data$class),
               mean(preds_test == test_data$class))
barplot(plot_data, main='Model Accuracy',
        xlab='Dataset', ylab='Accuracy',
        col='darkred',
        names.arg = c('Train Data', 'Test Data'))
# (Translated from Spanish:) These are not categorical features, so naive
# Bayes is not at its best here. Add-one (Laplace) smoothing helps when
# data are scarce and some class/value combinations are empty, which would
# give conditional probability 0. Since this dataset has non-categorical
# columns the model works with means and standard deviations, so smoothing
# would not improve anything here.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# k-nearest-neighbours classifier on the same bankruptcy dataset.
library(class); library(kernlab)
# Read dataset again for the k-NN experiment
setwd('C:/Users/a310005/Desktop/DiTella/Data Mining/Clase_1')
getwd()
data <- read.csv('bankruptcy_data_red.csv', sep=';')
data <- na.omit(data)
head(data)
summary(data)
nrow(data)
ncol(data)
# Removing column to predict before standardising the features
std.data <- data[,-ncol(data)]
std.data <- na.omit(std.data)
# Convert every column to numeric, then centre/scale (k-NN is distance based)
std.data <- sapply( std.data, as.numeric )
std.data <- scale(std.data)
# Split in test and training data (300 random rows held out)
test_indexes <- sample(seq_len(nrow(std.data)), 300)
train_x <- std.data[-test_indexes,]
train_y <- data[-test_indexes, "class"]
test_x <- std.data[test_indexes,]
test_y <- data[test_indexes, "class"]
# Replacing NA with 0
# NOTE(review): scale() yields NaN for zero-variance columns; zeroing them
# keeps knn() from failing, but confirm this is the intended handling.
train_x[is.na(train_x)] = 0
train_y[is.na(train_y)] = 0
test_x[is.na(test_x)] = 0
test_y[is.na(test_y)] = 0
# Knn model training / prediction with k = 5
knn_predictions <- knn(train_x, test_x, train_y, k=5)
# Model prediction accuracy analysis in test data
table(preds = knn_predictions, actual = test_y)
print(mean(test_y == knn_predictions))
# Model prediction accuracy analysis in training data (train set as query set)
print(mean(train_y == knn(train_x, train_x, train_y, k=5)))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# How does k-NN perform for different values of k?
# Sweeps k over k_vals, recording train and test accuracy, then plots both.
library(ggplot2)
library(reshape2)
train_acc <- c()
test_acc <- c()
k_vals <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
            15, 30, 50, 75, 100, 125, 150)
for (k in k_vals) {
  print(k)
  tmp_tr_pred <- knn(train_x, train_x, train_y, k)
  tmp_ts_pred <- knn(train_x, test_x, train_y, k)
  # Bug fix: the second print below previously repeated the TRAINING
  # accuracy; it now reports the TEST accuracy as intended.
  print(mean(train_y == tmp_tr_pred))
  print(mean(test_y == tmp_ts_pred))
  train_acc <- c(train_acc, mean(train_y == tmp_tr_pred))
  test_acc <- c(test_acc, mean(test_y == tmp_ts_pred))
}
experiment_data <- data.frame(k = k_vals, train_acc, test_acc)
print(experiment_data)
# Reshape to long form so both accuracy curves share one plot
plot_data <- melt(experiment_data, id.vars="k", value.name="Accuracy")
ggplot(data=plot_data, aes(x=k, y=Accuracy, col=variable)) + geom_line()
max(test_acc)
# (Translated:) k = 9 is the best k, with test_acc = 0.7833333
|
17aaa3dba6447073f1cd04ac8ffde0ad3a704904
|
54ffa208f4de8d19504ee4194e30eb9f4d091a34
|
/Final_Project/Buecherl_FinalProject.R
|
7c7796cfa5b3a19f3e4bc91c4bcbf1a6b1d6028e
|
[] |
no_license
|
LukasBuecherl/CompBioLabsAndHW
|
a3250bbf23033ceb27d50862da9d0a344396d410
|
ad2e4052517fa287b7c87b04e3bcb11d031d2049
|
refs/heads/main
| 2023-05-01T18:20:52.945081
| 2021-04-29T17:10:10
| 2021-04-29T17:10:10
| 334,219,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,471
|
r
|
Buecherl_FinalProject.R
|
# EBIO 5420: Computational Biology
# Professor: Samuel Flaxman
# Student: Lukas Buecherl
# Final Project
###############################################################################
#
# CODE WRITTEN BY LUKAS BUECHERL FOR THE FINAL PROJECT FOR COURSE EBIO 5420.
# CONTACT: lukas.buecherl@colorado.edu
# GitHub of project: https://github.com/LukasBuecherl/CompBioLabsAndHW/tree/main/Final_Project
# GitHub of assingment: https://github.com/flaxmans/CompBio_on_git/blob/main/Assignments/09_Independent_Project_Step2.md
#
# Introduction:
# Human influence on the climate system of Earth may result in dangers for various
# aspects of life on our planet [1]. One way to quantify the effects of climate
# change on our planet is by looking at the planets forests. More specifically,
# the growth rate of trees is an indicator to study the effects of the climate on
# the trees over a longer time period [2]. In this final project, I will analyze
# the ring width of five common species of trees in the Rocky Mountains in the
# United States. My driving questions for this project are:
#
# 1. Comparing the growth rate in relation to the elevation of the individuals
# among five different tree species
# 2. Comparing the growth rate in relation to the location (latitude) of the
# individuals among five different tree species
#
# The following code will prune the dataset, calculate the average growth of
# the ring width and stem diamater for each species for each year and plot the
# results seperated by latitude and elevation. The original data was published
# in [2].
#
# References
# 1. Lorenzoni, I., Pidgeon, N.F. Public Views on Climate Change: European and USA Perspectives. Climatic Change 77, 73–95 (2006). https://doi.org/10.1007/s10584-006-9072-z
# 2. Buechling, A., Martin, P.H. and Canham, C.D., Climate and competition effects on tree growth in Rocky Mountain forests. J Ecol, 105: 1636-1647 (2017). https://doi.org/10.1111/1365-2745.12782
#
###############################################################################
# Clear Environment
rm(list = ls())
#################################################################
# Step 0: Get packages
#################################################################
# Setting the path for the working directory (Needs to be adjusted)
setwd(
"/Users/lukas/Library/Mobile Documents/com~apple~CloudDocs/CU Boulder/Spring 2021/Computational Biology/CompBioLabsAndHW/Final_Project"
)
# Require needed packages
require("tidyverse")
require("ggpubr")
library("ggplot2")
library("dplyr")
# Source the lib file for accessing functions
source("lib.R")
#################################################################
# Step 1: Clean and check data
#################################################################
# Read in the data
WoodData <-
read.csv(
"sample_tree_growth_data_with_locations.csv",
stringsAsFactors = F
)
# Checking for NA
if (length(which(is.na(WoodData))) != 0) {
print("NAs found")
NA_index <- which(is.na(WoodData))
}
# Data overview
str(WoodData)
# Extract the important data for the project (ID, Secies, Year, Latitude, Elevation, Ring width, Stem diameter)
# Sort the data according to the tree ID and the year
Wood_Data_Im <-
select(
WoodData,
Sample.tree.ID,
Species,
Year,
Latitude,
Elevation..m.,
Ring.width..mm.,
Stem.diameter..cm.
) %>%
arrange(Sample.tree.ID, Year)
# Check for duplicated rows, if found delete duplicate rows
if (length(which(duplicated(Wood_Data_Im) == TRUE)) != 0) {
Wood_Data_Im <- Wood_Data_Im %>% distinct()
}
# Check if numbers of entries for each year match (i.e. for every year there should be the same number of rows)
# Preallocate a vector and count all the occurrences belonging to a year
years <- unique(Wood_Data_Im$Year) %>% sort(decreasing = FALSE)
occ_years <- rep(NA, length(years))
# Find all rows for year (i.e. year = 1992 find all rows with year 1992)
for (i in 1:length(years)) {
occ_years[i] <- length(which(Wood_Data_Im$Year == years[i]))
}
# If not all the years have the same number of rows
if (length(unique(occ_years)) != 1) {
# Select the year with the fewest rows
fewest_year <- years[which.min(occ_years)]
# Find the index of the individual tree that is missing a row
# Iterate over the whole dataset
# Check for every row after the row of the fewest year if the previous row is the fewest year
# i.e. if fewest year is 1992 check for every row of 1993 if the row before it is 1992
# If that is not the case save the index
for (i in 1:length(Wood_Data_Im$Year)) {
if (Wood_Data_Im$Year[i] == fewest_year + 1) {
if (Wood_Data_Im$Year[i - 1] != fewest_year) {
index <- i
}
}
}
}
# Delete the rows of the individual with the missing entry
# Calculate the number of rows that need to be deleted
del_rows <- max(years) - min(years) - 1
# Delete the rows from the found index up to the last entry of that individual tree
Wood_Data_Im <- Wood_Data_Im[-1 * seq(i, i + del_rows), ]
# There are many typos in the dataset. With the delete function the user can
# specify the minimum ring width (mm) and the minimum stem diameter (cm) that
# the individual trees are required to have
# i.e. some trees have one entry of 0.0006 mm as ring width which impossible
Wood_Data_Im <- Delete(Wood_Data_Im, 0.05, 0)
#################################################################
# Step 2: Reformatting the data and calculating the growth rate
#################################################################
# Store the names of the species
Species_names <- unique(Wood_Data_Im$Species)
# Copy the data and append two columns to hold the per-year growth rates
# of the ring width and the stem diameter
Wood_Data_Growth_Rate <-
  cbind(Wood_Data_Im, matrix(nrow = dim(Wood_Data_Im)[1], ncol = 2))
colnames(Wood_Data_Growth_Rate) <-
  c(colnames(Wood_Data_Im), "GrowthRateRing", "GrowthRateStem")
# Calculate the growth rate of the ring width and the stem diameter by
# looping over the whole data frame. Rows are sorted by tree ID then year
# (Step 1), so row i-1 is the same tree's previous year -- except at each
# tree's first year, which is guarded below.
for (i in 1:dim(Wood_Data_Growth_Rate)[1]) {
  # The first year has no previous year for reference, so set the rate to 0
  # (these rows are excluded from the plots later)
  if (Wood_Data_Growth_Rate$Year[i] == min(Wood_Data_Growth_Rate$Year)) {
    Wood_Data_Growth_Rate$GrowthRateRing[i] = 0
    Wood_Data_Growth_Rate$GrowthRateStem[i] = 0
  } else{
    # Relative growth rate:
    # (MeasurementCurrentYear - MeasurementYearBefore) / |MeasurementYearBefore|
    # stored in the two appended columns
    Wood_Data_Growth_Rate$GrowthRateRing[i] = (
      Wood_Data_Growth_Rate$Ring.width..mm.[i] - Wood_Data_Growth_Rate$Ring.width..mm.[i - 1]
    ) / abs(Wood_Data_Growth_Rate$Ring.width..mm.[i - 1])
    Wood_Data_Growth_Rate$GrowthRateStem[i] = (
      Wood_Data_Growth_Rate$Stem.diameter..cm.[i] - Wood_Data_Growth_Rate$Stem.diameter..cm.[i - 1]
    ) / abs(Wood_Data_Growth_Rate$Stem.diameter..cm.[i - 1])
  }
}
#################################################################
# Step 3: Analyzing the Ring Growth Rate by Latitude
#################################################################
# Select a threshold for the Latitude (I chose the median) that is used to divide the data in two categories (one below and one above the threshold)
latitude_treshold <- round(median(Wood_Data_Growth_Rate$Latitude))
# Condition for separating the data
con <- Wood_Data_Growth_Rate$Latitude >= latitude_treshold
# Calculates the average of all individuals per species per year that fulfill the condition (i.e. returns a matrix with rows = number of years and col = number of species
# and the rows contain the average growth rate in that year of individuals of the different species that fulfill the condition (here latitude >= 40))
Ring_Growth_Lat_over_40 <-
Calculate_Growth_Ring(Wood_Data_Growth_Rate, con, Species_names)
# Condition for separating the data
con <- Wood_Data_Growth_Rate$Latitude < latitude_treshold
# Calculates the average of all individuals per species per year that fulfill the condition
Ring_Growth_Lat_under_40 <-
Calculate_Growth_Ring(Wood_Data_Growth_Rate, con, Species_names)
# First plot for fulfilling the first condition (growth rate of trees below the threshold)
p_ring_under_40 <-
ggplot(data.frame(Ring_Growth_Lat_under_40), aes(x = Year)) +
geom_line(aes(y = Ring_Growth_Lat_under_40[, 2], color = colnames(Ring_Growth_Lat_under_40)[2])) +
geom_line(aes(y = Ring_Growth_Lat_under_40[, 3], color = colnames(Ring_Growth_Lat_under_40)[3])) +
geom_line(aes(y = Ring_Growth_Lat_under_40[, 4], color = colnames(Ring_Growth_Lat_under_40)[4])) +
geom_line(aes(y = Ring_Growth_Lat_under_40[, 5], color = colnames(Ring_Growth_Lat_under_40)[5])) +
geom_line(aes(y = Ring_Growth_Lat_under_40[, 6], color = colnames(Ring_Growth_Lat_under_40)[6])) +
scale_color_discrete(name = "Species") +
ggtitle("Ring Growth Rate compared by year \n in the South") +
xlab("Year") + ylab("Ring Width Increase") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
# Second plot for fulfilling the second condition (growth rate of trees above the threshold)
p_ring_over_40 <-
ggplot(data.frame(Ring_Growth_Lat_over_40), aes(x = Year)) +
geom_line(aes(y = Ring_Growth_Lat_over_40[, 2], color = colnames(Ring_Growth_Lat_over_40)[2])) +
geom_line(aes(y = Ring_Growth_Lat_over_40[, 3], color = colnames(Ring_Growth_Lat_over_40)[3])) +
geom_line(aes(y = Ring_Growth_Lat_over_40[, 4], color = colnames(Ring_Growth_Lat_over_40)[4])) +
geom_line(aes(y = Ring_Growth_Lat_over_40[, 5], color = colnames(Ring_Growth_Lat_over_40)[5])) +
scale_color_discrete(name = "Species") +
ggtitle("Ring Growth Rate compared by year \n in the North") +
xlab("Year") + ylab("Ring Width Increase") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
# Arrange the two plots in one figure
figure_ring_latitude <- ggarrange(
p_ring_under_40,
p_ring_over_40,
labels = c("A", "B"),
ncol = 2,
nrow = 1
)
# Show the figure
#figure_ring_latitude
#################################################################
# Step 4: Analyzing the Ring Growth Rate by Elevation
#################################################################
# Selecting two thresholds to separate the data in three parts according to their elevation
# I divided the data in three equal parts
min_treshold_elevation <- 2000
max_treshold_elevation <- 3000
# Condition for separating the data
con <- Wood_Data_Growth_Rate$Elevation..m. < min_treshold_elevation
# Calculates the average of all individuals per species per year that fulfill the condition (i.e. returns a matrix with rows = number of years and col = number of species
# and the rows contain the average growth rate in that year of individuals of the different species that fulfill the condition (i.e. elevation < 2000))
Ring_Growth_Ele_under_2000 <-
Calculate_Growth_Ring(Wood_Data_Growth_Rate, con, Species_names)
# Condition for separating the data
con <-
(
Wood_Data_Growth_Rate$Elevation..m. > min_treshold_elevation &
Wood_Data_Growth_Rate$Elevation..m. < max_treshold_elevation
)
# Calculates the average of all individuals per species per year that fulfill the condition
Ring_Growth_Ele_over_2000 <-
Calculate_Growth_Ring(Wood_Data_Growth_Rate, con, Species_names)
# Condition for separating the data
con <- Wood_Data_Growth_Rate$Elevation..m. > max_treshold_elevation
# Calculates the average of all individuals per species per year that fulfill the condition
Ring_Growth_Ele_over_3000 <-
Calculate_Growth_Ring(Wood_Data_Growth_Rate, con, Species_names)
# First plot for fulfilling the first condition (growth rate of trees below the min threshold)
p_ring_under_2000 <-
ggplot(data.frame(Ring_Growth_Ele_under_2000), aes(x = Year)) +
geom_line(aes(y = Ring_Growth_Ele_under_2000[, 2],color = colnames(Ring_Growth_Ele_under_2000)[2])) +
geom_line(aes(y = Ring_Growth_Ele_under_2000[, 3],color = colnames(Ring_Growth_Ele_under_2000)[3])) +
geom_line(aes(y = Ring_Growth_Ele_under_2000[, 4],color = colnames(Ring_Growth_Ele_under_2000)[4])) +
geom_line(aes(y = Ring_Growth_Ele_under_2000[, 5],color = colnames(Ring_Growth_Ele_under_2000)[5])) +
geom_line(aes(y = Ring_Growth_Ele_under_2000[, 6],color = colnames(Ring_Growth_Ele_under_2000)[6])) +
scale_color_discrete(name = "Species") +
ggtitle(sprintf("Ring Growth Rate compared by year \n under %s m",min_treshold_elevation)) +
xlab("Year") + ylab("Ring Width Increase") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
# Second plot for fulfilling the second condition (growth rate of trees between the thresholds)
p_ring_over_2000 <-
ggplot(data.frame(Ring_Growth_Ele_over_2000), aes(x = Year)) +
geom_line(aes(y = Ring_Growth_Ele_over_2000[, 2], color = colnames(Ring_Growth_Ele_over_2000)[2])) +
geom_line(aes(y = Ring_Growth_Ele_over_2000[, 3], color = colnames(Ring_Growth_Ele_over_2000)[3])) +
geom_line(aes(y = Ring_Growth_Ele_over_2000[, 4], color = colnames(Ring_Growth_Ele_over_2000)[4])) +
geom_line(aes(y = Ring_Growth_Ele_over_2000[, 5], color = colnames(Ring_Growth_Ele_over_2000)[5])) +
geom_line(aes(y = Ring_Growth_Ele_over_2000[, 6], color = colnames(Ring_Growth_Ele_over_2000)[6])) +
scale_color_discrete(name = "Species") +
ggtitle(sprintf("Ring Growth Rate of compared by year \n between %s m and %s m",min_treshold_elevation,max_treshold_elevation)) +
xlab("Year") + ylab("Ring Width Increase") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
# Third plot for fulfilling the second condition (growth rate of trees above the max threshold)
p_ring_over_3000 <-
ggplot(data.frame(Ring_Growth_Ele_over_3000), aes(x = Year)) +
geom_line(aes(y = Ring_Growth_Ele_over_3000[, 2], color = colnames(Ring_Growth_Ele_over_3000)[2])) +
geom_line(aes(y = Ring_Growth_Ele_over_3000[, 3], color = colnames(Ring_Growth_Ele_over_3000)[3])) +
geom_line(aes(y = Ring_Growth_Ele_over_3000[, 4], color = colnames(Ring_Growth_Ele_over_3000)[4])) +
geom_line(aes(y = Ring_Growth_Ele_over_3000[, 5], color = colnames(Ring_Growth_Ele_over_3000)[5])) +
geom_line(aes(y = Ring_Growth_Ele_over_3000[, 6], color = colnames(Ring_Growth_Ele_over_3000)[6])) +
scale_color_discrete(name = "Species") +
ggtitle(sprintf("Ring Growth Rate compared by year \n over %s m",max_treshold_elevation)) +
xlab("Year") + ylab("Ring Width Increase") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
# Arrange the three plots in one figure
figure_ring_elevation <- ggarrange(
p_ring_under_2000,
p_ring_over_2000,
p_ring_over_3000,
labels = c("A", "B", "C"),
ncol = 1,
nrow = 3
)
# Show the figure
# figure_ring_elevation
#################################################################
# Step 5: Analyzing the Stem Growth Rate by Latitude
#################################################################
# Select a threshold for the Latitude (I chose the median) that is used to divide the data in two categroies (one below and one above the threshold)
latitude_treshold <- round(median(Wood_Data_Growth_Rate$Latitude))
# Condition for separating the data
con <- Wood_Data_Growth_Rate$Latitude >= latitude_treshold
# Calculates the average of all individuals per species per year that fulfill the condition
Stem_Growth_Lat_over_40 <-
Calculate_Growth_Stem(Wood_Data_Growth_Rate, con, Species_names)
# Condition for separating the data
con <- Wood_Data_Growth_Rate$Latitude < latitude_treshold
# Calculates the average of all individuals per species per year that fulfill the condition
# NOTE(review): `con`, `Species_names`, `Wood_Data_Growth_Rate` and
# `Stem_Growth_Lat_over_40` are created earlier in this script (outside this
# section); `con` is presumably the logical latitude mask -- confirm against
# the preceding latitude split.
Stem_Growth_Lat_under_40 <-
  Calculate_Growth_Stem(Wood_Data_Growth_Rate, con, Species_names)
# First plot for fulfilling the first condition (growth rate of trees below the threshold)
# Column 1 is Year; columns 2..6 each hold one averaged per-species series.
# NOTE(review): mapping `df[, i]` inside aes() works but is fragile; a
# long-format pivot would be sturdier (verify legend labels before changing).
p_stem_under_40 <-
  ggplot(data.frame(Stem_Growth_Lat_under_40), aes(x = Year)) +
  geom_line(aes(y = Stem_Growth_Lat_under_40[, 2], color = colnames(Stem_Growth_Lat_under_40)[2])) +
  geom_line(aes(y = Stem_Growth_Lat_under_40[, 3], color = colnames(Stem_Growth_Lat_under_40)[3])) +
  geom_line(aes(y = Stem_Growth_Lat_under_40[, 4], color = colnames(Stem_Growth_Lat_under_40)[4])) +
  geom_line(aes(y = Stem_Growth_Lat_under_40[, 5], color = colnames(Stem_Growth_Lat_under_40)[5])) +
  geom_line(aes(y = Stem_Growth_Lat_under_40[, 6], color = colnames(Stem_Growth_Lat_under_40)[6])) +
  scale_color_discrete(name = "Species") +
  ggtitle("Stem Growth Rate compared by year \n in the South") +
  xlab("Year") + ylab("Stem Diameter Increase") +
  theme_minimal() +
  theme(plot.title = element_text(hjust = 0.5))
# Second plot for fulfilling the second condition (growth rate of trees above the threshold)
# Only columns 2..5 are plotted here, i.e. the northern subset has one series
# fewer than the southern one -- TODO confirm that is intended and not an
# accidentally dropped species.
p_stem_over_40 <-
  ggplot(data.frame(Stem_Growth_Lat_over_40), aes(x = Year)) +
  geom_line(aes(y = Stem_Growth_Lat_over_40[, 2], color = colnames(Stem_Growth_Lat_over_40)[2])) +
  geom_line(aes(y = Stem_Growth_Lat_over_40[, 3], color = colnames(Stem_Growth_Lat_over_40)[3])) +
  geom_line(aes(y = Stem_Growth_Lat_over_40[, 4], color = colnames(Stem_Growth_Lat_over_40)[4])) +
  geom_line(aes(y = Stem_Growth_Lat_over_40[, 5], color = colnames(Stem_Growth_Lat_over_40)[5])) +
  scale_color_discrete(name = "Species") +
  ggtitle("Stem Growth Rate compared by year \n in the North") +
  xlab("Year") + ylab("Stem Diameter Increase") +
  theme_minimal() +
  theme(plot.title = element_text(hjust = 0.5))
# Arrange the two plots side by side in one labelled (A/B) figure
figure_stem_latitude <- ggarrange(
  p_stem_under_40,
  p_stem_over_40,
  labels = c("A", "B"),
  ncol = 2,
  nrow = 1
)
# Show the figure
#figure_stem_latitude
#################################################################
# Step 6: Analyzing the Stem Growth Rate by Elevation
#################################################################
# Selecting two thresholds to separate the data in three parts according to their elevation
min_treshold_elevation <- 2000
max_treshold_elevation <- 3000
# Condition for separating the data: below the lower threshold.
# NOTE(review): the three masks use strict < and >, so trees at exactly
# 2000 m or 3000 m satisfy none of them and are silently dropped -- confirm
# that boundary handling is intended.
con <- Wood_Data_Growth_Rate$Elevation..m. < min_treshold_elevation
# Calculates the average of all individuals per species per year that fulfill the condition
Stem_Growth_Ele_under_2000 <-
  Calculate_Growth_Stem(Wood_Data_Growth_Rate, con, Species_names)
# Condition for separating the data: strictly between the two thresholds.
con <-
  (
    Wood_Data_Growth_Rate$Elevation..m. > min_treshold_elevation &
      Wood_Data_Growth_Rate$Elevation..m. < max_treshold_elevation
  )
# Calculates the average of all individuals per species per year that fulfill the condition
Stem_Growth_Ele_over_2000 <-
  Calculate_Growth_Stem(Wood_Data_Growth_Rate, con, Species_names)
# Condition for separating the data: above the upper threshold.
con <- Wood_Data_Growth_Rate$Elevation..m. > max_treshold_elevation
# Calculates the average of all individuals per species per year that fulfill the condition
Stem_Growth_Ele_over_3000 <-
  Calculate_Growth_Stem(Wood_Data_Growth_Rate, con, Species_names)
# First plot for fulfilling the first condition (growth rate of trees below the min threshold).
# Column 1 is Year; columns 2..6 are the averaged per-species series.
p_stem_under_2000 <-
  ggplot(data.frame(Stem_Growth_Ele_under_2000), aes(x = Year)) +
  geom_line(aes(y = Stem_Growth_Ele_under_2000[, 2],color = colnames(Stem_Growth_Ele_under_2000)[2])) +
  geom_line(aes(y = Stem_Growth_Ele_under_2000[, 3],color = colnames(Stem_Growth_Ele_under_2000)[3])) +
  geom_line(aes(y = Stem_Growth_Ele_under_2000[, 4],color = colnames(Stem_Growth_Ele_under_2000)[4])) +
  geom_line(aes(y = Stem_Growth_Ele_under_2000[, 5],color = colnames(Stem_Growth_Ele_under_2000)[5])) +
  geom_line(aes(y = Stem_Growth_Ele_under_2000[, 6],color = colnames(Stem_Growth_Ele_under_2000)[6])) +
  scale_color_discrete(name = "Species") +
  # sprintf() injects the threshold so title and mask cannot drift apart
  ggtitle(sprintf("Stem Growth Rate compared by year \n under %s m ",min_treshold_elevation)) +
  xlab("Year") + ylab("Stem Diameter Increase") +
  theme_minimal() +
  theme(plot.title = element_text(hjust = 0.5))
# Second plot for fulfilling the second condition (growth rate of trees between the thresholds)
p_stem_over_2000 <-
  ggplot(data.frame(Stem_Growth_Ele_over_2000), aes(x = Year)) +
  geom_line(aes(y = Stem_Growth_Ele_over_2000[, 2], color = colnames(Stem_Growth_Ele_over_2000)[2])) +
  geom_line(aes(y = Stem_Growth_Ele_over_2000[, 3], color = colnames(Stem_Growth_Ele_over_2000)[3])) +
  geom_line(aes(y = Stem_Growth_Ele_over_2000[, 4], color = colnames(Stem_Growth_Ele_over_2000)[4])) +
  geom_line(aes(y = Stem_Growth_Ele_over_2000[, 5], color = colnames(Stem_Growth_Ele_over_2000)[5])) +
  geom_line(aes(y = Stem_Growth_Ele_over_2000[, 6], color = colnames(Stem_Growth_Ele_over_2000)[6])) +
  scale_color_discrete(name = "Species") +
  ggtitle(sprintf("Stem Growth Rate compared by year \n between %s m and %s m",min_treshold_elevation,max_treshold_elevation)) +
  xlab("Year") + ylab("Stem Diameter Increase") +
  theme_minimal() +
  theme(plot.title = element_text(hjust = 0.5))
# Third plot for fulfilling the third condition (growth rate of trees above the max threshold)
p_stem_over_3000 <-
  ggplot(data.frame(Stem_Growth_Ele_over_3000), aes(x = Year)) +
  geom_line(aes(y = Stem_Growth_Ele_over_3000[, 2], color = colnames(Stem_Growth_Ele_over_3000)[2])) +
  geom_line(aes(y = Stem_Growth_Ele_over_3000[, 3], color = colnames(Stem_Growth_Ele_over_3000)[3])) +
  geom_line(aes(y = Stem_Growth_Ele_over_3000[, 4], color = colnames(Stem_Growth_Ele_over_3000)[4])) +
  geom_line(aes(y = Stem_Growth_Ele_over_3000[, 5], color = colnames(Stem_Growth_Ele_over_3000)[5])) +
  geom_line(aes(y = Stem_Growth_Ele_over_3000[, 6], color = colnames(Stem_Growth_Ele_over_3000)[6])) +
  scale_color_discrete(name = "Species") +
  ggtitle(sprintf("Stem Growth Rate compared by year \n over %s m",max_treshold_elevation)) +
  xlab("Year") + ylab("Stem Diameter Increase") +
  theme_minimal() +
  theme(plot.title = element_text(hjust = 0.5))
# Arrange the three plots in one stacked figure, labelled A/B/C
figure_stem_elevation <- ggarrange(
  p_stem_under_2000,
  p_stem_over_2000,
  p_stem_over_3000,
  labels = c("A", "B", "C"),
  ncol = 1,
  nrow = 3
)
# Show the figure
#figure_stem_elevation
#################################################################
# Step 7: Showing the figures
#################################################################
# Render every summary figure built above, in order.
all_figures <- list(
  figure_ring_latitude,
  figure_ring_elevation,
  figure_stem_latitude,
  figure_stem_elevation
)
for (current_figure in all_figures) {
  print(current_figure)
}
|
973d6821151c9fb3c931ccef824147782f77c07d
|
b11a9a886f0809ab2e342134dc41da7b95e8b422
|
/R/font.R
|
d3188ff3346a1e8bab0fdf09ca933b8e9927b360
|
[] |
no_license
|
kassambara/ggpubr
|
dbf17d6a921efe5e39b87ab566f3c9fd4f4ef047
|
6aeb4f701399929b130917e797658819c71a2304
|
refs/heads/master
| 2023-09-01T19:43:28.585371
| 2023-02-13T18:28:59
| 2023-02-13T18:28:59
| 63,722,465
| 1,041
| 195
| null | 2023-08-06T16:55:18
| 2016-07-19T19:35:48
|
R
|
UTF-8
|
R
| false
| false
| 3,861
|
r
|
font.R
|
#'Change the Appearance of Titles and Axis Labels
#'
#'@description Tweak the look of the main title, subtitle, caption, axis
#'  labels and tick texts, as well as the legend title and texts. This is a
#'  thin convenience wrapper around
#'  \code{\link[ggplot2:element]{element_text}()} that returns the matching
#'  \code{theme()} modification.
#'
#'@param object character string specifying the plot components. Allowed values
#'  include: \itemize{ \item \code{"title"} for the main title \item
#'  \code{"subtitle"} for the plot subtitle \item \code{"caption"} for the plot
#'  caption \item \code{"legend.title"} for the legend title \item
#'  \code{"legend.text"} for the legend text \item \code{"x", "xlab", or "x.title"}
#'  for x axis label \item \code{"y", "ylab", or "y.title"} for y axis label \item
#'  \code{"xy", "xylab", "xy.title" or "axis.title"} for both x and y axis
#'  labels \item \code{"x.text"} for x axis texts (x axis tick labels) \item
#'  \code{"y.text"} for y axis texts (y axis tick labels) \item \code{"xy.text"}
#'  or \code{"axis.text"} for both x and y axis texts }
#'
#'@param size numeric value specifying the font size, (e.g.: \code{size = 12}).
#'@param color character string specifying the font color, (e.g.: \code{color =
#'  "red"}).
#'@param face the font face or style. Allowed values include one of
#'  \code{"plain", "bold", "italic", "bold.italic"}, (e.g.: \code{face =
#'  "bold.italic"}).
#'@param family the font family.
#'@param ... other arguments to pass to the function
#'  \code{\link[ggplot2:element]{element_text}()}.
#'
#'@examples
#'# Load data
#'data("ToothGrowth")
#'
#'# Basic plot
#'p <- ggboxplot(ToothGrowth, x = "dose", y = "len", color = "dose",
#'               title = "Box Plot created with ggpubr",
#'               subtitle = "Length by dose",
#'               caption = "Source: ggpubr",
#'               xlab ="Dose (mg)", ylab = "Teeth length")
#'p
#'
#'# Change the appearance of titles and labels
#'p +
#' font("title", size = 14, color = "red", face = "bold.italic")+
#' font("subtitle", size = 10, color = "orange")+
#' font("caption", size = 10, color = "orange")+
#' font("xlab", size = 12, color = "blue")+
#' font("ylab", size = 12, color = "#993333")+
#' font("xy.text", size = 12, color = "gray", face = "bold")
#'
#'# Change the appearance of legend title and texts
#'p +
#' font("legend.title", color = "blue", face = "bold")+
#' font("legend.text", color = "red")
#'
#'@export
font <- function(object, size = NULL, color = NULL, face = NULL, family = NULL, ...){
  # Build the text specification once and reuse it for every target element.
  txt_spec <- element_text(size = size, color = color,
                           face = face, family = family, ...)
  # switch() empty alternatives fall through to the next non-empty one,
  # which keeps the alias groups (x/xlab/x.title, etc.) in one place.
  switch(object,
         title = theme(plot.title = txt_spec),
         subtitle = theme(plot.subtitle = txt_spec),
         caption = theme(plot.caption = txt_spec),
         # x-axis title aliases
         x = ,
         xlab = ,
         x.title = theme(axis.title.x = txt_spec),
         # y-axis title aliases
         y = ,
         ylab = ,
         y.title = theme(axis.title.y = txt_spec),
         # both axis titles
         xy = ,
         xylab = ,
         xy.title = ,
         axis.title = theme(axis.title.x = txt_spec, axis.title.y = txt_spec),
         # legend title aliases
         legendtitle = ,
         legend.title = theme(legend.title = txt_spec),
         # legend text aliases
         legendtext = ,
         legend.text = theme(legend.text = txt_spec),
         # Axis tick labels
         x.text = theme(axis.text.x = txt_spec),
         y.text = theme(axis.text.y = txt_spec),
         xy.text = ,
         yxtext = ,
         axis.text = theme(axis.text.x = txt_spec, axis.text.y = txt_spec),
         stop("Don't support ", object)
  )
}
|
18a311a3bba43462d20bb3f5c1915fb514489922
|
35385dd99e197efdb1c0b7ccc89dd44a03384af6
|
/NATreatment.r
|
8a58ebbee13f7c929382c709e52630c439c3a550
|
[] |
no_license
|
rphccf/Final-Project-Quantitative_Methods-Msc-Economics-Course-
|
872359240476c9ca185211eef35c63ba05f740bc
|
dae2883401cf536fffd2d0264ae1b906f4f84dce
|
refs/heads/master
| 2020-09-17T09:57:58.317476
| 2019-11-26T01:33:14
| 2019-11-26T01:33:14
| 224,070,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,569
|
r
|
NATreatment.r
|
require(xts)
## Drop leading rows until the first remaining row contains no NA.
## (Translated from the original Portuguese comment:) a NaN in the first row
## of the quotes downloaded from Yahoo would break the model, and the fix that
## loses the least information is to delete the first row of the quote base
## until the first row contains no NaN. Remaining NAs are handled by a second
## treatment (bodyNA) afterwards.
##
## d: a data frame (rows = dates, columns = quote series).
## Returns: d with its leading NA-containing rows removed (possibly 0 rows).
firstLineNA <- function(d) {
  ## Guard on nrow(): without it, an input whose rows all contain NA loops
  ## forever, because indexing row 1 of a zero-row data frame still yields an
  ## all-NA row while d[-1, ] removes nothing.
  while (nrow(d) > 0 && anyNA(d[1, ])) {
    d <- d[-1, ]
  }
  d
}
## Forward-fill missing quotes ("last observation carried forward").
## (Translated from the original Portuguese comment:) bodyNA walks the data
## downloaded from Yahoo starting at row 2 -- row 1 was already cleaned by
## firstLineNA() -- and replaces each NaN in a given exchange's quote with its
## last valid quote. This noticeably improved the model fit: dropping every
## row containing a NaN would have lost 15-20% of the observations.
##
## d: a data frame (rows = dates, columns = quote series; >= 5 columns for
##    the last-row treatment below).
## Returns: d with NAs filled forward.
bodyNA <- function(d) {
  n_rows <- nrow(d)
  ## Fill rows 2 .. (today - 1) across all columns.
  ## Guard: the original `2:(nrow(d) - 1)` counts DOWN when nrow(d) < 3
  ## (e.g. 2:0) and then indexes out of bounds, so the fill is skipped
  ## entirely for such tiny inputs.
  if (n_rows > 2) {
    for (i in 2:(n_rows - 1)) {
      for (j in seq_len(ncol(d))) {
        ## Replace each NA with the value from the previous row, same column.
        if (is.na(d[i, j])) d[i, j] <- d[i - 1, j]
      }
    }
  }
  ## Last row (the current date): only columns 3-5 are carried forward,
  ## preserving the original treatment of the most recent trading day.
  ## Guarded so a 1-row or narrow frame cannot index row 0 / missing columns.
  if (n_rows >= 2 && ncol(d) >= 5) {
    for (j in 3:5) {
      if (is.na(d[n_rows, j])) d[n_rows, j] <- d[n_rows - 1, j]
    }
  }
  d
}
|
bf5a67d88105b555da8ee68e163328d7e7061459
|
2a675299288d5bf42795f125c76d90b5ec149158
|
/man/get_reported_financials.Rd
|
080c79c7f62b58c2b29eb98896ac069102d229d4
|
[] |
no_license
|
atamalu/finntools
|
6cd25a84eb696b09724f16942e5589b1b1d7fda4
|
6b8f911db2f34fd57506be6c6c43dbef9c591ffc
|
refs/heads/master
| 2022-11-16T14:29:34.381913
| 2020-07-15T21:59:42
| 2020-07-15T21:59:42
| 277,924,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 917
|
rd
|
get_reported_financials.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_reported_financials.R
\name{get_reported_financials}
\alias{get_reported_financials}
\title{retrieve reported financial data}
\usage{
get_reported_financials(
symbol,
api.key,
frequency = "annual",
write.file = FALSE
)
}
\arguments{
\item{symbol}{the stock symbol to retrieve data for}
\item{api.key}{your API token from finnhub.io}
\item{frequency}{the time period encompassed by the report(s)}
\item{write.file}{should the table be written to the "reported_financials" folder?}
}
\value{
a data frame of reported financial information for the specified company
}
\description{
`get_reported_financials` retrieves financial data through a company's filings
}
\examples{
\donttest{
### Get financial report data for Apple
get_reported_financials(symbol = "AAPL", api.key = api.key, frequency = c("annual", "quarterly"))
}
}
|
25984537ee2a63cd83ef1a8d7bafb526565b0030
|
364ddfeadc15a6861f4372bdaf67c8b3df19ec81
|
/Graphing-Data-with-Quadrants.R
|
095132a7ea866b09a57b532bb8d7ef76172462ed
|
[] |
no_license
|
ttitamu/ctrma-txtag-dash
|
c0fdfcdd5f5b7489603c595f0348f9d1ef1e2cc8
|
50d5deddf23d90e34e09120fcec8dded48377182
|
refs/heads/master
| 2021-05-14T08:40:39.345146
| 2018-03-02T16:08:45
| 2018-03-02T16:08:45
| 116,305,751
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,463
|
r
|
Graphing-Data-with-Quadrants.R
|
# Graphing Data with Quadrants & Labels
# Michelle Plunkett
# January 4, 2018
# Load required packages ----
# pacman::p_load() installs missing packages and attaches everything listed.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(ggplot2, installr, data.table, DT, ggrepel)
# Get the user's home directory path (Defining this value allows other users to easily run this script)
home <- path.expand("~")
# Determine if computer running this script is on windows, if not, don't change path to backslashes
if (Sys.info()["sysname"]=="Windows") {
  home <- as.character(gsub("/","\\\\",home))
} else {
  home <- as.character(home)
}
# Set project directory (separator style matches the OS branch chosen above)
if (Sys.info()["sysname"]=="Windows") {
  projectdir <- "\\Projects\\Graph Making\\CTRMA Data\\"
  wd <- paste0(home,projectdir)
} else {
  projectdir <- "/Projects/Graph Making/CTRMA Data/"
  wd <- paste0(home,projectdir)
}
setwd(wd)
# Import the data file
# NOTE(review): `wd` already ends with a path separator, so paste0() is the
# right joiner here (file.path() would double the separator).
datapts <- read.csv(paste0(wd,"ChrisNeedsHelp.csv"), stringsAsFactors = F)
# Rename columns: turn the dots read.csv put into the headers back into spaces
names(datapts) <- gsub("\\."," ",colnames(datapts))
# Get median values for x & y columns -- these medians define the quadrant
# split lines drawn further below
xmed <- median(datapts$`Respondent Usability Score`)
ymed <- median(datapts$`Number of Respondents`)
# Graph the data with the "ggplot2" package & "ggrepel" for the labels
# Reference: https://stackoverflow.com/a/35510338
p <- ggplot(datapts, aes(label=`Function Code`, x=`Respondent Usability Score`, y=`Number of Respondents`)) +
  geom_point() +
  geom_text_repel() +
  scale_x_continuous(limits=c(1,10), breaks=seq(1:10)) +
  scale_y_sqrt(minor_breaks=seq(0,1200,100),breaks=seq(0,1200,200),limits=c(0,1200), expand=c(0,0)) +
  theme_minimal() +
  coord_cartesian() +
  geom_vline(xintercept = xmed) +
  geom_hline(yintercept = ymed)
# Add quadrant numbers to plot
# NOTE(review): label x positions mix computed offsets ((10-xmed)/2) with the
# hard-coded 8.25; y positions mix computed with the hard-coded 35 -- these
# were presumably tuned by eye for this dataset, so re-check them if the data
# or axis limits change.
qcolor <- "blue" # define color here to easily change label color
p <- p + annotate("text",x=((10-xmed)/2),y=((1200-ymed)/2),label="1", color=qcolor, size=9) +
  annotate("text",x=8.25,y=((1200-ymed)/2),label="4", color=qcolor, size=9) +
  annotate("text",x=((10-xmed)/2),y=35,label="2",color=qcolor,size=9)+
  annotate("text",x=8.25,y=35,label="3",color=qcolor,size=9)
# View the plot
p
# Export the plot
fname <- paste0("Plot-v4-",qcolor) # define filename of exported plot
ftype <- ".png" # define desired file type
ggsave(paste0(fname,ftype), p, width=11, height=8.5, dpi=600)
# NOTE(review): embed_fonts() operates on PDF/PostScript output; calling it on
# a PNG likely has no useful effect -- confirm before relying on the
# "-embed" file.
embed_fonts(paste0(fname,ftype), outfile=paste0(fname,"-embed",ftype)) # Embeds font in PDF if font changed from default
|
64bdc37b1301e9ba3e62660fe7668971de2a7e04
|
167c33afe106c8e8e1c2e7c9b3859e096f95add1
|
/R/plot_metrics.R
|
6e6dd9c9365020bd575fd0adad1c30d7f8d15ca6
|
[] |
no_license
|
paulhendricks/Rperform
|
a705d91f05059494c8e92a1b89c6fd3b4f3000be
|
aaf97e7907c5c6fe5a8e072cb7f6b78d959a961e
|
refs/heads/master
| 2021-01-16T21:57:30.189292
| 2016-05-19T10:45:04
| 2016-05-19T12:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,436
|
r
|
plot_metrics.R
|
#' Plot test-file metrics across versions.
#'
#' Given a test-file path, plot the metrics of entire file and individual
#' testthat blocks against the commit message summaries of the specified number
#' of commits in the current git repository. If the parameter save_data is set
#' to true, it also stores the corresponding data-frames in an RData file in a
#' folder 'Rperform_Data' in the current directory. The metrics plotted are in
#' accordance with those specified using the parameter metric.
#'
#' @param test_path File-path of the test-file which is to be used for run-time
#'   comparisons.
#' @param metric Type of plot(s) desired. This can be set to \code{time},
#'   \code{memory}, \code{memtime} or \code{testMetrics}. An unsupported value
#'   raises an error. (See examples below for more details)
#' @param num_commits Number of commits (versions) against which the file is to
#'   be tested, with default being 5.
#' @param save_data If set to TRUE, the data frame containing the metrics
#'   information is stored in the 'Rperform_Data' directory in the root of the
#'   repo. (default set to FALSE)
#' @param save_plots If set to TRUE, the plots generated are stored in the
#'   'Rperform_plots' directory in the root of the repo rather than being
#'   printed. (default set to TRUE)
#'
#' @examples
#'
#' \dontrun{
#' # Set the current directory to the git repository concerned.
#' setwd("./Path/to/repository")
#'
#' # Specify the test-file path
#' t_path <- "Path/to/file"
#'
#' # Load the library
#' library(Rperform)
#'
#' ## Example-1
#'
#' # Pass the parameters and obtain the run-time followed by memory details against 10 commits
#' plot_metrics(test_path = t_path, metric = "time", n_commits = 10, save_data = F)
#' plot_metrics(test_path = t_path, metric = "memory", n_commits = 10, save_data = F)
#'
#' ## Example-2
#'
#' # Obtain both memory and time metrics for each individual testthat block
#' # inside a file and the file itself. The plots get stored in a directory
#' # 'Rperform_Graphs' in the repo's root directory.
#' plot_metrics(test_path = t_path, metric = "testMetrics", n_commits = 5, save_data = F)
#' }
#'
#' @section WARNING:
#' Function assumes the current directory to be the root directory of the
#' repository/package being tested.
#'
plot_metrics <- function(test_path, metric, num_commits = 5, save_data = FALSE, save_plots = TRUE) {
  # Validate inputs early so failures are clear and cheap.
  stopifnot(is.character(test_path))
  stopifnot(length(test_path) == 1)
  stopifnot(is.character(metric))
  stopifnot(length(metric) == 1)
  stopifnot(is.numeric(num_commits))
  stopifnot(length(num_commits) == 1)
  stopifnot(is.logical(save_data))
  stopifnot(length(save_data) == 1)
  # Fail fast on an unsupported metric. The original silently ran nothing
  # for an unknown value and then errored obscurely on an undefined
  # temp_out; match.arg() gives a clear message (and accepts unambiguous
  # abbreviations, which is backward compatible for all valid inputs).
  metric <- match.arg(metric, c("time", "memory", "memtime", "testMetrics"))
  # capture.output() suppresses console noise from the worker functions;
  # the captured text itself is intentionally discarded.
  if (metric == "time") {
    temp_out <- capture.output(.plot_time(test_path, num_commits, save_data, save_plots))
  } else if (metric == "memory") {
    temp_out <- capture.output(.plot_mem(test_path, num_commits, save_data, save_plots))
  } else if (metric == "memtime") {
    temp_out <- capture.output(.plot_time(test_path, num_commits, save_data, save_plots))
    temp_out <- capture.output(.plot_mem(test_path, num_commits, save_data, save_plots))
  } else {
    # metric == "testMetrics" (guaranteed by match.arg above)
    temp_out <- capture.output(.plot_testMetrics(test_path, num_commits, save_data, save_plots))
  }
  # Called for its plotting side effects; return nothing, invisibly.
  invisible(NULL)
}
## -----------------------------------------------------------------------------------------
# Internal worker for plot_metrics(metric = "testMetrics").
# Gathers BOTH memory and time metrics for `test_path` over `num_commits`
# commits, then draws one faceted plot per testthat block (per level of
# test_name), optionally saving each as a PNG under ./Rperform_testMetrics.
# NOTE(review): assumes mem_compare()/time_compare() return long-format frames
# with columns test_name, message, metric_name, metric_val -- defined
# elsewhere in the package; confirm before changing.
.plot_testMetrics <- function(test_path, num_commits = 5, save_data = FALSE, save_plots) {
  # Messages emitted by the compare helpers are suppressed.
  suppressMessages(mem_data <- mem_compare(test_path, num_commits))
  suppressMessages(time_data <- time_compare(test_path, num_commits))
  # Store the metrics data if save_data is TRUE
  if (save_data){
    # Store the metric data
    .save_data(time_data, pattern = "*.[rR]$", replacement = "_time.RData",
               replace_string = basename(test_path))
    .save_data(mem_data, pattern = "*.[rR]$", replacement = "_mem.RData",
               replace_string = basename(test_path))
  }
  # Stack the two metric frames so one plot can facet on metric_name.
  metric_data <- rbind(time_data, mem_data)
  t_names <- levels(metric_data$test_name)
  for (num in seq(t_names)) {
    # Subset to a single testthat block.
    test_frame <- metric_data[metric_data$test_name == t_names[num],]
    # tryCatch guards against a block with no plottable data; the handler
    # only announces the failure and moves on to the next block.
    tryCatch(expr = {test_plot <- ggplot2::qplot(data = test_frame, x = message, y = metric_val) +
      ggplot2::facet_grid(facets = metric_name ~ ., scales = "free") +
      ggplot2::geom_point(color = "blue") +
      ggplot2::theme(axis.text.x = ggplot2::element_text(angle = -90)) +
      ggplot2::scale_x_discrete(limits = rev(levels(test_frame$message))) +
      # In the above 4 lines of code, the first line creates the basic qplot. The
      # third and fourth lines display the x-axis labels at 90 degrees to the
      # horizontal and correct the order of message labels on the x -axis,
      # respectively.
      ggplot2::xlab("Commit message") +
      ggplot2::ylab("Metric value") +
      ggplot2::ggtitle(label = paste0("Variation in metrics for ", t_names[num]))
    if (save_plots == TRUE) {
      if (!dir.exists("./Rperform_testMetrics")){
        dir.create(path = "./Rperform_testMetrics")
      }
      # Sanitize the block name into a file-system-safe PNG name.
      curr_name <- gsub(pattern = " ", replacement = "_", x = t_names[num])
      curr_name <- gsub(pattern = ".[rR]$", replacement = "", x = curr_name)
      png.file <- file.path("Rperform_testMetrics", paste0("Test_", curr_name, ".png"))
      png(filename = png.file, width = 1024, height = 768, units = "px")
      print(test_plot)
      dev.off()
      # Also render to the active device after writing the file.
      print(test_plot)
    }
    else {
      print(test_plot)
    }
    },
    error = function(e) {
      print("Encountered an error!")
    })
  }
}
## -----------------------------------------------------------------------------------------
# Internal worker for plot_metrics(metric = "time").
# Plots run-time metrics for the whole test file (faceted per testthat block)
# across `num_commits` commits, optionally saving data and/or the PNG.
.plot_time <- function(test_path, num_commits = 5, save_data = FALSE, save_plots) {
  stopifnot(is.character(test_path))
  stopifnot(length(test_path) == 1)
  stopifnot(is.numeric(num_commits))
  # Fractional commit counts are truncated rather than rejected.
  num_commits <- floor(num_commits)
  # Obtain the metrics data
  suppressMessages(time_data <- time_compare(test_path, num_commits))
  # Store the metrics data if save_data is TRUE
  if (save_data){
    # Store the metric data
    .save_data(time_data, pattern = "*.[rR]$", replacement = "_time.RData",
               replace_string = basename(test_path))
  }
  # Sanitize the file name for use in the plot title and the PNG name.
  curr_name <- gsub(pattern = " ", replacement = "_", x = basename(test_path))
  curr_name <- gsub(pattern = ".[rR]$", replacement = "", x = curr_name)
  # Plot the metric data; tryCatch only announces a failure and returns.
  tryCatch(expr =
  {test_plot <- ggplot2::qplot(message, metric_val, data = time_data) +
    ggplot2::facet_grid(facets = test_name ~ ., scales = "free") +
    ggplot2::geom_point(color = "blue") +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = -90)) +
    ggplot2::scale_x_discrete(limits = rev(levels(time_data$message))) +
    # In the above 4 lines of code, the first line creates the basic qplot. The
    # third and fourth lines display the x-axis labels at 90 degrees to the
    # horizontal and correct the order of message labels on the x -axis,
    # respectively.
    ggplot2::xlab("Commit message") +
    ggplot2::ylab("Time (in seconds)") +
    ggplot2::ggtitle(label = paste0("Variation in time metrics for ", curr_name))
  if (save_plots == TRUE) {
    if (!dir.exists("./Rperform_timeMetrics")){
      dir.create(path = "./Rperform_timeMetrics")
    }
    png.file <- file.path("Rperform_timeMetrics", paste0("Test_", curr_name, ".png"))
    png(filename = png.file, width = 1600, height = 1200, units = "px")
    print(test_plot)
    dev.off()
    # Also render to the active device after writing the file.
    print(test_plot)
  }
  else {
    print(test_plot)
  }},
  error = function(e){
    print("Encountered an error!")
  })
}
## -----------------------------------------------------------------------------------------
# Internal worker for plot_metrics(metric = "memory").
# Plots memory metrics for the test file across `num_commits` commits,
# faceting rows by testthat block and columns by metric_name (the memory
# metrics produced by mem_compare()), optionally saving data and/or the PNG.
.plot_mem <- function(test_path, num_commits = 5, save_data = FALSE, save_plots) {
  stopifnot(is.character(test_path))
  stopifnot(length(test_path) == 1)
  stopifnot(is.numeric(num_commits))
  # Fractional commit counts are truncated rather than rejected.
  num_commits <- floor(num_commits)
  # Obtain the metrics data
  suppressMessages(mem_data <- mem_compare(test_path, num_commits))
  # Store the metrics data if save_data is TRUE
  if (save_data){
    # Store the metric data
    .save_data(mem_data, pattern = "*.[rR]$", replacement = "_mem.RData",
               replace_string = basename(test_path))
  }
  # Sanitize the file name for use in the plot title and the PNG name.
  curr_name <- gsub(pattern = " ", replacement = "_", x = basename(test_path))
  curr_name <- gsub(pattern = ".[rR]$", replacement = "", x = curr_name)
  # tryCatch only announces a failure and returns.
  tryCatch(expr = {test_plot <- ggplot2::qplot(message, metric_val, data = mem_data) +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = -90),
                   strip.text.x = ggplot2::element_text(size = 10, face = "bold")) +
    ggplot2::scale_x_discrete(limits = rev(levels(mem_data$message))) +
    ggplot2::facet_grid(test_name ~ metric_name, scales = "free") +
    # In the above 4 lines of code, the first line creates the basic qplot. The
    # second and third lines display the x-axis labels at 90 degrees to the
    # horizontal and correct the order of message labels on the x -axis,
    # respectively. The fourth line creates a facet grid so as to seperate
    # the plots for the swap and leak memory metrics.
    ggplot2::geom_point(color = "blue") +
    ggplot2::ylab(label = "Memory (in Mb)") +
    ggplot2::xlab(label = "Commit messages") +
    ggplot2::ggtitle(label = paste0("Variation in memory metrics for ", curr_name))
  if (save_plots == TRUE) {
    if (!dir.exists("./Rperform_memoryMetrics")){
      dir.create(path = "./Rperform_memoryMetrics")
    }
    png.file <- file.path("Rperform_memoryMetrics", paste0("Test_", curr_name, ".png"))
    png(filename = png.file, width = 1024, height = 768, units = "px")
    print(test_plot)
    dev.off()
    # Also render to the active device after writing the file.
    print(test_plot)
  }
  else {
    print(test_plot)
  }
  },
  error = function(e) {
    print("Encountered an error!")
  })
}
## -----------------------------------------------------------------------------------------
## -----------------------------------------------------------------------------------------
#' Plot the specified metrics of all test files in a specified directory on a
#' webpage.
#'
#' Renders the specified metrics for every test present in the given directory
#' of the current git repository into an HTML page (via a generated R Markdown
#' file that calls \code{Rperform::plot_directory}).
#'
#' @param test_directory Path of the directory containing the test files.
#' @param metric Type of plot(s) desired. This can be set to \code{time},
#'   \code{memory}, \code{memtime} or \code{testMetrics}. (See examples below
#'   for more details)
#' @param output_name Name of the output .html file.
#'
#' @examples
#'
#' \dontrun{
#' # Set to the git repository in consideration.
#' setwd("path/to/repo")
#' d_path <- "path/to/tests"
#'
#' # Load the library
#' library(Rperform)
#'
#' ## Example-1
#'
#' # Pass the parameters and obtain the run-time followed by memory details against 10 commits
#' # on two seperate webpages (html files).
#' plot_webpage(test_directory = d_path, metric = "time", output_name = "timePage")
#' plot_metrics(test_directory = d_path, metric = "memory", output_name = "memPage")
#'
#' ## Example-2
#'
#' # Obtain both memory and time metrics for each individual testthat block
#' # inside a file and the file itself.
#' plot_webpage(d_path, metric = "testMetrics", output_name = "testMetricsPage")
#' }
#'
#' @section WARNING:
#' Function assumes the current directory to be the root directory of the
#' repository being tested.
#'
plot_webpage <- function(test_directory = "tests/testthat", metric = "testMetrics",
                         output_name = "index"){
  # Argument validation: same checks in the same order as before.
  stopifnot(
    is.character(test_directory),
    is.character(output_name),
    is.character(metric)
  )
  stopifnot(
    length(test_directory) == 1,
    length(output_name) == 1,
    length(metric) == 1
  )
  # The page is produced by knitting a tiny generated .Rmd file.
  rmd_file <- paste0(output_name, ".Rmd")
  if (!file.exists(rmd_file)) {
    file.create(rmd_file)
  }
  # Assemble the R Markdown source: a YAML header plus one chunk that calls
  # plot_directory() with save_plots = FALSE so figures land on the page.
  rmd_source <- paste0(
    "---\ntitle: \"plot\"\noutput: html_document\n---\n\n",
    "```{r}\nRperform::plot_directory(\"", test_directory,
    "\", metric = \"", metric, "\", save_plots = FALSE)\n```"
  )
  writeLines(rmd_source, con = rmd_file)
  knitr::knit2html(input = rmd_file, output = paste0(output_name, ".html"))
}
## -----------------------------------------------------------------------------------------
## -----------------------------------------------------------------------------------------
#' Plot metrics across versions for all files in a given directory.
#'
#' Given a directory path, plot the memory and time usage statistics of all files
#' in the directory against the commit message summaries of the specified number
#' of commits in the current git repository. Each file is delegated to
#' \code{plot_metrics}.
#'
#' @param test_directory Directory containing the test-files which are to be used.
#' @param metric Type of plot(s) desired. This can be set to \code{time},
#'   \code{memory}, \code{memtime} or \code{testMetrics}. (See examples below
#'   for more details)
#' @param num_commits Number of commits (versions) against which the files are to
#'   be tested, with default being 5.
#' @param save_data If set to TRUE, the metrics data is saved in a folder 'Rperform_Data'
#'   in the current directory.
#' @param save_plots If set to TRUE, the plots generated are stored in the
#'   'Rperform_plots' directory in the root of the repo rather than being
#'   printed.
#'
#' @examples
#'
#' \dontrun{
#' # Set to the git repository in consideration.
#' setwd("path/to/repo")
#' d_path <- "path/to/tests"
#'
#' # Load the library
#' library(Rperform)
#'
#' ## Example-1
#'
#' # Pass the parameters and obtain the run-time followed by memory details against 10 commits.
#' plot_directory(test_directory = d_path, metric = "time", num_commits = 10,
#'                save_data = F, save_plots = T)
#' plot_directory(test_directory = d_path, metric = "memory", num_commits = 10,
#'                save_data = F, save_plots = T)
#'
#' ## Example-2
#'
#' # Obtain both memory and time metrics for each individual testthat block
#' # inside a file and the file itself, and save the resulting plot as well as
#' # data.
#' plot_directory(d_path, metric = "testMetrics", num_commits = 5, save_data = F,
#'                save_plots = T)
#' }
#'
#' @section WARNING:
#' Library assumes the current directory to be the root directory of the
#' package being tested.
#'
plot_directory <- function(test_directory, metric = "testMetrics", num_commits = 5, save_data = FALSE,
                           save_plots = TRUE) {
  # Argument validation: same checks, same order, as the individual calls.
  stopifnot(is.character(test_directory))
  stopifnot(is.character(metric))
  stopifnot(is.numeric(num_commits))
  stopifnot(is.logical(save_data))
  stopifnot(is.logical(save_plots))
  stopifnot(length(test_directory) == 1)
  stopifnot(length(metric) == 1)
  stopifnot(length(save_data) == 1)
  stopifnot(length(save_plots) == 1)
  # Hand every file in the directory to plot_metrics(), which produces
  # (and optionally saves) the requested plots for that file.
  for (current_file in list.files(test_directory)) {
    plot_metrics(test_path = file.path(test_directory, current_file),
                 metric = metric, num_commits = num_commits,
                 save_data = save_data, save_plots = save_plots)
  }
}
## -----------------------------------------------------------------------------------------
## -----------------------------------------------------------------------------------------
#' Plot run-times across branches.
#'
#' Given a test-file and two branches, plots the run-times of the file against
#' the first commit till the latest common commit in branch1, and against the
#' latest commit in branch2. The vertical line divides the commits from the two
#' branches with the ones from branch1 on the left side.
#'
#' @param test_path File-path for the test file to be tested.
#' @param branch1 Branch against whose commits the test file is to be
#'   tested.
#' @param branch2 Branch into which branch1 is supposedly to be merged.
#'
#' @examples
#'
#' \dontrun{
#' # Set the current directory to the git repository concerned.
#' setwd("./Path/to/repository")
#'
#' # Set the file-path
#' t_path <- "Path/to/file"
#'
#' # Load the library and pass the parameters to the function
#' library(Rperform)
#' plot_btimes(test_path = t_path, branch1 = "helper", branch2 = "master")
#' }
#'
#' @section Warning:
#' Library assumes the current directory to be the root directory of the
#' package being tested.
#'
plot_btimes <- function(test_path, branch1, branch2 = "master") {
  # Argument validation: same checks, same order, as the individual calls.
  stopifnot(is.character(test_path))
  stopifnot(length(test_path) == 1)
  stopifnot(is.character(branch1))
  stopifnot(length(branch1) == 1)
  stopifnot(is.character(branch2))
  stopifnot(length(branch2) == 1)
  # Collect per-commit timings spanning both branches (messages suppressed).
  suppressMessages(
    branch_times <- compare_brancht(test_path = test_path, branch1 = branch1,
                                    branch2 = branch2)
  )
  # Locate the merge base; cnum_b1 gives the count of branch1-side commits,
  # which positions the divider between the two branches on the x axis.
  merge_base <- .common_commit(branch1 = branch1, branch2 = branch2)
  branch_plot <- ggplot2::qplot(x = message, y = metric_val, data = branch_times,
                                color = test_name) +
    # Rotate the commit-message labels and restore chronological order.
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = -90)) +
    ggplot2::scale_x_discrete(limits = rev(levels(branch_times$message))) +
    # Vertical divider: branch1 commits on the left, branch2 on the right.
    ggplot2::geom_vline(ggplot2::aes(xintercept = merge_base$cnum_b1 + 0.5, size = 2)) +
    ggplot2::ylab(label = "Time (in seconds)") +
    ggplot2::xlab(label = "Commit messages") +
    ggplot2::ggtitle(label = "Variation in time metrics acros Git branches")
  branch_plot
}
## -----------------------------------------------------------------------------------------
## -----------------------------------------------------------------------------------------
#' Plot memory metrics across branches.
#'
#' Given a test-file and two branches, plots the memory metrics of the file
#' against the first commit till the latest common commit in branch1, and
#' against the latest commit in branch2. The memory metrics plotted are the
#' memory leaked and maximum swapped memory during execution. The vertical line
#' divides the commits from the two branches with the ones from branch1 on the
#' left side.
#'
#' @param test_path File-path for the test file to be tested.
#' @param branch1 Branch against whose commits the test file is to be
#' tested.
#' @param branch2 Branch into which branch1 is supposedly to be merged.
#'
#' @examples
#'
#' \dontrun{
#' # Set the current directory to the git repository concerned.
#' setwd("./Path/to/repository")
#'
#' # Set the file-path
#' t_path <- "Path/to/file"
#'
#' # Load the library and pass the parameters to the function
#' library(Rperform)
#' plot_bmemory(test_path = t_path, branch1 = "helper", branch2 = "master")
#' }
#'
#' @section Warning:
#' Library assumes the current directory to be the root directory of the
#' package being tested.
#'
plot_bmemory <- function(test_path, branch1, branch2 = "master") {
  # Validate inputs: one file path and two single branch names.
  stopifnot(is.character(test_path))
  stopifnot(length(test_path) == 1)
  stopifnot(is.character(branch1))
  stopifnot(length(branch1) == 1)
  stopifnot(is.character(branch2))
  stopifnot(length(branch2) == 1)

  # Collect memory metrics (leaked / max swapped) across both branches.
  bmem_df <- compare_branchm(test_path = test_path, branch1 = branch1,
                             branch2 = branch2)
  # Latest common commit; its position (cnum_b1) marks where branch1's
  # commits end on the x-axis.
  common_commitdf <- (.common_commit(branch1 = branch1, branch2 = branch2))

  ggplot2::qplot(message, metric_val, data = bmem_df, color = test_name) +
    # One facet per memory metric (leaked vs. max swapped).
    ggplot2::facet_grid(. ~ metric_name) +
    # Rotate the x labels, restore the chronological order of commit
    # messages, and draw a vertical divider between the two branches'
    # commits (branch1 on the left).
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = -90)) +
    ggplot2::scale_x_discrete(limits = rev(levels(bmem_df$message))) +
    ggplot2::geom_vline(ggplot2::aes(xintercept = common_commitdf$cnum_b1 + 0.5, size = 2)) +
    ggplot2::ylab(label = "Memory (in Mb)") +
    ggplot2::xlab(label = "Commit messages") +
    # Fixed typo in the user-facing title: "acros" -> "across".
    ggplot2::ggtitle(label = "Variation in memory metrics across Git branches")
}
## -----------------------------------------------------------------------------------------
## -----------------------------------------------------------------------------------------
# Persist a metric data frame to ./Rperform_Data as an .RData file.
#
# The output file name is the basename of `replace_string` with its .r/.R
# extension replaced by `replacement` (e.g. "test-a.R" -> "test-a_time.RData").
# The object is saved under a fixed symbol name -- "time_frame" when
# `replacement` contains "time", "mem_frame" when it contains "mem" -- so
# that load() restores a predictable name for downstream readers.
.save_data <- function(metric_frame, pattern = "\\.[rR]$", replacement, replace_string) {
  # BUG FIX: the previous default pattern "*.[rR]$" is a file glob, not a
  # regular expression; a leading "*" is an invalid repetition operator and
  # makes sub() throw an error. "\\.[rR]$" matches the trailing extension
  # as intended.

  # Create a directory for storing the metric data.
  if (!dir.exists("./Rperform_Data")) {
    dir.create(path = "./Rperform_Data")
  }

  # Compute the destination path once instead of in each branch.
  out_file <- file.path("Rperform_Data", sub(pattern = pattern,
                                             replacement = replacement,
                                             x = basename(replace_string)))

  # grepl() already returns a logical, so no "> 0" comparison is needed.
  if (grepl(pattern = "time", x = replacement)) {
    time_frame <- metric_frame
    save(time_frame, file = out_file)
  } else if (grepl(pattern = "mem", x = replacement)) {
    mem_frame <- metric_frame
    save(mem_frame, file = out_file)
  }
}
|
edb4ecd11369c7acc3fae25c18bb7ab9f89ca247
|
50f60bc47e66819835a6d4f927074d7e144be5e5
|
/man/stf_mono_metadata.Rd
|
f5dd46989f5438456135d4384ed7eb91cdf1d1fa
|
[
"MIT"
] |
permissive
|
jjesusfilho/stfstj
|
b4df8d5ca7ae9d4fa95b39bab1921c8506f32995
|
441088fc9015cf5c7a2847d64ad9aa9744646bae
|
refs/heads/master
| 2018-10-30T10:45:16.494106
| 2018-08-23T18:33:19
| 2018-08-23T18:33:19
| 111,905,940
| 0
| 1
| null | 2017-12-08T21:32:22
| 2017-11-24T10:25:57
|
R
|
UTF-8
|
R
| false
| true
| 612
|
rd
|
stf_mono_metadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stf_mono_metadata.R
\name{stf_mono_metadata}
\alias{stf_mono_metadata}
\title{Returns metadata from Brazilian Supreme Court monocratic decisions}
\usage{
stf_mono_metadata(open_search, parties_names = TRUE)
}
\arguments{
\item{open_search}{Words to be searched}
\item{parties_names}{Logical. If TRUE (default), it will attempt to fix
parties prefixes.}
}
\value{
Dataframe with the metadata
}
\description{
Returns metadata from Brazilian Supreme Court monocratic decisions
}
\keyword{metadata}
\keyword{precedents,}
\keyword{stf,}
|
b319e33a01bdc646e21811b68796b10413ff224e
|
7acfddc8f6e83c086aff89440719ec4f21ebdfa4
|
/R/acs_screening.R
|
68ed23567a7cc5de229f00f6a391f8a5dc50790a
|
[] |
no_license
|
crazybilly/fundRaising
|
f16cafc02d23fd1fd140ba586cb6940516d19292
|
42f4f4ae725f62e31a7e16c10f172fff801737ea
|
refs/heads/master
| 2021-07-29T20:03:49.287552
| 2021-07-26T14:08:32
| 2021-07-26T14:08:32
| 145,170,311
| 4
| 3
| null | 2019-07-11T13:57:23
| 2018-08-17T22:03:05
|
R
|
UTF-8
|
R
| false
| false
| 7,605
|
r
|
acs_screening.R
|
#' Create a full name column using the first and last names
#'
#' @description creates a full name column - a convenience helper function for acs screening
#'
#' @param x a data frame
#' @param first_name_col a column from the data frame containing the first name
#' @param last_name_col a column from the data frame containing the last name
#'
#' @return a given data frame with an appended column containing the full name - in this case: first name, space, last name
#' @export
#'
make_full_name <- function(x, first_name_col, last_name_col) {
  # Capture the unevaluated column expressions for tidy evaluation.
  first_expr <- rlang::enexpr(first_name_col)
  last_expr <- rlang::enexpr(last_name_col)
  # Append a full_name column ("<first> <last>") and hand the frame back.
  dplyr::mutate(x, full_name = paste(!!first_expr, !!last_expr))
}
#' Create a concatenated address column to be used for gathering latitude and longitude values
#'
#' @description creates a concatenated address column - a convenience helper function for acs screening
#'
#' @param x a data frame
#' @param addr_line_col a column from the data frame containing the address line
#' @param addr_city_col a column from the data frame containing the address city
#' @param addr_postal_col a column from the data frame containing the address postal code
#'
#' @return a given data frame with an appended column containing a concatenated address - in this case: address line, comma, city, comma, postal code
#' @export
#'
make_concat_address <- function(x, addr_line_col, addr_city_col, addr_postal_col){
  ##load("data/zipcode.rda")
  ## think of a way to remove the zipcode object and not return to users
  # data("zipcode") # should already be available since it's in the data/ dir (though I'm sure it's going stale)
  # NOTE(review): this function relies on a `zipcode` data frame (with at
  # least `zip` and `state` columns) being in scope via lazy data loading.
  # Confirm data/zipcode.rda still ships with the package and is current.
  # Capture the three address-column expressions for tidy evaluation.
  adln <- rlang::enexpr(addr_line_col)
  adct <- rlang::enexpr(addr_city_col)
  adpc <- rlang::enexpr(addr_postal_col)
  x <- x %>%
    ## remove columns that are made if they exist?
    dplyr::mutate(short_zip = stringr::str_sub(!!adpc,1,5)) %>% ## convert any plus-4s to 5 digit ZIPs
    dplyr::left_join(zipcode, by = c("short_zip" = "zip")) %>% # join zipcodes to get the state abbr
    dplyr::mutate(concat_add = stringr::str_c(!!adln, !!adct, state, sep=",")) # create a concat addr, city, state
  # Returns the input frame with added short_zip, state, and concat_add columns.
  return(x)
}
#' Get the latitide and longitude values based on address data
#'
#' @description creates a data frame with latitude and longitude values based on address data
#'
#' @param address a vector containing the concatenated address as a string most likely in the form of data dollar sign column
#' @param name a vector containing the full name for the record as a string most likely in the form of data dollar sign column
#'
#' @return a data frame with latitide and longitude data for every address
#' @export
#'
# Geocode a vector of addresses via the Python "geocoder" module (ArcGIS
# backend, accessed through reticulate) and return a tibble pairing each
# name with the matched address, latitude, and longitude.
get_lat_lon <- function(name, address) {
  # Each gc$arcgis() call performs one network request to the ArcGIS API.
  gc <- reticulate::import("geocoder")

  # Geocode each address once, collecting the parsed JSON responses.
  # Replaces the previous pattern of growing vectors with c() inside a
  # loop (quadratic copying) with a single lapply pass.
  results <- lapply(address, function(a) gc$arcgis(a)$json)

  # vapply enforces one scalar per response; a malformed response fails
  # loudly here instead of silently misaligning the output columns.
  # Namespaced dplyr::tibble for consistency with get_block_ids below.
  dplyr::tibble(
    ll_names   = name,
    ll_address = vapply(results, function(r) r$address, character(1)),
    ll_lat     = vapply(results, function(r) r$lat, numeric(1)),
    ll_lng     = vapply(results, function(r) r$lng, numeric(1))
  )
}
#' Get a block id for every longitude and latitude value - a helper function that is iterated through to make a table
#'
#' @description pulls a block id values for a given set of coordinates
#'
#' @param lat a value for latitude
#' @param lon a value for longitude
#'
#' @return block id value or values
#' @export
#'
# Resolve one (lat, lon) pair to its census block FIPS code via the FCC
# "area" API, returning a one-row tibble (block_id, fcc_lat, fcc_lon).
get_block_ids <- function(lat, lon) {
  fcc <- "https://geo.fcc.gov/api/census/area?lat=%f&lon=%f&format=json"
  fcc <- sprintf(fcc, lat, lon)
  # Fetch and parse the JSON once. The previous xml2::read_html(fcc) call
  # fetched the same URL a second time and its result was immediately
  # overwritten -- dead code and a wasted network round trip.
  json <- jsonlite::fromJSON(fcc)
  dplyr::tibble (
    block_id = json$results$block_fips,
    fcc_lat = json$input$lat,
    fcc_lon = json$input$lon
  )
}
#' Iterate through get_block_ids to create a table of block ids - a list for now but a table soon
#'
#' @description creates a data frame with block ids generated from longitude and latitude values
#'
#' @param result a data frane output as a result of the get_lat_lon function containing a value for longitude and latitude labelled lon and lat respectively
#'
#' @return a data frame with block ids
#' @export
#'
get_block_id_table <- function(result) {
  # Look up the census block for every (lat, lon) pair in the geocoding
  # result; yields a list of one-row tibbles, one per coordinate pair.
  purrr::map2(.x = result$lat, .y = result$lon, .f = get_block_ids)
}
#' Obtain 5-Year American Community Survey Estimates
#'
#' @description The U.S. Census Bureau has published 5-year esimates of
#' demographic data since 2009. The data is aggregated at the national, state,
#' county, census tract, and census block group levels.
#'
#' This function queries the
#' \href{https://www.census.gov/data/developers/data-sets/acs-5year.html}{Census
#' Bureau API} based on FIPS codes for various geographies. Substituting a
#' wildcard character \code{"*"} instead of a FIPS code returns all values
#' within the parent geography (i.e. \code{tract = "*"} will return data for
#' all tracts within a county).
#'
#'
#' The API limits the number of queries for users who lack an API key. A key
#' can be obtained \href{https://api.census.gov/data/key_signup.html}{here}.
#'
#'
#' @param var Variables to query from the ACS. For a list of the available
#' variables and codes (for the 2017 ACS), see the
#' \href{https://api.census.gov/data/2017/acs/acs5/variables.html}{Official
#' Documentation}. Defaults to Median Household Income (B19013_00E) and Median
#' Home Value (Owner-Occupied Units) (B25077_001E). Supports groups.
#' @param year Four-digit year. Defaults to the most recent data, for 2017.
#' @param state Two-digit state FIPS code. Alternatively, \code{"us"} for
#' national-level statistics. Supports wildcard string (\code{"*"}).
#' @param county Three-digit county FIPS code. Supports wildcard string
#' (\code{"*"}).
#' @param tract Five-digit census tract FIPS code. Supports wildcard string
#' (\code{"*"}).
#' @param blkgrp One-digit blog group FIPS code.
#' @param key (optional) Developer key.
#'
#' @return Tibble of data points and FIPS codes, one line per valid input geography.
#' @export
#'
query_acs <- function(var = c("B19013_001E", "B25077_001E"),
                      year = NULL,
                      state,
                      county = NULL,
                      tract = NULL,
                      blkgrp = NULL,
                      key = NULL) {
  # Validate the year, defaulting to 2017 (the most recent release).
  # BUG FIX: match.arg() requires a character argument, so the previous
  # call match.arg(year, c(2017:2009)) errored for ANY user-supplied
  # numeric year. Coerce to character before matching.
  if (is.null(year)) {
    year <- 2017
  } else {
    year <- match.arg(as.character(year), as.character(2017:2009))
  }

  # Always request NAME alongside the user's variables.
  names <- paste("NAME", paste(var, collapse = ","), sep = ",")
  base_url <- glue::glue("https://api.census.gov/data/{year}/acs/acs5?get={names}&for=")

  # Build the geography clause from the most specific FIPS level supplied
  # (block group > tract > county > national/state).
  if (!is.null(blkgrp)) {
    url <- glue::glue("{base_url}block%20group:{blkgrp}&in=state:{state}%20county:{county}%20tract:{tract}")
  } else if (!is.null(tract)) {
    url <- glue::glue("{base_url}tract:{tract}&in=state:{state}%20county:{county}")
  } else if (!is.null(county)) {
    url <- glue::glue("{base_url}county:{county}&in=state:{state}")
  } else if (state == "us") {
    url <- glue::glue("{base_url}us")
  } else {
    url <- glue::glue("{base_url}state:{state}")
  }

  # Include API key, if provided.
  if (!is.null(key)) {
    link <- paste0(url, "&key=", key)
  } else {
    link <- url
  }

  # Fetch results. The API returns a character matrix whose first row
  # contains the headers: promote it to column names, then drop it.
  results <- jsonlite::fromJSON(link)
  colnames(results) <- results[1, ]
  results <- dplyr::as_tibble(results[-1, ])
  return(results)
}
|
36956a00980f2c785e4b84f4f2d1a0afbba3f125
|
649ae76f788227b6f1ce8e92b006fd09563c0b27
|
/tempDates.R
|
52231559f2bf0547232f6ee1421845a5a8b665bf
|
[
"MIT"
] |
permissive
|
andyblueyo/city-weather
|
92e3d92debdc15c9cfc3074cb43a70165c3a86a4
|
0fbf883ec82522a64875a6414d36698ed3f22e64
|
refs/heads/master
| 2021-03-30T17:33:04.597922
| 2017-11-21T08:53:37
| 2017-11-21T08:53:37
| 90,248,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 721
|
r
|
tempDates.R
|
library(dplyr)
# Look up each city's mean temperature on a given calendar date
# ("YYYY-MM-DD") and write it into a global `location` table as a side
# effect (see the `<<-` note below); the function's own return value is
# not meaningful.
tempDates <- function(cal.date){
  # read in the file names, can pull this out
  location <- read.csv("data/location.csv", stringsAsFactors = FALSE)
  files <- location$file_name
  # convert csv files with appropriate dates
  # also pull this out
  charDate <- function(csv){
    # Read one city's weather file and parse its date column to Date.
    csv <- read.csv(paste0("data/",csv,".csv"), stringsAsFactors = FALSE)
    csv$date <- as.Date(csv$date, "%Y-%m-%d")
    return(csv)
  }
  list.data <- lapply(files, charDate)
  for (i in seq(list.data)) {
    # Keep only the rows matching the requested date.
    list.data[[i]] <- list.data[[i]] %>% filter(list.data[[i]]$date == cal.date)
    # NOTE(review): `<<-` skips the local `location` created above and
    # assigns into the caller's/global environment instead -- the local
    # copy is never updated and nothing is returned. Confirm callers rely
    # on a pre-existing global `location` being mutated.
    location$actual_mean_temp[location$file_name == files[i]] <<- list.data[[i]]$actual_mean_temp
  }
}
# Example invocation for 9 July 2014.
tempDates("2014-07-09")
|
8a74748e04ab22f2da0efea92c91d0b6ff498fa4
|
8fff7a6210471ab26465e10e647121335e1314d9
|
/Project_01_KNN/R/NN1toKmaxPredict.R
|
1e7954a057b2c5a9943035a01ad8613797ae8eed
|
[] |
no_license
|
ShoeRider/CS499_MachineLearning_TeamProjects
|
a981df2f8ebd8e162ace8b0e50b553bdc9e8247e
|
2fc32ff1533fc9b9146c09f2e87006c4a23891bb
|
refs/heads/master
| 2020-04-20T02:46:06.502369
| 2019-05-04T07:31:06
| 2019-05-04T07:31:06
| 168,580,135
| 0
| 2
| null | 2019-02-19T18:43:45
| 2019-01-31T19:05:07
|
R
|
UTF-8
|
R
| false
| false
| 2,004
|
r
|
NN1toKmaxPredict.R
|
# NOTE(review): printing the working directory at package load/source time
# is debug output; consider removing before release.
print(getwd())
#source("R/General.R")
#' NN1toKmaxPredict
#'
#' Wraps around the compiled C++ knn interface code to call it from R.
#'
#'@param TrainingData numeric input feature matrix [n x p]
#'@param TrainingLabels numeric input label vector [n],
#'either all 0/1 for binary classification or other real numbers for regression
#'@param TestData numeric test feature matrix [p], that is used to test the different hyperparameters for k (1 to MAX.K) of KNN
#'@param max.neighbors scalar integer, max number of neighbors
#'
#'@return numeric vector of size [test.x.vec x max.neighbors] max.neighbors, predictions from 1 to max.neighbors
#'@export
#'
#'@examples
#'~~ Example 1 ~~
#'Spam<-Prep_Spam()
#'Fold.vec = Random_Folds(Spam$n_Elements,4)
#'Fold.n = 4
#'KNNLearnCV.List = NN1toKmaxPredict(TrainingData, TrainingLabels,TestData, 30)
#' #Where KNNLearnCV.List is a list containing the elements above, and you are free to use the returned values as you wish.
#'
#'barplot(KNNLearnCV.List$train.loss.vec,main = "Spam: KNNLearnCV.L2TestError.FoldMeans",xlab = "KNN Compared",ylab = "Error",beside = TRUE)
#'
#'~~ Example 2 ~~
#'
NN1toKmaxPredict <- function(TrainingData, TrainingLabels, TestData, max.neighbors)
{
  # n_test_observations
  #print("Here is the nrow of Test.x.vec")
  #print(dim(test.x.vec))
  # Call the registered C routine from the NearestNeighbors shared
  # library. Arguments, in order: n_train, n_test, n_features,
  # max.neighbors, then the flattened training data, training labels,
  # test data, and a zero-initialized output buffer for the predictions.
  result.list <- .C("NN1toKmaxPredict_interface",
                    as.integer(nrow(TrainingData)),
                    as.integer(nrow(TestData)),
                    as.integer(ncol(TrainingData)),
                    as.integer(max.neighbors),
                    as.double(TrainingData),
                    as.double(TrainingLabels),
                    as.double(TestData),
                    predictions=as.double(matrix(0, nrow = nrow(as.matrix(TestData)), ncol = as.integer(max.neighbors))),
                    PACKAGE="NearestNeighbors")
  # Reshape the flat prediction buffer returned by C into a matrix.
  # NOTE(review): dims are set as (max.neighbors, n_test), which is the
  # transpose of the [n_test x max.neighbors] layout the roxygen @return
  # describes -- confirm which orientation callers expect.
  dim(result.list$predictions)<- c(max.neighbors,nrow(TestData))
  return(result.list$predictions)
}
|
136e8f8a0087ddfb7d9276b4a7a69681be55d01e
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/93-wksp3/environ.R
|
31efa841d1180706d481278782de1d2e0d565a74
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
r
|
environ.R
|
# Environment management demo: listing and removing objects in the
# global environment.
# List the objects currently in memory.
ls()
# Create an object.
x = 1:5
ls() #check: x now appears
y=100:200
ls() #check: x and y both appear
# Remove a single object.
rm(x) #remove x from the environment
ls() #check: only y remains
# Remove all objects. CAUTION: rm(list = ls()) wipes the entire global
# environment; never use it inside reusable scripts.
rm(list = ls())
ls() #check: environment is empty
|
8092a109e9ab9d5a59eb45d46fbd45d482f0c65f
|
91e06a1f477bc52792c65d98d8155fa212072837
|
/man/tidy_bib_file.Rd
|
490ca70cfcddaaa639c1b361a788f4125bc5f6dd
|
[] |
no_license
|
mbojan/citr
|
905ff08458d02cd6760e43ebf0a6226e84cfaf54
|
543554bfc00cc0f853fd930ff953c9a09e78acce
|
refs/heads/master
| 2021-01-23T13:29:51.069381
| 2017-03-21T08:32:44
| 2017-03-21T08:41:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 801
|
rd
|
tidy_bib_file.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_bib_file.R
\name{tidy_bib_file}
\alias{tidy_bib_file}
\title{Tidy bibliography file}
\usage{
tidy_bib_file(rmd_file, messy_bibliography, file = NULL,
encoding = getOption("encoding"))
}
\arguments{
\item{rmd_file}{Character. One or more paths to the R Markdown files that use the messy bibliography file.}
\item{messy_bibliography}{Character. Path to the messy bibliography file.}
\item{file}{Character. Path and name for the to-be-created tidy bibliography. If \code{NULL} the messy bibliography is replaced.}
\item{encoding}{Character. The name of the encoding to be assumed. See the \code{\link{connection}}.}
}
\description{
Removes duplicate and unneeded entries from a Bib(La)Tex-file.
}
\examples{
NULL
}
|
cc700db2c2984760187f915040f0dd256006b7d8
|
164e7663830d35fb46c824ce46ee4376370d1272
|
/R/convertFx.R
|
fc443788a0443a885ef121223c2fbac64c49fe40
|
[
"MIT"
] |
permissive
|
mpascariu/MortalityLaws
|
2e839f96aaaf559f68a7a9552339e47245957ae2
|
0ae2da33bcb3326be2733bd2c036ca9637a4f74a
|
refs/heads/master
| 2023-08-10T21:58:13.258992
| 2023-07-21T12:25:03
| 2023-07-21T12:25:03
| 74,010,070
| 24
| 8
|
NOASSERTION
| 2021-11-24T09:58:20
| 2016-11-17T09:12:51
|
R
|
UTF-8
|
R
| false
| false
| 2,950
|
r
|
convertFx.R
|
# -------------------------------------------------------------- #
# Author: Marius D. PASCARIU
# Last Update: Thu Jul 20 21:11:11 2023
# -------------------------------------------------------------- #
#' Convert Life Table Indicators
#'
#' Easy conversion between the life table indicators. This function is based
#' on the \code{\link{LifeTable}} function and methods behind it.
#'
#' @usage convertFx(x, data, from, to, ...)
#' @inheritParams LifeTable
#' @param data Vector or data.frame/matrix containing the mortality indicators.
#' @param from Specify the life table indicator in the input \code{data}.
#' Character. Options: \code{mx, qx, dx, lx}.
#' @param to What indicator would you like to obtain? Character.
#' Options: \code{mx, qx, dx, lx, Lx, Tx, ex}.
#' @param ... Further arguments to be passed to the \code{\link{LifeTable}}
#' function with impact on the results to be produced.
#' @seealso \code{\link{LifeTable}}
#' @return A matrix or array containing life table indicators.
#' @author Marius D. Pascariu
#' @examples
#' # Data ---
#' x <- 0:110
#' mx <- ahmd$mx
#'
#' # mx to qx
#' qx <- convertFx(x, data = mx, from = "mx", to = "qx")
#' # mx to dx
#' dx <- convertFx(x, data = mx, from = "mx", to = "dx")
#' # mx to lx
#' lx <- convertFx(x, data = mx, from = "mx", to = "lx")
#'
#'
#' # There are 28 possible combinations --------------------------------
#' # Let generate all of them.
#' from <- c("mx", "qx", "dx", "lx")
#' to <- c("mx", "qx", "dx", "lx", "Lx", "Tx", "ex")
#' K <- expand.grid(from = from, to = to) # all possible cases/combinations
#'
#' for (i in 1:nrow(K)) {
#' In <- as.character(K[i, "from"])
#' Out <- as.character(K[i, "to"])
#' N <- paste0(Out, "_from_", In)
#' cat(i, " Create", N, "\n")
#' # Create the 28 sets of results
#' assign(N, convertFx(x = x, data = get(In), from = In, to = Out))
#' }
#' @export
convertFx <- function(x,
                      data,
                      from = c("mx", "qx", "dx", "lx"),
                      to = c("mx", "qx", "dx", "lx", "Lx", "Tx", "ex"),
                      ...) {
  from <- match.arg(from)
  to   <- match.arg(to)

  # Wrap LifeTable() so that the input indicator is supplied under the
  # argument name matching `from`.
  LifeTable_foo <- switch(
    from,
    mx = function(x, w, ...) LifeTable(x, mx = w, ...),
    qx = function(x, w, ...) LifeTable(x, qx = w, ...),
    dx = function(x, w, ...) LifeTable(x, dx = w, ...),
    lx = function(x, w, ...) LifeTable(x, lx = w, ...)
  )

  if (is.vector(data)) {
    # Single indicator vector in -> single converted vector out.
    if (length(x) != length(data))
      stop("The 'x' and 'data' do not have the same length", call. = FALSE)

    out <- LifeTable_foo(x = x, data, ...)$lt[, to]
    names(out) <- names(data)

  } else {
    # Matrix/data.frame input: convert each column independently,
    # preserving the original dimnames.
    if (length(x) != nrow(data))
      # Fixed typo in the error message: "numebr" -> "number".
      stop("The length of 'x' must be equal to the number of rows in 'data'",
           call. = FALSE)

    LT  <- function(D) LifeTable_foo(x = x, as.numeric(D), ...)$lt[, to]
    out <- apply(X = data, 2, FUN = LT)
    dimnames(out) <- dimnames(data)
  }

  return(out)
}
|
17b1ea7a8f2af7262d4cc48b85cc485979804eb4
|
e99a7f80f244408f4532b2e5ae682fb7ab641fa4
|
/Massachusetts/MA_Moving_Averages.R
|
f47182ca7bcfa20fceae1bb299ff30801d8f6c25
|
[] |
no_license
|
AndrewDisher/New-England-Weather
|
98a75bd171dd49a8c35c6200f00d0f2fdf0c897f
|
882031c40d7578b37217399890abb2c915c23650
|
refs/heads/master
| 2022-11-17T08:39:52.000970
| 2020-07-08T16:17:40
| 2020-07-08T16:17:40
| 275,846,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,162
|
r
|
MA_Moving_Averages.R
|
#
# Andrew Disher
# 3/27/2020
# Climate Change in Massachusetts
# Purpose: To Analyze Weather Data
#
library(zoo) # Includes the rollmean() function for finding the simple moving average of a time series.
library(ggplot2)
library(dplyr) # Includes cummean() function for finding the cumulative moving average of a time series.
# Moving Averages ---------------------------------------------------------
# Obtain simple moving averages for various weather metrics and plot them against their respective time series.
# Each plot below overlays a k-period rolling mean on the raw monthly
# series; the subset (e.g. rows 5:240 for k = 5) aligns the shortened
# rollmean() output with the x-axis dates.
# NOTE(review): the snowfall/snow-depth/temperature panels draw the raw
# series from MA_SNWD_SNOW_Monthly / MA_Temp_Monthly but compute the
# rolling mean from MA_Weather_Monthly -- confirm these frames share the
# same row order and date range.
# Precipitation with a 5-month moving average.
(ggplot()
 + geom_line(data = MA_Weather_Monthly, aes(x = NewDate, y = PRCP, group = 1, color = "Precipitation"), size = 1)
 + geom_line(data = MA_Weather_Monthly[5:240, ], aes(x = NewDate, y = rollmean(MA_Weather_Monthly$PRCP, k = 5), group = 1, color = "Moving Average"), size = 1)
 + xlab("Date") + ylab("Precipitation (inches)") + ggtitle("Average Daily Precipitation in Massachusetts by Month")
 + theme_bw() + guides(color = guide_legend(title = "", title.position = "left"))
 + theme(legend.position = "bottom"))
# Snowfall with a 5-month moving average.
(ggplot()
 + geom_line(data = MA_SNWD_SNOW_Monthly, aes(x = NewDate, y = SNOW, group = 1, color = "Snowfall"), size = 1)
 + geom_line(data = MA_Weather_Monthly[5:240, ], aes(x = NewDate, y = rollmean(MA_Weather_Monthly$SNOW, k = 5), group = 1, color = "Moving Average"), size = 1)
 + xlab("Date") + ylab("Snowfall") + ggtitle("Average Daily Snowfall in Massachusetts by Month")
 + theme_bw() + guides(color = guide_legend(title = "", title.position = "left"))
 + theme(legend.position = "bottom"))
# Snow depth with a 3-month moving average (rows 3:240 match k = 3).
(ggplot()
 + geom_line(data = MA_SNWD_SNOW_Monthly, aes(x = NewDate, y = SNWD, group = 1, color = "Snow Depth"), size = 1)
 + geom_line(data = MA_Weather_Monthly[3:240, ], aes(x = NewDate, y = rollmean(MA_Weather_Monthly$SNWD, k = 3), group = 1, color = "Moving Average"), size = 1)
 + xlab("Date") + ylab("Snow Depth") + ggtitle("Average Daily Snow Depth in Massachusetts by Month")
 + theme_bw() + guides(color = guide_legend(title = "", title.position = "left"))
 + theme(legend.position = "bottom"))
# Maximum temperature with a 5-month moving average.
(ggplot()
 + geom_line(data = MA_Temp_Monthly, aes(x = NewDate, y = TMAX, group = 1, color = "Max Temperature"), size = 1)
 + geom_line(data = MA_Weather_Monthly[5:240, ], aes(x = NewDate, y = rollmean(MA_Weather_Monthly$TMAX, k = 5), group = 1, color = "Moving Average"), size = 1)
 + xlab("Date") + ylab("Temperature") + ggtitle("Average Daily Maximum Temperature in Massachusetts by Month")
 + theme_bw() + guides(color = guide_legend(title = "", title.position = "left"))
 + theme(legend.position = "bottom"))
# Minimum temperature with a 5-month moving average.
(ggplot()
 + geom_line(data = MA_Temp_Monthly , aes(x = NewDate, y = TMIN, group = 1, color = "Min Temperature"), size = 1)
 + geom_line(data = MA_Weather_Monthly[5:240, ], aes(x = NewDate, y = rollmean(MA_Weather_Monthly$TMIN, k = 5), group = 1, color = "Moving Average"), size = 1)
 + xlab("Date") + ylab("Temperature") + ggtitle("Average Daily Minimum Temperature in Massachusetts by Month")
 + theme_bw() + guides(color = guide_legend(title = "", title.position = "left"))
 + theme(legend.position = "bottom"))
|
cb1319ab249172f3552b1d036738d5e0bb9e1c64
|
06f1ce91c6141d4b41081bea4284403a30c76c69
|
/R/hit_map.R
|
6a361ffa31e5e2f4eada0b93f46712fa92939843
|
[] |
no_license
|
VizWizard/phenoScreen
|
5dea9caff26542a9fa67ef21c287f408685c1d0a
|
583ff96bf35b92d73f9d745b9539d22a08ea07fd
|
refs/heads/master
| 2021-01-20T02:54:17.701837
| 2016-06-14T17:41:58
| 2016-06-14T17:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,678
|
r
|
hit_map.R
|
#' Platemap to identify 'hits' in a screen
#'
#' Produces a plot in the form of a micro-titre layout,
#' with colours indicating wells above or below a nominated threshold.
#'
#' @param data Vector of numerical values to score
#' @param well Vector of well identifiers e.g "A01"
#' @param plate Number of wells in complete plate (96, 384 or 1536)
#' @param threshold Numerical value of standard deviations from the mean
#' for a well to be classified as a 'hit'. Default is +/- 2 SD
#' @param palette RColorBrewer palette
#'
#' @import ggplot2
#' @import dplyr
#' @import RColorBrewer
#'
#' @return ggplot plot
#'
#' @export
#'
#' @examples
#' df <- data.frame(vals = rnorm(1:384),
#' well = num_to_well(1:384, plate = 384))
#'
#' hit_map(data = df$vals,
#' well = df$well,
#' plate = 384,
#' threshold = 3)
hit_map <- function(data, well,
                    plate = 96,
                    threshold = 2,
                    palette = "Spectral"){

  stopifnot(is.vector(data))

  if (length(well) > plate) {
    stop("Invalid plate selection. The data given has more rows than the number of wells. \nAre you sure argument 'plate' is correct for the number of wells in your data? \nnote: Default is set to a 96-well plate.")
  }

  # transform well labels into row-column values for a 96-well plate
  platemap <- plate_map_scale(data, well)

  # Classify every well in one vectorised pass (previously a row-by-row
  # loop): beyond +threshold -> "hit", beyond -threshold -> "neg_hit",
  # otherwise "null". NA values propagate as NA instead of erroring.
  platemap$hit <- ifelse(platemap$values > threshold, "hit",
                  ifelse(platemap$values < (-1 * threshold), "neg_hit", "null"))

  # RColorBrewer palette: palette extremes mark hits, the middle colour
  # marks nulls.
  my_cols <- brewer.pal(3, palette)
  my_colours <- c(hit = my_cols[1], neg_hit = my_cols[3], null = my_cols[2])

  # plt96/plt384/plt1536 colour points by the `values` column, so stash
  # the raw numbers (column name kept as "actual_vales" for backward
  # compatibility with plot data) and substitute the hit classification.
  platemap$actual_vales <- platemap$values
  platemap$values <- platemap$hit

  if (plate == 96L){
    # produce a 96-well plate map layout in ggplot
    plt <- plt96(platemap) +
      scale_fill_manual("hit", values = my_colours) +
      theme_bw()
  } else if (plate == 384L){
    # produce a 384-well plate map layout in ggplot
    plt <- plt384(platemap) +
      scale_fill_manual("hit", values = my_colours) +
      theme_bw()
  } else if (plate == 1536L){
    plt <- plt1536(platemap) +
      scale_fill_manual("hit", values = my_colours) +
      theme_bw()
  } else stop("Not a valid plate format. Either 96, 384 or 1536.", call. = FALSE)

  return(plt)
}
|
6a114ca7f40fc9973a1193dc4d800b31b2b69f8a
|
bcde4003dfb3725293245f407a2398310f1e8151
|
/man/getPrevalenceSingleSample.Rd
|
ba44c305a4f28aee2ba7b374e14407ce4076fd42
|
[] |
no_license
|
cwcyau/OncoPhase-1
|
d9f88501fac95f7e1c5f7692bbb0d6c0120dea75
|
d8ffb3ac080de12a25228cf358d121eddcbaffa4
|
refs/heads/master
| 2021-01-22T16:26:29.942830
| 2016-08-09T14:04:34
| 2016-08-09T14:04:34
| 65,290,817
| 0
| 0
| null | 2016-08-09T11:48:39
| 2016-08-09T11:48:39
| null |
UTF-8
|
R
| false
| true
| 4,702
|
rd
|
getPrevalenceSingleSample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OncoPhase_methods.R
\name{getPrevalenceSingleSample}
\alias{getPrevalenceSingleSample}
\title{Somatic mutations cellular prevalence on a single sample.}
\usage{
getPrevalenceSingleSample(input_df, mode = "PhasedSNP", nbFirstColumns = 0,
region = NULL, detail = TRUE, LocusCoverage = 1)
}
\arguments{
\item{input_df}{A data frame containing for each mutations :
\describe{
\item{lambda_S}{Alelle counts supporting the SNV}
\item{mu_S}{Alelle counts supporting the reference at the SNV locus}
\item{major_cn}{Major copy number at the SNV locus}
\item{minor_cn}{Minor copy number at the SNV locus}
\item{lambda_G}{Alelle counts supporting the SNP}
\item{mu_G}{Alelle counts supporting the reference at the SNP}
}}
\item{mode}{The mode under which the prevalence is computed (default : PhasedSNP , alternatives methods are FlankingSNP, OptimalSNP,and SNVOnly). Can also be provided as a numeric 0=SNVOnly, 1= PhasedSNP, 2=FlankingSNP and 3 = OptimalSNP
#@param formula The formula used to compute the prevalence. can be either "matrix" for the linear equations or "General" for the exact allele count cases. Default : Matrix}
\item{nbFirstColumns}{Number of first columns in snp_allelecount_df to reproduce in
the output dataframe e.g: Chrom, Pos, Vartype. Columns from nbFirstColumns +1 to the last column should contains the information needed for the prevalence computation at each tumour sample}
\item{region}{The region of the genome to consider for the prevalence computation in the format chrom:start-end
e.g "chr22:179800-98767}
\item{detail}{when set to TRUE, a detailed output is generated containing, the context and the detailed prevalence for each group of cells (germline cells, cells affected by one of the two genomic alterations (SNV or CNV) but not both, cells affected by by both copynumber alteration and SNV ). Default : TRUE.}
\item{LocusCoverage}{when set to true, lambda_S and mu_S might be adjusted if necessary so that they meet the rules lambda_S <= lambda_G. mu_S >= mu_G and lambda_S + mu_S = lambda_G + mu_G. Not used if mode=SNVOnly, Default = FALSE}
}
\value{
A data frame containing :
\describe{
\item{}{Column 1 to NbFirstcolumn of the input data frame snp_allelecount_df.
This will generally include the chromosome and the position of the mutation plus
any other columns to report in the prevalence dataframe (e.g REF, ALL, ...) }
\item{}{and the following information}
\describe{
\item{Prev}{The Cellular Prevalence of the mutation}
\item{Germ}{The proportion of cells with a normal genotype}
\item{Alt}{The proportion of cells with only the CNA if the context C=C1 or with only the SNV if the context C=C2}
\item{Both}{The proportion of cells with both the SNV and the SCNA}
\item{Context}{Context at the mutation. If C1 then the SNV occured after the SCNA, if C=c2 then the SNV occured before the SCNA}
\item{residual}{Residual after limSolve approximation.}
}
}
}
\description{
This is a generic function to compute the cellular prevalence of somatic mutations in
cancer. The function applies the model to a range of mutations located at a given genomic region or at the whole genome scale. The model computes the prevalence of a somatic
mutation relatively to close and eventually phased germline mutations. It uses three sources
of information as input : The allelic counts, the phasing information and the
copy number alteration. Multiple tumor samples can be provided for the prevalence computation.
}
\examples{
#Example 1:
input_file=system.file("extdata","phylogeny1_d300_n80.tsv", package = "OncoPhase")
input_df<-read.table(input_file,header=TRUE)
rownames(input_df) = input_df$mutation_id
print(input_df)
# mut_id lambda_S mu_S major_cn minor_cn lambda_G mu_G
#a a 151 152 1 1 151 135
#b b 123 176 1 1 161 150
#c c 94 209 2 1 176 134
#d d 23 283 1 1 155 144
#e e 60 228 2 0 174 125
prevalence_df=getPrevalenceSingleSample(input_df,nbFirstColumns = 1)
print(prevalence_df)
# mut_id Prev Germ Alt Both Residual Context
# a a 0.9967 0.0017 0.0017 0.9967 3.1e-03 C1
# b b 0.8230 0.0890 0.0890 0.8230 1.3e-03 C1
# c c 0.4010 0.6000 0.0910 0.3100 3.9e-33 C2
# d d 0.1500 0.4200 0.4200 0.1500 1.4e-03 C1
# e e 0.2490 0.7500 0.0890 0.1600 5.1e-31 C2
}
\seealso{
\code{\link{getPrevalence}}
}
|
dc0f4d749b4bcbb2dfc03b1b232ff9f11d6c747f
|
40f8efa31fe5dbf1dbbe54d689ca0bcda716a8a9
|
/CIBERSORT_data/margeCIBERSORTTables.R
|
ce8416fabd1856722b9994e7caa18786c896cdf3
|
[] |
no_license
|
Shicheng-Guo/CIBERSORT
|
02deba2c03e68d1cc829a4bce70dde309c53c7ef
|
c321c766b50a0a1c738614bdc037dda0124a787e
|
refs/heads/master
| 2021-01-18T10:21:32.907921
| 2016-06-16T18:14:10
| 2016-06-16T18:14:10
| 61,145,865
| 1
| 0
| null | 2016-06-14T18:20:35
| 2016-06-14T18:20:34
|
R
|
UTF-8
|
R
| false
| false
| 296
|
r
|
margeCIBERSORTTables.R
|
# Load the two CIBERSORT input tables from the local data directory:
# the LM22 signature matrix and the example mixture gene-expression profiles.
# NOTE(review): paths are machine-specific absolute paths — confirm before reuse.
cibersort_dir <- "/Users/mitra/Desktop/cellType-R/CIBERSORT_data"
sigMat <- read.csv(file.path(cibersort_dir, "LM22.csv"))
mixData <- read.csv(file.path(cibersort_dir, "ExampleMixtures-GEPs.csv"))

# Quick sanity check: peek at the gene-symbol column of each table.
# (read.csv sanitizes non-syntactic CSV headers into valid R names,
# which is why the column shows up as GeneSymbol here.)
head(sigMat$GeneSymbol)
head(mixData$GeneSymbol)
|
8557fd17091e2d071f153b7b333278b8e80e07d5
|
475507c1fb088a4ac31dc66b17c3718d09cf66ba
|
/run_analysis.R
|
3cdc5b25cfd511656fc84b2ae7fe251be0b9cf4a
|
[] |
no_license
|
neerajasharma/Getting-and-cleaning-data_week4_project
|
bd5f93c9db7dab806c7206e86815b270020cc76a
|
b785e08a1b1e19e5ca79c50fb3164b70f01fc1aa
|
refs/heads/main
| 2023-09-04T11:24:41.483073
| 2021-10-26T11:28:01
| 2021-10-26T11:28:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,949
|
r
|
run_analysis.R
|
# run_analysis.R ---------------------------------------------------------
# Getting & Cleaning Data course project:
#   1. download and unzip the UCI HAR dataset,
#   2. merge the training and test sets,
#   3. keep only mean/std measurements,
#   4. apply descriptive activity and variable names,
#   5. write a tidy data set with the average of each variable per
#      subject and activity to "tidyset.txt".

# Load packages. Fix: only install when missing instead of unconditionally
# re-installing on every run.
if (!requireNamespace("data.table", quietly = TRUE)) install.packages("data.table")
library(data.table)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
# set working directory
# NOTE(review): setwd() in a script is fragile, but the relative destFile
# below depends on it, so it is kept for backward compatibility.
setwd("C:/Users/bspadmin/Documents/gettingandcleaningdata")
# download ZIP file from the web
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destFile <- "CourseDataset.zip"
download.file(URL, destfile = destFile, mode = 'wb')
# unzip downloaded zip file
unzip(zipfile = "C:/Users/bspadmin/Documents/gettingandcleaningdata/CourseDataset.zip",
      exdir = "C:/Users/bspadmin/Documents/gettingandcleaningdata/CourseDataset")
# set path and read the raw measurement, label and subject files
pathdata <- "C:/Users/bspadmin/Documents/gettingandcleaningdata/CourseDataset/UCI HAR Dataset"
xtrain <- read.table(file.path(pathdata, "train", "X_train.txt"), header = FALSE)
ytrain <- read.table(file.path(pathdata, "train", "y_train.txt"), header = FALSE)
subject_train <- read.table(file.path(pathdata, "train", "subject_train.txt"), header = FALSE)
xtest <- read.table(file.path(pathdata, "test", "X_test.txt"), header = FALSE)
ytest <- read.table(file.path(pathdata, "test", "y_test.txt"), header = FALSE)
subject_test <- read.table(file.path(pathdata, "test", "subject_test.txt"), header = FALSE)
features <- read.table(file.path(pathdata, "features.txt"), header = FALSE)
activityLabels <- read.table(file.path(pathdata, "activity_labels.txt"), header = FALSE)
# Name the columns: measurement columns take the feature names, label and
# subject columns get explicit ids.
colnames(xtrain) <- features[, 2]
colnames(ytrain) <- "activityId"
colnames(subject_train) <- "subjectId"
colnames(xtest) <- features[, 2]
colnames(ytest) <- "activityId"
colnames(subject_test) <- "subjectId"
colnames(activityLabels) <- c('activityId', 'activityType')
# 1. Merge the training and test sets to create one data set.
merge_train <- cbind(ytrain, subject_train, xtrain)
merge_test <- cbind(ytest, subject_test, xtest)
full_merge <- rbind(merge_test, merge_train)
# 2. Extract only the measurements on the mean and standard deviation.
# Fix: the selector vector was previously named `colnames`, shadowing the
# base function of the same name; renamed to avoid confusion.
all_cols <- colnames(full_merge)
# NOTE(review): "mean.." / "std.." use regex wildcards, so e.g. meanFreq
# columns also match — kept as-is to preserve the original selection.
mean_and_std <- (grepl("activityId", all_cols) | grepl("subjectId", all_cols) |
                   grepl("mean..", all_cols) | grepl("std..", all_cols))
mean_and_std
setmeanstd <- full_merge[, mean_and_std == TRUE]
# 3. Use descriptive activity names: merge in the label table, then replace
# the numeric activityId codes with their names.
setactnames <- merge(setmeanstd, activityLabels, by = 'activityId', all.x = TRUE)
setactnames[, 1] <- sub("1", "WALKING", setactnames[, 1])
setactnames[, 1] <- sub("2", "WALKING_UPSTAIRS", setactnames[, 1])
setactnames[, 1] <- sub("3", "WALKING_DOWNSTAIRS", setactnames[, 1])
setactnames[, 1] <- sub("4", "SITTING", setactnames[, 1])
setactnames[, 1] <- sub("5", "STANDING", setactnames[, 1])
setactnames[, 1] <- sub("6", "LAYING", setactnames[, 1])
# 4. Label the data set with descriptive variable names (expand the
# abbreviated feature-name fragments).
names(setactnames) <- gsub("Acc", "Accelerometer", names(setactnames))
names(setactnames) <- gsub("Gyro", "Gyroscope", names(setactnames))
names(setactnames) <- gsub("BodyBody", "Body", names(setactnames))
names(setactnames) <- gsub("Mag", "Magnitude", names(setactnames))
names(setactnames) <- gsub("^t", "Time", names(setactnames))
names(setactnames) <- gsub("^f", "Frequency", names(setactnames))
names(setactnames) <- gsub("tBody", "TimeBody", names(setactnames))
names(setactnames) <- gsub("-mean()", "Mean", names(setactnames))
names(setactnames) <- gsub("-std()", "STD", names(setactnames))
names(setactnames) <- gsub("-freq()", "Frequency", names(setactnames))
names(setactnames) <- gsub("angle", "Angle", names(setactnames))
names(setactnames) <- gsub("gravity", "Gravity", names(setactnames))
# 5. Create an independent tidy data set with the average of each variable
# for each activity and each subject, ordered by subject then activity.
tidyset <- aggregate(. ~ subjectId + activityId + activityType, setactnames, mean)
tidyset <- tidyset[order(tidyset$subjectId, tidyset$activityId), ]
# Save the tidy data set to a local file.
write.table(tidyset, "tidyset.txt", row.names = FALSE)
|
7279f91c392554954af0652a65cef0b87c5c6bad
|
8bd02cf887641c987e73caa5bb1b8a82eddedc37
|
/web_scraping.R
|
e96200e6e997c5f2e49af8f74da4ff0fac21612b
|
[] |
no_license
|
turgeonmaxime/bfi-2012poll
|
cec8ac8a7814834bbc674d6fcf7965831a4b2300
|
e3d75a317880c53adefc4520195b8634a2e2ac8e
|
refs/heads/master
| 2021-05-13T23:27:23.631489
| 2018-01-06T21:04:47
| 2018-01-06T21:04:47
| 116,515,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,322
|
r
|
web_scraping.R
|
# BFI 2012 poll web scraping ----------------------------------------------
# Scrapes each voter page of the BFI Sight & Sound 2012 poll, collects the
# voter details and their ten film votes, cleans both tables, and stores
# them in a local SQLite database.
library(rvest)
library(magrittr)
library(dplyr)
# There are 1205 voters
n_max <- 1205
# Fix: instead of growing `schema_details`/`schema_votes` with bind_rows
# inside the loop (quadratic copying), accumulate one tibble per voter in
# preallocated lists and bind once at the end.
details_list <- vector("list", n_max)
votes_list <- vector("list", n_max)
for (index in seq_len(n_max)) {
  # Read the voter page.
  web_content <- read_html(paste0("http://www.bfi.org.uk/films-tv-people/sightandsoundpoll2012/voter/",
                                  index))
  # Voter name is the page's <h1>.
  name <- web_content %>%
    html_nodes("h1") %>%
    html_text() %>%
    trimws()
  # Raw details blob (profession, country, poll voted in) as displayed.
  details <- web_content %>%
    html_nodes(".sas-poll-voter-details") %>%
    html_text() %>%
    trimws
  details_list[[index]] <- tibble::tibble(
    ID = index,
    Name = name,
    Details = details
  )
  # The first table on the page holds the ten votes (title, year, director).
  votes_list[[index]] <- web_content %>%
    html_nodes("table") %>%
    dplyr::first() %>%
    html_table() %>%
    tibble::as_tibble() %>%
    dplyr::rename(Title = X1,
                  Year = X2,
                  Director = X3) %>%
    dplyr::mutate(ID = index)
}
schema_details <- dplyr::bind_rows(details_list)
schema_votes <- dplyr::bind_rows(votes_list)
# Clean schema_details: derive the poll type (Critic/Director), strip that
# sentence from the blob, split the remainder into detail fields + country,
# then recombine the detail fields into a single Details column.
schema_details2 <- schema_details %>%
  dplyr::mutate(Poll = dplyr::case_when(
    grepl("critic", Details) ~ "Critic",
    grepl("director", Details) ~ "Director",
    TRUE ~ NA_character_
  ),
  Details = trimws(stringr::str_replace(Details, "Voted in the (critics|directors)\u2019 poll", "")),
  n_elements = sapply(strsplit(Details, "\n"), length)) %>%
  tidyr::separate(Details, sep = "\n", into = c("Details1", "Details2", "Details3",
                                                "Details4", "Details5", "Country"),
                  fill = "left") %>%
  dplyr::mutate(Details = paste(Details1, Details2, Details3, Details4, Details5)) %>%
  dplyr::mutate(Details = stringr::str_replace_all(Details, "NA", "")) %>%
  dplyr::select(-Details1, -Details2, -Details3, -Details4, -Details5, -n_elements)
# Clean schema_votes: normalize surname-first director names and fill in
# years the site leaves blank for a few known titles.
schema_votes2 <- schema_votes %>%
  dplyr::mutate(
    Director = dplyr::case_when(
      Director == "Kurosawa Akira" ~ "Akira Kurosawa",
      Director == "Kurosawa Kiyoshi" ~ "Kiyoshi Kurosawa",
      TRUE ~ Director
    ),
    Year = dplyr::case_when(
      Title == "4 months, 3 weeks and 2 days" ~ 2007L,
      Title == "Amadeus" ~ 1984L,
      Title == "Berlin Alexanderplatz" ~ 1980L,
      Title == "Dekalog" ~ 1989L,
      Title == "Diary" ~ 1983L,
      Title == "Tale of the Wind, A" ~ 1988L,
      Title == "Turin Horse, The" ~ 2011L,
      TRUE ~ Year
    ))
# Persist both cleaned tables to a local SQLite database, then disconnect.
bfi_db <- DBI::dbConnect(RSQLite::SQLite(), "bfi-2012poll.sqlite")
DBI::dbWriteTable(bfi_db, "details", schema_details2, overwrite = TRUE)
DBI::dbWriteTable(bfi_db, "votes", schema_votes2, overwrite = TRUE)
DBI::dbDisconnect(bfi_db)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.