blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
β | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
β | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
β | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6d055fa4838ad1d43b643048c3b60f85b481af7
|
10395d3e481332b1afade1b52b7e23d1f09b2380
|
/Quiz4.R
|
2cc999ca62f8dafc69c15555ad38c3664ac55907
|
[] |
no_license
|
sparklezzz/coursera-model-regression-sparklezzz
|
ed7b50215b75198a1b1ebd74df0d491e526180e4
|
529e46e83797d373953e9c1c46eb955d94206665
|
refs/heads/master
| 2016-09-05T15:38:15.190527
| 2014-11-03T02:14:06
| 2014-11-03T02:14:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 900
|
r
|
Quiz4.R
|
# Coursera "Regression Models" quiz 4.

# Q1: logistic regression of autolander use on wind direction (MASS::shuttle);
# exponentiated coefficients are odds / odds ratios.
library(MASS)
fit <- glm(use ~ wind, family = binomial, data = shuttle)
exp(fit$coef)

# Q2: same model, adjusting for wind magnitude as a factor.
fit2 <- glm(use ~ wind + factor(magn), family = binomial, data = shuttle)
exp(fit2$coef)

# Q3: flipping the 0/1 coding of the outcome negates the coefficients.
y1 <- as.numeric(shuttle$use != "auto")
fit3 <- glm(y1 ~ shuttle$wind, family = binomial)
summary(fit3)
y2 <- 1 - y1
fit4 <- glm(y2 ~ shuttle$wind, family = binomial)
summary(fit4)

# Q4: Poisson model with cell-means coding; ratio of spray A to spray B rates.
fit5 <- glm(count ~ factor(spray) - 1, family = poisson, data = InsectSprays)
rate <- exp(fit5$coef)
rate[1] / rate[2]

# Q5: a constant offset shifts only the intercept, never the slopes.
zero.offset <- rep(0, length(InsectSprays$count))
fit6 <- glm(count ~ spray + offset(zero.offset), family = poisson, data = InsectSprays)
fit6$coef
shifted.offset <- zero.offset + log(10)
fit7 <- glm(count ~ spray + offset(shifted.offset), family = poisson, data = InsectSprays)
fit7$coef

# Q6: slope of a straight line fitted to the last five points only.
x <- -5:5
y <- c(5.12, 3.93, 2.67, 1.87, 0.52, 0.08, 0.93, 2.05, 2.54, 3.87, 4.97)
x1 <- 1:5
y1 <- y[7:11]
summary(lm(y1 ~ x1))
|
1da9bdfd2a184acbf40b78612de09cddc49d058b
|
a0fc3f6312e9bba3d7b370fc98adacd4c1c1b70b
|
/man/fcount2.Rd
|
bad3b8bc290724907c85f3f3ae3f9a77c1c1a666
|
[
"MIT"
] |
permissive
|
yusifang/foofactors
|
b30716f9c75cad8ea7d562d0a1c122cc4ee4a572
|
df10f08fa0629911bcbdcfacc9da93fce61a7286
|
refs/heads/main
| 2023-03-17T16:04:23.136687
| 2021-03-05T01:33:39
| 2021-03-05T01:33:39
| 344,342,199
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 224
|
rd
|
fcount2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fcount2.R
\name{fcount2}
\alias{fcount2}
\title{Title omit}
\usage{
fcount2(x)
}
\arguments{
\item{x}{x}
}
\value{
x
}
\description{
Title omit
}
|
1ba3aabe16fdcec091fff7f9a081d4540938d466
|
b2fecdade5403ea4eea7bb3d4dfb4b400d96f43e
|
/data_analysis/words.R
|
35f4a9df45ff9bef808e84e0a7b1a34531c58ff7
|
[] |
no_license
|
stankorenromp/StaticTwitterSent
|
63be537e11fddd4e5a73aa10c7e551446963277f
|
f27b5fee1aedc9a2eb3241d81911cc27879f521a
|
refs/heads/master
| 2021-01-20T15:04:08.476286
| 2013-10-23T20:18:21
| 2013-10-23T20:18:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,569
|
r
|
words.R
|
# Compare coverage/overlap of four sentiment lexicons (SWN3, AFINN,
# OpinionFinder, NRC) over a merged lexicon file, and draw a Venn diagram.
# NOTE(review): relies on a relative working directory and hard-coded file —
# run from the user's home directory or adjust the path.
setwd("Dropbox/thesis_exp/")
metalex<-read.csv(file="metaLex.csv",header=T,sep="\t")
# Drop words OpinionFinder marks neutral/both, SWN3 zeros, and words that are
# neutral in every NRC emotion column.
metalex<-metalex[metalex$OPFIND!="neutral"&metalex$OPFIND!="both",]
metalex<-metalex[metalex$SWN3!=0,]
NCRneutral<-metalex$Nanger==0&metalex$Nanticipation==0&metalex$Ndisgust==0&metalex$Nfear==0&metalex$Njoy==0&metalex$Nsadness==0&metalex$Nsurprise==0&metalex$Ntrust==0
metalex<-metalex[!NCRneutral,]
# Presence/absence (1/0) indicator per lexicon; "missing" marks absent entries.
# NOTE(review): NCR presence is proxied by the Nanger column alone — presumably
# all NRC columns are "missing" together; confirm against metaLex.csv.
myframe<-data.frame(words=metalex$word,SWN3=ifelse(metalex$SWN3=="missing",0,1),
AFINN=ifelse(metalex$AFINN=="missing",0,1),
OPF=ifelse(metalex$OPFIND=="missing",0,1),
NCR=ifelse(metalex$Nanger=="missing",0,1))
# Words unique to AFINN, and a random sample of 30 of them for inspection.
AFINN.words<-myframe[myframe$SWN3==0&myframe$AFINN==1&myframe$OPF==0&myframe$NCR==0,]
AFINN_sample<-sample(1:dim(AFINN.words)[1],size=30,replace=F)
AFINN.words[AFINN_sample,1];
# Words unique to each remaining lexicon, and words present in all four.
Opfinder.words<-myframe[myframe$SWN3==0&myframe$AFINN==0&myframe$OPF==1&myframe$NCR==0,]
NCR.words<-myframe[myframe$SWN3==0&myframe$AFINN==0&myframe$OPF==0&myframe$NCR==1,]
interact.words<-myframe[myframe$SWN3==1&myframe$AFINN==1&myframe$OPF==1&myframe$NCR==1,]
interact.sample<-sample(1:dim(interact.words)[1],size=30,replace=F)
interact.words[interact.sample,1]
# Same intersection taken on the raw lexicon values rather than indicators.
# NOTE(review): metalex$OPF does not exist (the column is OPFIND) — that term
# evaluates to NULL and is silently dropped from the condition; verify intent.
interact.words2<-metalex[metalex$SWN3!="missing"&metalex$AFINN!="missing"&
metalex$OPF!="missing"&metalex$Nfear!="missing",]
# Words negative in AFINN but positive in OpinionFinder, and vice versa.
neg.afinn<-as.numeric(as.character(interact.words2$AFINN))<0&interact.words2$OPFIND=="positive"
neg.opfind<-as.numeric(as.character(interact.words2$AFINN))>0&interact.words2$OPFIND=="negative"
interact.sample<-sample(1:dim(interact.words)[1],size=30,replace=F)
interact.words[interact.sample,1]
# Pairwise/triple coverage counts (TRUE = present) used to fill in the Venn
# region sizes hard-coded below.
summary(metalex$SWN3!="missing")
summary(metalex$Nanger!="missing")
summary(metalex$AFINN!="missing")
summary(metalex$OPFIND!="missing")
summary(metalex$Nanticipation!="missing"&metalex$SWN3!="missing")
summary(metalex$AFINN!="missing"&metalex$SWN3!="missing")
summary(metalex$OPFIND!="missing"&metalex$SWN3!="missing")
summary(metalex$AFINN!="missing"&metalex$Nanger!="missing")
summary(metalex$OPFIND!="missing"&metalex$Nanger!="missing")
summary(metalex$OPFIND!="missing"&metalex$AFINN!="missing")
summary(metalex$AFINN!="missing"&metalex$Ndisgust!="missing")
summary(metalex$AFINN!="missing"&metalex$Ndisgust!="missing"&metalex$OPFIND!="missing")
summary(metalex$AFINN!="missing"&metalex$Ndisgust!="missing"&metalex$OPFIND!="missing"
&metalex$SWN3!="missing")
summary(metalex$AFINN!="missing"&metalex$Ndisgust!="missing"&metalex$SWN3!="missing")
summary(metalex$Ndisgust!="missing"&metalex$OPFIND!="missing"&metalex$SWN3!="missing")
summary(metalex$Ndisgust!="missing"&metalex$OPFIND!="missing"&metalex$AFINN!="missing")
summary(metalex$OPFIND!="missing"&metalex$AFINN!="missing"&metalex$SWN3!="missing")
# Rebuild the indicator frame (identical to the one above).
myframe<-data.frame(words=metalex$word,SWN3=ifelse(metalex$SWN3=="missing",0,1),
AFINN=ifelse(metalex$AFINN=="missing",0,1),
OPF=ifelse(metalex$OPFIND=="missing",0,1),
NCR=ifelse(metalex$Nanger=="missing",0,1))
# venneuler is rJava-based; give the JVM more heap before loading it.
options( java.parameters="-Xmx4G" )
library(venneuler)
mat<-as.matrix(myframe)
vd<-venneuler(mat)
plot(vd)
# Region sizes transcribed from the summary() counts above.
values<-c(SWN3=34257
,NCR=4031
,AFINN=2017
,OPFINDER=4869
, "SWN3&NCR"=3870
,"SWN3&AFINN"=1341
,"SWN3&OPFINDER"=4256
,"AFINN&NCR"=865
,"OPFINDER&NCR"=2207
,"OPFINDER&AFINN"=1025
,"SWN3&NCR&AFINN"=834
,"SWN3&NCR&OPFINDER"=2191
,"SWN3&AFINN&OPFINDER"=978
,"NCR&AFINN&OPFINDER"=682
,"SWN3&NCR&AFINN&OPFINDER"=677
)
vd<-venneuler(values)
plot(vd)
|
a8a1c50b1e89f5e5b6ee04b2911ee7626af115d7
|
670d56d2eed0d4d99eef45cd880f6ba15d65f7ba
|
/chapter/01/R/recipe1b.R
|
486214e49482434874fe379db7dc42ad7b032bee
|
[] |
no_license
|
Keniajin/graphsCookbook
|
72ec324554ae4c689529fbc685e04b79f44ad6f9
|
6bd582ef2491019a4e291059ab305d8066c53370
|
refs/heads/master
| 2021-01-21T16:00:39.696560
| 2012-02-12T03:12:23
| 2012-02-12T03:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 949
|
r
|
recipe1b.R
|
source("comboPlot.R")
# Base-graphics version of recipe 1b: scatter plot of stopping distance
# against speed for the built-in `cars` data set, with tight ("i") axis
# ranges and solid red points.
base.recipe.1b <- function() {
  plot(
    dist ~ speed,
    data = cars,
    main = "Relationship between car \ndistance \\& speed",
    xlab = "Speed (miles per hour)",
    ylab = "Distance traveled (miles)",
    xlim = c(0, 30),
    ylim = c(0, 140),
    xaxs = "i",
    yaxs = "i",
    col = "red",
    pch = 19
  )
}
# ggplot2 version of recipe 1b, drawn next to the base version below.
# NOTE(review): the original used opts()/theme_text(), which were removed from
# ggplot2 in 0.9.2 — the code errors on any modern ggplot2. labs()/theme()/
# element_text() are the current equivalents. Constant colour/shape are layer
# parameters (not aes() mappings), so scale_colour_identity() and the legend
# suppression workaround are no longer needed, but legend.position is kept
# for parity with the original intent.
plot1b <- ggplot(cars) +
  geom_point(aes(x = speed, y = dist), colour = "red", shape = 19) +
  scale_x_continuous("Speed (miles per hour)", limits = c(0, 30), expand = c(0, 0)) +
  scale_y_continuous("Distance traveled (miles)", limits = c(0, 140), expand = c(0, 0)) +
  theme_bw() +
  labs(title = "Relationship between car \ndistance \\& speed") +
  theme(legend.position = "none",
        axis.title.x = element_text(vjust = -0.25),
        plot.title = element_text(vjust = 1.25, face = "bold"),
        plot.margin = unit(c(1, 2, 2, 1), "lines"))
# Render base-graphics and ggplot2 versions together (helper from comboPlot.R).
doComboPlot(doBasePlot = base.recipe.1b, gplot = plot1b)
|
cca0c3e717875451085466a9ed7648219aed2be2
|
02ad61c1562483bd72a90b9a0baa1f5d7817b6e1
|
/test_size.R
|
762fbcafca90d0e6bad2b99b67d4d461d2d1023b
|
[] |
no_license
|
postgrespro/spatial_benchmark
|
0c954699fdc261f79707c9d272aef4d59f0feed8
|
88563adf73d2ef9d819c818f8d27d79fd7626ea4
|
refs/heads/master
| 2023-06-23T05:20:22.560407
| 2018-09-13T08:35:48
| 2018-09-13T08:35:48
| 143,134,460
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
test_size.R
|
# Plot benchmark query time against table size for the PostgreSQL spatial
# benchmark, on log-log axes with sfsmisc::eaxis() exponential tick labels.
require(sfsmisc)
# NOTE(review): hard-coded working directory — adjust before reuse.
setwd('~/compwork/postgres/spherical')
# Whitespace-separated columns of 'out_size_pg'; t and n are used below as
# query time and row count. NOTE(review): the remaining column meanings
# (selectivity, heap/index block reads/hits) are inferred from the names —
# confirm against the script that produces this file.
o_pg = scan('out_size_pg', list(t=0, n=0, sr=0, hbread=0, hbhit=0, ibread=0, ibhit=0))
png('fig_size_number_time.png', width=1024, height=768)
# axes=FALSE so both axes can be drawn with eaxis() below.
plot(o_pg$n, o_pg$t, type="b", pch=2, col="red", log='xy', cex=0.5, xlim=c(1e4,1e8), ylim=c(1e-2,1e2),
xlab="Number of points in table", ylab="Query time, ms", axes=FALSE)
box()
eaxis(1)
eaxis(2)
dev.off()
# Disabled variant: index+heap block reads against table size.
## #png('fig_match_number_time.png', width=1024, height=768)
## plot(o_pg$n, (o_pg$ibread + o_pg$hbread), type="b", pch=2, col="red", log='xy', cex=0.5, xlim=c(1e4,1e8), ylim=c(1,1e5),
## xlab="Number of points in table", ylab="idx_blks_read", axes=FALSE)
## box()
## eaxis(1)
## eaxis(2)
## #dev.off()
|
149aa99ad2fdf01acd4c71e179051c0979926122
|
cfda292acf2c2d4ad18b6bf1768ce7c4b4e2bc58
|
/tests/testthat.R
|
212a584d394d0233ffa2dd72f8c8052317747ce9
|
[] |
no_license
|
briandconnelly/acewater
|
8f0cac27c5eed4b8e6d11418e409480e192373e6
|
323478e54f217dcb846e057c5b9c279b753184bd
|
refs/heads/master
| 2021-06-06T10:01:01.136160
| 2021-04-25T02:13:00
| 2021-04-25T02:13:00
| 92,439,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/ for
# the acewater package (executed by R CMD check).
library(testthat)
library(acewater)
test_check("acewater")
|
72f965c86c7f343405bf75d5e6e1b5ec43c590c1
|
aaec91363846b2cd9db733aa27a34b059bd24b00
|
/man/matrix-classes.Rd
|
fe132842b9df0429a6fd083373d90d84d415f2bf
|
[] |
no_license
|
cran/orientlib
|
2497db6c76472e79cc545a29cb227e1536eaeaf1
|
7978fe7f10a4a8f23c0618b5f93efc3b3755ad3e
|
refs/heads/master
| 2023-04-20T22:21:33.385297
| 2023-01-10T15:20:02
| 2023-01-10T15:20:02
| 17,698,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,299
|
rd
|
matrix-classes.Rd
|
\name{matrix-classes}
\docType{class}
\alias{matrix-classes}
\alias{rotmatrix-class}
\alias{skewmatrix-class}
\title{Matrix orientation classes }
\description{ An orientation represented by 3 x 3 SO(3) matrices or 3 x 3 skew symmetric matrices }
\section{Objects from the Class}{
Objects can be created by calls of the form \code{\link{rotmatrix}(x)}
or \code{\link{skewmatrix}(x)}.
The objects store the matrices in a 3 x 3 x n array.
}
\section{Slots}{
\describe{
\item{\code{x}:}{3 x 3 x n array holding the matrices. }
}
}
\section{Extends}{
Class \code{"orientation"}, directly.
Class \code{"vector"}, by class "orientation".
}
\section{Methods}{
\describe{
\item{[, [<-}{Extract or assign to subvector }
\item{[[, [[<-}{Extract or assign to an entry }
\item{length}{The length of the \code{orientation} vector }
\item{coerce}{Coerce methods are defined to convert all \code{\link{orientation}} descendants
from one to another, and to coerce an appropriately shaped matrix or array to a \code{rotmatrix}}
}
}
\author{ Duncan Murdoch }
\seealso{\code{\link{orientation-class}}, \code{\link{vector-classes}}, \code{\link{rotmatrix}},
\code{\link{skewmatrix}}}
\examples{
x <- rotmatrix(matrix(c(1,0,0, 0,1,0, 0,0,1), 3, 3))
x
skewmatrix(x)
}
\keyword{classes}
|
a342442d9c69c5a81afe2c9d49c61660f472b38a
|
af84f4fb4bd7c41432482cce6170da3e6af3a130
|
/PE/18.R
|
b763c886617deaded0603250005fa31017008ba2
|
[] |
no_license
|
tkmckenzie/pan
|
a65fc375eea8171c9b64f8360c5cf7b152830b7d
|
5337e4c1d09f06f2043551e1dd1ec734aab75b49
|
refs/heads/master
| 2023-01-09T11:54:29.636007
| 2022-12-21T22:26:58
| 2022-12-21T22:26:58
| 156,240,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
18.R
|
# Project Euler problem 18: find the maximum total from top to bottom of a
# number triangle, moving only to one of the two adjacent cells below.
# Solved bottom-up in O(rows^2): each row is folded into the row above by
# keeping, for every cell, the better of its two children.
# (The original opened with rm(list = ls()); wiping the global environment
# from inside a script is an anti-pattern and has been removed.)

# The triangle as a literal: one row per line, cells space-separated.
s = "75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"

# Collapse row.2 (length k + 1) into row.1 (length k): each cell of row.1
# takes the larger of its left child row.2[i] and right child row.2[i + 1].
row.sum.max = function(row.1, row.2){
  row.sums = rbind(row.1 + row.2[-length(row.2)], row.1 + row.2[-1])
  return(apply(row.sums, 2, max))
}

# Parse the literal into a list of numeric rows.
l = lapply(strsplit(s, "\n")[[1]], function(e) as.numeric(strsplit(e, " ")[[1]]))

# Fold the triangle upwards; afterwards l[[1]] holds the answer.
for (row in length(l):2){
  l[[row - 1]] = row.sum.max(l[[row - 1]], l[[row]])
}
l[[1]]
|
2b510e366f774cc99834abec69e41067092b6a8c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sptemExp/examples/inter2conOpt.Rd.R
|
5084874e483d8c9f45a0a8f3f92dbc50ad8c385b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 820
|
r
|
inter2conOpt.Rd.R
|
# Example code extracted from the sptemExp inter2conOpt() help page
# (requires the sptemExp package and its bundled datasets).
library(sptemExp)
### Name: inter2conOpt
### Title: Batch Interpolation of the Missing Values for Time Series Using
###   Constrained Optimization.
### Aliases: inter2conOpt
### Keywords: models parallel
### ** Examples
## No test:
# Here is the sample for the first 500 locations.
# In practice, you may need more point locations and more cores.
data("allPre500","shdSeries2014")
# Get the temporal basis functions to be used in constrained optimization
season_trends=getTBasisFun(shdSeries2014,idStr="siteid",dateStr="date",
valStr="obs",df=10,n.basis=2,tbPath=NA)
#Constrained optimization
# Day-of-year index derived from the date column ("%j" = 001-366).
season_trends$tid=as.numeric(strftime(season_trends$date, format = "%j"))
allPre_part_filled=inter2conOpt(tarPDf=allPre500[c(1:6),],pol_season_trends=season_trends,ncore=2)
## End(No test)
|
c02608c70f336cacd2791d773211a8e58c966735
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ropercenter/examples/read_ascii.Rd.R
|
e83bc2896dd06e9b20fd48c06218b5f6fe88e2e3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
read_ascii.Rd.R
|
# Example code extracted from the ropercenter read_ascii() help page.
# The example is wrapped in "Not run" (##D) because it needs a downloaded
# Roper Center data file.
library(ropercenter)
### Name: read_ascii
### Title: Read ASCII datasets downloaded from the Roper Center
### Aliases: read_ascii
### ** Examples
## Not run:
##D gallup9206 <- read_ascii(file = "roper_data/USAIPOGNS1992-222054/USAIPOGNS1992-222054.dat",
##D                          total_cards = 4,
##D                          var_names = c("q24", "weight"),
##D                          var_cards = c(4, 1),
##D                          var_positions = c(46, 13),
##D                          var_widths = c(1, 3))
## End(Not run)
|
93fd1c23ba08b9a24e7834d35cd8c3ffbea39a30
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/crisp/examples/crisp-package.Rd.R
|
9a93898e5ccb46bd2f21332a278a3c610f987e76
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,023
|
r
|
crisp-package.Rd.R
|
# Example code extracted from the crisp package-level help page; the whole
# workflow is wrapped in "Not run" (##D) comment markers, so only the
# library() call executes.
library(crisp)
### Name: crisp-package
### Title: crisp: A package for fitting a model that partitions the
###   covariate space into blocks in a data-adaptive way.
### Aliases: crisp-package
### ** Examples
## Not run:
##D #general example illustrating all functions
##D #see specific function help pages for details of using each function
##D
##D #generate data (using a very small 'n' for illustration purposes)
##D set.seed(1)
##D data <- sim.data(n = 15, scenario = 2)
##D #plot the mean model for the scenario from which we generated data
##D plot(data)
##D
##D #fit model for a range of tuning parameters, i.e., lambda values
##D #lambda sequence is chosen automatically if not specified
##D crisp.out <- crisp(X = data$X, y = data$y)
##D #or fit model and select lambda using 2-fold cross-validation
##D #note: use larger 'n.fold' (e.g., 10) in practice
##D crispCV.out <- crispCV(X = data$X, y = data$y, n.fold = 2)
##D
##D #summarize all of the fits
##D summary(crisp.out)
##D #or just summarize a single fit
##D #we examine the fit with an index of 25. that is, lambda of
##D crisp.out$lambda.seq[25]
##D summary(crisp.out, lambda.index = 25)
##D #lastly, we can summarize the fit chosen using cross-validation
##D summary(crispCV.out)
##D #and also plot the cross-validation error
##D plot(summary(crispCV.out))
##D #the lambda chosen by cross-validation is also available using
##D crispCV.out$lambda.cv
##D
##D #plot the estimated relationships between two predictors and outcome
##D #do this for a specific fit
##D plot(crisp.out, lambda.index = 25)
##D #or for the fit chosen using cross-validation
##D plot(crispCV.out)
##D
##D #we can make predictions for a covariate matrix with new observations
##D #new.X with 20 observations
##D new.data <- sim.data(n = 20, scenario = 2)
##D new.X <- new.data$X
##D #these will give the same predictions:
##D yhat1 <- predict(crisp.out, new.X = new.X, lambda.index = crispCV.out$index.cv)
##D yhat2 <- predict(crispCV.out, new.X = new.X)
## End(Not run)
|
37dbfcac18d060df0ba45672ae4f072ef56519bc
|
6a1d84304d1f4d45e3e417a4ca772a17aa03e559
|
/Part2/part2_prediction.R
|
af468d9eece1eab960566d56dc8d84d0f779cf7a
|
[] |
no_license
|
VivekKumarNeu/ADS-Midterm
|
59a77edbb833926917f842a7be2663c8159bfb11
|
e8fb279ab63a6b161b81f89ee64c1ebbc5a3b407
|
refs/heads/master
| 2020-07-03T07:55:54.170670
| 2016-11-21T04:48:36
| 2016-11-21T04:48:36
| 74,187,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,412
|
r
|
part2_prediction.R
|
# Algorithms on the entire model.
# Fits regression, neural-network and KNN models to building energy (KWH)
# data, per-building (78 subsets in `df`) and on the full dataset, then
# computes RMSE/MAE/MAPE and flags outliers.
# NOTE(review): install.packages() calls should not live in an analysis
# script — move to a setup script or guard with requireNamespace().
install.packages("forecast")
install.packages("devtools")
install.packages("broom")
install.packages("ROCR")
install.packages("FNN")
install.packages('neuralnet')
library(dplyr)
library(tidyr)
library(grid)
library(MASS)
library(neuralnet)
library(FNN)
library(forecast)
###########################################################################
#Setting the working directory-
# NOTE(review): hard-coded user path; parameterise before reuse.
setwd("/home/sankalp/Documents/ADS/ads_midterm/Data")
#Reading the input data
inputRead <- read.csv("Cleansed.csv")
#inputRead[apply(inputRead[,-1], 1, function(x) !all(x==0)),]
#names(inputRead)
#Selecting only the selected features
inputRead <- subset(inputRead, select = c(KWH,Hour,TemperatureF,Area,DayOfWeek,Month,BaseHourFlag,BuildingID,Meter_Number))
# Dividing the dataset in train and test
read_size <- floor(0.80 * nrow(inputRead))
set.seed(80)
train_data_ind <- sample(seq_len(nrow(inputRead)), size = read_size)
train_data <- inputRead[train_data_ind, ]
test_data <- inputRead[-train_data_ind, ]
# Replace exact zeros with a tiny value (presumably to avoid division by
# zero in MAPE-style metrics — TODO confirm).
train_data[train_data==0]<-0.000001
test_data[test_data==0]<- 0.000001
#Regression on 78 models
# NOTE(review): `df` (the list of 78 per-building data frames) is never
# defined in this file — it must come from an earlier script in the project.
for (i in 1:78){
dataset<- df[[i]]
names(dataset)
read_size <- floor(0.80 * nrow(dataset))
set.seed(80)
train_data_ind <- sample(seq_len(nrow(dataset)), size = read_size)
train_data <- dataset[train_data_ind, ]
test_data <- dataset[-train_data_ind, ]
train_data[train_data==0]<-0.000001
test_data[test_data==0]<- 0.000001
varnames <- c("Hour", "TemperatureF", "Area", "DayOfWeek","Month","BaseHourFlag")
# NOTE(review): modelfits is re-created on every iteration, discarding the
# previous fits; only modelfits[[i]] of the current pass survives the loop.
modelfits <- vector(length(varnames), mode = "list")
names(modelfits) <- varnames
names(train_data)
modelfits[[i]]<- lm(KWH~Hour+TemperatureF+Area+DayOfWeek+Month,data = train_data)
summary(modelfits[[i]])
library(forecast)
pred = predict(modelfits[[i]], test_data)
accuracy_pred=accuracy(pred, test_data$KWH)
x <- list(accuracy_pred)
print(x)
summary(modelfits[[i]])
}
# Neural Network on 78 models
# Toy sqrt-regression data; appears unused by the loop below — TODO confirm.
trainingInput= as.data.frame(runif(50,min=0,max=100))
trainingOutput= sqrt(trainingInput)
print(trainingOutput)
trainingData<-cbind(trainingInput,trainingOutput)
# NOTE(review): na.omit() result is discarded — df[[1]] is not modified.
na.omit(df[[1]])
modelfits <- vector(mode = "list")
for(i in 1:78){
modelfits[[i]]<-neuralnet(KWH~Hour+TemperatureF+Area+DayOfWeek+Month,data=df[[i]],hidden=2,threshold=0.01)
print(modelfits[[i]])
plot(modelfits[[i]])
modelfits[[i]]$result.matrix
}
# Knn on 78 data sets
for(i in 1:78){
dataset=df[[i]]
read_size <- floor(0.80 * nrow(dataset))
set.seed(80)
train_data_ind <- sample(seq_len(nrow(df[[i]])), size = read_size)
train_data <- dataset[train_data_ind, ]
test_data <- dataset[-train_data_ind, ]
train_data[train_data==0]<-0.000001
test_data[test_data==0]<- 0.000001
# NOTE(review): `modelfit` (singular) is first assigned here via [[i]] but is
# only created as a list further below — run order matters; likely a
# modelfit/modelfits naming mix-up.
modelfit[[i]] <- knn.reg(train=train_data,y=train_data$KWH,test_data, k = 3)
print(modelfit)
test_data$KWH
head(test_data$KWH)
# NOTE(review): knn.reg() returns predictions in $pred, not $KWH — this
# likely yields numeric(0)/NA; verify against FNN::knn.reg documentation.
error=as.double(modelfit[[i]]$KWH)-test_data$KWH
accuracy(train_data$KWH,test_data$KWH)
}
# NOTE(review): prints the forecast::accuracy function object itself, not a
# computed result.
print(accuracy)
# Regression on full dataset
varnames <- c("Hour", "TemperatureF", "Area", "DayOfWeek","Month","BaseHourFlag")
modelfit <- vector(length(varnames), mode = "list")
names(modelfit) <- varnames
names(train_data)
# The list created just above is immediately overwritten by the lm fit.
modelfit<- lm(KWH~Hour+TemperatureF+Area+DayOfWeek+Month,data = train_data)
summary(modelfit)
pred = predict(modelfit, test_data)
accuracy_pred=accuracy(pred, test_data$KWH)
print(accuracy_pred)
summary(modelfit)
#Finding error
print(modelfits[[2]])
print(test_data[2]$KWH)
# NOTE(review): lm objects have no $KWH component; this likely produces an
# error/NULL — the intent was presumably fitted/predicted values.
error.reg = (modelfits[[2]]$KWH - test_data$KWH)
# NOTE(review): error.nnet is not defined until the NN section below — this
# print fails (or shows a stale object) on a clean run.
print(error.nnet)
# Mean absolute percentage error relative to the test-set KWH (captures
# test_data from the enclosing environment).
mape <- function(error) {
mean(abs(error/test_data$KWH) * 100)
}
# Mean absolute error.
mae <- function(error)
{
mean(abs(error))
}
# Root mean squared error.
rmse <- function(error)
{
sqrt(mean(error^2))
}
#calculating mean square value
# NOTE(review): error.nnet is still undefined here; the intended argument is
# presumably error.reg throughout this section.
rmse(error.nnet)
rms <- c("RMS", rmse(error.reg))
mae(error.nnet)
ma <- c("MAE", mae(error.reg))
mape(error.nnet)
map <- c("MAPE", mape(error.reg))
# Neural Network on entire dataset
modelfit<-neuralnet(KWH~Hour+TemperatureF+Area+DayOfWeek+Month,data=inputRead,hidden=2,threshold=0.01)
print(modelfit)
modelfit$result.matrix
#Finding error
print(modelfit)
print(test_data$KWH)
# NOTE(review): modelfits here is the list of per-building NN fits; it has no
# $KWH component — predictions should come via compute()/predict().
error.nnet = (modelfits$KWH - test_data$KWH)
print(error.nnet)
# Identical re-definitions of the three metric helpers above.
mape <- function(error) {
mean(abs(error/test_data$KWH) * 100)
}
mae <- function(error)
{
mean(abs(error))
}
rmse <- function(error)
{
sqrt(mean(error^2))
}
#calculating mean square value
rmse(error.nnet)
rms <- c("RMS", rmse(error.nnet))
mae(error.nnet)
ma <- c("MAE", mae(error.nnet))
mape(error.nnet)
map <- c("MAPE", mape(error.nnet))
# KNN on entire dataset
print(train_data)
modelfit <- knn.reg(train=train_data,y=train_data$KWH,test_data, k = 3)
test_data$KWH
head(test_data$KWH)
# NOTE(review): as above, knn.reg() output exposes $pred, not $KWH.
error=as.double(modelfit$KWH)-test_data$KWH
accuracy(train_data$KWH,test_data$KWH)
summary(modelfit)
#Finding error
error.knn = (modelfit$KWH - test_data$KWH)
print(error.nnet)
# Third identical re-definition of the metric helpers.
mape <- function(error) {
mean(abs(error/test_data$KWH) * 100)
}
mae <- function(error)
{
mean(abs(error))
}
rmse <- function(error)
{
sqrt(mean(error^2))
}
#calculating mean square value
# NOTE(review): labels say error.knn but rmse/mae/mape are also called with
# error.nnet on alternating lines — presumably copy-paste residue.
rmse(error.nnet)
rms <- c("RMS", rmse(error.knn ))
mae(error.nnet)
ma <- c("MAE", mae(error.knn ))
mape(error.nnet)
map <- c("MAPE", mape(error.knn ))
# Computing the outliers-
# NOTE(review): modelfit is a knn.reg result (a list), not a data frame; it
# has neither $KWH nor a `Predicted value` column, and %>% mutate() below
# expects a data frame — this section looks unrunnable as written; confirm
# the intended object.
modelfit$res <- modelfit$KWH - modelfit$`Predicted value`
deviation <- sd(modelfit[,5])
Final_prediction <- modelfit %>% mutate(Outlier_Tag = ifelse(res >= (2*deviation) , 1, 0))
|
b01df0c9fb80b3249b6526da5259d3f943f17e88
|
ed10f88f5e57500f49e64c57986eb6bf3027b490
|
/plot1.R
|
783245c7d88170d9070675113ead787de621323e
|
[] |
no_license
|
solis/ExData_Plotting1
|
d70a8f37fd0e2aad82775243954d7fcf15188c75
|
e35110a7fba96c8eaa8dfd24db369ec171496ccc
|
refs/heads/master
| 2021-01-20T17:09:25.382789
| 2015-08-06T11:24:13
| 2015-08-06T11:24:13
| 39,191,277
| 0
| 1
| null | 2015-07-16T10:37:12
| 2015-07-16T10:37:12
| null |
UTF-8
|
R
| false
| false
| 185
|
r
|
plot1.R
|
# Exploratory Data Analysis course, plot 1: histogram of Global Active Power.
# read() is defined in read.R — presumably it loads the household power
# consumption data; confirm there.
source("read.R")
# Get data
df <- read()
# Render to plot1.png and close the device.
png("plot1.png")
hist(df$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
375aeaa088c19d0b35c90ce80ea3fb7198cd1b63
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lidR/examples/readLAS.Rd.R
|
d5a1fc5e0e66d068b8a1194d66b987fb7e099027
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
readLAS.Rd.R
|
# Example code extracted from the lidR readLAS() help page, using the
# Megaplot.laz sample shipped with the package.
library(lidR)
### Name: readLAS
### Title: Read .las or .laz files
### Aliases: readLAS
### ** Examples
LASfile <- system.file("extdata", "Megaplot.laz", package="lidR")
las = readLAS(LASfile)
# select = which point attributes to load; filter = streaming filter string.
las = readLAS(LASfile, select = "xyz")
las = readLAS(LASfile, select = "xyzi", filter = "-keep_first")
las = readLAS(LASfile, select = "xyziar", filter = "-keep_first -drop_z_below 0")
# Negation of attribute is also possible (all except intensity and angle)
las = readLAS(LASfile, select = "* -i -a")
|
69307b67d677a9c6f7ca301f24ded78298ad3efb
|
6584c3f2f7f5ec2aea9d0ddfafb2c4d717a743ee
|
/DataPreprocessing.R
|
c108614fc113dde7fe317c5993012f4985a620c3
|
[] |
no_license
|
rishikesh21/data-merger
|
f81006d126218c4e5588425471161dff461ae8c6
|
814dd08309c536b873cf5120718230976f2036e7
|
refs/heads/master
| 2020-04-06T18:10:24.368315
| 2018-11-21T11:23:19
| 2018-11-21T11:23:19
| 157,688,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
DataPreprocessing.R
|
# Merge every TripAdvisor review CSV in a directory into one de-duplicated
# CSV using data.table's fast fread/fwrite.
library(data.table)
library(dplyr)
# NOTE(review): hard-coded Windows path; parameterise before reuse.
setwd("D:\\IS5126\\RestaurantReviewTripAdvisor\\")
files = list.files(pattern="*.csv")
# Read and row-bind all files.
# NOTE(review): on failure the error handler returns the condition object,
# so DT would not be a data.table and the lines below would then error —
# consider re-raising with stop() instead.
DT =tryCatch({
rbindlist(lapply(files, fread))
},
error=function(e){
print(e)
}
)
# Drop exact duplicate rows before writing the merged output.
DT=DT[!duplicated(DT)]
fwrite(DT,file="D:\\IS5126\\trip_advisor_results.csv",sep=',',col.names = TRUE,row.names = FALSE)
|
52053b8d537f5da119d6b60c0ae1e3519f9124f6
|
f1b5dab47082ad795250ae3882bab5d0352e61c7
|
/man/comparison_test_data_perfect_prediction.Rd
|
9e056f10d4b026e6830516a05c74219a70aafbb5
|
[] |
no_license
|
johnchower/gloobtyd
|
79a20071b4f3637b12f6d885946fbab94740ce99
|
7c39258924eb51364850a9f3158dc0bfa787e24e
|
refs/heads/master
| 2021-01-18T05:09:16.842040
| 2017-03-24T15:51:20
| 2017-03-24T15:51:20
| 84,277,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 440
|
rd
|
comparison_test_data_perfect_prediction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_doc.r
\docType{data}
\name{comparison_test_data_perfect_prediction}
\alias{comparison_test_data_perfect_prediction}
\title{Data frame for testing the output of the calculate_sps function}
\format{data.frame}
\usage{
comparison_test_data_perfect_prediction
}
\description{
Data frame for testing the output of the calculate_sps function
}
\keyword{datasets}
|
a67e37ea4c593fcae864c092f204c2a3827a3289
|
63b9f4ab3f858a53dababfb814f543b4f76f4556
|
/man/coeffRV.Rd
|
88c8f9c58fb10fe947b4699cc14638a1d751b132
|
[] |
no_license
|
cran/FactoMineR
|
81eaa31984e57e8f8836bd7bdc16a2a3f0fc629c
|
46df41770a7c7599fc28b0bf7351fb8034ba0c1f
|
refs/heads/master
| 2023-04-07T02:53:01.777853
| 2023-03-27T07:50:02
| 2023-03-27T07:50:02
| 17,679,199
| 14
| 16
| null | 2018-05-31T16:57:54
| 2014-03-12T18:53:31
|
R
|
UTF-8
|
R
| false
| false
| 1,976
|
rd
|
coeffRV.Rd
|
\name{coeffRV}
\alias{coeffRV}
\title{Calculate the RV coefficient and test its significance}
\description{
Calculate the RV coefficient and test its significance.
}
\usage{
coeffRV(X, Y)
}
\arguments{
\item{X}{a matrix with \emph{n} rows (individuals) and \emph{p} numerous columns (variables)}
\item{Y}{a matrix with \emph{n} rows (individuals) and \emph{p} numerous columns (variables)}
}
\details{
Calculates the RV coefficient between \code{X} and \code{Y}. It returns also the standardized RV,
the expectation, the variance and the skewness under the permutation
distribution. These moments are used to approximate the exact
distribution of the RV statistic with the Pearson type III approximation and the p-value associated to this test is given.
}
\value{
A list containing the following components:
\item{RV}{the RV coefficient between the two matrices}
\item{RVs}{the standardized RV coefficients}
\item{mean}{the mean of the RV permutation distribution}
\item{variance}{the variance of the RV permutation distribution}
\item{skewness}{the skewness of the RV permutation distribution}
\item{p.value}{the p-value associated with the test of the significance of the RV coefficient (with the Pearson type III approximation)}
}
\references{
Escouffier, Y. (1973) \emph{Le traitement des variables vectorielles}. Biometrics \bold{29} 751--760.\cr
Josse, J., Husson, F., Pag\`es, J. (2007) \emph{Testing the significance of the RV coefficient}.
Computational Statististics and Data Analysis. \bold{53} 82--91.\cr
Kazi-Aoual, F., Hitier, S., Sabatier, R., Lebreton, J.-D., (1995) Refined approximations to permutations tests
for multivariate inference. Computational Statistics and Data Analysis, \bold{20}, 643--656 \cr
}
\author{Julie Josse, Francois Husson \email{Francois.Husson@agrocampus-ouest.fr}}
\examples{
data(wine)
X <- wine[,3:7]
Y <- wine[,11:20]
coeffRV(X,Y)
}
\keyword{multivariate}
|
9b319679fc79fb8c988b76bc283e4e1568f71c0b
|
1dd001af7b9a6d69c6af484dc64ce95a60acf5e8
|
/Code/UD_buf_GPS.R
|
3290bcd6c2332ef75451e796bde5dc3c1a0990e0
|
[] |
no_license
|
Geethen/where_the_wild_things_are
|
c1fc394d53a685873a341f991c75b6c30c8b0282
|
d5ad3faa82d746241824abbb2e052cbeb476f473
|
refs/heads/main
| 2023-07-26T18:27:33.342127
| 2021-08-26T14:29:28
| 2021-08-26T14:29:28
| 371,052,132
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
r
|
UD_buf_GPS.R
|
# Fit ctmm movement models for six buffalo, compute AKDE utilisation
# distributions, and export the mean UD as a GeoTIFF.
library(ctmm)
library(parallel)
data(buffalo)
# Fit movement models for the first six buffalo (guess then refine), in
# parallel across animals.
GUESS <- mclapply(buffalo[1:6], function(b) ctmm.guess(b,interactive=FALSE))
FITS <- mclapply(1:6, function(i) ctmm.fit(buffalo[[i]],GUESS[[i]]) )
names(FITS) <- names(buffalo[1:6])
# Autocorrelated kernel density estimates, then their unweighted mean.
UDS <- akde(buffalo[1:6],FITS)
meanUD = mean(UDS, weights= NULL)
plot(meanUD)
# NOTE(review): writeRaster() comes from the raster package, which is never
# attached here — confirm ctmm exports a method or add library(raster).
# Hard-coded output path; parameterise before reuse.
writeRaster(meanUD,"D:/My_projects/Rhinos_KNP/knp_buffalo/meanUD","GTiff",DF="PMF")
|
d3dee9cda291c3b0f83b5cde3efc8a42a0523f3b
|
6deb321a1918055852ab2f9a5d221fc5356ea2af
|
/man/defineOnset.Rd
|
07e6592e33b7c00023c8ebd9ec742ac6f903dd8a
|
[] |
no_license
|
kemacdonald/lwl
|
6c01faf2feb6271dedffea4c5dcbf72fe4b96c67
|
2be012b0f215bb531d1319c4876d689744070d6c
|
refs/heads/master
| 2021-01-11T00:04:08.983943
| 2016-10-13T05:29:29
| 2016-10-13T05:29:29
| 70,760,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 522
|
rd
|
defineOnset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/define_onset.R
\name{defineOnset}
\alias{defineOnset}
\title{Define critical onset in iChart}
\usage{
defineOnset(iChart, critonset = 300, includeAways = FALSE)
}
\arguments{
\item{iChart}{the iChart}
\item{critonset}{a number}
\item{includeAways}{logical}
}
\description{
This function allows you to set the critical onset for the analysis window in the iChart
}
\examples{
defineOnset(iChart = "iChart", critonset = 300, includeAways=F)
}
|
eeb1b832477503f3cdd838debaefbab3b41060f3
|
9dfbd575e53169f9327b5b24ed52fa3014972d19
|
/Descripcion de Datos/Count of time by projects.R
|
79685dcc9361706057f5932933f7abc42f4aee74
|
[] |
no_license
|
davidDacR/Thesis
|
15a7ec7a7a984c7c3282906109673c71f1cade7b
|
54e7c8a151d9c8329e1683a106e122dd6e2bb377
|
refs/heads/master
| 2016-09-06T02:19:25.118686
| 2015-09-02T22:48:38
| 2015-09-02T22:48:38
| 40,221,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,658
|
r
|
Count of time by projects.R
|
#################
##Time/Projects##
#################
# Query a local TSP/PACE MySQL database for total logged minutes per project
# and upload a bar chart to Plotly.
# NOTE(review): dbDriver/dbConnect/dbSendQuery come from DBI+RMySQL, which
# are never attached here (only plotly is) — confirm they are loaded by a
# calling script, or add library(RMySQL).
# NOTE(review): database credentials are hard-coded — move to a config file
# or environment variables.
library(plotly)
# set driver
m<-dbDriver("MySQL");
#connect to the DB in MySQL
con<-dbConnect(m,user='root',password='admin',host='localhost',dbname='tsppacedb');
options(max.print=1000000);
# Accumulators: x = project key, y = total minutes. (vectorT is unused.)
vectorT<-c();
vectorX<-c();
vectorY<-c();
#get count of projects
sqlcmd_1 <- paste("select count(*) from project");
projectCantQ <- dbSendQuery(con, sqlcmd_1);
projectCantAux <- dbFetch(projectCantQ, n = -1);
projectCant <- projectCantAux[1,1];
proj = 1;
contCompGraf = 1;
# One query per project: sum of time-log minutes for its current plan items.
# NOTE(review): assumes project_key values are exactly 1..projectCant —
# confirm against the schema; a GROUP BY query would avoid the loop.
for (proj in 1:projectCant) {
#time log
sqlcmd_7 <- paste("select ifnull(sum(tl.time_log_delta_minutes), 0) FROM time_log_fact_hist tl
INNER JOIN plan_item pi ON pi.plan_item_key = tl.plan_item_key
WHERE tl.row_current_flag = 1 and pi.project_key = ", proj);
timeQ <- dbSendQuery(con, sqlcmd_7);
timeAux <- dbFetch(timeQ, n = -1);
time <- timeAux[1,1];
vectorX[contCompGraf] <- proj;
vectorY[contCompGraf] <- time;
contCompGraf <- contCompGraf + 1;
}
# Disconnect from the database.
dbClearResult(dbListResults(con)[[1]]);
dbDisconnect(con);
#PLOTTING
# NOTE(review): py$plotly(...) is the legacy (pre-v2) plotly-for-R API;
# current releases build charts with plot_ly()/api_create().
py <- plotly()
data <- list(
list(
x = vectorX,
y = vectorY,
type = "bar"
)
)
layout <- list(
title = "Tiempos por Proyecto",
font = list(family = "Raleway, sans-serif"),
showlegend = FALSE,
xaxis = list(
title = "Proyectos",
tickangle = -45
),
yaxis = list(
title = "Tiempos en minutos",
zeroline = FALSE,
gridwidth = 2
),
bargap = 0.05
)
response <- py$plotly(data, kwargs=list(layout=layout, filename="TimeByProject", fileopt="overwrite"))
url <- response$url
|
b4436fa6f724af5314b317f1de9232994e7cf7f3
|
fb0196d11088962ece6a9e50e617416f5de5606e
|
/man/plotPredictions.regres.Rd
|
23c64c116bbac49842054f6a8600230c8c72a7da
|
[] |
no_license
|
zeehio/mdatools
|
97b9ff3fbe5885232df6a8e9b36d278e0aa01fad
|
4dc3920700536a521b9e42efc67ddddb69235e7f
|
refs/heads/master
| 2021-01-12T19:43:28.567829
| 2015-12-01T07:10:29
| 2015-12-01T07:10:29
| 57,378,371
| 0
| 0
| null | 2016-04-29T11:04:35
| 2016-04-29T11:04:35
| null |
UTF-8
|
R
| false
| false
| 1,197
|
rd
|
plotPredictions.regres.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/regres.R
\name{plotPredictions.regres}
\alias{plotPredictions.regres}
\title{Predictions plot for regression results}
\usage{
\method{plotPredictions}{regres}(obj, ny = 1, ncomp = NULL,
main = "Predictions", xlab = NULL, ylab = NULL, show.line = T,
colmap = "default", col = NULL, ...)
}
\arguments{
\item{obj}{regression results (object of class \code{regres})}
\item{ny}{number of predictor to show the plot for (if y is multivariate)}
\item{ncomp}{complexity of model (e.g. number of components) to show the plot for}
\item{main}{main title for the plot}
\item{xlab}{label for x axis}
\item{ylab}{label for y axis}
\item{show.line}{logical, whether to show a line fit for the plot points}
\item{colmap}{a colormap to use for coloring the plot items}
\item{col}{a vector with color values for plot items}
\item{...}{other plot parameters (see \code{mdaplot} for details)}
}
\description{
Shows plot with predicted y values.
}
\details{
If reference values are available, the function shows a scatter plot with predicted vs.
reference values, otherwise predicted values are shown vs. object numbers.
}
|
311a1ec4babe7b774563858041db497d545d92b1
|
1b99276280112438a5487cd7d7d1eb3070dd3a47
|
/inst/templates/brapi_POST_callBody.R
|
28d36019a5252385f4c09ff49e4a48ef94101da9
|
[] |
no_license
|
mverouden/brapir-v2-gen
|
357ef012dda26fc512ce8d7af0546ef01347a7e2
|
d2d201c17e3029b847da7dddd1c2921623ad8f1b
|
refs/heads/main
| 2023-08-24T09:56:17.561428
| 2021-10-31T10:52:51
| 2021-10-31T10:52:51
| 309,704,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,870
|
r
|
brapi_POST_callBody.R
|
#' Build the body list for a BrAPI POST call
#'
#' Gathers the optional (non-required) arguments of a BrAPI call into a named
#' list suitable for use as the body of a POST request. Empty, \code{NULL},
#' \code{NA} and zero-length list values are dropped; character vectors (and
#' single strings whose argument name matches a known "array" key pattern)
#' are wrapped with \code{as.array}, and logicals are lower-cased.
#'
#' @param usedArgs named list of the arguments used in the call; must contain
#'   a \code{con} element, otherwise \code{NULL} is returned.
#' @param reqArgs character; required argument name(s), either one name or
#'   several names separated by \code{", "}.
#' @return A named list with the POST body elements, or \code{NULL} when no
#'   connection is present in \code{usedArgs}.
#'
#' @author Maikel Verouden
#'
#' @noRd
#' @keywords internal
brapi_POST_callBody <- function(usedArgs, reqArgs) {
  ## Without a connection there is nothing to build
  if (is.null(usedArgs[["con"]])) {
    return(NULL)
  }
  ## A comma-separated string of required names becomes a character vector
  if (grepl(pattern = ", ", x = reqArgs)) {
    reqArgs <- strsplit(x = reqArgs, split = ", ")[[1]]
  }
  ## Candidates for the body: everything except the connection and the
  ## required arguments
  candidates <- usedArgs
  candidates[c("con", reqArgs)] <- NULL
  bodyList <- list()
  for (idx in seq_along(candidates)) {
    value <- candidates[[idx]]
    argName <- names(candidates)[idx]
    ## Normalize "empty" values to "" so they can be filtered out below
    if (any(is.null(value))) {
      value <- ""
    }
    if (any(is.na(value)) || (inherits(value, what = "list") && length(value) == 0)) {
      value <- ""
    }
    ## Skip anything empty, unless it is a data.frame or a list
    if (!(all(value != "") || inherits(x = value, what = c("data.frame", "list")))) {
      next
    }
    if (inherits(x = value, what = "character") &&
        length(value) == 1 &&
        grepl(pattern = "(Classes)|(Genus)|(Ids)|(Links)|(Names)|(Numbers)|(PUIs)|(Species)|(synonyms)|(Terms)|(tions)|(Xrefs)|(ypes)|(markerDbId)|(markerProfileDbId)|(matrixDbId)|(objectives)",
              x = argName)) {
      ## Single string under an "array" key: serialize as a JSON array
      entry <- as.array(value)
    } else if (inherits(x = value, what = "character") && length(value) > 1) {
      ## Multi-element character vectors always serialize as arrays
      entry <- as.array(value)
    } else if (is.logical(value)) {
      ## JSON booleans are lower case
      entry <- tolower(value)
    } else if (argName == "validValues" && value[["categories"]] == "") {
      ## Empty categories inside validValues still serialize as an array
      value[["categories"]] <- as.array(value[["categories"]])
      entry <- value
    } else {
      entry <- value
    }
    slot <- length(bodyList) + 1L
    bodyList[[slot]] <- entry
    names(bodyList)[slot] <- argName
  }
  return(bodyList)
}
|
3d71b7322b11dfce16ae0c03de470b5f8e50d69f
|
b28f74d681bb5dfbf34549c82a8c932f77c1b0a8
|
/man/prnMDS.Rd
|
de03c8a46d7c810a52f7f80c5b6f70311cfc9ba6
|
[
"MIT"
] |
permissive
|
sailfish009/proteoQ
|
b07e179e9fe27a90fd76cde2ed7caa55e793e9d6
|
e6a4fe79a21f9a9106a35d78c2ce42d59e9d82e2
|
refs/heads/master
| 2022-12-25T20:06:40.340740
| 2020-10-15T20:18:14
| 2020-10-15T20:18:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 18,525
|
rd
|
prnMDS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mds.R
\name{pepMDS}
\alias{pepMDS}
\alias{prnMDS}
\title{MDS plots}
\usage{
pepMDS(
col_select = NULL,
col_group = NULL,
col_color = NULL,
col_fill = NULL,
col_shape = NULL,
col_size = NULL,
col_alpha = NULL,
color_brewer = NULL,
fill_brewer = NULL,
size_manual = NULL,
shape_manual = NULL,
alpha_manual = NULL,
scale_log2r = TRUE,
complete_cases = FALSE,
impute_na = FALSE,
adjEucDist = FALSE,
classical = TRUE,
method = "euclidean",
p = 2,
k = 3,
dimension = 2,
folds = 1,
center_features = TRUE,
scale_features = TRUE,
show_ids = TRUE,
show_ellipses = FALSE,
df = NULL,
filepath = NULL,
filename = NULL,
theme = NULL,
...
)
prnMDS(
col_select = NULL,
col_group = NULL,
col_color = NULL,
col_fill = NULL,
col_shape = NULL,
col_size = NULL,
col_alpha = NULL,
color_brewer = NULL,
fill_brewer = NULL,
size_manual = NULL,
shape_manual = NULL,
alpha_manual = NULL,
scale_log2r = TRUE,
complete_cases = FALSE,
impute_na = FALSE,
adjEucDist = FALSE,
classical = TRUE,
method = "euclidean",
p = 2,
k = 3,
dimension = 2,
folds = 1,
center_features = TRUE,
scale_features = TRUE,
show_ids = TRUE,
show_ellipses = FALSE,
df = NULL,
filepath = NULL,
filename = NULL,
theme = NULL,
...
)
}
\arguments{
\item{col_select}{Character string to a column key in \code{expt_smry.xlsx}.
At the \code{NULL} default, the column key of \code{Select} in
\code{expt_smry.xlsx} will be used. In the case of no samples being
specified under \code{Select}, the column key of \code{Sample_ID} will be
used. The non-empty entries under the ascribing column will be used in
indicated analysis.}
\item{col_group}{Character string to a column key in \code{expt_smry.xlsx}.
Samples corresponding to non-empty entries under \code{col_group} will be
used for sample grouping in the indicated analysis. At the NULL default, the
column key \code{Group} will be used. No data annotation by groups will be
performed if the fields under the indicated group column is empty.}
\item{col_color}{Character string to a column key in \code{expt_smry.xlsx}.
Values under which will be used for the \code{color} aesthetics in plots. At
the NULL default, the column key \code{Color} will be used.}
\item{col_fill}{Character string to a column key in \code{expt_smry.xlsx}.
Values under which will be used for the \code{fill} aesthetics in plots. At
the NULL default, the column key \code{Fill} will be used.}
\item{col_shape}{Character string to a column key in \code{expt_smry.xlsx}.
Values under which will be used for the \code{shape} aesthetics in plots. At
the NULL default, the column key \code{Shape} will be used.}
\item{col_size}{Character string to a column key in \code{expt_smry.xlsx}.
Values under which will be used for the \code{size} aesthetics in plots. At
the NULL default, the column key \code{Size} will be used.}
\item{col_alpha}{Character string to a column key in \code{expt_smry.xlsx}.
Values under which will be used for the \code{alpha} (transparency)
aesthetics in plots. At the NULL default, the column key \code{Alpha} will
be used.}
\item{color_brewer}{Character string to the name of a color brewer for use in
\href{https://ggplot2.tidyverse.org/reference/scale_brewer.html}{ggplot2::scale_color_brewer},
i.e., \code{color_brewer = Set1}. At the NULL default, the setting in
\code{ggplot2} will be used.}
\item{fill_brewer}{Character string to the name of a color brewer for use in
\href{https://ggplot2.tidyverse.org/reference/scale_brewer.html}{ggplot2::scale_fill_brewer},
i.e., \code{fill_brewer = Spectral}. At the NULL default, the setting in
\code{ggplot2} will be used.}
\item{size_manual}{Numeric vector to the scale of sizes for use in
\href{https://ggplot2.tidyverse.org/reference/scale_manual.html}{ggplot2::scale_size_manual},
i.e., \code{size_manual = c(8, 12)}. At the NULL default, the setting in
\code{ggplot2} will be used.}
\item{shape_manual}{Numeric vector to the scale of shape IDs for use in
\href{https://ggplot2.tidyverse.org/reference/scale_manual.html}{ggplot2::scale_shape_manual},
i.e., \code{shape_manual = c(5, 15)}. At the NULL default, the setting in
\code{ggplot2} will be used.}
\item{alpha_manual}{Numeric vector to the scale of transparency of objects for
use in
\href{https://ggplot2.tidyverse.org/reference/scale_manual.html}{ggplot2::scale_alpha_manual}
, i.e., \code{alpha_manual = c(.5, .9)}. At the NULL default, the setting
in \code{ggplot2} will be used.}
\item{scale_log2r}{Logical; if TRUE, adjusts \code{log2FC} to the same scale
of standard deviation across all samples. The default is TRUE.}
\item{complete_cases}{Logical; if TRUE, only cases that are complete with no
missing values will be used. The default is FALSE.}
\item{impute_na}{Logical; if TRUE, data with the imputation of missing values
will be used. The default is FALSE.}
\item{adjEucDist}{Logical; if TRUE, adjusts the inter-plex \code{Euclidean}
distance by \eqn{1/sqrt(2)} at \code{method = "euclidean"}. The option
\code{adjEucDist = TRUE} may be suitable when \code{reference samples} from
each TMT plex undergo approximately the same sample handling process as the
samples of interest. For instance, \code{reference samples} were split at
the levels of protein lysates. Typically, \code{adjEucDist = FALSE} if
\code{reference samples} were split near the end of a sample handling
process, for instance, at the stages immediately before or after TMT
labeling. Also see online
\href{https://github.com/qzhang503/proteoQ}{README, section MDS} for a brief
reasoning.}
\item{classical}{Logical. Metric MDS will be performed at TRUE and non-metric
MDS at FALSE (see also \code{\link[stats]{cmdscale}} and
\code{\link[MASS]{isoMDS}}). The default is TRUE.}
\item{method}{Character string; the distance measure in
\code{\link[stats]{dist}}. The default method is "euclidean".}
\item{p}{Numeric; The power of the Minkowski distance in
\code{\link[stats]{dist}}. The default is 2.}
\item{k}{Numeric; The desired dimension for the solution passed to
\code{\link[stats]{cmdscale}}. The default is 3.}
\item{dimension}{Numeric; The desired dimension for pairwise visualization.
The default is 2.}
\item{folds}{Not currently used. Integer; the degree of folding data into
subsets. The default is one without data folding.}
\item{center_features}{Logical; if TRUE, adjusts log2FC to center zero by
features (proteins or peptides). The default is TRUE. Note the difference to
data alignment with \code{method_align} in \code{\link{standPrn}}
or \code{\link{standPep}} where log2FC are aligned by observations
(samples).}
\item{scale_features}{Logical; if TRUE, adjusts log2FC to the same scale of
variance by features (protein or peptide entries). The default is TRUE. Note
the difference to data scaling with \code{scale_log2r} where log2FC are
scaled by observations (samples).}
\item{show_ids}{Logical; if TRUE, shows the sample IDs in \code{MDS/PCA}
plots. The default is TRUE.}
\item{show_ellipses}{Logical; if TRUE, shows the ellipses by sample groups
according to \code{col_group}. The default is FALSE.}
\item{df}{The name of a primary data file. By default, it will be determined
automatically after matching the types of data and analysis with an
\code{id} among \code{c("pep_seq", "pep_seq_mod", "prot_acc", "gene")}. A
primary file contains normalized peptide or protein data and is among
\code{c("Peptide.txt", "Peptide_pVal.txt", "Peptide_impNA_pVal.txt",
"Protein.txt", "Protein_pVal.txt", "protein_impNA_pVal.txt")}. For analyses
require the fields of significance p-values, the \code{df} will be one of
\code{c("Peptide_pVal.txt", "Peptide_impNA_pVal.txt", "Protein_pVal.txt",
"protein_impNA_pVal.txt")}.}
\item{filepath}{A file path to output results. By default, it will be
determined automatically by the name of the calling function and the value
of \code{id} in the \code{call}.}
\item{filename}{A representative file name to outputs. By default, the name(s)
will be determined automatically. For text files, a typical file extension
is \code{.txt}. For image files, they are typically saved via
\code{\link[ggplot2]{ggsave}} or \code{\link[pheatmap]{pheatmap}} where the
image type will be determined by the extension of the file name.}
\item{theme}{A
\href{https://ggplot2.tidyverse.org/reference/ggtheme.html}{ggplot2}
theme, i.e., theme_bw(), or a custom theme. At the NULL default, a system
theme will be applied.}
\item{...}{\code{filter_}: Variable argument statements for the row filtration
against data in a primary file linked to \code{df}. See also
\code{\link{normPSM}} for the format of \code{filter_} statements. \cr \cr
Additional parameters for \code{ggsave}: \cr \code{width}, the width of
plot; \cr \code{height}, the height of plot \cr \code{...}}
}
\value{
MDS plots.
}
\description{
\code{pepMDS} visualizes the multidimensional scaling (MDS) of peptide \code{log2FC}.
\code{prnMDS} visualizes the multidimensional scaling (MDS) of protein
\code{log2FC}.
}
\details{
An Euclidean distance matrix of \code{log2FC} is returned by
\code{\link[stats]{dist}}, followed by a metric
(\code{\link[stats]{cmdscale}}) or non-metric (\code{\link[MASS]{isoMDS}})
MDS. The default is metric MDS with the input dissimilarities being euclidean
distances. Note that the \code{center_features} alone will not affect the
results of \code{\link[stats]{dist}}; it together with \code{scale_features}
will be passed to \code{\link[base]{scale}}.
}
\examples{
\donttest{
# ===================================
# MDS
# ===================================
## !!!require the brief working example in `?load_expts`
# global option
scale_log2r <- TRUE
## peptides
# all samples
pepMDS(
col_select = Select,
filter_peps_by = exprs(pep_n_psm >= 10),
show_ids = FALSE,
filename = "peps_rowfil.png",
)
# selected samples
pepMDS(
col_select = BI,
col_shape = Shape,
col_color = Alpha,
filter_peps_by = exprs(pep_n_psm >= 10),
show_ids = FALSE,
filename = "peps_rowfil_colsel.png",
)
## proteins
prnMDS(
col_color = Color,
col_shape = Shape,
show_ids = FALSE,
filter_peps_by = exprs(prot_n_pep >= 5),
filename = "prns_rowfil.png",
)
# custom palette
prnMDS(
col_shape = Shape,
color_brewer = Set1,
show_ids = FALSE,
filename = "my_palette.png",
)
## additional row filtration by pVals (proteins, impute_na = FALSE)
# if not yet, run prerequisitive significance tests at `impute_na = FALSE`
pepSig(
impute_na = FALSE,
W2_bat = ~ Term["(W2.BI.TMT2-W2.BI.TMT1)",
"(W2.JHU.TMT2-W2.JHU.TMT1)",
"(W2.PNNL.TMT2-W2.PNNL.TMT1)"],
W2_loc = ~ Term_2["W2.BI-W2.JHU",
"W2.BI-W2.PNNL",
"W2.JHU-W2.PNNL"],
W16_vs_W2 = ~ Term_3["W16-W2"],
)
prnSig(impute_na = FALSE)
# (`W16_vs_W2.pVal (W16-W2)` now a column key)
prnMDS(
col_color = Color,
col_shape = Shape,
show_ids = FALSE,
filter_peps_by = exprs(prot_n_pep >= 5),
filter_by = exprs(`W16_vs_W2.pVal (W16-W2)` <= 1e-6),
filename = pvalcutoff.png,
)
# analogous peptides
pepMDS(
col_color = Color,
col_shape = Shape,
show_ids = FALSE,
filter_peps_by = exprs(prot_n_pep >= 5),
filter_by = exprs(`W16_vs_W2.pVal (W16-W2)` <= 1e-6),
filename = pvalcutoff.png,
)
## additional row filtration by pVals (proteins, impute_na = TRUE)
# if not yet, run prerequisitive NA imputation
pepImp(m = 2, maxit = 2)
prnImp(m = 5, maxit = 5)
# if not yet, run prerequisitive significance tests at `impute_na = TRUE`
pepSig(
impute_na = TRUE,
W2_bat = ~ Term["(W2.BI.TMT2-W2.BI.TMT1)",
"(W2.JHU.TMT2-W2.JHU.TMT1)",
"(W2.PNNL.TMT2-W2.PNNL.TMT1)"],
W2_loc = ~ Term_2["W2.BI-W2.JHU",
"W2.BI-W2.PNNL",
"W2.JHU-W2.PNNL"],
W16_vs_W2 = ~ Term_3["W16-W2"],
)
prnSig(impute_na = TRUE)
prnMDS(
impute_na = TRUE,
col_color = Color,
col_shape = Shape,
show_ids = FALSE,
filter_peps_by = exprs(prot_n_pep >= 5),
filter_by = exprs(`W16_vs_W2.pVal (W16-W2)` <= 1e-6),
filename = filpvals_impna.png,
)
# analogous peptides
pepMDS(
impute_na = TRUE,
col_color = Color,
col_shape = Shape,
show_ids = FALSE,
filter_peps_by = exprs(prot_n_pep >= 5),
filter_by = exprs(`W16_vs_W2.pVal (W16-W2)` <= 1e-6),
filename = filpvals_impna.png,
)
## show ellipses
prnMDS(
show_ellipses = TRUE,
col_group = Shape,
show_ids = FALSE,
filename = ellipses_by_whims.png,
)
prnMDS(
show_ellipses = TRUE,
col_group = Color,
show_ids = FALSE,
filename = ellipses_by_labs.png,
)
## a higher dimension
pepMDS(
show_ids = FALSE,
k = 5,
dimension = 3,
filename = d3.pdf,
)
prnMDS(
show_ids = TRUE,
k = 4,
dimension = 3,
filename = d3.png,
)
# show ellipses
# (column `expt_smry.xlsx::Color` codes `labs`.)
prnMDS(
show_ids = FALSE,
show_ellipses = TRUE,
col_group = Color,
k = 4,
dimension = 3,
filename = d3_labs.png,
)
# (column `expt_smry.xlsx::Shape` codes `WHIMs`.)
prnMDS(
show_ids = FALSE,
show_ellipses = TRUE,
col_group = Shape,
k = 4,
dimension = 3,
filename = d3_whims.png,
)
## custom theme
library(ggplot2)
my_mds_theme <- theme_bw() + theme(
axis.text.x = element_text(angle=0, vjust=0.5, size=16),
axis.text.y = element_text(angle=0, vjust=0.5, size=16),
axis.title.x = element_text(colour="black", size=18),
axis.title.y = element_text(colour="black", size=18),
plot.title = element_text(face="bold", colour="black", size=20, hjust=0.5, vjust=0.5),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
legend.key = element_rect(colour = NA, fill = 'transparent'),
legend.background = element_rect(colour = NA, fill = "transparent"),
legend.title = element_blank(),
legend.text = element_text(colour="black", size=14),
legend.text.align = 0,
legend.box = NULL
)
pepMDS(
impute_na = TRUE,
col_color = Color,
col_shape = Shape,
show_ids = FALSE,
filter_peps_by = exprs(prot_n_pep >= 5),
filter_by = exprs(`W16_vs_W2.pVal (W16-W2)` <= 1e-6),
theme = my_mds_theme,
filename = my_theme.png,
)
## direct uses of ggplot2
library(ggplot2)
res <- prnMDS(filename = foo.png)
p_fil <- ggplot(res, aes(x = Coordinate.1, y = Coordinate.2)) +
geom_point(aes(colour = Color, shape = Shape, alpha = Alpha), size = 4, stroke = 0.02) +
scale_alpha_manual(values = c(.5, .9)) +
stat_ellipse(aes(fill = Shape), geom = "polygon", alpha = .4) +
guides(fill = FALSE) +
labs(title = "", x = "Coordinate 1", y = "Coordinate 2") +
coord_fixed()
ggsave(file.path(dat_dir, "Protein/MDS/my_ggplot2_fil.png"))
\dontrun{
prnMDS(
col_color = "column_key_not_existed",
col_shape = "another_missing_column_key"
)
}
}
}
\seealso{
\emph{Metadata} \cr
\code{\link{load_expts}} for metadata preparation and a reduced working example in data normalization \cr
\emph{Data normalization} \cr
\code{\link{normPSM}} for extended examples in PSM data normalization \cr
\code{\link{PSM2Pep}} for extended examples in PSM to peptide summarization \cr
\code{\link{mergePep}} for extended examples in peptide data merging \cr
\code{\link{standPep}} for extended examples in peptide data normalization \cr
\code{\link{Pep2Prn}} for extended examples in peptide to protein summarization \cr
\code{\link{standPrn}} for extended examples in protein data normalization. \cr
\code{\link{purgePSM}} and \code{\link{purgePep}} for extended examples in data purging \cr
\code{\link{pepHist}} and \code{\link{prnHist}} for extended examples in histogram visualization. \cr
\code{\link{extract_raws}} and \code{\link{extract_psm_raws}} for extracting MS file names \cr
\emph{Variable arguments of `filter_...`} \cr
\code{\link{contain_str}}, \code{\link{contain_chars_in}}, \code{\link{not_contain_str}},
\code{\link{not_contain_chars_in}}, \code{\link{start_with_str}},
\code{\link{end_with_str}}, \code{\link{start_with_chars_in}} and
\code{\link{ends_with_chars_in}} for data subsetting by character strings \cr
\emph{Missing values} \cr
\code{\link{pepImp}} and \code{\link{prnImp}} for missing value imputation \cr
\emph{Informatics} \cr
\code{\link{pepSig}} and \code{\link{prnSig}} for significance tests \cr
\code{\link{pepVol}} and \code{\link{prnVol}} for volcano plot visualization \cr
\code{\link{prnGSPA}} for gene set enrichment analysis by protein significance pVals \cr
\code{\link{gspaMap}} for mapping GSPA to volcano plot visualization \cr
\code{\link{prnGSPAHM}} for heat map and network visualization of GSPA results \cr
\code{\link{prnGSVA}} for gene set variance analysis \cr
\code{\link{prnGSEA}} for data preparation for online GSEA. \cr
\code{\link{pepMDS}} and \code{\link{prnMDS}} for MDS visualization \cr
\code{\link{pepPCA}} and \code{\link{prnPCA}} for PCA visualization \cr
\code{\link{pepLDA}} and \code{\link{prnLDA}} for LDA visualization \cr
\code{\link{pepHM}} and \code{\link{prnHM}} for heat map visualization \cr
\code{\link{pepCorr_logFC}}, \code{\link{prnCorr_logFC}}, \code{\link{pepCorr_logInt}} and
\code{\link{prnCorr_logInt}} for correlation plots \cr
\code{\link{anal_prnTrend}} and \code{\link{plot_prnTrend}} for trend analysis and visualization \cr
\code{\link{anal_pepNMF}}, \code{\link{anal_prnNMF}}, \code{\link{plot_pepNMFCon}},
\code{\link{plot_prnNMFCon}}, \code{\link{plot_pepNMFCoef}}, \code{\link{plot_prnNMFCoef}} and
\code{\link{plot_metaNMF}} for NMF analysis and visualization \cr
\emph{Custom databases} \cr
\code{\link{Uni2Entrez}} for lookups between UniProt accessions and Entrez IDs \cr
\code{\link{Ref2Entrez}} for lookups among RefSeq accessions, gene names and Entrez IDs \cr
\code{\link{prepGO}} for \code{\href{http://current.geneontology.org/products/pages/downloads.html}{gene
ontology}} \cr
\code{\link{prepMSig}} for \href{https://data.broadinstitute.org/gsea-msigdb/msigdb/release/7.0/}{molecular
signatures} \cr
\code{\link{prepString}} and \code{\link{anal_prnString}} for STRING-DB \cr
\emph{Column keys in PSM, peptide and protein outputs} \cr
system.file("extdata", "mascot_psm_keys.txt", package = "proteoQ") \cr
system.file("extdata", "mascot_peptide_keys.txt", package = "proteoQ") \cr
system.file("extdata", "mascot_protein_keys.txt", package = "proteoQ") \cr
}
|
ace14f603ee2a536d37ad06b6ddd61dc58bc41bb
|
91a77be68e5ad1aa16e9a2681ba6fb090c118e4d
|
/man/as.tdm.Rd
|
fd19aaf46953b7f3dd7155120cdce26da2a74edf
|
[] |
no_license
|
cran/qdap
|
e42f194e98a38eb02084eb6ac92dd587024b8540
|
5f032a6a8bf41255cd2547b11325ed457a02a72a
|
refs/heads/master
| 2023-05-25T03:10:36.324940
| 2023-05-11T05:10:02
| 2023-05-11T05:10:02
| 17,698,836
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 16,012
|
rd
|
as.tdm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.tdm.R
\name{as.tdm}
\alias{as.tdm}
\alias{as.TermDocumentMatrix}
\alias{as.dtm}
\alias{as.DocumentTermMatrix}
\alias{as.tdm.Corpus}
\alias{as.tdm.default}
\alias{as.tdm.character}
\alias{as.dtm.Corpus}
\alias{as.dtm.default}
\alias{as.dtm.character}
\alias{as.tdm.wfm}
\alias{as.dtm.wfm}
\alias{as.data.frame.Corpus}
\alias{as.Corpus}
\alias{as.Corpus.sent_split}
\alias{as.Corpus.default}
\alias{apply_as_tm}
\alias{apply_as_df}
\alias{as.Corpus.TermDocumentMatrix}
\alias{as.Corpus.DocumentTermMatrix}
\alias{as.Corpus.wfm}
\title{tm Package Compatibility Tools: Apply to or Convert to/from Term Document
Matrix or Document Term Matrix}
\usage{
as.tdm(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
as.TermDocumentMatrix(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
as.dtm(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
as.DocumentTermMatrix(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.tdm}{Corpus}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.tdm}{default}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.tdm}{character}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.dtm}{Corpus}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.dtm}{default}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.dtm}{character}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.tdm}{wfm}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.dtm}{wfm}(text.var, grouping.var = NULL, vowel.check = TRUE, ...)
\method{as.data.frame}{Corpus}(
x,
row.names,
optional,
...,
doc = "doc_id",
text = "text",
sent.split = FALSE
)
as.Corpus(text.var, grouping.var = NULL, demographic.vars, ...)
\method{as.Corpus}{sent_split}(text.var, grouping.var = NULL, demographic.vars, ...)
\method{as.Corpus}{default}(text.var, grouping.var = NULL, demographic.vars, ...)
apply_as_tm(wfm.obj, tmfun, ..., to.qdap = TRUE)
apply_as_df(
tm.corpus,
qdapfun,
...,
stopwords = NULL,
min = 1,
max = Inf,
count.apostrophe = TRUE,
ignore.case = TRUE
)
\method{as.Corpus}{TermDocumentMatrix}(text.var, ...)
\method{as.Corpus}{DocumentTermMatrix}(text.var, ...)
\method{as.Corpus}{wfm}(text.var, ...)
}
\arguments{
\item{text.var}{The text variable or a \code{\link[qdap]{wfm}} object.}
\item{grouping.var}{The grouping variables. Default \code{NULL} generates
one word list for all text. Also takes a single grouping variable or a list
of 1 or more grouping variables.}
\item{vowel.check}{logical. Should terms without vowels be remove?}
\item{x}{A \code{\link[tm]{Corpus}} object.}
\item{row.names}{\code{NULL} or a character vector giving the row names for
the data frame. Not used in \pkg{qdap}; for base generic consistency.}
\item{optional}{logical. If \code{TRUE}, setting row names and converting
column names is optional. Not used in \pkg{qdap}; for base generic consistency.}
\item{doc}{Name for \code{\link[tm]{Corpus}} documents.}
\item{text}{Name for \code{\link[tm]{Corpus}} text.}
\item{sent.split}{logical. If \code{TRUE} the text variable sentences will
be split into individual rows.}
\item{demographic.vars}{Additional demographic information about the grouping
variables. This is a data.frame, list of equal length vectors, or a single
vector corresponding to the grouping variable/text variable. This
information will be mapped to the DMetaData in the \code{\link[tm]{Corpus}}.}
\item{wfm.obj}{A \code{\link[qdap]{wfm}} object.}
\item{tmfun}{A function applied to a \code{\link[tm]{TermDocumentMatrix}}
object.}
\item{to.qdap}{logical. If \code{TRUE} should \code{\link[qdap]{wfm}} try to
coerce the output back to a qdap object.}
\item{tm.corpus}{A \code{\link[tm]{Corpus}} object.}
\item{qdapfun}{A qdap function that is usually used on
text.variable ~ grouping variable.}
\item{stopwords}{A character vector of words to remove from the text. qdap
has a number of data sets that can be used as stop words including:
\code{Top200Words}, \code{Top100Words}, \code{Top25Words}. For the tm
package's traditional English stop words use \code{tm::stopwords("english")}.}
\item{min}{Minimum word length.}
\item{max}{Maximum word length.}
\item{count.apostrophe}{logical. If \code{TRUE} apostrophes are counted as
characters.}
\item{ignore.case}{logical. If \code{TRUE} stop words will be removed
regardless of case.}
\item{\ldots}{Function dependant:
\itemize{
\item \bold{as.tdm} or \bold{as.dtm} - Other arguments passed to \code{wfm}
\item \bold{apply_as_tm} - Other arguments passed to functions used on a \pkg{tm} \code{TermDocumentMatrix}
\item \bold{as.data.frame} - Other arguments passed to \code{\link[qdap]{sentSplit}}
\item \bold{as.Corpus} - Other arguments passed to \pkg{tm}'s \code{\link[tm]{Corpus}}
}}
}
\value{
\code{as.tdm} - Returns a \code{\link[tm]{TermDocumentMatrix}}.
\code{as.TermDocumentMatrix} - Returns a
\code{\link[tm]{TermDocumentMatrix}}.
\code{as.dtm} - Returns a \code{\link[tm]{DocumentTermMatrix}}.
\code{as.DocumentTermMatrix} - Returns a
\code{\link[tm]{TermDocumentMatrix}}.
\code{as.data.frame} - Converts a \code{\link[tm]{Corpus}} and returns
a \pkg{qdap} oriented \code{\link[base]{data.frame}}.
\code{as.Corpus} - Converts a qdap oriented dataframe and returns
a \code{\link[tm]{Corpus}}.
\code{apply_as_tm} - Applies a tm oriented function to a
\code{\link[qdap]{wfm}} and attempts to simplify back to a
\code{\link[qdap]{wfm}} or \code{weight} format.
\code{apply_as_df} - Returns the output typical of the applied
\pkg{qdap} function.
}
\description{
\code{as.tdm} - Create term document matrices from raw text or
\code{\link[qdap]{wfm}} for use with other text analysis packages.
\code{as.TermDocumentMatrix} - Create document term matrices from raw text or
\code{\link[qdap]{wfm}} for use with other text analysis packages.
\code{as.dtm} - Create document term matrices from raw text or
\code{\link[qdap]{wfm}} for use with other text analysis packages.
\code{as.DocumentTermMatrix} - Create document term matrices from raw text or
\code{\link[qdap]{wfm}} for use with other text analysis packages.
\code{as.data.frame} - Convert a \pkg{tm} package \code{\link[tm]{Corpus}} to
a \pkg{qdap} \code{\link[base]{data.frame}}.
\code{as.Corpus} - Attempts to convert its argument into a \pkg{tm} package
\code{\link[tm]{Corpus}}.
\code{apply_as_tm} - Apply functions intended to be used on the \pkg{tm}
package's \code{\link[tm]{TermDocumentMatrix}} to a \code{\link[qdap]{wfm}}
object.
\code{apply_as_df} - Apply a \pkg{tm} \code{\link[tm]{Corpus}} as a qdap
dataframe.
\code{apply_as_df} - Apply functions intended to be used on the \pkg{qdap}
package's \code{\link[base]{data.frame}} + \code{\link[qdap]{sentSplit}} to
a \pkg{tm} \code{\link[tm]{Corpus}} object.
}
\details{
Produces output that is identical to the \code{tm} package's
\code{\link[tm]{TermDocumentMatrix}}, \code{\link[tm]{DocumentTermMatrix}},
\code{\link[tm]{Corpus}} or allows convenient interface between the qdap and
tm packages.
}
\note{
\code{apply_as_df} coerces to a dataframe with one column named `docs` and
the other named `text`.
}
\examples{
\dontrun{
as.dtm(DATA$state, DATA$person)
as.tdm(DATA$state, DATA$person)
x <- wfm(DATA$state, DATA$person)
as.tdm(x)
as.dtm(x)
library(tm)
plot(as.tdm(x))
pres <- as.tdm(pres_debates2012$dialogue, pres_debates2012$person)
plot(pres, corThreshold = 0.8)
pres
(pres2 <- removeSparseTerms(pres, .3))
plot(pres2, corThreshold = 0.95)
shorts <- all_words(pres_debates2012)[,1][nchar(all_words(
pres_debates2012)[,1]) < 4]
SW <- c(shorts, qdapDictionaries::contractions[, 1],
qdapDictionaries::Top200Words,
"governor", "president", "mister", "obama","romney")
DocTermMat2 <- with(pres_debates2012, as.dtm(dialogue, list(person, time), stopwords = SW))
DocTermMat2 <- removeSparseTerms(DocTermMat2,0.95)
(DocTermMat2 <- DocTermMat2[rowSums(as.matrix(DocTermMat2))> 0,])
plot(DocTermMat2)
## Correspondence Analysis
library(ca)
dat <- pres_debates2012
dat <- dat[dat$person \%in\% qcv(ROMNEY, OBAMA), ]
speech <- stemmer(dat$dialogue)
mytable1 <- with(dat, as.tdm(speech, list(person, time), stopwords = Top25Words))
fit <- ca(as.matrix(mytable1))
summary(fit)
plot(fit)
plot3d.ca(fit, labels=1)
mytable2 <- with(dat, as.tdm(speech, list(person, time), stopwords = Top200Words))
fit2 <- ca(as.matrix(mytable2))
summary(fit2)
plot(fit2)
plot3d.ca(fit2, labels=1)
## Topic Models
# Example 1 #
library(topicmodels); library(tm)
# Generate stop words based on short words, frequent words and contractions
shorts <- all_words(pres_debates2012)[,1][nchar(all_words(
pres_debates2012)[,1]) < 4]
SW <- c(shorts, qdapDictionaries::contractions[, 1],
qdapDictionaries::Top200Words,
"governor", "president", "mister", "obama","romney")
DocTermMat <- with(pres_debates2012, as.dtm(dialogue, person, stopwords = SW))
DocTermMat <- removeSparseTerms(DocTermMat,0.999)
DocTermMat <- DocTermMat[rowSums(as.matrix(DocTermMat))> 0,]
lda.model <- LDA(DocTermMat, 5)
(topics <- posterior(lda.model, DocTermMat)$topics)
terms(lda.model,20)
# Plot the Topics Per Person
topic.dat <- matrix2df(topics, "Person")
colnames(topic.dat)[-1] <- paste2(t(terms(lda.model,20)), sep=", ")
library(reshape2)
mtopic <- melt(topic.dat, variable="Topic", value.name="Proportion")
ggplot(mtopic, aes(weight=Proportion, x=Topic, fill=Topic)) +
geom_bar() +
coord_flip() +
facet_grid(Person~.) +
guides(fill=FALSE)
# Example 2 #
DocTermMat2 <- with(pres_debates2012, as.dtm(dialogue, list(person, time), stopwords = SW))
DocTermMat2 <- removeSparseTerms(DocTermMat2,0.95)
DocTermMat2 <- DocTermMat2[rowSums(as.matrix(DocTermMat2))> 0,]
lda.model2 <- LDA(DocTermMat2, 6)
(topics2 <- posterior(lda.model2, DocTermMat2)$topics)
terms(lda.model2,20)
qheat(topics2, high="blue", low="yellow", by.col=FALSE)
# Example 3 #
lda.model3 <- LDA(DocTermMat2, 10)
(topics3 <- posterior(lda.model3, DocTermMat2)$topics)
terms(lda.model3, 20)
qheat(topics3, high="blue", low="yellow", by.col=FALSE)
# Plot the Topics Per Person
topic.dat3 <- matrix2df(topics3, "Person&Time")
colnames(topic.dat3)[-1] <- paste2(t(terms(lda.model3, 10)), sep=", ")
topic.dat3 <- colsplit2df(topic.dat3)
library(reshape2)
library(scales)
mtopic3 <- melt(topic.dat3, variable="Topic", value.name="Proportion")
(p1 <- ggplot(mtopic3, aes(weight=Proportion, x=Topic, fill=Topic)) +
geom_bar() +
coord_flip() +
facet_grid(Person~Time) +
guides(fill=FALSE) +
scale_y_continuous(labels = percent) +
theme(plot.margin = unit(c(1, 0, 0.5, .5), "lines")) +
ylab("Proportion"))
mtopic3.b <- mtopic3
mtopic3.b[, "Topic"] <- factor(as.numeric(mtopic3.b[, "Topic"]), levels = 1:10)
mtopic3.b[, "Time"] <- factor(gsub("time ", "", mtopic3.b[, "Time"]))
p2 <- ggplot(mtopic3.b, aes(x=Time, y=Topic, fill=Proportion)) +
geom_tile(color = "white") +
scale_fill_gradient(low = "grey70", high = "red") +
facet_grid(Person~Time, scales = "free") +
theme(axis.title.y = element_blank(),
axis.text.x= element_text(colour="white"),
axis.ticks.x= element_line(colour="white"),
axis.ticks.y = element_blank(),
axis.text.y= element_blank(),
plot.margin = unit(c(1, -.5, .5, -.9), "lines")
)
library(gridExtra)
grid.arrange(p1, p2, nrow=1, widths = grid::unit(c(.85, .15), "native"))
## tm Matrices to wfm
library(tm)
data(crude)
## A Term Document Matrix Conversion
(tm_in <- TermDocumentMatrix(crude, control = list(stopwords = TRUE)))
converted <- as.wfm(tm_in)
head(converted)
summary(converted)
## A Document Term Matrix Conversion
(dtm_in <- DocumentTermMatrix(crude, control = list(stopwords = TRUE)))
summary(as.wfm(dtm_in))
## `apply_as_tm` Examples
## Create a wfm
a <- with(DATA, wfm(state, list(sex, adult)))
summary(a)
## Apply functions meant for a tm TermDocumentMatrix
out <- apply_as_tm(a, tm:::removeSparseTerms, sparse=0.6)
summary(out)
apply_as_tm(a, tm:::findAssocs, "computer", .8)
apply_as_tm(a, tm:::findFreqTerms, 2, 3)
apply_as_tm(a, tm:::Zipf_plot)
apply_as_tm(a, tm:::Heaps_plot)
apply_as_tm(a, tm:::plot.TermDocumentMatrix, corThreshold = 0.4)
library(proxy)
apply_as_tm(a, tm:::weightBin)
apply_as_tm(a, tm:::weightBin, to.qdap = FALSE)
apply_as_tm(a, tm:::weightSMART)
apply_as_tm(a, tm:::weightTfIdf)
## Convert tm Corpus to Dataframe
## A tm Corpus
library(tm)
reut21578 <- system.file("texts", "crude", package = "tm")
reuters <- Corpus(DirSource(reut21578),
readerControl = list(reader = readReut21578XML))
## Convert to dataframe
corp_df <- as.data.frame(reuters)
htruncdf(corp_df)
z <- as.Corpus(DATA$state, DATA$person,
demographic=DATA[, qcv(sex, adult, code)])
as.data.frame(z)
## Apply a qdap function
out <- formality(corp_df$text, corp_df$docs)
plot(out)
## Convert a qdap dataframe to tm package Corpus
(x <- with(DATA2, as.Corpus(state, list(person, class, day))))
library(tm)
inspect(x)
inspect_text(x)
class(x)
(y <- with(pres_debates2012, as.Corpus(dialogue, list(person, time))))
## Add demographic info to DMetaData of Corpus
z <- as.Corpus(DATA$state, DATA$person,
demographic=DATA[, qcv(sex, adult, code)])
lview(z)
lview(as.Corpus(DATA$state, DATA$person,
demographic=DATA$sex))
lview(as.Corpus(DATA$state, DATA$person,
demographic=list(DATA$sex, DATA$adult)))
## Apply qdap functions meant for dataframes from sentSplit to tm Corpus
library(tm)
reut21578 <- system.file("texts", "crude", package = "tm")
reuters <- Corpus(DirSource(reut21578),
readerControl = list(reader = readReut21578XML))
matches <- list(
oil = qcv(oil, crude),
money = c("economic", "money")
)
apply_as_df(reuters, word_stats)
apply_as_df(reuters, formality)
apply_as_df(reuters, word_list)
apply_as_df(reuters, polarity)
apply_as_df(reuters, Dissimilarity)
apply_as_df(reuters, diversity)
apply_as_df(reuters, pos_by)
apply_as_df(reuters, flesch_kincaid)
apply_as_df(reuters, trans_venn)
apply_as_df(reuters, gantt_plot)
apply_as_df(reuters, rank_freq_mplot)
apply_as_df(reuters, character_table)
(termco_out <- apply_as_df(reuters, termco, match.list = matches))
plot(termco_out, values = TRUE, high="red")
(wordcor_out <- apply_as_df(reuters, word_cor, word = unlist(matches)))
plot(wordcor_out)
(f_terms <- apply_as_df(reuters, freq_terms, at.least = 3))
plot(f_terms)
apply_as_df(reuters, trans_cloud)
## To use "all" rather than "docs" as "grouping.var"...
apply_as_df(reuters, trans_cloud, grouping.var=NULL,
target.words=matches, cloud.colors = c("red", "blue", "grey75"))
finds <- apply_as_df(reuters, freq_terms, at.least = 5,
top = 5, stopwords = Top100Words)
apply_as_df(reuters, dispersion_plot, match.terms = finds[, 1],
total.color = NULL)
## Filter for Term Document Matrix/Document Term Matrix
library(tm)
data(crude)
(tdm_in <- TermDocumentMatrix(crude, control = list(stopwords = TRUE)))
Filter(tdm_in, 5)
(dtm_in <- DocumentTermMatrix(crude, control = list(stopwords = TRUE)))
Filter(dtm_in, 5)
## Filter particular words based on max/min values
Filter(dtm_in, 5, 7)
Filter(dtm_in, 4, 4)
Filter(tdm_in, 3, 4)
Filter(tdm_in, 3, 4, stopwords = Top200Words)
## SPECIAL REMOVAL OF TERMS (more flexible consideration of words than wfm)
dat <- data.frame(
person = paste0("person_", 1:5),
tweets = c("test one two", "two apples","hashtag #apple",
"#apple #tree", "http://microsoft.com")
)
## remove specialty items
dat[[2]] <- rm_default(dat[[2]], pattern=pastex("@rm_url", "#apple\\\\b"))
myCorp <- tm::tm_map(crude, tm::removeWords, Top200Words)
myCorp \%>\% as.dtm() \%>\% tm::inspect()
}
}
\seealso{
\code{\link[tm]{DocumentTermMatrix}},
\code{\link[tm]{Corpus}},
\code{\link[tm]{TermDocumentMatrix}},
\code{\link[qdap]{as.wfm}}
\code{\link[qdap]{Filter}}
}
|
fe65c973893c92cd6918ea47c970fcd87dea88d4
|
b9b96aee722f984edf62eeabe0fb32ebbdc2598f
|
/R/f1d.irt.R
|
40933e4a32289352fce55db2d163a6f792700e6c
|
[] |
no_license
|
daniloap/sirt
|
46690dca1382385d0fdfc39c12f15d7dd2adf291
|
8c4cb12ffafd70c14b28c9ca34bfd28c58734e83
|
refs/heads/master
| 2021-01-17T10:49:37.611683
| 2015-03-03T00:00:00
| 2015-03-03T00:00:00
| 35,530,314
| 1
| 0
| null | 2015-05-13T05:38:46
| 2015-05-13T05:38:46
| null |
UTF-8
|
R
| false
| false
| 3,935
|
r
|
f1d.irt.R
|
#########################################################
# Functional Unidimensional Model (Ip et al., 2013)
#
# Fits a functional unidimensional item response model to dichotomous data.
# Either raw data 'dat' is supplied (loadings A and intercepts are then
# estimated from a tetrachoric correlation matrix via factor analysis), or a
# multidimensional parameterization (A, intercept, mu, Sigma) is supplied
# directly.  The unidimensional approximation (ai*, di*, theta*) is obtained
# by an alternating least squares algorithm over quasi Monte Carlo nodes.
#
# Arguments:
#   dat       - matrix of dichotomous item responses (or NULL if A/intercept
#               are given)
#   nnormal   - number of quasi Monte Carlo integration nodes
#   nfactors  - number of dimensions of the multidimensional model
#   A         - item loading matrix (used when dat is NULL)
#   intercept - item intercepts (used when dat is NULL)
#   mu, Sigma - mean vector / covariance matrix of the trait distribution
#   maxiter   - maximum number of ALS iterations
#   conv      - convergence criterion on the maximum parameter change
#   progress  - print progress output?
#
# Returns a list with item parameters (unidimensional ai*/di* and, when dat
# was given, the one-factor comparison a0/d0), person nodes with weights,
# the multidimensional parameterization, the data and the tetrachoric output.
#
# NOTE(review): relies on non-base helpers -- tetrachoric2() (sirt),
# fa() (presumably psych), qmc.nodes() (sirt) and dmvnorm() (presumably
# mvtnorm) -- confirm which packages are attached at load time.
f1d.irt <- function( dat=NULL , nnormal=1000 , nfactors=3 ,
A=NULL , intercept=NULL , mu=NULL , Sigma = NULL , maxiter=100 ,
conv=10^(-5) , progress=TRUE ){
if ( ! is.null(dat) ){
# estimate tetrachoric correlation matrix
if (progress){
cat("*** Estimate tetrachoric correlation\n")
}
tetra <- res <- tetrachoric2(dat , progress=progress)
# estimate factor analysis
# fac1: nfactors-dimensional solution; fac0: one-factor comparison model
fac1 <- fa( r=res$rho , nfactors=nfactors , rotate="none" )
fac0 <- fa( r=res$rho , nfactors=1 , rotate="none" )
# extract standardized loadings
A_stand <- as.matrix( fac1$loadings )
# calculate communality
h2 <- rowSums( A_stand^2 )
# unstandardized loadings
A <- A_stand / sqrt( 1-h2 )
# intercepts
intercept <- - res$tau / sqrt( 1 - h2 )
names.dat <- colnames(dat)
} else {
# parameterization supplied directly: no one-factor comparison available
a0 <- NA
d0 <- NA
A[ is.na(A) ] <- 0
names.dat <- names(intercept)
tetra <- NULL
}
#***************************************
# approximation of normal distribution using quasi Monte Carlo integration nodes
theta <- qmc.nodes( nnormal , nfactors )
if ( is.null(mu) ){
mu <- rep(0,nfactors)
}
if ( is.null(Sigma) ){
Sigma <- diag(1,nfactors)
}
# node weights proportional to the multivariate normal density, normalized
wgt_theta <- dmvnorm(x=theta, mean= mu,
sigma= Sigma )
wgt_theta <- wgt_theta / sum( wgt_theta )
I <- length(intercept)
TP <- nrow(theta)
W1 <- diag(wgt_theta )
D <- ncol(A)
# a_i ' theta_p
Zpi <- matrix( 0 , TP , I )
for (dd in 1:D){
# dd <- 1
Zpi <- Zpi + theta[,dd] * matrix( A[,dd] , TP , I , byrow=TRUE )
}
# Z_pi = a_i theta_p + d_i
Zpi <- Zpi + matrix( intercept , TP , I , byrow=TRUE )
# starting values for a_i and d_i
di <- intercept
diast <- di # diast = d_i*
aiast <- rep(.7,I)
thetaast <- rep(0,TP)
iter <- 0
parchange <- 1
#*****************************************
# begin algorithm
# Alternate between (a) updating the unidimensional trait values theta* by
# weighted least squares given (ai*, di*), standardizing them to weighted
# mean 0 / SD 1, and (b) updating (di*, ai*) by a weighted regression of
# Zpi on theta*.  Stop when parameters change less than 'conv'.
while( ( iter < maxiter ) & ( parchange > conv ) ){
thetaast0 <- thetaast
aiast0 <- aiast
diast0 <- diast
# (1) update theta_p using ( a_i , theta_p , d_i , a_i* , d_i* )
Ypi <- Zpi - matrix( diast , TP , I , byrow=TRUE)
aiastM <- matrix( aiast , TP , I , byrow=TRUE )
thetaast <- rowSums( Ypi * aiastM ) / rowSums( aiastM^2 )
# standardize theta* with respect to the node weights (identification)
wM <- weighted.mean( thetaast , wgt_theta )
sdM <- sqrt( sum( ( thetaast - wM )^2 * wgt_theta ) )
thetaast <- ( thetaast - wM ) / sdM
# (2) weighted regression of Zpi on (1, theta*) -> intercepts and slopes
X <- cbind( 1 , thetaast )
c1 <- solve( crossprod( X , W1 ) %*% X , crossprod( X , W1 ) %*% Zpi )
diast <- c1[1,]
aiast <- c1[2,]
# compute approximation error
errpi <- Zpi - thetaast * matrix(aiast,TP , I , byrow=TRUE ) -
matrix(diast,TP , I , byrow=TRUE )
approx.error <- sum( errpi^2 * wgt_theta ) / I
# parameter change
parchange <- max( abs( c(diast - diast0,aiast-aiast0,thetaast-thetaast0) ))
iter <- iter + 1
if (progress){
cat( paste0( "Iteration " , iter ,
" | Approximation error = " , round( approx.error , 5 ) ,
" | Max. parameter change = " , round( parchange , 5) ,
"\n") )
flush.console()
}
}
#**************************************************
if ( ! is.null(dat) ){
# unstandardized loadings 1 factor model
A0_stand <- fac0$loadings
a0 <- A0_stand[,1] / sqrt( 1 - A0_stand[,1]^2 )
d0 <- - res$tau / sqrt( 1 - A0_stand[,1]^2 )
}
if ( is.null(dat) ){
a0 <- NULL
d0 <- NULL
}
# assemble item table (ai.ast/di.ast = unidimensional approximation;
# ai0/di0 = one-factor model, NULL when parameters were supplied directly)
item <- data.frame( "item" = names.dat )
item$ai.ast <- aiast
item$ai0 <- a0
item$di.ast <- diast
item$di0 <- d0
person <- data.frame( "theta.ast" = thetaast ,
"wgt" = wgt_theta )
res <- list( "item" = item , "person"=person ,
"A"=A , "intercept"=intercept ,
"dat"=dat , "tetra" = tetra )
return(res)
}
#**************************************************
|
9c13050ab7e613b768e3861854799dfb2e48c666
|
8516967f77aa4437daa77f038444e6929e4dfd35
|
/man/loglikeSP.Rd
|
37e710bee5dfd9c829fa05b2fe03f847af5ad675
|
[] |
no_license
|
bomeara/phybase
|
52a7c5872809f7dff283f4e8c8a0d3693cfe2d85
|
05949f49b2bac58de81113e81d6515c20340142d
|
refs/heads/master
| 2016-08-11T20:09:33.013441
| 2016-02-02T21:29:32
| 2016-02-02T21:29:32
| 50,945,737
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,523
|
rd
|
loglikeSP.Rd
|
\name{loglikeSP}
\alias{loglikeSP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ loglikelihood of the species tree, i.e., Rannala and Yang formula }
\description{
This function calculates the loglikelihood of a species tree from a set of gene trees using the Rannala and Yang formula
}
\usage{
loglikeSP(gtree, sptree, taxaname,spname,species.structure,strict=T)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{gtree}{ a collection of gene trees }
\item{sptree}{ a species tree in newick format }
\item{taxaname}{ the names of taxa }
\item{spname}{ the names of species }
\item{species.structure}{ define which sequence belong to which species }
\item{strict}{whether or not to check the result}
}
\value{
The function returns the log likelihood score.
}
\references{ Rannala, B. and Z. Yang. 2003. Bayes estimation of species divergence times and ancestral population sizes using DNA sequences from multiple loci. Genetics 164: 1645-1656. }
\author{ Liang Liu }
\examples{
gtree<-"(((A:1,B:1):3,C:4):2,D:6);"
stree<-"(((A:0.5,B:0.5):1#0.1,C:1.5):1#0.1,D:2.5)#0.1;"
taxaname<-c("A","B","C","D")
spname<-taxaname
ntax<-length(taxaname)
nspecies<-length(spname)
species.structure<-matrix(0,nrow=nspecies,ncol=ntax)
diag(species.structure)<-1
loglikeSP(gtree,stree,taxaname,spname,species.structure)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ programming }
|
3fde62a700c7a44f0af10bd78d70af636924f4b0
|
57fb0239edbfecef2e28545e0976d5d54cb26ba9
|
/cachematrix.R
|
b14712a5e1fc0041edfb4ccb4daceb6e305ffaf9
|
[] |
no_license
|
llself/ProgrammingAssignment2
|
a75083c132446f11d14dcf44b9f576d7b4c030f6
|
0e4e74ba1a2284b345127ffedfa87a4ee0c0b9c5
|
refs/heads/master
| 2021-01-21T23:54:34.467087
| 2014-10-23T16:26:17
| 2014-10-23T16:26:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,192
|
r
|
cachematrix.R
|
# Leslie Self
# R Programming Class
# Programming Assignment #2, Lexical Scoping and getting the Inverse of a Matrix
# Date: October 23, 2014
## I have two functions, makeCacheMatrix and cacheSolve. The makeCacheMatrix manages my cached value.
## I will set it initially to null and then will create functions to get it and set it that value.
## The cacheSolve matrix will either return the cache value of the inverse or it will calculate
## the inverse and set it.
## this function create a list of matrix objects that can cache its inverse
# Build a "cache-aware" matrix wrapper: a list of four closures that share
# the matrix 'x' and a memoized inverse.  Replacing the matrix via set()
# invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # cached inverse; NULL means "not computed yet / stale"
  inv <- NULL
  list(
    # replace the stored matrix and drop any stale inverse
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # return the stored matrix
    get = function() x,
    # store a freshly computed inverse
    setinverse = function(inverse) inv <<- inverse,
    # return the cached inverse (or NULL if none)
    getinverse = function() inv
  )
}
## Return the inverse of the matrix wrapped by a makeCacheMatrix object.
## On the first call the inverse is computed with solve() and stored via
## x$setinverse(); subsequent calls reuse the cached value and announce the
## cache hit with a message.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # cache hit: report and return immediately
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # cache miss: compute, memoize, and return the inverse
  inv <- solve(x$get())
  x$setinverse(inv)
  inv
}
|
0264cb653ec9234bda22e91ddf5521ba28c0b301
|
f7546999748d00b74db8551ed65e02cc564f7f4f
|
/man/A0SD.BAF.Rd
|
95ce5fc13b332e4f338f43a31f91f992ee201639
|
[] |
no_license
|
cran/CHAT
|
b5887ac9eb97d1deace91cd638477e5d7bf56319
|
1819354a80335e6d92384002b90899b70c60719f
|
refs/heads/master
| 2021-01-19T08:15:18.756721
| 2014-02-10T00:00:00
| 2014-02-10T00:00:00
| 19,303,604
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
rd
|
A0SD.BAF.Rd
|
\name{A0SD.BAF}
\alias{A0SD.BAF}
\docType{data}
\title{
B-allele frequencies of selected chromosomes for TCGA sample A1-A0SD.
}
\description{
This is partial level 2 bi-allele copy number data selected from The Cancer Genome Atlas Breast Carcinoma 800K Genome-wide SNP 6.0 array for sample A1-A0SD in the format of B-allele frequency.
}
\usage{data(A0SD.BAF)}
\references{
The Cancer Genome Atlas Network, Comprehensive molecular portraits of human breast tumours, \emph{Nature}, 2012
}
|
348f51912fd9c29f372bb6026c4b8fdfe5a5be7d
|
0fdc85c0cced444a2210e65523a0dd0b339319db
|
/R/spiderplot.R
|
8e0bd464ac82c399ce98c454ce6f5d5c8f72e075
|
[] |
no_license
|
jaroyle/oSCR
|
769730d56085d856437ddbb4d7b870509805bd41
|
250323186d9cef173be929667b618bbb79dc96c7
|
refs/heads/master
| 2023-04-14T00:24:00.143601
| 2023-04-06T11:40:40
| 2023-04-06T11:40:40
| 42,339,391
| 9
| 11
| null | 2020-10-30T14:19:41
| 2015-09-12T01:10:02
|
R
|
UTF-8
|
R
| false
| false
| 2,670
|
r
|
spiderplot.R
|
## Plot individual capture "spiders": each detected individual's mean capture
## location (its observed activity center) connected by line segments to every
## trap at which it was caught.
##
## Arguments:
##   scrFrame     - optional scrFrame object; if supplied, 'traplocs' and 'y'
##                  are taken from scrFrame$traps[[session]] and
##                  scrFrame$caphist[[session]].
##   session      - session index used when extracting from 'scrFrame'.
##   y            - capture history: a 3-d array (individuals x occasions x
##                  traps, or individuals x traps x occasions -- auto-detected
##                  and permuted), or a 2-d matrix of trap indices
##                  (individuals x occasions, values > nrow(traplocs) meaning
##                  "not captured").
##   traplocs     - 2-column matrix of trap coordinates.
##   add          - add to an existing plot instead of opening a new one.
##   return.stats - if TRUE, return a list with the distance of each observed
##                  activity center from the trap-array center ('xcent'), the
##                  centers themselves ('avg.s'), and the array center
##                  ('center').
##
## Fix vs. previous version: a misplaced closing brace nested the 2-d branch
## and the summary/return tail inside the 3-d branch, so matrix-valued 'y'
## was silently ignored and return.stats never worked for it.
spiderplot <- function(scrFrame = NULL, session = 1, y = NULL,
                       traplocs = NULL, add = FALSE, return.stats = FALSE) {
  if (!is.null(scrFrame)) {
    traplocs <- scrFrame$traps[[session]]
    y <- scrFrame$caphist[[session]]
  }
  dither <- FALSE
  # Dither amplitudes: 1% of the trap-array extent in each direction
  # (only used when 'dither' is TRUE).
  dx <- 0.01 * (max(traplocs[, 1]) - min(traplocs[, 1]))
  dy <- 0.01 * (max(traplocs[, 2]) - min(traplocs[, 2]))
  avg.s <- NULL
  if (length(dim(y)) == 3) {
    # If supplied as individuals x traps x occasions, permute to
    # individuals x occasions x traps.
    if (dim(y)[2] == nrow(traplocs)) {
      nind <- dim(y)[1]
      ntraps <- dim(y)[2]
      nocc <- dim(y)[3]
      newy <- array(NA, dim = c(nind, nocc, ntraps))
      for (i in seq_len(nind)) {
        newy[i, 1:nocc, 1:ntraps] <- t(y[i, , ])
      }
      y <- newy
    }
    y3d <- y
    J <- dim(y3d)[3]
    T <- dim(y3d)[2]
    nind <- dim(y3d)[1]
    if (add) {
      points(traplocs, pch = 20, cex = 1.5)
    } else {
      plot(traplocs, pch = 20, xlab = " ", ylab = " ", cex = 1.5)
    }
    avg.s <- matrix(NA, nrow = nind, ncol = 2)
    for (i in seq_len(nind)) {
      # Collect coordinates of every trap where individual i was detected.
      tmp <- NULL
      for (t in seq_len(T)) {
        aa <- y3d[i, t, ]
        if (sum(aa) > 0) {
          tmp <- rbind(tmp, traplocs[aa > 0, ])
        }
      }
      avg.s[i, ] <- c(mean(tmp[, 1]), mean(tmp[, 2]))
      # delta is zero unless dithering is enabled.
      delta <- c(runif(1, -dx, dx), runif(1, -dy, dy)) * ifelse(dither, 1, 0)
      points(avg.s[i, 1] + delta, avg.s[i, 2] + delta, pch = "S",
             cex = 1, col = "red")
      if (nrow(tmp) > 1) {
        for (m in seq_len(nrow(tmp))) {
          lines(c(avg.s[i, 1], tmp[m, 1]), c(avg.s[i, 2], tmp[m, 2]))
        }
      }
    }
  } else if (length(dim(y)) == 2) {
    # 2-d encoding: y[i, t] is the trap index of the capture; values above
    # J = nrow(traplocs) encode "not captured".
    y2d <- y
    J <- nrow(traplocs)
    T <- dim(y2d)[2]
    nind <- dim(y2d)[1]
    plot(traplocs, pch = 20, xlab = " ", ylab = " ", cex = 1.5)
    avg.s <- matrix(NA, nrow = nind, ncol = 2)
    for (i in seq_len(nind)) {
      tmp <- NULL
      for (t in seq_len(T)) {
        aa <- y2d[i, t]
        if (aa <= J) {
          tmp <- rbind(tmp, traplocs[aa, ])
        }
      }
      avg.s[i, ] <- c(mean(tmp[, 1]), mean(tmp[, 2]))
      points(avg.s[i, 1], avg.s[i, 2], pch = "S", cex = 1, col = "red")
      if (nrow(tmp) > 1) {
        for (m in seq_len(nrow(tmp))) {
          lines(c(avg.s[i, 1], tmp[m, 1]), c(avg.s[i, 2], tmp[m, 2]))
        }
      }
    }
  }
  if (is.null(avg.s)) {
    # 'y' was neither a matrix nor a 3-d array: nothing to plot or summarize.
    return(invisible(NULL))
  }
  # Re-draw the traps on top and compute summary statistics.
  points(traplocs, pch = 20)
  Cx <- mean(traplocs[, 1])
  Cy <- mean(traplocs[, 2])
  # Distance of each observed activity center from the trap-array center.
  xcent <- sqrt((avg.s[, 1] - Cx)^2 + (avg.s[, 2] - Cy)^2)
  if (return.stats) {
    return(list(xcent = xcent, avg.s = avg.s, center = c(Cx, Cy)))
  }
  invisible(NULL)
}
|
de18cb71b3b2ad1a3a680ed6f7e85299f5d73a07
|
d50f7b303f6b7c0c8b6878d51bc5e3323d7b4a6e
|
/Code.R
|
de1c0874c3ea354ac0be5867a3dc5f34d768d292
|
[] |
no_license
|
ShravyaKadur/gdp-net-debt-analysis
|
5fc7c312ead39dab5df359a35c278bccde676e7a
|
6f7b8cbeae628869d9f3cbffc61a7c1ed9f61703
|
refs/heads/master
| 2020-12-31T22:50:12.185586
| 2020-02-08T03:22:01
| 2020-02-08T03:22:01
| 239,062,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,996
|
r
|
Code.R
|
#Setting the directory
# NOTE(review): setwd() with an absolute machine-specific path makes this
# script non-portable; prefer relative paths.
setwd("C:/Users/Shravya/Documents/College Stuff/Data Analytics/dataAnalyticsProject/Cleaned")
#Loading the data
gdpnc <- read.csv("GDPInPercent.csv")
netdebtnc <- read.csv("netDebtToGDP.csv")
#Converting to transpose
trgdpnc <- t(gdpnc)
trnetdebtnc <- t(netdebtnc)
#Converting each transpose from character to numeric data types
# The transposed tables are character matrices; rows 2-11 (10 years) and the
# 94 country columns are coerced to numeric cell by cell.
z<-matrix(0,10,94)
for ( i in 1:10)
{
for(j in 1:94)
{
z[i,j] <- as.numeric(trgdpnc[(i+1),j])
}
}
zz <- matrix(0,10,94)
for ( i in 1:10)
{
for(j in 1:94)
{
zz[i,j] <- as.numeric(trnetdebtnc[(i+1),j])
}
}
#Next I am going to fill in the values of Net Debt for Greece, Austria, South Sudan and
#Nigeria using imputeTS
library(imputeTS)
# Linear interpolation of missing (NA) values along each 10-year series.
fullNetDebt <- na.interpolation(zz)
#Fill in the values of GDP for South Sudan
fullGDP <- na.interpolation(z)
#Find the correlation matrix between countries for GDP
corMatGDP <- matrix(0,94,94)
for( i in 1:94)
{
for(j in 1:94)
{
corMatGDP[i,j]<-(cor(fullGDP[,i], fullGDP[,j]))
}
}
#See which country Syria is most similar to in terms of GDP
# Column 83 is assumed to be Syria -- TODO confirm against the CSV headers.
maxValue <- corMatGDP[1,83]
country <- 0
for( i in 2:94)
{
if(i!=83)
{
if(corMatGDP[i,83] > maxValue)
{
maxValue <- corMatGDP[i,83]
country <- i
}
}
}
#Filling in missing data values for Syria
# NOTE(review): missing entries are tested against 0 here (not NA) --
# presumably encoded as 0 upstream; verify genuine zeros cannot occur.
for( i in 1:10)
{
if(fullGDP[i,83] == 0)
{
fullGDP[i,83] = fullGDP[i,country]
}
}
#Find the correlation matrix between countries for Net Debt
corMatNDebt <- matrix(0,94,94)
for( i in 1:94)
{
for(j in 1:94)
{
corMatNDebt[i,j]<-(cor(fullNetDebt[,i], fullNetDebt[,j]))
}
}
#See which country Syria is most similar to in terms of Net Debt
maxValue <- corMatNDebt[1,83]
country2N <- 0
for( i in 2:94)
{
if(i!=83)
{
if(corMatNDebt[i,83] > maxValue)
{
maxValue <- corMatNDebt[i,83]
country2N <- i
}
}
}
#Filling in missing data values for Syria
for( i in 1:10)
{
if(fullNetDebt[i,83] == 0)
{
fullNetDebt[i,83] = fullNetDebt[i,country2N]
}
}
#NOW ALL THE DATA VALUES IN TERMS OF NATIONAL CURRENCY HAVE BEEN LOADED AND THE MISSING
#VALUES HAVE BEEN ACCOUNTED FOR
#FORMULATING THE FEATURE VECTORS
# fvmat[i, ]: per-country 10-year means of GDP (col 1) and net debt (col 2).
fvmat <- matrix(0,94,2)
for(i in 1:94)
{
sumgdp <- 0
sumnd <- 0
for(j in 1:10)
{
sumgdp <- sumgdp + fullGDP[j,i]
sumnd <- sumnd + fullNetDebt[j,i]
}
avggdp <- sumgdp/10
avgnd <- sumnd/10
fvmat[i,1] = avggdp
fvmat[i,2] = avgnd
}
#LOADING THE PARAMETER DATA
# Eight candidate predictors, one CSV per parameter (years x countries).
po1 <- read.csv("p1_Population.csv")
po2 <- read.csv("p2_CurrentAccountBalance.csv")
po3 <- read.csv("p3_GeneralGovernmentRevenue.csv")
po4 <- read.csv("p4_unemploymentRate.csv")
po5 <- read.csv("p5_volOfExports.csv")
po6 <- read.csv("p6_volOfImports.csv")
po7 <- read.csv("p7_grossNationalSavings.csv")
po8 <- read.csv("p8_totalInvestment.csv")
#Find the transpose of the data
hp1 <- t(po1)
hp2 <- t(po2)
hp3 <- t(po3)
hp4 <- t(po4)
hp5 <- t(po5)
hp6 <- t(po6)
hp7 <- t(po7)
hp8 <- t(po8)
#Converting each transpose from character to numeric data types
p1<-matrix(0,10,94)
p2<-matrix(0,10,94)
p3<-matrix(0,10,94)
p4<-matrix(0,10,94)
p5<-matrix(0,10,94)
p6<-matrix(0,10,94)
p7<-matrix(0,10,94)
p8<-matrix(0,10,94)
for ( i in 1:10)
{
for(j in 1:94)
{
p1[i,j] <- as.numeric(hp1[(i+1),j])
p2[i,j] <- as.numeric(hp2[(i+1),j])
p3[i,j] <- as.numeric(hp3[(i+1),j])
p4[i,j] <- as.numeric(hp4[(i+1),j])
p5[i,j] <- as.numeric(hp5[(i+1),j])
p6[i,j] <- as.numeric(hp6[(i+1),j])
p7[i,j] <- as.numeric(hp7[(i+1),j])
p8[i,j] <- as.numeric(hp8[(i+1),j])
}
}
#FILLING IN THE MISSING VALUES THROUGH INTERPOLATION
# NOTE(review): p1 and p5 are not interpolated -- presumably they contain no
# missing values; confirm against the source CSVs.
p2c <- na.interpolation(p2)
p3c <- na.interpolation(p3)
p4c <- na.interpolation(p4)
p6c <- na.interpolation(p6)
p7c <- na.interpolation(p7)
p8c <- na.interpolation(p8)
#NOW THAT THE MISSING VALUES HAVE BEEN FILLED, CALCULATE THE FEATURE VECTOR
#MATRIX FOR THE PARAMETERS
# pfv[i, k]: 10-year mean of parameter k for country i.
pfv <- matrix(0,94,8)
for(i in 1:94)
{
sump1 <- 0
sump2 <- 0
sump3 <- 0
sump4 <- 0
sump5 <- 0
sump6 <- 0
sump7 <- 0
sump8 <- 0
for(j in 1:10)
{
sump1 <- sump1 + p1[j,i]
sump2 <- sump2 + p2c[j,i]
sump3 <- sump3 + p3c[j,i]
sump4 <- sump4 + p4c[j,i]
sump5 <- sump5 + p5[j,i]
sump6 <- sump6 + p6c[j,i]
sump7 <- sump7 + p7c[j,i]
sump8 <- sump8 + p8c[j,i]
}
avgp1 <- sump1/10
avgp2 <- sump2/10
avgp3 <- sump3/10
avgp4 <- sump4/10
avgp5 <- sump5/10
avgp6 <- sump6/10
avgp7 <- sump7/10
avgp8 <- sump8/10
pfv[i,1] = avgp1
pfv[i,2] = avgp2
pfv[i,3] = avgp3
pfv[i,4] = avgp4
pfv[i,5] = avgp5
pfv[i,6] = avgp6
pfv[i,7] = avgp7
pfv[i,8] = avgp8
}
#print(pfv)
#NOW THE FEATURE VECTOR MATRIX FOR THE PARAMETERS HAVE BEEN CALCULATED
#PCA
pfv_new<-scale(pfv)
s<-cov(pfv)
r<-cor(pfv_new)
# NOTE(review): princomp() is applied to the 8 x 8 covariance/correlation
# matrices themselves, not to the 94 x 8 data matrix -- verify intent.
pca_job1<-princomp(s,cor=FALSE)
pca_job2<-princomp(r,cor=TRUE)
plot(pca_job1,type="lines")
plot(pca_job2,type="lines")
# NOTE(review): as.matrix() on a princomp object is unusual; result unused.
pca_job1.m=as.matrix(pca_job1)
# NOTE(review): likely a bug -- 'pfv.m' is undefined here; this was probably
# meant to be pfv.m <- as.matrix(pfv).
pfv.m<-as.matrix(pfv.m)
# NOTE(review): cov() already returns a numeric matrix, so this loop is a
# no-op.
for ( i in 1:8)
{
for(j in 1:8)
{
s[i,j] <- as.numeric(s[i,j])
}
}
# Project the data onto the first two eigenvectors of the covariance matrix.
# NOTE(review): eigen() returns component 'vectors'; '$vector' only works via
# partial matching -- spell it out as $vectors.
scores<-pfv.m%*%(eigen(s)$vector[,1:2])
plot(scores[,1], scores[,2], main="PC score plot")
# NOTE(review): the two lines below duplicate the two lines above.
scores<-pfv.m%*%(eigen(s)$vector[,1:2])
plot(scores[,1], scores[,2], main="PC score plot")
screeplot(pca_job1)
screeplot(pca_job2)
biplot(pca_job1,cex=0.8)
biplot(pca_job2,cex=0.8)
#As we can see from the biplots PCA doesnt seem like a proper tool to analyse
#this dataset
#So we use another feature subset selection. LVQ(Learning Vector Quantization)
# NOTE(review): install.packages() inside a script re-installs on every run;
# guard with requireNamespace() instead.
install.packages('mlbench')
install.packages('caret')
library('mlbench')
library('caret')
# NOTE(review): corrplot() requires library(corrplot), which is never loaded.
corrplot(r,method=c("number"),title="cor matrix",diag=T)
extra<-read.csv('extra.csv')
pfv1<-pfv
pfv1<-cbind(pfv1,extra['Income'])
# 10-fold cross-validation, repeated 3 times, for the LVQ model below.
control<- trainControl(method="repeatedcv",number=10,repeats=3)
model<-train((Income)~.,data=pfv1,method="lvq",preProcess="scale",trControl=control)
# Variable importance from the fitted model drives the feature selection.
imp <- varImp(model, scale=FALSE)
plot(imp)
write.csv(pfv, file = "yourfile.csv", row.names = FALSE)
#and then copied
#the important attributes selcted by lvq in impFeatures.csv
pfv2<-read.csv('impFeatures.csv')
#then we read the file back to an R dataframe called pfv2
km <- kmeans(log(pfv2+10), 4, nstart = 25)
#we use log to scale out for better visualisation and we add 10 to all the values
#so we dont get NAN since log(0)=NAN.
#4 iterations decided by the elbow method
write.csv(km$cluster, file = "yourfile2.csv", row.names = FALSE)
#we copy the cluster values to a file
plot(log(pfv2+10),col=km$cluster)
#this is the plot showing the clusters
#CORRELATION
#fvmat is my feature vector containing feature vectors of Net Debt and GDP
write.csv(fvmat, file = "featureForCor.csv")
# Cluster membership files were prepared manually from yourfile2.csv.
cluster1 <- read.csv("cluster1.csv")
cluster2 <- read.csv("cluster2.csv")
cluster3 <- read.csv("cluster3.csv")
cluster4 <- read.csv("cluster4.csv")
#print(cluster1)
# NOTE(review): read.csv already yields numeric columns where possible; the
# element-wise as.numeric() loops below are likely no-ops.  Also note the
# hard-coded row counts (10, 15) for clusters 1 and 2 vs. nrow() for 3 and 4.
for ( i in 1:10)
{
for(j in 2:3)
{
cluster1[i,j] <- as.numeric(cluster1[i,j])
}
}
for ( i in 1:15)
{
for(j in 2:3)
{
cluster2[i,j] <- as.numeric(cluster2[i,j])
}
}
for ( i in 1:nrow(cluster3))
{
for(j in 2:3)
{
cluster3[i,j] <- as.numeric(cluster3[i,j])
}
}
for ( i in 1:nrow(cluster4))
{
for(j in 2:3)
{
cluster4[i,j] <- as.numeric(cluster4[i,j])
}
}
#Find the correlation matrix for the clusters and display the final result
corclus1 <- 0
corclus1<-(cor(cluster1[,2], cluster1[,3]))
print("The correlation between Net Debt and GDP for Cluster 1")
print(corclus1)
corclus2 <- 0
corclus2<-(cor(cluster2[,2], cluster2[,3]))
print("The correlation between Net Debt and GDP for Cluster 2")
print(corclus2)
corclus3<- 0
corclus3<-(cor(cluster3[,2], cluster3[,3]))
print("The correlation between Net Debt and GDP for Cluster 3")
print(corclus3)
corclus4 <- 0
corclus4<-(cor(cluster4[,2], cluster4[,3]))
print("The correlation between Net Debt and GDP for Cluster 4")
print(corclus4)
|
dcc06b849eece64a79c72a440734f2e73c1c930b
|
6d7a5a343804d41d36224b3cdac78821af3a291f
|
/Section04/Section04_Tables.R
|
74c5fdb6c6cff4caca3317bf5b51ea492cf677a5
|
[] |
no_license
|
AI-Ahmed/Big-Data-Analytics-Using-R
|
eca4b339d8ed2d0df154eb2bc6efa46b2c9fd2dc
|
44d4876bfbd5cb54eddbc8d35414a8f7c6d9c3af
|
refs/heads/main
| 2022-12-29T17:45:23.269562
| 2020-10-16T04:24:34
| 2020-10-16T04:24:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
Section04_Tables.R
|
# Teaching script: basic table import, inspection, subsetting, export and
# loop syntax in R.  Several lines are templates, not runnable as-is.
#Import File
# Interactive import: file.choose() opens a file picker dialog.
mytable <- read.table(file = file.choose(),header= T, sep=",")
#alt Import File
# NOTE(review): hard-coded absolute Windows path; non-portable.
mytable <- read.table(file = "E:\\Education\\Terms_Components\\Term07\\BigDataAnalysis\\Lab\\Practics\\Section04\\empData.csv",header= T, sep=",")
#To Run it -> view(mytable)
#Summary the data
summary(mytable)
#to select a certain col and rows in the table-> mytable[c(rows),c(col)]
mytable[c(1,2,3),]
#to select a range cols and rows in the table-> mytable[c(rows: rows),c(col)]
mytable[c(1: 500),c(1:4)]
#head(mytable) helps you to retrive the first 6 rows
head(mytable)
#tail(mytable) helps to retrive the last 6 rows
tail(mytable)
#Display the col's name
colnames(mytable)
#Change the col's name
# NOTE(review): the literal "..." below is a template placeholder -- replace
# it with real column names; as written this line errors at runtime.
colnames(mytable)<- c("col1", "col2",...)
#subset data: subset(mytable,mytable$certain_col_title >= col_certain_value & mytable$certain_col_title <= 80)
value <- subset(mytable, mytable$price >= 180 & mytable$price <= 1800 )
#Export Table
# NOTE(review): write.xlsx() needs the 'openxlsx' (or 'xlsx') package, which
# is never loaded in this script.
write.xlsx(mytable, file = "E:\\Education\\Terms_Components\\Term07\\BigDataAnalysis\\Lab\\Practics\\Brands.xlsx",row.names = F)
#Remove value
# NOTE(review): rm(list = ls()) wipes the entire workspace, including
# 'mytable' and 'value' created above.
rm(list= ls())
#for Loop
for(i in 1:10){
print(i)
}
#while Loop
# After the for loop, i == 10, so the body runs once (prints 100) and then
# i becomes 10 + sqrt(10), ending the loop.
while(i<=10){
print(i*i)
i=i+sqrt(i)
}
|
2e335563c5b9d3dbd3014558066b11ca662e2999
|
580524d16c446d195dbc75234f43e5635db37c86
|
/random_trial.R
|
5b4b937cf4c2f15e379cbcfd84768ebb58f6a36a
|
[
"MIT"
] |
permissive
|
BrennaS/SURGE
|
967d1244e6246c82dbc1ae80b463a8d0564bbec4
|
11163eb74e608ac49042111a869447738a6c6dca
|
refs/heads/master
| 2021-01-15T18:04:18.088204
| 2016-05-03T23:32:35
| 2016-05-03T23:32:35
| 52,685,373
| 0
| 1
| null | 2016-02-27T19:44:16
| 2016-02-27T19:44:16
| null |
UTF-8
|
R
| false
| false
| 880
|
r
|
random_trial.R
|
## Random Walk Historical Surge Time Series
## Fits a Bayesian state-space random-walk model to a surge series 'y' with
## JAGS (via rjags): observations y[i] are normal around latent states x[i],
## and the latent states follow a Gaussian random walk.
library(rjags)
# NOTE(review): 'y=y' assumes a vector 'y' already exists in the workspace
# before this script is sourced.
y=y
RandomWalk = "
model{
#### Data Model
for(i in 1:n){
y[i] ~ dnorm(x[i],tau_obs)
}
#### Process Model
for(i in 2:n){
x[i]~dnorm(x[i-1],tau_add)
}
#### Priors
x[1] ~ dunif(-1,10)
tau_obs ~ dgamma(a_obs,r_obs)
tau_add ~ dgamma(a_add,r_add)
}
"
# Data list with flat Gamma(1,1) hyperpriors on both precisions.
data <- list(y=y,n=length(y),a_obs=1,r_obs=1,a_add=1,r_add=1)
nchain = 3
# Initial values per chain from bootstrap resamples of y: process precision
# from the variance of first differences, observation precision from the
# sample variance (scaled by 5).
init <- list()
for(i in 1:nchain){
y.samp = sample(y,length(y),replace=TRUE)
init[[i]] <- list(tau_add=1/var(diff(y.samp)),tau_obs=5/var(y.samp))
}
j.model <- jags.model (file = textConnection(RandomWalk),
data = data,
inits = init,
n.chains = 3)
# NOTE(review): only 100 iterations and no burn-in -- likely too short for
# convergence; increase n.iter for real use.
jags.out <- coda.samples (model = j.model,
variable.names = c("tau_add","tau_obs"),
n.iter = 100)
plot(jags.out)
|
35582b77bb456602057ca641d81e59a57cca72ce
|
f797c1fa91d8072cc7f4cfcbcfee1d02b0c6b6c2
|
/listings/header_roxygen.r
|
13750d97525592d2fcfd8722d1e70b240c049410
|
[
"CC-BY-4.0"
] |
permissive
|
outheis/programmierleitfaden
|
0887b999c07ca464a58ad7a4772cc477a4f209ba
|
adf12e13f7bf4ebd51a17f3669dc16c1461e850f
|
refs/heads/master
| 2021-01-21T06:10:30.793615
| 2015-05-04T11:57:02
| 2015-05-04T11:57:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 311
|
r
|
header_roxygen.r
|
#!/usr/bin/Rscript --vanilla
#' Jahrestagung_ForstBW_CU.R
#'
#' create graphics for presentation at the 2014 annual convention of ForstBW.
#'
#' @author Dominik Cullmann, <dominik.cullmann@@forst.bwl.de>
#' @section Version: Tue Apr 14 10:22:59 CEST 2015
#' @docType data
#' @name Header
NULL
# ROXYGEN_STOP
|
09e7839a24708e28e22352669c7fba0bb884f1ec
|
4183488ba1d91a21cc7e80e385351a805bb1c37b
|
/man/sampleDirs.Rd
|
a6acece6c378ab600ef5b64cdd28bc50b9407a1c
|
[
"MIT"
] |
permissive
|
roryk/bcbioBase
|
f6f88b2b1324bb6291cd6b4e86941fe95bd62b18
|
ec7b4053c9bfed4189fd326ede0d7dad6facf6fb
|
refs/heads/master
| 2021-05-02T18:17:37.380306
| 2018-08-19T19:36:33
| 2018-08-19T19:36:33
| 120,660,171
| 0
| 0
| null | 2018-02-07T19:18:51
| 2018-02-07T19:18:51
| null |
UTF-8
|
R
| false
| true
| 659
|
rd
|
sampleDirs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampleDirs.R
\name{sampleDirs}
\alias{sampleDirs}
\title{Sample Directories}
\usage{
sampleDirs(uploadDir)
}
\arguments{
\item{uploadDir}{\code{string}. File path to bcbio run upload directory.}
}
\value{
Named \code{character} containing sample directory paths.
}
\description{
Sample Directories
}
\note{
Function will stop if no sample directories match.
}
\examples{
uploadDir <- system.file("extdata/bcbio", package = "bcbioBase")
sampleDirs(uploadDir)
}
\seealso{
Other Data Functions: \code{\link{minimalSampleData}}
}
\author{
Michael Steinbaugh
}
\concept{Data Functions}
|
31a1a272dc8263f35d379fd65c2656b8317b3070
|
b08b7e3160ae9947b6046123acad8f59152375c3
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/n-queens-problem-1.r
|
3332e6a59544d7460d32f376c1069b8757c09f60
|
[] |
no_license
|
dlaststark/machine-learning-projects
|
efb0a28c664419275e87eb612c89054164fe1eb0
|
eaa0c96d4d1c15934d63035b837636a6d11736e3
|
refs/heads/master
| 2022-12-06T08:36:09.867677
| 2022-11-20T13:17:25
| 2022-11-20T13:17:25
| 246,379,103
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 538
|
r
|
n-queens-problem-1.r
|
#' Solve the n-queens problem by backtracking over permutations.
#'
#' Returns every way to place n mutually non-attacking queens on an n x n
#' board.  Each solution is one column of the result matrix: entry r gives
#' the column of the queen placed in row r (so each column is a permutation
#' of 1..n).  For n = 8 this yields 92 solutions.
#'
#' 'u' and 'v' track which anti-diagonals / diagonals are still free; 'a'
#' holds the current partial permutation, modified in place via <<- and
#' restored on backtrack.  (Idiom fixes vs. previous version: TRUE/FALSE
#' instead of the reassignable T/F, and seq_len() instead of seq().)
queens <- function(n) {
  a <- seq_len(n)               # candidate permutation (row -> column)
  u <- rep(TRUE, 2 * n - 1)     # free "/" diagonals, indexed by i - k + n
  v <- rep(TRUE, 2 * n - 1)     # free "\" diagonals, indexed by i + k - 1
  m <- NULL                     # accumulates solutions column-wise
  aux <- function(i) {
    if (i > n) {
      m <<- cbind(m, a)         # complete placement: record it
    } else {
      for (j in seq(i, n)) {
        k <- a[[j]]
        p <- i - k + n
        q <- i + k - 1
        if (u[[p]] && v[[q]]) {
          # place queen in column k of row i; mark its diagonals occupied
          u[[p]] <<- v[[q]] <<- FALSE
          a[[j]] <<- a[[i]]
          a[[i]] <<- k
          aux(i + 1)
          # backtrack: free the diagonals and undo the swap
          u[[p]] <<- v[[q]] <<- TRUE
          a[[i]] <<- a[[j]]
          a[[j]] <<- k
        }
      }
    }
  }
  aux(1)
  m
}
|
8cb2913c204052fa9085fb706f65e77a17dad8a7
|
839640e98e37f8b668e58e61f86453270d5fcc57
|
/R/sim-coef.r
|
2284a71dc4fa49863daadf75b42a6ca1f833ec46
|
[] |
no_license
|
dan410/sfdasim
|
f7f06ce63747c717b3ac279ead0b484f8fcdd436
|
7c39d423d10c29cdfd62a57b6f4290f43cc0ebfc
|
refs/heads/master
| 2021-01-21T12:40:38.159809
| 2014-08-07T07:23:27
| 2014-08-07T07:23:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,179
|
r
|
sim-coef.r
|
#' Simulate basis-function coefficients as independent Gaussian random fields
#'
#' One Gaussian random field is simulated per basis function, using the
#' user-specified covariance model and parameters for that function.  The
#' model specification corresponds to the GeoR package.
#'
#' @param nfields number of fields to simulate (one for each basis function)
#' @param grid.xlim vector of length 2 specifying the xlim for the grid. Only used if locs is not specified.
#' @param grid.ylim vector of length 2 specifying the ylim for the grid. Only used if locs is not specified.
#' @param grid.dim vector with 2 numbers specifying the dimension of the rectangular grid. Only used if locs is not specified.
#' @param cov.model name of covariance model(s) connected to each basis function.
#' @param cov.pars a vector with 2 elements or an n x 2 matrix with values of the
#' covariance parameters sigma^2 (partial sill) and phi (range parameter). If a vector,
#' the elements are the values of sigma^2 and phi, respectively. If a matrix,
#' corresponding to a model with several structures, the values of sigma^2 are in
#' the first column and the values of phi are in the second.
#' @param locs nx2 matrix of locations
#' @param ... other parameters sent to grf()
#' @return coef matrix of simulated coefficients, one column per field
#' @return locs nx2 matrix of locations
#' @return cov.model name of covariance model(s) connected to each basis function.
#' @return cov.pars covariance parameters corresponding to the covariance models.
sim_coef <- function(nfields, grid.dim, grid.xlim, grid.ylim, cov.model, cov.pars, locs = NULL, ...){
  if (is.null(locs)) {
    # No locations supplied: lay out a regular grid over the requested extent.
    x.seq <- seq(from = grid.xlim[1], grid.xlim[2], length.out = grid.dim[1])
    y.seq <- seq(from = grid.ylim[1], to = grid.ylim[2], length.out = grid.dim[2])
    locs <- expand.grid(x.seq, y.seq)
  }
  # Simulate one field per basis function, collecting the realized values.
  sims <- vector("list", nfields)
  for (k in 1:nfields) {
    fit <- grf(n = NROW(locs), grid = locs, cov.model = cov.model[k],
               cov.pars = cov.pars[k, ], ...)
    sims[[k]] <- fit$data
    #image(fit); Sys.sleep(2)
  }
  list(coef = do.call(cbind, sims),
       locs = locs,
       cov.model = cov.model,
       cov.pars = cov.pars)
}
|
0ad296f9259aff272146ecf3e7b34b2c9cf29590
|
e0f41a9688e53c450a93831466f6768ee0679145
|
/2 R programming/week3/scoping_function.R
|
8cb07ea50a05001333ec6cd1512f3f85e6dc864a
|
[] |
no_license
|
roeeorland/Data-Science
|
7f4652ef4d9d433483a314641df912fc35589daa
|
5b3184c429e1cc010349045e7560a7a751676dac
|
refs/heads/master
| 2021-01-22T22:33:58.851268
| 2017-04-24T11:49:51
| 2017-04-24T11:49:51
| 85,548,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65
|
r
|
scoping_function.R
|
# Demonstrates R's lexical scoping rules.
y<-10
# f() defines its own local y = 2, so y^2 evaluates to 4 inside f().
f<-function(x)
{
y<-2
y^2+g(x)
}
# g() has no local y; lexical scoping resolves y in the environment where
# g() was *defined* (the global environment), so it uses y = 10, not f()'s
# local y = 2. Hence f(3) returns 2^2 + 3*10 = 34.
g<-function(x)
{
x*y
}
|
9510f2a08f188a542a8d06a1252ab4008f10b161
|
1f2ed7e0778776371702499954ab1b11d3ad3a4c
|
/man/ecbit.Rd
|
e599f28961b760445d4b574254f302a79410544a
|
[] |
no_license
|
cran/VGAMdata
|
1e3b653b5a9d4921535fb7d2e6d4191aa2d9201a
|
fbbb0beb0bf79fff712d1b994cf51de5cb3b176b
|
refs/heads/master
| 2023-04-07T05:39:02.437835
| 2023-01-11T19:20:02
| 2023-01-11T19:20:02
| 17,694,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,586
|
rd
|
ecbit.Rd
|
\name{ecb06.it}
\alias{ecb06.it}
\alias{ecb14.it}
\docType{data}
\title{Italian Household Data for 2006 and 2014}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
Part of the data collected
at two time points (2006 and 2014)
by the Bank of Italy, as part of
the European Central Bank's Eurosystem collection of statistics,
within the periodic
sample surveys on households, businesses and selected intermediaries.
}
%\usage{
%data(ecb06.it)
%data(ecb14.it)
%}
\format{
Data frame with the following 20 variables:
\describe{
\item{\code{ID}}{a numeric vector, a unique identification number of the
household.}
\item{\code{area}}{a factor with 5 levels,
the Italian geographic area or region
in which the household lives:
\code{NW} = North-West,
\code{NE} = North-East,
\code{C} = Center,
\code{S} = South,
\code{I} = Islands.
For users wanting a North--South contrast,
this variable might be coded as
\code{NC} = North and Center (NW, NE and C),
\code{SI} = South and Islands (S and I).
}
\item{\code{sex}}{a factor with 2 levels,
the gender of the head householder:
\code{M} = Male,
\code{F} = Female.
}
\item{\code{age}}{a numeric vector, age in years of the head householder.}
\item{\code{marital}}{a factor with 4 levels, marital status of the head
householder:
\code{married} = Married,
\code{single} = Single,
\code{separated} = Separated or divorced,
\code{widowed} = Widowed.
}
\item{\code{education}}{an ordered factor with 8 levels,
the education level of the head householder:
\code{none} = No education,
\code{primaryschool} = Primary school,
\code{middleschool} = Middle school,
\code{profschool} = Professional school,
\code{highschool} = High school,
\code{bachelors} = Bachelors degree,
\code{masters} = Masters degree,
\code{higherdegree} = Higher degree.
}
\item{\code{job}}{a factor with 7 levels,
the type of job done by the head householder:
\code{worker} = Worker,
\code{employee} = Employee,
\code{manager} = Manager,
\code{business} = Business person,
\code{other} = Other kind of independent job,
\code{retired} = Retired,
\code{unemployed} = Unemployed.
}
\item{\code{occupants}}{a numeric vector, the number of people living in
the same house.}
\item{\code{children}}{a numeric vector, the number of children of the head
householder living with him/her.}
\item{\code{other.children}}{a numeric vector, the number of children of the
head householder not living with the household.}
\item{\code{house.owned}}{a numeric vector, the ownership of the house in
which the householder lives;
\code{0} = The house is not owned,
\code{1} = The house is owned.
}
\item{\code{houses}}{a numeric vector, the number of houses owned
by the family, including the one in which the family lives.}
\item{\code{earners}}{a numeric vector, the number of people in the house
who have salary or some kind of earnings.}
\item{\code{accounts}}{a numeric vector, the number of bank accounts
collectively owned by the household.}
\item{\code{ccards}}{a numeric vector, the number of credit cards
collectively owned by the household.}
\item{\code{tot.income},
\code{dep.income},
\code{pens.income},
\code{self.income},
\code{cap.income}}{numeric vectors, the amount of income
(in Euros) collectively earned by the household through different
activities. The variables can be negative if the household has installments.
In order, they are the total amount of income,
the amount of income earned through dependent working,
the amount of income earned by the household through pensions,
the amount of income earned by the household through self-dependent working,
the amount of income earned by the household through capital investments.
}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
The European Central Bank's (ECB) Eurosystem requests and
helps organize
each country within the European Union to routinely collect statistical
information via their central banks.
These data frames are a subset from data collected by the
Bank of Italy.
Each year can be considered a cross-sectional study, although
there are some people common to each year. Hence the data
collectively can be considered partly a longitudinal study too.
}
\source{
Data was downloaded at \code{https://www.bancaditalia.it} in May 2016 by Lucia
Pilleri.
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
\emph{Supplements to the Statistical Bulletin,
Sample Surveys, Household Income and Wealth in 2006},
New series, Volume XVIII, Number 7--28, January 2008.
Banca D'Italia, Centro Stampa, Roma, Pubbl. Mensile,
\code{https://www.bancaditalia.it}.
}
\examples{
data(ecb06.it); data(ecb14.it)
summary(ecb06.it)
summary(ecb14.it)
\dontrun{
with(ecb14.it, table(house.owned))
with(ecb14.it, barplot(table(education), col = "lightblue"))
}
}
\keyword{datasets}
|
963e28ff93f660757501203df7b5f2ccb85f54f8
|
f26ea5d9d7488fd41ea17c17bf52ae3cf4f093c7
|
/man/getScale.Rd
|
cbd8057b6e15401edc8364aaf5d1989a1fde6391
|
[] |
no_license
|
cran/robustHD
|
b62265881b537151dd5990d64dc0a63379d52030
|
f348809d3fc47b880050183225863908867aca05
|
refs/heads/master
| 2023-01-30T18:45:24.631406
| 2023-01-18T13:50:02
| 2023-01-18T13:50:02
| 17,699,287
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,557
|
rd
|
getScale.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accessors.R
\name{getScale}
\alias{getScale}
\alias{getScale.seqModel}
\alias{getScale.sparseLTS}
\title{Extract the residual scale of a robust regression model}
\usage{
getScale(x, ...)
\method{getScale}{seqModel}(x, s = NA, ...)
\method{getScale}{sparseLTS}(x, s = NA, fit = c("reweighted", "raw", "both"), ...)
}
\arguments{
\item{x}{the model fit from which to extract the robust residual scale
estimate.}
\item{\dots}{additional arguments to be passed down to methods.}
\item{s}{for the \code{"seqModel"} method, an integer vector giving
the steps of the submodels for which to extract the robust residual scale
estimate (the default is to use the optimal submodel). For the
\code{"sparseLTS"} method, an integer vector giving the indices of the
models from which to extract the robust residual scale estimate. If
\code{fit} is \code{"both"}, this can be a list with two components, with
the first component giving the indices of the reweighted fits and the second
the indices of the raw fits. The default is to use the optimal model for
each of the requested estimators. Note that the optimal models may not
correspond to the same value of the penalty parameter for the reweighted
and the raw estimator.}
\item{fit}{a character string specifying from which fit to extract the
robust residual scale estimate. Possible values are \code{"reweighted"}
(the default) for the residual scale of the reweighted fit, \code{"raw"} for
the residual scale of the raw fit, or \code{"both"} for the residual scale
of both fits.}
}
\value{
A numeric vector or matrix giving the robust residual scale estimates for
the requested model fits.
}
\description{
Extract the robust scale estimate of the residuals from a robust regression
model.
}
\details{
Methods are implemented for models of class \code{"lmrob"} (see
\code{\link[robustbase]{lmrob}}), \code{"lts"} (see
\code{\link[robustbase]{ltsReg}}), \code{"rlm"} (see
\code{\link[MASS]{rlm}}), \code{"seqModel"} (see \code{\link{rlars}}) and
\code{"sparseLTS"} (see \code{\link{sparseLTS}}). The default method
computes the MAD of the residuals.
}
\examples{
data("coleman")
fit <- lmrob(Y ~ ., data=coleman)
getScale(fit)
}
\seealso{
\code{\link[=AIC.seqModel]{AIC}}, \code{\link[robustbase]{lmrob}},
\code{\link[robustbase]{ltsReg}}, \code{\link[MASS]{rlm}},
\code{\link{rlars}}, \code{\link{sparseLTS}}
}
\author{
Andreas Alfons
}
\keyword{regression}
|
a8466570e5addadfde2c16007831267a7ac15638
|
1f91bd700defe89b61f8d3f31e69fcf082948085
|
/ProgramaΓ§Γ£o em R/BΓ‘sico ProgramaΓ§Γ£o R/aula3_For_loops.R
|
276adcbed58a597ae092a4e8b2951697d71711f2
|
[] |
no_license
|
Soaresolavos/Data-Science-and-ML-with-R
|
f94a62e526099bac1157ae02abce7a211804a68f
|
ceab8c36db0f26f8b53377578deae0cc83cee943
|
refs/heads/main
| 2023-05-15T01:07:26.020931
| 2021-06-08T21:46:27
| 2021-06-08T21:46:27
| 375,152,113
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 431
|
r
|
aula3_For_loops.R
|
# For loops: iterating over vector elements and matrix entries.

# Loop over each element of a numeric vector, printing the element + 1.
v <- c(1, 2, 3, 4, 5)
for (temperatura in v) {
  resultado <- temperatura + 1
  print('A temperatura + 1 Γ© igual:')
  print(resultado)
}
# Matrices are stored in column-major order, so iterating over the matrix
# directly visits 1..25 column by column.
mat <- matrix(1:25, nrow = 5)
for (num in mat) {
  print(num)
}
# Nested loops over row and column indices print each element together
# with its position.
mat <- matrix(1:25, nrow = 5)
for (linha in 1:nrow(mat)) {
  for (coluna in 1:ncol(mat)) {
    print(paste('O elemento da linha: ', linha, 'e coluna:', coluna,' Γ© ', mat[linha, coluna]))
  }
}
|
c39a54d8750cb444131a8573b81bb9cf7b8ce092
|
0ccb2ef7d5d608d9c33ec1b68c176c17a7a3d888
|
/discovery_SNP/conditional_analysis_novel/code/conditional_analysis_2M_known.R
|
8faa76b493c96c20f34364b33c4826522d8c7c37
|
[] |
no_license
|
andrewhaoyu/breast_cancer_data_analysis
|
dce6788aa526a9a35fcab73564a457e8fabb5275
|
d84441e315e3ce135149e111014fa9807228ee7c
|
refs/heads/master
| 2023-05-31T20:59:44.353902
| 2023-05-15T19:23:30
| 2023-05-15T19:23:30
| 103,444,023
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,912
|
r
|
conditional_analysis_2M_known.R
|
#install_github("andrewhaoyu/bcutility",args = c('--library="/home/zhangh24/R/x86_64-pc-linux-gnu-library/3.5"'))
# Command-line argument i1 selects which novel-SNP / known-SNP pairing this
# array-job instance analyses.
args = commandArgs(trailingOnly = T)
i1 = as.numeric(args[[1]])
library(data.table)
library(bcutility)
setwd('/data/zhangh24/breast_cancer_data_analysis/')
# Annotated novel (discovery) SNPs and known fine-mapping SNPs.
discovery_snp <- as.data.frame(fread("./data/discovery_snps_annotated_clean.csv",header=T))
fine_mapping <- as.data.frame(fread("./data/fine_mapping_annotated_clean.csv"))
library(bc2)
# Window of +/- 2 Mb around each known SNP.
dis <- 2*10^6
check.data <- NULL
check.id <- NULL
# Pair every discovery SNP (28) with every known SNP (178) on the same
# chromosome within the 2 Mb window; record the index pairs (check.id) and
# interleaved name/position rows (check.data: discovery row followed by the
# matching known-SNP row).
# NOTE(review): the loop bounds 28 and 178 are hard-coded to the current
# input files -- confirm if those files change.
for(i in 1:28){
pos <- discovery_snp$position[i]
CHR <- discovery_snp$CHR[i]
for(j in 1:178){
pos.known <- fine_mapping$position[j]
CHR.known <- fine_mapping$CHR[j]
if(CHR==CHR.known&pos>=(pos.known-dis)&pos<=(pos.known+dis)){
print(c(i,j))
check.id <- rbind(check.id,c(i,j))
temp1 <- discovery_snp[i,c(1,3,2)]
colnames(temp1) <- c("SNP","CHR","Position")
temp2 <- fine_mapping[j,c(1,3,4)]
colnames(temp2) <- c("SNP","CHR","Position")
result <- rbind(temp1,temp2)
check.data <- rbind(check.data,result)
}
}
}
#write.csv(check.data,file="./data/check_SNPs.csv",row.names = F,col.names = T)
# iCOGS data set: phenotypes (tumor characteristics) plus genotypes.
data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
data1 <- as.data.frame(data1)
discovery.snp.icog <- as.data.frame(fread("/data/zhangh24/breast_cancer_data_analysis/data/discovery_icog_data.csv",header=T))
# Outcome matrix: case/control status and four tumor markers (may contain
# missing values -- hence the ".mis" suffix).
y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
# Grade1.fake <- data1$Grade1
# Grade1.fake[data1$Grade1==2|data1$Grade1==3] <- 1
# Grade1.fake[data1$Grade1==1] <- 0
#y.pheno.mis1 <- cbind(data1$Behaviour1,data1$PR_status1,data1$ER_status1,data1$HER2_status1,Grade1.fake)
# y.pheno.mis1 <- cbind(data1$Behaviour1,data1$PR_status1,data1$ER_status1,data1$HER2_status1)
#x.test.all.mis1 <- data1[,c(27:203)]
###pc1-10 and age
#i1 <- 4
# Rows of check.data that belong to the i1-th discovery SNP; the row after
# each matched discovery row holds the paired known SNP's name.
idx.check <- which(check.data[,1]==check.data[2*i1-1,1])
check.data[2*i1-1,1]
# Column indices in data1 of the known SNP(s) to condition on.
if(length(idx.check)==1){
idx.known <- which(colnames(data1)==check.data[idx.check+1,1])
}else{
idx.known <- NULL
for(i in 1:length(idx.check)){
idx.temp <- which(colnames(data1)==check.data[idx.check[i]+1,1])
idx.known <- c(idx.known,idx.temp)
}
}
colnames(data1)[idx.known]
# Covariates: columns 5:14 (PC1-10 per the comment above), the conditioned
# known-SNP genotypes, and later age (column 204).
x.covar1 <- cbind(data1[,c(5:14)],data1[,idx.known])
colnames(discovery.snp.icog)[check.id[i1,1]-10]
gene1 <- discovery.snp.icog[,check.id[i1,1]-10]
age <- data1[,204]
x.covar1 <- cbind(x.covar1,age)
# Age coded 888 means missing; restrict to complete cases.
idx.mis <- which(age==888)
idx.complete <- which(age!=888)
y.pheno.mis1 <- y.pheno.mis1[idx.complete,]
x.covar1 <- x.covar1[idx.complete,]
gene1 <- gene1[idx.complete]
# OncoArray data set: same preparation as for iCOGS above.
data2 <- fread("./data/Onco_euro_v10_10232017.csv",header=T)
discovery.snp.onco <- as.data.frame(fread("/data/zhangh24/breast_cancer_data_analysis/data/discovery_onco_data.csv"))
data2 <- as.data.frame(data2)
y.pheno.mis2 <- cbind(data2$Behaviour1,data2$ER_status1,data2$PR_status1,data2$HER2_status1,data2$Grade1)
#y.pheno.mis2 <- cbind(data2$Behaviour1,data2$PR_status1,data2$ER_status1,data2$HER2_status1)
colnames(y.pheno.mis2) = c("Behaviour","ER",
                           "PR","HER2","Grade")
# x.test.all.mis2 <- data2[,c(27:203)]
# Known-SNP columns in data2 (idx.check was computed from check.data above).
if(length(idx.check)==1){
idx.known <- which(colnames(data2)==check.data[idx.check+1,1])
}else{
idx.known <- NULL
for(i in 1:length(idx.check)){
idx.temp <- which(colnames(data2)==check.data[idx.check[i]+1,1])
idx.known <- c(idx.known,idx.temp)
}
}
x.covar2 <- cbind(data2[,c(5:14)],data2[,idx.known])
age <- data2[,204]
x.covar2 <- cbind(x.covar2,age)
# Age coded 888 means missing; restrict to complete cases.
idx.complete <- which(age!=888)
gene2 <- discovery.snp.onco[,check.id[i1,1]-10]
y.pheno.mis2 <- y.pheno.mis2[idx.complete,]
x.covar2 <- x.covar2[idx.complete,]
gene2 <- gene2[idx.complete]
#gene.known2 <- x.covar2[,11]
#idx.control <- which(y.pheno.mis2[,1]==0)
#cor(gene2[idx.control],gene.known2[idx.control])^2
# Fit the two-stage random-effect model combining both data sets,
# conditioning on the nearby known SNP(s) through the covariate matrices.
# GenerateZstandard/two_data_two_stage_random come from the loaded packages.
z.standard <- GenerateZstandard(y.pheno.mis1)
z.random.support <- cbind(1,z.standard[,1])
z.random.test <- z.standard[,2:4]
p.value.two.stage.model <- two_data_two_stage_random(y.pheno.mis1,
                                                     gene1,
                                                     x.covar1,
                                                     y.pheno.mis2,
                                                     gene2,
                                                     x.covar2)
# data1 <- fread("./data/iCOGS_euro_v10_10232017.csv",header=T)
# data1 <- as.data.frame(data1)
# discovery.snp.icog <- as.data.frame(fread("/data/zhangh24/breast_cancer_data_analysis/data/discovery_icog_data.csv",header=T))
# y.pheno.mis1 <- cbind(data1$Behaviour1,data1$ER_status1,data1$PR_status1,data1$HER2_status1,data1$Grade1)
# colnames(y.pheno.mis1) = c("Behavior","ER","PR","HER2","Grade")
#
# country1 <- as.factor(data1[,3])
# x.covar1 <- cbind(data1[,c(5:14)],data1[,idx.known],country1)
# gene1 <- discovery.snp.icog[,check.id[i1,1]-10]
#
# idx.check <- which(check.data[,1]==check.data[2*i1-1,1])
# if(length(idx.check)==1){
# idx.known <- which(colnames(data1)==check.data[idx.check+1,1])
# }else{
# idx.known <- NULL
# for(i in 1:length(idx.check)){
# idx.temp <- which(colnames(data1)==check.data[idx.check[i]+1,1])
# idx.known <- c(idx.known,idx.temp)
# }
# }
#
# data2 <- fread("./data/Onco_euro_v10_10232017.csv",header=T)
# discovery.snp.onco <- as.data.frame(fread("/data/zhangh24/breast_cancer_data_analysis/data/discovery_onco_data.csv"))
#
# data2 <- as.data.frame(data2)
# y.pheno.mis2 <- cbind(data2$Behaviour1,data2$ER_status1,data2$PR_status1,data2$HER2_status1,data2$Grade1)
# #y.pheno.mis2 <- cbind(data2$Behaviour1,data2$PR_status1,data2$ER_status1,data2$HER2_status1)
# colnames(y.pheno.mis2) = c("Behaviour","ER",
# "PR","HER2","Grade")
#
# # x.test.all.mis2 <- data2[,c(27:203)]
# if(length(idx.check)==1){
# idx.known <- which(colnames(data2)==check.data[idx.check+1,1])
# }else{
# idx.known <- NULL
# for(i in 1:length(idx.check)){
# idx.temp <- which(colnames(data2)==check.data[idx.check[i]+1,1])
# idx.known <- c(idx.known,idx.temp)
# }
# }
# country2 <- as.factor(data2[,4])
# x.covar2 <- cbind(data2[,c(5:14)],data2[,idx.known],country2)
# gene2 <- discovery.snp.onco[,check.id[i1,1]-10]
# #age <- data2[,204]
#
# p.value.standard <- two_data_standard_anlysis(y.pheno.mis1,
# gene1,
# x.covar1,
# y.pheno.mis2,
# gene2,
# x.covar2)
# Persist the conditional-analysis p-value for array-job index i1.
result <- list(p.value.two.stage.model=p.value.two.stage.model
)
save(result,file=paste0("./discovery_SNP/conditional_analysis_novel/result/novel_conditional_result",i1,".Rdata"))
|
fc4f69fefa620766d1eafc8381c53c65fff81c32
|
b309bd451ea093e366c91dde005f60895e0adb00
|
/munge/03_norm_eval(duplicate).R
|
fad3c49d94d8dec102d0369ee52f6971ed2b1101
|
[] |
no_license
|
WaverlyWei/FA-meth450k
|
0ea3e28aa5f8799b610ff2772ff7a9d303651e23
|
706de94aca5da072e0438e69154793c9a2538f2e
|
refs/heads/master
| 2020-04-16T21:45:17.153018
| 2019-04-22T23:41:51
| 2019-04-22T23:41:51
| 165,938,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,788
|
r
|
03_norm_eval(duplicate).R
|
################################################################################
# In this script we estimate the correlation between technical replicates as a
# way to evaluate the normalizations.
################################################################################
# Set up workspace
library(here)
library(limma)
# setting project and data directories
replicates_norm_data <- here("data", "replicates_norm_data.RData")
# only run the (slow) correlation estimation if the cached results do not
# exist yet; otherwise load them from disk (see the else branch below)
if (!file.exists(normalizePath(replicates_norm_data))) {
  # Estimate the within-subject (technical-replicate) consensus correlation
  # of the supplied value matrix via limma::duplicateCorrelation(); a higher
  # correlation suggests the normalization preserves replicate agreement.
  cor_est <- function(dat=pheno, exp){
    # design setup
    Condition <- factor(dat$dich_tce)
    design <- model.matrix(~0+Condition)
    colnames(design) <- levels(Condition)
    # calculate correlation within subjects
    corfit <- duplicateCorrelation(exp, design, block=dat$Subject)
    return(corfit)
  }
  pheno_PS <- read.csv(file=here("data", "pheno_PS.csv"))
  rownames(pheno_PS) <- pheno_PS[,1]
  pheno_PS <- pheno_PS[,-1]
  # NOTE(review): RGset_filtered is assumed to already be in the workspace;
  # this comparison only prints the sample-name agreement and does not stop
  # on a mismatch -- consider stopifnot() here.
  colnames(RGset_filtered) == rownames(pheno_PS)
  load(here("data", "norm_data.RData"))
  # Warning messages:
  # 1: In glmgam.fit(dx, dy, coef.start = start, tol = tol, maxit = maxit,  :
  #   Too much damping - convergence tolerance not achievable
  # ENmix_BMIQ
  corfit_ENmix_BMIQ <- cor_est(dat=pheno_PS, exp=mvals_ENmix_BMIQ)
  corfit_ENmix_BMIQ$cor
  # [1] 0.2720031
  # 0.2933981  (value recorded on a previous run)
  # ENmix_RCP
  corfit_ENmix_RCP <- cor_est(dat=pheno_PS, exp=mvals_ENmix_RCP)
  corfit_ENmix_RCP$cor
  # [1] 0.2818666
  # 0.3046645
  # Funnorm
  corfit_Funnorm <- cor_est(dat=pheno_PS, exp=mvals_Funnorm)
  corfit_Funnorm$cor
  # [1] 0.2768499
  # 0.2696287
  # Illumina
  corfit_Illumina <- cor_est(dat=pheno_PS, exp=mvals_Illumina)
  corfit_Illumina$cor
  # [1] 0.3730618
  # 0.4167187
  # Quantile
  corfit_Quantile <- cor_est(dat=pheno_PS, exp=mvals_Quantile)
  corfit_Quantile$cor
  # [1] 0.2554466
  # 0.2692027
  # Raw
  corfit_Raw <- cor_est(dat=pheno_PS, exp=mvals_Raw)
  corfit_Raw$cor
  # [1] 0.241971
  # 0.2610215
  # Noob
  corfit_Noob <- cor_est(dat=pheno_PS, exp=mvals_Noob)
  corfit_Noob$cor
  # [1] 0.2558961
  # 0.2803224
  # SWAN_ENmix
  corfit_SWAN_ENmix <- cor_est(dat=pheno_PS, exp=mvals_SWAN_ENmix)
  corfit_SWAN_ENmix$cor
  # [1] 0.2630327
  # 0.284417
  # SWAN_Noob
  corfit_SWAN_Noob <- cor_est(dat=pheno_PS, exp=mvals_SWAN_Noob)
  corfit_SWAN_Noob$cor
  # [1] 0.2441743
  # 0.2628474
  # transform back to the correlation scale and plot a histogram
  # have the average correlation and the IQR
  #############################################################################
  ###### save the relevant data
  #############################################################################
  save(
    corfit_ENmix_BMIQ, corfit_ENmix_RCP, corfit_Funnorm,
    corfit_Illumina,corfit_Quantile, corfit_Raw,
    corfit_Noob, corfit_SWAN_ENmix, corfit_SWAN_Noob,
    file = here("data", "replicates_norm_data.RData"))
} else {
  # BUG FIX: the original called load(replicates_data), an undefined
  # variable; the cached file path is stored in replicates_norm_data.
  load(replicates_norm_data)
}
|
b87edc553cc29f4df43d75469ae0a4f4edd9fdec
|
cab1356eaf5956e0b9d8d6e72583f74160840cc1
|
/man/quantileCI.Rd
|
21c1d199043b972fa416c14d370209670bdf928a
|
[] |
no_license
|
genome-vendor/r-cran-mkmisc
|
b6f3f9a0a2a70012706f2f045a3e8ed3a8eee74a
|
c072292326cc728de8eb8c51a483ab4f033bfb56
|
refs/heads/master
| 2021-01-10T19:24:11.540172
| 2014-10-31T19:21:46
| 2014-10-31T19:21:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,922
|
rd
|
quantileCI.Rd
|
\name{quantileCI}
\alias{quantileCI}
\alias{medianCI}
\title{ Confidence Intervals for Quantiles }
\description{
These functions can be used to compute confidence intervals for sample quantiles, in particular the median.
}
\usage{
quantileCI(x, prob = 0.5, conf.level = 0.95, method = "exact", na.rm = FALSE)
medianCI(x, conf.level = 0.95, method = "exact", na.rm = FALSE)
}
\arguments{
\item{x}{ numeric data vector }
\item{prob}{ quantile }
\item{conf.level}{ confidence level }
  \item{method}{ character string specifying which method to use; see details. }
\item{na.rm}{ logical, remove \code{NA} values. }
}
\details{
The exact confidence interval (\code{method = "exact"}) is computed using binomial
probabilities; see Section 6.8.1 in Sachs and Hedderich (2009). If the result is not
unique, i.e. there is more than one interval with coverage probability closest to
\code{conf.level}, then a matrix of confidence intervals is returned.
The asymptotic confidence interval (\code{method = "asymptotic"}) is based on the
normal approximation of the binomial distribution; see Section 6.8.1 in Sachs and Hedderich (2009).
}
\value{
A list with components
\item{estimate}{ the sample quantile. }
\item{CI}{ a confidence interval for the sample quantile. }
}
\references{
L. Sachs and J. Hedderich (2009). Angewandte Statistik. Springer.
}
\author{ Matthias Kohl \email{Matthias.Kohl@stamats.de}}
\seealso{\code{\link[stats]{binom.test}}, \code{\link[Hmisc]{binconf}}}
\examples{
## To get a non-trivial confidence interval for the median
## one needs at least 6 observations
set.seed(123)
x <- rnorm(8)
## exact confidence interval not unique
(res <- medianCI(x))
## asymptotic confidence interval
medianCI(x, method = "asymptotic")
## length of exact intervals
res$CI[,2]-res$CI[,1]
## confidence interval for quantiles
quantileCI(x, prob = 0.4)
quantileCI(x, prob = 0.6)
}
\keyword{univar}
|
3a8159d56c4e6423acb352d8f4c62dedf49fbf75
|
230f40513fd7169f354f7995919023321eecfd93
|
/R/AdjSpec_AllPoints_curve.R
|
157740c041ef1a0f5ec41b2998aafb94d57f4107
|
[] |
no_license
|
cran/caROC
|
3f665f7d0cfeb305d8cffff85addc9e2d9e3465b
|
6c243aa38c14806fac04fefb5fb164e189fc920a
|
refs/heads/master
| 2023-03-30T16:32:56.914227
| 2021-04-02T07:20:03
| 2021-04-02T07:20:03
| 354,063,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
r
|
AdjSpec_AllPoints_curve.R
|
# Compute the covariate-adjusted ROC curve (sensitivity/specificity pairs)
# from quantile regressions fitted on the diseased sample.
AdjSpec_AllPoints_curve <- function(diseaseData,
                                    controlData,
                                    userFormula) {
  # Split the user formula into the biomarker name (outform[[1]]) and the
  # covariate name(s) (outform[[2]]); ParseuserFormula is defined elsewhere
  # in the package.
  outform <- ParseuserFormula(userFormula = userFormula)
  # Biomarker values in controls (M0) and diseased subjects (M1).
  # NOTE(review): built via eval(parse(...)); controlData[[outform[[1]]]]
  # would be safer, but the rewrite is left pending confirmation of
  # ParseuserFormula's output format.
  M0 <- eval(parse(text = paste("controlData$",outform[[1]])))
  M1 <- eval(parse(text = paste("diseaseData$",outform[[1]])))
  # Covariate extraction, again via constructed expression strings.
  if (length(outform[[2]]) == 1) {
    expr2 <- paste0("controlData$", outform[[2]])
    expr3 <- paste0("diseaseData$", outform[[2]])
  } else if (length(outform[[2]]) > 1) {
    # NOTE(review): assumes outform[2] pastes into a valid column index
    # expression when there are multiple covariates -- TODO confirm.
    expr2 <- paste0("controlData[, ", outform[2], "]")
    expr3 <- paste0("diseaseData[, ", outform[2], "]")
  }
  all_Z_C <- eval(parse(text = expr2))
  all_Z_D <- eval(parse(text = expr3))
  # Quantile regression on the diseased sample; tau = -1 requests the whole
  # solution path (see quantreg::rq documentation).
  rqfit <- NULL
  expr1 <- paste0("rqfit <- rq(", userFormula, ", tau = -1, data = diseaseData)")
  eval(parse(text = expr1))
  # In rqfit$sol, row 1 holds the tau grid and rows 4+ the coefficients.
  all_tau <- rqfit$sol[1, ]
  # NOTE(review): sens_vec is computed but never used below.
  sens_vec <- 1 - all_tau
  est_b <- rqfit$sol[4:(nrow(rqfit$sol)),]
  # Covariate-specific thresholds for each control subject and each tau.
  control_threshold_q <- as.matrix(cbind(1, all_Z_C)) %*% est_b
  # Adjusted specificity: fraction of controls at or below the threshold.
  spec_adj <- rep(0, length(all_tau))
  spec_adj <- colSums(M0 <= control_threshold_q)/length(M0)
  # Enforce monotonicity of the specificity curve starting from tau = 0.5.
  tmpout <- MonoRespect(tau = all_tau, orig_measure = spec_adj, startTau = 0.5)
  spec_curve <- tmpout$new_meas
  sens_curve <- 1-tmpout$tau
  return(list(sensitivity = sens_curve,
              specificity = spec_curve,
              mono_adj = "ROC"))
}
|
cab56abb1e389c005d42cfebc5e5cec4b6ed9060
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TwoStepCLogit/tests/bison_example.R
|
b1cf8e90f95996fded320dcea324a5402aa1cd56
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 829
|
r
|
bison_example.R
|
# Example analysis of the bison habitat-selection data.
# NOTE(review): ddim(), Ts.estim() and the bison data set come from the
# enclosing package (two-step estimation for conditional logistic
# regression); this script is not standalone.
# Some descriptive statistics about the data set:
ddim(formula = Y ~ strata(Strata) + cluster(Cluster), data = bison)
# Model 1: covariates meadow, biomass and biomass^2
# Random effects in front of biomass and biomass^2
# Main diagonal covariance structure for D
Fit1 <- Ts.estim(formula = Y ~ meadow + biomass + I(biomass^2) +
      strata(Strata) + cluster(Cluster), data = bison,
      random = ~ biomass + I(biomass^2), all.m.1=FALSE, D="UN(1)")
Fit1
# Model 2: only covariates biomass and biomass^2
# Random effects in front of biomass and biomass^2
# Main diagonal covariance structure for D
Fit2 <- Ts.estim(formula = Y ~ biomass + I(biomass^2) + strata(Strata) +
      cluster(Cluster), data = bison, all.m.1=FALSE, D="UN(1)")
Fit2
# Results reported in Table 2 of Craiu et al. (2011).
|
526f0656bcbe96f2bba7bfcfd3858739e7043691
|
01e953fe3ca03c2b20a8cfdfce3debe0e95e8a27
|
/install.R
|
433f29a4288ee2c794d8184cf30c8a65e4ab8c08
|
[] |
no_license
|
abotzki/datasc_mofa
|
110fe05bd9c0f1886c78f62c39390a28a5a6f285
|
52cd13c971e58ef811c3c7ed6e2ceef033bc6885
|
refs/heads/master
| 2020-03-14T07:18:35.409594
| 2018-04-29T18:47:43
| 2018-04-29T18:47:43
| 131,502,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88
|
r
|
install.R
|
# Install devtools (needed for GitHub installs), then the MOFAtools package
# from the subdirectory of the PMBio/MOFA repository.
install.packages("devtools")
devtools::install_github("PMBio/MOFA", subdir="MOFAtools")
|
f4ac6607c0a9cf87a317c9c7c6a789ed60c6660f
|
73c2e9b0af9f7e9fc5299cde844b3527e0d79489
|
/R/openmi.om.runtimeController.R
|
be94f5b2e9dd5ad6f34b8447468bbf5c3f7ad3c3
|
[] |
no_license
|
HARPgroup/openmi-om
|
0907fc362423a48d67f704511b9a112424bc465d
|
166d76ca5f964bf6efcee105f917e1af07182722
|
refs/heads/master
| 2023-03-08T17:27:17.716249
| 2023-02-27T14:47:31
| 2023-02-27T14:47:31
| 149,749,781
| 0
| 0
| null | 2023-02-27T14:47:33
| 2018-09-21T10:41:26
|
R
|
UTF-8
|
R
| false
| false
| 1,771
|
r
|
openmi.om.runtimeController.R
|
#' The base class for meta-model simulation control.
#'
#' @description Class providing a runnable model controller
#' @details Will iterate through time steps from model timer start to end time, executing all child components
#' @importFrom R6 R6Class
#' @return R6 class of type openmi.om.runtimeController
#' @seealso NA
#' @export openmi.om.runtimeController
#' @examples NA
openmi.om.runtimeController <- R6Class(
"openmi.om.runtimeController",
inherit = openmi.om.linkableComponent,
public = list(
#' @description create controller
#' @return R6 class
initialize = function() {
self$timer <- openmi.om.timer$new()
},
#' @return boolean if model variables are sufficient to run
checkRunVars = function() {
if (!is.null(self$timer)) {
if (is.null(self$timer$starttime)) {
print("Timer$starttime required. Exiting.")
return(FALSE)
} else {
if (is.null(self$timer$endtime)) {
print("Timer$endtime required. Exiting.")
return(FALSE)
}
}
} else {
print("Timer object is not set. Exiting.")
return(FALSE)
}
return(TRUE)
},
#' @return NA
run = function() {
runok = self$checkRunVars()
if (runok) {
while (self$timer$status != 'finished') {
print(paste("Model update() @", self$timer$thistime,sep=""))
self$step()
}
print("Run completed.")
} else {
print("Could not complete run.")
}
},
#' @return NA
update = function() {
super$update()
# Update the timer afterwards
self$timer$update()
},
#' @return NA
init = function() {
super$init()
self$timer$init()
}
)
)
|
17f1c857b9e64193e5750fca6e84c490b4455752
|
b7dbc8fa280edb6215a6260e1401e0f83b9954b0
|
/OpenDataGroup/cpir/man/subindices.Rd
|
60e2f280714415221dc4536bdca04ca1f96d8c10
|
[] |
no_license
|
cwcomiskey/Misc
|
071c290dad38e2c2e6a5523d366ea9602c4c4e44
|
1fad457c3a93a5429a96dede88ee8b70ea916132
|
refs/heads/master
| 2021-05-14T18:10:35.612035
| 2020-02-17T15:09:51
| 2020-02-17T15:09:51
| 116,065,072
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 425
|
rd
|
subindices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{subindices}
\alias{subindices}
\title{Example Subindices (ECs)}
\format{An object of class \code{data.frame} with 100 rows and 69 columns.}
\usage{
subindices
}
\description{
The subindices, and associated date, used to calculate aggregate index; in this case Expenditure Classes to Calculate CPI.
}
\keyword{datasets}
|
bd11676ac2fa7ef7dce315ded19be3a06b35c67c
|
3c3d1ea5eadf294bf0b8275b35d71efdea13835f
|
/R/scrape_chart_ts.R
|
cc13329d869f9ce883f51a575b086652d611fbf6
|
[] |
no_license
|
ces0491/webScrapeR
|
45bd4921a015353f8e4dfe5c2938ecbd9070ecbc
|
f5be62f4c11c2cc807cdbff4a031c9ee933d6f14
|
refs/heads/master
| 2023-06-18T20:50:17.521530
| 2021-07-15T17:34:52
| 2021-07-15T17:34:52
| 290,697,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 845
|
r
|
scrape_chart_ts.R
|
#' scrape timeseries data from highcharts
#'
#' Extracts the JSON feed URL embedded in the page's jQuery.getJSON() call,
#' downloads it, and reshapes the "quote" element into one row per time
#' point with a parsed date column.
#'
#' @param pjs_session phantom JS session object
#'
#' @return \code{data.frame} containing date and value
#' @export
#'
scrape_chart_ts <- function(pjs_session) {
  pg_source <- unlist(pjs_session$getSource())
  # Capture the URL passed to jQuery.getJSON('...') in the page source.
  source_data_link <- stringr::str_match(pg_source, "jQuery.getJSON\\('(.*?)',")[,2]
  # Undo HTML entity escaping ("&") in the query string.
  source_data_link_clean <- stringr::str_remove_all(source_data_link, "amp;")
  source_data <- jsonlite::fromJSON(source_data_link_clean)
  data_quote <- tibble::enframe(source_data$quote)
  chart_data_df <- data_quote %>%
    tidyr::unnest(value) %>%
    dplyr::mutate(name = as.numeric(name)) %>%
    # Rows alternate between x and y of each point (odd rows = x); the
    # x/1000 conversion below implies x is epoch milliseconds -- assumption
    # based on this code only, confirm against the feed.
    dplyr::mutate(variable = ifelse((dplyr::row_number() %% 2) == 1, "x", "y")) %>%
    tidyr::spread(variable, value) %>%
    dplyr::mutate(date = as.Date(anytime::anytime(x/1000)))
  chart_data_df
}
|
da7ef5a284720677570c2ef82026fb51b0320a92
|
ca21e692e45f0cafe76cb864aad5c1f363ef8123
|
/gym/R/gym.R
|
b486dbad5cdc36871c131c9e83af3221a0eda499
|
[] |
no_license
|
hal2001/gym-R
|
48b4d3ea3faaecdd4dc35cf6d894601b6bd056aa
|
52237a6de5d57dcf7dde2164435e910cba2287cb
|
refs/heads/master
| 2020-04-16T11:34:08.471198
| 2017-07-01T19:46:13
| 2017-07-01T19:46:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 87
|
r
|
gym.R
|
#' gym: Provides Access to the OpenAI Gym API
#'
#' @docType package
#' @name gym
NULL
|
f8c3324e5d43c4ab5e7db28e910038b1645018ac
|
949b3e51e617484c889a410913a822f424562e28
|
/K Means Segmentation in R/KMeans_Segmentation_v1.R
|
277d489f5d68aee372052703563f2dfdfb03fe4f
|
[] |
no_license
|
Shafquat/Machine-Learning-Tensorflow
|
ab3e9e7ad5fc9d100725e3aa1ef60697b2cfb6b1
|
6d49d3a09c072274e8ed242b117b128afca8043f
|
refs/heads/master
| 2020-09-26T11:42:44.628031
| 2019-12-06T05:39:43
| 2019-12-06T05:39:43
| 226,248,284
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,811
|
r
|
KMeans_Segmentation_v1.R
|
##################################
# Code for Segmentation
#################################################################
# ###########################################################
# Analysing the Grocery Store data
# NOTE(review): the column names below (movie attendance, concessions,
# Cineplex/Scene points) suggest cinema loyalty data rather than grocery
# data -- confirm this header.
# ###########################################################
# load from the file location
# load by browsing
seg.raw <- read.csv(file.choose())
# check column names
names(seg.raw)
head(seg.raw) # info on 300 customers and their subscription decisions
# Male Dummy
seg.raw$Male[seg.raw$gender=='Female'] <- 0
seg.raw$Male[seg.raw$gender=='Male'] <- 1
# ###########################################################
# a simple function to report means by group
# if the data is solely numeric you can simply use the aggregate() function to do this
seg.summ <- function(data, groups) {
  aggregate(data, list(groups), function(x) mean(as.numeric(x)))
}
# Illustration of how seg.summ() works
seg.summ(seg.raw, seg.raw$Segment)
str(seg.raw)
#### K-MEANS CLUSTERING
# convert factor variables to numeric (kmeans requires). OK b/c all are binary.
seg.df.num <- seg.raw[c(
  'PointsTotal.x',
  'Age',
  'OnlineTicketPurchaser_value',
  'TuesdayAttendee_value',
  'ConcessionPurchaser_value',
  'AttendsWithChild_value',
  'WeekendMatineeViewer_value',
  'WeekdayMatineeViewer_value',
  'BlackEarnCount',
  'BlackEarnPointTotal',
  'BlackBurnCount',
  'BlackBurnPointTotal',
  'BlackActivityDays',
  'AccountOpenD2D',
  'PacPadD2D',
  'VISACineplex',
  'VISA',
  'CineplexIssue',
  'CineplexRedeem',
  'PointsScene',
  'CARAIssue',
  'CARARedeem'
)]
summary(seg.df.num)
# Determine number of clusters
# My goal is to minimize the within cluster variance
# The lines below calculate the within-cluster sum of squares for k = 1..15
wss <- (nrow(seg.df.num)-1)*sum(apply(seg.df.num,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(seg.df.num,
                                     centers=i)$withinss)
# The elbow graph to gauge the number of clusters
plot(1:15, wss, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares")
set.seed(96743) # set seed to make sure the random assignment starts at the same point
seg.k4 <- kmeans(seg.df.num, centers=4)
# inspect the 4-segment solution by looking at the means per segment per numeric
seg.summ(seg.df.num, seg.k4$cluster)
|
894ab86c954fc8a65262b690aa202279e1b590b9
|
fbe83f5d30aef93e099354294ca0784ad3e99274
|
/man/check_form3.2_box7_abdo.Rd
|
86160b13a192cd2507287c06b70fe46e8cceed65
|
[] |
no_license
|
EddieZhang540/INORMUS
|
fcbf1b424cc9209a82b4b38d7b51df9e53385afd
|
155dea53b00bc488dc8061ae0b4c0aa4fc60f2a8
|
refs/heads/main
| 2023-06-04T09:34:00.792724
| 2021-06-06T01:30:29
| 2021-06-06T01:30:29
| 376,072,158
| 0
| 0
| null | 2021-06-11T15:44:47
| 2021-06-11T15:44:47
| null |
UTF-8
|
R
| false
| true
| 451
|
rd
|
check_form3.2_box7_abdo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checks.R
\name{check_form3.2_box7_abdo}
\alias{check_form3.2_box7_abdo}
\title{Filters out invalid rows for the abdomen box 7 of form3.2}
\usage{
check_form3.2_box7_abdo(form)
}
\arguments{
\item{form}{form3.2}
}
\value{
a form containing ptstatus and a data frame containing all the invalid rows
}
\description{
Filters out invalid rows for the abdomen box 7 of form3.2
}
|
0f007fd8c718f95f9f2761c486b9bd82f3ef0e5a
|
820289a6667b00d3a4c7c35d4cd4b9b43b6b1ebf
|
/code.R
|
64fe07ab2c40e56b7ef661d00a69076d696b12b4
|
[] |
no_license
|
Butrait/Loan-Prediction
|
c0b392032bd89fec40767984cb9a1c82f0d3bfae
|
1eefc0839b82a4e6b4ee4e73acb6d927a630f08b
|
refs/heads/master
| 2020-04-29T14:05:59.687579
| 2017-06-25T10:15:45
| 2017-06-25T10:15:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,935
|
r
|
code.R
|
# Loan approval prediction: load customer data, engineer features, reduce
# the predictors to two principal components, and fit a naive Bayes model.
# NOTE(review): setwd()/install.packages() inside an analysis script are
# fragile -- consider project-relative paths and declaring dependencies.
setwd("C:/Users/Divya Sharma/Desktop/myprojects/loan")
customer <- read_csv("C:/Users/Divya Sharma/Desktop/myprojects/loan/customer.csv")
str(customer)
head(customer)
any(is.na(customer))
#Dependent Variables
# Debt-to-income ratio and the binary approval outcome (1 = not denied).
customer$dti<-customer$debts/customer$income
customer$loan_decision_status <- ifelse(customer$loan_decision_type == 'Denied', 0, 1)
customer$loan_decision_status<-factor(customer$loan_decision_status,levels=c(0,1))
customer_loan_refined <- customer[,c(3,4,6:8,11,13:14)]
head(customer_loan_refined)
# Encode the categorical predictors as numeric codes.
customer_loan_refined$gender <- as.numeric(factor(customer_loan_refined$gender,levels = c('Male','Female'), labels = c(1,2)))
customer_loan_refined$marital_status <- as.numeric(factor(customer_loan_refined$marital_status, levels = c('Divorced','Married','Single'),labels = c(1,2,3)))
customer_loan_refined$occupation <- as.numeric(factor(customer_loan_refined$occupation,levels = c('Accout','Business','IT','Manager','NYPD'),labels = c(1,2,3,4,5)))
customer_loan_refined$loan_type <- as.numeric(factor(customer_loan_refined$loan_type,levels = c('Auto','Credit','Home','Personal'),labels = c(1,2,3,4)))
head(customer_loan_refined)
install.packages('caTools')
#Partitioning
library(caTools)
set.seed(123)
# Stratified 70/30 train/test split on the outcome.
split = sample.split(customer_loan_refined$loan_decision_status, SplitRatio = 0.70)
training_set = subset(customer_loan_refined, split == TRUE)
test_set = subset(customer_loan_refined, split == FALSE)
#Applying Feature Scaling
# Column 8 is the outcome factor; scale only the numeric predictors.
training_set[-8] = scale(training_set[-8])
test_set[-8] = scale(test_set[-8])
head(training_set)
#Dimensionality Reduction using PCA(linear combination)
install.packages('caret')
install.packages('ggplot2')
install.packages('colorspace')
install.packages('iterators')
library(caret)
pca = preProcess(x = training_set[-8], method = 'pca', pcaComp = 2)
training_set_pca = predict(pca, training_set)
training_set_pca = training_set_pca[c(2, 3, 1)]
test_set_pca = predict(pca, test_set)
test_set_pca = test_set_pca[c(2, 3, 1)]
head(test_set_pca)
#Naive Bayes Classification
# BUG FIX: the package name was misspelled 'e0171', which would fail to
# install; naiveBayes() below is provided by e1071.
install.packages('e1071')
library(e1071)
classifier=naiveBayes(x=training_set_pca[-3],y=training_set_pca$loan_decision_status)
y_pred=predict(classifier,newdata=test_set_pca[-3])
#Confusion Matrix
confusionMatrix(table(test_set_pca[,3],y_pred))
#Visualization
install.packages("ElemStatLearn")
library(ElemStatLearn)
set=test_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('PC1', 'PC2')
y_grid = predict(classifier, newdata = grid_set)
#Plotting
plot(set[, -3], main = 'Naive Bayes (Test set)', xlab = 'PC1', ylab = 'PC2', xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
ee3bf7dfd3556ab577f09d946abc09408cca99ef
|
f4460d114a82aaf3ae848d2ec110abab2bae4d08
|
/Lab 3/ex_2.6.r
|
ee3b78be1b77a1f7181bd55350249964eae032bb
|
[] |
no_license
|
UTBM-AGH-courses/agh-data-mining
|
9bb84b0eea20f756cb0a06778188b0a601ed2465
|
ba19a6b68e43c426fdd07d56bf53f9bb6788f65e
|
refs/heads/master
| 2023-02-09T19:50:45.972503
| 2021-01-11T14:56:01
| 2021-01-11T14:56:01
| 307,353,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 605
|
r
|
ex_2.6.r
|
#!/usr/bin/env Rscript
# Aggregate florist sales per (bouquet, week, year), bin the weekly totals
# into sale-size categories, and plot the distribution of those categories.
florist <- read.table("florist.txt", header = T, sep = ",")
# Sum of prices per bouquet/week/year combination
florist_by_week <- aggregate(florist$price, by = list(florist$bouquet, florist$week, florist$year), FUN = sum)
names(florist_by_week) <- c('bouquet', 'week', 'year', 'total')
# Order chronologically: by year, then week within year
florist_by_week <- florist_by_week[order(florist_by_week$year, florist_by_week$week),]
# Bin weekly totals: (0,1000] low, (1000,2000] normal, (2000,3000] high,
# (3000,10000] extreme; totals outside (0, 10000] become NA
cut_florist_by_week <- cut(florist_by_week$total, labels=c("low_sale", "normal_sale", "high_sale", "extreme_sale"), breaks=c(0, 1000, 2000, 3000, 10000))
table_cut <- table(cut_florist_by_week)
summary(cut_florist_by_week)
# NOTE(review): the png device is never closed (no dev.off()), so the image
# may not be flushed to disk until the session ends -- confirm intended.
png("hist_2.6.png")
plot(table_cut)
|
6806250e5ec0be0be3ccbe950afe84307c6981c6
|
ea166fae9420fbbfd99b965e8e5db1d05a772b81
|
/utils.R
|
43cac384f6d6ca439560f2f48be1ddd1138a082e
|
[] |
no_license
|
mkukielka/survival_analysis
|
d4bf9e544c2a0f45f43aa8f5b658effe876aa466
|
d95c526d62b3ec83393494edf1e465c5f013217f
|
refs/heads/main
| 2023-03-29T02:09:52.956769
| 2021-04-07T10:24:19
| 2021-04-07T10:24:19
| 355,501,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,979
|
r
|
utils.R
|
source('constants.R')
# Attach every package the analysis depends on, silencing startup chatter.
# Fix: the original listed library("dplyr") twice; the duplicate call was
# removed (library() is idempotent, so behavior is unchanged).
load.packages <- function() {
  suppressPackageStartupMessages({
    library("tidyverse")
    library("Xmisc")
    library("survival")
    library("dplyr")
    library("survminer")
    library("mltools")
    library("tidyr")
    library("stringr")
    library("caret")
    library("rlist")
    library("mefa")
    library("doParallel")
    library("openxlsx")
  })
}
# Load a survival-analysis dataset for the requested cancer type.
#
# Args:
#   cancer_type: "luad", "lusc", or "combined" (case-insensitive).
#   stats_type: statistics flavor forwarded to the loader
#     (e.g. "prevalence" or "microenvironment"; see get.stats.type()).
#   add_pathological_stage, windowed_data, add_smoking_data: flags forwarded
#     unchanged to the underlying load.* helpers.
#   unified: if TRUE dispatch to the load.unified.* loaders (which also take
#     mutations_variant and high_low_classes); otherwise the plain loaders.
#   mutations_variant, high_low_classes: used only by the unified loaders.
#
# Returns: whatever the dispatched load.* helper (defined elsewhere) returns.
# Stops with an error for unsupported cancer types.
# NOTE(review): the error message reads "Use luad, luad or combined." --
# almost certainly meant "luad, lusc"; left unchanged here since this edit
# only adds documentation.
load.dataset <- function(cancer_type, stats_type, add_pathological_stage = F,
                         windowed_data = F, add_smoking_data = F, unified = T,
                         mutations_variant = 0.05, high_low_classes = F) {
  # Normalize so callers may pass "LUAD", "Luad", etc.
  cancer_type <- tolower(cancer_type)
  if (cancer_type %in% c('luad', 'lusc')) {
    if (unified) {
      load.unified.dataset(cancer_type, stats_type, windowed_data,
                           add_smoking_data = add_smoking_data,
                           add_pathological_stage = add_pathological_stage,
                           mutations_variant = mutations_variant,
                           high_low_classes = high_low_classes)
    } else {
      load.cancer.dataset(cancer_type, stats_type, windowed_data,
                          add_smoking_data = add_smoking_data,
                          add_pathological_stage = add_pathological_stage)
    }
  } else if (cancer_type == 'combined') {
    if (unified) {
      load.unified.combined(stats_type, windowed_data,
                            add_smoking_data = add_smoking_data,
                            add_pathological_stage = add_pathological_stage,
                            mutations_variant = mutations_variant,
                            high_low_classes = high_low_classes)
    } else {
      load.combined.dataset(stats_type, windowed_data,
                            add_smoking_data = add_smoking_data,
                            add_pathological_stage = add_pathological_stage)
    }
  } else {
    stop('Unsupported dataset! Use luad, luad or combined.')
  }
}
# Build a survival-model formula from a vector of covariate names.
#
# Drops the bookkeeping columns (patient_id, time, vital_status) and returns
#   Surv(time, vital_status) ~ `var1` + `var2` + ...
# Each covariate is backtick-quoted so names containing spaces or other
# non-syntactic characters survive formula parsing.
#
# Args:
#   desired_variables: character vector of column names; may include the
#     bookkeeping columns, which are removed automatically.
# Returns: a formula object.
prepare.formula <- function(desired_variables) {
  variables.to.drop <- c('patient_id', 'time', 'vital_status')
  filtered.variables <- desired_variables[!desired_variables %in% variables.to.drop]
  # base paste() replaces the previous stringr::str_c() call -- same output,
  # one fewer package dependency
  variables.str <- paste0('`', paste(filtered.variables, collapse = '` + `'), '`')
  as.formula(paste('Surv(time, vital_status)', variables.str, sep = " ~ "))
}
# Bucket a numeric age into the study's two age groups.
# Returns "lower than 65" for ages below 65 and "greater than 65" otherwise
# (so age 65 itself falls in the upper group, as in the original).
assign_age_group <- function(age) {
  if (age < 65) 'lower than 65' else 'greater than 65'
}
# Map an annual pack count to a smoking-status label.
# NA -> NA; 0 -> non-smoker; (0, 10] -> light smoker; > 10 -> heavy smoker.
# (The labels read "<10"/">10" but the boundary value 10 itself counts as
# light -- preserved exactly from the original behavior.)
assign.smoking.status <- function(packs) {
  if (is.na(packs)) {
    return(NA)
  }
  if (packs == 0) {
    return('non-smoker')
  }
  if (packs <= 10) {
    return('light smoker (<10 packs annually)')
  }
  'heavy smoker (>10 packs annually)'
}
# Collapse a TCGA pathologic stage string (e.g. "stage iiia") into one of
# three coarse categories: early / intermediate / advanced.
#
# Args:
#   stage: lower-case stage label such as "stage i", "stage iib",
#     "stage iv", or NA.
# Returns: the category label; NA for NA input; and (as in the original)
#   an implicit NULL for unrecognized stage strings.
#
# NOTE(review): the category labels spell "itermediate" -- the typo is kept
# byte-for-byte because downstream code may match on the exact string.
assign.pathologic.stage <- function(stage) {
  if (is.na(stage)) {
    return(NA)
  }
  # Strip any trailing 'a'/'b' sub-stage suffix ("stage iiia" -> "stage iii").
  # base sub() replaces Xmisc::rstrip(stage, 'ab'), removing the Xmisc
  # dependency for this function; same result for these inputs.
  stage.stripped <- sub("[ab]+$", "", stage)
  # Scalar comparisons, so short-circuiting || replaces the elementwise |
  if (stage.stripped == 'stage i') {
    return('early (stage I)')
  } else if (stage.stripped == 'stage ii' || stage == 'stage iiia') {
    return('itermediate (stage II + IIIa)')
  } else if (stage.stripped == 'stage iv' || stage == 'stage iiib') {
    return('advanced (stage IIIB + IV)')
  } else if (stage.stripped == 'stage iii') {
    return('itermediate (stage II + IIIa)')
  }
}
# Translate a human-readable statistics name into the internal file suffix:
# "prevalence" -> "stats", "microenvironment" -> "counts" (case-insensitive).
# `windowed_data` is accepted but unused, mirroring the original signature.
# Stops with an error for any other name.
get.stats.type <- function(stats_name, windowed_data) {
  key <- tolower(stats_name)
  suffix <- switch(key,
                   prevalence = 'stats',
                   microenvironment = 'counts',
                   NULL)
  if (is.null(suffix)) {
    stop("Unknown statistics! Use prevalence or microenvironment.")
  }
  suffix
}
# Append one row of Cox-model summary statistics to a results data frame.
#
# Args:
#   results: data.frame of previously accumulated rows.
#   cancer_type, model: character labels for the new row.
#   cox_model: list with elements mean_cindex, sd_cindex, best_cindex.
#   c_index_cutoff: kept for interface compatibility; currently unused by
#     this function.
# Returns: `results` with the new row bound at the bottom.
append.cox.results <- function(results, cancer_type, model, cox_model, c_index_cutoff=0.65) {
  new_row <- list(cancer_type = cancer_type,
                  model = model,
                  mean_cindex = cox_model$mean_cindex,
                  sd_cindex = cox_model$sd_cindex,
                  best_cindex = cox_model$best_cindex)
  # FALSE spelled out (was `F`, which is reassignable and thus unsafe)
  rbind(results, new_row, stringsAsFactors = FALSE)
}
# Return the tissue-class subsets to model for a given cancer type and
# statistics flavor.
#
# For "luad"/"lusc" (case-insensitive) the result is a list of two character
# vectors: the full subset for that cancer/statistic combination and the
# shared core subset (VESSEL, IMMUNE, NECROSIS, STROMA). For any other
# cancer type, the constants from constants.R are concatenated, exactly as
# before.
determine.tissue.subsets <- function(cancer_type, stats_type) {
  type <- tolower(cancer_type)
  is_prevalence <- tolower(stats_type) == 'prevalence'
  core <- c('VESSEL', 'IMMUNE', 'NECROSIS', 'STROMA')
  if (type == 'luad') {
    full <- if (is_prevalence) {
      c('TUMOR', 'VESSEL', 'IMMUNE', 'NECROSIS', 'STROMA', 'MIXED')
    } else {
      c('VESSEL', 'IMMUNE', 'NECROSIS', 'STROMA', 'BRONCHI')
    }
    return(list(full, core))
  }
  if (type == 'lusc') {
    full <- if (is_prevalence) {
      c('TUMOR', 'VESSEL', 'IMMUNE', 'NECROSIS', 'STROMA')
    } else {
      c('VESSEL', 'IMMUNE', 'NECROSIS', 'STROMA', 'LUNG')
    }
    return(list(full, core))
  }
  # Fallback: constants defined in constants.R
  c(PREVALENCE.TISSUE.SUBSETS, MICROENVIRONMENT.TISSUE.SUBSETS)
}
|
009b9cc3b13e7aa611a7b44cf40a5c50d845de76
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/MRS/man/mrs.Rd
|
c6d4edd3532e475f24a990543da2b05e6cd21653
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,561
|
rd
|
mrs.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mrs.R
\name{mrs}
\alias{mrs}
\title{Multi Resolution Scanning}
\usage{
mrs(X, G, n_groups = length(unique(G)), Omega = "default", K = 6,
init_state = NULL, beta = 1, gamma = 0.3, delta = NULL, eta = 0.3,
alpha = 0.5, return_global_null = TRUE, return_tree = TRUE,
min_n_node = 0)
}
\arguments{
\item{X}{Matrix of the data. Each row represents an observation.}
\item{G}{Numeric vector of the group label of each observation. Labels are integers starting from 1.}
\item{n_groups}{Number of groups.}
\item{Omega}{Matrix defining the vertices of the sample space.
The \code{"default"} option defines a hyperrectangle containing all the data points.
Otherwise the user can define a matrix where each row represents a dimension,
and the two columns contain the associated lower and upper limits for each dimension.}
\item{K}{Depth of the tree. Default is \code{K = 6}, while the maximum is \code{K = 14}.}
\item{init_state}{Initial state of the hidden Markov process.
The three states are \emph{null}, \emph{alternative} and \emph{prune}, respectively.}
\item{beta}{Spatial clustering parameter of the transition probability matrix. Default is \code{beta = 1}.}
\item{gamma}{Parameter of the transition probability matrix. Default is \code{gamma = 0.3}.}
\item{delta}{Optional parameter of the transition probability matrix. Default is \code{delta = NULL}.}
\item{eta}{Parameter of the transition probability matrix. Default is \code{eta = 0.3}.}
\item{alpha}{Pseudo-counts of the Beta random probability assignments. Default is \code{alpha = 0.5}.}
\item{return_global_null}{Boolean indicating whether to return the posterior probability of the global null hypothesis.}
\item{return_tree}{Boolean indicating whether to return the posterior representative tree.}
\item{min_n_node}{Node in the tree is returned if there are more than \code{min_n_node} data-points in it.}
}
\value{
An \code{mrs} object.
}
\description{
This function executes the Multi Resolution Scanning algorithm to detect differences
across multiple distributions.
}
\examples{
set.seed(1)
n = 20
p = 2
X = matrix(c(runif(p*n/2),rbeta(p*n/2, 1, 4)), nrow=n, byrow=TRUE)
G = c(rep(1,n/2), rep(2,n/2))
ans = mrs(X=X, G=G)
}
\references{
Soriano J. and Ma L. (2016).
Probabilistic multi-resolution scanning for two-sample differences.
\emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)}.
\url{http://onlinelibrary.wiley.com/doi/10.1111/rssb.12180/abstract}
}
|
e1fabe9505c8ab67609739cc288793ee3e02afef
|
c787ef4c899aeb5ae61366c788cebeec3e173753
|
/analyze-bootstraps.R
|
778f0331c7e342320dc20b0259156d2d2e1cfa52
|
[] |
no_license
|
openbiome/fmt-public-health-model
|
be7b1e3f5af772120c20061136d55063c9a0e901
|
8ee892be7645b8aab872aa1699227759de3d6870
|
refs/heads/master
| 2021-03-16T16:11:12.537079
| 2020-04-02T14:54:24
| 2020-04-02T14:55:04
| 246,922,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,675
|
r
|
analyze-bootstraps.R
|
#!/usr/bin/env Rscript --vanilla
library(tidyverse)
source("util.R")
source("model.R")
set.seed(5)
n_plot_points <- 1e3
max_nnt <- 10
# Load variates
estimates <- readRDS("cache/estimates.rds")
variates <- readRDS("cache/variates.rds")
# Point estimate model
us <- c(0, 0.50, 1.0)
point_estimates <- tibble(u = us) %>%
mutate(
params = map(u, ~ `[[<-`(estimates, "u", .)),
outcomes = map(params, model_call)
) %>%
select(u, outcomes) %>%
unnest_longer(outcomes, values_to = "point_estimate", indices_to = "param")
# Bootstrapping model
simulations <- tibble(u = us) %>%
mutate(
params = map(u, ~ `[[<-`(variates, "u", .)),
outcomes = map(params, model_call)
)
# CIs around the point estimates
cis <- simulations %>%
select(u, outcomes) %>%
unnest_longer(outcomes, values_to = "values", indices_to = "param") %>%
mutate(
lci = map_dbl(values, ~ quantile(., ci_quantiles[1])),
uci = map_dbl(values, ~ quantile(., ci_quantiles[2]))
) %>%
select(u, param, lci, uci) %>%
left_join(point_estimates, by = c("u", "param"))
write_tsv(cis, "output/outcomes-cis.tsv")
# Plot of points
simulation_data <- simulations %>%
select(-u) %>%
unnest_wider(params) %>%
unnest_wider(outcomes) %>%
unnest(cols = c(model_params, model_outcomes))
exclusion_counts <- simulation_data %>%
count(between(nnt, 0, max_nnt)) %>%
mutate(f = n / sum(n))
write_tsv(exclusion_counts, "output/exclusion-counts.tsv")
plot_data <- simulation_data %>%
# keep non-weird NNTs
filter(between(nnt, 0, max_nnt)) %>%
# rescale
mutate_at(c("Cdot", "n_fmts"), ~ . / 1e3) %>%
# subsample
sample_n(n_plot_points) %>%
pivot_longer(setdiff(model_params, c("u")), names_to = "param_name", values_to = "param_value") %>%
pivot_longer(model_outcomes, names_to = "outcome_name", values_to = "outcome_value") %>%
mutate(
param_label = recode(param_name,
Cdot = "C[symbol('\\267')] / 10^3",
p1 = "p[1]",
p2 = "p[2]",
pabx = "p[abx]",
pfmt = "p[FMT]"
),
outcome_label = recode(outcome_name,
n_fmts = "N[FMT]",
f_geq3 = "f['' >= 3]",
nnt = "NNT"
)
)
plot <- plot_data %>%
ggplot(aes(param_value, outcome_value, color = factor(u))) +
facet_grid(outcome_label ~ param_label, scales = "free", labeller = label_parsed) +
geom_point(shape = 3) +
scale_color_manual(values = c("#edf8b1", "#7fcdbb", "#2c7fb8")) +
xlab("Parameter value") +
ylab("Outcome value") +
guides(color = guide_legend("u")) +
cowplot::theme_half_open() +
theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1, size = 10))
ggsave("fig/outcomes.pdf")
ggsave("fig/outcomes.png")
|
e74b944c987a0950bb5a1c69ec9cc49c8e9a858e
|
2bffd1e59a1ee5606bf28003ab6d423c8d868b2e
|
/proyecto_nlp.R
|
6dcd48bc0a5e3094fcf326932a0522498851ab5c
|
[] |
no_license
|
luispalomo97/ProyectoNLP
|
c471f1ee6bceccb9a9d16a4740e92659ab5ff999
|
012dccd311c8dd9580c3c8338f43d6c043ba61a3
|
refs/heads/main
| 2023-02-11T08:22:35.399345
| 2021-01-07T20:13:32
| 2021-01-07T20:13:32
| 326,761,150
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 10,702
|
r
|
proyecto_nlp.R
|
#-------------------------------------------PASOS PREVIOS-------------------------------------------------------
#instalaciΓ³n de paquetes
install.packages("ggplot2")
install.packages("tm")
install.packages("wordcloud")
install.packages("RColorBrewer")
install.packages("sentimentr")
install.packages("tidytext")
install.packages("textdata")
install.packages("syuzhet")
install.packages("naivebayes")
install.packages("e1071")
install.packages("caret")
install.packages("janitor")
#Carga de paquetes
library(ggplot2)
library(tm)
library(wordcloud)
library(RColorBrewer)
library(dplyr)
library(sentimentr)
library(tidytext)
library(textdata)
library(tidyverse)
library(syuzhet)
library(naivebayes)
library(e1071)
library(caret)
library(janitor)
#---------------------------------------CARGA DEL DATA SET-------------------------------------------------------
#Iniciamos la carga de los datos del fichero amazon_alexa, previamente a cargar el fichero
#Se han eliminado aquellas reviews cuyo comentario esta vacio
alexa_reviews <- read.csv2("data_set_alexa_reviews.csv", row.names = NULL)
names(alexa_reviews) <- c('valoracion','fecha','producto','comentario','feedback')
#Se va a mostar cual es la frecuencia de cada una de las valoraciones
ggplot(data = alexa_reviews, mapping = aes(x = valoracion, fill = as.factor(valoracion))) +
geom_bar() +
labs(x = "valoracion", y = "Cantidad", fill = "valoracion")+
ggtitle("Valoraciones hechas sobre los productos")+
scale_fill_manual("leyenda", values = c("1" = "#D9EEC2", "2" = "#C6EC9D", "3" = "#B6EA7E",
"4" = "#A4E859", "5"= "#8BF01D"))+
theme(plot.title = element_text(hjust = 0.5))
#------------------------------------LIMPIEZA DEL DATA SET-------------------------------------------------------
#Limpieza de los comentarios, solo queremos elemenos alfa-numericos
alexa_reviews$comentario <-gsub("[^a-zA-Z0-9 ]", "", alexa_reviews$comentario)
#alexa_reviews$comentario <-gsub(pattern = "(^ +| +$)",replacement = "",x = alexa_reviews$comentario)
corpus <- Corpus(VectorSource(alexa_reviews$comentario))
#Eliminamos las letras mayusculas
corpus <-tm_map(corpus,content_transformer(tolower))
#Eliminamos los signos de puntuacion
corpus <- tm_map(corpus,removePunctuation)
#Eliminamos los numeros que hayan en los comentarios
corpus <- tm_map(corpus,removeNumbers)
#Eliminamos aquellas palabras que no aportan informaciΓ³n
corpus <- tm_map(corpus,removeWords, c(stopwords("en"),"echo","alexa","music","sound","dot","set",
"amazon","product","get","speaker","home","play",
"device","still","time","just","will"))
#Eliminamos los espacios en blanco entre palabas
corpus <- tm_map(corpus,stripWhitespace)
# Para crear el clasificador inicialemente nos hemos quedado con el conjunto de palabras del corpus
datafr <- data.frame("coment"= corpus$content)
#-----------------------------------------CREACION MATRIZ DE TERMINOS--------------------------------------------------
#A continuaciΓ³n se crea un term-document matrix para crear la nube de puntos
matrix_corpus <- TermDocumentMatrix(corpus)
m <- as.matrix(matrix_corpus)
v <- sort(rowSums(m),decreasing = TRUE)
data <- data.frame(word =names(v),freq = v)
head(data,5)
#Mostramos cuales son las palabras mΓ‘s frecuentes
barplot(data[1:11,]$freq, las = 2, names.arg = data[1:11,]$word,ylim = c(0,1000),
col ="lightgreen", main ="Palabras mΓ‘s frecuentes de las reviews",
ylab = "Frecuencia de palabras")
#--------------------------------------CREACION NUBE DE PALABRAS------------------------------------------------------
set.seed(1234)
wordcloud(words = data$word, freq = data$freq, min.freq = 1, max.words =140,
random.order=FALSE, rot.per = 0.35, colors=brewer.pal(8,"Dark2"))
#Nube de palabras diferenciando entre palabras positivas y negativas con bing
a <-data %>% inner_join(get_sentiments("bing"))
a$sentiment<-replace(a$sentiment, a$sentiment == "positive",0)
a$sentiment<-replace(a$sentiment, a$sentiment == "negative",1)
colour = ifelse(a$sentiment < 1,"#2CBD52","#BD2C2C")
set.seed(1234)
wordcloud(words = a$word, freq = a$freq, min.freq = 1, max.words =140,
random.order=FALSE, rot.per = 0.35, colors=colour,ordered.colors=TRUE)
#-----------------------------------------ANALISIS DE SENTIMIENTOS---------------------------------------------------
afinn_vector <- get_sentiment(datafr$coment,method = "afinn")
summary(afinn_vector)
#El resultado que nos arroja esta prueba es que el analisis de sentimientos mediante affinn nos muestra un valor medio de
# 3 lo que nos quiere decir que de media los comentarios son mas positivos, ya que la escala de affin es de -5 a 5
bing_vector <- get_sentiment(datafr$coment,method = "bing")
summary(bing_vector)
#El resultado que nos arroja esta prueba es que el analisis de sentimientos mediante bing nos muestra un valor medio de
# 1 lo que nos quiere decir que de media los comentarios son mas positivos, ya que la escala de bing es de -1 a 1
syuzhet_vector <- get_sentiment(datafr$coment,method = "syuzhet")
summary(syuzhet_vector)
#El resultado que nos arroja esta prueba es que el analisis de sentimientos mediante syuzhet nos muestra un valor medio de
# 1 lo que nos quiere decir que de media los comentarios son mas positivos, ya que la escala de syuzhet es de -1 a 1
#Se pueden plotear las diferentes funciones para ver su frecuencia de forma grΓ‘fica
plot(
syuzhet_vector,
type="h",
main="PuntuaciΓ³n de sentimientos syuzhet",
xlab = "Comentarios",
ylab= "Valor emocional"
)
plot(
bing_vector,
type="h",
main="PuntuaciΓ³n de sentimientos bing",
xlab = "Comentarios",
ylab= "Valor emocional"
)
plot(
afinn_vector,
type="h",
main="PuntuaciΓ³n de sentimientos afinn",
xlab = "Comentarios",
ylab= "Valor emocional"
)
#En estos plots tambien se puede observar de manera clara que los comentarios en su mayorΓa son positivos
#Aqui lo que se ha utilizado es la funciΓ³n get_nrc_sentiment, la cual devuelve una matriz de 10 columnas
# donde a cada comentario se muesta por cada una de esas columnas apiriciones de palabras asociadas a esos
#sentimientos
data_sentiment<-get_nrc_sentiment(datafr$coment)
head (data_sentiment,10)
data_sentiment <- colSums(data_sentiment)
#Aqui se muestra un diagrama de barras donde se muestra cuantas palabras asociadas a cada sentimiento hay en
#los comentarios
barplot(data_sentiment,col=brewer.pal(8,"Dark2"),ylim = c(0,5000),
ylab = "frecuencia de palabras",las=2,cex.names=.9, main = "Palabras de los comentarios asociados a sentimientos")
#Esta es la diferenciacion entre comentarios positivos y negativos
barplot(data_sentiment[c("negative","positive")],ylim=c(0,5000), col = c("#BD2C2C","#2CBD52"),
main = "Palabras de los comentarios asociados a sentimientos positivos y negativos")
#--------------------------------CreaciΓ³n del clasificador bayesiano-----------------------------------------------
#Con este clasificador bayesiano buscamos encontrar si a partir del producto, la valoraciΓ³n, el feedback y el numero
# de letras por cada comentario ver si es capaz de generar un valoraciΓ³n similar a la que se ha hecho.
# Realizamos una conversiΓ³n a etiquetas de las valoraciones, es decir, de un valor nΓΊmerico a uno categorico
# Build the classifier dataset with star ratings collapsed to three labels
# (malo / regular / bueno). Relies on the globals `alexa_reviews` (raw
# reviews) and `datafr` (cleaned corpus text) defined earlier in the script.
firstModif_1 <- function(){
  alexamodAux <- alexa_reviews
  alexamodAux$valoracion <- as.character(alexamodAux$valoracion)
  # Collapse ratings: 1-2 -> "malo", 3-4 -> "regular", 5 -> "bueno".
  # NOTE(review): these conditions mix a column comparison with whole
  # data-frame comparisons (alexamodAux == 2, etc.) -- probably meant
  # alexamodAux$valoracion == 2; confirm before reuse.
  alexamodAux[alexamodAux$valoracion== 1 | alexamodAux== 2] <- "malo"
  alexamodAux[alexamodAux== 3 | alexamodAux== 4] <- "regular"
  alexamodAux[alexamodAux== 5] <- "bueno"
  alexamodAux <- within(alexamodAux, valoracion <- factor(valoracion, labels = c("bueno","malo","regular")))
  # Join the data frame holding the corpus word sets with the modified data
  # frame described above. Here the NumeroLetras column is added, holding the
  # word count of each cleaned comment.
  dataset_a <- rbind(select(alexamodAux,valoracion,producto,comentario,feedback))
  dataset_a <- cbind(dataset_a,datafr)
  dataset_a <- transform(dataset_a, "NumeroLetras"=str_count(coment, '\\w+'))
  # Drop rows whose comment became empty after cleaning
  dataset_a <- dataset_a[dataset_a$NumeroLetras > 0,]
  dataset_a$producto <-NULL
  dataset_a$comentario <-NULL
  #dataset_a$NumeroLetras <-NULL
  #dataset_a$feedback <-NULL
  return (dataset_a)
}
# Build the classifier dataset keeping all five rating levels, relabelled as
# words (uno..cinco). Relies on the globals `alexa_reviews` and `datafr`
# defined earlier in the script.
firstModif_2 <- function(){
  alexamod <- within(alexa_reviews, valoracion <- factor(valoracion, labels = c("uno","dos","tres","cuatro","cinco")))
  # Join the data frame holding the corpus word sets with the relabelled
  # reviews. Here the NumeroLetras column is added, holding the word count of
  # each cleaned comment.
  dataset_a <- rbind(select(alexamod,valoracion,producto,comentario,feedback))
  dataset_a <- cbind(dataset_a,datafr)
  dataset_a <- transform(dataset_a, "NumeroLetras"=str_count(coment, '\\w+'))
  # Drop rows whose comment became empty after cleaning
  dataset_a <- dataset_a[dataset_a$NumeroLetras > 0,]
  dataset_a$producto <-NULL
  dataset_a$comentario <-NULL
  #dataset_a$NumeroLetras <-NULL
  #dataset_a$feedback <-NULL
  return (dataset_a)
}
#Una vez creado el dataset adecuado procedemos a crear el conjunto de test y el de train
# El conjunto de train serΓ‘ del 80% y el de test 20%
# Primero se probarΓ‘ los datos con las 5 labels
primerTest <- firstModif_2()
trainIndex=createDataPartition(primerTest$valoracion, p=0.8, list = F)
train=primerTest[trainIndex, ]
test=primerTest[-trainIndex, ]
model <- naiveBayes(valoracion~., data = train )
predicted<-predict(model,test)
tab <- table(test$valoracion, predicted, dnn = c("Actual", "Predicha"))
confusionMatrix(tab)
#De forma similar a anteriormente realizamos la division de los conjuntos de train y test
# El conjunto de train serΓ‘ del 80% y el de test 20%
# Ahora los datos estarΓ‘n clasificados en : malo, regular y bueno
segundoTest <- firstModif_1()
trainIndex=createDataPartition(segundoTest$valoracion, p=0.8, list = F)
train=segundoTest[trainIndex, ]
test=segundoTest[-trainIndex, ]
model <- naiveBayes(valoracion~., data = train )
predicted<-predict(model,test)
tab <- table(test$valoracion, predicted, dnn = c("Actual", "Predicha"))
confusionMatrix(tab)
|
ed149573d59a269fe32712343bb1fded225cb981
|
d73d30ad8c323904788266f94bf4c99081e80b3a
|
/clean.r
|
7bb8b8e184ebf3118006807d672abe17e2c16bfa
|
[
"MIT"
] |
permissive
|
phoebewong/tsw-hackathon
|
fb5a0ecd9864b1ff0ee60b206f4e0f326d3a5766
|
24db62399aea6017f298b1286b00fe8c1ed8a2f8
|
refs/heads/master
| 2021-05-10T20:50:29.693312
| 2018-01-22T04:37:02
| 2018-01-22T04:37:02
| 118,207,686
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,174
|
r
|
clean.r
|
hospital.df$full_address <- paste(hospital.df$Address.1, hospital.df$Address.2, hospital.df$Address.3)
hospital.df$full_address <- paste(hospital.df$full_address, hospital.df$City, hospital.df$State, sep = ", ")
hospital.df$full_address <- gsub("NA, ", "", hospital.df$full_address)
hospital.df$pop_content <- paste(sep = "<br/>",
hospital.df$Address.1,
hospital.df$Phone.Number)
# Geocode the hospital address
# Geocoding a csv column of "addresses" in R
#load ggmap
library(ggmap)
# Select the file from the file chooser
fileToLoad <- file.choose(new = TRUE)
# Read in the CSV data and store it in a variable
origAddress <- read.csv(fileToLoad, stringsAsFactors = FALSE)
# Initialize the data frame
geocoded <- data.frame(stringsAsFactors = FALSE)
# Loop through the addresses to get the latitude and longitude of each address and add it to the
# origAddress data frame in new columns lat and lon
for(i in 26:nrow(hospital.df))
{
print(i)
if (i%%5 == 0){
Sys.sleep(60)
} else {
Sys.sleep(15)
result <- try(geocode(hospital.df$full_address[i], output = "latlona", source = "google"))
hospital.df$lon[i] <- as.numeric(result[1])
hospital.df$lat[i] <- as.numeric(result[2])
hospital.df$geoAddress[i] <- as.character(result[3])
}
}
for(i in 67:nrow(boston)){
print(i)
if (i%%5 == 0){
Sys.sleep(60)
} else {
Sys.sleep(15)
result <- try(geocode(boston$full_address[i], output = "latlona", source = "google"))
boston$lon[i] <- as.numeric(result[1])
boston$lat[i] <- as.numeric(result[2])
boston$geoAddress[i] <- as.character(result[3])
}
}
for (i in 26:nrow(boston)){
print(i)
if (i%%5 == 0){
Sys.sleep(60)
} else {
Sys.sleep(15)
result <- try(geocode(boston$full_address[i], output = "latlona", source = "google"))
boston$lon[i] <- as.numeric(result[1])
boston$lat[i] <- as.numeric(result[2])
boston$geoAddress[i] <- as.character(result[3])
}
}
# Write a CSV file containing origAddress to the working directory
write.csv(origAddress, "geocoded.csv", row.names=FALSE)
|
12910a283e903ae1378fee8d375181b076a6dc39
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/cope/R/MBContour.R
|
3474d5d404ec94528ca09e6b7cba5cbd92559d91
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,279
|
r
|
MBContour.R
|
#' Computes Multiplier Bootstrap realizations of the supremum of a Gaussian
#' field on a contour.
#'
#' @param x x-Coordinates of the grid on which the data is observed.
#' @param y y-Coordinates of the grid on which the data is observed.
#' @param R An array of dimension c(length(x),length(y),n) containing the
#' realizations of the field.
#' @param f A matrix of dimension c(length(x),length(y)). The contour on which
#' tail probabilities are to be computed is defined as {f=level}.
#' @param level The level of the contour.
#' @param N The number of Bootstrap realizations to produce. Default is 1000.
#' @return A vector of length N containing the Bootstrap realizations of the
#' supremum.
MBContour = function(x, y, R, f, level, N=1000){
  # Extract the level set {f = level} as one or more contour polylines.
  cont = contourLines(list(x=x,y=y,z=f),levels=level,nlevels=1)
  # No contour at this level: the supremum over an empty set is -Inf.
  if(length(cont)==0) return(rep(-Inf,N))
  # Concatenate the (x, y) vertices of all contour segments into one matrix.
  cont_x = c()
  cont_y = c()
  for(l in 1:length(cont)) {cont_x = c(cont_x,cont[[l]]$x); cont_y = c(cont_y,cont[[l]]$y)}
  cont = cbind(cont_x,cont_y)
  # Given one field flattened as a vector, interpolate it onto the contour
  # vertices (bilinear, via fields::interp.surface) and take the maximum.
  interp_max = function(G){
    G = matrix(G,ncol=length(y))
    max(fields::interp.surface(list(x=x,y=y,z=G),cont))
  }
  n = dim(R)[3]
  # Flatten: R becomes a (length(x)*length(y)) x n matrix, one column per
  # observed realization of the field.
  R = matrix(R,ncol=n)
  # Gaussian multipliers: each of the N columns of g defines one bootstrap
  # replicate, R %*% g being the multiplier-bootstrap field.
  g = matrix(rnorm(n*N),n,N)
  # Supremum of the absolute bootstrap field on the contour, scaled by
  # sqrt(n-2) -- presumably a degrees-of-freedom correction; confirm against
  # the accompanying methodology paper.
  apply(abs(R %*% g ),2,interp_max) / sqrt(n-2)
}
|
1ef183f36cf238dd97095bb2459a32ba24def401
|
459fc933c9da3c2ccc8456ee3bba8441fe3cb208
|
/code/Economics/median_income/median_income_lineGraph.R
|
388e2f058b6fb4da0b0163b002935615f61f309b
|
[] |
no_license
|
DSPG-Young-Scholars-Program/dspg21hampton_roads
|
a8847a252474815cce9196a771f16ec3e8542610
|
8189465458901f7b986f94b41e44e5c32c4bd674
|
refs/heads/main
| 2023-06-29T07:01:06.204889
| 2021-08-04T19:48:41
| 2021-08-04T19:48:41
| 392,808,859
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,923
|
r
|
median_income_lineGraph.R
|
library(tidycensus)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(readr)
library(grid)
library(gridExtra)
#need to convert data from 2010-2019
#VA data
va_table <- function(varcode, year){
data.frame(get_acs(geography = "state", state = 51,
table = varcode,
year = year))}
#converting to csv
va_income19 <- va_table("S1903", 2010)
write.csv(va_income19, file = "shinyapp/data/TableS1903FiveYearEstimates/va_income2010.csv")
#Hampton Roads data
county_fips <- c(550, 620, 650, 700, 710, 735, 740, 800, 810,
830, 073, 093, 095, 115, 175, 199)
#income by Race (just median)
hamp_income <- get_acs(geography = "county", state = 51,
county = county_fips[1],
table = "S1903",
year = 2010)
for(i in 2:length(county_fips)) {
tmp <- get_acs(geography = "county", state = 51,
county = county_fips[i],
table = "S1903",
year = 2010)
hamp_income <- rbind(hamp_income, tmp)
}
write.csv(hamp_income, file = "shinyapp/data/TableS1903FiveYearEstimates/hampton_income2010.csv")
#2017 to 2019 format
# Helper for the 2017-2019 layout of ACS table S1903: the total and Black
# median household incomes sit on rows 81 and 83 (estimate in column 4),
# under variable codes S1903_C03_001 / S1903_C03_003.
#
# Returns a data frame with columns "Median Income (US Dollars)", "Location",
# "Demographic", and "Year", ordered VA total, VA Black, Hampton total,
# Hampton Black -- identical to the former per-year copies of this code.
# (Those copies also drew a dead `variable <- sample(...)` vector that was
# shadowed by the data column inside group_by(); it has been removed.)
process_income_new <- function(year) {
  base_dir <- "shinyapp/data/TableS1903FiveYearEstimates"
  # Virginia statewide medians (total, Black)
  va_yr <- read.csv(file.path(base_dir, paste0("va_income", year, ".csv")))
  va_yr <- va_yr[2:6]
  va_median <- va_yr[c(81, 83), 4]
  # Hampton Roads: take the same two rows within each locality (NAME), then
  # the median estimate across localities per variable code. group_by()
  # orders codes ascending, so total (.._001) precedes Black (.._003),
  # matching the Demographic labels below.
  hamp_yr <- read.csv(file.path(base_dir, paste0("hampton_income", year, ".csv")))
  hamp_yr <- hamp_yr[2:6]
  hamp_median <- hamp_yr[, 2:4] %>%
    group_by(NAME) %>%
    slice(c(81, 83)) %>%
    group_by(variable) %>%
    summarize(med = median(estimate, na.rm = TRUE))
  data.frame(
    `Median Income (US Dollars)` = as.numeric(c(va_median, hamp_median$med)),
    Location = rep(c("Virginia", "Hampton Roads"), each = 2),
    Demographic = rep(c("Total Population", "Black Population"), 2),
    Year = as.character(year),
    check.names = FALSE
  )
}
median_income19 <- process_income_new(2019)
median_income18 <- process_income_new(2018)
median_income17 <- process_income_new(2017)
#2010-2016 format
# Helper for the 2010-2016 layout of ACS table S1903: the total and Black
# median household incomes sit on rows 31 and 33 (estimate in column 4),
# under variable codes S1903_C02_001 / S1903_C02_003.
#
# Returns a data frame with columns "Median Income (US Dollars)", "Location",
# "Demographic", and "Year", ordered VA total, VA Black, Hampton total,
# Hampton Black -- identical to the former per-year copies of this code.
# (Those copies also drew a dead `variable <- sample(...)` vector that was
# shadowed by the data column inside group_by(); it has been removed.)
process_income_old <- function(year) {
  base_dir <- "shinyapp/data/TableS1903FiveYearEstimates"
  # Virginia statewide medians (total, Black)
  va_yr <- read.csv(file.path(base_dir, paste0("va_income", year, ".csv")))
  va_yr <- va_yr[, 2:6]
  va_median <- va_yr[c(31, 33), 4]
  # Hampton Roads: same two rows within each locality (NAME), then the median
  # estimate across localities per variable code (ascending code order keeps
  # total before Black, matching the Demographic labels below).
  hamp_yr <- read.csv(file.path(base_dir, paste0("hampton_income", year, ".csv")))
  hamp_yr <- hamp_yr[, 2:6]
  hamp_median <- hamp_yr[, 2:4] %>%
    group_by(NAME) %>%
    slice(c(31, 33)) %>%
    group_by(variable) %>%
    summarize(med = median(estimate, na.rm = TRUE))
  data.frame(
    `Median Income (US Dollars)` = as.numeric(c(va_median, hamp_median$med)),
    Location = rep(c("Virginia", "Hampton Roads"), each = 2),
    Demographic = rep(c("Total Population", "Black Population"), 2),
    Year = as.character(year),
    check.names = FALSE
  )
}
median_income16 <- process_income_old(2016)
median_income15 <- process_income_old(2015)
median_income14 <- process_income_old(2014)
median_income13 <- process_income_old(2013)
median_income12 <- process_income_old(2012)
median_income11 <- process_income_old(2011)
median_income10 <- process_income_old(2010)
#########################################################################Combining all the years into one data set to graph
income_years <- rbind(median_income19, median_income18, median_income17, median_income16,
                      median_income15, median_income14, median_income13, median_income12,
                      median_income11, median_income10)
# Shared builder so the Virginia and Hampton Roads panels use identical
# styling: centered title, no x-axis title, fixed y range, same palette.
trend_palette <- c("#D55E00", "#0072B2")
income_trend_plot <- function(dat, plot_title) {
  ggplot(dat, aes(x = Year, y = `Median Income (US Dollars)`,
                  group = Demographic, color = Demographic)) +
    geom_line(position = "identity", size = 1.3) +
    scale_color_manual(values = trend_palette) +
    theme_minimal() +
    ggtitle(plot_title) +
    theme(plot.title = element_text(hjust = 0.5),
          axis.title.x = element_blank(),
          legend.title = element_blank()) +
    labs(y = "Median Income (US Dollars)") +
    ylim(35000, 75000)
}
#VA line graph
va_years <- income_years %>% filter(Location == "Virginia")
va_line <- income_trend_plot(va_years, "Virginia")
#hamp line graph
hamp_years <- income_years %>% filter(Location == "Hampton Roads")
hamp_line <- income_trend_plot(hamp_years, "Hampton Roads")
#plot the two graphs together
time_graphs <- grid.arrange(hamp_line, va_line, ncol = 2)
time_graphs
|
06c2c257f7282ba1edb7f103b7cadda25db3b251
|
2e2c21e3103526a82fcf7cd76f5839be27f74745
|
/R/CESAnalysis.R
|
3ef37ed56593d8f4f6daf82ac6828ebee36b107a
|
[] |
no_license
|
Townsend-Lab-Yale/cancereffectsizeR
|
a65e1e8d91848902124ba72d987aa18e825b52e2
|
52ebab17ad8cc616fcc9728c528681ed0f4a63c2
|
refs/heads/main
| 2023-08-11T21:16:07.022309
| 2023-07-31T16:33:41
| 2023-07-31T16:33:41
| 140,888,382
| 24
| 12
| null | 2022-10-17T19:31:02
| 2018-07-13T20:03:10
|
R
|
UTF-8
|
R
| false
| false
| 27,772
|
r
|
CESAnalysis.R
|
#' Create a cancereffectsizeR analysis
#'
#' Creates a CESAnalysis, the central data structure of cancereffectsizeR.
#'
#' @param refset Name of reference data set (refset) to use; run \code{list_ces_refsets()} for
#' available refsets. Alternatively, the path to a custom reference data directory, or a
#' refset environment object (e.g., \code{ces.refset.hg38}).
#' @return CESAnalysis object
#' @export
CESAnalysis = function(refset = NULL) {
  if(is.null(refset)) {
    msg = paste0("Required argument refset: Supply a reference data package (e.g., ces.refset.hg38 or ces.refset.hg19), or ",
                 "custom reference data (see docs).")
    stop(paste0(strwrap(msg, exdent = 2), collapse = "\n"))
  }
  # Accept either a refset environment (the exported data object of a refset
  # package) or a character name/path; work with the name from here on.
  if(is(refset, "environment")) {
    refset_name = as.character(substitute(refset))
  } else {
    refset_name = refset
  }

  # Check for and load reference data for the chosen genome/transcriptome data
  if (! is(refset_name, "character")) {
    stop("refset should be a refset object, the name of an installed refset package, or a path to a custom refset directory.")
  }
  using_custom_refset = TRUE
  if (refset_name %in% names(.official_refsets)) {
    using_custom_refset = FALSE
    if(file.exists(refset_name)) {
      stop("You've given the name of a CES reference data set package, but a file/folder with the same name is in your working directory. Stopping to avoid confusion.")
    }
    # require() (not requireNamespace) is deliberate here: the refset package
    # gets attached so its data object can be fetched by name further down.
    if(! require(refset_name, character.only = TRUE)) {
      if(refset_name == "ces.refset.hg19") {
        message("Install ces.refset.hg19 like this:\n",
                "options(timeout = 600)\n",
                "remotes::install_github(\"Townsend-Lab-Yale/ces.refset.hg19@*release\")")
      } else if(refset_name == "ces.refset.hg38") {
        message("Install ces.refset.hg38 like this:\n",
                "options(timeout = 600)\n",
                "remotes::install_github(\"Townsend-Lab-Yale/ces.refset.hg38@*release\")")
      }
      stop("CES reference data set ", refset_name, " not installed.")
    }
    # Enforce the minimum refset version required by this package version.
    req_version = .official_refsets[[refset_name]]
    actual_version = packageVersion(refset_name)
    if (actual_version < req_version) {
      stop("CES reference data set ", refset_name, " is version ", actual_version, ", but your version of cancereffectsizeR requires at least ",
           "version ", req_version, ".\nRun this to update:\n",
           "remotes::install_github(\"Townsend-Lab-Yale/", refset_name, "\")")
    }
    refset_version = actual_version
    data_dir = system.file("refset", package = refset_name)
  } else {
    # Custom refset: refset_name is expected to be a directory path.
    refset_version = NA_character_
    if (! dir.exists(refset_name)) {
      if (grepl('/', refset_name)) {
        stop("Could not find reference data at ", refset_name)
      } else {
        stop("Invalid reference set name. Check spelling, or view available data sets with list_ces_refsets().")
      }
    }
    data_dir = refset_name
    refset_name = basename(refset_name)
    if(refset_name %in% names(.official_refsets)) {
      stop("Your custom reference data set has the same name (", refset_name, ") as a CES reference data package. Please rename it.")
    }
  }

  # load reference data (cached in the package-level .ces_ref_data environment
  # so repeat analyses with the same refset don't reload from disk)
  if (! refset_name %in% ls(.ces_ref_data)) {
    message("Loading reference data set ", refset_name, "...")
    if (using_custom_refset) {
      .ces_ref_data[[refset_name]] = preload_ref_data(data_dir)
    } else {
      .ces_ref_data[[refset_name]] = get(refset_name, envir = as.environment(paste0('package:', refset_name)))
    }
    .ces_ref_data[[refset_name]][["data_dir"]] = data_dir
  }

  # advanced is a grab bag of additional stuff to keep track of
  ## using_exome_plus: whether previously loaded and any future generic exome data uses the "exome+" coverage option
  ##                   (either all generic data must, or none of it, based on choice of enforce_default_exome_coverage on first load_maf call)
  ## recording: whether "run_history" is currently being recorded (gets set to false during some internal steps for clarity)
  ## uid: a unique-enough identifier for the CESAnalysis (just uses epoch time)
  ## genome_info: environment with stuff like genome build name, species, name of associated BSgenome
  ## snv_signatures: List CES signature sets used in the analysis
  ## cached_variants (not populated here): output of select_variants() run with default arguments
  ##                  (automatically updated as needed by load_cesa/update_covered_in)
  ## cds_refset: TRUE if gr_genes/RefCDS are protein-based; FALSE if gene-based.
  genome_info = get_ref_data(data_dir, "genome_build_info")
  ces_version = packageVersion("cancereffectsizeR")
  # The two official refsets have known cds_refset status; for custom refsets,
  # a "gene" metadata column on gr_genes indicates a protein-based refset.
  if(refset_name == 'ces.refset.hg38') {
    cds_refset = TRUE
  } else if (refset_name == 'ces.refset.hg19') {
    cds_refset = FALSE
  } else {
    gr_genes = get_ref_data(data_dir, "gr_genes")
    cds_refset = "gene" %in% names(GenomicRanges::mcols(gr_genes))
  }
  advanced = list("version" = ces_version, using_exome_plus = FALSE,
                  recording = TRUE, uid = unclass(Sys.time()), genome_info = genome_info,
                  snv_signatures = list(), refset_version = refset_version,
                  cds_refset = cds_refset)

  # Mutation table specifications (see template tables declared in imports.R)
  mutation_tables = list(amino_acid_change = copy(aac_annotation_template),
                         snv = copy(snv_annotation_template), aac_snv_key = copy(aac_snv_key_template))
  cesa = new("CESAnalysis", run_history = character(), ref_key = refset_name, maf = data.table(), excluded = data.table(),
             mutrates = data.table(),
             selection_results = list(), ref_data_dir = data_dir, epistasis = list(),
             advanced = advanced, samples = copy(sample_table_template), mutations = mutation_tables,
             coverage = list())
  # Record version info (and refset version, when known) at the top of run history.
  cesa@run_history = c(paste0("[Version: cancereffectsizeR ", ces_version, "]" ))
  if (! is.na(refset_version)) {
    cesa@run_history= c(cesa@run_history,
                        paste0("[Refset: ", refset_name, " ", refset_version, "]"))
  }
  cesa = update_cesa_history(cesa, match.call())

  msg = paste0("This CESAnalysis will use ", refset_name, " reference data and the ", genome_info$species,
               " genome, assembly ", genome_info$build_name, '.')
  pretty_message(msg)
  return(cesa)
}
#' Create an independent copy of a CESAnalysis
#'
#' Used internally to "copy" CESAnalysis objects while keeping memory use to a minimum.
#'
#' Only the data.tables (and lists containing data.tables) inside the
#' CESAnalysis slots are deep-copied with data.table's copy(). Everything
#' else is passed through as-is and relies on R's usual copy-on-modify
#' semantics, which is both correct and cheaper than forcing an immediate
#' full copy of non-data.table objects.
#'
#' @param cesa CESAnalysis
#' @keywords internal
copy_cesa = function(cesa) {
  if(! is(cesa, "CESAnalysis")) {
    stop("cesa should be a CESAnalysis")
  }
  # Rebuild the trinucleotide weights list: non-data.table entries are shared
  # (copy-on-modify handles them), while the data.table entries get explicit
  # deep copies.
  weights_in = cesa@trinucleotide_mutation_weights
  weights_out = list()
  if(length(weights_in) > 0) {
    for (field in c("trinuc_snv_counts", "trinuc_proportion_matrix", "group_average_dS_output")) {
      weights_out[[field]] = weights_in[[field]]
    }
    dt_fields = c("signature_weight_table", "signature_weight_table_with_artifacts",
                  "raw_signature_weights")
    weights_out[dt_fields] = copy(weights_in[dt_fields])
  }
  new("CESAnalysis",
      run_history = cesa@run_history,
      ref_key = cesa@ref_key,
      ref_data_dir = cesa@ref_data_dir,
      dndscv_out_list = copy(cesa@dndscv_out_list),
      maf = copy(cesa@maf),
      excluded = copy(cesa@excluded),
      mutrates = copy(cesa@mutrates),
      selection_results = copy(cesa@selection_results),
      epistasis = copy(cesa@epistasis),
      samples = copy(cesa@samples),
      mutations = copy(cesa@mutations),
      advanced = copy(cesa@advanced),
      coverage = copy(cesa@coverage),
      trinucleotide_mutation_weights = weights_out)
}
#' Save a CESAnalysis in progress
#'
#' Saves a CESAnalysis to a file by calling using base R's saveRDS function. Also updates
#' run history for reproducibility. Files saved should be reloaded with \code{load_cesa()}.
#'
#' Note that the genome reference data associated with a CESAnalysis (refset) is not
#' actually part of the CESAnalysis, so it is not saved here. (Saving this data with the
#' analysis would make file sizes too large.) When you reload the CESAnalysis, you can
#' re-associate the correct reference data.
#' @param cesa CESAnalysis to save
#' @param file filename to save to (must end in .rds)
#' @export
save_cesa = function(cesa, file) {
  if (! is(cesa, "CESAnalysis")) {
    stop("cesa should be a CESAnalysis")
  }
  if(! is(file, "character") || length(file) != 1) {
    stop("file should be a valid file path (1-length character)")
  }
  # Accept any capitalization of the extension, for consistency with
  # load_cesa()'s endsWith(tolower(file), '.rds') check.
  if(! grepl('\\.rds$', file, ignore.case = TRUE)) {
    stop("filename should end in .rds (indicates R data serialized format)")
  }
  # Record the save_cesa call in run history, then take an independent copy so
  # that dropping cached_variants below can't touch the in-memory analysis.
  # (Bug fix: copy_cesa() was previously called on the original `cesa`, which
  # discarded the just-updated run history from the saved file.)
  cesa_to_save = update_cesa_history(cesa, match.call())
  cesa_to_save = copy_cesa(cesa_to_save)
  cesa_to_save@advanced$cached_variants = NULL # reduce file size (data recalculated on reload)
  saveRDS(cesa_to_save, file)
}
#' Load a previously saved CESAnalysis
#'
#' @param file filename/path of CESAnalysis that has been saved with saveRDS, expected to end in .rds
#' @export
load_cesa = function(file) {
if (! endsWith(tolower(file), '.rds')) {
stop("Expected filename to end in .rds.", call. = F)
}
cesa = readRDS(file)
# Data tables must be reset to get them working properly
cesa@samples = setDT(cesa@samples)
# Convert old versions of sample table
if(cesa@samples[, .N] == 0) {
cesa@samples = copy(sample_table_template)
}
if(is.null(cesa@samples$maf_source)) {
# Usually use character names, or sequentially from 1.
# Use 0 here to suggest that sources aren't known.
cesa@samples$maf_source = '0'
}
if(is.null(cesa@samples$sig_analysis_grp)) {
cesa@samples[, sig_analysis_grp := NA_integer_]
if(! is.null(cesa@trinucleotide_mutation_weights$trinuc_proportion_matrix)) {
cesa@samples[rownames(cesa@trinucleotide_mutation_weights$trinuc_proportion_matrix), sig_analysis_grp := 0]
}
}
if(is.null(cesa@samples$gene_rate_grp)) {
cesa@samples[, gene_rate_grp := NA_integer_]
} else if(is.character(cesa@samples$gene_rate_grp)) {
# previously there was rate_grp_1, etc. here (now just 1, 2, ...)
cesa@samples[, gene_rate_grp := as.integer(sub('.*_', '', gene_rate_grp))]
}
cesa@maf = setDT(cesa@maf)
# Older versions lack annotation table templates
if (is.null(cesa@mutations$snv)) {
cesa@mutations = list(amino_acid_change = aac_annotation_template, snv = snv_annotation_template)
} else {
cesa@mutations$amino_acid_change = setDT(cesa@mutations$amino_acid_change, key = "aac_id")
cesa@mutations$snv = setDT(cesa@mutations$snv, key = "snv_id")
if (! is.null(cesa@mutations$aac_snv_key)) {
# if it is NULL, gets handled later
cesa@mutations$aac_snv_key = setDT(cesa@mutations$aac_snv_key, key = "aac_id")
}
}
if (! is.null(cesa@trinucleotide_mutation_weights[["signature_weight_table"]])) {
cesa@trinucleotide_mutation_weights[["signature_weight_table"]] = setDT(cesa@trinucleotide_mutation_weights[["signature_weight_table"]])
}
if (! is.null(cesa@trinucleotide_mutation_weights[["signature_weight_table_with_artifacts"]])) {
cesa@trinucleotide_mutation_weights[["signature_weight_table_with_artifacts"]] = setDT(cesa@trinucleotide_mutation_weights[["signature_weight_table_with_artifacts"]])
}
if (! is.null(cesa@trinucleotide_mutation_weights[["raw_signature_weights"]])) {
cesa@trinucleotide_mutation_weights[["raw_signature_weights"]] = setDT(cesa@trinucleotide_mutation_weights[["raw_signature_weights"]])
}
cesa@mutrates = setDT(cesa@mutrates)
cesa@selection_results = lapply(cesa@selection_results, setDT)
cesa@epistasis = lapply(cesa@epistasis, setDT)
# Now a list of signature sets. Formerly just 1 set, so put in enclosing list if necessary.
used_sig_sets = cesa@advanced$snv_signatures
if (! is.null(used_sig_sets) && length(used_sig_sets) > 0) {
if (! is.list(used_sig_sets[[1]])) {
cesa@advanced$snv_signatures = list(cesa@advanced$snv_signatures)
names(cesa@advanced$snv_signatures) = cesa@advanced$snv_signatures[[1]]$name
}
# Get each signature set's meta data.table and call setDT
lapply(lapply(cesa@advanced$snv_signatures, '[[', 'meta'), setDT)
} else {
cesa@advanced$snv_signatures = list()
}
refset_name = cesa@ref_key
if (refset_name %in% names(.official_refsets)) {
if(! require(refset_name, character.only = T, quietly = T)) {
stop("CES reference data set ", refset_name, " not installed. Run this to install:\n",
"remotes::install_github(\"Townsend-Lab-Yale/", refset_name, "\")")
}
req_version = .official_refsets[[refset_name]]
actual_version = packageVersion(refset_name)
if (actual_version < req_version) {
stop("CES reference data set ", refset_name, " is version ", actual_version, ", but your version of cancereffectsizeR requires at least ",
"version ", req_version, ".\nRun this to update:\n",
"remotes::install_github(\"Townsend-Lab-Yale/", refset_name, "\")")
}
previous_version = cesa@advanced$refset_version
if (is.null(previous_version) && refset_name == 'ces.refset.hg38') {
msg = paste0("This CESAnalysis was likely created with an older version of ces.refset.hg38 with known issues. ",
"You should create a new CESAnalysis and start over if you want to continue analysis.")
warning(pretty_message(msg, emit = F))
} else if(! is.null(previous_version)) {
previous_version = as.package_version(previous_version)
if (previous_version$major != actual_version$major | previous_version$minor != actual_version$minor) {
msg = paste0("This CESAnalysis was annotated with data from ", refset_name, ' ', previous_version, " and you currently have ",
"version ", actual_version, ". You should create a new CESAnalysis and start over if you want to continue analysis.")
warning(pretty_message(msg, emit = F))
}
}
cesa@ref_data_dir = system.file("refset", package = refset_name)
} else {
if (! dir.exists(cesa@ref_data_dir)) {
cesa@ref_data_dir = NA_character_
msg = paste0("Reference data not found at ", cesa@ref_data_dir, ". You can view the data in this CESAnalysis, ",
"but many functions will not work as expected. If this is a custom reference data set, ",
"you can fix the issue by using set_refset_dir() to associate the path to your data with the analysis.")
warning(pretty_message(msg, emit = F))
}
}
if (! is.na(cesa@ref_data_dir) & ! cesa@ref_key %in% ls(.ces_ref_data)) {
.ces_ref_data[[cesa@ref_key]] = preload_ref_data(cesa@ref_data_dir)
}
current_version = packageVersion("cancereffectsizeR")
previous_version = cesa@advanced$version
if (as.character(current_version) != as.character(previous_version)) {
warning("Version change: CESAnalysis previously created in CES ", previous_version, ".\n",
"Currently running version ", current_version, '.', call. = F)
cesa@advanced$version = paste(previous_version, current_version, sep = '/' )
cesa@run_history = c(cesa@run_history, paste0("\n[Now running CES ", current_version, ']'))
}
cesa = update_cesa_history(cesa, match.call())
# Before v2.2, nearest_pid not annotated; add it if needed
# This would crash on custom refsets with missing data directories, but unlikely
# situation will ever arise with a pre-2.2 refset
if (! "nearest_pid" %in% names(cesa@mutations$snv)) {
snv_and_gene = cesa@mutations$snv[, .(gene = unlist(genes)), by = "snv_id"]
to_lookup = snv_and_gene[, .(gene = unique(gene))]
RefCDS = get_ref_data(cesa, "RefCDS")
to_lookup[, pid := sapply(RefCDS[gene], '[[', 'protein_id')]
snv_and_gene[to_lookup, pid := pid, on = 'gene']
snv_and_pid = snv_and_gene[, .(nearest_pid = list(pid)), by = "snv_id"]
cesa@mutations$snv[snv_and_pid, nearest_pid := nearest_pid, on = "snv_id"]
}
if (is.null(cesa@mutations$aac_snv_key)) {
if (cesa@mutations$amino_acid_change[, .N] == 0) {
cesa@mutations$aac_snv_key = copy(aac_snv_key_template)
} else {
aac_snv_key = cesa@mutations$snv[, .(aac_id = unlist(assoc_aac)), by = 'snv_id'][! is.na(aac_id)]
setcolorder(aac_snv_key, c('aac_id', 'snv_id'))
aac_snv_key[, multi_anno_site := uniqueN(aac_id) > 1, by = 'snv_id']
setkey(aac_snv_key, 'aac_id')
cesa@mutations$aac_snv_key = aac_snv_key
cesa@mutations$snv[, assoc_aac := NULL]
cesa@maf[, assoc_aac := NULL]
}
}
# cache variant table for easy user access
if(length(cesa@mutations$snv[, .N]) > 0) {
cesa@advanced$cached_variants = select_variants(cesa)
# Annotate top coding implications of each variant, based on select_variants() tiebreakers
consequences = cesa@advanced$cached_variants[variant_type != 'snv', .(snv_id = unlist(constituent_snvs), variant_name, gene), by = 'variant_id']
cesa@maf[consequences, c("top_gene", "top_consequence") := list(gene, variant_name), on = c(variant_id = 'snv_id')]
}
# Pre-2.6.4, cds_refset status wasn't recorded
if(is.null(cesa@advanced$cds_refset)) {
cds_refset = NULL
if(refset_name == 'ces.refset.hg38') {
cds_refset = TRUE
} else if (refset_name == 'ces.refset.hg19') {
cds_refset = FALSE
} else {
# A view-only (no reference data) CESAnalysis won't have gr_genes available
gr_genes = .ces_ref_data[[cesa@ref_key]]$gr_genes
if(! is.null(gr_genes)) {
cds_refset = "gene" %in% names(GenomicRanges::mcols(gr_genes))
}
}
cesa@advanced$cds_refset = cds_refset
}
return(cesa)
}
#' Set reference data directory
#'
#' When working with custom reference data or loading a previously saved CESAnalysis in a
#' new environment, use this function to reassociate the location of reference data with
#' the analysis. (If \code{load_cesa()} didn't give you a warning when loading your
#' analysis, you probably don't need to use this function.)
#'
#' @param cesa CESAnalysis
#' @param dir path to data directory
#' @export
set_refset_dir = function(cesa, dir) {
  # Validate the CESAnalysis first: the original code read cesa@ref_key before
  # confirming the class, which crashes with an unhelpful "@ applied to
  # non-S4 object" error on bad input.
  if(! is(cesa, "CESAnalysis")) {
    stop("cesa should be a CESAnalysis")
  }
  # Built-in refsets are located via their installed package; overriding their
  # directory would desynchronize the analysis from the packaged data.
  if(cesa@ref_key %in% names(.official_refsets)) {
    stop("You can't set the reference directory on a built-in CES reference data set.")
  }
  if(! is(dir, "character") || length(dir) != 1) {
    stop("dir should be a path to a directory")
  }
  if(! dir.exists(dir)) {
    stop("Directory not found at ", dir)
  }
  dir = normalizePath(dir)
  cesa@ref_data_dir = dir
  # Load the reference data into the package-level cache and record its origin.
  .ces_ref_data[[cesa@ref_key]] = preload_ref_data(dir)
  .ces_ref_data[[cesa@ref_key]][["data_dir"]] = dir
  cesa = update_cesa_history(cesa, match.call())
  return(cesa)
}
#' View data loaded into CESAnalysis
#'
#' returns a data.table containing MAF records used in the given CESAnalysis
#' @param cesa CESAnalysis object
#' @export
maf_records = function(cesa = NULL) {
  # Refuse anything that is not a CESAnalysis object.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: maf_records(cesa), where cesa is a CESAnalysis")
  }
  maf_table = cesa@maf
  if (nrow(maf_table) == 0) {
    stop("No MAF data has been loaded")
  }
  # Return a copy so callers can't modify the CESAnalysis by reference.
  return(copy(maf_table))
}
#' View excluded MAF data
#'
#' returns a data.table containing MAF records that were excluded from the given CESAnalysis
#' @param cesa CESAnalysis object
#' @export
excluded_maf_records = function(cesa = NULL) {
  # Refuse anything that is not a CESAnalysis object.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: excluded_maf_records(cesa), where cesa is a CESAnalysis")
  }
  n_kept = nrow(cesa@maf)
  n_excluded = nrow(cesa@excluded)
  # No data loaded at all: nothing could have been excluded yet.
  if (n_kept == 0 && n_excluded == 0) {
    stop("No MAF data has been loaded yet, so naturally no records have been excluded.")
  }
  if (n_excluded == 0) {
    message("Returned an empty data table since no records have been excluded.")
  }
  # Copy prevents by-reference modification of the stored table.
  return(copy(cesa@excluded))
}
#' View sample metadata
#'
#' returns a data.table with info on all samples in the CESAnalysis
#' @param cesa CESAnalysis object
#' @export
get_sample_info = function(cesa = NULL) {
  # Validate input before touching S4 slots.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: get_sample_info(cesa), where cesa is a CESAnalysis")
  }
  samples = cesa@samples
  # Optional grouping columns are hidden when they carry no information (all NA).
  optional_cols = c("sig_analysis_grp", "gene_rate_grp")
  is_uninformative = vapply(optional_cols,
                            function(col) all(is.na(samples[[col]])),
                            logical(1))
  to_include = setdiff(names(samples), optional_cols[is_uninformative])
  # Copy so the caller can't modify the stored table by reference.
  return(copy(samples[, .SD, .SDcols = to_include]))
}
#' Get estimated relative rates of trinucleotide-specific SNV mutation
#'
#' @param cesa CESAnalysis object
#' @export
get_trinuc_rates = function(cesa = NULL) {
  # Validate input before touching S4 slots.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: get_trinuc_rates(cesa), where cesa is a CESAnalysis")
  }
  # Convert the stored proportion matrix to a data.table, keeping the
  # sample IDs (rownames) as a regular column.
  rate_matrix = cesa@trinucleotide_mutation_weights$trinuc_proportion_matrix
  return(as.data.table(rate_matrix, keep.rownames = "Unique_Patient_Identifier"))
}
#' Get table of signature attributions
#'
#' View SNV signature attributions associated with CESAnalysis samples.
#'
#'
#' Use raw = TRUE to get signature attributions as produced by the signature extraction
#' tool (or as provided by the user with set_signature_weights()), without any of the
#' adjustments that are made by cancereffectsizeR's trinuc_mutation_rates().
#'
#' @param cesa CESAnalysis object
#' @param raw Default FALSE. When TRUE, return raw signature attributions as found by the
#' signature extraction tool. Format may vary by tool.
#' @param artifacts_zeroed Deprecated.
#' @return A data.table of signature attributions for each sample. By default, these are
#' estimated relative weights of biologically-associated signatures (i.e., non-artifact
#' signatures) that sum to 1.
#' @export
get_signature_weights = function(cesa = NULL, raw = FALSE, artifacts_zeroed = NULL) {
  # Validate inputs. (The original performed the CESAnalysis check twice in a
  # row; the duplicate has been removed.)
  if(! is(cesa, "CESAnalysis")) {
    stop("\nUsage: get_signature_weights(cesa), where cesa is a CESAnalysis")
  }
  if(! is.logical(raw) || length(raw) != 1) {
    stop("raw should be T/F.")
  }
  if(! is.null(artifacts_zeroed)) {
    stop("artifacts_zeroed is deprecated. Consider using raw = TRUE.")
  }
  # raw = TRUE returns attributions exactly as produced by the extraction tool
  # (or as supplied via set_signature_weights()); otherwise return the adjusted
  # weight table. Copies protect the stored tables from by-reference edits.
  if (raw == TRUE) {
    return(copy(cesa@trinucleotide_mutation_weights$raw_signature_weights))
  } else {
    return(copy(cesa@trinucleotide_mutation_weights$signature_weight_table))
  }
}
#' Get table of neutral gene mutation rates
#'
#' @param cesa CESAnalysis object
#' @export
get_gene_rates = function(cesa = NULL) {
  # Validate input before touching S4 slots.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: get_gene_rates(cesa), where cesa is a CESAnalysis")
  }
  rates = copy(cesa@mutrates)
  # When every sample belongs to the single default rate group (group 1),
  # simplify the column name from "rate_grp_1" to just "rate".
  only_default_group = cesa@samples[, identical(unique(gene_rate_grp), 1L)]
  if (only_default_group) {
    setnames(rates, 'rate_grp_1', 'rate')
  }
  return(rates)
}
#' View results from ces_variant
#'
#' returns a list of ces_variant() results tables, with variant annotations added
#'
#' @param cesa CESAnalysis object
#' @export
snv_results = function(cesa = NULL) {
  # Validate input before touching S4 slots.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: snv_results(cesa), where cesa is a CESAnalysis")
  }
  results = cesa@selection_results
  # Copy so callers can't alter the stored results by reference.
  return(copy(results))
}
#' View output from epistasis functions
#'
#' returns a list of data tables with results from epistasis functions
#' @param cesa CESAnalysis object
#' @export
epistasis_results = function(cesa = NULL) {
  # Validate input before touching S4 slots.
  if (! is(cesa, "CESAnalysis")) {
    stop("\nUsage: epistasis_results(cesa), where cesa is a CESAnalysis")
  }
  results = cesa@epistasis
  # An empty list means no epistasis function has been run yet.
  if (length(results) == 0) {
    stop("No results yet from epistasis functions in this CESAnalysis")
  }
  return(copy(results))
}
#' clean_granges_for_cesa
#'
#' Tries to format an input GRanges object to be compatible with a CESAnalysis reference
#' genome. Optionally, also applies padding to start and end positions of ranges, stopping
#' at chromosome ends. Either stops with an error or returns a clean granges object.
#'
#' @param cesa CESAnalysis, or NULL, in which case \code{refset_env} supplies the genome.
#' @param gr GRanges object
#' @param padding How many bases to expand start and end of each position
#' @param refset_env Refset environment providing \code{genome} (a BSgenome) and
#'   \code{supported_chr}; only consulted when \code{cesa} is NULL.
#' @return A sorted, reduced, unstranded GRanges object with metadata columns dropped,
#'   restricted to supported chromosomes and carrying the reference genome's seqinfo.
#' @keywords internal
clean_granges_for_cesa = function(cesa = NULL, gr = NULL, padding = 0, refset_env = NULL) {
# Pull the genome (BSgenome) and supported chromosome list either from the
# CESAnalysis or, when cesa is NULL, from the supplied refset environment.
if(is.null(cesa)) {
bsg = refset_env$genome
supported_chr = refset_env$supported_chr
} else {
bsg = get_cesa_bsg(cesa)
supported_chr = cesa@advanced$genome_info$supported_chr
}
# padding must be a single non-negative whole number (integer-valued numeric is allowed)
stopifnot(is(padding, "numeric"),
length(padding) == 1,
padding >= 0,
padding - as.integer(padding) == 0)
# try to make gr style/seqlevels match bsg (if this fails, possibly the genome build does not match)
# For now, suppressing "more than one best sequence renaming map"; tends to appear on single-chr inputs
withCallingHandlers(
{
GenomeInfoDb::seqlevelsStyle(gr) = GenomeInfoDb::seqlevelsStyle(bsg)[1]
},
warning = function(w) {
# Muffle only the two known-benign warnings; anything else propagates normally.
if (grepl("more than one best sequence renaming map", conditionMessage(w))) {
invokeRestart("muffleWarning")
} else if(grepl("cannot switch some of.*to .*style", conditionMessage(w))) {
invokeRestart("muffleWarning")
}
}
)
# msg is assigned in this function's frame before the risky assignments run,
# so the error/warning handlers below can reference it when they fire.
tryCatch({
msg = paste0("An input granges (or converted BED file) does't seem compatible with the current reference genome.\n",
"Make sure it uses the same genome assembly. It may also help to subset to just the\n",
"primary chromosomes, if any obscure contigs are present in your regions.\n",
"Original warning/error:")
GenomeInfoDb::seqlevels(gr) = GenomeInfoDb::seqlevels(bsg)
GenomeInfoDb::seqinfo(gr) = GenomeInfoDb::seqinfo(bsg)
}, error = function(e) {
message(msg)
stop(conditionMessage(e))
}, warning = function(w) {
message(msg)
stop(conditionMessage(w))
})
# drop any metadata
GenomicRanges::mcols(gr) = NULL
# sort, reduce, unstrand
gr = GenomicRanges::reduce(GenomicRanges::sort(gr), drop.empty.ranges = T)
GenomicRanges::strand(gr) = "*"
# require genome name to match the reference genome (too many potential errors if we allow anonymous or mismatched genome)
expected_genome = GenomeInfoDb::genome(bsg)[1]
gr_genome = GenomeInfoDb::genome(gr)[1]
if (expected_genome != gr_genome) {
stop(paste0("The genome name of an input granges object (", gr_genome, ") does not match the current reference genome (",
expected_genome, ")."))
}
# subset to just supported contigs
gr = gr[as.character(GenomeInfoDb::seqnames(gr)) %in% supported_chr]
gr = GenomeInfoDb::keepSeqlevels(gr, supported_chr)
if (padding > 0) {
# Suppress the out-of-range warning since we'll trim afterwards
withCallingHandlers(
{
GenomicRanges::start(gr) = GenomicRanges::start(gr) - padding
GenomicRanges::end(gr) = GenomicRanges::end(gr) + padding
}, warning = function(w)
{
if (grepl("out-of-bound range", conditionMessage(w))) {
invokeRestart("muffleWarning")
}
}
)
# Trim to chromosome bounds, then merge any ranges the padding made overlap.
gr = GenomicRanges::reduce(GenomicRanges::trim(gr))
}
return(gr)
}
# Append the deparsed call to the analysis's run history, but only when
# recording has been enabled in the advanced settings.
update_cesa_history = function(cesa, comm) {
  recording_on = identical(cesa@advanced$recording, TRUE)
  if (recording_on) {
    # width.cutoff = 500 keeps long calls on as few lines as possible
    call_text = deparse(comm, width.cutoff = 500)
    cesa@run_history = c(cesa@run_history, call_text)
  }
  return(cesa)
}
|
c019bcb6f5069218353e1479c31035b3d4da4b32
|
c8daf8580452d43e76da593a44a983beb94fae70
|
/sendjs/ui.R
|
e198855fea57d74a4940656015aac2fddead10de
|
[] |
no_license
|
marcionicolau/testapp
|
2e537e8296b2669bb2758116862a2947c7fedca0
|
c182950ad15fb9b8e924794b54a469a4a5504557
|
refs/heads/master
| 2021-01-23T20:57:03.182316
| 2013-05-20T18:13:02
| 2013-05-20T18:13:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
ui.R
|
# Minimal Shiny UI: a sidebar layout with a single slider input and
# placeholder text in the main panel.
shinyUI(pageWithSidebar(
headerPanel("Shiny Widgets"),
sidebarPanel(
# NOTE(review): the slider's label says it controls other inputs, but no
# other inputs are defined in this UI -- presumably a trimmed-down demo.
sliderInput("controller", "This slider controls other inputs:",
min = 1, max = 20, value = 15)
),
mainPanel(
h2("Filler text")
)
))
|
f8dc7582ef95563d093c22c8135c3467595b54d5
|
e521a623151f7a838e7a3c79efa52370f61d0251
|
/R/BATD.R
|
634f0da86eac74d129a5af823606139727a7bdeb
|
[] |
no_license
|
HeJasonL/BATD
|
0a1128d5fb6254b66b54e2b3e290de8800c2dcd0
|
5bf5f667ca1c1a34b0b8a8045c0914f1c2b97c78
|
refs/heads/master
| 2023-08-29T13:42:26.723249
| 2023-08-05T07:08:53
| 2023-08-05T07:08:53
| 241,194,462
| 3
| 2
| null | 2023-09-05T15:22:14
| 2020-02-17T19:51:44
|
R
|
UTF-8
|
R
| false
| false
| 1,073
|
r
|
BATD.R
|
#' Batch analysis of tactile data 'BATD' package
#'
#' The scripts contained within this packages are used to analyze data recorded by Cortical Metrics software.
#' The extraction scripts are named BATD_extract_XX, where "XX" is either NF (data collected from the CM4) or OF (data collected from the CM5). Once data have been extracted, they can be analyzed with BATD_analyze (a single participant) or BATD_analyze_all (batch analysis of multiple participants). The data can also be plotted, using ggplot-based functions, with BATD_plot (a single participant) or BATD_plot_all (multiple participants).
#' Further description of how to use these functions can be accessed by placing a question mark prior to the function e.g., ?BATD_analyze
#'
#' @docType package
#'
#' @author Jason He \email{jasonhe93@gmail.com}
#'
#' @name BATD
NULL
|
4091ad94e42e2b4aee561d411079ff1b63eb825b
|
255bde5d965a626504175d1800d22896f6820eed
|
/R/NI.Array.R
|
24a172d54f90afb38ae3befa28cc3f652ccfd239
|
[] |
no_license
|
cran/binGroup
|
f0345d60454b835216ba4d29bbcbd193f3557f81
|
288a6aa03e33afa530fc1fd1eb3ad39f5b25427d
|
refs/heads/master
| 2021-01-22T11:59:06.761427
| 2018-08-24T10:24:26
| 2018-08-24T10:24:26
| 17,694,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,499
|
r
|
NI.Array.R
|
# Start NI.Array() function
###################################################################
#' @title Find the optimal testing configuration for non-informative
#' array testing without master pooling
#'
#' @description Find the optimal testing configuration (OTC) for
#' non-informative array testing without master pooling and
#' calculate the associated operating characteristics.
#'
#' @param p the probability of disease, which can be specified as an overall
#' probability of disease or a homogeneous vector of individual probabilities.
#' @param group.sz a single group size (representing the row/column size)
#' for which to calculate the operating characteristics, or a range of group
#' (row/column) sizes over which to find the OTC.
#' The details of group size specification are given under 'Details'.
#' @inheritParams OTC
#'
#' @details This function finds the OTC and computes the associated
#' operating characteristics for non-informative array testing without
#' master pooling. Array testing without master pooling involves
#' amalgamating specimens in rows and columns for the first stage of testing.
#' This function uses only square arrays, which is the way array-based group
#' testing is carried out in most real-world applications. Operating
#' characteristics calculated are expected number of tests, pooling sensitivity,
#' pooling specificity, pooling positive predictive value, and pooling negative
#' predictive value for the algorithm. See Hitt et al. (2018) at
#' \url{http://chrisbilder.com/grouptesting} or Kim et al. (2007)
#' for additional details on the implementation of non-informative array
#' testing without master pooling.
#'
#' The value(s) specified by \kbd{group.sz} represent the initial group
#' (row/column) size. If a single value is provided for \kbd{group.sz}, operating
#' characteristics will be calculated and no optimization will be performed.
#' If a range of group sizes is specified, the OTC will be found over all
#' group sizes.
#'
#' The displayed pooling sensitivity, pooling specificity, pooling positive
#' predictive value, and pooling negative predictive value are weighted
#' averages of the corresponding individual accuracy measures for all
#' individuals within the initial group for a hierarchical algorithm.
#' Expressions for these averages are provided in the Supplementary
#' Material for Hitt et al. (2018). These expressions are based on accuracy
#' definitions given by Altman and Bland (1994a, 1994b).
#'
#' @return A list containing:
#' \item{prob}{the probability of disease, as specified by the user.}
#' \item{Se}{the sensitivity of the diagnostic test.}
#' \item{Sp}{the specificity of the diagnostic test.}
#' \item{opt.ET, opt.MAR, opt.GR}{a list for each objective function specified
#' by the user, containing:
#' \describe{
#' \item{OTC}{a list specifying elements of the optimal testing configuration,
#' which include:
#' \describe{
#' \item{Array.dim}{the row/column size for the first stage of testing.}
#' \item{Array.sz}{the overall array size (the square of the row/column size).}}}
#' \item{p.mat}{the matrix of individual probabilities.}
#' \item{ET}{the expected testing expenditure for the OTC.}
#' \item{value}{the value of the objective function per individual.}
#' \item{PSe}{the overall pooling sensitivity for the algorithm.
#' Further details are given under 'Details'.}
#' \item{PSp}{the overall pooling specificity for the algorithm.
#' Further details are given under 'Details'.}
#' \item{PPPV}{the overall pooling positive predictive value for the algorithm.
#' Further details are given under 'Details'.}
#' \item{PNPV}{the overall pooling negative predictive value for the algorithm.
#' Further details are given under 'Details'.}}}
#'
#' @author Brianna D. Hitt
#'
#' @references
#' \insertRef{Altman1994a}{binGroup}
#'
#' \insertRef{Altman1994b}{binGroup}
#'
#' \insertRef{Graff1972}{binGroup}
#'
#' \insertRef{Hitt2018}{binGroup}
#'
#' \insertRef{Kim2007}{binGroup}
#'
#' \insertRef{Malinovsky2016}{binGroup}
#'
#' @seealso
#' \code{\link{Inf.Array}} for informative array testing without master pooling,
#' \code{\link{NI.A2M}} for non-informative array testing with master pooling, and
#' \code{\link{OTC}} for finding the optimal testing configuration for a number
#' of standard group testing algorithms.
#'
#' \url{http://chrisbilder.com/grouptesting}
#'
#' @family OTC functions
#'
#' @examples
#' # Find the OTC for non-informative array testing
#' # without master pooling over a range of group
#' # (row/column) sizes.
#' # This example takes less than 1 second to run.
#' # Estimated running time was calculated using a
#' # computer with 16 GB of RAM and one core of an
#' # Intel i7-6500U processor.
#' NI.Array(p=0.04, Se=0.95, Sp=0.95, group.sz=3:10,
#' obj.fn=c("ET", "MAR"))
#'
#' # Calculate the operating characteristics for a specified
#' # group (row/column) size for non-informative array
#' # testing without master pooling.
#' # This example takes less than 1 second to run.
#' # Estimated running time was calculated using a
#' # computer with 16 GB of RAM and one core of an
#' # Intel i7-6500U processor.
#' NI.Array(p=rep(0.01, 64), Se=0.90, Sp=0.90, group.sz=8,
#' obj.fn=c("ET", "MAR", "GR"),
#' weights=matrix(data=c(1,1,10,10,100,100),
#' nrow=3, ncol=2, byrow=TRUE))
# Brianna Hitt - 05-01-17
# Updated: Brianna Hitt - 06-20-18
NI.Array <- function(p, Se, Sp, group.sz, obj.fn, weights=NULL){
start.time<-proc.time()
# Candidate row/column sizes to evaluate; each I yields an I x I array.
set.of.I <- group.sz
# One results row per group size; the 18 columns are:
# 1: p, 2: Se, 3: Sp, 4: I (row/col size), 5: N = I^2, 6: ET, 7: ET/N,
# 8: MAR, 9-14: GR1/N..GR6/N, 15: PSe, 16: PSp, 17: PPPV, 18: PNPV
save.it <- matrix(data=NA, nrow=length(set.of.I), ncol=18)
count <- 1
for(I in set.of.I){
N <- I^2
# build a matrix of probabilities
# this is the same for an overall probability p and for a vector p
# (only p[1] is used, since a homogeneous probability is assumed)
p.mat <- matrix(data=p[1], nrow=I, ncol=I)
# call Array.Measures to calculate descriptive measures for the given array size
# (Array.Measures is a package-internal helper defined elsewhere)
save.info <- Array.Measures(p=p.mat, se=Se, sp=Sp)
# extract accuracy measures for each individual
ET <- save.info$T
PSe.mat <- save.info$PSe
PSp.mat <- save.info$PSp
# MAR objective value is only computed when requested in obj.fn
if("MAR" %in% obj.fn){
MAR <- MAR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat)
} else{MAR <- NA}
# calculate overall accuracy measures (probability-weighted averages over individuals)
PSe <- sum(p.mat*PSe.mat)/sum(p.mat)
PSp <- sum((1-p.mat)*(PSp.mat))/sum(1-p.mat)
PPPV <- sum(p.mat*PSe.mat)/sum(p.mat*PSe.mat + (1-p.mat)*(1-PSp.mat))
PNPV <- sum((1-p.mat)*PSp.mat)/sum((1-p.mat)*PSp.mat + p.mat*(1-PSe.mat))
# for each row in the matrix of weights, calculate the GR function
# (up to six weight rows are supported; absent rows yield NA)
if(is.null(dim(weights))){
GR1 <- NA
GR2 <- NA
GR3 <- NA
GR4 <- NA
GR5 <- NA
GR6 <- NA
} else{
GR1 <- GR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat, D1=weights[1,1], D2=weights[1,2])
if(dim(weights)[1]>=2){
GR2 <- GR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat, D1=weights[2,1], D2=weights[2,2])
} else{GR2 <- NA}
if(dim(weights)[1]>=3){
GR3 <- GR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat, D1=weights[3,1], D2=weights[3,2])
} else{GR3 <- NA}
if(dim(weights)[1]>=4){
GR4 <- GR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat, D1=weights[4,1], D2=weights[4,2])
} else{GR4 <- NA}
if(dim(weights)[1]>=5){
GR5 <- GR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat, D1=weights[5,1], D2=weights[5,2])
} else{GR5 <- NA}
if(dim(weights)[1]>=6){
GR6 <- GR.func(ET=ET, p.vec=p.mat, PSe.vec=PSe.mat, PSp.vec=PSp.mat, D1=weights[6,1], D2=weights[6,2])
} else{GR6 <- NA}
}
# store all results for this group size; per-individual objective values
# (columns 7-14) are divided by the array size N
save.it[count,] <- c(p[1], Se, Sp, I, N, ET, ET/N, MAR, GR1/N, GR2/N, GR3/N, GR4/N, GR5/N, GR6/N, PSe, PSp, PPPV, PNPV)
cat("Row/Column Size=", I, ", Array Size=", N, "\n", sep="")
count <- count + 1
}
# find the optimal testing configuration, over all array sizes considered
# (each result keeps columns 1-6, the objective value, and the accuracy measures)
result.ET <- save.it[save.it[,7]==min(save.it[,7]),c(1:6,7,15:ncol(save.it))]
result.MAR <- save.it[save.it[,8]==min(save.it[,8]),c(1:6,8,15:ncol(save.it))]
result.GR1 <- save.it[save.it[,9]==min(save.it[,9]),c(1:6,9,15:ncol(save.it))]
result.GR2 <- save.it[save.it[,10]==min(save.it[,10]),c(1:6,10,15:ncol(save.it))]
result.GR3 <- save.it[save.it[,11]==min(save.it[,11]),c(1:6,11,15:ncol(save.it))]
result.GR4 <- save.it[save.it[,12]==min(save.it[,12]),c(1:6,12,15:ncol(save.it))]
result.GR5 <- save.it[save.it[,13]==min(save.it[,13]),c(1:6,13,15:ncol(save.it))]
result.GR6 <- save.it[save.it[,14]==min(save.it[,14]),c(1:6,14,15:ncol(save.it))]
# rebuild the probability matrix for each optimal configuration
# (index 1 = p, index 4 = optimal row/column size)
p.mat.ET <- matrix(data=result.ET[1], nrow=result.ET[4], ncol=result.ET[4])
if("MAR" %in% obj.fn){
p.mat.MAR <- matrix(data=result.MAR[1], nrow=result.MAR[4], ncol=result.MAR[4])
} else{p.mat.MAR <- NA}
if(is.null(dim(weights))){
p.mat.GR1 <- NA
p.mat.GR2 <- NA
p.mat.GR3 <- NA
p.mat.GR4 <- NA
p.mat.GR5 <- NA
p.mat.GR6 <- NA
} else{
p.mat.GR1 <- matrix(data=result.GR1[1], nrow=result.GR1[4], ncol=result.GR1[4])
if(dim(weights)[1]>=2){
p.mat.GR2 <- matrix(data=result.GR2[1], nrow=result.GR2[4], ncol=result.GR2[4])
} else{p.mat.GR2 <- NA}
if(dim(weights)[1]>=3){
p.mat.GR3 <- matrix(data=result.GR3[1], nrow=result.GR3[4], ncol=result.GR3[4])
} else{p.mat.GR3 <- NA}
if(dim(weights)[1]>=4){
p.mat.GR4 <- matrix(data=result.GR4[1], nrow=result.GR4[4], ncol=result.GR4[4])
} else{p.mat.GR4 <- NA}
if(dim(weights)[1]>=5){
p.mat.GR5 <- matrix(data=result.GR5[1], nrow=result.GR5[4], ncol=result.GR5[4])
} else{p.mat.GR5 <- NA}
if(dim(weights)[1]>=6){
p.mat.GR6 <- matrix(data=result.GR6[1], nrow=result.GR6[4], ncol=result.GR6[4])
} else{p.mat.GR6 <- NA}
}
# create a list of results for each objective function
# (indices into result.*: 4 = row/col size, 5 = array size, 6 = ET,
#  7 = objective value, 8-11 = PSe, PSp, PPPV, PNPV)
opt.ET <- list("OTC"=list("Array.dim"=result.ET[4], "Array.sz"=result.ET[5]), "p.mat"=p.mat.ET, "ET"=result.ET[6], "value"=result.ET[7], "PSe"=result.ET[8], "PSp"=result.ET[9], "PPPV"=result.ET[10], "PNPV"=result.ET[11])
opt.MAR <- list("OTC"=list("Array.dim"=result.MAR[4], "Array.sz"=result.MAR[5]), "p.mat"=p.mat.MAR, "ET"=result.MAR[6], "value"=result.MAR[7], "PSe"=result.MAR[8], "PSp"=result.MAR[9], "PPPV"=result.MAR[10], "PNPV"=result.MAR[11])
opt.GR1 <- list("OTC"=list("Array.dim"=result.GR1[4], "Array.sz"=result.GR1[5]), "p.mat"=p.mat.GR1, "ET"=result.GR1[6], "value"=result.GR1[7], "PSe"=result.GR1[8], "PSp"=result.GR1[9], "PPPV"=result.GR1[10], "PNPV"=result.GR1[11])
opt.GR2 <- list("OTC"=list("Array.dim"=result.GR2[4], "Array.sz"=result.GR2[5]), "p.mat"=p.mat.GR2, "ET"=result.GR2[6], "value"=result.GR2[7], "PSe"=result.GR2[8], "PSp"=result.GR2[9], "PPPV"=result.GR2[10], "PNPV"=result.GR2[11])
opt.GR3 <- list("OTC"=list("Array.dim"=result.GR3[4], "Array.sz"=result.GR3[5]), "p.mat"=p.mat.GR3, "ET"=result.GR3[6], "value"=result.GR3[7], "PSe"=result.GR3[8], "PSp"=result.GR3[9], "PPPV"=result.GR3[10], "PNPV"=result.GR3[11])
opt.GR4 <- list("OTC"=list("Array.dim"=result.GR4[4], "Array.sz"=result.GR4[5]), "p.mat"=p.mat.GR4, "ET"=result.GR4[6], "value"=result.GR4[7], "PSe"=result.GR4[8], "PSp"=result.GR4[9], "PPPV"=result.GR4[10], "PNPV"=result.GR4[11])
opt.GR5 <- list("OTC"=list("Array.dim"=result.GR5[4], "Array.sz"=result.GR5[5]), "p.mat"=p.mat.GR5, "ET"=result.GR5[6], "value"=result.GR5[7], "PSe"=result.GR5[8], "PSp"=result.GR5[9], "PPPV"=result.GR5[10], "PNPV"=result.GR5[11])
opt.GR6 <- list("OTC"=list("Array.dim"=result.GR6[4], "Array.sz"=result.GR6[5]), "p.mat"=p.mat.GR6, "ET"=result.GR6[6], "value"=result.GR6[7], "PSe"=result.GR6[8], "PSp"=result.GR6[9], "PPPV"=result.GR6[10], "PNPV"=result.GR6[11])
# create a list of results, including all objective functions
opt.all <- list("opt.ET"=opt.ET, "opt.MAR"=opt.MAR, "opt.GR1"=opt.GR1, "opt.GR2"=opt.GR2,
"opt.GR3"=opt.GR3, "opt.GR4"=opt.GR4, "opt.GR5"=opt.GR5, "opt.GR6"=opt.GR6)
# remove any objective functions not requested by the user
# (unrequested objectives carry NA values, so filtering on x$ET drops them)
opt.req <- Filter(function(x) !is.na(x$ET), opt.all)
# time.it is a package-internal helper that reports elapsed time since start.time
time.it(start.time)
c("prob"=list(p), "Se"=Se, "Sp"=Sp, opt.req)
}
###################################################################
|
d386a8e315143cf6f7e3d2dbe3d9d3e75c4e5289
|
f39bfc0af440bdffdc56f9bc3aa34f1eaf07ee52
|
/ca_labels/ca_labels.R
|
3846e23531cfa1b6c25807c765937eaa084b0199
|
[] |
no_license
|
bazini627/Randas
|
708e8591960b4472090677c5d720419d233c1db4
|
ebc48709135c66391150696a4547e9bb453b1273
|
refs/heads/master
| 2021-05-26T22:14:09.573151
| 2020-04-09T23:06:59
| 2020-04-09T23:06:59
| 254,176,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,299
|
r
|
ca_labels.R
|
library(dplyr)
library(sf)
library(tmap)
# Read in CA counties GeoJSON (Channel Islands already removed from this file)
ca <- st_read("../data/caCountiesNoChannelIslands.geojson")
head(ca)
# Drop and rename some columns in ca dataframe, only want to keep the 3rd and 6th column
# (the 3rd column, "NAME", is renamed "county"; the geometry column is retained by sf)
ca <-select(ca,-c(1,2,4,5)) %>%
rename("county" = "NAME")
head(ca)
# Create outline of CA for possible use
# NOTE(review): ca_outline is built but never used below -- kept for possible later use
ca_outline <- st_union(ca)
# Vector to subset dataframe with
filter_vector <- c('Siskiyou', 'Humboldt', 'Mendocino', 'Napa', 'Solano', 'Sonoma', 'Marin', 'Contra Costa')
# Subset ca data frame from filter_vector
ca_subset <- subset(ca, county %in% filter_vector)
# Plot a map
# main ca layer: all counties filled white with thin black borders
ca_map <- tm_shape(ca) +
tm_fill("white") +
tm_borders('black', lwd = .5) +
# subsetted ca layer: highlight the selected counties in light blue
tm_shape(ca_subset) +
tm_fill("lightblue") +
tm_borders('black', lwd=.5) +
# label every county with its name
tm_shape(ca) +
tm_text(
'county',
size = .5,
col = 'black'
) +
# specify some layout parameters (no frame, parchment-colored background)
tm_layout(frame = FALSE, bg.color = '#f3ebe1')
# Save map to file and specify the size be 8.5x11"
tmap_save(ca_map, "../data/output/r_ca_labels_color_polys.png", 8.5,11)
# Save to SVG so we can then adjust labels in something like InkScape
tmap_save(ca_map, "../data/output/r_ca_labels_color_polys.svg", 8.5,11)
|
1aa85f60921f7966fdf5e45137aace388344e170
|
d257d0b55a7dd4569c419b4196ff79d5f1fde132
|
/Aimee/DownLoadData/server.R
|
c6a9e3452a6e7bc1cb5567f1c8f8ee254c449d32
|
[] |
no_license
|
RobHayward/Shiny
|
4720e7444ded2a6de0ed6412f4fd1bb404333f1d
|
bef01e4e92044da73443c29e60f46d684e1abcd8
|
refs/heads/master
| 2021-01-09T05:50:42.200166
| 2018-09-20T09:34:44
| 2018-09-20T09:34:44
| 22,253,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 879
|
r
|
server.R
|
library(shiny)
# Shiny server: simulates one value per day over the user-chosen date range,
# drawing from the user-chosen distribution; plots the series and offers the
# simulated data as a CSV download.
function(input, output) {
  sim_data <- reactive({
    date_seq <- seq(input$dateRange[1], input$dateRange[2], by = "day")
    n <- length(date_seq)
    # Pick the generator by distribution name; Poisson uses a fixed lambda of 5.
    values <- switch(input$distribution,
                     "Normal" = rnorm(n),
                     "Uniform" = runif(n),
                     "Poisson" = rpois(n, lambda = 5))
    data.frame(dates = date_seq, data = values)
  })
  output$datePlot <- renderPlot({
    df <- sim_data()
    plot(df$dates, df$data, type = "l")
  })
  output$downloadData <- downloadHandler(
    filename = "simulatedData.csv",
    content = function(file) {
      write.csv(sim_data(), file)
    }
  )
}
|
71bf42537d60cb425adf7ce3f8d4faf0fb7c1298
|
e3a0b7f672cb73c5e06dc97fb43c79d240ccf398
|
/R/optimRegModel.R
|
4d3cb594434f88be34eb842778d02ba540c3576f
|
[] |
no_license
|
jhorzek/regmx
|
621c6401b49d5ff2686a964be984523d96ffca6a
|
0a3e66cb914b690faa5d1cdf6a08699e6b93f06a
|
refs/heads/master
| 2022-10-01T08:57:18.297613
| 2022-09-14T13:14:21
| 2022-09-14T13:14:21
| 195,298,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,283
|
r
|
optimRegModel.R
|
#' optimRegModel
#'
#' Note: regmx is based on the R package \pkg{regsem}. Because of the early status of regmx, it is recommended to use regsem instead!
#' optimRegModel creates a range of regularized models from an mxModel. It automatically tests different penalty values and returns the best model
#'
#' @param mxModelObject an already run mxModel
#' @param alpha alpha controls the type of penalty. For lasso regularization, set alpha = 1, for ridge alpha = 0. Values between 0 and 1 implement elastic net regularization
#' @param gamma gamma sets the power in the denominator of parameter specific weights when using adaptive lasso regularization. Make sure to set alpha to 1 when using a gamma other than 0.
#' @param regValues numeric value depicting the penalty size
#' @param regOn string vector with matrices that should be regularized. The matrices must have the same name as the ones provided in the mxModelObject (e.g., "A")
#' @param regIndicators list of matrices indicating which parameters to regularize in the matrices provided in regOn. The matrices in regIndicators must to have the same names as the matrices they correspond to (e.g., regIndicators = list("A" = diag(10))). 1 Indicates a parameter that will be regularized, 0 an unregularized parameter
#' @param regValue_start initial penalty value (recommended: 0)
#' @param regValue_end highest penalty value tested
#' @param regValue_stepsize increase in penValue between iterations
#' @param criterion Criterion for chosing the best final model. Possible are: AIC, BIC, CV.m2LL (only if manualCV is provided)
#' @param autoCV logical indicating if cross-validation should be performed automatically
#' @param k number of splits performed when autoCV = TRUE
#' @param Boot logical indicating if Bootstrap should be performed. Not yet implemented!
#' @param manualCV if cross-validation should be performed manually, provide a cross-validation sample (has to be of the same class as the data in the mxModelObject; e.g., mxData)
#' @param zeroThresh Threshold at which parameters are evaluated as being zero (necessary for AIC and BIC)
#' @param scaleCV indicate if the CV samples should be scaled automatically
#' @param cores Number of cores to use (>1 for parallel processing)
#'
#' @examples
#' # The following example is adapted from the regsem help to demonstrate the equivalence of both methods:
#'
#' library(lavaan)
#' library(OpenMx)
#' # put variables on same scale for regsem
#' HS <- data.frame(scale(HolzingerSwineford1939[,7:15]))
#'
#' # define variables:
#' latent = c("f1")
#' manifest = c("x1","x2","x3","x4","x5", "x6", "x7", "x8", "x9")
#'
#' # define paths:
#' loadings <- mxPath(from = latent, to = manifest, free = c(F,T,T,T,T,T,T,T,T), values = 1)
#' lcov <- mxPath(from = latent, arrows = 2, free = T, values = 1)
#' lmanif <- mxPath(from = manifest, arrows =2 , free =T, values = 1)
#'
#' # define model:
#' myModel <- mxModel(name = "myModel", latentVars = latent, manifestVars = manifest, type = "RAM",
#' mxData(observed = HS, type = "raw"), loadings, lcov, lmanif,
#' mxPath(from = "one", to = manifest, free = T)
#' )
#'
#' fit_myModel <- mxRun(myModel)
#'
#' # Show the names of the matrices in the model:
#' names(fit_myModel$matrices)
#'
#' # Penalize specific parameters from the A matrix (directional paths):
#' regOn <- c("A")
#'
#' selectedA <- matrix(0, ncol = ncol(fit_myModel$A$values), nrow = nrow(fit_myModel$A$values))
#' selectedA[c(2,3,7,8,9),10] <-1 # parameters that should be regularized have to be marked with 1
#' regIndicators <- list("A" = selectedA) # save in a list. Note the naming of the list element
#'
#' # Run the models:
#'
#' reg_model <- optimRegModel(mxModelObject = fit_myModel, alpha = 1, gamma = 0, regOn = regOn, regIndicators = regIndicators)
#'
#' reg_model$`fit measures`
#'
#' reg_model$`best penalty`
#'
#' # Run the same model with 5-fold cross-validation
#'
#' CV_reg_model <- optimRegModel(mxModelObject = fit_myModel, alpha = 1, gamma = 0, regOn = regOn, regIndicators = regIndicators,
#' autoCV = T, k = 5)
#' CV_reg_model$`CV results`
#'
#' @author Jannik Orzek
#' @import OpenMx
#' @export
optimRegModel <- function(mxModelObject, alpha = 1, gamma = 0, regOn, regIndicators,
                          regValues,
                          criterion = "BIC", autoCV = FALSE, k = 5, Boot = FALSE,
                          manualCV = NULL, zeroThresh = .001, scaleCV = TRUE,
                          cores = 1) {
  # Thin dispatcher: run the single-core implementation when cores == 1,
  # otherwise hand everything to the parallel implementation.
  # NOTE: the single-core backend takes the penalty grid as `regValue`
  # (singular), the multi-core backend as `regValues` (plural).
  if (cores == 1) {
    result <- SingleCoreOptimRegModel(
      mxModelObject = mxModelObject, alpha = alpha, gamma = gamma,
      regOn = regOn, regIndicators = regIndicators,
      regValue = regValues,
      criterion = criterion, autoCV = autoCV, k = k, Boot = Boot,
      manualCV = manualCV, zeroThresh = zeroThresh, scaleCV = scaleCV,
      cores = cores
    )
  } else {
    result <- MultiCoreOptimRegModel(
      mxModelObject = mxModelObject, alpha = alpha, gamma = gamma,
      regOn = regOn, regIndicators = regIndicators,
      regValues = regValues,
      criterion = criterion, autoCV = autoCV, k = k, Boot = Boot,
      manualCV = manualCV, zeroThresh = zeroThresh, scaleCV = scaleCV,
      cores = cores
    )
  }
  result
}
|
52962c2fa888aa4b90a9aed0dfbecfb20a8efc3c
|
7a24a59293bafdd638d2fb9cdc10aa00359f5797
|
/R/angsize.R
|
6a1f06d742070aea5dd5b07c6e1fcaa01122d1c2
|
[] |
no_license
|
cran/astro
|
c0c92d6ff150a68ce81cc7f75e1888ee6498c36f
|
19710691a4a1a7d18aab7ccd31a7be31e0fe2b2a
|
refs/heads/master
| 2021-01-13T01:30:32.071603
| 2014-09-08T00:00:00
| 2014-09-08T00:00:00
| 17,694,512
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,949
|
r
|
angsize.R
|
# Convert between the physical size and the apparent angular size of an
# object at redshift z, using the angular diameter distance of an FLRW
# cosmology (formulae from Hogg 2000, "Distance Measures in Cosmology").
#
# Arguments:
#   z   redshift of the object
#   r   size of the object: a physical length when `inp` is a length unit,
#       or an angle when `inp` is an angle unit (triggers inverse mode)
#   inp units of r: "m", "pc", "kpc", "Mpc" (length) or "deg", "rad",
#       "arcsec" (angle)
#   out units of the result (angle units forward, length units inverse)
#   c   speed of light (m/s)
#   H   Hubble constant (km/s/Mpc)
#   M   matter density parameter Omega_M
#   L   dark energy density parameter Omega_Lambda
#   K   curvature parameter Omega_k
# Returns: the angular size in `out` units, or -- in inverse mode -- the
# physical size in `out` units.
angsize = function(z = 1, r = 1, inp = "arcsec", out = "kpc", c = 3E8, H = 70, M = 0.3, L = 1-M, K = 1-M-L){
    # Apparent Angular Size RA
    # redshift, radius of object, speed of light (m/s), hubble constant (km/s/Mpc), DM, DE, curvature, input units (m, pc, kpc or Mpc), output units (deg, rad, arcsec)
    # FROM: Hogg, 2000 (only valid for K>=0)
    inverse=FALSE
    # Angle-unit input selects "inverse" mode: compute the angle subtended
    # by a 1 kpc rod at redshift z, then invert and rescale at the end to
    # recover the physical size corresponding to the supplied angle.
    if(inp=="deg"|inp=="rad"|inp=="arcsec"){
    inverse=TRUE
    real_r=r
    real_inp=inp
    real_out=out
    r=1
    inp="kpc"
    out=real_inp
    }
    # Convert H from km/s/Mpc to 1/s, then form the Hubble distance (m).
    H = (3.24077649E-20)*H
    DH = c/H
    # Dimensionless inverse Hubble parameter 1/E(z) (Hogg eq. 14).
    E1 = function(a){((M*(1+a)^3) + (K*(1+a)^2) + L)^-0.5}
    # Line-of-sight comoving distance to the "near" redshift. The integral
    # runs from 0 to 0, so DC1 is always zero here; presumably kept for
    # symmetry with DC2 / future two-redshift generalisation -- TODO confirm.
    DC1 = DH*Vectorize(function(z){integrate(E1, lower=0, upper=0)$value})(z)
    # Transverse comoving distance: sinh for K>0, sin for K<0 (Hogg eq. 16).
    if(K>0){
    DM1 = DH*K^-0.5*sinh(K^0.5*DC1/DH)
    }else if(K<0){
    DM1 = DH*abs(K)^-0.5*sin(abs(K)^0.5*DC1/DH)
    }else{
    DM1 = DC1
    }
    # Line-of-sight comoving distance out to redshift z (Hogg eq. 15).
    DC2 = DH*Vectorize(function(z){integrate(E1, lower=0, upper=z, subdivisions=1000)$value})(z)
    if(K>0){
    DM2 = DH*K^-0.5*sinh(K^0.5*DC2/DH)
    }else if(K<0){
    DM2 = DH*abs(K)^-0.5*sin(abs(K)^0.5*DC2/DH)
    }else{
    DM2 = DC2
    }
    # Angular diameter distance between the two comoving distances
    # (Hogg eq. 19); with DM1 = 0 this reduces to DM2/(1+z).
    DA = (1/(1+z))*(DM2*(1+K*DM1^2/DH^2)^0.5 - DM1*(1+K*DM2^2/DH^2)^0.5)
    # Convert DA from metres into the same length units as r
    # (3.24077649e-17 = metres per pc reciprocal, etc.).
    if(inp=="pc"){
    mfact2=3.24077649E-17
    }else if(inp=="kpc"){
    mfact2=3.24077649E-20
    }else if(inp=="Mpc"){
    mfact2=3.24077649E-23
    }else{
    mfact2=1
    }
    DA=mfact2*DA
    # Subtended angle, then convert radians to the requested angle unit.
    theta=atan(r/DA) # radians
    if(out=="arcsec"){
    mfact=206264.806
    }else if(out=="deg"){
    mfact=57.2957795
    }else{
    mfact=1
    }
    result=theta*mfact
    if(inverse){
    # Forward pass gave (angle per kpc); invert to kpc per angle unit and
    # scale by the supplied angle, then convert kpc to the requested
    # length unit (3.08568025e19 = metres per kpc).
    result=real_r*(1/result)
    if(real_out=="m"){
    mfact=3.08568025E+19
    }else if(real_out=="pc"){
    mfact=10^3
    }else if(real_out=="Mpc"){
    mfact=10^-3
    }else{
    mfact=1
    }
    result=result*mfact
    }
    return(result)
}
|
522a478d04c76e016a1d271f055527e133c6959d
|
c8d24378e70933c30a99f4cb0097d9eeccca2b30
|
/code/1_getData/getTaxData.R
|
92acbb35c13745386c8fc45e3ed6668d9dd79913
|
[] |
no_license
|
emallickhossain/OnlineShoppingSalesTax
|
83fe86d18b8b3755261f8fac3cf0dcf75b3896c0
|
d7f26f0dc1dd0760c3d1cb42717808fecf8bc953
|
refs/heads/master
| 2021-01-14T01:14:21.098275
| 2020-07-09T19:10:30
| 2020-07-09T19:10:30
| 242,553,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,275
|
r
|
getTaxData.R
|
# This takes the TDS data and puts it into one file. I only get the total sales tax
# for each ZIP code and city. I append the year and the month as well.
# I'll decide if I want to use the min or max sales tax later.
# NOTE(review): only TOTAL_SALES_TAX is read below; the use tax mentioned
# in the original header comment is never extracted -- confirm intent.
library(data.table)
library(stringr)
library(purrr)
library(tidyr)
# Year/month pairs of the available raw extracts: monthly for 2006-2013,
# Jan-May 2014, then quarterly (Jan/Apr/Jul/Oct) for 2015-2016.
year <- c(rep(2006:2013, each = 12), rep(2014, 5), rep(2015:2016, each = 4))
month <- c(rep(1:12, 8), 1:5, rep(c(1, 4, 7, 10), 2))
# Read one monthly TDS extract from the local raw-data tree, keeping only
# ZIP code, state and total sales tax, tagged with its year and month.
loadTaxes <- function(year, month) {
  # The raw file layout changed over the years, so the path suffix is
  # chosen by vintage.
  if (year %in% 2006:2007) {
    suffix <- " AS_complete/AS_complete.txt"
  } else if (year %in% c(2008:2014)) {
    suffix <- " AS_completeplus/AS_complete+.txt"
  } else if (year %in% 2015:2016) {
    suffix <- " AS_basicIIplus/AS_basicII+.txt"
  }
  # File names embed zero-padded month and two-digit year, e.g. "0106 ...".
  data <- fread(paste0("/home/mallick/Desktop/Research/RawData/taxData/", year, "/",
                       str_pad(month, 2, "left", "0"), substr(year, 3, 4), suffix),
                select = c("ZIP_CODE", "STATE_ABBREV", "TOTAL_SALES_TAX"))
  data[, c("year", "month") := .(year, month)]
  setnames(data, c("zip_code", "state", "sales_tax", "year", "month"))
  return(data)
}
fullTaxData <- rbindlist(map2(year, month, loadTaxes))
# Add in missing months: from 2014 on the raw data is not monthly, so build
# a complete zip x month grid for 2014-2016 and merge it in.
dates <- seq.Date(from = as.Date("2014-01-01"), to = as.Date("2016-12-01"), by = "month")
zips <- unique(fullTaxData$zip_code)
full_dates <- setDT(expand.grid(zips, dates))
setnames(full_dates, c("zip_code", "date"))
full_dates[, c("year", "month", "date") :=
             .(as.integer(year(date)), as.integer(month(date)), NULL)]
fullTaxData <- merge(fullTaxData, full_dates, keyby = c("zip_code", "year", "month"),
                     all = TRUE)
# Filling down missing sales taxes (carry last observed rate forward) and
# getting minimum tax rates per zip-month (a ZIP can span several rates).
fullTaxData <- setDT(unique(fill(fullTaxData, state, sales_tax, .direction = "down")))
fullTaxDataMin <- fullTaxData[, .(sales_tax = min(sales_tax)), by = .(year, month, state, zip_code)]
# Getting adjacent tax rate ----------------------------------------------------
# Getting column names for use later; 1:12 recycles against the sorted year
# vector so each year gets months 1..12 in order.
cols <- paste0("sales_tax", sort(rep(2006:2016, 12)), "_", 1:12)
# Load county adjacency data from 2010 Census (https://www.census.gov/geo/reference/county-adjacency.html)
county_adj <- fread("./code/0_data/census_2010_county_adjacency.txt",
                    col.names = c("county", "own_fips", "neighbor_county", "neighbor_fips"))
county_adj <- setDT(fill(county_adj, own_fips))
# Adding in ZIP code and collapsing to ZIP: map each ZCTA to the county
# holding the largest share of its population.
zipFip <- fread(paste0("https://www2.census.gov/geo/docs/maps-data/data/rel/",
                       "zcta_county_rel_10.txt"),
                select = c("ZCTA5", "STATE", "COUNTY", "COPOPPCT"))
zipFip[, "fips" := paste0(str_pad(STATE, 2, "left", "0"),
                          str_pad(COUNTY, 3, "left", "0"))]
zipFip <- zipFip[, .SD[which.max(COPOPPCT)], by = ZCTA5]
zipFip[, c("STATE", "COUNTY", "COPOPPCT") := NULL]
setnames(zipFip, "ZCTA5", "zip_code")
# Getting FIPS codes for each zip code and then averaging by FIPS code
zipTax <- merge(fullTaxDataMin, zipFip, by = c("zip_code"))
zipTax <- zipTax[, .(sales_tax = mean(sales_tax)), by = .(year, month, fips)]
zipTax[, "fips" := as.integer(fips)]
zipTaxWide <- dcast(zipTax, fips ~ year + month, value.var = "sales_tax")
setnames(zipTaxWide, c("fips", cols))
# Merging with county adjacencies and getting minimum adjacent tax rate
neighbor_tax <- merge(county_adj, zipTaxWide, by.x = "neighbor_fips", by.y = "fips")
setcolorder(neighbor_tax, c("own_fips", "county", "neighbor_fips", "neighbor_county", cols))
min_neighbor_tax <- neighbor_tax[, lapply(.SD, min), .SDcols = cols, keyby = own_fips]
# Melting to long form; year and month are recovered from the wide column
# names "sales_taxYYYY_M" (positions 10-13 = year, 15-16 = month).
min_neighbor_tax_long <- melt(min_neighbor_tax, id.vars = c("own_fips"),
                              measure = patterns("^sales_tax"),
                              value.name = "min_sales_tax",
                              variable.name = "year")
min_neighbor_tax_long[, c("year", "month") :=
                        .(as.integer(substr(year, 10, 13)),
                          as.integer(substr(year, 15, 16)))]
setnames(min_neighbor_tax_long, c("fips", "year", "min_adj_tax", "month"))
# Final Tax data: own (zip-averaged) rate plus minimum neighboring-county rate.
final_zip_tax <- merge(zipTax, min_neighbor_tax_long, by = c("year", "month", "fips"))
fwrite(final_zip_tax, "./code/0_data/Clean/zip_tax_min.csv")
|
35d73e7329a0054bc15b3a59e81116b9e1d44e9f
|
9716896c5047d31239e87242481c9f950ba8f7ca
|
/man/HOZscale.Rd
|
1d3caf24ca1e56af7fcb2d3ef5ee63ec02d4ce5b
|
[] |
no_license
|
cran/RTOMO
|
3917a8e81969a983ef0d6b9b2ccd2005fffd30b1
|
71cceddb2ea61812734ff6d33a313c1a287b36d6
|
refs/heads/master
| 2021-01-23T20:55:07.961375
| 2018-01-18T11:32:54
| 2018-01-18T11:32:54
| 17,682,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 791
|
rd
|
HOZscale.Rd
|
\name{HOZscale}
\alias{HOZscale}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{add horizontal color scale}
\description{
Add horizontal color scale to existing plot.
}
\usage{
HOZscale(z, col, units = "", SIDE = 1, s1 = 0.4, s2 = 0.95)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{z}{image matrix}
\item{col}{color palette}
\item{units}{character string, units}
\item{SIDE}{Side of the plot}
\item{s1}{percent of margin for bottom}
\item{s2}{percent of margin for top}
}
\value{
Graphical Side effect
}
\author{Jonathan M. Lees<jonathan.lees.edu>}
\examples{
data(volcano)
image(volcano, col=terrain.colors(100))
HOZscale(volcano,terrain.colors(100) , units = "", SIDE = 1, s1 = 0.4, s2 = 0.95)
}
\keyword{hplot}
|
eddf9509f2bf7a15c4f9f6396d86efb5543faa39
|
cfa28306f5662022b3700cc9ab244bc7e277eebb
|
/20200825_Chopped_clean.R
|
2dab33443f75c83bf53de98ba43b2c7af677f6fd
|
[] |
no_license
|
vanderpoolrr/TidyTuesday
|
715ad73a393b49c307855934aa8d3019be9cf671
|
2d0985de14c0cb72871e4fb0ed208a26acf87eef
|
refs/heads/master
| 2023-01-30T14:02:16.268833
| 2020-12-10T02:06:02
| 2020-12-10T02:06:02
| 285,155,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,984
|
r
|
20200825_Chopped_clean.R
|
### TidyTuesday 8/24/2020 -- Chopped
## Rebecca R. Vanderpool
## vanderpoolrr@email.arizona.edu
## 2020-08-24
# One-off package installs, kept for reference:
#install.packages("stringdist")
#install.packages("ggsci")
#install.packages("gridGraphics")
library(tidyverse)
library(stringdist)
library(ggpubr)
library(viridis)
library(ggsci)
library(cowplot)
library(gridGraphics)
# Get the Data
# Read in the data manually from the TidyTuesday repository.
chopped <- readr::read_tsv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-08-25/chopped.tsv')
## change season to a factor
# Bucket the seasons into three groups (1-15, 16-30, 31+) for faceting later.
chopped = chopped %>%
  mutate(season_group = case_when(
    season <=15 ~ 1,
    season >15 & season <=30 ~ 2,
    season > 30 ~ 3),
  )
chopped$season = factor(chopped$season)
### exploring the judges and the number of judges
# Pool all three judge columns and print the sorted unique names; this is
# how the misspellings corrected below were spotted (interactive output).
judges = c(unique(chopped$judge1), unique(chopped$judge2), unique(chopped$judge3))
judges[order(judges)]
unique(judges[order(judges)])
############### Identified a number of errors with judges names.
# Misspelled judge names appear in all three judge columns. Rather than
# repeating three near-identical assignments per correction (21 statements
# in total), map every known misspelling to its canonical spelling and
# recode the three columns in one pass.
judge_name_fixes <- c(
  "AarΓ³n Sanchez"      = "AarΓ³n SΓ‘nchez",
  "Jody William"       = "Jody Williams",
  "Amanda Freita"      = "Amanda Freitag",
  "Amanda Frietag"     = "Amanda Freitag",
  "Geoffrey Zacharian" = "Geoffrey Zakarian",
  "Maneet Chauhaun"    = "Maneet Chauhan",
  "Chris Santo"        = "Chris Santos"
)
for (judge_col in c("judge1", "judge2", "judge3")) {
  # Locate entries matching a known misspelling and replace them with the
  # canonical name via the named lookup vector.
  misspelled <- chopped[[judge_col]] %in% names(judge_name_fixes)
  chopped[[judge_col]][misspelled] <- judge_name_fixes[chopped[[judge_col]][misspelled]]
}
## Exploring the highs and the low rated shows (interactive exploration).
hist(chopped$episode_rating)
sum(chopped$episode_rating >9, na.rm = TRUE)
sum(chopped$episode_rating <7.2, na.rm = TRUE)
summary(chopped$episode_rating)
# Inspect the extreme episodes (> 9 and < 7.2 IMDb rating) by hand.
top_rated = which(chopped$episode_rating >9)
low_rated = which(chopped$episode_rating < 7.2)
View(chopped[top_rated, ])
View(chopped[low_rated, ])
### Figure of the episode ratings based on season.
# Which season has the highest/lowest ratings.
#col_pal = rep(viridis(15), 3)
col_pal = inferno(45)
# Density of episode ratings, one curve per season, faceted by season group (P2).
p = ggdensity(chopped, x = "episode_rating",
              # add = "mean",
              legend = "right",
              palette = col_pal,
              color = "season",
              fill = "season",
              xlab = "Episode Rating"
              #rug = TRUE
)
P2 = facet(p,
           facet.by = "season_group",
           ncol = 1,
           panel.labs = list(season_group = c("Seasons 1-15", "Seasons 16-30", "Seasons 31-43")),
           panel.labs.background = list(color = "Black", fill = "white", linetype = 0, size = 0)) +
  theme(legend.title = element_text(size = 10),
        legend.text = element_text(size = 9),
        legend.key.size = unit(0.2, "lines"))+
  labs(caption = "")
#### create a figure of the average season ratings to identify the seasons with the highest and lowest ratings.
average_season = chopped %>%
  select("season", "season_episode", "episode_rating" ) %>%
  group_by(season) %>%
  summarize(mean_rating = mean(episode_rating, na.rm = TRUE), n = n())
## Exploration figure -- looks like season 33 took a big downturn...
plot(average_season$season, average_season$mean_rating)
### Plot the deviation of the values from the reference value
## will work with the z-score as in the examples.
average_season = average_season %>%
  mutate(rating_z = (mean_rating - mean(mean_rating, na.rm = TRUE))/sd(mean_rating, na.rm = TRUE),
         rating_grp = factor(ifelse(rating_z < 0, "low", "high"), levels = c("low", "high")))
col_journal = rev(pal_jama()(2))
# Dot chart of per-season rating z-scores, split into below/above average (P3).
P3 = ggdotchart(average_season, x = "season", y = "rating_z",
                color = "rating_grp", # Color by groups
                palette = col_journal, # Custom color palette
                sorting = "ascending", # Sort values in ascending order
                add = "segments", # Add segments from y = 0 to dots
                add.params = list(color = "lightgray", size = 2), # Change segment color and size
                group = "rating_grp", # Order by groups
                #rotate = TRUE,
                dot.size = 7, # Large dot size
                label = round(average_season$mean_rating,1), # Add mean ratings values as dot labels
                font.label = list(color = "white", size = 9,
                                  vjust = 0.5), # Adjust label parameters
                legend = c(0.85, 0.3),
                legend.title = "Ratings",
                ylab = c("Z score"),
                xlab = c("Season"),
                ggtheme = theme_pubr() # ggplot2 theme
) +
  #geom_hline(yintercept = 0, linetype = 2, color = "lightgray") +
  geom_vline(xintercept = 19.5, linetype = 2, color = "gray25") +
  geom_vline(xintercept = 43.5, linetype = 2, color = "gray25")
## looking at the highest rated and the lowest rated seasons.
#chopped %>%
#  filter(season == 33 | season == 32 | season == 1 | season == 40) %>%
#  View()
### normalize the judges rating to the overall ratings
overall_mean = mean(average_season$mean_rating, na.rm = TRUE)
overall_sd = sd(average_season$mean_rating, na.rm = TRUE)
# One row per (episode, judge) so judges can be summarized individually.
judges_df_long <- chopped %>%
  select( "season", "season_episode", "series_episode", "episode_rating", "judge1",
          "judge2", "judge3", "appetizer") %>%
  gather(judge_pos, judge_name, judge1:judge3) %>%
  arrange(series_episode)
# Per-judge mean episode rating; keep judges with >= 5 appearances and
# z-score them against the overall per-season mean/sd computed above.
judge_fig = judges_df_long %>%
  add_count(judge_name, sort = TRUE) %>%
  group_by(judge_name) %>%
  summarize(mean_rating = mean(episode_rating, na.rm = TRUE),
            sd_rating = sd(episode_rating, na.rm = TRUE),
            n = n()) %>%
  filter(n >= 5) %>%
  mutate(appearance = ifelse(n > 20, "Regular", "Guest"),
         judge_rating_z = (mean_rating - overall_mean)/overall_sd,
         judge_rating_grp = factor(ifelse(judge_rating_z < 0, "low", "high"), levels = c("low", "high")))
col_journal = pal_lancet()(3)
# Dot chart of per-judge rating z-scores, colored by Regular/Guest status (P1).
P1 = ggdotchart(judge_fig, x = "judge_name", y = "judge_rating_z",
                color = "appearance", # Color by groups
                palette = col_journal, # Custom color palette
                sorting = "ascending", # Sort values in ascending order
                add = "segments", # Add segments from y = 0 to dots
                add.params = list(color = "lightgray", size = 2), # Change segment color and size
                group = "appearance", # Order by groups
                rotate = TRUE,
                dot.size = 7, # Large dot size
                label = round(judge_fig$mean_rating,1), # Add mean rating values as dot labels
                font.label = list(color = "white", size = 9,
                                  vjust = 0.5), # Adjust label parameters
                legend = c(0.8, 0.9),
                legend.title = "Appearances",
                ylab = c("Z Score"),
                xlab = c(""),
                ggtheme = theme_pubr() # ggplot2 theme
) +
  geom_hline(yintercept = 0, linetype = 2, color = "lightgray") +
  #geom_vline(xintercept = 9.5, linetype = 2, color = "gray25") +
  geom_vline(xintercept = 7.5, linetype = 2, color = "gray25")+
  labs(caption = "TidyTuesday: Chopped, Analysis @RRVdpool, Created: 8/31/2020")
#labs(caption = "Ordered based on their Z-score based on episode ratings.")
## Lay out the figure: P3 on top, P2 and P1 side by side below.
#plot_row = plot_grid(P1, P2, labels = c('A)', 'B)' ), ncol = 2, label_size = 14)
#plot_combine = plot_grid(plot_row, P3, labels = c('', 'C)'), label_size = 14, ncol = 1)
plot_row = plot_grid(P2, P1, labels = c('B)', 'C)' ), ncol = 2, align = 'v', axis = '1', label_size = 14)
plot_combine = plot_grid(P3, plot_row, labels = c('A)', ''), label_size = 14, ncol = 1)
# now add the title
title <- ggdraw() +
  draw_label(
    "Chopped: Episode Ratings for Frequent Judges and Seasons",
    fontface = 'bold',
    x = 0,
    hjust = 0
  ) +
  theme(
    # add margin on the left of the drawing canvas,
    # so title is aligned with left edge of first plot
    plot.margin = margin(0, 0, 0, 7)
  )
plot_grid(
  title, plot_combine,
  ncol = 1,
  # rel_heights values control vertical title margins
  rel_heights = c(0.1, 1)
)
# Copy the on-screen device to a PNG file and close it.
dev.copy(png, "20200831_Chopped.png", width = 9, height = 8.5, units = "in", res = 300)
dev.off()
|
72b305010f49b175fcf11d97234c7644ec592b8b
|
c6a8c13031488c4927dcd2de9b5fc0a960bb81f6
|
/R/cosine-similarity.R
|
656dffa7df4794cc4730c32feaf66ae416b17373
|
[
"MIT"
] |
permissive
|
pmcharrison/hrep
|
dcfe44f5f9dc5eced687011db59addeecb429836
|
9132b7a937063097ef0b7d91c2c7fcc5c0f83366
|
refs/heads/master
| 2023-08-31T10:48:06.875846
| 2023-01-12T09:51:52
| 2023-01-12T09:51:52
| 158,516,625
| 8
| 2
|
NOASSERTION
| 2023-03-11T14:26:31
| 2018-11-21T08:38:18
|
R
|
UTF-8
|
R
| false
| false
| 362
|
r
|
cosine-similarity.R
|
#' Cosine similarity
#'
#' Computes the cosine similarity between two numeric vectors: the dot
#' product of the vectors divided by the product of their Euclidean norms.
#' @param x Numeric vector 1.
#' @param y Numeric vector 2.
#' @return Cosine similarity, as a numeric scalar.
#' @export
cosine_similarity <- function(x, y) {
  dot_product <- sum(x * y)
  norm_x <- sqrt(sum(x ^ 2))
  norm_y <- sqrt(sum(y ^ 2))
  dot_product / (norm_x * norm_y)
}
|
1b03208063c712014eef74fa3e824e418d0472b4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lrmest/examples/alte2.Rd.R
|
9d99ba6eebca8bc08ad2f4c9e874bf9b363bdb63
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 884
|
r
|
alte2.Rd.R
|
# Extracted example code for lrmest::alte2 (Type (2) Adjusted Liu Estimator).
library(lrmest)
### Name: alte2
### Title: Type (2) Adjusted Liu Estimator
### Aliases: alte2
### Keywords: ~kwd1 ~kwd2
### ** Examples
## Portland cement data set is used.
data(pcd)
k<-0.1650
# Note: "<--" is `<-` followed by unary minus, i.e. d gets -0.13.
d<--0.1300
aa<-c(0.958451,1.021155,0.857821,1.040296)
alte2(Y~X1+X2+X3+X4-1,k,d,aa,data=pcd) # Model without the intercept is considered.
## To obtain the variation of MSE of Type (2) Adjusted Liu Estimator.
data(pcd)
# Grids of shrinkage parameters k and d for the MSE surface plot.
k<-c(0:5/10)
d<-c(5:25/10)
aa<-c(0.958451,1.021155,0.857821,1.040296)
msemat<-alte2(Y~X1+X2+X3+X4-1,k,d,aa,data=pcd)
matplot(d,alte2(Y~X1+X2+X3+X4-1,k,d,aa,data=pcd),type="l",ylab=c("MSE"),
main=c("Plot of MSE of Type (2) Adjusted Liu Estimator"),
cex.lab=0.6,adj=1,cex.axis=0.6,cex.main=1,las=1,lty=3)
text(y=msemat[1,],x=d[1],labels=c(paste0("k=",k)),pos=4,cex=0.6)
## Use "press=TRUE" to obtain the variation of PRESS of Type (2) Adjusted Liu Estimator.
|
bc17dc9e59ce7cae87960491c7f6eccf11827730
|
37ce38ba0eff95451aebea810a1e2ab119f89a85
|
/man/dat.interp.Rd
|
e8341508ca621a513252722f7e0e61b349576441
|
[
"MIT"
] |
permissive
|
SwampThingPaul/AnalystHelper
|
39fdd58dc4c7300b6e72ff2713316809793236ce
|
eb570b69d7ea798facaf146d80bc40269a3d5028
|
refs/heads/master
| 2023-07-21T00:19:21.162374
| 2023-07-11T17:24:36
| 2023-07-11T17:24:36
| 179,672,539
| 1
| 0
|
MIT
| 2020-03-21T20:05:31
| 2019-04-05T11:53:19
|
R
|
UTF-8
|
R
| false
| true
| 593
|
rd
|
dat.interp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dat.interp.R
\name{dat.interp}
\alias{dat.interp}
\title{Nutrient Load: Linearly interpolates data to conduct nutrient load calculations}
\usage{
dat.interp(x)
}
\arguments{
\item{x}{numeric}
}
\value{
one of two functions used to calculate daily nutrient load from flow and water quality parameters
}
\description{
Nutrient Load: Linearly interpolates data to conduct nutrient load calculations
}
\examples{
data=c(rep(NA,5),runif(2,2,5),NA,NA,NA,6,7,8, NA,NA)
dat.interp(data)
}
\keyword{Nutrient}
\keyword{load}
|
6e36f4200283b876dc0d1dc02f4698a16916defc
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/rrecsys/inst/doc/b5_wALS.R
|
5d971d375463f24e197431b34d49076485974175
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
b5_wALS.R
|
## ---- eval=FALSE---------------------------------------------------------
# wALS <- rrecsys(smallML, "wALS", k = 5, lambda = 0.01, scheme = "uni", delta = 0.04)
# wALS
## ---- eval=FALSE---------------------------------------------------------
# setStoppingCriteria(nrLoops = 10)
## ---- eval=FALSE---------------------------------------------------------
# setStoppingCriteria(autoConverge = TRUE, deltaErrorThreshold = 1e-5, minNrLoops = 10)
|
9a07e46dbe2a58bbe4703a9bb2d11a4fa6202be5
|
e43ccc719a5df63664598db7614d7b10e3b4d4fb
|
/tests/testthat/test_1.R
|
234fdc6f66e874ef676bc4dcc8eedf8eb2f8d4ab
|
[] |
no_license
|
opisthokonta/goalmodel
|
58fa2236e894df745f4f5985e16c863e55fd6272
|
55a33c620a1c36b51ad634f0e47abf402766cf56
|
refs/heads/master
| 2023-09-03T09:30:18.823581
| 2023-08-29T08:39:50
| 2023-08-29T08:39:50
| 153,664,398
| 98
| 21
| null | 2019-12-28T23:10:27
| 2018-10-18T17:49:21
|
R
|
UTF-8
|
R
| false
| false
| 29,098
|
r
|
test_1.R
|
# Test setup: load dependencies and build the 2011-12 English Premier
# League fixture list used by every test below.
# library() is used instead of require(): require() merely warns and
# returns FALSE when a package is missing, letting the tests fail later
# with confusing errors, while library() fails fast at load time.
library(engsoccerdata)
library(dplyr)
library(Rcpp) # Should not be necessary to load Rcpp like this, but sometimes it fails if not.
# Load data from English Premier League, 2011-12 season (top tier only),
# with dates parsed and team names as plain character vectors.
england_2011 <- engsoccerdata::england %>%
  dplyr::filter(Season %in% c(2011),
                tier == 1) %>%
  dplyr::mutate(Date = as.Date(Date),
                home = as.character(home),
                visitor = as.character(visitor))
# Model fitting - Default model ----
context("Model fitting - Default model")
# fit default model (independent Poisson; no dispersion/rho/sigma/gamma
# parameters should be present in the fitted object)
gm_res <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                    team1 = england_2011$home, team2=england_2011$visitor)
test_that("Fitting default model", {
  expect_equal(class(gm_res), 'goalmodel')
  # Distribution-specific parameters must be absent from the plain fit.
  expect_equal(gm_res$parameters$dispersion, NULL)
  expect_equal(gm_res$parameters$rho, NULL)
  expect_equal(gm_res$parameters$sigma, NULL)
  expect_equal(gm_res$parameters$gamma, NULL)
  # Attack/defense estimates: no NAs, aligned names, no duplicated teams.
  expect_equal(any(is.na(gm_res$parameters$attack)), FALSE)
  expect_equal(any(is.na(gm_res$parameters$defense)), FALSE)
  expect_equal(names(gm_res$parameters$attack), names(gm_res$parameters$defense))
  expect_equal(any(duplicated(names(gm_res$parameters$attack))), FALSE)
  expect_equal(any(duplicated(names(gm_res$parameters$defense))), FALSE)
  expect_true(gm_res$converged)
})
# Model fitting - DC model -----
context("Model fitting - DC model")
gm_res_dc <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                       team1 = england_2011$home, team2=england_2011$visitor,
                       dc=TRUE)
test_that("Fitting Dixon-Coles model", {
  expect_equal(class(gm_res_dc), 'goalmodel')
  expect_equal(gm_res_dc$parameters$dispersion, NULL)
  # The DC adjustment adds a numeric rho parameter.
  expect_equal(is.numeric(gm_res_dc$parameters$rho), TRUE)
  expect_equal(gm_res_dc$parameters$gamma, NULL)
  expect_equal(gm_res$parameters$sigma, NULL)
  expect_equal(any(is.na(gm_res_dc$parameters$attack)), FALSE)
  expect_equal(any(is.na(gm_res_dc$parameters$defense)), FALSE)
  expect_equal(names(gm_res_dc$parameters$attack), names(gm_res_dc$parameters$defense))
  expect_equal(any(duplicated(names(gm_res_dc$parameters$attack))), FALSE)
  expect_equal(any(duplicated(names(gm_res_dc$parameters$defense))), FALSE)
  expect_true(gm_res_dc$converged)
  # Fit DC model on dataset with where there are no low-scoring games;
  # shifting all scores by +2 removes them, which should be an error.
  expect_error(goalmodel(goals1 = england_2011$hgoal+2, goals2 = england_2011$vgoal+2,
                         team1 = england_2011$home, team2=england_2011$visitor,
                         dc=TRUE))
})
# Model fitting - Hurdle model ----
context("Model fitting - Hurdle model")
gm_res_hurdle <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                           team1 = england_2011$home, team2=england_2011$visitor,
                           hurdle=TRUE)
# Printed interactively; NOTE(review): accesses $zip although hurdle=TRUE
# was fit -- confirm the intended parameter name.
gm_res_hurdle$parameters$zip
test_that("Fitting Hurdle model", {
  expect_equal(class(gm_res_hurdle), 'goalmodel')
  expect_equal(gm_res_hurdle$parameters$dispersion, NULL)
  expect_equal(is.numeric(gm_res_hurdle$parameters$hurdle), TRUE)
  expect_equal(gm_res_hurdle$parameters$gamma, NULL)
  expect_equal(gm_res_hurdle$parameters$rho, NULL)
  expect_equal(gm_res$parameters$sigma, NULL)
  expect_equal(any(is.na(gm_res_hurdle$parameters$attack)), FALSE)
  expect_equal(any(is.na(gm_res_hurdle$parameters$defense)), FALSE)
  expect_equal(names(gm_res_hurdle$parameters$attack), names(gm_res_hurdle$parameters$defense))
  expect_equal(any(duplicated(names(gm_res_hurdle$parameters$attack))), FALSE)
  expect_equal(any(duplicated(names(gm_res_hurdle$parameters$defense))), FALSE)
  expect_true(gm_res_hurdle$converged)
  # Fit DC model on dataset with where there are no low-scoring games
  expect_error(goalmodel(goals1 = england_2011$hgoal+2, goals2 = england_2011$vgoal+2,
                         team1 = england_2011$home, team2=england_2011$visitor,
                         dc=TRUE))
})
# Model fitting - Negbin model ----
context("Model fitting - Negbin model")
# fit negative binomial model (adds a single scalar dispersion parameter)
gm_res_nbin <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                         team1 = england_2011$home, team2=england_2011$visitor, model='negbin')
test_that("Fitting Negative Binomial model", {
  expect_equal(class(gm_res_nbin), 'goalmodel')
  expect_true(is.null(gm_res_nbin$parameters$rho))
  expect_true(is.null(gm_res_nbin$parameters$sigma))
  expect_true(is.numeric(gm_res_nbin$parameters$dispersion))
  expect_true(length(gm_res_nbin$parameters$dispersion) == 1)
  expect_equal(any(is.na(gm_res_nbin$parameters$attack)), FALSE)
  expect_equal(any(is.na(gm_res_nbin$parameters$defense)), FALSE)
  expect_equal(names(gm_res_nbin$parameters$attack), names(gm_res_nbin$parameters$defense))
  expect_equal(any(duplicated(names(gm_res_nbin$parameters$attack))), FALSE)
  expect_equal(any(duplicated(names(gm_res_nbin$parameters$defense))), FALSE)
  expect_true(gm_res_nbin$converged)
})
# Model fitting - Gaussian ----
context("Model fitting - Gaussian")
# Fit a Gaussian model (adds a numeric sigma parameter).
gm_res_gaussian <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                             team1 = england_2011$home, team2=england_2011$visitor, model='gaussian')
test_that("Fitting Gaussian model", {
  expect_equal(class(gm_res_gaussian), 'goalmodel')
  expect_equal(gm_res_gaussian$parameters$dispersion, NULL)
  expect_equal(is.numeric(gm_res_gaussian$parameters$sigma), TRUE)
  expect_equal(any(is.na(gm_res_gaussian$parameters$attack)), FALSE)
  expect_equal(any(is.na(gm_res_gaussian$parameters$defense)), FALSE)
  expect_equal(gm_res_gaussian$parameters$gamma, NULL)
  expect_equal(names(gm_res_gaussian$parameters$attack), names(gm_res_gaussian$parameters$defense))
  expect_equal(gm_res_gaussian$converged, TRUE)
})
# Model fitting - some fixed parameters ----
context("Model fitting - some fixed parameters")
my_fixed_params1 <- list(attack = c('Chelsea' = 0.2), defense= c('Fulham' = -0.09, 'Liverpool' = 0.1))
gm_res_fp1 <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
team1 = england_2011$home, team2=england_2011$visitor,
fixed_params = my_fixed_params1)
# Fit model with parameter fixed for some teams not in data fixed
my_fixed_params2 <- list(attack = c('NOTEXIST' = 0.2))
test_that("Fitting default model - some parameters fixed", {
# Fit model with parameter fixed for some teams not in data,
# which gives a warning.
expect_warning(gm_res_fp2 <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
team1 = england_2011$home, team2=england_2011$visitor,
fixed_params = my_fixed_params2))
expect_equal(class(gm_res_fp1), 'goalmodel')
expect_equal(gm_res_fp1$parameters$dispersion, NULL)
expect_equal(gm_res_fp1$parameters$gamma, NULL)
expect_equal(gm_res$parameters$sigma, NULL)
expect_equal(any(is.na(gm_res_fp1$parameters$attack)), FALSE)
expect_equal(any(is.na(gm_res_fp1$parameters$defense)), FALSE)
expect_equal(names(gm_res_fp1$parameters$attack), names(gm_res_fp1$parameters$defense))
expect_equal(gm_res_fp1$parameters$attack, gm_res_fp1$parameters$attack)
expect_equal(gm_res_fp1$parameters$defense, gm_res_fp1$parameters$defense)
expect_equal(any(duplicated(names(gm_res_fp1$parameters$attack))), FALSE)
expect_equal(any(duplicated(names(gm_res_fp1$parameters$defense))), FALSE)
expect_true(gm_res_fp1$converged)
expect_true(abs(gm_res_fp2$loglikelihood - gm_res$loglikelihood) < 0.0001)
expect_true('NOTEXIST' %in% gm_res_fp2$all_teams)
expect_true('NOTEXIST' %in% names(gm_res_fp2$parameters$defense))
expect_true('NOTEXIST' %in% names(gm_res_fp2$parameters$attack))
})
# Model fitting - 2-step estimation ----
context("Model fitting - 2-step estimation")
## Test if two-step estimation works as it should.
# Fit the Dixon-Coles model, with most of the parameters fixed to the values in the default model.
gm_res_dc_2s <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
team1 = england_2011$home, team2=england_2011$visitor,
dc=TRUE, fixed_params = gm_res$parameters)
test_that("Fitting Dixon-Coles model - 2step", {
expect_equal(class(gm_res_dc_2s), 'goalmodel')
expect_equal(gm_res_dc_2s$parameters$dispersion, NULL)
expect_equal(is.numeric(gm_res_dc_2s$parameters$rho), TRUE)
expect_equal(gm_res$parameters$sigma, NULL)
expect_equal(gm_res_dc_2s$parameters$gamma, NULL)
expect_equal(names(gm_res_dc_2s$parameters$attack), names(gm_res_dc_2s$parameters$defense))
expect_equal(gm_res_dc_2s$parameters$attack, gm_res$parameters$attack)
expect_equal(gm_res_dc_2s$parameters$defense, gm_res$parameters$defense)
expect_equal(any(is.na(gm_res_dc_2s$parameters$attack)), FALSE)
expect_equal(any(is.na(gm_res_dc_2s$parameters$defense)), FALSE)
expect_equal(gm_res_dc_2s$parameters$rho == gm_res_dc$parameters$rho, FALSE)
expect_equal(any(duplicated(names(gm_res_dc_2s$parameters$attack))), FALSE)
expect_equal(any(duplicated(names(gm_res_dc_2s$parameters$defense))), FALSE)
expect_true(gm_res_dc_2s$converged)
})
# Additional covariates ----
context("Additional covariates")
# Manual hfa
hfa_mat <- matrix(data = 1, ncol=1, nrow=nrow(england_2011))
colnames(hfa_mat) <- 'hfaa'
gm_res_hfax <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
team1 = england_2011$home, team2=england_2011$visitor,
hfa=FALSE, x1=hfa_mat)
test_that("Manual HFA", {
expect_equal(names(gm_res_hfax$parameters$beta), 'hfaa')
expect_true(abs(gm_res_hfax$parameters$beta - gm_res$parameters$hfa) < 0.001)
expect_true(gm_res_hfax$converged)
})
# CMP functions ----
context("CMP functions")

# Density, CDF and quantile function of the Conway-Maxwell-Poisson distribution.
cmp_dens <- dCMP(x = 0:6, lambda = 4.4, upsilon = 1.2)
cmp_cdf <- pCMP(q = 6, lambda = 4.4, upsilon = 1.2)
# The CDF at 6 should equal the summed densities over 0:6.
cdf_dens_gap <- sum(cmp_dens) - cmp_cdf
cmp_dens_log <- dCMP(x = 0:6, lambda = 4.4, upsilon = 1.2, log = TRUE)
# NA inputs should propagate to NA outputs, elementwise.
cmp_dens_na <- dCMP(x = c(0, NA, 2, 3), lambda = 4.4, upsilon = 1.2)
cmp_cdf_na <- pCMP(q = c(0, NA, 2, 3), lambda = c(1.2, NA), upsilon = 0.98)
# With upsilon = 1 the CMP distribution reduces to the Poisson distribution.
cmp_quantiles <- qCMP(p = seq(0, 1, by = 0.05), lambda = 2.2, upsilon = 1)
pois_quantiles <- qpois(p = seq(0, 1, by = 0.05), lambda = 2.2)

test_that("CMP", {
  expect_true(all(cmp_dens >= 0))
  expect_true(all(cmp_dens <= 1))
  expect_true(cmp_cdf >= 0)
  expect_true(cmp_cdf <= 1)
  expect_true(cdf_dens_gap < 0.0001)
  expect_true(all(cmp_dens_log == log(cmp_dens)))
  expect_true(is.na(cmp_dens_na[2]))
  expect_true(all(!is.na(cmp_dens_na[-2])))
  expect_true(all(is.na(cmp_cdf_na[c(2, 4)])))
  expect_true(all(!is.na(cmp_cdf_na[-c(2, 4)])))
  expect_true(all(cmp_quantiles == pois_quantiles))
})
# Model fitting - CMP 2 - step ----
context("Model fitting - CMP 2 - step")

# Fit the Conway-Maxwell-Poisson model with all parameters from the default
# Poisson fit held fixed, so that only the dispersion is estimated.
gm_res_cmp <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                        team1 = england_2011$home, team2 = england_2011$visitor,
                        fixed_params = gm_res$parameters,
                        model = 'cmp')

# Independently estimate the dispersion with upsilon.ml() from the expected
# goals of the default model; it should agree with the two-step estimate.
expg_poisson <- predict_expg(gm_res, team1 = england_2011$home, team2 = england_2011$visitor)
upsilon_hat <- upsilon.ml(x = c(england_2011$hgoal, england_2011$vgoal),
                          parameters = c(expg_poisson$expg1, expg_poisson$expg2),
                          param_type = 'mu', method = 'fast')
upsilon_abs_diff <- abs(gm_res_cmp$parameters$dispersion - upsilon_hat)

test_that("Fitting CMP model", {
  expect_equal(class(gm_res_cmp), 'goalmodel')
  expect_null(gm_res_cmp$parameters$rho)
  expect_null(gm_res_cmp$parameters$sigma)
  # A single numeric dispersion parameter should have been estimated.
  expect_true(is.numeric(gm_res_cmp$parameters$dispersion))
  expect_length(gm_res_cmp$parameters$dispersion, 1)
  expect_false(any(is.na(gm_res_cmp$parameters$attack)))
  expect_false(any(is.na(gm_res_cmp$parameters$defense)))
  expect_equal(names(gm_res_cmp$parameters$attack), names(gm_res_cmp$parameters$defense))
  expect_false(any(duplicated(names(gm_res_cmp$parameters$attack))))
  expect_false(any(duplicated(names(gm_res_cmp$parameters$defense))))
  expect_true(gm_res_cmp$converged)
  expect_true(upsilon_abs_diff < 0.001)
})
# Making predictions ----
context("Making predictions")

to_predict1 <- c('Arsenal', 'Manchester United', 'Liverpool', 'Stoke City')
to_predict2 <- c('Fulham', 'Chelsea', 'Tottenham Hotspur', 'Manchester City')

# Expected goals from every fitted model type.
pred_expg_default <- predict_expg(gm_res, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_expg_dc <- predict_expg(gm_res_dc, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_expg_nbin <- predict_expg(gm_res_nbin, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_expg_cmp <- predict_expg(gm_res_cmp, team1=to_predict1, team2=to_predict2, return_df = FALSE)

# A Dixon-Coles model with rho set to 0 should still give valid predictions.
gm_res_dc0 <- gm_res_dc
gm_res_dc0$parameters$rho <- 0
pred_expg_dc0 <- predict_expg(gm_res_dc0, team1=to_predict1, team2=to_predict2, return_df = FALSE)

test_that("Predict expg.", {
  expect_equal(is.numeric(pred_expg_default[[1]]), TRUE)
  expect_equal(is.numeric(pred_expg_dc[[1]]), TRUE)
  expect_equal(is.numeric(pred_expg_nbin[[1]]), TRUE)
  expect_equal(is.numeric(pred_expg_cmp[[1]]), TRUE)
  expect_equal(is.numeric(pred_expg_dc0[[1]]), TRUE)
  expect_equal(is.numeric(pred_expg_default[[2]]), TRUE)
  expect_equal(is.numeric(pred_expg_dc[[2]]), TRUE)
  expect_equal(is.numeric(pred_expg_nbin[[2]]), TRUE)
  expect_equal(is.numeric(pred_expg_cmp[[2]]), TRUE)
  expect_equal(is.numeric(pred_expg_dc0[[2]]), TRUE)
  # BUGFIX: these NA checks were previously computed but their results were
  # discarded (bare all(sapply(...)) expressions), so they could never fail.
  # Wrap them in expect_true() so they are real expectations. A stray
  # duplicate top-level check on pred_expg_cmp was removed.
  expect_true(all(sapply(pred_expg_default, function(x) all(!is.na(x)))))
  expect_true(all(sapply(pred_expg_dc, function(x) all(!is.na(x)))))
  expect_true(all(sapply(pred_expg_nbin, function(x) all(!is.na(x)))))
  expect_true(all(sapply(pred_expg_cmp, function(x) all(!is.na(x)))))
  expect_true(all(sapply(pred_expg_dc0, function(x) all(!is.na(x)))))
})
# Predictions involving teams that were not in the training data should come
# back as NA rather than raise an error. Warnings are expected from these
# calls; suppress them here, otherwise the expectations will not pass cleanly.
suppressWarnings({
# Re-fit with some parameters fixed (same setup as the earlier
# "some parameters fixed" tests, which define my_fixed_params2).
gm_res_fp2 <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                   team1 = england_2011$home, team2=england_2011$visitor,
                  fixed_params = my_fixed_params2)

# Predict with team names that do not exist in the data.
p_expg_na1 <- predict_expg(gm_res, team1=c('NOTEXIST', 'Fulham', 'Fulham'),
                          team2=c('Fulham', 'NOTEXIST2', 'Arsenal'), return_df = FALSE)

p_expg_na2 <- predict_expg(gm_res_fp2, team1=c('NOTEXIST'),
                           team2=c('Fulham'), return_df = FALSE)

p_goals_na <- predict_goals(gm_res_fp2, team1=c('NOTEXIST', 'Fulham'),
                            team2=c('Fulham', 'Arsenal'))

p_result_na <- predict_result(gm_res_fp2, team1=c('NOTEXIST', 'Fulham'),
                              team2=c('Fulham', 'Arsenal'))
})

test_that("Predict expg unknwon teams", {
  # Only the side involving the unknown team is NA; the known side is not.
  expect_true(is.na(p_expg_na2$expg2[1]))
  expect_true(!is.na(p_expg_na2$expg1[1]))
  # The full scoreline matrix / outcome row is NA for the unknown-team match.
  expect_true(all(is.na(p_goals_na[[1]])))
  expect_true(all(!is.na(p_goals_na[[2]])))
  expect_true(all(is.na(p_result_na[1,])))
  expect_true(all(!is.na(p_result_na[2,])))
})
# Match outcome (home/draw/away) probabilities for each model type.
pred_result_default <- predict_result(gm_res, team1 = to_predict1, team2 = to_predict2, return_df = FALSE)
pred_result_dc <- predict_result(gm_res_dc, team1 = to_predict1, team2 = to_predict2, return_df = FALSE)
pred_result_nbin <- predict_result(gm_res_nbin, team1 = to_predict1, team2 = to_predict2, return_df = FALSE)
pred_result_cmp <- predict_result(gm_res_cmp, team1 = to_predict1, team2 = to_predict2, return_df = FALSE)

test_that("Predict result", {
  for (res_probs in list(pred_result_default, pred_result_dc,
                         pred_result_nbin, pred_result_cmp)) {
    # Both probability columns should be numeric.
    expect_true(is.numeric(res_probs[[1]]))
    expect_true(is.numeric(res_probs[[2]]))
    # The three outcome probabilities of every match should sum to 1.
    expect_true(all(rowSums(res_probs) == 1))
  }
})
# Scoreline probabilities, both in matrix and in long data-frame format.
pred_goals_default <- predict_goals(gm_res, team1 = to_predict1, team2 = to_predict2)
pred_goals_dc <- predict_goals(gm_res_dc, team1 = to_predict1, team2 = to_predict2)
pred_goals_default_df <- predict_goals(gm_res, team1 = to_predict1, team2 = to_predict2, return_df = TRUE)
pred_goals_dc_df <- predict_goals(gm_res_dc, team1 = to_predict1, team2 = to_predict2, return_df = TRUE)

test_that("Predict goals", {
  for (goal_mats in list(pred_goals_default, pred_goals_dc)) {
    expect_true(is.matrix(goal_mats[[1]]))
    expect_true(is.matrix(goal_mats[[2]]))
    expect_false(any(is.na(goal_mats[[1]])))
    expect_false(any(is.na(goal_mats[[2]])))
  }
  expect_true(is.data.frame(pred_goals_default_df))
  expect_true(is.data.frame(pred_goals_dc_df))
  # The scoreline probabilities of each match should sum to 1, so the total
  # over the data frame equals the number of matches predicted.
  expect_true(abs(sum(pred_goals_default_df$probability) - length(to_predict1)) <= 0.001)
  expect_true(abs(sum(pred_goals_dc_df$probability) - length(to_predict1)) <= 0.001)
})
# Over/under probabilities for the default and Dixon-Coles models.
pred_ou_default <- predict_ou(gm_res, team1=to_predict1, team2=to_predict2)
pred_ou_dc <- predict_ou(gm_res_dc, team1=to_predict1, team2=to_predict2)
pred_ou_default_df <- predict_ou(gm_res, team1=to_predict1, team2=to_predict2, return_df = TRUE)
pred_ou_dc_df <- predict_ou(gm_res_dc, team1=to_predict1, team2=to_predict2, return_df = TRUE)

test_that("Predict ou", {
  # (A stray bare `pred_ou_default` expression was removed here: it only
  # printed the object during interactive runs and asserted nothing.)
  expect_true(is.list(pred_ou_default))
  expect_true(is.list(pred_ou_dc))
  # Under- and over-probabilities must be complementary (sum to 1).
  expect_true(all(abs(pred_ou_default$prob_under + pred_ou_default$prob_over - 1) <= 0.0001))
  expect_true(all(abs(pred_ou_dc$prob_under + pred_ou_dc$prob_over - 1) <= 0.0001))
  expect_true(is.data.frame(pred_ou_default_df))
  expect_true(is.data.frame(pred_ou_dc_df))
})
# "Both teams to score" probabilities for each model type.
pred_btts_default <- predict_btts(gm_res, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_btts_dc <- predict_btts(gm_res_dc, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_btts_nbin <- predict_btts(gm_res_nbin, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_btts_cmp <- predict_btts(gm_res_cmp, team1=to_predict1, team2=to_predict2, return_df = FALSE)
pred_btts_default_df <- predict_btts(gm_res, team1=to_predict1, team2=to_predict2, return_df = TRUE)

test_that("Predict btts", {
  expect_true(is.list(pred_btts_default))
  expect_true(is.list(pred_btts_dc))
  expect_true(is.list(pred_btts_nbin))
  expect_true(is.list(pred_btts_cmp))
  # BUGFIX: the probability bounds were written as all(abs(x >= 0)), i.e.
  # abs() applied to a logical vector. The truth value was the same, but the
  # abs() was accidental and misleading; check the bounds directly.
  expect_true(all(pred_btts_default$prob_btts >= 0))
  expect_true(all(pred_btts_dc$prob_btts >= 0))
  expect_true(all(pred_btts_nbin$prob_btts >= 0))
  expect_true(all(pred_btts_cmp$prob_btts >= 0))
  expect_true(all(pred_btts_default$prob_btts <= 1))
  expect_true(all(pred_btts_dc$prob_btts <= 1))
  expect_true(all(pred_btts_nbin$prob_btts <= 1))
  expect_true(all(pred_btts_cmp$prob_btts <= 1))
  expect_true(is.data.frame(pred_btts_default_df))
})
# DC weight function----
context("DC weights")

# The Dixon-Coles exponential time-decay weighting function, at three decay rates.
my_weights1 <- weights_dc(england_2011$Date, xi=0.0019)
my_weights2 <- weights_dc(england_2011$Date, xi=0.011)
my_weights3 <- weights_dc(england_2011$Date, xi=0.04)

# A weighted fit should give different parameter estimates than the
# unweighted default fit.
gm_res_w <- goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                      team1 = england_2011$home, team2=england_2011$visitor,
                      weights = my_weights1)

test_that("The weighting function", {
  expect_true(is.numeric(my_weights1))
  expect_true(is.numeric(my_weights2))
  expect_true(is.numeric(my_weights3))
  # BUGFIX: my_weights3 was checked twice for non-negativity and my_weights1
  # not at all (copy-paste duplicate); check all three weight vectors.
  expect_true(all(my_weights1 >= 0))
  expect_true(all(my_weights2 >= 0))
  expect_true(all(my_weights3 >= 0))
  expect_false(all(unlist(gm_res_w$parameters) == unlist(gm_res$parameters)))
  expect_true(gm_res_w$converged)
  # Negative decay rates are invalid and should raise an error.
  expect_error(weights_dc(england_2011$Date, xi=-0.9), 'xi >= 0 is not TRUE')
})
# dDCP function ----
context("dDCP function")

# The Dixon-Coles bivariate pmf should be a proper distribution: summing over
# a generous grid of scorelines should give (approximately) 1.
dcp_sum <- sum(dDCP(x1=rep(0:15,16),
                    x2=rep(0:15,each=16), 0.5, 1.2))

# Away from the low-scoring results the DC pmf should equal the product of
# two independent Poisson pmfs, for both positive and negative rho.
dcp_test1 <- dDCP(x1=3, x2=2, 0.5, 1.2, rho=0.01) == dpois(3, lambda=0.5) * dpois(2, lambda=1.2)
dcp_test2 <- dDCP(x1=1, x2=2:5, 0.5, 1.2, rho=0.01) == dpois(1, lambda=0.5) * dpois(2:5, lambda=1.2)
dcp_test3 <- dDCP(x1=3, x2=2, 0.5, 1.2, rho=-0.02) == dpois(3, lambda=0.5) * dpois(2, lambda=1.2)
dcp_test4 <- dDCP(x1=1, x2=2:5, 0.5, 1.2, rho=-0.02) == dpois(1, lambda=0.5) * dpois(2:5, lambda=1.2)

# ...and for the low-scoring results (here 1-0 and 1-1) it must differ from
# the independent Poisson product when rho != 0.
dcp_test5 <- dDCP(x1=1, x2=0:1, 0.5, 1.2, rho=0.01) != dpois(1, lambda=0.5) * dpois(0:1, lambda=1.2)

# Cross-check dDCP against predict_goals() on the fitted DC model.
# Only look at the first game in the prediction set (Arsenal's fixture).
to_test_idx <- pred_goals_dc_df$team1 == 'Arsenal'

# Compute the same scoreline probabilities directly with dDCP.
dc_prob2 <- dDCP(x1 = pred_goals_dc_df$goals1[to_test_idx], x2 = pred_goals_dc_df$goals2[to_test_idx],
                 lambda1 = pred_expg_dc$expg1[1], lambda2 = pred_expg_dc$expg2[1],
                 rho = gm_res_dc$parameters$rho)

# Put both probability columns side by side in a data frame and compare.
pred_goals_dc_df_subset <- pred_goals_dc_df[to_test_idx,]
pred_goals_dc_df_subset$probability2 <- dc_prob2

dcp_test6 <- all(abs(pred_goals_dc_df_subset$probability - pred_goals_dc_df_subset$probability2) < 0.00001)

test_that('Dixon-Coles probability function', {
  expect_true(abs(dcp_sum - 1) < 0.00001)
  expect_true(dcp_test1)
  expect_true(all(dcp_test2))
  expect_true(dcp_test3)
  expect_true(all(dcp_test4))
  expect_true(all(dcp_test5))
  expect_true(all(dcp_test6))
})
# expg_from_probabilities ----
context("expg_from_probabilities")

# 1X2 probabilities for every match in the data, from the default (Poisson)
# and Dixon-Coles models.
pred_result_default_all <- predict_result(gm_res, team1 = england_2011$home,
                                          team2=england_2011$visitor, return_df = FALSE)

pred_result_dc_all <- predict_result(gm_res_dc, team1 = england_2011$home,
                                     team2=england_2011$visitor, return_df = FALSE)

# The corresponding expected goals, to compare the reverse-engineered values against.
pred_result_expg_all <- predict_expg(gm_res, team1 = england_2011$home,
                                     team2=england_2011$visitor, return_df = FALSE)

pred_result_expg_dc_all <- predict_expg(gm_res_dc, team1 = england_2011$home,
                                        team2=england_2011$visitor, return_df = FALSE)

# Reverse-engineer the expected goals from the outcome probabilities; they
# should closely recover the expg values that generated the probabilities.
expgfp <- expg_from_probabilities(pred_result_default_all, uprx = 30)

expgfpdc <- expg_from_probabilities(pred_result_dc_all,
                                    rho=gm_res_dc$parameters$rho, uprx = 30)

# Diagnostic plots of the recovery errors, kept for interactive debugging:
# hist(pred_result_expg_all$expg1 - expgfp$expg[,1])
#
# hist(pred_result_expg_dc_all$expg1 - expgfpdc$expg[,1])
# hist(pred_result_expg_dc_all$expg2 - expgfpdc$expg[,2])

test_that("expg_from_probabilities", {
  # Default (Poisson) model.
  expect_true(length(expgfp) == 2)
  expect_true(ncol(expgfp$expg) == 2)
  expect_false(any(is.na(expgfp$expg)))
  expect_false(any(is.na(expgfp$sq_errors)))
  expect_true(all(abs(pred_result_expg_all$expg1 - expgfp$expg[,1]) < 0.01))
  expect_true(all(abs(pred_result_expg_all$expg2 - expgfp$expg[,2]) < 0.01))
  # Dixon-Coles model.
  expect_true(length(expgfpdc) == 2)
  expect_true(ncol(expgfpdc$expg) == 2)
  expect_false(any(is.na(expgfpdc$expg)))
  expect_false(any(is.na(expgfpdc$sq_errors)))
  expect_true(all(abs(pred_result_expg_dc_all$expg1 - expgfpdc$expg[,1]) < 0.01))
  expect_true(all(abs(pred_result_expg_dc_all$expg2 - expgfpdc$expg[,2]) < 0.01))
})
# Warnings ----
context("Warnings")

# Append two fake matches between teams that are disconnected from the rest
# of the league, to exercise the connectedness warning below.
# (bind_rows is from dplyr, which this test file assumes is attached.)
england_2011_tmp <- bind_rows(england_2011,
                              data.frame(home=c('ff', 'aaa'), visitor=c('aaa', 'zzz'),
                                         hgoal=c(1,1), vgoal=c(1,1), stringsAsFactors = FALSE))

test_that("Warning messages during model fitting", {
  # Fixing a dispersion parameter for model families that do not use one
  # should raise a warning (default Poisson and gaussian models).
  expect_warning(goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                           team1 = england_2011$home, team2=england_2011$visitor,
                           fixed_params = list(dispersion=1)))

  expect_warning(goalmodel(goals1 = england_2011$hgoal, goals2 = england_2011$vgoal,
                           team1 = england_2011$home, team2=england_2011$visitor,
                           fixed_params = list(dispersion=1), model='gaussian'))

  # A disconnected team graph should also raise a warning.
  expect_warning(goalmodel(goals1 = england_2011_tmp$hgoal, goals2 = england_2011_tmp$vgoal,
                           team1 = england_2011_tmp$home, team2=england_2011_tmp$visitor))
})
# Match schedule functions ----
context("Match schedule functions")

# Rest days: days since each side's previous match. The first match of every
# team has no previous match, hence one NA per team in the data.
rest_days <- days_since_last_match(england_2011$home, england_2011$visitor, england_2011$Date)

test_that("days_since_last_match", {
  expect_true(ncol(rest_days) == 2)
  expect_true(nrow(rest_days) == nrow(england_2011))
  expect_true(sum(is.na(rest_days)) == length(gm_res$all_teams))
})

# Match congestion: number of matches each side played in the preceding days.
recent_matches <- matches_last_xdays(england_2011$home, england_2011$visitor, england_2011$Date)

test_that("matches_last_xdays", {
  expect_true(ncol(recent_matches) == 2)
  expect_true(nrow(recent_matches) == nrow(england_2011))
  expect_true(sum(is.na(recent_matches)) == length(gm_res$all_teams))
})
# p1x2 function ----
context("p1x2 function")

# The stand-alone p1x2() function should reproduce the probabilities from
# predict_result() when given the same expected goals and the same
# rho/dispersion parameters.
p1x2_res_default <- p1x2(expg1 = pred_expg_default$expg1, expg2 = pred_expg_default$expg2,
                         model = 'poisson')

p1x2_res_dc <- p1x2(expg1 = pred_expg_dc$expg1, expg2 = pred_expg_dc$expg2,
                    model = 'poisson', rho = gm_res_dc$parameters$rho)

# BUGFIX: the negbin probabilities were computed from the CMP model's
# expected goals (copy-paste error); use the negbin model's own predictions.
p1x2_res_nbin <- p1x2(expg1 = pred_expg_nbin$expg1, expg2 = pred_expg_nbin$expg2,
                      model = 'negbin', dispersion = gm_res_nbin$parameters$dispersion)

p1x2_res_cmp <- p1x2(expg1 = pred_expg_cmp$expg1, expg2 = pred_expg_cmp$expg2,
                     model = 'cmp', dispersion = gm_res_cmp$parameters$dispersion)

test_that("p1x2", {
  expect_true(all(abs(p1x2_res_default - pred_result_default) <= 0.0001))
  expect_true(all(abs(p1x2_res_dc - pred_result_dc) <= 0.0001))
  # BUGFIX: these two expectations compared the objects with themselves
  # (p1x2_res_cmp - p1x2_res_cmp), so they could never fail. Compare against
  # the predict_result() output instead, as clearly intended.
  expect_true(all(abs(p1x2_res_cmp - pred_result_cmp) <= 0.0001))
  expect_true(all(abs(p1x2_res_nbin - pred_result_nbin) <= 0.0001))
})
# pbtts function ----
context("pbtts function")

# The stand-alone pbtts() function should agree with predict_btts() when
# given the same expected goals.
pbtts_res_default <- pbtts(expg1 = pred_expg_default$expg1,
                           expg2 = pred_expg_default$expg2,
                           model = 'poisson')

test_that("pbtts", {
  expect_true(is.numeric(pbtts_res_default))
  expect_true(all(abs(pbtts_res_default - pred_btts_default$prob_btts) <= 0.0001))
  # One probability per predicted match.
  expect_length(pbtts_res_default, length(to_predict1))
})
# score_predictions function ----
context("score_predictions function")

# Observed outcomes coded 1 = home win, 2 = draw, 3 = away win.
observed_hda_vec <- match(as.character(england_2011$result), c('H', 'D', 'A'))

scores_result_default <- score_predictions(predictions = pred_result_default_all,
                                           observed = observed_hda_vec,
                                           score = c('log', 'brier', 'rps'))

scores_result_dc <- score_predictions(predictions = pred_result_dc_all,
                                      observed = observed_hda_vec,
                                      score = c('log', 'brier', 'rps'))

test_that("score_predictions", {
  n_matches <- nrow(pred_result_default_all)
  for (scores in list(scores_result_default, scores_result_dc)) {
    # One element per requested scoring rule.
    expect_length(scores, 3)
    # One score per prediction.
    expect_length(scores$log, n_matches)
    expect_length(scores$brier, n_matches)
    expect_length(scores$rps, n_matches)
    # All scores are strictly positive, and the bounded scoring rules
    # respect their theoretical upper limits.
    expect_true(all(scores$log > 0))
    expect_true(all(scores$brier > 0))
    expect_true(all(scores$rps > 0))
    expect_true(all(scores$brier < 2))
    expect_true(all(scores$rps < 1))
  }
})
|
f1ee24695246da7ff6b7dddd60c8eb50b314894f
|
4327c535795eabb956eecaf154fa476de41e3b86
|
/R/rigid_Viterbi.R
|
a60be26549565d7a745c6ee374089c69302760bd
|
[] |
no_license
|
rfael0cm/RTIGER
|
5e02b13ed6282b82f0df34753a89d29ccb21cac9
|
d1ce04c748510269551cf0d6091726f7024e3464
|
refs/heads/master
| 2023-03-30T22:33:02.866958
| 2021-08-25T11:58:29
| 2021-08-25T11:58:29
| 347,925,332
| 2
| 5
| null | 2022-08-17T10:00:34
| 2021-03-15T10:27:17
|
Julia
|
UTF-8
|
R
| false
| false
| 4,865
|
r
|
rigid_Viterbi.R
|
### Efficient rigid Viterbi decoding
# Computes the most likely state path through a three-state HMM under the
# constraint that every visit to a state lasts at least `rigid` positions
# before a transition may occur. The state labels are hard-coded below as
# "mat", "het", "pat", and transitions between "mat" and "pat" never go
# direct — they always pass through "het" (see the recursions, which only
# allow het->mat, het->pat, mat->het and pat->het besides self-transitions).
#
# Input: psimat   - emission matrix psi_k(o_t); rownames must be the state
#                   labels "mat", "het", "pat"
#        transmat - transition matrix A=(a_jk), with matching dimnames
#        pivec    - optional initiation vector pi = (pi_k); when NULL a flat
#                   log-prior of 0 is used
#        rigid    - rigidity parameter: the minimum length of a stretch of
#                   the same state before a transition may occur
# Output: list with the r-Viterbi path (character vector of states) and its
#         log-likelihood
# NOTE(review): with the default rigid = 1 the range 2:rigid below is 2:1 and
# iterates k = 2, then k = 1 — presumably unintended; confirm rigid >= 2 is
# always used by callers.
rigid_Viterbi = function (psimat, transmat, pivec = NULL, rigid = 1){
  # Work in log space; the small constant guards against log(0).
  states = rownames(psimat) # state labels are taken from the emission matrix rows
  psimat = log(psimat + 1e-16)
  transmat = log(transmat + 1e-16)
  nstates = nrow(psimat)
  npos = ncol(psimat)
  if (is.null(pivec)) {pivec = rep(0,nstates)} else {pivec = log(pivec + 1e-16)}
  phi = matrix(0,nrow=nstates,ncol=npos) # the matrix with the max log-probabilities
  rownames(phi) = states
  # Backtracking pointers. NOTE(review): `back` is initialised numeric but is
  # later assigned state-name strings, which silently coerces the whole
  # matrix to character — works, but confirm the coercion is intentional.
  back = matrix(0,nrow=nstates,ncol=npos)
  rownames(back) = states
  # hilf holds, per state and position, the log-probability of a contiguous
  # stretch of that state: first as cumulative sums, then converted into
  # sliding windows of length `rigid` by differencing.
  hilf = psimat
  hilf[,1] = hilf[,1] + pivec
  hilf[,2:npos] = hilf[,2:npos] + diag(transmat) # add self-transition cost at every later position
  hilf = t(apply(hilf,1,cumsum))
  # Sliding-window difference: windowed sums of length `rigid`.
  # (Original author note, translated from German: "check again carefully".)
  hilf[,(rigid+1):npos] = hilf[,(rigid+1):npos] - hilf[,1:(npos-rigid)]
  # Initialize phi with the single-position stretch scores.
  phi[,1] = hilf[,1]
  # Fill phi for positions 2 : rigid. At these early positions a full stretch
  # of length `rigid` does not fit yet, so incoming stretches are scored with
  # hilf[, k-1] (the stretch starting at position 1). The sapply is used
  # purely for its side effects via <<- assignments into phi and back.
  sapply(2:rigid,function(k){
    # Recursive calculation of the mat path: stay in mat, or enter from a het stretch.
    stay = phi["mat",k-1] + transmat["mat","mat"]
    hetmat = hilf["het",k-1] + transmat["het","mat"]
    if (stay>hetmat) {
      phi["mat",k] <<- stay + psimat["mat",k]
      back["mat",k] <<- "mat"
    } else {
      phi["mat",k] <<- hetmat + psimat["mat",k]
      back["mat",k] <<- "het"
    }
    # Recursive calculation of the pat path: stay in pat, or enter from a het stretch.
    stay = phi["pat",k-1] + transmat["pat","pat"]
    hetpat = hilf["het",k-1] + transmat["het","pat"]
    if (stay>hetpat) {
      phi["pat",k] <<- stay + psimat["pat",k]
      back["pat",k] <<- "pat"
    } else {
      phi["pat",k] <<- hetpat + psimat["pat",k]
      back["pat",k] <<- "het"
    }
    # Recursive calculation of the het path: stay in het, or enter from a
    # mat or pat stretch — whichever scores best.
    stay = phi["het",k-1] + transmat["het","het"]
    mathet = hilf["mat",k-1] + transmat["mat","het"]
    pathet = hilf["pat",k-1] + transmat["pat","het"]
    if (stay>= max(mathet,pathet)) {
      phi["het",k] <<- stay + psimat["het",k]
      back["het",k] <<- "het"
    } else if (mathet>pathet) {
      phi["het",k] <<- mathet + psimat["het",k]
      back["het",k] <<- "mat"
    } else {
      phi["het",k] <<- pathet + psimat["het",k]
      back["het",k] <<- "pat"
    }
  } ) #end sapply
  # Fill phi for positions (rigid+1) : npos. Now an entering stretch of full
  # length `rigid` fits, so incoming paths combine phi at k-rigid with the
  # windowed stretch score hilf[, k-1].
  sapply((rigid+1):npos,function(k){
    # Recursive calculation of the mat path.
    stay = phi["mat",k-1] + transmat["mat","mat"]
    hetmat = phi["het",k-rigid] + hilf["het",k-1] + transmat["het","mat"]
    if (stay>hetmat) {
      phi["mat",k] <<- stay + psimat["mat",k]
      back["mat",k] <<- "mat"
    } else {
      phi["mat",k] <<- hetmat + psimat["mat",k]
      back["mat",k] <<- "het"
    }
    # Recursive calculation of the pat path.
    stay = phi["pat",k-1] + transmat["pat","pat"]
    hetpat = phi["het",k-rigid] + hilf["het",k-1] + transmat["het","pat"]
    if (stay>hetpat) {
      phi["pat",k] <<- stay + psimat["pat",k]
      back["pat",k] <<- "pat"
    } else {
      phi["pat",k] <<- hetpat + psimat["pat",k]
      back["pat",k] <<- "het"
    }
    # Recursive calculation of the het path.
    stay = phi["het",k-1] + transmat["het","het"]
    mathet = phi["mat",k-rigid] + hilf["mat",k-1] + transmat["mat","het"]
    pathet = phi["pat",k-rigid] + hilf["pat",k-1] + transmat["pat","het"]
    if (stay>= max(mathet,pathet)) {
      phi["het",k] <<- stay + psimat["het",k]
      back["het",k] <<- "het"
    } else if (mathet>pathet) {
      phi["het",k] <<- mathet + psimat["het",k]
      back["het",k] <<- "mat"
    } else {
      phi["het",k] <<- pathet + psimat["het",k]
      back["het",k] <<- "pat"
    }
  } ) #end sapply
  # Backtracking: start from the best-scoring state at the last position,
  # then follow the pointers. A pointer to a different state implies that a
  # whole stretch of length `rigid` of that state precedes the current
  # position, so the path is filled in `rigid` positions at a time.
  viterbipath = character(npos)
  currentpos = npos
  currentstate = states[which.max(phi[,currentpos])]
  viterbipath[currentpos] = currentstate
  loglikelihood = phi[currentstate,currentpos]
  while(currentpos != 1){
    zur = back[currentstate,currentpos]
    if (zur == currentstate) {
      # Self-pointer: extend the current stretch one position back.
      currentpos = currentpos - 1
      viterbipath[currentpos] = currentstate
    } else {
      # State switch: the previous `rigid` positions belong to state `zur`.
      jump = max(1,currentpos-rigid)
      viterbipath[jump:(currentpos-1)] = zur
      currentpos = jump
      currentstate = zur
    }
  }
  return(list(viterbipath=viterbipath,loglikelihood=loglikelihood))
} # end rigid_Viterbi
|
67d7f853f789dc9cb8566027d17a352eef712b03
|
0d3acd434222c88e4aa761c0d2d63e1b233edc0f
|
/man/edit_distance.Rd
|
e818500adfa4502dc0109fb90f4d2f42609b6809
|
[] |
no_license
|
EngleLab/englelab
|
fdc1e041e49945d7005c67f3ceafa19621de9fd2
|
7b85d9e2edd148c3570b1c380a977739e6d450f9
|
refs/heads/main
| 2023-05-24T21:23:29.756190
| 2023-05-10T01:10:58
| 2023-05-10T01:10:58
| 151,780,396
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 692
|
rd
|
edit_distance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edit_distance.R
\name{edit_distance}
\alias{edit_distance}
\title{Calculates EditDistance.unit and EditDistance.load scores}
\usage{
edit_distance(x, target = "MemoryTargets", recall = "Recalled")
}
\arguments{
\item{x}{dataframe}
\item{target}{the column name containing the sequence of target memory items}
\item{recall}{the column name containing the sequence of recalled items}
}
\description{
The Damerau-Levenshtein distance is implemented in the stringdist package:
van der Loo, M. P. J. (2014). The stringdist package for approximate string
matching. R Journal, 6(1), 111-122. doi:10.32614/RJ-2014-011
}
|
d41acd6b791a8d236812861f50313875224eb34f
|
9b9445f48eba04344f84f04588671eec991c7f07
|
/R/final_size.R
|
330662beef2ba9ac782998d0fb2bd5728d774c97
|
[] |
no_license
|
jameshay218/episize
|
232677ce381f30a1ac732a32ce6ec3f5774e58ea
|
8af8e097e6e766b62320ecc7108db0b5a713ddb8
|
refs/heads/master
| 2021-01-10T10:34:32.310211
| 2015-12-09T18:28:55
| 2015-12-09T18:28:55
| 47,708,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,606
|
r
|
final_size.R
|
#' Multiple age and titre class final size equation system
#'
#' Evaluates the residual of the epidemic final size equation system for an
#' attack rate matrix (see Kucharski PLoS Pathogens 2014; Andreasen 2011;
#' Wallinga 2006). The residual is zero when the attack rate matrix solves
#' the system, which makes this function suitable as the target of a
#' nonlinear equation solver.
#' @param A an NxM matrix of attack rates, where N is the number of age classes and M the number of immunity classes
#' @param R0 the disease specific R0 ie. beta/gamma. Note that another parameter will mediate the contact rate
#' @param G the normalised contact rate matrix scaled by population sizes. See \code{\link{setup_C}}
#' @param P NxM matrix of population proportions; number of each age and titre class as proportion of entire population
#' @param Z NxM matrix of immunity. ie. each element is the proportion of infection prevented due to immunity for that age/titre class
#' @return difference between the LHS and RHS of the final size equation
#' @export
simeq <- function(A, R0, G, P, Z){
  # Force of infection experienced by each class, given attack rates A.
  force_of_infection <- (G * P) %*% A
  # Residual: attack rates minus the final size equation's RHS.
  A - (1 - exp(-R0 * Z * force_of_infection))
}
#' Normalised contact rate matrix
#'
#' Given a matrix of contact frequencies between age classes and a matrix of
#' true population sizes, returns the scaled, normalised contact rate matrix
#' covering every age/titre class combination.
#' @param C1 the non-normalised contact matrix of contact frequencies between each age class
#' @param Ns the matrix of population sizes for each age/titre combination (non-normalised)
#' @return a matrix of the normalised contact rates between all age/titre class combinations.
#' @examples
#' C <- matrix(c(2,.5,.4,0.3),ncol=2,nrow=2)
#' N11 <- 1000
#' N12 <- 1000
#' N21 <- 2000
#' N22 <- 750
#' N <- matrix(c(N11,N21,N12,N22),ncol=2,nrow=2)
#' C1 <- setup_C(C,N)
#' @export
setup_C <- function(C1, Ns){
  n_titre <- ncol(Ns)
  n_age <- nrow(Ns)
  # Expand the age-by-age contact matrix so that every titre class within an
  # age group shares that age group's contact rates.
  contact_expanded <- kron(C1, ones(n_titre, n_titre))
  # Size of each age/titre cell as a proportion of its age group, expanded to
  # one element per age/titre combination.
  within_age_share <- repmat(Ns / rowSums(Ns), n_titre, n_age)
  # Size of each age group as a proportion of the total population, expanded
  # to the same layout.
  age_share <- kron(repmat(t(t(rowSums(Ns) / sum(Ns))), 1, n_titre), ones(n_titre, n_age))
  # Scaled contact rates for all age/titre group combinations.
  contact_expanded * within_age_share / age_share
}
#' Epidemic Final Size Calculation
#'
#' Calculates the final size of an epidemic given 2-dimensional population
#' categorisation eg. age and immunity class, by numerically solving the
#' final size equation system \code{\link{simeq}} with \code{nleqslv}. Note
#' that this uses the final size calculation similar to that in Kucharski et
#' al. 2014 PLoS Pathogens.
#' @param C1 the non-normalised contact matrix of contact frequencies between each age class
#' @param R0 the disease specific R0 ie. beta and gamma. Note that another parameter will mediate the contact rate
#' @param Ns the matrix of population sizes for each age or titre combination (non-normalised) (ie. rows = ages, cols = immunity classes)
#' @param alphas a vector of values between 0 and 1 matching the number of immunity classes
#' @return an NxM matrix of attack rates (ie. proportion of susceptibles becoming infected)
#' @seealso \code{\link{epi_ode_size}}
#' @export
epi_final_size <- function(C1, R0, Ns, alphas){
  Ntiter <- ncol(Ns)
  Nage <- nrow(Ns)
  # Scaled contact rates between every age/titre class combination.
  C <- setup_C(C1, Ns)
  # Population share of each age/titre cell, replicated to one row per
  # equation in the solver's system.
  propns_3 <- as.numeric(t(Ns/sum(Ns))) # Generate a matrix of Pai
  propns_3 <- repmat(propns_3,Nage*Ntiter,1)
  # Random starting point for the nonlinear solver.
  # NOTE(review): rand() makes the start non-deterministic; set a seed
  # upstream if reproducible solver behaviour is required.
  A0 <- rand(Ntiter*Nage,1) # Starting seeds for nleqslv
  rep_alphas <- t(repmat(alphas,1,Nage)) # Format alphas like A0, as each group will have its incidence reduced
  # Run the solver on simeq() with tight tolerances to find the attack rates
  # for all age/titre groups.
  final <- array(nleqslv(A0,simeq,G=C,Z=rep_alphas,P=propns_3,R0=R0,control=list(xtol=1e-15,ftol=1e-15,btol=1e-15,maxit=1000))$x)
  # Reshape the flat solution vector into an age x titre matrix.
  final <- matrix(final,ncol=Ntiter,nrow=Nage,byrow=T)
  return(final)
}
#' Epidemic Final Size Calculation ODE
#'
#' Calculates the final size of an epidemic given 2-dimensional population
#' categorisation eg. age and immunity class, by integrating an SIR model and
#' reading off the recovered fraction at the end of the run.
#' @param C1 the non-normalised contact matrix of contact frequencies between each age class
#' @param R0 the disease specific R0 (transmission rate). Note that another parameter will mediate the contact rate
#' @param Tg number of days spent infectious (ie. 1/gamma)
#' @param Ns the matrix of population sizes for each age/titre combination (non-normalised) (ie. rows = ages, cols = immunity classes)
#' @param alphas a vector of values between 0 and 1 matching the number of immunity classes
#' @param days number of days to integrate the ODE system over (default 365,
#'   matching the previous hard-coded behaviour)
#' @return an NxM matrix of attack rates (ie. proportion of susceptibles becoming infected)
#' @seealso \code{\link{epi_final_size}}
#' @export
epi_ode_size <- function(C1, R0, Tg, Ns, alphas, days = 365){
  C <- setup_C(C1, Ns)
  Nage <- nrow(Ns)
  Ntiter <- ncol(Ns)
  long_Ns <- as.numeric(t(Ns))
  # Initial state: interleaved (S, I, R) triples, one per age/titre group.
  # Previously built element-by-element in a loop; rbind interleaves the
  # group sizes with zero I and R compartments in one step.
  start <- as.numeric(rbind(long_Ns, 0, 0))
  # Seed a single index infection in the first group.
  start[1] <- start[1] - 1
  start[2] <- 1
  y <- ode(y = start, t = seq(1, days, by = 1), func = general_sir,
           parms = c(R0, alphas, Tg), C = C, Nage = Nage, Ntiter = Ntiter)
  # Attack rate per group: final recovered count (column offset 4 = R of
  # group i, after the time column) over initial susceptibles (offset 2 = S).
  n_groups <- (ncol(y) - 1) / 3
  A <- numeric(n_groups)  # preallocated (was grown with A[i] <- ... on NULL)
  for (i in seq_len(n_groups)) {
    A[i] <- y[nrow(y), (i - 1) * 3 + 4] / y[1, (i - 1) * 3 + 2]
  }
  matrix(A, nrow = Nage, ncol = Ntiter, byrow = TRUE)
}
#' General SIR ode
#'
#' Generic SIR derivative function (for use with deSolve's \code{ode}),
#' handling an arbitrary number of age and immunity classes.
#' @param t current time, as required by the ode interface (not used directly)
#' @param y vector of compartment sizes, interleaved as S11, I11, R11, S12, I12, R12, ...
#' @param pars vector of R0, 1/gamma and alphas: c(R0, alphas..., Tg), where
#'   the alphas scale incidence per titre class and Tg is the infectious period
#' @param C the normalised contact matrix of contact frequencies between each age and titre classes
#' @param Nage number of age classes
#' @param Ntiter number of titre classes
#' @return a list of compartment size changes, as required by deSolve
#' @export
general_sir <- function(t, y, pars, C, Nage, Ntiter){
  n_pars <- length(pars)
  basic_repro <- pars[1]
  infectious_period <- pars[n_pars]
  immunity_scaling <- pars[2:(n_pars - 1)]
  # Reshape the state so each column holds the (S, I, R) triple of one group.
  state <- matrix(y, nrow = 3, ncol = Nage * Ntiter)
  susceptible <- state[1, ]
  infectious <- state[2, ]
  # New infections per group, scaled by immunity and normalised by total population.
  dS <- -((basic_repro / infectious_period) * immunity_scaling * susceptible *
            (infectious %*% t(C)) / sum(state))
  dR <- infectious / infectious_period
  dI <- -dS - dR
  # Interleave the derivatives back into (dS, dI, dR) triples per group.
  list(as.vector(rbind(dS, dI, dR)))
}
#' 3 age class SIR ode
#'
#' Three age class SIR derivative function (for use with deSolve's \code{ode}).
#' @param t current time, as required by the ode interface (not used directly)
#' @param y vector of compartment sizes: S1, I1, R1, S2, I2, R2, S3, I3, R3
#' @param pars vector of R0, alpha (immunity scaling) and duration of the infectious period
#' @param C the normalised 3x3 contact matrix of contact frequencies between the age classes
#' @return a list of compartment size changes, as required by deSolve
#' @seealso \code{\link{sir}}, \code{\link{sir_2}}
#' @export
sir_3 <- function(t,y, pars, C){
  S1 <- y[1]; I1 <- y[2]; R1 <- y[3]
  S2 <- y[4]; I2 <- y[5]; R2 <- y[6]
  S3 <- y[7]; I3 <- y[8]; R3 <- y[9]
  # Total population (constant under SIR dynamics).
  N <- S1 + I1 + R1 + S2 + I2 + R2 + S3 + I3 + R3
  # BUGFIX: removed a leftover print(N) debug statement that was executed on
  # every ODE evaluation, spamming output and slowing the integration.
  R0 <- pars[1]
  alpha <- pars[2]
  durI <- pars[3]
  # Per-contact transmission rate, scaled by immunity.
  rate <- (R0/durI)*alpha
  # Force of infection on each age class from all infectious classes.
  foi1 <- C[1,1]*I1/N + C[1,2]*I2/N + C[1,3]*I3/N
  foi2 <- C[2,1]*I1/N + C[2,2]*I2/N + C[2,3]*I3/N
  foi3 <- C[3,1]*I1/N + C[3,2]*I2/N + C[3,3]*I3/N
  dS1 <- -rate*S1*foi1
  dR1 <- I1/durI
  dI1 <- -dS1 - dR1
  dS2 <- -rate*S2*foi2
  dR2 <- I2/durI
  dI2 <- -dS2 - dR2
  dS3 <- -rate*S3*foi3
  dR3 <- I3/durI
  dI3 <- -dS3 - dR3
  return(list(c(dS1,dI1,dR1,dS2,dI2,dR2,dS3,dI3,dR3)))
}
#' Single population SIR case - ODE model
#'
#' One age class SIR derivative function (for use with deSolve's \code{ode}).
#' @param t current time, as required by the ode interface (not used directly)
#' @param y vector of compartment sizes ie. S, I and R
#' @param pars vector of R0 and duration of infectious period
#' @param C the contact rate
#' @return a list of compartment size changes, as required by deSolve
#' @seealso \code{\link{sir_3}}, \code{\link{sir_2}}
#' @export
sir <- function(t,y,pars,C){
  susceptible <- y[1]
  infectious <- y[2]
  pop <- y[1] + y[2] + y[3]
  durI <- pars[2]
  # beta = R0 / infectious period; incidence is frequency-dependent.
  incidence <- (pars[1] / durI) * susceptible * infectious * C / pop
  recovery <- infectious / durI
  list(c(-incidence, incidence - recovery, recovery))
}
#' Two population SIR case - ODE model
#'
#' 2 age class SIR ode function (for use in deSolve)
#' @param t current time, as for ode function
#' @param y vector of compartment sizes ie. S1, I1, R1, S2 etc...
#' @param pars vector of R0, alpha (immunity) and duration of infectious period
#' @param C the 2x2 contact rate matrix
#' @return a list of compartment size changes, as required by deSolve
#' @seealso \code{\link{sir_3}}, \code{\link{sir}}
#' @export
sir_2 <- function(t,y, pars, C){
  # Split the state vector into the two age classes' compartments:
  # positions (1,4) = S, (2,5) = I, (3,6) = R.
  s <- y[c(1, 4)]
  i <- y[c(2, 5)]
  r <- y[c(3, 6)]
  n_total <- sum(s, i, r)
  beta <- pars[1] / pars[3]   # R0 / infectious period
  alpha <- pars[2]            # immunity scaling
  # Force of infection per class: contact-matrix-weighted infectious counts,
  # frequency dependent (divided by the combined population).
  foi <- beta * alpha * s * as.vector(C %*% i) / n_total
  recovery <- i / pars[3]
  # Interleave back into (dS1, dI1, dR1, dS2, dI2, dR2) order.
  list(as.vector(rbind(-foi, foi - recovery, recovery)))
}
#' Two age/two titre class SIR ODE function
#'
#' Calculates the final size of an epidemic given 2-dimensional population categorisation eg. age and immunity class using an SIR model. Only takes 2 ages and 2 titres.
#' @param t current time, as for ode function
#' @param y vector of compartment sizes ie. S11, I11, R11, S12 etc...
#' @param pars vector of R0, alpha1, alpha2 (immunity of titre class 1 and 2) and duration of infectious period
#' @param C the contact rate matrix for contact rates between each age/titre class (4x4 matrix)
#' @return a list of compartment size changes, as required by deSolve
#' @seealso \code{\link{epi_ode_size}}
#' @export
sir_22 <- function(t,y, pars, C){
  # State vector holds (S, I, R) for four groups in order:
  # (age1,titre1), (age1,titre2), (age2,titre1), (age2,titre2).
  # Fixed: removed the unused locals N1/N2 of the original; the derivatives
  # only ever use the grand total N. The four repeated per-group expressions
  # are collapsed into one vectorized computation.
  s <- y[c(1, 4, 7, 10)]
  i <- y[c(2, 5, 8, 11)]
  r <- y[c(3, 6, 9, 12)]
  N <- sum(s, i, r)
  R0 <- pars[1]
  # Immunity scaling follows the titre class: groups 1,3 use alpha1 (pars[2]),
  # groups 2,4 use alpha2 (pars[3]).
  alpha <- pars[c(2, 3, 2, 3)]
  durI <- pars[4]
  # Force of infection per group: contact-matrix-weighted infectious counts,
  # frequency dependent (divided by the total population).
  foi <- (R0/durI) * alpha * s * as.vector(C %*% i) / N
  recovery <- i / durI
  # Interleave back into (dS11, dI11, dR11, dS12, ..., dR22) order.
  return(list(as.vector(rbind(-foi, foi - recovery, recovery))))
}
|
8407409e53591a1b905db4f721f7f9331853a332
|
8f56589e11ae57523a7d8011386738a53999a4e1
|
/R/fy.year.R
|
7f8ccf314a34a3c5966325339baf88030e6b8480
|
[] |
no_license
|
grattaninstitute/grattan
|
b7b5c738401850fc67f03f4003f3715f11b27406
|
56fac9813308cf695d850be84adddad61b50d774
|
refs/heads/master
| 2020-05-20T22:23:58.720935
| 2015-06-27T02:12:23
| 2015-06-27T02:12:23
| 38,221,337
| 0
| 0
| null | 2015-06-29T01:28:33
| 2015-06-29T01:28:33
| null |
UTF-8
|
R
| false
| false
| 722
|
r
|
fy.year.R
|
#' Test whether a string looks like a financial year
#'
#' @param fy.yr Character vector of candidate financial-year labels (e.g. "2014-15").
#' @return Logical: does each element contain a 4-digit year followed by the consecutive 2-digit year?
is.fy <- function(fy.yr){
  # A valid financial-year label ("2014-15", "2014 15", "201415", ...) must
  # (a) contain a 4-digit year followed (optionally separated by one char)
  #     by a 2-digit year, AND
  # (b) have the 2-digit year equal the 4-digit year + 1, modulo 100.
  # Fixed: the original used `!grepl(...) | <consecutive>`, which wrongly
  # returned TRUE for strings that do not match the pattern at all
  # (e.g. is.fy("banana") was TRUE).
  looks_like_fy <- grepl("^.*([12][0-9]{3}).?[0-9]{2}.*$", fy.yr)
  first_year <- as.integer(gsub("^.*([12][0-9]{3}).?[0-9]{2}.*$", "\\1", fy.yr))
  second_year <- as.numeric(gsub("^.*[12][0-9]{3}.?([0-9]{2}).*$", "\\1", fy.yr))
  # `&` short-circuits the NA produced by as.integer on non-matching input:
  # FALSE & NA is FALSE.
  looks_like_fy & ((first_year + 1) %% 100) == second_year
}
fy.year <- function(yr_ending){
  # Build the "YYYY-YY" financial-year label for the year ending in `yr_ending`:
  # the starting calendar year, a hyphen, then the last two digits of yr_ending.
  start_year <- as.integer(yr_ending) - 1L
  end_suffix <- substr(yr_ending, 3, 4)
  paste0(start_year, "-", end_suffix)
}
yr2fy <- function(...) fy.year(...)
fy2yr <- function(fy.yr){
  # Convert a financial-year label (e.g. "2014-15") back to the calendar year
  # in which it ends (2015).
  # Fixed: the original condition was inverted — it stopped precisely when the
  # input DID look like a financial year.
  if (!is.fy(fy.yr))
    stop("Doesn't look like a FY.")
  else
    # End year = leading 4-digit year + 1.
    1 + as.integer(gsub("^.*([12][0-9]{3}).?[0-9]{2}.*$", "\\1", fy.yr))
}
|
9abea98b54454cd182d725c9d1fe8d5777c03272
|
da8dae69e597072bc616936d1d72a96f65e4efa0
|
/code/oldversions/v3_20180424/model/Z1Results.R
|
d322cef315a1a1285d4c2f821006ab3b160c8473
|
[] |
no_license
|
UCL/provis
|
71e82c383cd9414840e57c2a2867826d6b4ee3e6
|
86a287c7bc705d4aeffb9bbcf96747e97e6d688b
|
refs/heads/master
| 2020-08-01T04:08:20.198284
| 2019-11-08T12:09:43
| 2019-11-08T12:09:43
| 210,310,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,369
|
r
|
Z1Results.R
|
# create table of model 1 results
# Script settings ----
plotflag<-2      # 0 = no plots, 1 = dev.copy to file, 2 = open postscript device directly
nregions<-11     # number of regions modelled
datastub<-"m11"  # data-file stem passed to B2SetPath() further down
m2flag<-5        # which model-2 fit to load later (0, 1 or 5)
# Region id (1..11) -> readable region name lookup.
# NOTE(review): seq(1:nregions) equals 1:nregions here; seq_len(nregions) would be clearer.
regnames<-data.frame(region_id=seq(1:nregions),
region=c("CaMKOx", "CornwallDevon",
"EastMid", "EastEng",
"London", "NE",
"NW", "SE",
"SW", "WestMid", "YorkshireHumber"))
# Combined-output directory; created on first run.
# NOTE(review): assumes CodeDir is defined by the caller before this script is sourced.
OutDir<-paste0(CodeDir,"/output/allregions")
if (!file.exists(OutDir)) {
dir.create(OutDir)
}
# Load each region's fitted model-1 spline object (m1spline0) into the list m1,
# with elements named "region1".."region11".
m1<-vector("list",nregions)
for (r in 1:nregions) {
load(paste0(CodeDir,"/output/region",as.character(r),"/m1spline0.RData"))
m1[[r]]<-m1spline0
names(m1)[r]<-paste0("region",as.character(r))
}
# Plot tables of parameter estimates
# Write LaTeX tables of the model-1 coefficients, six regions in the first
# table and five in the second (stargazer writes directly to the .tex files).
stargazer(m1$region1,m1$region2,m1$region3,m1$region4,m1$region5,m1$region6,
type="latex",title="Model 1 results",
out = paste0(OutDir,"/model1A.tex"),
out.header=TRUE,
digits=3,digits.extra=2,column.sep.width="0pt",
font.size = "small",df=FALSE,
keep = c("Intercept","year","propertytype","newbuild","tenure"))
stargazer(m1$region7,m1$region8,m1$region9,m1$region10,m1$region11,
type="latex",title="Model 1 results",
out = paste0(OutDir,"/model1B.tex"),
out.header=TRUE,
digits=3,digits.extra=2,column.sep.width="0pt",
font.size = "normalsize",df=FALSE,
keep = c("Intercept","year","propertytype","newbuild","tenure"))
# Create tables of spline parameters
# First column = region id; columns 2:5 hold the per-region spline settings
# stored on each fitted model object.
segments<-matrix(0,nregions,5)
segments[,1]<-c(1:nregions)
polydegree<-segments
for (r in 1:nregions) {
segments[r,2:5] <- m1[[r]]$segments
polydegree[r,2:5] <- m1[[r]]$degree
}
stargazer(segments,type="latex",title="Model 1: number of spline segments",
out = paste0(OutDir,"/model1_segments.tex"),
out.header=TRUE,
column.sep.width="0pt")
stargazer(polydegree,type="latex",title="Model 1: degree of spline polynomials",
out = paste0(OutDir,"/model1_degree.tex"),
out.header=TRUE,
column.sep.width="0pt")
# average price per
# Build a long data frame of per-region, per-year mean (log) prices.
# NOTE(review): nyears and the year labels are taken from m1spline0, i.e. the
# model object of whichever region was loaded LAST in the loop above — this
# assumes all regions share the same set of year levels; confirm.
# Fixed: removed the unused `year1 <- as.numeric()` left over from editing.
nyears<-length(levels(m1spline0$model$year))
for (r in 1:nregions) {
tempdata<-regnames[rep(r,nyears),]
tempdata$year<-c(as.numeric(levels(m1[[r]]$model$year)))
tempdata$logprice<-NA
tempdata$price<-NA
for (y in 1:nyears) {
itemp <- m1[[r]]$model$year==levels(m1spline0$model$year)[y]
tempdata$logprice[y] <- mean(m1[[r]]$model$logprice[itemp])
tempdata$price[y] <- mean(exp(m1[[r]]$model$logprice[itemp]))
}
if (r==1) {
regiondata<-tempdata
} else {
regiondata<-rbind(regiondata,tempdata)
}
}
# plot time series of prices
if (plotflag>0) {
tempfile<-paste0(OutDir,"/price_timeseries.eps")
if (plotflag==2) postscript(tempfile)
# Fixed: inside an if-block a ggplot object is NOT auto-printed, so the
# original wrote an empty plot file; wrap in print() to render it.
print(ggplot(data=regiondata,aes(year,price))+geom_line(aes(col=region)) +
theme(legend.position = "bottom"))
if (plotflag==1) dev.copy(postscript,tempfile)
dev.off()
}
# create tables of model 2
# Load each region's model-2 fit, where m2flag selects the specification
# (m2ols0, m2ols1 or m2ols5). Region 5 (London) is deliberately skipped —
# note the c(1:4,6:11) index, so m2$region5 stays NULL.
m2<-vector("list",nregions)
for (r in c(1:4,6:11)) {
if (m2flag==0) {
load(paste0(CodeDir,"/output/region",as.character(r),"/m2ols0.RData"))
m2[[r]]<-m2ols0
} else if (m2flag==1) {
load(paste0(CodeDir,"/output/region",as.character(r),"/m2ols1.RData"))
m2[[r]]<-m2ols1
} else if (m2flag==5) {
load(paste0(CodeDir,"/output/region",as.character(r),"/m2ols5.RData"))
m2[[r]]<-m2ols5
}
names(m2)[r]<-paste0("region",as.character(r))
}
# Plot tables of parameter estimates
# Two groups of coefficients are exported to four LaTeX tables:
# keeplist1 = land-use / planning / deprivation / noise covariates,
# keeplist2 = distance-to-amenity covariates. The `keep` patterns are regexes
# matched against coefficient names by stargazer.
keeplist1<-c("pct","localplan","\\<lu_",
"popdensity","imddec","prob_","noiseclass")
keeplist2<-c("airport","motorway","aroad",
"AONB","natpark","coast",
"station","town")
stargazer(m2$region1,m2$region2,m2$region3,m2$region4,m2$region6,
type="latex",title="Model 2 results: 1",
out = paste0(OutDir,"/model2A.tex"),
out.header=TRUE,
no.space=TRUE,
digits=3,digits.extra=2,column.sep.width="0pt",
font.size = "small",df=FALSE,
keep = keeplist1)
stargazer(m2$region7,m2$region8,m2$region9,m2$region10,m2$region11,
type="latex",title="Model 2 results 2",
out = paste0(OutDir,"/model2B.tex"),
out.header=TRUE,
no.space=TRUE,
digits=3,digits.extra=2,column.sep.width="0pt",
font.size = "small",df=FALSE,
keep = keeplist1)
stargazer(m2$region1,m2$region2,m2$region3,m2$region4,m2$region6,
type="latex",title="Model 2 results 3",
out = paste0(OutDir,"/model2C.tex"),
out.header=TRUE,
no.space=TRUE,
digits=3,digits.extra=2,column.sep.width="0pt",
font.size = "small",df=FALSE,
keep = keeplist2)
stargazer(m2$region7,m2$region8,m2$region9,m2$region10,m2$region11,
type="latex",title="Model 2 results 4",
out = paste0(OutDir,"/model2D.tex"),
out.header=TRUE,
no.space=TRUE,
digits=3,digits.extra=2,column.sep.width="0pt",
font.size = "small",df=FALSE,
keep = keeplist2)
# Create tables of summary stats
# Region 1's data is loaded first only to establish the variable list (vlist)
# and, implicitly, the factor levels used to size the summary matrix.
# NOTE(review): this assumes every region's m2data has the same columns and
# factor levels as region 1 — confirm.
m2data3<-vector("list",nregions)
r<-1
dirs<-B2SetPath(RootDir,CodeDir,r,datastub)
# Load data
load(file=paste0(dirs$datadir,"/m2data2.RData"))
vlist1<-c("location_value","pricepaid","propertytype",
"newbuild","tenure","total_floor_area",
"builtuparea_pct","restrictedland_pct",
"localplanrate","popdensityOA","imddecile",
"prob_4band","roadnoise")
vlist2<-colnames(m2data[,grep("\\<lu_",colnames(m2data))])
vlist3<-c("drive_town","trans_town","distance_airport",
"distance_AONB","AONB","distance_motorway",
"distance_aroad","drive_motorway","drive_aroad",
"distance_station","drive_station","trans_station")
vlist<-c(vlist1,vlist2,vlist3)
nvars<-length(vlist)
# nvars1[v] = number of summary rows variable v needs (one per factor level,
# otherwise one for its mean).
# NOTE(review): vtype is overwritten (not indexed) each iteration and never
# used afterwards — candidate for removal.
vtype<-rep(NA,nvars)
nvars1<-rep(1,nvars)
for (v in 1:nvars) {
vtype<-class(m2data[,vlist[v]])
if (is.factor(m2data[,vlist[v]])) {
nvars1[v]<-length(levels(m2data[,vlist[v]]))
}
}
# One row per variable (or factor level), one column per region.
sumstats<-matrix(0,sum(nvars1),nregions)
rnames<-rep(NA,sum(nvars1))
colnames(sumstats)<-as.character(regnames[,2])
# Fill the matrix region by region (region 5 skipped, its column stays 0).
# Factors contribute the share of each level; numerics contribute the mean.
for (r in c(1:4,6:11)) {
dirs<-B2SetPath(RootDir,CodeDir,r,datastub)
# Load data
load(file=paste0(dirs$datadir,"/m2data2.RData"))
m2data$pricepaid<-m2data$pricepaid/1000
m2data3[[r]]<-m2data[,vlist]
names(m2data3)[r]<-paste0("region",as.character(r))
v0<-1
for (v in 1:nvars) {
if (is.factor(m2data[,vlist[v]])) {
lev<-levels(m2data[,vlist[v]])
for (i1 in lev) {
sumstats[v0,r]<-mean(m2data[,vlist[v]]==i1,na.rm=TRUE)
rnames[v0]<-paste0(vlist[v],":",as.character(i1))
v0<-v0+1
}
} else {
sumstats[v0,r]<-mean(m2data[,vlist[v]],na.rm=TRUE)
rnames[v0]<-vlist[v]
v0<-v0+1
}
}
}
rownames(sumstats)<-rnames
# Export the summary statistics in two LaTeX tables (regions 1-6 and 7-11).
stargazer(sumstats[,1:6],
type="latex",title="Summary statistics 1",
out = paste0(OutDir,"/summary1A.tex"),
out.header=TRUE,
digits=2,digits.extra=2,column.sep.width="0pt",
font.size = "small")
stargazer(sumstats[,7:11],
type="latex",title="Summary statistics 2",
out = paste0(OutDir,"/summary1B.tex"),
out.header=TRUE,
digits=2,digits.extra=2,column.sep.width="0pt",
font.size = "small")
|
5e8eabf8a964ef4f7ff3c577975540da0a748630
|
ee58ee5a25e89b90d133aa0a29ccf04ed7576828
|
/Code/CellTypeAdjustment/refactor.R
|
1d45a343f1a56fa93df32199939239b1e1d9cad5
|
[] |
no_license
|
JessMurphy/WFMethylation
|
c3535d7b58cea51e6cd0563712b87896685fcabf
|
989a78e88e815879fb1314bf8d87957aac174c4c
|
refs/heads/main
| 2023-04-12T12:32:53.158170
| 2021-05-09T04:08:30
| 2021-05-09T04:08:30
| 343,962,243
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,370
|
r
|
refactor.R
|
############################################################
## Women First Trial: Guatemala Methylation Analysis (Cell Type Adjustment)
## Written by Jessica Murphy
## Last edited on July 10, 2020
## This script performs the ReFACTor method for cell type adjustment. The
## original code was downloaded from https://github.com/cozygene/refactor/releases.
## The covariates step was edited from the original code to accommodate factor
## variables in matrix form.
## Please send any questions to jessica.murphy@ucdenver.edu
############################################################
# NOTE(review): setwd() in a sourced script changes global state for the
# caller and hard-codes a machine-specific path; consider passing paths
# explicitly to refactor() instead.
setwd("/nfs/storage/math/gross-s2/projects/guatemala/Guatemala-omics/Methylation/CellTypeAdjustment/")
# ReFACTor function from https://github.com/cozygene/refactor/releases
# refactor(): sparse-PCA-style cell-type adjustment for methylation data.
#   data_file  - matrix file: first row = sample IDs, first column = CpG names.
#   k          - assumed number of cell types (rank of the low-rank approximation).
#   covarfile  - optional covariates file (first column = sample IDs, same order
#                as data_file); covariate effects are regressed out per site.
#   t          - number of top-ranked sites used to compute the components.
#   numcomp    - number of components to return/write (defaults to k).
#   stdth      - sites with sd below this threshold are excluded.
#   out        - prefix for the two output files (ranked list + components).
# Returns a list: refactor_components, ranked_list, standard_pca.
refactor <- function(data_file, k, covarfile = NULL, t = 500, numcomp = NULL, stdth = 0.02, out = "refactor") {
ranked_filename = paste(out, ".out.rankedlist.txt", sep="")
components_filename = paste(out, ".out.components.txt", sep="")
print('Starting ReFACTor v1.0...');
print('Reading input files...');
# Strip the sample-ID row and CpG-name column, then coerce to numeric matrix.
O = as.matrix(read.table(data_file))
sample_id <- O[1, -1] # extract samples ID
O <- O[-1,] # remove sample ID from matrix
cpgnames <- O[, 1] ## set rownames
O <- O[, -1]
O = matrix(as.numeric(O),nrow=nrow(O),ncol=ncol(O))
# NOTE(review): sep="" below is an argument to print(), not paste() — it is
# silently ignored, so the message prints with default spacing.
print(paste("Excluding sites with low variance (std < ", stdth, ")..."), sep="")
# Drop low-variance sites (rows); sds is computed per site.
sds = apply(t(O), 2, sd)
m_before = length(sds)
include = which(sds >= stdth)
O = O[include,]
cpgnames = cpgnames[include]
print(paste((m_before - length(which(sds >= stdth))), " sites were excluded due to low variance...", sep=""))
if (is.null(numcomp) || is.na(numcomp))
{
numcomp = k
}
# Adjust the data for the covariates
# Each site is regressed on the (model-matrix-expanded) covariates and
# replaced by its residuals, so downstream PCA is covariate-adjusted.
if (!is.null(covarfile))
{
covs = read.table(covarfile) #removed as.matrix
sample_id2 <- covs[, 1]
if (!all(sample_id == sample_id2)){
print("ERROR: The order of the samples in the covariates file must be the same as the order in the data file")
quit()
}
covs <- covs[,-1]
#if (length(covs) > dim(O)[2])
#{
#  covs = matrix(as.numeric(covs),nrow=nrow(covs),ncol=ncol(covs))
#}else{
#  covs = as.numeric(covs)
#}
# model.matrix handles factor covariates; drop the intercept column.
covs.matrix = model.matrix(~., data=covs)
covs.matrix = covs.matrix[,-1]
O_adj = O
for (site in 1:nrow(O))
{
model <- lm(O[site,] ~ covs.matrix)
O_adj[site,] = residuals(model)
}
O = O_adj
}
print('Running a standard PCA...')
pcs = prcomp(scale(t(O)));
coeff = pcs$rotation # matrix whose columns contain the eigenvectors (m x n)
score = pcs$x # the rotated data (the centerd and scaled data multiplied by the rotation matrix) (n x n)
print('Compute a low rank approximation of input data and rank sites...')
x = score[,1:k]%*%t(coeff[,1:k]); # (n x k) (k x m) = (n x m) k rank approx to O (step 2)
# Center both matrices and normalise each column to unit length so the
# distance below compares directions, not magnitudes.
An = scale(t(O),center=T,scale=F) # (n x m)
Bn = scale(x,center=T,scale=F)
An = t(t(An)*(1/sqrt(apply(An^2,2,sum)))) # divide each column by its length (unit vectors)
Bn = t(t(Bn)*(1/sqrt(apply(Bn^2,2,sum)))) # divide each column by its length (unit vectors)
# Find the distance of each site from its low rank approximation.
distances = apply((An-Bn)^2,2,sum)^0.5 ; # (step 3)
dsort = sort(distances,index.return=T);
ranked_list = dsort$ix
print('Compute ReFACTor components...')
# Re-run PCA on the t best-approximated sites; those PCs are the ReFACTor
# components. first_score keeps the ordinary PCA scores for comparison.
sites = ranked_list[1:t]; # top 500 DMRs
pcs = prcomp(scale(t(O[sites,]))); # (steps 4 and 5)
first_score <- score[,1:k]; # regular PCs
score = pcs$x
print('Saving a ranked list of the data features...');
write(t(cpgnames[ranked_list]),file=ranked_filename,ncol=1)
#write(t(cbind(ranked_list,cpgnames[ranked_list])),file=ranked_filename,ncol=2)
print('Saving the ReFACTor components...');
write(t(score[,1:numcomp]), file=components_filename, ncol=numcomp)
print('ReFACTor is Done');
result <- list(refactor_components=score[,1:numcomp], ranked_list=ranked_list, standard_pca=first_score)
return(result)
}
# run refactor
# k = 2 assumed cell types; covariates in refactor_metadata.txt are regressed
# out of each site before the PCA steps (see function above).
output = refactor("methylation_noX_noSNPs.txt", k=2, covarfile="refactor_metadata.txt")
|
4bada030b8235773a1ca854cbdbc904830d1682f
|
dc0115022f8c862310d9bb8c59d95373f237cb1a
|
/2. Subsetting data.R
|
1d4dbe894bdb941f16175012e5cbe1f4f1df11f1
|
[] |
no_license
|
Sandhya-RK/Social-Media-Analytics
|
af1ea8773714ca82371a3535ea552f66a045d2a8
|
6341c344b6070807fef8d5485dfd5d093785c1ac
|
refs/heads/master
| 2022-06-01T01:58:38.370420
| 2020-05-04T09:51:27
| 2020-05-04T09:51:27
| 261,135,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
2. Subsetting data.R
|
library(data.table)
# Convert final.df to a data.table by reference (no copy).
# NOTE(review): assumes final.df already exists in the calling environment.
setDT(final.df)
# c(...) in j concatenates the two columns into ONE long vector —
# shown here as the wrong way; the .(...) form below returns two columns.
final.df[1:3, c(screen_name, source)]
final.df[1:3, .(screen_name, source)]
# Subsetting data tables
final.df[country == "Ireland" | country == "United Kingdom"]
final.df[(country == "Ireland" | country == "United Kingdom") & source == "Instagram"]
# For tweets with >10 retweets, return retweets + favourites as a vector.
final.df[retweet_count>10, (retweet_count+favorite_count)]
# Tweet counts per country, most active first (chained data.table call).
final.df[,.(.N), by = .(country)] [order(-N)]
# Total engagement (retweets + favourites + replies + quotes) per country.
final.df[,.(TotalTweets = .N,
total_reactions=sum(retweet_count, na.rm = TRUE) +
sum(favorite_count, na.rm = TRUE)+
sum(reply_count, na.rm = TRUE)+
sum(quote_count, na.rm = TRUE)),
by = .(country)] [order(-total_reactions)]
final.df[,.(.N), by = .(country, verified)]
library(magrittr)
# Add a 5-minute time-bucket column by reference (:= modifies final.df in place).
final.df[, chunk:= created_at %>% cut(breaks = "5 min") %>% as.factor ]
|
e34aaa2e4570a4bf47d94427e376e8667f62251f
|
db2695a8bc14d14a2d7dae9b097d12da41ca1100
|
/Run_analysis.R
|
830f151aad55bd8523df83d8df5193415a1b97ea
|
[] |
no_license
|
Juspi/Getting-and-Cleaning-Data
|
d6d0a5bbc9ec6ada301b5761d02e0ca389e0a630
|
4aa5c937004a6e4bdee35a1a5353af3c353ad914
|
refs/heads/master
| 2021-01-20T07:15:08.897835
| 2014-11-23T18:28:06
| 2014-11-23T18:28:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,836
|
r
|
Run_analysis.R
|
# Reads and binds data
# Merge the UCI HAR train and test splits (measurements, activity labels and
# subject ids) into single data frames. read.csv with sep="" splits on any
# whitespace, matching the fixed-width UCI files.
train<-read.csv("UCI Har Dataset/train/X_train.txt", sep="", header=FALSE)
trainlabel<-read.csv("UCI Har Dataset/train/y_train.txt", sep="", header=FALSE)
trainsubject<-read.csv("UCI Har Dataset/train/subject_train.txt", sep="", header=FALSE)
test<-read.csv("UCI Har Dataset/test/X_test.txt", sep="", header=FALSE)
testlabel<-read.csv("UCI Har Dataset/test/y_test.txt", sep="", header=FALSE)
testsubject<-read.csv("UCI Har Dataset/test/subject_test.txt", sep="", header=FALSE)
bindeddata<-rbind(train, test)
bindedlabel<-rbind(trainlabel, testlabel)
bindedsubject<-rbind(trainsubject, testsubject)
# Reads features and extracts only the measurements on the mean and standard deviation
features <- read.csv("UCI Har Dataset/features.txt", sep="", header=FALSE)
meanStdvalues <- grep("mean\\(\\)|std\\(\\)", features[, 2])
bindeddata <- bindeddata[, meanStdvalues]
# Fixed: the original indexed features with the undefined name `meanStdarvot`
# (a leftover variable name), which made the script error here; use the
# meanStdvalues index computed above.
names(bindeddata) <- gsub("\\(\\)", "", features[meanStdvalues, 2]) # remove "()"
names(bindeddata) <- gsub("mean", "Mean", names(bindeddata)) # capitalize M
names(bindeddata) <- gsub("std", "Std", names(bindeddata)) # capitalize S
names(bindeddata) <- gsub("[-()]", "", names(bindeddata)) # remove "-" in column names
# Uses descriptive activity names to name the activities in the data set
# (e.g. "WALKING_UPSTAIRS" -> "walkingUpstairs").
activity <- read.csv("UCI Har Dataset/activity_labels.txt", sep="", header=FALSE)
activity[, 2] <- tolower(gsub("_", "", activity[, 2]))
substr(activity[2, 2], 8, 8) <- toupper(substr(activity[2, 2], 8, 8))
substr(activity[3, 2], 8, 8) <- toupper(substr(activity[3, 2], 8, 8))
activitylabel <- activity[bindedlabel[, 1], 2]
bindedlabel[, 1] <- activitylabel
names(bindedlabel) <- "activity"
names(bindedsubject) <- "subject"
# Binds the data and writes final_data
finaldata <- cbind(bindedsubject, bindedlabel, bindeddata)
write.table(finaldata, "tidy_data.txt")
|
2f6123d97bb8676bbad5fed9757251d9446a9a91
|
8fe397730828ba49422d8fd0ef444de54e99f879
|
/setup.R
|
9ca29d9573c14b167cfcc2c6cdc2c3263392735b
|
[
"MIT"
] |
permissive
|
albertsalgueda/GeoLift_execution
|
92d0632803f2958deb62e524f2bc677beced7580
|
4b11e88e56cebd7b39429959848fddf9edee5ce7
|
refs/heads/main
| 2023-06-19T08:29:53.136208
| 2021-07-22T14:01:17
| 2021-07-22T14:01:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,950
|
r
|
setup.R
|
# File finds what the best treatment group and test duration for a certain country and dataset is.
######################
## Install packages ##
######################
# install.packages("devtools")
# library(devtools)
# install_github("ArturoEsquerra/GeoLift", force=TRUE)
# install.packages("dplyr")
###################
## Load packages ##
###################
suppressPackageStartupMessages(library(GeoLift))
suppressPackageStartupMessages(library(dplyr))
# Pull the experiment configuration and helper functions from sibling scripts
# (import::here sources them without polluting the search path).
import::here("TEST_DURATIONS", "KPI", "Q_DAYS_LOOKBACK_POWER",
"SIGNIFICANCE_LVL", "FIXED_EFFECTS", "EFFECT_RANGE", .from="cfg.R")
import::here("extract_transform_data", .from="extract_transform.R")
import::here("lift_power_aggregation", "plot_power_curve", .from="aux_functions.R")
#####################
## Import dataset ##
#####################
transformed_data_list <- extract_transform_data(
kpi = KPI,
q_days_lookback_power = Q_DAYS_LOOKBACK_POWER)
pre_test_geolift_data <- transformed_data_list$pre_test_geolift_data
MIN_TRAINING_PERIOD <- transformed_data_list$MIN_TRAINING_PERIOD
# Candidate treatment-group sizes: from 4 up to half the available locations.
total_locations <- length(unique(pre_test_geolift_data$location))
Q_LOCATIONS <- seq(4, floor(total_locations / 2), 1)
#########################################
## Search for treatments w/0 effect ##
## GeoLiftPower.search ##
#########################################
# Calculate share of times in which a certain treatment does not detect an effect
# when the true effect is zero.
# Locations in treatment groups are randomly sampled.
# NOTE(review): suppressWarnings over the whole call hides any GeoLift
# warnings, not just the expected ones.
resultsSearch <- suppressWarnings(GeoLiftPower.search(
data = pre_test_geolift_data,
treatment_periods = TEST_DURATIONS,
N = Q_LOCATIONS,
horizon = MIN_TRAINING_PERIOD,
Y_id = "Y",
location_id = "location",
time_id = "time",
top_results = 0,
alpha = SIGNIFICANCE_LVL,
type = "pValue",
fixed_effects = FIXED_EFFECTS,
ProgressBar = TRUE,
run_stochastic_process = TRUE))
# Inspect the best-balanced candidate treatment groups.
head(resultsSearch %>% arrange(mean_scaled_l2_imbalance), 5)
head(resultsSearch)
#########################################
## Find best guess MDE per treatment ##
## GeoLiftPowerFinder ##
#########################################
# Apply range of effects to last date before simulation.
# Determine if estimated effect had a confidence > 90%.
# Return MDE.
resultsFind <- GeoLiftPowerFinder(data = pre_test_geolift_data,
treatment_periods = TEST_DURATIONS,
N = Q_LOCATIONS,
Y_id = "Y",
location_id = "location",
time_id = "time",
effect_size = EFFECT_RANGE[EFFECT_RANGE > 0],
top_results = 0,
alpha = SIGNIFICANCE_LVL,
fixed_effects = FIXED_EFFECTS,
ProgressBar = TRUE,
plot_best = FALSE,
run_stochastic_process = TRUE)
head(resultsFind)
# Restrict to treatment groups carrying >15% of total Y, then rank by
# balance and minimum detectable effect.
head(resultsFind[resultsFind$ProportionTotal_Y > 0.15, ] %>%
arrange(ScaledL2Imbalance, MinDetectableEffect)
)
##############################################
## Find actual MDE for specific treatment ##
## GeoLiftPower ##
##############################################
# Print results from Search and Find.
head(resultsSearch %>% arrange(mean_scaled_l2_imbalance), 5)
head(resultsSearch)
head(resultsFind[resultsFind$ProportionTotal_Y > 0.15, ] %>%
arrange(ScaledL2Imbalance, MinDetectableEffect)
)
head(resultsFind)
# Manually pick one ranked candidate (edit nrank / use_df after inspecting
# the tables printed above; the trailing #1 / #resultsSearch are the
# alternative values).
nrank <- 30#1 # Decide ranked row to select.
use_df <- resultsFind#resultsSearch # Decide dataset to use
# The selected row's "location" field is a comma-separated list of markets.
TREATMENT_LOCATIONS <- stringr::str_split(use_df[use_df$rank == nrank,]$location, ", ")[[1]]
duration_of_test <- use_df[use_df$rank == nrank,]$duration[[1]]
# Fall back to a random duration when the chosen table has no duration column.
duration_of_test <- ifelse(
is.null(duration_of_test), sample(TEST_DURATIONS, 1), duration_of_test)
MIN_TRAINING_PERIOD <- max(pre_test_geolift_data$time) - duration_of_test - Q_DAYS_LOOKBACK_POWER
# Full power curve for the chosen treatment group over the whole effect range.
results_power <- GeoLiftPower(
pre_test_geolift_data,
locations = TREATMENT_LOCATIONS,
effect_size = EFFECT_RANGE,
treatment_periods = duration_of_test,
horizon = MIN_TRAINING_PERIOD,
Y_id = "Y",
location_id = "location",
time_id = "time",
cpic = 20)
power_agg_list <- lift_power_aggregation(results_power)
# NOTE(review): glue() is used without an explicit library(glue)/glue:: —
# presumably attached elsewhere; confirm, or qualify as glue::glue().
print(glue("Positive MDE is {power_agg_list$positive_mde * 100} %"))
print(glue("Negative MDE is {power_agg_list$negative_mde * 100} %"))
###############################
## Exploratory power plot ##
###############################
plot_power_curve(
agg_power_results = power_agg_list$agg_power_results,
power_mde = power_agg_list$positive_mde,
q_days_lookback_power = Q_DAYS_LOOKBACK_POWER,
treatment_duration = duration_of_test,
treatment_locations = TREATMENT_LOCATIONS,
font_size=10)
|
341bf82b1c322a1a2ede4fc7591a655bfd25db0a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/psychomix/examples/mptmix.Rd.R
|
e4f4ddbac937820f53d23511fe66f56bdc328aa9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
r
|
mptmix.Rd.R
|
# Extracted example code from the psychomix::mptmix help page: fit latent-class
# multinomial processing tree models to the PairClustering data and inspect
# the BIC-selected model.
library(psychomix)
### Name: mptmix
### Title: Finite Mixtures of Multinomial Processing Tree Models
### Aliases: mptmix FLXMCmpt
### Keywords: mixture model
### ** Examples
## No test:
## Data
# Reshape from long (one row per trial) to wide (one row per participant).
data("PairClustering", package = "psychotools")
pc <- reshape(PairClustering, timevar = "trial", idvar = "ID",
direction = "wide")
## Latent-class MPT model (Klauer, 2006)
# Fit with k = 1..3 latent classes; seed fixed for reproducible EM starts.
set.seed(1)
m <- mptmix(as.matrix(pc[-1]) ~ 1, data = pc, k = 1:3,
spec = mptspec("SR", .replicates = 2))
m1 <- getModel(m, which = "BIC")
## Inspect results
summary(m1)
parameters(m1)
plot(m1)
library(lattice)
xyplot(m1)
## End(No test)
|
f7d70815af7b767c3e705bcab0ff91b225ea04e7
|
608ef56860ee4ad73620fd589fa076c854c76184
|
/man/evalR2_XPASS.Rd
|
4da23ac8b0b0358da1996e81dda361dc522c977f
|
[] |
no_license
|
RitaWang0427/XPASS
|
315479a8905fdd22d682380c7cac869a174af8da
|
dc42e5814936055576b60e41f0a2b1fdb88eb7c2
|
refs/heads/master
| 2023-04-29T14:20:32.856905
| 2021-05-27T16:13:26
| 2021-05-27T16:13:26
| 371,375,233
| 0
| 0
| null | 2021-05-27T13:07:24
| 2021-05-27T13:02:23
| null |
UTF-8
|
R
| false
| true
| 771
|
rd
|
evalR2_XPASS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_XPASS.R
\name{evalR2_XPASS}
\alias{evalR2_XPASS}
\title{Evaluate the approximated predictive R2 using an independent GWAS summary statistics.}
\usage{
evalR2_XPASS(pm, file_z_pred, file_predgeno)
}
\arguments{
\item{pm}{The output `mu` obtained from the `XPASS` function, which stores the posterior means computed by XPASS.}
\item{file_z_pred}{Summary statistics file of the test data. The format is the same as XPASS input.}
\item{file_predgeno}{Prefix of the reference genotypes in plink format.}
}
\value{
Approximated predictive R^2.
}
\description{
This function approximates the predictive R2 of XPASS using an independent GWAS summary statistics.
}
\examples{
# See the package vignette for a worked example.
}
|
0e58151637527493b6e9df4a086c8289f3303939
|
4c1d09a11ddb28e1c8e2a42465b6353455980a22
|
/data-raw/raw_tables.R
|
0741cf015390f1eefed13df22be3f25fd23afaa9
|
[] |
no_license
|
a2research/rempreq
|
2154560b1563ee94990f5ff70ef32843552a27b0
|
7c6ca499a4c1bdbaef336d2182feaa85ec3a50b3
|
refs/heads/master
| 2021-06-01T09:43:13.776739
| 2016-08-10T15:55:18
| 2016-08-10T15:55:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,197
|
r
|
raw_tables.R
|
# raw_tables.R -------------------------
library(devtools)
# Constants describing the annual BLS employment-requirements tables:
# one table per year, TABLE_NROWS sector rows each.
TABLE_YEARS <- 1997:2014
TABLE_NROWS <- 206
LATEST_YEAR <- max(TABLE_YEARS)
# Read in raw BLS domestic and total real valued (2009 USD) Employment Requirements Matrix Files
# BLS sector numbers and names
sectors <- read.table('data-raw/sect313.csv', stringsAsFactors = FALSE, sep = ',', header = TRUE)
sectors <- na.omit(sectors)
# Translate between BLS sector number and description. Both helpers read the
# file-level `sectors` table loaded above.
sector_name <- function(sector_number) sectors[sector_number, 'Industry.Commodity.Description']
sector_number <- function(sector_name) which(sectors$Industry.Commodity.Description == sector_name)
add_to_table_list <- function(table_files, table_years) {
  # Read each annual CSV into a data frame and return them as a list named
  # by year (as character).
  # Fixed: the original "preallocated" with list(length(table_files)) — a
  # one-element list containing the count, not a list of the right length —
  # and errored on empty input when assigning names. lapply sizes the result
  # correctly for any number of files, including zero.
  all_years_empreq <- lapply(table_files, function(f) {
    read.table(f, stringsAsFactors = FALSE, sep = ',', header = TRUE,
               row.names = NULL, nrows = 206)
  })
  names(all_years_empreq) <- as.character(table_years)
  return(all_years_empreq)
}
# Stack every year's table into one long data frame (row-bound), tagging each
# row with its year. Relies on `files` being ordered to match TABLE_YEARS.
rseries_all_years_empreq <- function(files) {
  yearly_tables <- lapply(files, function(path) {
    read.table(path, stringsAsFactors = FALSE, sep = ',', header = TRUE,
               row.names = NULL, nrows = 206)
  })
  rseries <- do.call(rbind, yearly_tables)
  # One block of TABLE_NROWS rows per year, in TABLE_YEARS order.
  rseries$year <- rep(TABLE_YEARS, each = TABLE_NROWS)
  return(rseries)
}
# Bind every year's table column-wise (sectors stay in rows), keeping a single
# Industry column and renaming the remaining columns with a year suffix.
cseries_all_years_empreq <- function(files) {
  yearly_tables <- lapply(files, function(path) {
    read.table(path, stringsAsFactors = FALSE, sep = ',', header = TRUE,
               row.names = NULL, nrows = 206)
  })
  cseries <- do.call(cbind, yearly_tables)
  # remove duplicate Industry columns (keep only the first occurrence)
  cseries <- cseries[, -which(names(cseries) == 'Industry')[-1]]
  names(cseries)[-1] <- sapply('year', paste, rep(TABLE_YEARS, each = TABLE_NROWS),
                               sep='_')
  return(cseries)
}
use_data(TABLE_YEARS, TABLE_NROWS, LATEST_YEAR, sectors, overwrite = TRUE)
|
7baad2765510f4a1af4413b8d76bc1ffcf055cf7
|
25ebb294d1feae6a4ff6b17f227129170685f905
|
/plot3.R
|
0fd410aeeac4a73ccb7c4b296b0c9ad8552f40d7
|
[] |
no_license
|
Wabram/EDA
|
c1f4da1cc6387b088864b27bef2261e5b48f7e5e
|
3e0f0d89c3ab9e20b7df6dad3d2418bc78beab83
|
refs/heads/master
| 2020-04-04T16:27:49.410937
| 2018-11-04T14:43:54
| 2018-11-04T14:43:54
| 156,079,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 740
|
r
|
plot3.R
|
# read hpc file
# NOTE(review): hard-coded Windows path — adjust for your machine.
hpc <- read.csv("H:/Projects/Courses/R/EDA/ExploratoryDataAnalysis/project_1/hpc.txt", sep="")
head(hpc)
# plot data from three columns
# Line plot of the three sub-metering series on a shared y axis; the x axis
# is suppressed (xaxt="n") and drawn manually below.
with(hpc, {
plot(Sub_metering_1,type="l", xaxt="n", xlab="", ylab="Energy sub metering")
lines(x=Sub_metering_2, col="red")
lines(x=Sub_metering_3, col="blue")
})
# add axis with abbreviations of weekdays
axis(1, at=c(1, nrow(hpc)/2, as.numeric(nrow(hpc)+1)), labels=c("Thu", "Fri", "Sat"))
# add legend
legend("topright", lty=1,lwd = 2, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## create plot from local graphic device to the file with png format with name plot3
# NOTE(review): dev.copy from a screen device can distort dimensions;
# drawing directly on a png() device is the more reliable pattern.
dev.copy(png,file="plot3.png",width=480,height=480)
dev.off()
|
b431d684a7f193c5a202af5513ed1987a8052a0b
|
f409f2b563deb8a9570bd64c10e622ac2bb89c03
|
/R/main.R
|
4866d4807871dfeaecb0187548d78e751f263ed6
|
[
"Apache-2.0"
] |
permissive
|
chrisknoll/CohortManager
|
5be0f0916b753d5e3a1cd14de2211467c7ab2c81
|
0a350df155a873241fedcbb9ef12f8b7e3578ab8
|
refs/heads/master
| 2021-01-19T10:52:12.976747
| 2017-02-28T18:03:00
| 2017-02-28T18:07:02
| 82,221,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,364
|
r
|
main.R
|
#' Package-private mutable state (working dir, cached CSV data, WebAPI config)
#' @export
cacheEnv <- new.env()
#' Build a CSV file definition: its file name plus the expected column classes.
#' @param name CSV file name.
#' @param columns Named vector mapping column name to column class.
#' @return A list with elements `name` and `columns`.
#' @export
buildCsvDef <- function (name, columns) {
  list(name = name, columns = columns)
}
#' Look up a vocabulary concept by id via the configured OHDSI WebAPI.
#' Reads cacheEnv$webAPI and cacheEnv$sourceKey (set elsewhere in the package)
#' and fetches /vocabulary/{sourceKey}/concept/{conceptId} as parsed JSON.
#' @export
resolveConcept <- function(conceptId) {
jsonlite::fromJSON(paste(cacheEnv$webAPI, "vocabulary", cacheEnv$sourceKey, "concept", conceptId, sep="/"));
}
#' Check that a CSV file exists, is readable, and contains every column named
#' in the definition. Returns FALSE if the file cannot be read or any
#' required column is missing.
#' NOTE(review): the bare try() deliberately treats any read error as
#' "invalid"; only the header row is read (nrows = 1).
#' @export
isFileValid <- function(directoryPath, csvDef) {
csvDF <- NULL
try (csvDF <- read.csv(paste(directoryPath,csvDef$name, sep=""), stringsAsFactors = FALSE, nrows=1), silent=TRUE);
if (is.null(csvDF)) {
return(FALSE);
}
# Any required column absent from the file's header makes it invalid.
if (length(names(csvDef$columns)[! names(csvDef$columns) %in% names(csvDF)]) > 0)
return(FALSE);
return(TRUE);
}
buildDateOffsetStrategy <- function(targetCohortId) {
  # Look up the date-offset end-strategy row for the given cohort in the
  # cached CSV data and convert it to the WebAPI list structure.
  # Returns NULL (with a warning) when no row matches.
  dateOffsetCsv <- cacheEnv$csvData[["DateOffsetStrategy.csv"]];
  dateOffsetRow <- dateOffsetCsv[dateOffsetCsv$cohortId == targetCohortId,];
  # Fixed: the original tested `seq_len(nrow(...)) == 0`, which errors for
  # zero rows (if() on a zero-length condition) and for more than one row,
  # and called the undefined `Warning()` (capital W).
  if (nrow(dateOffsetRow) == 0) {
    warning(paste("No date offset strategy data found for cohort:", targetCohortId));
    return(NULL);
  }
  dateOffset <- list("DateField" = dateOffsetRow$dateField, "Offset" = dateOffsetRow$offset);
  return (list("DateOffset" = dateOffset));
}
buildCustomEraStrategy <- function(targetCohortId) {
  # Look up the custom drug-era end-strategy row for the given cohort in the
  # cached CSV data and convert it to the WebAPI list structure.
  # Returns NULL (with a warning) when no row matches.
  customEraCsv <- cacheEnv$csvData[["CustomEraStrategy.csv"]];
  customEraRow <- customEraCsv[customEraCsv$cohortId == targetCohortId,];
  # Fixed: `seq_len(nrow(...)) == 0` errors for zero (or >1) matching rows,
  # and `Warning()` is undefined; also corrected the "custm" typo in the
  # warning message.
  if (nrow(customEraRow) == 0) {
    warning(paste("No custom era strategy data found for cohort:", targetCohortId));
    return(NULL);
  }
  customEra <- list("DrugCodesetId" = customEraRow$drugCodesetId, "GapDays" = customEraRow$gapDays, "Offset" = customEraRow$offset);
  return (list("CustomEra" = customEra));
}
#' Reload every required CSV file from the configured working directory into
#' cacheEnv$csvData (a list keyed by file name). Column classes are enforced
#' via each definition's `columns` vector. Stops if the working directory has
#' not been set with setDataDirectory().
#' @export
refreshCsv <- function() {
if (is.null(cacheEnv$workingDir))
stop("Working directory is not set. call CohortManager::setWorkingDirectory(path) before processCohort().")
directoryPath <- cacheEnv$workingDir;
csvData <- {};
for (def in cacheEnv$requiredFiles) {
# load each CSV file and store in csvData
currentCsv <- read.csv(paste(directoryPath, def$name, sep=""),
header = TRUE,
stringsAsFactors = FALSE,
colClasses = def$columns);
csvData[[def$name]] <- currentCsv;
}
cacheEnv$csvData <- csvData;
message("CSV Contents loaded.")
}
#' @export
#' Validate and set the working data directory.
#'
#' Verifies the directory exists, that every required CSV file is present,
#' and that each file declares all expected columns; then stores the
#' slash-terminated path in cacheEnv$workingDir and reloads all CSV data
#' via refreshCsv().
#'
#' @param directoryPath path to the folder holding the required CSV files.
setDataDirectory <- function(directoryPath) {
  if (!dir.exists(directoryPath))
    stop(paste("Directory does not exist:", directoryPath));
  # Normalise: guarantee a trailing slash so later paste() calls form paths.
  if (stringr::str_sub(directoryPath, -1, -1) != "/")
    directoryPath <- paste(directoryPath, "/", sep = "");
  # check for files to exist
  fileExists <- lapply(cacheEnv$requiredFiles, function (x) { file.exists(paste(directoryPath, x$name, sep=""))})
  missingFilenames <- lapply(cacheEnv$requiredFiles[which(fileExists == FALSE)], function(x) x$name);
  if (length(fileExists[which(fileExists == FALSE)]) > 0)
  {
    stop(paste("The following files are required:", paste(collapse=",", missingFilenames)));
  }
  # check for valid files: each must contain all columns its definition lists
  validFiles <- lapply(cacheEnv$requiredFiles, function(x) { isFileValid(directoryPath, x) })
  invalidFiles <- cacheEnv$requiredFiles[which(validFiles == FALSE)];
  if (length(invalidFiles) > 0)
  {
    # Error message lists each bad file with the full set of expected columns.
    stop (paste("The following files are missing required columns:",
                paste(lapply(invalidFiles, function (x) { paste(x$name, "[", paste(names(x$columns), collapse=","), "]") } ),
                      collapse=",")
                )
         );
  }
  cacheEnv$workingDir = directoryPath;
  message(paste("Working directory set to: ", directoryPath));
  refreshCsv();
}
#' @export
#' Create empty (header-only) versions of every required CSV file in the
#' given directory. Existing files are left alone unless overwrite = TRUE,
#' in which case they are replaced with a fresh header row.
#'
#' @param directoryPath destination directory (must already exist).
#' @param overwrite replace files that already exist?
initCsv <- function (directoryPath, overwrite = FALSE) {
  if (!dir.exists(directoryPath))
    stop(paste("Directory does not exist:", directoryPath));
  # Normalise the path so it always ends with a trailing slash.
  if (stringr::str_sub(directoryPath, -1, -1) != "/")
    directoryPath <- paste(directoryPath, "/", sep = "");
  for (fileDef in cacheEnv$requiredFiles) {
    target <- paste(directoryPath, fileDef$name, sep="");
    if (file.exists(target) && overwrite != TRUE) {
      warning(paste("File already exists: ", fileDef$name, ". Skipping...", sep="" ));
    } else {
      # Write just the header line: comma-separated column names + newline.
      cat(paste(paste(names(fileDef$columns), collapse=","), "\n", sep=""), file=target);
    }
  }
  message(paste("The required files were created at path:", directoryPath));
}
#' @export
processCohort <- function (targetCohortId) {
if (is.null(cacheEnv$workingDir))
stop("Working directory is not set. call CohortManager::setWorkingDirectory(path) before processCohort().")
cohortDefCsv <- cacheEnv$csvData[["CohortDefinitions.csv"]];
primaryEventsCsv <- cacheEnv$csvData[["CohortPrimaryEvents.csv"]]
censorEventsCsv <- cacheEnv$csvData[["ChortCensoringEvents.csv"]]
correlatedCriteriaCsv <- cacheEnv$csvData[["CohortCorrelatedCriteria.csv"]]
demographicCriteriaCsv <- cacheEnv$csvData[["CohortDemographicCriteria.csv"]]
criteriaCsv <- cacheEnv$csvData[["CohortCriteria.csv"]]
conceptSetsCsv <- cacheEnv$csvData[["ConceptSets.csv"]];
conceptSetItemsCsv <- cacheEnv$csvData[["ConceptSetItems.csv"]]
cohortDefRow <- cohortDefCsv[cohortDefCsv$cohortId == targetCohortId,];
conceptSetIds <- c(); # this will contain the set of concept set IDs we discover as we build each criteria.
# Build PrimaryCriteria
# build Primary Events
PrimaryEvents <- list();
primaryEventRows <- subset(merge(primaryEventsCsv, criteriaCsv, by="criteriaId"), cohortId == targetCohortId);
by(primaryEventRows, seq_len(nrow(primaryEventRows)), function(peRow) {
conceptSetIds <<- c(conceptSetIds, as.numeric(peRow$conceptSetId));
criteriaParams <- {}
if (!is.na(peRow$conceptSetId)) criteriaParams$CodesetId <- as.numeric(peRow$conceptSetId);
if (!is.na(peRow$useFirst) && as.numeric(peRow$useFirst) > 0) criteriaParams$First <- TRUE;
criteria <- {};
criteria[peRow$criteriaType] <- list(criteriaParams);
# add criteria to PrimaryEvents list
PrimaryEvents <<- c(PrimaryEvents, list(criteria));
});
# Build Primary Events Observation Window
ObservationWindow <- {};
ObservationWindow$PriorDays <- as.numeric(cohortDefRow$priorObservation);
ObservationWindow$PostDays <- as.numeric(cohortDefRow$postObservation);
# add elements to Primarycriteria
PrimaryCriteria = list ("CriteriaList" = PrimaryEvents, "ObservationWindow" = ObservationWindow, "PrimaryCriteriaLimit" = list("Type" = cohortDefRow$peLimitType));
# build AdditionalCriteria
# build Qualfiying Criteria events
QualifyEvents <- list();
qualifyEventRows <- subset(merge(correlatedCriteriaCsv, criteriaCsv, by="criteriaId"), cohortId == targetCohortId);
by(qualifyEventRows, seq_len(nrow(qualifyEventRows)), function(qeRow) {
CorrelatedCriteria <- {};
conceptSetIds <<- c(conceptSetIds, as.numeric(qeRow$conceptSetId));
# create criteria object
criteria <- {};
criteria[qeRow$criteriaType] <- list(list(CodesetId=as.numeric(qeRow$conceptSetId)));
CorrelatedCriteria$Criteria <- criteria;
# create window
startElement <- {};
if (is.na(qeRow$windowStart)) {
startElement$Coeff <- as.numeric(-1);
} else {
startElement$Coeff <- if(qeRow$windowStart != 0) as.numeric(sign(qeRow$windowStart)) else as.numeric(-1);
startElement$Days <- as.numeric(abs(qeRow$windowStart));
}
endElement <- {};
if (is.na(qeRow$windowEnd)) {
endElement$Coeff <- as.numeric(1);
} else {
endElement$Coeff <- if(qeRow$windowEnd != 0) as.numeric(sign(qeRow$windowEnd)) else as.numeric(1);
endElement$Days <- as.numeric(abs(qeRow$windowEnd));
}
CorrelatedCriteria$StartWindow <- list(Start=startElement, End=endElement);
# create occurrence
occurrenceElement <- {};
occurrenceElement$Type <- switch(qeRow$type, "EXACTLY"=as.numeric(0), "AT MOST"=as.numeric(1), "AT LEAST"=as.numeric(2));
occurrenceElement$Count <- as.numeric(qeRow$occurrences)
CorrelatedCriteria$Occurrence <- occurrenceElement;
QualifyEvents <<- c(QualifyEvents, list(CorrelatedCriteria));
});
# build Demographic Criteria
DemographicCriteria <- c();
demographicRows <- subset(demographicCriteriaCsv, cohortId == targetCohortId);
by(demographicRows, seq_len(nrow(demographicRows)), function(dRow) {
criteria <- {};
ageElement <- {};
if (!is.na(dRow$ageMin) && !is.na(dRow$ageMax)) {
ageElement$op <- "bt";
ageElement$Value <- as.numeric(dRow$ageMin);
ageElement$Extent <- as.numeric(dRow$ageMax);
} else if (!is.na(dRow$ageMin)) {
ageElement$op <- "gte";
ageElement$Value <- as.numeric(dRow$ageMin);
} else if (!is.na(dRow$ageMax)) {
ageElement$op <- "lte";
ageElement$Value <- as.numeric(dRow$ageMax);
}
genderElement <- c();
if (!is.na(dRow$gender)) {
genderElement <- lapply(jsonlite::fromJSON(dRow$gender), function (conceptId) resolveConcept(conceptId));
}
startDateElement <- {};
if (nchar(dRow$startDate) > 0 && nchar(dRow$endDate) > 0) {
startDateElement$op <- "bt";
startDateElement$Value <- dRow$startDate;
startDateElement$Extent <- dRow$endDate;
} else if (nchar(dRow$startDate) > 0) {
startDateElement$op <- "gte";
startDateElement$Value <- dRow$startDate;
} else if (nchar(dRow$endDate) > 0) {
startDateElement$op <- "lte";
startDateElement$Value <- dRow$endDate;
}
if (length(ageElement) > 0) criteria$Age <- ageElement;
if (length(startDateElement) > 0) criteria$OccurrenceStartDate <- startDateElement;
if (length(genderElement) > 0) criteria$Gender <- genderElement;
DemographicCriteria <<- c(DemographicCriteria, list(criteria));
});
# add elements to AdditionalCriteria
AdditionalCriteria <- list("Type" = "ALL");
AdditionalCriteria$CriteriaList <- if (length(QualifyEvents) > 0) QualifyEvents else list();
AdditionalCriteria$DemographicCriteriaList <- if (length(DemographicCriteria) > 0) DemographicCriteria else list();
# build CensoringCriteria
CensoringEvents <- list();
censorEventRows <- subset(merge(censorEventsCsv, criteriaCsv, by="criteriaId"), cohortId == targetCohortId);
by(censorEventRows, seq_len(nrow(censorEventRows)), function(ceRow) {
conceptSetIds <<- c(conceptSetIds, as.numeric(ceRow$conceptSetId));
criteriaParams <- {}
if (!is.na(ceRow$conceptSetId)) criteriaParams$CodesetId <- as.numeric(ceRow$conceptSetId);
if (!is.na(ceRow$useFirst) && as.numeric(ceRow$useFirst) > 0) criteriaParams$First <- TRUE;
criteria <- {};
criteria[ceRow$criteriaType] <- list(criteriaParams);
# add criteria to PrimaryEvents list
CensoringEvents <<- c(CensoringEvents, list(criteria));
});
EndStrategy <- NULL;
# build EndStrategy
if (nchar(cohortDefRow$endStrategy) > 0) {
buildStrategy <- switch(cohortDefRow$endStrategy, "DateOffset"=buildDateOffsetStrategy, "CustomEra"=buildCustomEraStrategy);
if (!is.null(buildStrategy)) {
EndStrategy <- buildStrategy(cohortDefRow$cohortId);
}
else {
warning(paste("Invalid EndStrategy specified for cohort:", cohortDefRow$cohortId));
}
}
# build concept set list from conceptSetIDs
conceptSetIds <- unique(conceptSetIds);
conceptSetRows <- subset(conceptSetsCsv, conceptSetId %in% conceptSetIds);
ConceptSets <- {};
by(conceptSetRows, seq_len(nrow(conceptSetRows)), function(csRow) {
conceptSet <- list("id"=as.numeric(csRow$conceptSetId), "name"=csRow$name);
csvItems <- subset(conceptSetItemsCsv, conceptSetId == csRow$conceptSetId);
conceptSetItems <- {};
by(csvItems, seq_len(nrow(csvItems)), function(csiRow) {
item <- {};
item$concept <- resolveConcept(csiRow$conceptId);
if (csiRow$isExcluded == 1) item$isExcluded <- TRUE;
if (csiRow$includeDescendants == 1) item$includeDescendants <- TRUE;
if (csiRow$includeMapped == 1) item$includeMapped <- TRUE;
conceptSetItems <<- c(conceptSetItems, list(item));
});
conceptSet$expression <- list("items" = conceptSetItems);
ConceptSets <<- c(ConceptSets, list(conceptSet));
});
# create cohortExpression
cohortExpression <- list("ConceptSets"= if (length(ConceptSets) > 0) ConceptSets else list(),
"PrimaryCriteria"= if (length(PrimaryCriteria) > 0) PrimaryCriteria else list(),
"QualifiedLimit" = list("Type"=cohortDefRow$qeLimitType),
"ExpressionLimit" = list("Type"=cohortDefRow$cohortLimitType),
"InclusionRules" = list(),
"CensoringCriteria" = if (length(CensoringEvents) > 0) CensoringEvents else list()
);
if (!is.null(EndStrategy)) cohortExpression$EndStrategy <- EndStrategy;
# do not set AdditionalCriteria if there are no qualifiy events or demographic criteria
if (length(QualifyEvents) > 0 || length(DemographicCriteria) > 0) {
cohrotExpression$AdditionalCriteria <- AdditionalCriteria
}
# Create cohort definition object
expressionJSON <- jsonlite::toJSON(cohortExpression, auto_unbox = TRUE);
cohortDefinition <- list("name" = cohortDefRow$cohortName,
"description" = cohortDefRow$cohortDescription,
"expressionType"="SIMPLE_EXPRESSION",
"expression" = expressionJSON);
# set up a result object to return the reslut of this cohort sync
result <- {};
if (is.na(cohortDefRow$cohortDefinitionId)) {
# create the cohort definition on the server and set the result as 'new'
jsonBody <- jsonlite::toJSON(cohortDefinition, auto_unbox=TRUE);
httpheader <- c(Accept = "application/json; charset=UTF-8", "Content-Type" = "application/json");
url <- paste(cacheEnv$webAPI, "cohortdefinition", sep = "/");
createResponse <- jsonlite::fromJSON(RCurl::postForm(url, .opts = list(httpheader = httpheader, postfields = jsonBody)));
# update CohortDefinitions csvData
cacheEnv$csvData$CohortDefinitions.csv[which(cacheEnv$csvData$CohortDefinitions.csv$cohortId == targetCohortId),"cohortDefinitionId"] <- createResponse$id;
# assign result status as 'add'
result$cohortId <- cohortDefRow$cohortId;
result$cohortDefinitionId <- createResponse$id;
result$status <- "add";
write.csv(cacheEnv$csvData$CohortDefinitions.csv,
file=paste(cacheEnv$workingDir, "CohortDefinitions.csv", sep=""),
na="",
row.names = FALSE);
} else {
# assign cohort definiton id to constructed cohort definition
cohortDefinition$id <- cohortDefRow$cohortDefinitionId;
# read existing cohort definition.
url <- paste(cacheEnv$webAPI, "cohortdefinition", as.numeric(cohortDefRow$cohortDefinitionId), sep = "/");
existingCohortDef <- jsonlite::fromJSON(url);
if (existingCohortDef$expression != cohortDefinition$expression) {
# modify if expression doesn't match.
jsonBody <- jsonlite::toJSON(cohortDefinition, auto_unbox=TRUE);
httpheader <- c(Accept = "application/json; charset=UTF-8", "Content-Type" = "application/json");
updateResponse <- RCurl::httpPUT(url,jsonBody, "httpheader" = httpheader);
# assign result status as 'modify'
result$cohortId <- cohortDefRow$cohortId;
result$cohortDefinitionId <- cohortDefRow$cohortDefinitionIdd;
result$status <- "update";
} else if (existingCohortDef$name != cohortDefinition$name) {
# rename if definition name has changed.
jsonBody <- jsonlite::toJSON(cohortDefinition, auto_unbox=TRUE);
httpheader <- c(Accept = "application/json; charset=UTF-8", "Content-Type" = "application/json");
updateResponse <- RCurl::httpPUT(url,jsonBody, "httpheader" = httpheader);
# assign result status as 'rename'
result$cohortId <- cohortDefRow$cohortId;
result$cohortDefinitionId <- cohortDefRow$cohortDefinitionIdd;
result$status <- "rename";
} else {
# no action taken
result$cohortId <- cohortDefRow$cohortId;
result$cohortDefinitionId <- cohortDefRow$cohortDefinitionId;
result$status <- "none";
}
}
return(result);
}
#' @export
#' Synchronise every non-manual cohort definition with the WebAPI and report
#' how many definitions were added, updated, renamed or left unchanged.
sync <- function() {
  if (is.null(cacheEnv$workingDir))
    stop("Working directory is not set. call CohortManager::setWorkingDirectory(path) before sync().")
  # Only cohorts flagged as non-manual (isManual == "0") are processed.
  defs <- cacheEnv$csvData[["CohortDefinitions.csv"]];
  targetIds <- defs[defs$isManual == "0","cohortId"]
  outcomes <- lapply(targetIds, processCohort);
  # Count how many results carry each status for the summary message.
  countStatus <- function(s) length(Filter(function(item) { item$status == s }, outcomes))
  message(paste("Sync complete.",
                countStatus("add"), "added,",
                countStatus("update"), "updated,",
                countStatus("rename"), "renamed,",
                countStatus("none"), "unchanged.", sep=" "));
}
#' @export
#' Configure the WebAPI endpoint and/or vocabulary source key.
#' An argument that is not supplied leaves the current setting untouched.
#'
#' @param webAPI base URL of the OHDSI WebAPI.
#' @param sourceKey vocabulary source key used by resolveConcept().
setOptions <- function(webAPI, sourceKey) {
  if (!missing(webAPI)) {
    cacheEnv$webAPI <- webAPI;
  }
  if (!missing(sourceKey)) {
    cacheEnv$sourceKey <- sourceKey;
  }
}
# Package load hook: populates cacheEnv with the catalogue of required CSV
# files (name + expected column classes) and default connection settings.
.onLoad <- function(libname, pkgname) {
  # NOTE(review): "ChortCensoringEvents.csv" looks like a misspelling of
  # "CohortCensoringEvents.csv", but processCohort() reads the same key, so
  # renaming it here alone would break loading — fix both together if ever.
  cacheEnv$requiredFiles <- list(buildCsvDef("CohortDefinitions.csv", c("cohortId"="integer", "cohortDefinitionId"="integer", "conceptId"="integer", "cohortName"="character", "isManual"="integer", "priorObservation"="integer", "postObservation"="integer", "peLimitType"="character", "qeLimitType"="character", "cohortLimitType"="character", "endStrategy"="character", "cohortDescription"="character")),
                                 buildCsvDef("CohortCriteria.csv", c("criteriaId"="integer", "name"="character", "criteriaType"="character", "conceptSetId"="integer", "useFirst" = "integer")),
                                 buildCsvDef("CohortPrimaryEvents.csv", c("cohortId"="integer", "criteriaId"="integer")),
                                 buildCsvDef("CohortCorrelatedCriteria.csv", c("cohortId"="integer", "criteriaId"="integer", "type"="character", "occurrences"="integer", "windowStart"="integer", "windowEnd"="integer")),
                                 buildCsvDef("CohortDemographicCriteria.csv", c("cohortId"="integer", "name"="character", "gender"="character", "ageMin"="integer", "ageMax"="integer", "startDate"="character", "endDate"="character")),
                                 buildCsvDef("DateOffsetStrategy.csv", c("cohortId"="integer", "name"="character", "dateField"="character", "offset"="integer")),
                                 buildCsvDef("CustomEraStrategy.csv", c("cohortId"="integer", "name"="character", "drugCodesetId"="integer", "gapDays"="integer", "offset"="integer")),
                                 buildCsvDef("ChortCensoringEvents.csv", c("cohortId"="integer", "name"="character", "criteriaId"="integer")),
                                 buildCsvDef("ConceptSets.csv", c("conceptSetId"="integer", "name"="character", "description"="character")),
                                 buildCsvDef("ConceptSetItems.csv", c("conceptSetId"="integer", "conceptId"="integer", "isExcluded"="integer", "includeDescendants"="integer", "includeMapped"="integer"))
  );
  # Defaults; override via setOptions() and setDataDirectory().
  cacheEnv$workingDir <- NULL;
  cacheEnv$webAPI <- "http://localhost:8080/WebAPI";
  cacheEnv$sourceKey <- "VOCABULARY_20161218";
}
|
a12884e6169f868f0ad7a4bab1f31904466c9c67
|
2441f634ad77edc68aef7ba1370cf9c44cd3232e
|
/man/stripe_list_subscriptions.Rd
|
ede6ae06aeb68fcd5767a89e175048201cb1a729
|
[] |
no_license
|
muschellij2/RStripe
|
bee390283e5ed2fa718daaccf3f7d70c741ab4fc
|
2456061256a927a4711b7267942a8af01393040f
|
refs/heads/master
| 2021-01-21T21:07:02.455296
| 2017-05-24T17:12:22
| 2017-05-24T17:12:22
| 92,309,551
| 0
| 0
| null | 2017-05-24T15:45:46
| 2017-05-24T15:45:46
| null |
UTF-8
|
R
| false
| true
| 848
|
rd
|
stripe_list_subscriptions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subscriptions.R
\name{stripe_list_subscriptions}
\alias{stripe_list_subscriptions}
\title{List Subscriptions}
\usage{
stripe_list_subscriptions(customer_id, args = NULL, api_key = NULL)
}
\arguments{
\item{customer_id}{The id of the customer with the subscriptions.}
\item{args}{an optional list that can contain
\describe{
\item{ending_before}{\emph{optional:} An object id which will show
objects before}
\item{limit}{\emph{optional:} A number 1 to 100 to limit the items.
Default is 10}
\item{starting_after}{\emph{optional:} An object id which will show
objects starting here}
}}
\item{api_key}{Your Stripe API Key}
}
\value{
A data frame with subscriptions information
}
\description{
List all the subscriptions currently acted on a customer.
}
|
e6451e37d8b58b7ebc0b25ec6f45c8b2be12d51f
|
5bd80de7ff59e8d1ea755cfd10cc61a264593c18
|
/src/speedtest.R
|
171bcffab0c630843e801dc9eccb8de4e5512c3c
|
[] |
no_license
|
andymd26/coding_questions
|
c1a4a8d17ecc5db475c024813957b310b2439331
|
82d987b461e0dad7642ac15766513e1fba53028c
|
refs/heads/master
| 2021-01-18T13:02:26.517185
| 2017-03-27T19:02:52
| 2017-03-27T19:02:52
| 84,334,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,096
|
r
|
speedtest.R
|
# Setup: load dplyr and read the pre-computed distribution-parameter files.
# Fixes over the original:
#  - install.packages("dplyr") removed: package installation is one-off
#    setup, not something a benchmark script should do on every run.
#  - rm(list = ls()) removed: wiping the caller's global environment is a
#    destructive side effect scripts should not perform.
#  - library() instead of require(): require() only warns and returns FALSE
#    on failure; library() stops with a clear error immediately.
#  - the duplicated second read of logrnml_price.rds / logrnml_load.rds
#    (identical to the first) was dropped.
options(digits = 15)
options(scipen = 999)
library(dplyr)
path.processed = "C:/Users/ablohm/Documents/earth_network/data/processed/"
# Change to the folder with the list_logrnml.rds data file
setwd(path.processed)
dist.price.v2 = readRDS(file = "logrnml_price.rds")
dist.load.v2 = readRDS(file = "logrnml_load.rds")
dist.price = readRDS(file = "list_logrnml_price.rds")
dist.load = readRDS(file = "list_logrnml_load.rds")
# Model 1 uses the first set of inputs; model 2 the second set.
# Timing table: one row per intended sample size, one column per model.
# NOTE(review): the n actually used below is 1000000, which matches none of
# these rows, and the model-1 result is written into row 2 (labelled
# 5000000) — confirm which sample sizes were intended.
time.taken = data.frame(n = c(1000, 5000000, 10000000),
                        model.1 = c(0,0,0),
                        model.2 = c(0,0,0))
n= 1000000
start.time = Sys.time()
# Sample size: random month group (1-2), event hour (15-22), duration (0-4)
data.mc = data.frame(
  month_group = round(runif(n, min= 0.5, max= 2.5)),
  hour = round(runif(n, min= 14.5, max= 22.5)),
  duration = round(runif(n, min= -0.5, max= 4.5)))
# Model 1: per-row (rowwise) lognormal draws for price and load at hours
# t0..t4, indexing a list-column of per-hour distribution parameters.
# NOTE(review): price.t.list[1:duration] with duration == 0 indexes 1:0 =
# c(1, 0), i.e. just the first element — confirm that is the intended
# behaviour for zero-duration events.
data.mc = data.mc %>%
  left_join(dist.price, by = "month_group") %>%
  # Joins the list structure we created based on the month_group variable
  mutate(hour.temp = hour + 1) %>%
  # To match up the indices
  rowwise() %>%
  # price.t0..t4: one lognormal draw per row; parameters [2]/[3] bound the
  # meanlog and [4]/[5] the sdlog for the current hour (per list layout here).
  mutate(price.t0 = rlnorm(1, meanlog= runif(1, min=price.list[[hour.temp]][2], max=price.list[[hour.temp]][3]),
                           sdlog = runif(1, min=price.list[[hour.temp]][4], max=price.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(price.t1 = rlnorm(1, meanlog= runif(1, min=price.list[[hour.temp]][2], max=price.list[[hour.temp]][3]),
                           sdlog = runif(1, min=price.list[[hour.temp]][4], max=price.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(price.t2 = rlnorm(1, meanlog= runif(1, min=price.list[[hour.temp]][2], max=price.list[[hour.temp]][3]),
                           sdlog = runif(1, min=price.list[[hour.temp]][4], max=price.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(price.t3 = rlnorm(1, meanlog= runif(1, min=price.list[[hour.temp]][2], max=price.list[[hour.temp]][3]),
                           sdlog = runif(1, min=price.list[[hour.temp]][4], max=price.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(price.t4 = rlnorm(1, meanlog= runif(1, min=price.list[[hour.temp]][2], max=price.list[[hour.temp]][3]),
                           sdlog = runif(1, min=price.list[[hour.temp]][4], max=price.list[[hour.temp]][5]))) %>%
  mutate(price.t.list = list(c(price.t0, price.t1, price.t2, price.t3, price.t4))) %>%
  mutate(price.avg = mean(price.t.list[1:duration])) %>%
  mutate(price.end = price.t.list[duration + 1]) %>%
  select(., month_group, hour.temp, hour, duration, price.list, price.t.list, price.avg, price.t0, price.end) %>%
  # Determines the hour that the load recovered reenters load demand (i.e., the hour following the DR event).
  # Given that load and price are for hour ending a one hour event scheduled for 14:00 would have the load recovery
  left_join(dist.load, by = "month_group") %>%
  # Joins the list structure we created based on the month_group variable
  mutate(hour.temp = hour + 1) %>%
  # To match up the indices
  mutate(load.t0 = rlnorm(1, meanlog= runif(1, min=load.list[[hour.temp]][2], max=load.list[[hour.temp]][3]),
                          sdlog = runif(1, min=load.list[[hour.temp]][4], max=load.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(load.t1 = rlnorm(1, meanlog= runif(1, min=load.list[[hour.temp]][2], max=load.list[[hour.temp]][3]),
                          sdlog = runif(1, min=load.list[[hour.temp]][4], max=load.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(load.t2 = rlnorm(1, meanlog= runif(1, min=load.list[[hour.temp]][2], max=load.list[[hour.temp]][3]),
                          sdlog = runif(1, min=load.list[[hour.temp]][4], max=load.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(load.t3 = rlnorm(1, meanlog= runif(1, min=load.list[[hour.temp]][2], max=load.list[[hour.temp]][3]),
                          sdlog = runif(1, min=load.list[[hour.temp]][4], max=load.list[[hour.temp]][5]))) %>%
  mutate(hour.temp = ifelse(hour.temp < 24, hour.temp + 1, hour.temp + 1 - 23)) %>%
  mutate(load.t4 = rlnorm(1, meanlog= runif(1, min=load.list[[hour.temp]][2], max=load.list[[hour.temp]][3]),
                          sdlog = runif(1, min=load.list[[hour.temp]][4], max=load.list[[hour.temp]][5]))) %>%
  mutate(load.t.list = list(c(load.t0, load.t1, load.t2, load.t3, load.t4))) %>%
  mutate(load.avg = mean(load.t.list[1:duration])) %>%
  mutate(load.end = load.t.list[duration + 1]) %>%
  select(., month_group, hour, hour.temp, duration, price.list, price.t.list, price.avg, price.t0, price.end,
         load.list, load.t.list, load.avg, load.t0, load.end)
# This first approach uses a structured list to store information on the parameters of the load and price distribution parameters for additional later sampling operations.
end.time = Sys.time()
# Record model-1 wall time in seconds (see the row-label caveat above).
time.taken[2, 2] = difftime(end.time, start.time, units= "secs")
# Second model: same simulation, but the distribution parameters are stored
# as ordinary numeric columns (joined by month_group + hour) so the rlnorm
# draws can be fully vectorised over all n rows instead of rowwise.
dist.price.v2 = dist.price.v2 %>%
  select(., month_group, hour, mu.lb.price, mu.ub.price, sigma.lb.price, sigma.ub.price)
dist.load.v2 = dist.load.v2 %>%
  select(., month_group, hour, mu.lb.load, mu.ub.load, sigma.lb.load, sigma.ub.load)
start.time.m2 = Sys.time()
data.mc = data.frame(
  month_group = round(runif(n, min= 0.5, max= 2.5)),
  hour = round(runif(n, min= 14.5, max= 22.5)),
  duration = round(runif(n, min= -0.5, max= 4.5)))
# NOTE(review): the hour wrap here is `hour < 23` vs model 1's
# `hour.temp < 24` — confirm the two models advance hours identically.
data.mc = data.mc %>%
  mutate(hour.original = hour) %>%
  # Price portion: join parameters for the current hour, draw n lognormals
  # at once, drop the parameter columns, then advance the hour and repeat.
  left_join(dist.price.v2, by = c("month_group","hour")) %>%
  mutate(price.t0 = rlnorm(n, meanlog = runif(n, min = mu.lb.price, max = mu.ub.price),
                           sdlog = runif(n, min = sigma.lb.price, max = sigma.ub.price))) %>%
  select(., -(mu.lb.price:sigma.ub.price)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.price.v2, by = c("month_group","hour")) %>%
  mutate(price.t1 = rlnorm(n, meanlog = runif(n, min = mu.lb.price, max = mu.ub.price),
                           sdlog = runif(n, min = sigma.lb.price, max = sigma.ub.price))) %>%
  select(., -(mu.lb.price:sigma.ub.price)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.price.v2, by = c("month_group","hour")) %>%
  mutate(price.t2 = rlnorm(n, meanlog = runif(n, min = mu.lb.price, max = mu.ub.price),
                           sdlog = runif(n, min = sigma.lb.price, max = sigma.ub.price))) %>%
  select(., -(mu.lb.price:sigma.ub.price)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.price.v2, by = c("month_group","hour")) %>%
  mutate(price.t3 = rlnorm(n, meanlog = runif(n, min = mu.lb.price, max = mu.ub.price),
                           sdlog = runif(n, min = sigma.lb.price, max = sigma.ub.price))) %>%
  select(., -(mu.lb.price:sigma.ub.price)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.price.v2, by = c("month_group","hour")) %>%
  mutate(price.t4 = rlnorm(n, meanlog = runif(n, min = mu.lb.price, max = mu.ub.price),
                           sdlog = runif(n, min = sigma.lb.price, max = sigma.ub.price))) %>%
  select(., -(mu.lb.price:sigma.ub.price)) %>%
  rowwise() %>%
  mutate(price.list = list(c(price.t0, price.t1, price.t2, price.t3, price.t4))) %>%
  mutate(price.avg = mean(price.list[1:duration])) %>%
  mutate(price.end = price.list[duration + 1]) %>%
  ungroup() %>%
  # Cancels out the rowwise()
  select(., -(price.t1:price.t4)) %>%
  # Load portion: reset hour to the original event hour, then same pattern.
  mutate(hour = hour.original) %>%
  left_join(dist.load.v2, by = c("month_group","hour")) %>%
  mutate(load.t0 = rlnorm(n, meanlog = runif(n, min = mu.lb.load, max = mu.ub.load),
                          sdlog = runif(n, min = sigma.lb.load, max = sigma.ub.load))) %>%
  select(., -(mu.lb.load:sigma.ub.load)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.load.v2, by = c("month_group","hour")) %>%
  mutate(load.t1 = rlnorm(n, meanlog = runif(n, min = mu.lb.load, max = mu.ub.load),
                          sdlog = runif(n, min = sigma.lb.load, max = sigma.ub.load))) %>%
  select(., -(mu.lb.load:sigma.ub.load)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.load.v2, by = c("month_group","hour")) %>%
  mutate(load.t2 = rlnorm(n, meanlog = runif(n, min = mu.lb.load, max = mu.ub.load),
                          sdlog = runif(n, min = sigma.lb.load, max = sigma.ub.load))) %>%
  select(., -(mu.lb.load:sigma.ub.load)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.load.v2, by = c("month_group","hour")) %>%
  mutate(load.t3 = rlnorm(n, meanlog = runif(n, min = mu.lb.load, max = mu.ub.load),
                          sdlog = runif(n, min = sigma.lb.load, max = sigma.ub.load))) %>%
  select(., -(mu.lb.load:sigma.ub.load)) %>%
  mutate(hour = ifelse(hour < 23, hour + 1, hour + 1 - 24)) %>%
  left_join(dist.load.v2, by = c("month_group","hour")) %>%
  mutate(load.t4 = rlnorm(n, meanlog = runif(n, min = mu.lb.load, max = mu.ub.load),
                          sdlog = runif(n, min = sigma.lb.load, max = sigma.ub.load))) %>%
  select(., -(mu.lb.load:sigma.ub.load)) %>%
  rowwise() %>%
  mutate(load.list = list(c(load.t0, load.t1, load.t2, load.t3, load.t4))) %>%
  mutate(load.avg = mean(load.list[1:duration])) %>%
  mutate(load.end = load.list[duration + 1]) %>%
  ungroup() %>%
  select(., -(load.t1:load.t4))
end.time.m2 = Sys.time()
# NOTE(review): model-2 time is written into row 1 (labelled n = 1000)
# though n = 1000000 was used — confirm the intended bookkeeping.
time.taken[1, 3] = difftime(end.time.m2, start.time.m2, units = "secs")
# View() opens the data viewer; requires an interactive session (RStudio).
View(time.taken)
|
c8e5e563334ddbf6a64001549a9b3e560dde8d95
|
8b04b06555f2b4f0f101fe7a81370490ea4bc716
|
/src/functions/3_signal_drift_correction.R
|
150fb1b781aa6c7838bb9545ab38a60c1537c786
|
[
"MIT"
] |
permissive
|
morchalabi/COMPARE-suite
|
d16f3aa6ccb9d4fb3376349ac274c35126d465e2
|
df2feaf6aa982e0f6f077eb85f26acce6bb61063
|
refs/heads/master
| 2023-08-30T12:00:12.962203
| 2021-02-25T13:28:32
| 2021-02-25T13:28:32
| 256,939,470
| 1
| 0
|
MIT
| 2021-01-27T19:07:32
| 2020-04-19T07:25:21
|
R
|
UTF-8
|
R
| false
| false
| 26,085
|
r
|
3_signal_drift_correction.R
|
# This module removes intra- and inter-plate signal drift bias. Running it with no request for correction reveals possible sources of bias.
# Possible sources of bias are like edge effect, signal drift, cell viability, auto-fluorescence and carry-over effect.
# To remove signal drift bias, it needs to know the direction along which the bias has occurred like the order by which wells have been read.
# Each well is represented by the median of each marker.
# Input arguments are:
# chnls_ (quoted string): channel (not marker) names like 'chnl1,chn2,chnl3'
# CORRECT (boolean): should the bias be corrected? like T/TRUE/true or F/FALSE/false
# drctn_ (string): direction of bias like column or row
# FITPLOT (boolean): should the regression plots be output? like T/TRUE/true or F/FALSE/false
# HEATPLOT (boolean): should the plate heatmaps be output? like T/TRUE/true or F/FALSE/false
# inURL (string): address to input data files like ../data
# outURL (string): address to output result like ../out
# Algorithm designed and implemented by Mori C.H., mor.chalabi@gmail.com
# Third-party dependencies for this module:
#   flowCore  - reading .fcs cytometry files (read.FCS below)
#   pheatmap / gridExtra / ggplot2 - presumably for the HEATPLOT/FITPLOT
#   outputs described in the header; their use is outside this excerpt.
# NOTE(review): prefer library() over require(): require() only warns when a
# package is missing, deferring the failure to a less clear error later.
require(flowCore)
require(pheatmap)
require(gridExtra)
require(ggplot2)
step3_signal_drift_correction = function(chnls_, CORRECT, drctn_, FITPLOT, HEATPLOT, inURL, outURL)
{
# STEP 1: Computing MFIs ####
# reading in annotation file
annot_ = read.table(file = paste0(inURL,'/Annotations.txt'), header = T, sep = '\t',as.is = T, check.names = F, stringsAsFactors = F)
plates_ = sort(unique(annot_$plate)) # how many plates are there in this assay?
# reading fcs files of each plate
MFI_mats = list() # a list containing MFI matrices of each plate per channel
for(plate_ in plates_)
{
message('Reading in fcs files of plate #',plate_,':')
# reading in annotation of files of current plate
annot_tmp = annot_[annot_$plate %in% plate_,] # part of annotation table containing info for current plate
rows_ = sort(unique(annot_tmp$row)) # rows in current plate
cols_ = sort(unique(annot_tmp$column))
m_ = length(rows_) # number of rows in current plate
n_ = length(cols_) # number of columns in current plate
# creating empty MFI matrices for current plate for each channel
MFI_mat = list() # a list containing MFI matrix of current plate per channel
for(chnl_ in chnls_)
{
mat_ = matrix(data = 0, nrow = length(rows_), ncol = length(cols_))
rownames(mat_) = rows_
colnames(mat_) = cols_
MFI_mat[[chnl_]] = mat_
}
# reading in fcs files (wells) of current plate and computing their MFIs, one per channel
fcs_dt_ls = list() # a list containing all fcs files (wells) of current plate
fcs_flNms = character() # all fcs file names
for(rw_ in 1:nrow(annot_tmp))
{
fcs_dt = read.FCS(filename = paste0(inURL,'/',annot_tmp$file[rw_],'.fcs'), transformation = F, truncate_max_range = F)
# computing offset from the beginning of the MFI matrix
i_ = which(rows_ == annot_tmp$row[rw_]) # row index
j_ = which(cols_ == annot_tmp$column[rw_]) # column index
offset_ = if(drctn_ == 'column'){ m_*(j_-1)+i_ }else{ n_*(i_-1)+j_ } # column-wise (left to right) or row-wise (left to right) offset
fcs_dt_ls[[offset_]] = fcs_dt # fcs data at location (i,j), offset, of current plate
fcs_flNms[offset_] = annot_tmp$file[rw_] # fcs file name at location (i,j), offset, of current plate
# computing one MFI per channel for current fcs file at position (i,j)
# outlier event values (like cell marker expressions) are removed first; Because here we are not assigning negatives to 0, it is fine to use IQR for removing outliers, if any
for(chnl_ in chnls_)
{
dt_tmp = fcs_dt_ls[[offset_]]@exprs[,chnl_, drop = T] # event values of the fcs file in current offset
dt_tmp = dt_tmp[which(0 <= dt_tmp)] # N.B.: which() ignores NA and NaNs. Non-positives are always non-positives even in the presence of drift
IQR_ = IQR(dt_tmp) # inter-quantile range
quartiles_ = quantile(dt_tmp, probs = c(.25, .75)) # 25th and 75th percentiles
lowWhisker_ = max(min(dt_tmp), quartiles_[1] - IQR_*1.5) # lower whisker
upWhisker_ = min(max(dt_tmp), quartiles_[2] + IQR_*1.5)
MFI_mat[[chnl_]][i_,j_] = median(dt_tmp[lowWhisker_ < dt_tmp & dt_tmp < upWhisker_]) # median is taken over valid event values which are within lower-whisker and upper-whiskers
}
}
# STEP 2: Intra-plate correcting of fcs files (wells) in current plate per channel ####
if(CORRECT)
{
message('intra-plate correction of plate #',plate_,':')
# computing intra-plate correction coefficients (alpha) for each channel of current plate
for(chnl_ in chnls_)
{
# converting MFI of current plate of current channel into a vector needed for regression analysis
y_ = if(drctn_ == 'column'){ matrix(data = MFI_mat[[chnl_]], ncol = 1, nrow = length(MFI_mat[[chnl_]]), byrow = F)[,,drop = T] }else # reading matrix column-wise
{ matrix(data = MFI_mat[[chnl_]], ncol = 1, nrow = length(MFI_mat[[chnl_]]), byrow = T)[,,drop = T] } # reading matrix row-wise
x_ = 1:length(y_) # x_ is then offset of y_
# removing outliers from plate MFIs before regression using inter-quantile range
IQR_ = IQR(y_)
quartiles_ = quantile(y_, probs = c(.25, .75))
lowWhisker_ = max(min(y_), quartiles_[1] - IQR_*1.5)
upWhisker_ = min(max(y_), quartiles_[2] + IQR_*1.5)
y_tmp = y_[lowWhisker_ < y_ & y_ < upWhisker_] # valid MFIs of current plate of current channel
# fitting regression line
fit_ = lm(data = data.frame(offset = 1:length(y_tmp), chnl = y_tmp), formula = chnl~offset) # fitting simple linear regression
a_ = fit_$coefficients["offset"] # slope of the regression line
if(a_ <= 0)
{
warning(paste0('Slope for plate #',plate_,' channel #',chnl_,' was not positive, no intra-plate correction is necessary!')) # if slope was not positive, no correction
next() # goes to next channel
}
b_ = fit_$coefficients["(Intercept)"] # intercept of regression line
alpha_ = b_/(a_*x_ + b_) # correction coefficients for each well per channel: y*/y = b/ax+b -> y* = y(b/ax+b), y* is translated y
# intra-plate correction of all files of current plate of current channel
for(offset_ in 1:length(fcs_dt_ls)) # fcs files in fcs_dt_ls are already ordered according to their offset in MFI matrix (MFI_mat)
{
if(is.null(fcs_dt_ls[[offset_]])) { next() } # some wells could be empty on the plate! It goes to next well
# correcting MFI of current fcs for current channel (to avoid recomputing IQR)
if(drctn_ == 'column') # extracting (i,j) from column-wise offset
{
j_ = round(ceiling(offset_/m_)) # round is added because arithmetic on integer and double is not exact in R
i_ = round(offset_ - m_*(j_-1))
}else
{
i_ = round(ceiling(offset_/n_))
j_ = round(offset_ - n_*(i_-1))
}
MFI_mat[[chnl_]][i_,j_] = MFI_mat[[chnl_]][i_,j_]*alpha_[offset_] # y*/y = b/ax+b -> y* = y(b/ax+b)
# correcting fcs file
fcs_dt_ls[[offset_]]@exprs[,chnl_] = fcs_dt_ls[[offset_]]@exprs[,chnl_]*alpha_[offset_] # Basically we should have used y* = y + (y*m - ym) for translation of cell/event values along with their MFI for current channel where ym is y-coordinate of the MFI and y* is translated y.
# However, since this formula would translate some values to x+y- quadrant, we used the same way we translated MFI values, i.e. translation by proportion not addition.
}
}
# writing intra-plate corrected fcs files temporarily in "data" directory
for(offset_ in 1:length(fcs_dt_ls))
{
if(is.null(fcs_dt_ls[[offset_]])) { next() }
write.FCS(x = fcs_dt_ls[[offset_]], filename = paste0(inURL,'/',fcs_flNms[offset_],'.fcs')) # matrix of events in this well
}
}
rm(fcs_dt_ls)
MFI_mats[[plate_]] = MFI_mat
}
if(FITPLOT)
{
# plotting regression plots for each channel per page
if(CORRECT) { pdf(file = paste0(outURL,'/MFI_fit_intra-plate_corrected.pdf')) }else{ pdf(file = paste0(outURL,'/MFI_fit_intra-plate_no_correction.pdf')) }
for(plate_ in plates_)
{
for (chnl_ in chnls_)
{
# converting MFI of current plate of current channel into a vector needed for regression analysis
y_ = if(drctn_ == 'column'){ matrix(data = MFI_mats[[plate_]][[chnl_]], ncol = 1, nrow = length(MFI_mats[[plate_]][[chnl_]]), byrow = F)[,,drop = T]/1e4 }else # reading matrix column-wise
{ matrix(data = MFI_mats[[plate_]][[chnl_]], ncol = 1, nrow = length(MFI_mats[[plate_]][[chnl_]]), byrow = T)[,,drop = T]/1e4 } # reading matrix row-wise
x_ = 1:length(y_) # x_ then is offset of y_
# removing outliers from plate MFIs before regression using inter-quantile range
IQR_ = IQR(y_)
quartiles_ = quantile(y_, probs = c(.25, .75))
lowWhisker_ = max(min(y_), quartiles_[1] - IQR_*1.5)
upWhisker_ = min(max(y_), quartiles_[2] + IQR_*1.5)
y_tmp = y_[lowWhisker_ < y_ & y_ < upWhisker_] # valid MFIs of current plate of current channel needed for regression analysis
# fitting regression line
fit_ = lm(data = data.frame(offset = 1:length(y_tmp), chnl = y_tmp), formula = chnl~offset) # fitting simple linear regression
a_ = if(CORRECT){ 0 }else{ fit_$coefficients["offset"] } # slope of the regression line
b_ = fit_$coefficients["(Intercept)"] # intercept of the regression line
p_ = ggplot(data = data.frame(x = x_, y = y_, Plate = plate_), aes(x = x_, y = y_, color = as.factor(Plate)))+
theme(axis.line = element_line(color = 'black'), panel.background = element_blank(), legend.key = element_rect(fill = NA),
text = element_text(face = 'bold',size = 20), plot.title = element_text(vjust = 0.5, hjust = 0.5), aspect.ratio = 1)+
guides(color = guide_legend(override.aes = list(size = 5)))+
labs(x = 'Pseudo-time', y = expression('MFI ('%*%10^-4*')'), title = chnl_, color = 'Plate')+
geom_point(pch = 20, size = 2)+
scale_color_manual(values = 'red')+
geom_abline(intercept = b_, slope = a_, color = 'darkblue')+
scale_x_continuous(expand = c(0.01, 0.01)) + scale_y_continuous(expand = c(0.01, 0.01))
plot(p_)
}
}
graphics.off()
}
# STEP 3: Correcting for inter-plate signal drift ####
if(CORRECT)
{
message('inter-plate correction')
# computing inter-plate correction coefficient
old_intcpts = matrix(data = NA, nrow = length(plates_), ncol = length(chnls_)) # a matrix to store current intercepts per plate and per channel
rownames(old_intcpts) = plates_
colnames(old_intcpts) = chnls_
new_intcpts = list() # a list to store new intercepts (baselines) after correction per channel
for(chnl_ in chnls_) # for each channel
{
dt_ = numeric()
for(plate_ in plates_) # for each plate
{
dt_[plate_] = median(MFI_mats[[plate_]][[chnl_]]) # current plate's median for current channel
old_intcpts[plate_,chnl_] = dt_[plate_]
}
new_intcpts[[chnl_]] = median(dt_) # median of plate medians for current channel
}
# inter-plate correction
for(plate_ in plates_) # for each plate
{
# reading fcs files
annot_tmp = annot_[annot_$plate %in% plate_,] # annotations related to wells of current plate
rows_ = sort(unique(annot_tmp$row)) # number of rows in current plate
cols_ = sort(unique(annot_tmp$column)) # number of columns in current plate
for(rw_ in 1:nrow(annot_tmp)) # for each well on the current plate
{
fcs_dt = read.FCS(filename = paste0(inURL,'/',annot_tmp$file[rw_],'.fcs'), transformation = F, truncate_max_range = F) # event data of current well
# extracting row and column indices of current well
i_ = which(rows_ == annot_tmp$row[rw_]) # row index of current well
j_ = which(cols_ == annot_tmp$column[rw_]) # column index of current well
# inter-plate correction per channel
for(chnl_ in chnls_) # for each channel
{
# computing correction coefficient
alpha_ = new_intcpts[[chnl_]]/old_intcpts[plate_,chnl_] # correction coefficient: b*/b = y*/y -> y* = y(b*/b)
# correcting MFI of current fcs (to avoid recomputing IQR)
MFI_mats[[plate_]][[chnl_]][i_,j_] = MFI_mats[[plate_]][[chnl_]][i_,j_]*alpha_ # b*/b = y*/y -> y* = y(b*/b)
# correcting fcs file of current well for current channel
fcs_dt@exprs[,chnl_] = fcs_dt@exprs[,chnl_]*alpha_ # Basically we should have used y* = y + (y*m - ym) for translation of cell/event values along with their MFI for current channel where ym is y-coordinate of the MFI and y* is translated y.
# However, since this formula would translate some values to x+y- quadrant, we used the same way we translated MFI values, i.e. translation by proportion not addition.
}
# writing corrected fcs files
keyword(fcs_dt)[['$FIL']] = paste0(annot_tmp$file[rw_],'_compensated_corrected') # updating $FIL keyword
write.FCS(x = fcs_dt, filename = paste0(inURL,'/',annot_tmp$file[rw_],'.fcs')) # matrix of cells/events in this well
}
# writing MFI matrices
for(chnl_ in chnls_)
{
write.table(x = MFI_mats[[plate_]][[chnl_]], file = paste0(outURL,'/MFI_mat_P',plate_,chnl_,'.txt'), quote = F, sep = '\t', row.names = T, col.names = T)
}
}
}
# plotting fit lines
if(FITPLOT)
{
dt_ = list() # a matrix with these columns: MFI, plate and channel
k_ = 1
for(plate_ in plates_) # for each plate
{
for (chnl_ in chnls_) # for each channel
{
tmp = if(drctn_ == 'column'){ matrix(data = MFI_mats[[plate_]][[chnl_]], ncol = 1, nrow = length(MFI_mats[[plate_]][[chnl_]]), byrow = F)[,,drop = T]/1e4 }else # reading MFI matrix column-wise
{ matrix(data = MFI_mats[[plate_]][[chnl_]], ncol = 1, nrow = length(MFI_mats[[plate_]][[chnl_]]), byrow = T)[,,drop = T]/1e4 } # reading MFI matrix row-wise
dt_[[k_]] = data.frame(MFI = tmp, plate = as.character(plate_), channel = chnl_, stringsAsFactors = T)
k_ = k_ + 1
}
}
dt_ = do.call(what = rbind, args = dt_)
# plotting all dot plots on one PDF page per channel
if(CORRECT) { pdf(file = paste0(outURL,'/MFI_fit_inter-plate_corrected.pdf')) }else{ pdf(file = paste0(outURL,'/MFI_fit_inter-plate_no_correction.pdf')) }
for(chnl_ in chnls_) # for each channel
{
dt_tmp = dt_[dt_$channel %in% chnl_,]
p_ = ggplot(data = dt_tmp, aes(x = 1:nrow(dt_tmp), y = MFI, color = plate))+
theme(axis.line = element_line(color = 'black'), panel.background = element_blank(), legend.key = element_rect(fill = NA),
text = element_text(face = 'bold',size = 20), plot.title = element_text(vjust = 0.5, hjust = 0.5), aspect.ratio = 1)+
guides(color = guide_legend(override.aes = list(size = 5)))+
labs(x = 'Pseudo-time', y = expression('MFI ('%*%10^-4*')'), title = chnl_, color = 'Plate')+
geom_point(pch = 20, size = 2)+
geom_hline(yintercept = median(dt_tmp$MFI), color = 'darkred')+
scale_x_continuous(expand = c(0.01, 0.01)) + scale_y_continuous(expand = c(0.01, 0.01))
plot(p_)
}
graphics.off()
}
# STEP 4: Plotting plate heatmaps ####
if(HEATPLOT)
{
numOfPlates = length(plates_) # number of plates in this assay
nrow_ = floor(sqrt(numOfPlates)) # number of rows per page
ncol_ = ceiling(numOfPlates/nrow_) # number of columns per page
width_ = height_ = max(nrow_, ncol_)*8.45 # every heatmap takes up this width/height in inches
if(!CORRECT) # if no correction was requested
{
fl_ = paste0(outURL,'/plates_heatmap_no_correction.pdf')
pdf(file = fl_, width = width_, height = height_)
for(chnl_ in chnls_) # plots plates for each channel on one PDF page
{
plot_list = list()
# finding max across all plates
max_ = 0
for(plate_ in 1:numOfPlates)
{
max_tmp = max(MFI_mats[[plate_]][[chnl_]]/1e4)
if(max_ < max_tmp) { max_ = max_tmp }
}
for(plate_ in 1:numOfPlates) # plotting each plate for current channel
{
dt_mat = MFI_mats[[plate_]][[chnl_]]/1e4
dt_mat = rbind(dt_mat, MAX = c(max_, rep(0,ncol(dt_mat)-1))) # last row of each plate heatmap contain max value across all plates
# normalizing heatmap's color palette
dt_ = unique(as.numeric(dt_mat))
IQR_ = IQR(dt_)
quartiles_ = quantile(dt_, probs = c(.25, .75))
lowWhisker_ = max(min(dt_), quartiles_[1] - IQR_*1.5)
upWhisker_ = min(max(dt_), quartiles_[2] + IQR_*1.5)
dt_ = sort(dt_, decreasing = F)
cols1_ = u1_ = NULL
inds_ = which( dt_ <= lowWhisker_)
len_ = length(inds_)
if(0 < len_)
{
cols1_ = colorRampPalette(colors = c('grey80','red','red4'))(len_)
u1_ = dt_[inds_[length(inds_)]]
if(u1_ == 0) { u1_ = NULL} # to avoid redundant legend key on the heatmap
}
cols2_ = u2_ = NULL
inds_ = which(lowWhisker_ < dt_ & dt_ < quartiles_[1])
len_ = length(inds_)
if(0 < length(inds_))
{
cols2_ = colorRampPalette(colors = c('orange','orange4'))(len_)
u2_ = dt_[inds_[length(inds_)]]
}
cols3_ = u3_ = NULL
inds_ = which(quartiles_[1] <= dt_ & dt_ <= quartiles_[2])
len_ = length(inds_)
if(0 < length(inds_))
{
cols3_ = colorRampPalette(colors = c("yellow",'yellow4'))(len_)
u3_ = dt_[inds_[length(inds_)]]
}
cols4_ = u4_ = NULL
inds_ = which(quartiles_[2] < dt_ & dt_ <= upWhisker_)
len_ = length(inds_)
if(0 < length(inds_))
{
cols4_ = colorRampPalette(colors = c('green','green4'))(len_)
u4_ = dt_[inds_[length(inds_)]]
}
cols5_ = NULL
u5_ = if(max(dt_) < round(max(dt_),2)) { round(max(dt_)-0.01,2) }else{ round(max(dt_),2) }
inds_ = which(upWhisker_ < dt_)
len_ = length(inds_)
if(0 < length(inds_))
{
cols5_ = colorRampPalette(colors = c('skyblue','blue4'))(len_)
}
cols_ = c(cols1_, cols2_, cols3_, cols4_, cols5_)
col_step = 2*(diff(range(dt_))/length(cols_))
p_ = pheatmap(mat = dt_mat,
cluster_cols = F,
cluster_rows = F,
show_rownames = T, cellheight = 20,
show_colnames = T, cellwidth = 20,
main = paste0('Plate: ',plate_,', Channel: ', chnl_),
fontsize = 7,
border_color = 'grey90', color = cols_,
breaks = c(min(dt_)-col_step,dt_), # in pheatmap color intervals (showing lower and uper bounds) are open on the left and closed on the right
legend = T,
legend_breaks = round(c(0,u1_,u2_, u3_, u4_, u5_),2),
annotation_col = NA, silent = T)
plot_list[[plate_]] = p_[[4]]
}
grid.arrange(arrangeGrob(grobs= plot_list, nrow = nrow_, ncol = ncol_))
}
graphics.off()
}else # if correction was requested
{
fl_ = paste0(outURL,'/plates_heatmap_correction.pdf')
pdf(file = fl_, width = width_, height = height_)
for(chnl_ in chnls_) # plots plates for each channel on one PDF page
{
plot_list = list()
# finding max across all plates
max_ = 0
dt_ = NULL
for(plate_ in 1:numOfPlates)
{
dt_ = c(dt_, as.numeric(MFI_mats[[plate_]][[chnl_]]/1e4)) # palte plate_ showing MFI of channel chnl_
}
max_ = max(dt_)
# normalizing heatmap's color palette
dt_ = unique(dt_)
IQR_ = IQR(dt_)
quartiles_ = quantile(dt_, probs = c(.25, .75))
lowWhisker_ = max(min(dt_), quartiles_[1] - IQR_*1.5)
upWhisker_ = min(max(dt_), quartiles_[2] + IQR_*1.5)
dt_ = sort(dt_, decreasing = F)
cols1_ = u1_ = NULL
inds_ = which( dt_ <= lowWhisker_)
len_ = length(inds_)
if(0 < len_)
{
cols1_ = colorRampPalette(colors = c('grey80','red','red4'))(len_)
u1_ = dt_[inds_[length(inds_)]]
if(u1_ == 0) { u1_ = 0} # to avoid redundant legend key on heatmap
}
cols2_ = u2_ = NULL
inds_ = which(lowWhisker_ < dt_ & dt_ < quartiles_[1])
len_ = length(inds_)
if(0 < length(inds_))
{
cols2_ = colorRampPalette(colors = c('orange','orange4'))(len_)
u2_ = dt_[inds_[length(inds_)]]
}
cols3_ = u3_ = NULL
inds_ = which(quartiles_[1] <= dt_ & dt_ <= quartiles_[2])
len_ = length(inds_)
if(0 < length(inds_))
{
cols3_ = colorRampPalette(colors = c("yellow",'yellow4'))(len_)
u3_ = dt_[inds_[length(inds_)]]
}
cols4_ = u4_ = NULL
inds_ = which(quartiles_[2] < dt_ & dt_ <= upWhisker_)
len_ = length(inds_)
if(0 < length(inds_))
{
cols4_ = colorRampPalette(colors = c('green','green4'))(len_)
u4_ = dt_[inds_[length(inds_)]]
}
cols5_ = NULL
u5_ = if(max(dt_) < round(max(dt_),2)) { round(max(dt_)-0.01,2) }else{ round(max(dt_),2) }
inds_ = which(upWhisker_ < dt_)
len_ = length(inds_)
if(0 < length(inds_))
{
cols5_ = colorRampPalette(colors = c('skyblue','blue4'))(len_)
}
cols_ = c(cols1_, cols2_, cols3_, cols4_, cols5_)
col_step = 2*(diff(range(dt_))/length(cols_))
for(plate_ in 1:numOfPlates)
{
LEGEND_SHOW = F
if(plate_ == 1) { LEGEND_SHOW = T }
dt_mat = MFI_mats[[plate_]][[chnl_]]/1e4 # plate plate_ showing MFI of channel chnl_
dt_mat = rbind(dt_mat, MAX = c(max_, rep(0,ncol(dt_mat)-1)))
p_ = pheatmap(mat = dt_mat,
cluster_cols = F,
cluster_rows = F,
show_rownames = T, cellheight = 20,
show_colnames = T, cellwidth = 20,
main = paste0('Plate: ',plate_,', Channel: ', chnl_),
fontsize = 7,
border_color = 'grey90', color = cols_,
breaks = c(min(dt_)-col_step,dt_), # in pheatmap color intervals (showing lower and uper bounds) are open on the left and closed on the right
legend = LEGEND_SHOW,
legend_breaks = round(c(0,u1_,u2_, u3_, u4_, u5_),2),
annotation_col = NA, silent = T)
plot_list[[plate_]] = p_[[4]]
}
grid.arrange(arrangeGrob(grobs= plot_list, nrow = nrow_, ncol = ncol_))
}
graphics.off()
}
}
return(NULL)
}
|
4058049c12853428c747a7cb427e8d07868c13d3
|
1a27aa8d067869d7079bc6fb5732dfa1a413c76a
|
/man/DataSet.Rd
|
b648ec06c5c38227034657d9f7479a3ebb4b8160
|
[] |
no_license
|
YuLab-SMU/clusterProfiler
|
60cff769b53943f61f535d58967c76432f0c029c
|
e7abcb5c666df11806f4b798edfea152e93d8a56
|
refs/heads/devel
| 2023-08-30T18:29:32.320507
| 2023-08-15T04:03:55
| 2023-08-15T04:03:55
| 20,360,040
| 565
| 150
| null | 2023-09-08T15:47:44
| 2014-05-31T16:34:32
|
R
|
UTF-8
|
R
| false
| true
| 574
|
rd
|
DataSet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{DataSet}
\alias{DataSet}
\alias{gcSample}
\alias{kegg_species}
\alias{kegg_category}
\alias{DE_GSE8057}
\title{Datasets
gcSample contains a sample of gene clusters.}
\description{
Datasets
gcSample contains a sample of gene clusters.
Datasets
kegg_species contains kegg species information
Datasets
kegg_category contains kegg pathway category information
Datasets
DE_GSE8057 contains differentially expressed genes obtained from the GSE8057 dataset
}
\keyword{datasets}
|
e37b407a73fb3dd08f554a039d6e66c321fb02d6
|
956f493986a2e4836cd7d5565fb23be636645a24
|
/man/func_train.Rd
|
0ecdf89d4533cb70bf9c8805409004480fe9602e
|
[
"MIT"
] |
permissive
|
Bhaskers-Blu-Org2/CNTK-R
|
d2fcb0eab33a5f6a9134fa20937622b308590b4a
|
5e2b8f5320a48dc492fa7dd1654532dd9e3e856a
|
refs/heads/master
| 2021-08-20T10:36:09.764093
| 2017-11-28T22:55:53
| 2017-11-28T22:55:53
| 276,122,540
| 0
| 1
|
MIT
| 2020-06-30T14:28:11
| 2020-06-30T14:28:10
| null |
UTF-8
|
R
| false
| true
| 4,812
|
rd
|
func_train.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops-function.R
\name{func_train}
\alias{func_train}
\title{Train Function Model}
\usage{
func_train(func, minibatch_source, minibatch_size = 32, streams = NULL,
model_inputs_to_streams = NULL, parameter_learners = c(),
callbacks = c(), progress_frequency = NULL, max_epochs = NULL,
epoch_size = NULL, max_samples = NULL)
}
\arguments{
\item{func}{- The CNTK `Function` instance on which to apply the operation}
\item{minibatch_source}{(MinibatchSource or list of matrices) --
data source used for training. For large data, use a MinibatchSource. For
small data, pass a list of matrices. The number of streams/arrays
must match the number of arguments of self.}
\item{minibatch_size}{(int or minibatch_size_schedule, defaults to 32) --
minibatch size (or schedule) for training}
\item{streams}{(list) -- (only if minibatch_source is a data reader)
the streams of the minibatch_source in argument order. Not to be given if
minibatch_source is specified as numpy/scipy arrays rather than a data
reader.}
\item{model_inputs_to_streams}{(dict) -- alternative to streams, specifying
the mapping as a map from input variables to streams}
\item{parameter_learners}{(list) -- list of learners}
\item{callbacks}{- list of callback objects, which can be of type
ProgressWriter (for logging), CheckpointConfig (for check-pointing),
TestConfig (for automatic final evaluation on a test set), and
CrossValidationConfig (for cross-validation based training control).}
\item{progress_frequency}{(int) -- frequency in samples for aggregated
progress printing. Defaults to epoch_size if given, or None otherwise}
\item{max_epochs}{(int, defaults to 1) -- maximum number of epochs used for
training; requires epoch_size}
\item{epoch_size}{(int) -- in CNTK, epoch size means the number of samples
between outputting summary information and/or checkpointing. This must be
specified unless the user directly passes numpy/scipy arrays for the
minibatch_source.}
\item{max_samples}{(int) -- maximum number of samples used for training;
mutually exclusive with max_epochs}
}
\description{
Trains a model, given by its criterion function, using the specified
training parameters and configs. Different aspects of training such as data
sources, checkpointing, cross validation, progress printing can be
configured using the corresponding config classes.
}
\details{
The input data can be specified as a data reader (MinibatchSource) for large
corpora; or directly as numpy/scipy arrays if the data is so small that it
is feasible to keep it all in RAM.
Data is processed in minibatches. The minibatch size defaults to 32, which
is a choice that commonly works well. However, for maximum efficiency, we
recommend to experiment with minibatch sizes and choose the largest that
converges well and does not exceed the GPU RAM. This is particularly
important for distributed training, where often, the minibatch size can be
increased throughout the training, which reduces data bandwidth and thus
speeds up parallel training.
If input data is given through a data reader (as opposed to directly as a
numpy/scipy array), the user must also specify the epoch size. This is
because data readers are used for large corpora, and the traditional
definition of epoch size as number of samples in the corpus is not very
relevant. Instead, CNTK really means the number of samples between summary
actions, such as printing training progress, adjusting the learning rate,
and/or checkpointing the model.
The function returns an object that contains these members: epoch_summaries
is a list that contains the progression of epoch loss (.loss) and metric
(.metric) values and the corresponding number of labels (.samples) that they
were averaged over. This is the same value that a progress printer would
print as epoch summaries. updates is a similar list with the more
fine-grained minibatch updates. If a TestConfig was specified, then
test_summary is the metric and sample count on the specified test set for
the final model.
A number of callback mechanisms can optionally be specified as a list as
callbacks. CNTK has a fixed set of callback types, and only those types are
allowed in the callbacks list: An object of type ProgressWriter from
cntk.logging is used for progress logging; a CheckpointConfig configures the
checkpointing mechanism, which keeps copies of models at regular intervals
and allows to seamlessly restart from a last checkpoint; a TestConfig allows
to specify a test set that is evaluated at the end of the training; and a
CrossValidationConfig specifies a user callback that can be used to adjust
learning hyper-parameters or to denote to stop training, optionally based on
a separate cross-validation data set.
}
|
602bf0c0463cfb0250f714173d4f64bbef918d5c
|
3228c0f11eea3ee9c9101c42cd81c48f4a4c557c
|
/cachematrix.R
|
a6f5661034f27a523cbef5312a9aeb3c959d60a5
|
[] |
no_license
|
nishantgupta95/ProgrammingAssignment2
|
8c1654e7629d195b11a26fc7e7845d1a78d4dac5
|
dd29f3184cede26428fcf6dc88bae32c8980d4b6
|
refs/heads/master
| 2020-05-27T11:37:28.574106
| 2019-05-25T20:04:39
| 2019-05-25T20:04:39
| 188,603,621
| 0
| 0
| null | 2019-05-25T19:14:18
| 2019-05-25T19:14:18
| null |
UTF-8
|
R
| false
| false
| 939
|
r
|
cachematrix.R
|
## Both functions cache the inverse of the matrix and calculates the inverse of matrix in case of
## matrix doesn't changed.
## Creates A Special Matrix Object that can cache its Inverse.
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor closures over the matrix `x` and its
## cached inverse:
##   set(y)     - replace the stored matrix and invalidate the cache
##   get()      - return the stored matrix
##   setinv(v)  - store a computed inverse in the cache
##   getinv()   - return the cached inverse, or NULL if none is cached yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed, so the old inverse is stale
    },
    get = function() {
      x
    },
    setinv = function(invrse) {
      cached_inverse <<- invrse
    },
    getinv = function() {
      cached_inverse
    }
  )
}
## Computes Inverse of The special Matrix Object returned by makeCacheMatrix Function.
## If the inverse has already been calculated , then the cacheSolve should retrieve the cache.
## Compute (or retrieve) the inverse of the special matrix object `x`
## created by makeCacheMatrix(). On a cache hit, a message is emitted and
## the stored inverse is returned; on a miss, the inverse is computed with
## solve(), memoized via x$setinv(), and returned. Extra arguments in `...`
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, then fall through to return.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
b0fb94face67f36af3a91f36d67d0a07f5f66e1d
|
cc092e75fd111024be2e6c17d5cff76e59c51283
|
/man/multiBed3sp.Rd
|
c9186aa9381c1662d61283c63d058023311a1bab
|
[] |
no_license
|
yingxi-kaylee/ssCTPR
|
4c319a2a5cb69fc7160746997facccd8a2cec7ca
|
3930134f13e914f249a31d35f90cee762fa945de
|
refs/heads/master
| 2023-08-28T20:51:35.414297
| 2021-11-14T01:28:01
| 2021-11-14T01:28:01
| 287,055,174
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 768
|
rd
|
multiBed3sp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{multiBed3sp}
\alias{multiBed3sp}
\title{Multiply genotypeMatrix by a matrix (sparse)}
\usage{
multiBed3sp(
fileName,
N,
P,
beta,
nonzeros,
colpos,
ncol,
col_skip_pos,
col_skip,
keepbytes,
keepoffset,
trace
)
}
\arguments{
\item{fileName}{location of bam file}
\item{N}{number of subjects}
\item{P}{number of positions}
\item{col_skip_pos}{which variants should we skip}
\item{col_skip}{which variants should we skip}
\item{keepbytes}{which bytes to keep}
\item{keepoffset}{what is the offset}
\item{input}{the matrix}
}
\value{
an armadillo genotype matrix
}
\description{
Multiply genotypeMatrix by a matrix (sparse)
}
\keyword{internal}
|
c2c15d04979ec168b69d0da76cbdba45a7d515f7
|
2b45f547209112cb8040dc2c07492ad847e5c8c3
|
/resequencing/phylogenetics/snapp_treemix/reformat_plink.R
|
b53e2a66e2e9e11fc36d2c48c176893f2ab70b4a
|
[] |
no_license
|
erikenbody/Bioinformatics_Scripts
|
2ecaef2437c169385b9f40ced621d7f41eb9b292
|
0aa3edfb0a5ebd9a848bb8e5eb1105eb54299fe6
|
refs/heads/master
| 2022-06-07T11:38:17.824937
| 2022-05-26T18:50:56
| 2022-05-26T18:50:56
| 114,473,616
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
reformat_plink.R
|
## Re-number the SNP column of a PLINK stratified allele-frequency file
## (wsfw_plink.frq.strat) with sequential integer IDs, restore the new SNP
## column to second position, and write the result as a tab-separated file.
strat <- read.table("wsfw_plink.frq.strat", header = TRUE)
## Drop the original SNP identifiers, then append a sequential ID column.
## rep(..., each = 5) gives five consecutive rows the same ID, truncated by
## length.out to exactly nrow(strat) values.
strat$SNP <- NULL
strat$SNP <- rep(1:nrow(strat), length.out = nrow(strat), each = 5)
## The fresh SNP column landed last (position 8); move it back to position 2.
strat <- strat[c(1, 8, 2:7)]
write.table(strat, "wsfw_plink_rf.frq.strat", row.names = FALSE, quote = FALSE, col.names = TRUE, sep = "\t")
|
bbeda055446f0f1ca504ae8a6b097605bc8d5c9b
|
45d332bd2e7b81ef8345b6339ed8d60a7540f3d9
|
/src/test4.R
|
90b040eda2a5e81eaf55e8f112b4b4d9afbbfd17
|
[] |
no_license
|
Chaitany1729/GSoC_SLOPE_tests
|
5d104255463678a807b1c3685dc820db4b1196ce
|
95ffc9f999c6d983ca51ea7891df238649444b5f
|
refs/heads/master
| 2023-03-29T12:08:00.668774
| 2021-04-02T14:05:06
| 2021-04-02T14:05:06
| 353,074,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 598
|
r
|
test4.R
|
## Benchmark the C++ FastProxSL1() implementation (compiled from
## FastProxSL1.cpp) against SLOPE's prox_sorted_L1() on random inputs,
## recording elapsed wall-clock time for each in time_fastprox /
## time_proxsort.
library(Rcpp)
library(SLOPE)
sourceCpp('FastProxSL1.cpp')

## NOTE(review): runif(1, ...) draws a single random length, so the loop
## below runs exactly once; presumably runif(k, ...) with k > 1 was intended
## to benchmark several problem sizes -- confirm.
length_list <- as.integer(runif(1, 100, 10000))

## Preallocate the timing vectors instead of growing them with c() inside
## the loop.
time_fastprox <- numeric(length(length_list))
time_proxsort <- numeric(length(length_list))

for (i in seq_along(length_list)) {
  vector_length <- length_list[i]
  # Both prox operators are called on inputs sorted in decreasing order.
  y <- sort(runif(vector_length, 0, 1000), decreasing = TRUE)
  lambda <- sort(runif(vector_length, 0, 1000), decreasing = TRUE)

  # Time the Rcpp implementation.
  t0 <- proc.time()['elapsed']
  FastProxSL1(y, lambda)
  time_fastprox[i] <- proc.time()['elapsed'] - t0

  # Time the reference SLOPE implementation.
  t0 <- proc.time()['elapsed']
  prox_sorted_L1(y, lambda)
  time_proxsort[i] <- proc.time()['elapsed'] - t0
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.