blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e86f0bf8e9800ac4f9aa7afc1098697ee3806b3d
|
eca17aa44d903fb95c24412ae5d342662be0e01f
|
/scripts/utils.R
|
1060a67e4efcdfed8dfad169e4f372eb09b573c5
|
[] |
no_license
|
scworland/wu-waterbudget
|
936409c5a119f32ea31dff0303cb8d3e0e05eae4
|
6d72fa17e05def76ae7f229f149778c94f13e760
|
refs/heads/master
| 2021-09-20T11:04:15.224889
| 2018-08-08T16:26:29
| 2018-08-08T16:26:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,451
|
r
|
utils.R
|
# Spatially-weighted interpolation from one set of polygons to another.
#
# Redistributes the column `value` from the `from` polygons onto the `to`
# polygons, using a raster weighting variable (e.g. population) to build
# areal weights. The weighting raster is aggregated with either sum() or
# mean(), controlled by `sum`.
#
# Args:
#   from        sf object holding the source polygons and the `value` column
#   value       character; name of the column in `from` to redistribute
#   to          sf object holding the target polygons
#   to_group_id character; name of the id column in `to` to summarize by
#   weight_var  VeloxRaster supplying the weighting surface
#   sum         logical; TRUE aggregates with sum(), FALSE with mean()
#
# Returns: result of summarizing the intersected geometry, one row per
# `to_group_id` with the redistributed `to_value`.
sw_weighted_interp <- function(from,value,to,to_group_id,weight_var,sum=TRUE){
  # several checks
  if(!inherits(weight_var,"VeloxRaster")) stop("'weight_var' must be of class VeloxRaster")
  if(!inherits(from,"sf")) stop("'from' must be of class sf")
  #if(all(st_is(from, "MULTIPOLYGON"))==FALSE) stop("'from' geometry type must be 'MULTIPOLYGON'")
  if(!inherits(to,"sf")) stop("'to' must be of class sf")
  #if(all(st_is(to, "MULTIPOLYGON"))==FALSE) stop("'from' geometry type must be 'MULTIPOLYGON'")

  # The original sum/mean branches were identical except for the aggregation
  # function, so choose it once and share the rest of the pipeline.
  agg_fun <- if(isTRUE(sum)){
    function(x) sum(x, na.rm=TRUE)
  } else {
    function(x) mean(x, na.rm=TRUE)
  }

  # aggregate weighting variable to "from" geometry
  from_weight_var <- weight_var$extract(st_transform(from,weight_var$crs),fun=agg_fun,df=TRUE)

  # add weight var to "from" geometry
  from$from_weight_var <- from_weight_var$out

  # find intersection between "from" and "to"
  from_to <- from %>%
    st_intersection(to) %>%
    st_cast('MULTIPOLYGON') %>%
    st_transform(weight_var$crs)

  # aggregate weighting variable to "to" geometry
  to_weight_var <- weight_var$extract(from_to,fun=agg_fun,df=TRUE)

  # calculate weight and new value, then roll up to the "to" ids using the
  # same aggregation (sum or mean) as the weighting step
  result <- from_to %>%
    dplyr::mutate(to_weight_var = to_weight_var$out,
                  p = to_weight_var/from_weight_var,
                  to_value = round(!!sym(value)*p,3)) %>%
    group_by(!!sym(to_group_id)) %>%
    dplyr::summarize(to_value = agg_fun(to_value))

  return(result)
}
|
c1cf630e760340b28aed1a8259843464e3a5eb17
|
a0f4a9476114a466b21771a2ac7eec86d1b0e0d2
|
/Jason_detectWideScopeST.R
|
3a7bf05409890db4e6cf9caad337ab0f78f37bb0
|
[] |
no_license
|
jmostowy1/RS_Acoustics
|
88b4f4043b16f79b80492a28753fbae7023a5d61
|
75a99f6ee8db68820c1aa2fac5e0d1d0cd632acf
|
refs/heads/master
| 2021-02-11T00:48:05.520669
| 2020-03-26T00:36:49
| 2020-03-26T00:36:49
| 244,425,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,596
|
r
|
Jason_detectWideScopeST.R
|
# Detect wide-scope single targets in Echoview files via COM scripting.
# For each EV file with GPS data in the metadata table, this script builds
# (if absent) a fish-school region bitmap, masks school samples out of the
# processed TS data, and creates a split-beam single-target detection
# variable, saving the file when done.
source("init_library.R")

met = read_csv(local.EVMetadata.loc)
met = met %>% filter(has_gps == TRUE, category %in% c("ES_Transect", "Non_Transect_Useable"))

for(i in 1:nrow(met)){
  ev = ev.restart(ev)
  EV.File = ev$OpenFile(met$evs[i])
  aco.var.list = EVListAcoVars(EV.File)

  # Check to see if the wide-scope single targets object already exists.
  # If so, close the file and move on to the next one.
  if('[T4 ST] Filtered in and out border single targets [for fish track detection 1]' %in% aco.var.list){
    EVCloseFile(EV.File)
    next
  }

  # Call to Sv, TS, and angular position raw variables.
  SvRaw <- EV.File[['Variables']]$FindByName('Fileset1: Sv pings T1') #Sv pings
  TSRaw <- EV.File[['Variables']]$FindByName('Fileset1: TS pings T1') #TS pings
  apRaw <- EV.File[['Variables']]$FindByName('Fileset1: angular position pings T1') #angular position pings

  # Call to processed Sv variables
  SvProc <- EV.File[['Variables']]$FindByName('Full_Processed_Sv')
  TSProc = EV.File[['Variables']]$FindByName('Full_Processed_TS')

  # Call to school variable
  # BUG FIX: was FindByname() (lowercase "n"), which does not match the COM
  # method used everywhere else in this script and would fail at runtime;
  # corrected to FindByName().
  school.var <- EV.File[['Variables']]$FindByName('[T4 Sv] Fish-school samples [for aggregation detection]')

  # Create Region Bitmap for Fish-School Regions = FALSE
  if(!('[Tx Bn] Fish-school regions = FALSE' %in% aco.var.list)){
    new.reg.bm = EVNewAcousticVar(EV.File,oldVarName = school.var[["Name"]],enum=36)
    EVRenameAcousticVar(EV.File,acoVarName = new.reg.bm[["Name"]], newName = '[Tx Bn] Fish-school regions = FALSE')
    xx = invisible(readline(prompt = 'Change the Fish-school regions = FALSE bitmap Region to Schools and Invert the Output, then press [ENTER]'))
    # AS OF ECHOVIEW V. 10, THERE IS NO COM OBJECT TO ACCESS REGION BITMAP VARIABLE PROPERTIES. SO YOU MUST MANUALLY GO IN AND SET THE REGION TO SCHOOLS AND SELECT 'INVERT OUTPUT'
  }
  sch.F.rbm = EV.File[['Variables']]$FindByName('[Tx Bn] Fish-school regions = FALSE')

  # Mask Processed TS Data by school FALSE bitmap
  if(!('[T4 TS] Fish-school samples = NO DATA' %in% aco.var.list)){
    mask.schoolRegionsTS.false = EVNewAcousticVar(EV.File,oldVarName = TSProc[["Name"]],enum=3)
    EVRenameAcousticVar(EV.File,acoVarName = mask.schoolRegionsTS.false[["Name"]],newName = '[T4 TS] Fish-school samples = NO DATA')
    TS.school.masked = EV.File[['Variables']]$FindByName('[T4 TS] Fish-school samples = NO DATA')
    sch.F.rbm = EV.File[['Variables']]$FindByName('[Tx Bn] Fish-school regions = FALSE')
    TS.school.masked$SetOperand(2, sch.F.rbm) #sch.F.rbm = [Tx Bn] Fish-school regions = FALSE
    TS.school.masked_PROP = TS.school.masked[['Properties']][['Mask']]
    TS.school.masked_PROP[['Value']] = 'no data'
  }

  # Create the wide-scope single-target detection variable (split-beam).
  if(!('[T4 ST] Filtered in and out border single targets [for fish track detection 1]' %in% aco.var.list)){
    M5 <- EV.File[['Variables']]$FindByName('[T4 TS] Fish-school samples = NO DATA')
    single.targ.1 = EVNewAcousticVar(EV.File,oldVarName = M5[["Name"]],enum=75)
    EVRenameAcousticVar(EV.File,acoVarName = single.targ.1[["Name"]],newName = '[T4 ST] Filtered in and out border single targets [for fish track detection 1]')
    STDet <- EV.File[['Variables']]$FindByName('[T4 ST] Filtered in and out border single targets [for fish track detection 1]')
    STDet$SetOperand(2,apRaw) #apRaw = raw angular position data
    STDetProp <- STDet[['Properties']][['SingleTargetDetectionSplitBeamParameters']]
    STDetProp[['MaximumBeamCompensation']] <- 9
    xx = invisible(readline("Change the STD pulse envelope parameters to 0.5 min and 2 max"))
  }

  EVSaveFile(EV.File)
  EVCloseFile(EV.File)
  print(i)
}
|
7ed150c2784b836e7609f32c76c73efd669089de
|
a81e1ca6fe4c13be28d29f639ec768b51d016501
|
/tests/testthat/test-extradefault.R
|
bdf873087d2f092107d16d33ddc052efc5b4bdd0
|
[] |
no_license
|
cran/bain
|
d5f139bd8e644ffc339f8e6eea9c702cddfa5329
|
5bfb947c1569788eb959f5b45a39126b7f9b0ed6
|
refs/heads/master
| 2021-12-25T13:33:41.105293
| 2021-12-06T12:20:02
| 2021-12-06T12:20:02
| 169,492,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,640
|
r
|
test-extradefault.R
|
# test construction prior and posterior with group_parameters=2 and joint_parameters=2
data(sesamesim)
est1 <-c(1,2)
est2 <-c(3,4)
est3 <-c(-1,1)
estimate <- c(est1,est2,est3)
names(estimate) <- c("pre1", "post1","pre2", "post2","a1", "a2")
ngroup<-c(100,50)
# identity within-group covariance matrices for the two groups
cov1 <-matrix(c(1,0,0,0,
0,1,0,0,
0,0,1,0,
0,0,0,1),nrow=4,ncol=4)
cov2 <- matrix(c(1,0,0,0,
0,1,0,0,
0,0,1,0,
0,0,0,1),nrow=4,ncol=4)
invtotcov <- matrix(c(1,0,0,0,0,0,
0,1,0,0,0,0,
0,0,1,0,0,0,
0,0,0,1,0,0,
0,0,0,0,2,0,
0,0,0,0,0,2),nrow=6,ncol=6)
# expected posterior covariance = inverse of the combined precision matrix
totcov <- solve(invtotcov)
pricov1 <-matrix(c(200,0,0,0,
0,200,0,0,
0,0,200,0,
0,0,0,200),nrow=4,ncol=4)
pricov2 <- matrix(c(100,0,0,0,
0,100,0,0,
0,0,100,0,
0,0,0,100),nrow=4,ncol=4)
priinvtotcov <- matrix(c(.005,0,0,0,0,0,
0,.005,0,0,0,0,
0,0,.01,0,0,0,
0,0,0,.01,0,0,
0,0,0,0,.015,0,
0,0,0,0,0,.015),nrow=6,ncol=6)
# expected prior covariance = inverse of the combined prior precision matrix
pritotcov <- solve(priinvtotcov)
covariance<-list(cov1,cov2)
# bain samples; fix the seed so results are reproducible across runs
set.seed(100)
y <-bain(estimate, "pre1 - pre2 = post1 - post2;
pre1 - pre2 > post1 - post2" , n=ngroup, Sigma=covariance,
group_parameters=2, joint_parameters = 2)
# TESTS
test_that("Bain mutual", {expect_equal(y$independent_restrictions, 1)})
test_that("Bain mutual", {expect_equal(y$b, c(.005,.01))})
test_that("Bain mutual", {expect_equal(as.vector(y$posterior), as.vector(totcov))})
test_that("Bain mutual", {expect_equal(as.vector(y$prior), as.vector(pritotcov))})
des2<-summary(y, ci = 0.95)
test_that("summary", {expect_equal(des2$n , c(100,100,50,50,150,150))})
# test abuse of one-group input with group_parameters=1 and joint_parameters=1
# this is properly processed by bain and summary
estimate <-c(1,2)
names(estimate) <- c("a1", "a2")
ngroup<-100
cov1 <-matrix(c(1,0,
0,1),nrow=2,ncol=2)
covariance<-list(cov1)
set.seed(100)
y <-bain(estimate, "a1=a2" , n=ngroup, Sigma=covariance,
group_parameters=1, joint_parameters = 1)
test_that("Bain mutual", {expect_equal(as.vector(y$posterior), c(1,0,0,1))})
test_that("Bain mutual", {expect_equal(as.vector(y$prior), c(100,0,0,100))})
des1<-summary(y, ci = 0.95)
test_that("summary", {expect_equal(des1$n , c(100,100))})
# test the computation of the bfmatrix
# BFmatrix[i,j] should equal the ratio of the posterior model probabilities
# PMPb[i]/PMPb[j] of the corresponding hypotheses
sesamesim$site <- as.factor(sesamesim$site)
anov <- lm(sesamesim$postnumb~sesamesim$site-1)
set.seed(100)
z<-bain(anov, "site1=site2=site3=site4=site5;
site2>site5>site1>site3=site4;
site1=site2>site3=site4>site5;
site1<site2>site3<site4>site5;
site1=site5>site3=site4<site2;
site2>site3>site4;
(site1,site2,site5)>(site3,site4);
site2>(site1,site3,site4,site5)")
test_that("Bain mutual", {expect_equal(z$BFmatrix[7,8],z$fit$PMPb[7]/z$fit$PMPb[8])})
test_that("Bain mutual", {expect_equal(z$BFmatrix[2,5],z$fit$PMPb[2]/z$fit$PMPb[5])})
test_that("Bain mutual", {expect_equal(z$BFmatrix[2,8],z$fit$PMPb[2]/z$fit$PMPb[8])})
test_that("Bain mutual", {expect_equal(z$BFmatrix[8,2],z$fit$PMPb[8]/z$fit$PMPb[2])})
test_that("Bain mutual", {expect_equal(z$BFmatrix[2,1],z$fit$PMPb[2]/z$fit$PMPb[1])})
test_that("Bain mutual", {expect_equal(z$BFmatrix[1,3],z$fit$PMPb[1]/z$fit$PMPb[3])})
# test the computation of b and j
sesamesim$site <- as.factor(sesamesim$site)
anov <- lm(sesamesim$postnumb~sesamesim$site-1)
set.seed(100)
z<-bain(anov, "site1=site2=site3=site4=site5;
site2>site5>site1>site3=site4;
site1=site2>site3=site4>site5;
site1<site2>site3<site4>site5;
site1=site5>site3=site4<site2;
site2>site3>site4;
(site1,site2,site5)>(site3,site4);
site2>(site1,site3,site4,site5)")
test_that("Bain mutual", {expect_equal(z$b,c(.8/60,.8/55,.8/64,.8/43,.8/18))})
test_that("Bain mutual", {expect_equal(z$independent_restrictions,4)})
# same b/independent_restrictions expected from an equivalent hypothesis set
sesamesim$site <- as.factor(sesamesim$site)
anov <- lm(sesamesim$postnumb~sesamesim$site-1)
set.seed(100)
z1<-bain(anov, "site1 =site2; site2 > site3; site3 < site4; site4=site5")
test_that("Bain mutual", {expect_equal(z1$b,c(.8/60,.8/55,.8/64,.8/43,.8/18))})
test_that("Bain mutual", {expect_equal(z1$independent_restrictions,4)})
|
eab7ebca6293ae05474f363e692591dda45cbe63
|
4e0d6c32e666ddcf17963f8615c736d5fc3eb301
|
/man/cc05-1-TNoMSummary-class.Rd
|
c186d90c2bcc006fb976ad388846daa01e9cdbad
|
[] |
no_license
|
cran/ClassComparison
|
ff522e3ab4bdf6d38be6956f0f72c05ebb980f1d
|
6118a8471bbaad8167ed206ce3fd770855435e5e
|
refs/heads/master
| 2020-06-24T14:29:47.094027
| 2019-05-06T15:40:12
| 2019-05-06T15:40:12
| 96,940,058
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 970
|
rd
|
cc05-1-TNoMSummary-class.Rd
|
\name{TNoMSummary-class}
\alias{TNoMSummary}
\alias{TNoMSummary-class}
\alias{show,TNoMSummary-method}
\docType{class}
\title{Class "TNoMSummary"}
\description{
An implementation class. Users are not expected to create these objects
directly; they are produced as return objects from the summary method for
\code{TNoM}.
}
\section{Slots}{
\describe{
\item{\code{TNoM}:}{object of class \code{TNoM}; the fitted \code{TNoM}
object being summarized. }
\item{\code{counts}:}{object of class \code{numeric}; the number of genes
achieving each possible number of misclassifications. }
}
}
\section{Methods}{
\describe{
\item{show}{\code{signature(object = TNoMSummary)}: Print the
object, which contains a summary of the underlying \code{TNoM}
object. In particular, the summary reports the number of genes
achieving each possible number of misclassifications.}
}
}
\author{
Kevin R. Coombes \email{krc@silicovore.com}
}
\seealso{
\code{\link{TNoM}}
}
\examples{
showClass("TNoMSummary")
}
\keyword{classes}
|
f95e1f75fed5f815d9613eefe33185eecbedfa28
|
38027635e4309eaa7850984657c9b62c966ff313
|
/man/Inf_criteria.Rd
|
750ab8a606d1b63d531598b81a9d2d1c7f1b5535
|
[] |
no_license
|
emrahgecili/BPReg
|
4ccd989aaf8b005de30f813884dee17c8c6805f8
|
e07c73107d93947110ad030fc9930134a37e9d1e
|
refs/heads/master
| 2023-04-12T12:30:09.857196
| 2022-10-02T20:40:09
| 2022-10-02T20:40:09
| 287,898,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 389
|
rd
|
Inf_criteria.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Inf_criteria.R
\name{Inf_criteria}
\alias{Inf_criteria}
\title{Returns information criteria such as AIC and BIC.}
\usage{
Inf_criteria(M)
}
\arguments{
\item{M}{Final MCMC output after burnin.}
}
\value{
AIC, BIC, log-likelihood.
}
\description{
Returns AIC, BIC, log-likelihood.
}
\examples{
Inf_criteria(M)
}
|
02f94d77b084f59f20c6b4a0098b628a2e8d93ef
|
608adcf47ef5c776429dfe2e555c20c0ef54547a
|
/inst/doc/widals.R
|
3f16f8efaff5d5a930b5067eceeaa78c6edff3e0
|
[] |
no_license
|
cran/widals
|
b722ad1e1e0938998461d8fe83e8b76437cbc031
|
c431b52c0455ad4568072220838b571bacc3b6ba
|
refs/heads/master
| 2021-05-15T01:43:27.321897
| 2019-12-07T21:20:02
| 2019-12-07T21:20:02
| 17,700,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,778
|
r
|
widals.R
|
### R code from vignette source 'widals.Snw'
### Sweave-extracted demo script for the widals package (spatio-temporal
### modeling of the O3 ozone dataset with snowfall-parallelized fitting).
### NOTE(review): several chunks rely on globals (p.ndx.ls, f.d, our.cost)
### that appear to be created by FUN.source() / fun.load.* -- confirm against
### the widals package before reordering anything.
###################################################
### code chunk number 1: widals.Snw:151-154
###################################################
options(width=49)
options(prompt=" ")
options(continue=" ")
###################################################
### code chunk number 2: b1
###################################################
# Load data and set up dimensions: Z.all is a tau x n matrix of observations
# (rows = days, cols = stations); locs swapped to (lon, lat) column order.
options(stringsAsFactors=FALSE)
library(snowfall)
k.cpus <- 2 #### set the number of cpus for snowfall
library(widals)
data(O3)
Z.all <- as.matrix(O3$Z)[366:730, ]
locs.all <- O3$locs[ , c(2,1)]
hsa.all <- O3$helevs
xdate <- rownames(Z.all)
tau <- nrow(Z.all)
n.all <- ncol(Z.all)
xgeodesic <- TRUE
###################################################
### code chunk number 3: b2
###################################################
# Build covariates: Ht holds annual harmonics from day-of-year; Hs is a
# constant spatial intercept. Then fit with MSS.snow under snowfall.
Z <- Z.all
locs <- locs.all
n <- n.all
dateDate <- strptime(xdate, "%Y%m%d")
doy <- as.integer(format(dateDate, "%j"))
Ht <- cbind( sin(2*pi*doy/365), cos(2*pi*doy/365) )
Hs.all <- matrix(1, nrow=n.all)
Hst.ls.all <- NULL
Hs <- Hs.all
Hst.ls <- Hst.ls.all
Ht.original <- Ht
##########################
rm.ndx <- create.rm.ndx.ls(n, 14)
b.lag <- 0
train.rng <- 30:tau
test.rng <- train.rng
GP <- c(1/10, 1)
k.glob <- 9
rho.upper.limit <- 100
rgr.lower.limit <- 10^(-7)
sds.mx <- seq(2, 0.01, length=k.glob) * matrix(1, k.glob, length(GP))
run.parallel <- TRUE
sfInit(TRUE, k.cpus)
FUN.source <- fun.load.hals.a
FUN.source()
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
###################################################
### code chunk number 4: a2
###################################################
# One-shot ALS fit; report RMSE of residuals over the test range.
Z.als <- Hals.snow(1, Z = Z, Hs = Hs, Ht = Ht, Hst.ls = Hst.ls,
b.lag = b.lag, GP.mx = matrix(GP, 1, 2))
resids <- Z-Z.als
sqrt( mean( resids[test.rng, ]^2 ) )
###################################################
### code chunk number 5: a3
###################################################
# Add station elevation as a spatial covariate and refit.
Hs.all <- cbind(matrix(1, nrow=n.all), hsa.all)
Hs <- Hs.all
GP <- c(1/10, 1)
sfInit(TRUE, k.cpus)
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
###################################################
### code chunk number 6: a4
###################################################
# Add incident solar radiation (space-time covariate) and refit.
Hst.ls.all <- H.Earth.solar(locs[ , 2], locs[ , 1], dateDate)
Hst.ls <- Hst.ls.all
GP <- c(1/10, 1)
sfInit(TRUE, k.cpus)
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
###################################################
### code chunk number 7: a5
###################################################
# Add the solar x elevation interaction to the space-time covariates.
Hst.ls.all2 <- list()
for(tt in 1:tau) {
Hst.ls.all2[[tt]] <- cbind(Hst.ls.all[[tt]], Hst.ls.all[[tt]]*hsa.all)
colnames(Hst.ls.all2[[tt]]) <- c("ISA", "ISAxElev")
}
Hst.ls <- Hst.ls.all2
GP <- c(1/10, 1)
sfInit(TRUE, k.cpus)
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
###################################################
### code chunk number 8: a6
###################################################
Hals.ses(Z, Hs, Ht, Hst.ls, GP[1], GP[2], b.lag, test.rng)
###################################################
### code chunk number 9: a7
###################################################
# Refit with standardized covariates.
Z <- Z.all
Hst.ls <- stnd.Hst.ls(Hst.ls.all2)$sHst.ls
Hs <- stnd.Hs(Hs.all)$sHs
Ht <- stnd.Ht(Ht, nrow(Hs))
GP <- c(1/10, 1)
sfInit(TRUE, k.cpus)
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
###################################################
### code chunk number 10: a8
###################################################
# Standardize Z as well; rescale the resulting cost back to original units.
z.mean <- mean(Z.all)
z.sd <- sd(as.vector(Z.all))
Z <- (Z.all - z.mean) / z.sd
GP <- c(1/10, 1)
sfInit(TRUE, k.cpus)
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
our.cost * z.sd
###################################################
### code chunk number 11: a9 (eval = FALSE)
###################################################
##
## Z <- Z.all
## Hst.ls <- Hst.ls.all2
## Hs <- Hs.all
## Ht <- Ht.original
##
## FUN.source <- fun.load.widals.a
##
## d.alpha.lower.limit <- 0
##
## GP <- c(1/1000, 1, 0.01, 3, 1)
## cv <- 2
## lags <- c(0)
## b.lag <- 0
##
## sds.mx <- seq(2, 0.01, length=k.glob) * matrix(1, k.glob, length(GP))
## ltco <- -10
## stnd.d <- TRUE
##
## sfInit(TRUE, k.cpus)
## FUN.source()
##
## MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
## k.glob, k.loc.coef=7, X = NULL)
## sfStop()
##
###################################################
### code chunk number 12: a10
###################################################
# Full WIDALS fit (fun.load.widals.a); cv/b.lag settings here differ from
# the commented-out chunk above (cv = -2, b.lag = -1).
rm.ndx <- 1:n
Z <- Z.all
Hst.ls <- Hst.ls.all2
Hs <- Hs.all
Ht <- Ht.original
FUN.source <- fun.load.widals.a
d.alpha.lower.limit <- 0
GP <- c(1/1000, 1, 0.01, 3, 1)
cv <- -2
lags <- c(0)
b.lag <- -1
sds.mx <- seq(2, 0.01, length=k.glob) * matrix(1, k.glob, length(GP))
ltco <- -10
stnd.d <- TRUE
sfInit(TRUE, k.cpus)
FUN.source()
MSS.snow(FUN.source, NA, p.ndx.ls, f.d, sds.mx=sds.mx,
k.glob, k.loc.coef=7, X = NULL)
sfStop()
###################################################
### code chunk number 13: a10
###################################################
Hals.ses(Z, Hs, Ht, Hst.ls, GP[1], GP[2], b.lag, test.rng)
###################################################
### code chunk number 14: a11
###################################################
# Predict at the hold-out locations (O3$locs0) and build a color scale for
# plotting the predictions.
Z <- Z.all
Hst.ls <- Hst.ls.all2
Hs <- Hs.all
Ht <- Ht.original
Hs0 <- cbind(matrix(1, length(O3$helevs0)), O3$helevs0)
Hst0isa.ls <- H.Earth.solar(O3$locs0[ , 1], O3$locs0[ , 2], dateDate)
Hst0.ls <- list()
for(tt in 1:tau) {
Hst0.ls[[tt]] <- cbind(Hst0isa.ls[[tt]], Hst0isa.ls[[tt]]*O3$helevs0)
colnames(Hst0.ls[[tt]]) <- c("ISA", "ISAxElev")
}
locs0 <- O3$locs0[ , c(2,1)]
Z0.hat <- widals.predict(Z, Hs, Ht, Hst.ls, locs, lags, b.lag, Hs0, Hst0.ls,
locs0=locs0, geodesic=xgeodesic, wrap.around=NULL, GP, stnd.d, ltco)[10:tau, ]
ydate <- xdate[10:tau]
#xcol.vec <- heat.colors(max(round(Z0.hat)))
#xcol.vec <- rev(rainbow(max(round(Z0.hat))))
xcol.vec <- rev(rainbow(630)[1:max(round(Z0.hat))])
xleg.vals <- round( seq(1, max(Z0.hat)-1, length=(5)) / 1 ) * 1
xleg.cols <- xcol.vec[xleg.vals+1]
###################################################
### code chunk number 15: a11 (eval = FALSE)
###################################################
## for(tt in 1:nrow(Z0.hat)) {
## plot(0, 0, xlim=c(-124.1, -113.9), ylim=c(32.5, 42), type="n", main=ydate[tt])
## ## points(locs0[ , c(2,1)], cex=Z0.hat[tt,] / 30) ## uncomment to see sites
## this.zvec <- round(Z0.hat[tt,])
## this.zvec[ this.zvec < 1] <- 1
## this.color <- xcol.vec[ this.zvec ]
## points(locs0[ , c(2,1)], cex=1.14, col=this.color, pch=19 )
## #points(locs[ , c(2,1)], cex=Z[tt,] / 30, col="red")
## legend(-116, 40, legend=rev(xleg.vals), fill=FALSE, col=rev(xleg.cols),
## border=NA, bty="n", text.col=rev(xleg.cols))
## Sys.sleep(0.1)
## }
###################################################
### code chunk number 16: widals.Snw:511-523
###################################################
# Static version of the animation above: plot predictions for day tt = 180.
ydate <- xdate[10:tau]
tt <- 180
plot(0, 0, xlim=c(-124.1, -113.9), ylim=c(32.5, 42), type="n", main=ydate[tt],
xlab="", ylab="")
## points(locs0[ , c(2,1)], cex=Z0.hat[tt,] / 30) ## uncomment to see sites
this.zvec <- round(Z0.hat[tt,])
this.zvec[ this.zvec < 1] <- 1
this.color <- xcol.vec[ this.zvec ]
points(locs0[ , c(2,1)], cex=1.14, col=this.color, pch=19 )
#points(locs[ , c(2,1)], cex=Z[tt,] / 30, col="red")
legend(-116, 40, legend=rev(xleg.vals), fill=FALSE, col=rev(xleg.cols), border=NA,
bty="n", text.col=rev(xleg.cols))
|
94d1ea2b5b504c9fb2a393a3ff5bbbcaafc7bbe3
|
533b2cf6461e41d128530a76a529777a33a41bd8
|
/man/eda_locationDrift.Rd
|
faaa06c943428436e9984d20e0ba026bfeed4ab3
|
[
"MIT"
] |
permissive
|
minbad/quickEDA
|
dd0cce8c5959505332258a29104ee0cdf26b9166
|
d0cf5a8a4b515448c787b39b03a3aa4786a3b696
|
refs/heads/master
| 2021-08-23T21:52:59.145093
| 2017-12-06T18:25:10
| 2017-12-06T18:25:10
| 112,795,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 428
|
rd
|
eda_locationDrift.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eda.R
\name{eda_locationDrift}
\alias{eda_locationDrift}
\title{EDA Assumptions Testing - Drift in Location}
\usage{
eda_locationDrift(y)
}
\arguments{
\item{y}{Numeric vector.}
}
\description{
This function takes in a vector of numerical data and tests drift in location using
a simple regression model
}
\examples{
eda_locationDrift(rnorm(1000))
}
|
8da265dedda8d6a7a93ce81779dbd8a87211e426
|
bffd2afc5e5717528138b497b923c0ba6f65ef58
|
/man/ex09.65.Rd
|
1790d88b27d88a92d0e17164d0df03c1c1868147
|
[] |
no_license
|
dmbates/Devore6
|
850565e62b68e9c01aac8af39ff4275c28b4ce68
|
b29580f67971317b4c2a5e8852f8218ecf61d95a
|
refs/heads/master
| 2016-09-10T21:47:13.150798
| 2012-05-31T19:32:53
| 2012-05-31T19:32:53
| 4,512,058
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 622
|
rd
|
ex09.65.Rd
|
\name{ex09.65}
\alias{ex09.65}
\docType{data}
\title{data from exercise 9.65}
\description{
The \code{ex09.65} data frame has 2 rows and 4 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{Method}{
a factor with levels
\code{Fixed}
\code{Floating}
}
\item{n}{
a numeric vector
}
\item{mean}{
a numeric vector
}
\item{SD}{
a numeric vector
}
}
}
\source{
Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury
}
\examples{
str(ex09.65)
}
\keyword{datasets}
|
0af5d1e26e6c23f73d08ae7dc827c2e1835b9ae7
|
604cae5509a1fa049f64d1fcad18a2b005bcb4c7
|
/Day 3 - visualization/Maps.R
|
57f9ddcb796f8e02b123a4c876f4903fea612dbf
|
[] |
no_license
|
ammarjabakji/R-training
|
797d2556f4fad86de0e17d44387c2375b1bb6b41
|
94df4d1e7e1298120b75dd3be15827a00d12f64f
|
refs/heads/master
| 2020-08-12T20:16:39.402010
| 2019-10-18T06:16:57
| 2019-10-18T06:16:57
| 214,836,534
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
Maps.R
|
# Demo script: interactive maps with the leaflet htmlwidget.
# Top-level leaflet pipelines are intentionally unassigned so they
# auto-print (render) when the script is run interactively / knitted.
library(tidyverse)
library(leaflet)
# Minimal map: base tiles plus a single popup marker.
m <- leaflet() %>%
addTiles() %>% # Add default OpenStreetMap map tiles
addMarkers(lng=174.768, lat=-36.852, popup="The birthplace of R")
m # Print the map
# Oregon climate-station data; leaflet picks up lat/lon columns from it.
orstationc <- read_csv("http://geog.uoregon.edu/bartlein/old_courses/geog414s05/data/orstationc.csv")
leaflet(orstationc) %>%
addTiles() %>%
addCircleMarkers() #Add graphics elements and layers to the map widget.
# NOTE(review): ~tann / ~tjul below are passed positionally to
# addCircleMarkers (whose first argument after the map is lng) -- confirm
# the intended argument (radius?) against the leaflet docs.
leaflet(orstationc) %>%
addTiles() %>%
addCircleMarkers(~tann)
leaflet(orstationc) %>%
addTiles() %>%
addCircleMarkers(~tjul)
# Color markers by quantile bins of tann, then tjul.
pal <- colorQuantile("YlOrRd", NULL, n = 8)
leaflet(orstationc) %>%
addTiles() %>%
addCircleMarkers(color = ~pal(tann))
pal <- colorQuantile("YlOrRd", NULL, n = 8)
leaflet(orstationc) %>%
addTiles() %>%
addCircleMarkers(color = ~pal(tjul))
|
30f5ced78093c3f69fba4cc29b6b23e0b287a6f0
|
e8327d77350b80110fb20a5506b180155a108e7b
|
/ED_Workflow/2_SAS/SAS.ED2.R
|
9e3678834c6a42f74b5974e91a601adfbd59327a
|
[] |
no_license
|
MortonArb-ForestEcology/URF2018-Butkiewicz
|
c537fe28c2eeb886d324b9b8e565d100187fb9ff
|
d5f3f630045e24bd165bc2a35885a5a6e3d0c2c4
|
refs/heads/master
| 2021-06-23T12:27:08.987348
| 2019-06-20T17:49:56
| 2019-06-20T17:49:56
| 136,949,391
| 0
| 0
| null | 2018-07-12T18:37:05
| 2018-06-11T16:00:18
|
R
|
UTF-8
|
R
| false
| false
| 24,130
|
r
|
SAS.ED2.R
|
##' @name SAS.ED2
##' @title Use semi-analytical solution to accelerate model spinup
##' @author Christine Rollinson, modified from original by Jaclyn Hatala-Matthes (2/18/14)
##' 2014 Feb: Original ED SAS solution Script at PalEON modeling HIPS sites (Matthes)
##' 2015 Aug: Modifications for greater site flexibility & updated ED
##' 2016 Jan: Adaptation for regional-scale runs (single-cells run independently, but executed in batches)
##' 2018 Jul: Conversion to function, Christine Rollinson July 2018
##'@description This function approximates landscape equilibrium steady state for vegetation and
##' soil pools using the successional trajectory of a single patch modeled with disturbance
##' off and the prescribed disturbance rates for runs (Xia et al. 2012 GMD 5:1259-1271).
##' @param dir.analy Location of ED2 analyis files; expects monthly and yearly output
##' @param dir.histo Location of ED2 history files (for vars not in analy); expects monthly
##' @param outdir Location to write SAS .css & .pss files
##' @param blckyr Number of years between patch ages
##' @param lat site latitude; used for file naming
##' @param lon site longitude; used for file naming
##' @param yrs.met Number of years cycled in model spinup part 1
##' @param treefall Value to be used for TREEFALL_DISTURBANCE_RATE in ED2IN for full runs (disturbance on)
##' @param sm_fire Value to be used for SM_FIRE if INCLUDE_FIRE=2; defaults to 0 (fire off)
##' @param fire_intensity Value to be used for FIRE_PARAMTER; defaults to 0 (fire off)
##' @param slxsand Soil percent sand; used to calculate expected fire return interval
##' @param slxclay Soil percent clay; used to calculate expected fire return interval
##' @param sufx ED2 out file suffix; used in constructing file names(default "g01.h5)
##' @param decomp_scheme Decomposition scheme specified in ED2IN
##' @param kh_active_depth Maximum soil depth (m, negative downward) included when averaging soil temperature and moisture; default -0.20
##' @param Lc Used to compute nitrogen immobilization factor; ED default is 0.049787 (soil_respiration.f90)
##' @param c2n_slow Carbon to Nitrogen ratio, slow pool; ED Default 10.0
##' @param c2n_structural Carbon to Nitrogen ratio, structural pool. ED default 150.0
##' @param r_stsc Decomp param
##' @param rh_decay_low Param used for ED-1/CENTURY decomp schemes; ED default = 0.24
##' @param rh_decay_high Param used for ED-1/CENTURY decomp schemes; ED default = 0.60
##' @param rh_low_temp Param used for ED-1/CENTURY decomp schemes; ED default = 291
##' @param rh_high_temp Param used for ED-1/CENTURY decomp schemes; ED default = 318.15
##' @param rh_decay_dry Param used for ED-1/CENTURY decomp schemes; ED default = 12.0
##' @param rh_decay_wet Param used for ED-1/CENTURY decomp schemes; ED default = 36.0
##' @param rh_dry_smoist Param used for ED-1/CENTURY decomp schemes; ED default = 0.48
##' @param rh_wet_smoist Param used for ED-1/CENTURY decomp schemes; ED default = 0.98
##' @param resp_opt_water Param used for decomp schemes 0 & 3, ED default = 0.8938
##' @param resp_water_below_opt Param used for decomp schemes 0 & 3, ED default = 5.0786
##' @param resp_water_above_opt Param used for decomp schemes 0 & 3, ED default = 4.5139
##' @param resp_temperature_increase Param used for decomp schemes 0 & 3, ED default = 0.0757
##' @param rh_lloyd_1 Param used for decomp schemes 1 & 4 (Lloyd & Taylor 1994); ED default = 308.56
##' @param rh_lloyd_2 Param used for decomp schemes 1 & 4 (Lloyd & Taylor 1994); ED default = 1/56.02
##' @param rh_lloyd_3 Param used for decomp schemes 1 & 4 (Lloyd & Taylor 1994); ED default = 227.15
##' @export
##'
SAS.ED2 <- function(dir.analy, dir.histo, outdir, prefix, lat, lon, blckyr, yrs.met=30,
treefall, sm_fire=0, fire_intensity=0, slxsand=0.33, slxclay=0.33,
sufx="g01.h5",
decomp_scheme=2,
kh_active_depth = -0.20,
decay_rate_fsc=11, decay_rate_stsc=4.5, decay_rate_ssc=0.2,
Lc=0.049787, c2n_slow=10.0, c2n_structural=150.0, r_stsc=0.3, # Constants from ED
rh_decay_low=0.24, rh_decay_high=0.60,
rh_low_temp=18.0+273.15, rh_high_temp=45.0+273.15,
rh_decay_dry=12.0, rh_decay_wet=36.0,
rh_dry_smoist=0.48, rh_wet_smoist=0.98,
resp_opt_water=0.8938, resp_water_below_opt=5.0786, resp_water_above_opt=4.5139,
resp_temperature_increase=0.0757,
rh_lloyd_1=308.56, rh_lloyd_2=1/56.02, rh_lloyd_3=227.15
) {
if(!decomp_scheme %in% 0:4) stop("Invalid decomp_scheme")
# create a directory for the initialization files
dir.create(outdir, recursive=T, showWarnings=F)
#---------------------------------------
# Setting up some specifics that vary by site (like soil depth)
#---------------------------------------
#Set directories
# dat.dir <- dir.analy
ann.files <- dir(dir.analy, "-Y-") #yearly files only
#Get time window
# Note: Need to make this more flexible to get the thing after "Y"
yrind <- which(strsplit(ann.files,"-")[[1]] == "Y")
yeara <- as.numeric(strsplit(ann.files,"-")[[1]][yrind+1]) #first year
yearz <- as.numeric(strsplit(ann.files,"-")[[length(ann.files)]][yrind+1]) #last full year
yrs <- seq(yeara+1, yearz, by=blckyr) # The years we're going to use as time steps for the demography
nsteps <- length(yrs) # The number of blocks = the number steps we'll have
# Need to get the layers being used for calculating temp & moist
# Note: In ED there's a pain in the butt way of doing this with the energy, but we're going to approximate
# slz <- c(-5.50, -4.50, -2.17, -1.50, -1.10, -0.80, -0.60, -0.45, -0.30, -0.20, -0.12, -0.06)
# dslz <- c(1.00, 2.33, 0.67, 0.40, 0.30, 0.20, 0.15, 0.15, 0.10, 0.08, 0.06, 0.06)
nc.temp <- ncdf4::nc_open(file.path(dir.analy, ann.files[1]))
slz <- ncdf4::ncvar_get(nc.temp, "SLZ")
ncdf4::nc_close(nc.temp)
dslz <- vector(length=length(slz))
dslz[length(dslz)] <- 0-slz[length(dslz)]
for(i in 1:(length(dslz)-1)){
dslz[i] <- slz[i+1] - slz[i]
}
nsoil=which(slz >= kh_active_depth-1e-3) # Maximum depth for avg. temperature and moisture; adding a fudge factor bc it's being weird
# nsoil=length(slz)
#---------------------------------------
#---------------------------------------
# First loop over analy files (faster than histo) to aggregate initial
# .css and .pss files for each site
#---------------------------------------
# create an empty storage matrix for the patch info
pss.big <- matrix(nrow=length(yrs),ncol=13) # save every X yrs according to chunks specified above
colnames(pss.big) <- c("time","patch","trk","age","area","water","fsc","stsc","stsl",
"ssc","psc","msn","fsn")
#---------------------------------------
# Finding the mean soil temp & moisture
# NOTE: I've been playing around with finding the best temp & soil moisture to initialize things
# with; if using the means from the spin met cycle work best, insert them here
# This will also be necessary for helping update disturbance parameter
#---------------------------------------
slmsts <- calc.slmsts(slxsand, slxclay) # Saturated Water Capacity
slpots <- calc.slpots(slxsand, slxclay) # Saturated water potential
slbs <- calc.slbs(slxsand, slxclay) # Exponent
soilcp <- calc.soilcp(slmsts, slpots, slbs) # Dry water capacity
# Calculating Soil fire characteristics
soilfr=0
if(abs(sm_fire)>0){
if(sm_fire>0){
soilfr <- smfire.pos(slmsts, soilcp, smfire=sm_fire)
} else {
soilfr <- smfire.neg(slmsts, slpots, smfire=sm_fire, slbs)
}
}
month.begin = 1
month.end = 12
tempk.air <- tempk.soil <- moist.soil <- moist.soil.mx <- moist.soil.mn <- nfire <- vector()
for(y in yrs){
air.temp.tmp <- soil.temp.tmp <- soil.moist.tmp <- soil.mmax.tmp <- soil.mmin.tmp <- vector()
ind <- which(yrs == y)
for(m in month.begin:month.end){
#Make the file name.
year.now <-sprintf("%4.4i",y)
month.now <- sprintf("%2.2i",m)
day.now <- sprintf("%2.2i",0)
hour.now <- sprintf("%6.6i",0)
file.now <- paste(prefix,"-E-",year.now,"-",month.now,"-",day.now,"-"
,hour.now,"-",sufx,sep="")
# cat(" - Reading file :",file.now,"...","\n")
now <- ncdf4::nc_open(file.path(dir.analy,file.now))
air.temp.tmp [m] <- ncdf4::ncvar_get(now, "MMEAN_ATM_TEMP_PY")
soil.temp.tmp [m] <- sum(ncdf4::ncvar_get(now, "MMEAN_SOIL_TEMP_PY")[nsoil]*dslz[nsoil]/sum(dslz[nsoil]))
soil.moist.tmp[m] <- sum(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY")[nsoil]*dslz[nsoil]/sum(dslz[nsoil]))
soil.mmax.tmp [m] <- max(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY"))
soil.mmin.tmp [m] <- min(ncdf4::ncvar_get(now, "MMEAN_SOIL_WATER_PY"))
ncdf4::nc_close(now)
} # End month loop
# Finding yearly means
tempk.air [ind] <- mean(air.temp.tmp)
tempk.soil [ind] <- mean(soil.temp.tmp)
moist.soil [ind] <- mean(soil.moist.tmp)
moist.soil.mx[ind] <- max(soil.mmax.tmp)
moist.soil.mn[ind] <- min(soil.mmin.tmp)
nfire [ind] <- length(which(soil.moist.tmp<soilfr)) # Number of time fire should get triggered
}
soil_tempk <- mean(tempk.soil)
# rel_soil_moist <- mean(moist.soil)+.2
rel_soil_moist <- mean(moist.soil/slmsts) # Relativizing by max moisture capacity
pfire = sum(nfire)/(length(nfire)*12)
fire_return = ifelse(max(nfire)>0, length(nfire)/length(which(nfire>0)), 0)
cat(paste0("mean soil temp : ", round(soil_tempk, 2), "\n"))
cat(paste0("mean soil moist : ", round(rel_soil_moist, 3), "\n"))
cat(paste0("fire return interval (yrs) : ", fire_return), "\n")
#---------------------------------------
#---------------------------------------
# Calculate area distribution based on geometric decay based loosely on your disturbance rates
# Note: This one varies from Jackie's original in that it lets your oldest, undisturbed bin
# start a bit larger (everything leftover) to let it get cycled in naturally
#---------------------------------------
# ------
# Calculate the Rate of fire & total disturbance
# ------
fire_rate <- pfire * fire_intensity
# Total disturbance rate = treefall + fire
# -- treefall = % area/yr
disturb <- treefall + fire_rate
# ------
stand.age <- seq(yrs[1]-yeara,nrow(pss.big)*blckyr,by=blckyr)
area.dist <- vector(length=nrow(pss.big))
area.dist[1] <- sum(dgeom(0:(stand.age[2]-1), disturb))
for(i in 2:(length(area.dist)-1)){
area.dist[i] <- sum(dgeom((stand.age[i]):(stand.age[i+1]-1),disturb))
}
area.dist[length(area.dist)] <- 1 - sum(area.dist[1:(length(area.dist)-1)])
pss.big[,"area"] <- area.dist
#---------------------------------------
#---------------------------------------
# Extraction Loop Part 1: Cohorts!!
# This loop does the following:
# -- Extract cohort info from each age slice from *annual* *analy* files (these are annual means)
# -- Write cohort info to the .css file as a new patch for each age slice
# -- Dummy extractions of patch-level variables; all of the important variables here are place holders
#---------------------------------------
cat(" - Reading analy files ...","\n")
for (y in yrs){
now <- ncdf4::nc_open(file.path(dir.analy,ann.files[y-yeara+1]))
ind <- which(yrs == y)
#Grab variable to see how many cohorts there are
ipft <- ncdf4::ncvar_get(now,'PFT')
#---------------------------------------
# organize into .css variables (Cohorts)
# Note: all cohorts from a time slice are assigned to a single patch representing a stand of age X
#---------------------------------------
css.tmp <- matrix(nrow=length(ipft),ncol=10)
colnames(css.tmp) <- c("time", "patch", "cohort", "dbh", "hite", "pft", "n", "bdead", "balive", "Avgrg")
css.tmp[,"time" ] <- rep(yeara,length(ipft))
css.tmp[,"patch" ] <- rep(floor((y-yeara)/blckyr)+1,length(ipft))
css.tmp[,"cohort"] <- 1:length(ipft)
css.tmp[,"dbh" ] <- ncdf4::ncvar_get(now,'DBH')
css.tmp[,"hite" ] <- ncdf4::ncvar_get(now,'HITE')
css.tmp[,"pft" ] <- ipft
css.tmp[,"n" ] <- ncdf4::ncvar_get(now,'NPLANT')
css.tmp[,"bdead" ] <- ncdf4::ncvar_get(now,'BDEAD')
css.tmp[,"balive"] <- ncdf4::ncvar_get(now,'BALIVE')
css.tmp[,"Avgrg" ] <- rep(0,length(ipft))
#save big .css matrix
if(y==yrs[1]){
css.big <- css.tmp
} else{
css.big <- rbind(css.big,css.tmp)
}
#---------------------------------------
#---------------------------------------
# save .pss variables (Patches)
# NOTE: patch AREA needs to be adjusted to be equal to the probability of a stand of age x on the landscape
#---------------------------------------
pss.big[ind,"time"] <- 1800
pss.big[ind,"patch"] <- floor((y-yeara)/blckyr)+1
pss.big[ind,"trk"] <- 1
pss.big[ind,"age"] <- y-yeara
# Note: the following are just place holders that will be overwritten post-SAS
# pss.big[ind,6] <- ncdf4::ncvar_get(now,"AREA")
pss.big[ind,"water"] <- 0.5
pss.big[ind,"fsc"] <- ncdf4::ncvar_get(now,"FAST_SOIL_C")
pss.big[ind,"stsc"] <- ncdf4::ncvar_get(now,"STRUCTURAL_SOIL_C")
pss.big[ind,"stsl"] <- ncdf4::ncvar_get(now,"STRUCTURAL_SOIL_L")
pss.big[ind,"ssc"] <- ncdf4::ncvar_get(now,"SLOW_SOIL_C")
pss.big[ind,"psc"] <- 0
pss.big[ind,"msn"] <- ncdf4::ncvar_get(now,"MINERALIZED_SOIL_N")
pss.big[ind,"fsn"] <- ncdf4::ncvar_get(now,"FAST_SOIL_N")
ncdf4::nc_close(now)
}
#---------------------------------------
#---------------------------------------
# Extraction Loop Part 2: Patches!
# This loop does the following:
# -- Extract age slice (new patch) soil carbon conditions from *monthly* *histo* files
# -- Note: this is done because most of the necessary inputs for SAS are instantaneous values that
# are not currently tracked in analy files, let alone annual analy files; this could
# theoretically change in the future
# -- Monthly data is then aggregated to a yearly value: sum for carbon inputs; mean for temp/moist
# (if not calculated above)
#---------------------------------------
pss.big <- pss.big[complete.cases(pss.big),]
# some empty vectors for storage etc
fsc_in_y <- ssc_in_y <- ssl_in_y <- fsn_in_y <- pln_up_y <- vector()
fsc_in_m <- ssc_in_m <- ssl_in_m <- fsn_in_m <- pln_up_m <- vector()
# # NOTE: The following line should get removed if we roll with 20-year mean temp & moist
# soil_tempk_y <- soil_tempk_m <- swc_max_m <- swc_max_y <- swc_m <- swc_y <- vector()
# switch to the histo directory
# dat.dir <- file.path(in.base,sites[s],"/analy/")
mon.files <- dir(dir.histo, "-S-") # monthly files only
#Get time window
yeara <- as.numeric(strsplit(mon.files,"-")[[1]][yrind+1]) #first year
yearz <- as.numeric(strsplit(mon.files,"-")[[length(mon.files)-1]][yrind+1]) #last year
montha <- as.numeric(strsplit(mon.files,"-")[[1]][yrind+2]) #first month
monthz <- as.numeric(strsplit(mon.files,"-")[[length(mon.files)-1]][yrind+2]) #last month
cat(" - Processing History Files \n")
for (y in yrs){
dpm <- lubridate::days_in_month(1:12)
if(lubridate::leap_year(y)) dpm[2] <- dpm[2]+1
#calculate month start/end based on year
if (y == yrs[1]){
month.begin = montha
}else{
month.begin = 1
}
if (y == yrs[length(yrs)]){
month.end = monthz
}else{
month.end = 12
}
for(m in month.begin:month.end){
#Make the file name.
year.now <-sprintf("%4.4i",y)
month.now <- sprintf("%2.2i",m)
day.now <- sprintf("%2.2i",1)
hour.now <- sprintf("%6.6i",0)
# dat.dir <- paste(in.base,sites[s],"/histo/",sep="")
file.now <- paste(prefix,"-S-",year.now,"-",month.now,"-",day.now,"-"
,hour.now,"-",sufx,sep="")
# cat(" - Reading file :",file.now,"...","\n")
now <- ncdf4::nc_open(file.path(dir.histo,file.now))
# Note: we have to convert the daily value for 1 month by days per month to get a monthly estimate
fsc_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"FSC_IN")*dpm[m] #kg/(m2*day) --> kg/(m2*month)
ssc_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SSC_IN")*dpm[m]
ssl_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SSL_IN")*dpm[m]
fsn_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"FSN_IN")*dpm[m]
pln_up_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"TOTAL_PLANT_NITROGEN_UPTAKE")*dpm[m]
# ssc_in_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SSC_IN")*dpm[m]
# # NOTE: the following lines should get removed if using 20-year means
# soil_tempk_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SOIL_TEMPK_PA")[nsoil] # Surface soil temp
# swc_max_m[m-month.begin+1] <- max(ncdf4::ncvar_get(now,"SOIL_WATER_PA")) # max soil moist to avoid digging through water capacity stuff
# swc_m[m-month.begin+1] <- ncdf4::ncvar_get(now,"SOIL_WATER_PA")[nsoil] #Surface soil moist
ncdf4::nc_close(now)
}
# Find which patch we're working in
ind <- (y-yeara)/blckyr + 1
# Sum monthly values to get a total estimated carbon input
fsc_in_y[ind] <- sum(fsc_in_m,na.rm=TRUE)
ssc_in_y[ind] <- sum(ssc_in_m,na.rm=TRUE)
ssl_in_y[ind] <- sum(ssl_in_m,na.rm=TRUE)
fsn_in_y[ind] <- sum(fsn_in_m,na.rm=TRUE)
pln_up_y[ind] <- sum(pln_up_m,na.rm=TRUE)
# # Soil temp & moisture here should get deleted if using the 20-year means
# soil_tempk_y[ind] <- mean(soil_tempk_m,na.rm=TRUE)
# swc_y[ind] <- mean(swc_m,na.rm=TRUE)/max(swc_max_m,na.rm=TRUE)
}
#---------------------------------------
#---------------------------------------
# Calculate steady-state soil pools!
#
# These are the equations from soil_respiration.f90 -- if this module has changed, these need
# Note: We ignore the unit conversions here because we're now we're working with the yearly
# sum so that we end up with straight kgC/m2
# fast_C_loss <- kgCday_2_umols * A_decomp * decay_rate_fsc * fast_soil_C
# struc_C_loss <- kgCday_2_umols * A_decomp * Lc * decay_rate_stsc * struct_soil_C * f_decomp
# slow_C_loss <- kcCday_2_umols * A_decomp * decay_rate_ssc * slow_soil_C
#---------------------------------------
# -----------------------
# Calculate the annual carbon loss if things are stable
# -----------
fsc_loss <- decay_rate_fsc
ssc_loss <- decay_rate_ssc
ssl_loss <- decay_rate_stsc
# -----------
# *************************************
# Calculate A_decomp according to your DECOMP_SCHEME
# A_decomp <- temperature_limitation * water_limitation # aka het_resp_weight
# *************************************
# ========================
# Temperature Limitation
# ========================
# soil_tempk <- sum(soil_tempo_y*area.dist)
if(decomp_scheme %in% c(0, 3)){
temperature_limitation = min(1,exp(resp_temperature_increase * (soil_tempk-318.15)))
} else if(decomp_scheme %in% c(1,4)){
lnexplloyd = rh_lloyd_1 * ( rh_lloyd_2 - 1. / (soil_tempk - rh_lloyd_3))
lnexplloyd = max(-38.,min(38,lnexplloyd))
temperature_limitation = min( 1.0, resp_temperature_increase * exp(lnexplloyd) )
} else if(decomp_scheme==2) {
# Low Temp Limitation
lnexplow <- rh_decay_low * (rh_low_temp - soil_tempk)
lnexplow <- max(-38, min(38, lnexplow))
tlow_fun <- 1 + exp(lnexplow)
# High Temp Limitation
lnexphigh <- rh_decay_high*(soil_tempk - rh_high_temp)
lnexphigh <- max(-38, min(38, lnexphigh))
thigh_fun <- 1 + exp(lnexphigh)
temperature_limitation <- 1/(tlow_fun*thigh_fun)
}
# ========================
# ========================
# Moisture Limitation
# ========================
# rel_soil_moist <- sum(swc_y*area.dist)
if(decomp_scheme %in% c(0,1)){
if (rel_soil_moist <= resp_opt_water){
water_limitation = exp( (rel_soil_moist - resp_opt_water) * resp_water_below_opt)
} else {
water_limitation = exp( (resp_opt_water - rel_soil_moist) * resp_water_above_opt)
}
} else if(decomp_scheme==2){
# Dry soil Limitation
lnexpdry <- rh_decay_dry * (rh_dry_smoist - rel_soil_moist)
lnexpdry <- max(-38, min(38, lnexpdry))
smdry_fun <- 1+exp(lnexpdry)
# Wet Soil limitation
lnexpwet <- rh_decay_wet * (rel_soil_moist - rh_wet_smoist)
lnexpwet <- max(-38, min(38, lnexpwet))
smwet_fun <- 1+exp(lnexpwet)
water_limitation <- 1/(smdry_fun * smwet_fun)
} else {
water_limitation = rel_soil_moist * 4.0893 - rel_soil_moist**2 * 3.1681 - 0.3195897
}
# ========================
A_decomp <- temperature_limitation * water_limitation # aka het_resp_weight
# *************************************
# *************************************
# Calculate the steady-state pools
# NOTE: Current implementation weights carbon input by patch size rather than using the
# carbon balance from the oldest state (as was the first implementation)
# *************************************
# -------------------
# Do the carbon and fast nitrogen pools
# -------------------
fsc_ss <- fsc_in_y[length(fsc_in_y)]/(fsc_loss * A_decomp)
ssl_ss <- ssl_in_y[length(ssl_in_y)]/(ssl_loss * A_decomp * Lc) # Structural soil C
ssc_ss <- ((ssl_loss * A_decomp * Lc * ssl_ss)*(1 - r_stsc))/(ssc_loss * A_decomp )
fsn_ss <- fsn_in_y[length(fsn_in_y)]/(fsc_loss * A_decomp)
# -------------------
# -------------------
# Do the mineralized nitrogen calculation
# -------------------
#ED2: csite%mineralized_N_loss = csite%total_plant_nitrogen_uptake(ipa)
# + csite%today_Af_decomp(ipa) * Lc * K1 * csite%structural_soil_C(ipa)
# * ( (1.0 - r_stsc) / c2n_slow - 1.0 / c2n_structural)
msn_loss <- pln_up_y[length(pln_up_y)] +
A_decomp*Lc*ssl_loss*ssl_in_y[length(ssl_in_y)]*
((1.0-r_stsc)/c2n_slow - 1.0/c2n_structural)
#fast_N_loss + slow_C_loss/c2n_slow
msn_med <- fsc_loss*A_decomp*fsn_in_y[length(fsn_in_y)]+ (ssc_loss * A_decomp)/c2n_slow
msn_ss <- msn_med/msn_loss
# -------------------
# *************************************
# *************************************
# Replace dummy values in patch matrix with the steady state calculations
# *************************************
# Figure out which steady-state value index we should use
# Note: In the current implementation this should be 1 because we did the weighted averaging up front,
# but if something went wrong and dimensions are off, use this to pick the last (etc)
p.use <- 1
# write the values to file
pss.big[,"patch"] <- 1:nrow(pss.big)
pss.big[,"area"] <- area.dist
pss.big[,"fsc"] <- rep(fsc_ss[p.use],nrow(pss.big)) # fsc
pss.big[,"stsc"] <- rep(ssl_ss[p.use],nrow(pss.big)) # stsc
pss.big[,"stsl"] <- rep(ssl_ss[p.use],nrow(pss.big)) # stsl (not used)
pss.big[,"ssc"] <- rep(ssc_ss[p.use],nrow(pss.big)) # ssc
pss.big[,"msn"] <- rep(msn_ss[p.use],nrow(pss.big)) # msn
pss.big[,"fsn"] <- rep(fsn_ss[p.use],nrow(pss.big)) # fsn
# *************************************
#---------------------------------------
#---------------------------------------
# Write everything to file!!
#---------------------------------------
file.prefix=paste0(prefix, "-lat", lat, "lon", lon)
write.table(css.big,file=file.path(outdir,paste0(file.prefix,".css")),row.names=FALSE,append=FALSE,
col.names=TRUE,quote=FALSE)
write.table(pss.big,file=file.path(outdir,paste0(file.prefix,".pss")),row.names=FALSE,append=FALSE,
col.names=TRUE,quote=FALSE)
#---------------------------------------
}
|
7e6c563587d3c54119ed1618d5c812e2d13d6a2c
|
7f86f568dab6279e6f2d987c77a023bed055a11c
|
/man/simPPe.Rd
|
2e439325add5fd4f3dba4be7a7284c97db30c970
|
[] |
no_license
|
cran/AHMbook
|
b6acd2ed71319be2f0e3374d9d8960a8b04e21bf
|
d8f8ad8bef93120f187bef494b9ac1ad8200c530
|
refs/heads/master
| 2023-08-31T21:13:00.618018
| 2023-08-23T21:10:03
| 2023-08-23T22:30:32
| 88,879,777
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,099
|
rd
|
simPPe.Rd
|
\name{simPPe}
\alias{simPPe}
\encoding{UTF-8}
\title{
Simulate a spatial point pattern in a heterogeneous landscape
}
\description{
The function simulates a spatial point pattern in a heterogeneous landscape simulated on a square landscape. The study area ('core') is simulated inside the larger landscape that includes a buffer. The size of the core is defined by the lscape.size minus twice the buffer.
There is one habitat covariate X that affects the intensity of the points. X is spatially structured with negative-exponential spatial autocorrelation; the parameters of the field can be chosen to create large 'islands' of similar values or no 'islands' at all, in which case the field is spatially unstructured.
The intensity of STATIC points (e.g. animal activity centers) may be inhomogeneous and affected by the coefficient beta, which is the log-linear effect of X.
To recreate the data sets used in the book with R 3.6.0 or later, include \code{sample.kind="Rounding"} in the call to \code{set.seed}. This should only be used for reproduction of old results.
Previous versions used \pkg{RandomFields}, but that is not currently available on CRAN. \pkg{fields} is now used, but it cannot deal with large values of \code{lscape.size} and \code{theta.X}. If you have \pkg{RandomFields} installed (perhaps by getting it from the CRAN archive), you can load a version of \code{simPPe} that supports it with \code{source(system.file("RandomFieldsSupport", "simPPe.R", package="AHMbook"))}.
}
\usage{
simPPe(lscape.size = 150, buffer.width = 25, variance.X = 1, theta.X = 10,
M = 250, beta = 1, quads.along.side = 6, show.plots = TRUE)
}
\arguments{
\item{lscape.size}{
size (width = height) of the square landscape, including core and buffer.
}
\item{buffer.width}{
width of buffer around core study area.
}
\item{variance.X}{
variance of Gaussian random field (covariate X).
}
\item{theta.X}{
scale parameter of correlation in field (must be >0).
}
\item{M}{
expected number of activity centers in core area.
}
\item{beta}{
coefficient of the habitat covariate.
}
\item{quads.along.side}{
number of quadrats along the side of the core area; the total number of quadrats will be quads.along.side^2, thus indirectly defining the quadrat area.
}
\item{show.plots}{
if TRUE, summary plots are displayed.
}
}
\value{
A list with the values of the input arguments and the following additional elements:
\item{core }{range of x and y coordinates in the 'core'}
\item{M2 }{number of ACs in the total landscape, including the buffer}
\item{grid }{coordinates of the center of each pixel}
\item{pixel.size }{length of side of each pixel}
\item{size.core }{the width=height of the core area}
\item{prop.core }{the proportion of the landscape inside the core}
\item{X }{matrix of covariate values for each pixel}
\item{probs }{matrix of probabilities of an AC being inside each pixel (sums to 1)}
\item{pixel.id }{the ID of the pixel for each AC}
\item{u }{2-column matrix, coordinate of each AC}
\item{nsite }{number of quadrats}
\item{quad.size }{width = height of each quadrat}
\item{breaks }{boundaries of the quadrats}
\item{mid.pt }{mid-points of the quadrats}
\item{lambda_pp }{intensity of point pattern (ACs per unit area)}
\item{Nac }{site-specific abundance of ACs}
\item{zac }{site-specific occurrence (0/1) of ACs}
\item{E_N }{average realized abundance per quadrat}
\item{E_z }{average realized occupancy per quadrat}
}
\references{
Kéry, M. & Royle, J.A. (2021) \emph{Applied Hierarchical Modeling in Ecology} AHM2 - 10.
}
\author{
Marc Kéry & Andy Royle.
}
\examples{
# Nice plot (produces the really nice Fig. 10-2 in the book)
# RNGkind(sample.kind = "Rounding") # run this for R >= 3.6.0
set.seed(117, kind="Mersenne-Twister")
# Fails if RandomFields is not available
#try(str(dat <- simPPe(lscape.size = 200, buffer.width = 25, variance.X = 1,
# theta.X = 70, M = 200, beta = 1, quads.along.side = 6)))
str(dat <- simPPe(lscape.size = 200, buffer.width = 20, variance.X = 1,
theta.X = 5, M = 250, beta = 1, quads.along.side = 6))
}
|
ea0aecc1d1c6a5633b0b1db328bca82e68b8df13
|
99144fe0beb697c124e5271a1d395ab6477d405a
|
/man/footnote.decorated.Rd
|
adf2da8875dc450d40f66136dbc6293865ebed15
|
[] |
no_license
|
cran/yamlet
|
233e29fc38d75205d4cc04db5a81af49dc05a5d5
|
3f494a19ab2e1cdb426606af40304309c78603ca
|
refs/heads/master
| 2023-09-04T00:52:18.417901
| 2023-08-24T05:00:02
| 2023-08-24T06:31:30
| 236,960,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,124
|
rd
|
footnote.decorated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xtable.R
\name{footnote.decorated}
\alias{footnote.decorated}
\title{Footnote Decorated}
\usage{
\method{footnote}{decorated}(x, ..., equal = ":", collapse = "; ")
}
\arguments{
\item{x}{decorated}
\item{...}{passed to \code{\link{append_units}}}
\item{equal}{character: a symbol suggesting equality between a name and its note}
\item{collapse}{used to \code{\link{paste}} column-wise footnotes}
}
\value{
character
}
\description{
Footnotes a decorated data.frame.
Generates a text string that defines
column names using label and unit attributes.
}
\examples{
library(magrittr)
set.seed(0)
x <- data.frame(
auc = rnorm(100, mean = 2400, sd = 200),
bmi = rnorm(100, mean = 20, sd = 5),
gen = 0:1
)
x \%<>\% decorate('auc: [AUC_0-24, ng*h/mL]')
x \%<>\% decorate('bmi: [Body Mass Index, kg/m^2]')
x \%<>\% decorate('gen: [Gender, [Male: 1, Female: 0]]')
x \%<>\% resolve
footnote(x)
footnote(x, auc)
}
\seealso{
Other footnote:
\code{\link{footnote}()}
}
\concept{footnote}
\keyword{internal}
|
bcdd51f25512ebdd93d817fd7cbe0f389b10a937
|
30554897707057f2739e63d0abdfa6dd9f105401
|
/R-package/mtrToolkit/man/KRCRT.Rd
|
def3efcf06fe5aa42e2757d228d7b31318f0bcbe
|
[] |
no_license
|
hugoabonizio/mtr-toolkit
|
14471fefda51e56485619b2695a944bff723483f
|
480b8aeaddbd4e1b7f988b65e12367c475971f71
|
refs/heads/master
| 2020-03-26T23:54:17.918999
| 2018-06-18T13:28:01
| 2018-06-18T13:28:01
| 145,576,635
| 0
| 0
| null | 2018-08-21T14:37:17
| 2018-08-21T14:37:16
| null |
UTF-8
|
R
| false
| true
| 869
|
rd
|
KRCRT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KRCRT.R
\name{KRCRT}
\alias{KRCRT}
\title{Creates a k-Random Clusters Regression Trees (k-RCRTRF) model.}
\usage{
KRCRT(X, Y, k = 2, max.depth = Inf, var.improvp = 0.01, min.size = NULL)
}
\arguments{
\item{X, Y}{The input features and target variables, respectively}
\item{k}{The number of random clusters to be generated at each split (Default = 2)}
\item{max.depth}{Maximum depth for generated trees (Default = Inf, i.e. splits are made for as long as possible)}
\item{var.improvp}{Minimum percentage decrease in variance, relative to the parent node, required to continue splitting (Default = 0.01)}
\item{min.size}{Minimum size of generated clusters (Default = 5, as in CLUS)}
}
\value{
A k-RCRT model
}
\description{
Creates a k-Random Clusters Regression Trees (k-RCRTRF) model.
}
|
810d14277e18b3c59b4814d3a9c87dbaceb15efb
|
a35a018fcc041d18e440b9b77a21bbd35f2dda01
|
/tests/testthat/test-colours.R
|
29e549e1fd405b83776c23fc07d384bddacd295b
|
[] |
no_license
|
Hey-Lees/ftplottools
|
366f91bd8967686f6e5cdd9fcf7d15f50fdf2adb
|
31d2c1e178a30b468f531964015874ddc0d3b83b
|
refs/heads/master
| 2023-08-26T09:30:47.874860
| 2021-11-04T09:20:12
| 2021-11-04T09:20:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
test-colours.R
|
context("test-colours")

test_that("ft_colour returns expected values", {
  # spot-check individual named brand colours against their hex codes
  expect_equal(ft_colors("paper"), "#FFF1E5")
  expect_equal(ft_colors("claret"), "#990F3D")
  expect_equal(ft_colors("black-30"), "#B3A9A0")
})

test_that("ft_colors returns the correct length", {
  # one element is returned per requested colour name
  pair <- ft_colors("paper", "oxford")
  single <- ft_colors("black")
  expect_equal(length(pair), 2)
  expect_equal(length(single), 1)
})

test_that("ft_colors gives warning if color is not found", {
  # unknown colour names should warn rather than error
  expect_warning(ft_colors("blah"))
  expect_warning(ft_colors("sky", "not here"))
})

test_that("ft_pal returns a function", {
  # palette constructors return a generator function
  expect_is(ft_pal("main"), "function")
})

test_that("ft_pal errors on unknown palette", {
  expect_error(ft_pal("does not exist"), "Palette not found")
})

test_that("palettes are correct length", {
  # the generator yields exactly as many colours as requested
  main_colours <- ft_pal("main")(10)
  black_colours <- ft_pal("black")(4)
  expect_equal(length(main_colours), 10)
  expect_equal(length(black_colours), 4)
})
|
e7a98db1bfdcf0b3fb31ef480207eaaaec910041
|
f4f54eb0a4dc5e6b70f46d72a25793f4ae42d339
|
/man/bra.Rd
|
1585bf44e90b8ad862890c3f3c1f846aa5479f5e
|
[] |
no_license
|
krv/blockra
|
b0275ad14f9eca0598ba83b237730ea6f2447845
|
ec5c922537191fcd276848193b18d0971d7dfa1e
|
refs/heads/master
| 2021-01-10T07:58:10.276472
| 2015-07-30T09:55:30
| 2015-07-30T09:55:30
| 36,675,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
rd
|
bra.Rd
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/bra.r
\name{bra}
\alias{bra}
\title{Block rearrangement algorithm}
\usage{
bra(X, epsilon = 0.1, shuffle = TRUE, fix.first = TRUE, obj = var,
seed = 1)
}
\arguments{
\item{X}{numeric array or matrix}
\item{epsilon}{target variance of row sums is epsilon multiplied by the mean of the matrix variances}
\item{shuffle}{randomly permute each column of the matrix before rearrangement}
\item{fix.first}{don't change the order of the first column}
\item{obj}{objective function that is minimized, default is variance}
\item{seed}{random seed}
}
\value{
numeric matrix with a minimal row sum variance
}
\description{
Function which performs the block rearrangement algorithm (BRA) of Bernard and McLeish.
}
\author{
Kris Boudt, \email{kris.boudt@vub.ac.be}
Steven Vanduffel, \email{steven.vanduffel@vub.ac.be}
Kristof Verbeken, \email{kristof.verbeken@vub.ac.be}
}
\references{
Bernard, C. and McLeish, D. (2016). Algorithms for finding copulas minimizing convex functions of sums. \emph{Asia-Pacific Journal of Operational Research}, 33(5).
}
\seealso{
The \code{\link{ra}} for the rearrangement algorithm
}
|
dc5bf19fe8ffc00a4826f5a4974ae4edaf0db1da
|
a37a3e54565806ee4c9d1f925ce87cd06de5f254
|
/analysis/models/model_cox.R
|
bc5231e94867a17dc3f9e54ea15e7880ebb2ada2
|
[
"MIT"
] |
permissive
|
opensafely/comparative-ve-research
|
cdec66752a31294016a9d9b857bea359fbc8abfd
|
cf3118545d18a5777dc43d153a6261bd68de56bd
|
refs/heads/main
| 2023-08-23T10:47:30.506829
| 2022-05-06T11:36:00
| 2022-05-06T11:36:00
| 367,404,609
| 1
| 1
|
MIT
| 2022-05-06T11:36:01
| 2021-05-14T15:27:50
|
HTML
|
UTF-8
|
R
| false
| false
| 10,226
|
r
|
model_cox.R
|
# # # # # # # # # # # # # # # # # # # # #
# This script:
# imports processed data
# fits some Cox models with time-varying effects
#
# The script must be accompanied by three arguments,
# `outcome` - the dependent variable in the regression model
# `timescale` - either "timesincevax" or "calendar"
# `censor_seconddose` - second at second dose (1) or not (0)
# # # # # # # # # # # # # # # # # # # # #
# Preliminaries ----

# import command-line arguments ----
# Expected usage: Rscript model_cox.R <outcome> <timescale> <censor_seconddose>
args <- commandArgs(trailingOnly = TRUE)

if (length(args) > 0) {
  removeobs <- TRUE
  outcome <- args[[1]]
  timescale <- args[[2]]
  censor_seconddose <- as.integer(args[[3]])
} else {
  # no arguments supplied: fall back to defaults for interactive testing
  removeobs <- FALSE
  outcome <- "admitted"
  timescale <- "timesincevax"
  censor_seconddose <- as.integer("1")
}
## Import libraries ----
library('tidyverse')
library('here')
library('glue')
library('survival')
library('splines')

## Import custom user functions from lib
# NOTE(review): helper functions used below (e.g. tte(), censor_indicator(),
# timesince_cut()) appear to come from these sourced scripts -- confirm there.
source(here("analysis", "lib", "utility_functions.R"))
source(here("analysis", "lib", "redaction_functions.R"))
source(here("analysis", "lib", "survival_functions.R"))

# create output directories ----
# one output directory per (outcome, timescale, censoring) combination
fs::dir_create(here("output", "models", outcome, timescale, censor_seconddose))

## create special log file ----
# append = FALSE truncates any log left over from a previous run
cat(glue("## script info for {outcome} ##"), " \n", file = here("output", "models", outcome, timescale, censor_seconddose, glue("modelcox_log.txt")), append = FALSE)
## Append one or more lines of text to this run's model log file.
## Relies on the globals `outcome`, `timescale` and `censor_seconddose`
## (set at the top of the script) to locate the log file.
logoutput <- function(...){
  logfile <- here(
    "output", "models", outcome, timescale, censor_seconddose,
    glue("modelcox_log.txt")
  )
  # write the message(s); continuation entries are indented by one space
  cat(..., file = logfile, sep = "\n ", append = TRUE)
  # blank line separating successive log entries
  cat("\n", file = logfile, sep = "\n ", append = TRUE)
}
## import metadata ----
# look up the name of the outcome variable for the requested outcome
metadata_outcomes <- read_rds(here("output", "data", "metadata_outcomes.rds"))
outcome_var <- metadata_outcomes[["outcome_var"]][metadata_outcomes[["outcome"]] == outcome]

var_labels <- read_rds(here("output", "data", "metadata_labels.rds"))

# model formula metadata is unpacked directly into the global environment
# (provides postvaxcuts12/20, lastfupday12/20, etc.)
list_formula <- read_rds(here::here("output", "data", "metadata_formulas.rds"))
list2env(list_formula, globalenv())

# choose follow-up cut points and last follow-up day according to whether
# follow-up is censored at the second dose
if (censor_seconddose == 1) {
  postvaxcuts <- postvaxcuts12
  lastfupday <- lastfupday12
} else {
  postvaxcuts <- if (outcome == "postest") postvaxcuts20_postest else postvaxcuts20
  lastfupday <- lastfupday20
}
# Import data ----
# Read the processed cohort and derive time-to-event variables.
data_cohort <- read_rds(here("output", "data", "data_cohort.rds"))
data_tte <- data_cohort %>%
  mutate(
    # censoring date = earliest of: end of follow-up window, deregistration,
    # death, (optionally) the day before the second dose, and study end date
    censor_date = pmin(
      vax1_date - 1 + lastfupday,
      dereg_date,
      death_date,
      if_else(rep(censor_seconddose, n())==1, vax2_date-1, as.Date(Inf)),
      end_date,
      na.rm=TRUE
    ),
    # outcome date is looked up dynamically via the outcome metadata
    outcome_date = .[[glue("{outcome_var}")]],
    # assume vaccination occurs at the start of the day, and all other events occur at the end of the day.
    # NOTE(review): tte() and censor_indicator() are project helpers sourced
    # from analysis/lib -- confirm their exact semantics there.
    tte_censor = tte(vax1_date-1, censor_date, censor_date, na.censor=TRUE),
    ind_censor = censor_indicator(censor_date, censor_date),
    tte_outcome = tte(vax1_date-1, outcome_date, censor_date, na.censor=TRUE),
    ind_outcome = censor_indicator(outcome_date, censor_date),
    # follow-up stops at whichever of censoring or outcome comes first
    tte_stop = pmin(tte_censor, tte_outcome, na.rm=TRUE),
  )
data_cox0 <- data_tte %>%
  mutate( # this step converts logical to integer so that model coefficients print nicely in gtsummary methods
    across(where(is.logical), ~.x*1L)
  ) %>%
  mutate(
    # vaccination-week by region stratification variable
    week_region = paste0(vax1_week, "__", region),
    # treatment indicator: 1 = AZ vaccine, 0 = otherwise (e.g. Pfizer)
    vax1_az = (vax1_type=="az")*1
  )
# sanity check: everyone in the cohort must have a recorded first-dose type
stopifnot("there is some unvaccinated person-time" = !any(is.na(data_cox0$vax1_type)))
### print dataset size ----
logoutput(
  glue("data_cox0 data size = ", nrow(data_cox0)),
  glue("data_cox0 memory usage = ", format(object.size(data_cox0), units="GB", standard="SI", digits=3L))
)
# one row per patient per post-vaccination week
# uncount() replicates each patient once per cut point; fup_day is the start
# day of each period and timesincevax_pw its (factor) period label
postvax_time <- data_cox0 %>%
  select(patient_id) %>%
  uncount(weights = length(postvaxcuts), .id="id_postvax") %>%
  mutate(
    fup_day = postvaxcuts[id_postvax],
    # NOTE(review): timesince_cut() is a project helper from analysis/lib --
    # presumably labels the period containing day fup_day+1; confirm there.
    timesincevax_pw = timesince_cut(fup_day+1, postvaxcuts)
  ) %>%
  droplevels() %>%
  select(patient_id, fup_day, timesincevax_pw)
# create dataset that splits follow-up time by
# time since vaccination (using postvaxcuts cutoffs)
# first tmerge sets up the (tstart, tstop] counting-process structure with the
# outcome event; second tmerge adds time-since-vaccination as a time-dependent
# covariate switching at each cut point
data_cox <- tmerge(
  data1 = data_cox0 %>% select(-starts_with("ind_"), -ends_with("_date")),
  data2 = data_cox0,
  id = patient_id,
  tstart = 0L,
  tstop = pmin(tte_censor, tte_outcome, na.rm=TRUE),
  ind_outcome = event(tte_outcome)
) %>%
  tmerge( # create treatment timescale variables
    data1 = .,
    data2 = postvax_time,
    id = patient_id,
    timesincevax_pw = tdc(fup_day, timesincevax_pw)
  )
### print dataset size and save ----
logoutput(
  glue("data_cox data size = ", nrow(data_cox)),
  glue("data_cox memory usage = ", format(object.size(data_cox), units="GB", standard="SI", digits=3L))
)
# persist the split dataset for downstream model/report scripts
write_rds(data_cox, here("output", "models", outcome, timescale, censor_seconddose, "modelcox_data.rds"), compress="gz")
## if using calendar timescale ----
# - az versus pfizer is examined as an interaction term with time since
#   vaccination, which is a time-dependent covariate
# - delayed entry at vaccination date
# - time is split into az week 1, az week 2, ..., pfizer week 1, pfizer week 2,
#   ... via a standard interaction term
# - no need to adjust for calendar time (it is the timescale itself)
if (timescale == "calendar") {
  # shift both interval endpoints from the vaccination-day origin onto
  # calendar days (vaccination assumed at the start of day vax1_day)
  data_cox <- data_cox %>%
    mutate(
      tstart = tstart + vax1_day - 1,
      tstop = tstop + vax1_day - 1
    )
  formula_vaxonly <- Surv(tstart, tstop, ind_outcome) ~ vax1_az * timesincevax_pw
  formula_spacetime <- . ~ . + strata(region)
}
## if using time since vaccination timescale ----
# - az versus pfizer is examined as a time-dependent effect
# - start date is vaccination date
# - post-vax follow-up is already overlapping, so can use az/pfizer : weekly strata
# - need to adjust for calendar time
if(timescale=="timesincevax"){
# # one row per patient per follow-up calendar day
# calendar_time <- data_cox %>%
# select(patient_id, vax1_day, tte_stop) %>%
# mutate(
# calendar_day = map2(vax1_day, tte_stop, ~.x:(.y+.x))
# ) %>%
# unnest(c(calendar_day)) %>%
# mutate(
# #calendar_week = paste0("week ", str_pad(floor(calendar_day/7), 2, pad = "0")),
# calendar_week = floor(calendar_day/7),
# treatment_day = calendar_day - vax1_day,
# treatment_week = floor(treatment_day/7)
# )
#
# # one row per patient per follow-up calendar week
# calendar_week <- calendar_time %>%
# group_by(patient_id, calendar_week) %>%
# filter(first(calendar_day)==calendar_day) %>%
# ungroup()
#
# # one row per patient per follow-up post-vax week
# treatment_week <- calendar_time %>%
# group_by(patient_id, treatment_week) %>%
# filter(first(treatment_day)==treatment_day) %>%
# ungroup()
#
# data_cox <- data_cox %>%
# tmerge(
# data1 = .,
# data2 = treatment_week,
# id = patient_id,
# calendar_day = tdc(treatment_day, calendar_day)
# )
# as per https://cran.r-project.org/web/packages/survival/vignettes/timedep.pdf
# only need interaction term (: but not *) because follow time is stratified by timesincevax_pw, so there's no baseline
formula_vaxonly <- Surv(tstart, tstop, ind_outcome) ~ vax1_az:strata(timesincevax_pw)
formula_spacetime <- . ~ . + strata(region) * ns(vax1_day, 3)
}
### model 0 - vaccination + timescale only, no adjustment variables
### model 1 - minimally adjusted vaccination effect model, stratification by region only
### model 2 - minimally adjusted vaccination effect model, baseline demographics only
### model 3 - fully adjusted vaccination effect model, baseline demographics + clinical characteristics
# Labels for the four nested models; the character values are matched against
# the numeric `model` column via fct_recode() further below.
model_names = c(
  "Unadjusted" = "0",
  "Adjusting for time" = "1",
  "Adjusting for time + demographics" = "2",
  "Adjusting for time + demographics + clinical" = "3"
)
# Build each model formula by successively update()-ing the vaccine-only
# formula with the space/time, demographic and comorbidity terms.
# (formula_demog and formula_comorbs are defined elsewhere in this script.)
formula0_pw <- formula_vaxonly
formula1_pw <- formula_vaxonly %>% update(formula_spacetime)
formula2_pw <- formula_vaxonly %>% update(formula_spacetime) %>% update(formula_demog)
formula3_pw <- formula_vaxonly %>% update(formula_spacetime) %>% update(formula_demog) %>% update(formula_comorbs)
# cap the partial-likelihood iterations for all fits
opt_control <- coxph.control(iter.max = 30)
# Fit one Cox model, log its size/convergence, persist the fitted object to
# disk, and return tidied summaries.
#
# Arguments:
#   number      - integer model index (0..3); used in log lines, file names and
#                 the `model` column of the returned tibbles.
#   formula_cox - the Surv() model formula to fit against the global `data_cox`.
#
# Returns a named list with elements `glance` (one-row model summary) and
# `tidy` (per-term coefficients). Relies on globals: data_cox, opt_control,
# outcome, timescale, censor_seconddose, logoutput.
cox_model <- function(number, formula_cox){

  fitted <- coxph(
    formula_cox,
    data = data_cox,
    robust = TRUE,
    id = patient_id,
    na.action = "na.fail",
    control = opt_control
  )
  print(warnings())

  # memory footprint is reported both in the log and in the glance table
  mem_used <- format(object.size(fitted), units="GB", standard="SI", digits=3L)

  logoutput(
    glue("model{number} data size = ", fitted$n),
    glue("model{number} memory usage = ", mem_used),
    glue("convergence status: ", fitted$info[["convergence"]])
  )

  # one-row model-level summary, prefixed with index / convergence / RAM
  glance <-
    broom::glance(fitted) %>%
    add_column(
      model = number,
      convergence = fitted$info[["convergence"]],
      ram = mem_used,
      .before = 1
    )

  # per-term coefficient table (log-hazard scale)
  tidy <-
    broom.helpers::tidy_plus_plus(
      fitted,
      exponentiate = FALSE
    ) %>%
    add_column(
      model = number,
      .before = 1
    )

  # drop any embedded data before serialising the fitted object
  fitted$data <- NULL
  write_rds(fitted, here("output", "models", outcome, timescale, censor_seconddose, glue("modelcox_model{number}.rds")), compress="gz")

  lst(glance, tidy)
}
# Fit the four nested models and collect their summaries.
summary0 <- cox_model(0, formula0_pw)
summary1 <- cox_model(1, formula1_pw)
summary2 <- cox_model(2, formula2_pw)
summary3 <- cox_model(3, formula3_pw)
# combine results
# Stack the one-row glance summaries and attach human-readable model labels.
model_glance <-
  bind_rows(summary0$glance, summary1$glance, summary2$glance, summary3$glance) %>%
  mutate(
    model_name = fct_recode(as.character(model), !!!model_names),
    outcome = outcome
  )
write_csv(model_glance, here::here("output", "models", outcome, timescale, censor_seconddose, glue("modelcox_glance.csv")))
# Stack the per-term coefficient tables; saved both as CSV (for reporting) and
# RDS (to preserve column types for downstream scripts).
model_tidy <- bind_rows(summary0$tidy, summary1$tidy, summary2$tidy, summary3$tidy) %>%
  mutate(
    model_name = fct_recode(as.character(model), !!!model_names),
    outcome = outcome
  )
write_csv(model_tidy, here::here("output", "models", outcome, timescale, censor_seconddose, glue("modelcox_tidy.csv")))
write_rds(model_tidy, here::here("output", "models", outcome, timescale, censor_seconddose, glue("modelcox_tidy.rds")))
|
fe3e21afd229e278dd6430ec02ad0794580f192e
|
c207e66e9c50316a22a3df5661912d83ab6c1fd0
|
/R/tsml.cara.rct.R
|
363ac22858934847ef60048132bd2c105b388608
|
[] |
no_license
|
achambaz/tsml.cara.rct
|
ab8587af760a6e2f8346c7bdbaa1b2c30cdd9f87
|
2b2aa282d4a11c601b37cacb368b67d03f6e8fc9
|
refs/heads/master
| 2021-01-14T12:15:07.323927
| 2016-09-29T18:51:00
| 2016-09-29T18:51:00
| 68,332,455
| 2
| 2
| null | 2016-09-22T23:21:08
| 2016-09-15T21:23:44
|
R
|
UTF-8
|
R
| false
| false
| 22,141
|
r
|
tsml.cara.rct.R
|
setMethodS3("update", "TSMLCARA", function(#Updates a TSMLCARA Object
### Updates a TSMLCARA object.
this,
### A \code{TSMLCARA} object, as created by \code{TSMLCARA}.
flavor=c("parametric", "lasso"),
### A \code{character} indicating the 'flavor' of the procedure.
...,
### Additional parameters.
verbose=FALSE
### A \code{logical} or an \code{integer} indicating the level of verbosity
### (defaults to 'FALSE').
) {
  ##alias<< update
  ##references<< Chambaz, van der Laan, Zheng, Chapter 16, Modern Adaptive Randomized Clinical Trials: Statistical, Operational, and Regulatory Aspects, by A. Sverdlov (CRC Press, 2015).
  ##seealso<<tsml.cara.rct, targetPsi
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Validate arguments
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Arguments 'flavor'
  if (missing(flavor)) {
    flavor <- getFlavor(this)
  }
  ## Argument 'verbose'
  verbose <- Arguments$getVerbose(verbose)
  verbose <- less(verbose, 0)
  ## retrieving 'what'
  what <- getWhat(this)
  ## retrieving 'obs', preparing 'A'
  obs <- getObs(this)
  A <- obs[, "A"]
  ## retrieving 'Qmin'
  Qmin <- getQmin(this)
  ## retrieving 'tm.ref'
  tm.ref <- getTm.ref(this)
  ## retrieving 'learnQ'
  learnQ <- getLearnQ(this)
  if (what=="ATE") {
    ## retrieving 'tm.model' and 'tm.control'
    tm.model <- getTm.model(this)
    tm.control <- getTm.control(this)
    ## retrieving 'targetLink'
    targetLink <- getTargetLink(this)
  }
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## learning
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  verbose && ruler(verbose)
  verbose && enter(verbose, "UPDATING STEP")
  ## estimating 'Q'
  verbose && enter(verbose, "Estimating Q")
  W <- extractW(obs)
  GA <- blik(A, obs[, "G"])
  ## ratio of the reference mechanism's likelihood of the assigned arm to the
  ## actual mechanism's likelihood (importance weights for estimating Q)
  weights <- blik(A, tm.ref(W))
  weights <- weights/GA
  Q <- estimateQ(obs, learnQ=learnQ, weights=weights, Qmin=Qmin,
                 flavor=flavor, ..., verbose=verbose)
  ## Qtab holds, per observation, the predicted outcome under the assigned arm
  ## ("A") and under each counterfactual arm ("A=0", "A=1")
  Qtab <- matrix(NA, nrow=nrow(obs), ncol=3)
  colnames(Qtab) <- c("A", "A=0", "A=1")
  Qtab[, "A=1"] <- Q(1, W)
  Qtab[, "A=0"] <- Q(0, W)
  Aone <- (A==1)
  Qtab[Aone, "A"] <- Qtab[Aone, "A=1"]
  Qtab[!Aone, "A"] <- Qtab[!Aone, "A=0"]
  setQtab(this, Qtab)
  setLearnedQ(this, attr(Q, "model"))
  verbose && str(verbose, Qtab)
  verbose && exit(verbose)
  ## targeting 'Gstar'
  verbose && enter(verbose, "Targeting Gstar")
  if (what=="ATE") {
    ## squared-residual weights, inverse-weighted by the assignment likelihood
    weights <- (obs[, "Y"]-Qtab[, "A"])^2/GA
    Gstar <- targetGstar(obs=obs, weights=weights, tm.model=tm.model,
                         targetLink=targetLink, tm.control=tm.control,
                         ...,
                         verbose=verbose)
  } else if (what=="MOR") {
    Gstar <- targetOptRule(this, Q, ..., verbose=verbose)
  }
  setGstar(this, Gstar)
  verbose && str(verbose, Gstar)
  verbose && exit(verbose)
  ## updating 'Gtab'
  verbose && enter(verbose, "Updating Gtab")
  setGtab(this, Gstar(W))
  verbose && str(verbose, Gstar)
  verbose && exit(verbose)
  ## updating 'history'
  verbose && enter(verbose, "Updating history")
  step <- getStep(this)
  step["update"] <- step["update"]+1
  this$.step <- step
  updateHistory(this, "update")
  verbose && str(verbose, tsml.cara.rct::getHistory.TSMLCARA(this))
  verbose && exit(verbose)
  verbose && exit(verbose)
  verbose && ruler(verbose)
})
setMethodS3("targetPsi", "TSMLCARA", function(#Targets a TSMLCARA Object Toward the Parameter Psi
### Targets a TSMLCARA object toward the parameter Psi.
this,
### A \code{TSMLCARA} object, as created by \code{TSMLCARA}.
...,
### Additional parameters.
verbose=FALSE
### A \code{logical} or an \code{integer} indicating the level of verbosity
### (defaults to 'FALSE').
) {
  ##alias<< targetPsi
  ##references<< Chambaz, van der Laan, Zheng, Chapter 16, Modern Adaptive Randomized Clinical Trials: Statistical, Operational, and Regulatory Aspects, by A. Sverdlov (CRC Press, 2015).
  ##seealso<< tsml.cara.rct, update
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Validate arguments
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Argument 'verbose'
  verbose <- Arguments$getVerbose(verbose)
  verbose <- less(verbose, 0)
  ## retrieving 'what'
  what <- getWhat(this)
  if (what=="ATE") {
    targetLink <- NULL
  }
  else if (what=="MOR") {
    targetLink <- getTargetLink(this)
  }
  ## retrieving 'obs', 'Gtab', 'Qtab', preparing 'Y', 'A', 'G' and 'GA'
  obs <- getObs(this)
  Y <- obs[, "Y"]
  A <- obs[, "A"]
  G <- obs[, "G"]
  GA <- blik(A, G)
  GstarW <- getGtab(this)
  Qtab <- getQtab(this)
  ## retrieving 'Qmin'
  Qmin <- getQmin(this)
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## targeting Psi
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  verbose && ruler(verbose)
  verbose && enter(verbose, "TARGETING PSI")
  W <- extractW(obs)
  GAstarW <- blik(A, GstarW)
  ## ratio of targeted to actual assignment likelihoods (importance weights)
  weights <- GAstarW/GA
  verbose && enter(verbose, "Fluctuating")
  ## NOTE(review): 'verbose' is passed positionally into '...' here, unlike the
  ## named 'verbose=verbose' used elsewhere in this file — confirm against
  ## fluctuateQ's signature.
  eps <- fluctuateQ(obs=obs, Qtab=Qtab, GAstarW=GAstarW,
                    what=what, weights=weights, Qmin=Qmin,
                    targetLink=targetLink,
                    ..., verbose)
  verbose && str(verbose, eps)
  ## QEpstab
  ## build the clever-covariate table (epsilon times H) per arm
  epsHtab <- matrix(NA, nrow=nrow(obs), ncol=3)
  colnames(epsHtab) <- colnames(Qtab)
  if (what=="ATE") {
    epsHtab[, "A=1"] <- eps/GstarW
    epsHtab[, "A=0"] <- -eps/(1-GstarW)
  } else if (what=="MOR") {
    ## rA indicates where the current rule assigns arm 1
    rA <- ((Qtab[, "A=1"]-Qtab[, "A=0"])>0)
    epsHtab[, "A=1"] <- eps/GstarW
    epsHtab[!rA, "A=1"] <- 0
    epsHtab[, "A=0"] <- eps/(1-GstarW)
    epsHtab[rA, "A=0"] <- 0
  }
  Aone <- (A==1)
  epsHtab[Aone, "A"] <- epsHtab[Aone, "A=1"]
  epsHtab[!Aone, "A"] <- epsHtab[!Aone, "A=0"]
  ## fluctuate on the scaled (0,1) outcome scale, then map back
  scaledQtab <- scaleQmat(Qtab, wrt=Y, thr=Qmin)
  if (what=="ATE") {
    scaledQEpstab <- binomial()$linkinv(binomial()$linkfun(scaledQtab) + epsHtab) ##ie, expit(logit(...))
  } else if (what=="MOR") {
    scaledQEpstab <- targetLink$linkinv(targetLink$linkfun(scaledQtab) + epsHtab)
  }
  QEpstab <- scaleQmat(scaledQEpstab, reverse=TRUE)
  attr(QEpstab, "eps") <- eps
  setQEpstab(this, QEpstab)
  verbose && str(verbose, QEpstab)
  verbose && exit(verbose)
  ## estimating Psi
  verbose && enter(verbose, "estimating psi")
  ## CAUTION: same form in both cases! no need to fork
  if (what=="ATE") {
    ## epsHtab <- epsHtab/abs(eps) * matrix(weights, nrow=length(weights), ncol=3)
    epsHtab <- epsHtab/eps * matrix(weights, nrow=length(weights), ncol=3)
    psis <- estimatePsi(obs=obs, what=what, Qtab=NULL, QEpstab=QEpstab, epsHtab=epsHtab, verbose=verbose)
  } else if (what=="MOR") {
    epsHtab <- epsHtab/eps * matrix(weights, nrow=length(weights), ncol=3)
    psis <- estimatePsi(obs=obs, what=what, Qtab=Qtab, QEpstab=QEpstab, epsHtab=epsHtab, verbose=verbose)
  }
  this$.psi <- psis["psi"]
  this$.psi.sd <- psis["psi.sd"]
  verbose && str(verbose, psis)
  verbose && exit(verbose)
  if (what=="MOR") {
    ## if targeting mean under optimal rule, then estimating the empirical
    ## regret as well
    verbose && enter(verbose, "estimating the regret")
    regret <- estimateRegret(obs=obs, what=what, Qtab=Qtab, QEpstab=QEpstab,
                             epsHtab=epsHtab, psi=psis["psi"], verbose=verbose)
    this$.regret <- regret["regret"]
    this$.regret.sd <- regret["regret.sd"]
    verbose && str(verbose, regret)
    verbose && exit(verbose)
  }
  ## updating 'history'
  verbose && enter(verbose, "Updating history")
  step <- getStep(this)
  step["target"] <- step["target"]+1
  this$.step <- step
  updateHistory(this, "target")
  verbose && str(verbose, tsml.cara.rct::getHistory.TSMLCARA(this))
  verbose && exit(verbose)
  verbose && exit(verbose)
  verbose && ruler(verbose)
})
tsml.cara.rct <- structure(function#Targeted Minimum Loss Covariate-Adjusted Response-Adaptive RCT Design and Statistical Analysis
### Simulates a targeted minimum loss covariate-adjusted response-adaptive RCT
### design and statistical analysis.
(what=c("ATE", "MOR"),
### A \code{character} indicating the parameter of interest to estimate.
### Either "ATE" for the Average Treatment Effect, the difference between the
### means under '\eqn{do(A=1)}' and '\eqn{do(A=0)}', or "MOR" for the Mean
### under the Optimal treatment Rule '\eqn{do(A=r(W))}'.
 flavor=c("parametric", "lasso"),
### A \code{character} indicating the 'flavor' of the procedure.
 ninit=50,
### An \code{integer}, number of subjects to sample at
### initialization. Defaults to 50.
 by=25,
### An \code{integer}, number of subjects to sample at each step after
### initialization. Defaults to 25.
 nmax=500,
### An \code{integer}, maximum number of subjects to sample during the
### trial. Must be larger than 'ninit+by'. Defaults to 500.
 tm.init=oneOne,
### A \code{function} describing the initial treatment mechanism to be
### employed. Defaults to the balanced (1:1) treatment mechanism, ie,
### \code{function} \code{\link{oneOne}}.
 tm.ref=oneOne,
### A \code{function} describing the reference treatment mechanism to be
### employed. Defaults to the balanced (1:1) treatment mechanism, ie,
### \code{function} \code{\link{oneOne}}.
 learnQ,
### A model \eqn{{\cal Q}} of conditional expectations of \eqn{Y} given
### \eqn{(A,W)} for both flavors 'parametric' and 'lasso', given as a
### \code{formula} or a \code{function} outputing formulas. Defaults to
### \code{formula(Y~1)} for flavors 'parametric' and 'lasso'.
 tm.model=formula(A~1),
### A parametric model \eqn{{\cal G}} of treatment mechanisms, used only when
### 'what' equals "ATE". The procedure targets the optimal treatment
### mechanism within this model. Defaults to \code{formula(A~1)}.
 tm.control=glm.control(maxit=500),
### A \code{list} of options for the targeting of the treatment mechanism
### within the model defined by argument 'tm.model'. Used only when 'what'
### equals "ATE", it defaults to \code{glm.control(maxit=500)}.
 Gmin=1e-2,
### A small positive \code{numeric}, with default value \code{1e-2}. When
### \code{what} equals 'ATE', it is the minimum value of elements of the
### parametric model \eqn{{\cal G}} of treatment mechanisms (see argument
### \code{tm.model}). The maximum value is \code{1-Gmin}. When \code{what}
### equals 'MOR', it is the minimum value of the conditional probability
### of \eqn{A=r_n(W)} given \eqn{W}.
 Gexploit=Gmin,
### A small positive \code{numeric}, with default value that of \code{Gmin},
### or a function of sample size giving such small numbers, only used when
### \code{what} equals "MOR", in conjunction with \code{Gexplore}.
 Gexplore=1e-2,
### Either a small positive \code{numeric}, with default value \code{1e-2}, or
### a function of sample size giving such small numbers, only used when
### \code{what} equals "MOR", in conjunction with \code{Gexploit}.
 Qmin=1e-2,
### A small positive \code{numeric}, the minimum value of scaled outcomes
### \eqn{Y}. The maximum value is \code{1-Qmin}.
 conf.level=0.95,
### A \code{numeric}, the confidence level of the resulting confidence
### interval.
 verbose=FALSE,
### A \code{logical} or an \code{integer} indicating the level of verbosity
### (defaults to 'FALSE').
 piV=c(1/2, 1/3, 1/6),
### Marginal distribution of \eqn{V}. Defaults to \code{c(1/2, 1/3, 1/6)}.
 family=c("beta", "gamma"),
### A \code{character}, either "beta" (default) or "gamma", the nature of the
### law of outcome.
 Qbar=Qbar1,
### A \code{function}, the conditional expectation of \eqn{Y} given
### \eqn{(A,W)}. Defaults to \code{Qbar1}.
 Vbar=Vbar1,
### A \code{function}, the conditional variance of \eqn{Y} given
### \eqn{(A,W)}. Defaults to \code{Vbar1}.
 Bn=1e5,
### An \code{integer}, the sample size used to estimate the true value of the
### data-adaptive parameter at each step of the procedure when 'what' equals
### 'MOR'. Defaults to 1e5.
 slice.by=1e5
### An \code{integer}. If it is smaller than argument 'n' of 'getSample', then
### the simulation is decomposed into 'n%/%slice.by' smaller simulations of
### 'slice.by' observations and one of 'n%%slice.by' observations. Defaults to
### 1e5 (hence, no decomposition if 'n' smaller than 4e5). Mainly for internal
### use.
 ) {
  ##alias<< tsml.cara.rct
  ##references<< Chambaz, van der Laan, Zheng, Chapter 16, Modern Adaptive Randomized Clinical Trials: Statistical, Operational, and Regulatory Aspects, by A. Sverdlov (CRC Press, 2015).
  ##seealso<< update, targetPsi, getSample
  ##details<< Defines a lower-bound on the conditional probability of
  ## \eqn{do(A=1-r_n(W))} given \eqn{W}.
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Validate arguments
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Argument 'what'
  what <- match.arg(what)
  ## Arguments 'flavor' and 'learnQ'
  flavor <- match.arg(flavor)
  if (missing(learnQ)) {
    learnQ <- switch(flavor,
                     parametric=formula(Y~1),
                     lasso=formula(Y~1))
  } else {
    learnMode <- switch(flavor,
                        parametric=c("formula", "function"),
                        lasso=c("formula", "function"))
    class <- class(learnQ)
    if (!class%in%learnMode) {
      throw("Argument 'learnQ' should be of class '", learnMode, "', not '", class, "' for flavor: ", flavor)
    }
    if (class=="formula") {
      if (learnQ[[2]]!="Y") {
        throw("Argument 'learnQ' should be a formula with response 'Y', not:", learnQ)
      }
    }
  }
  ## Argument 'ninit'
  ninit <- Arguments$getNumeric(ninit)
  ## Argument 'by'
  by <- Arguments$getNumeric(by)
  ## Argument 'nmax'
  nmax <- Arguments$getNumeric(nmax, c(ninit+by, Inf))
  ## Argument 'tm.init'
  mode <- mode(tm.init)
  if (mode != "function") {
    throw("Argument 'tm.init' should be of mode 'function', not '", mode)
  }
  ## Argument 'tm.ref'
  mode <- mode(tm.ref)
  if (mode != "function") {
    throw("Argument 'tm.ref' should be of mode 'function', not '", mode)
  }
  ## Argument 'tm.model'
  if (!identical(class(tm.model), "formula") | tm.model[[2]]!="A") {
    throw("Argument 'tm.model' should be a formula with response 'A', not:",
          deparse(substitute(form, list(form=tm.model))))
  }
  ## Argument 'tm.control'
  ## Argument 'Gmin'
  Gmin <- Arguments$getNumeric(Gmin, c(0, 1/2))
  ## Argument 'Gexploit'
  mode <- mode(Gexploit)
  if (!(mode %in% c("numeric", "function"))) {
    throw("Argument 'Gexploit' should be of mode either 'numeric' or 'function', not ", mode)
  } else if (mode=="numeric") {
    Gexploit <- Arguments$getNumeric(Gexploit, c(0, 1/2))
  }
  ## Argument 'Gexplore'
  mode <- mode(Gexplore)
  if (!(mode %in% c("numeric", "function"))) {
    throw("Argument 'Gexplore' should be of mode either 'numeric' or 'function', not ", mode)
  } else if (mode=="numeric") {
    Gexplore <- Arguments$getNumeric(Gexplore, c(0, 1/2))
  }
  ## Argument 'verbose'
  verbose <- Arguments$getVerbose(verbose)
  verbose <- less(verbose, 10)
  ## Argument 'piV':
  piV <- Arguments$getNumerics(piV, range=c(0,1))
  ## Use a tolerant comparison: an exact '!=' test can spuriously reject the
  ## documented default piV=c(1/2, 1/3, 1/6), whose floating-point sum need not
  ## be exactly 1 on every platform.
  if (!isTRUE(all.equal(sum(piV), 1))) {
    throw("Argument 'piV' should consist of non-negative weights summing to one.")
  }
  ## Argument 'family':
  family <- match.arg(family)
  ## Argument 'Qbar':
  mode <- mode(Qbar)
  if (mode != "function") {
    throw("Argument 'Qbar' should be of mode 'function', not '", mode)
  }
  ## Argument 'Vbar':
  mode <- mode(Vbar)
  if (mode != "function") {
    throw("Argument 'Vbar' should be of mode 'function', not '", mode)
  }
  ## Argument 'Bn':
  Bn <- Arguments$getInteger(Bn)
  ## Was 'Bn <= 1e5', which flagged the documented default Bn=1e5 itself.
  if (Bn < 1e5) {
    verbose && str(verbose, "isn't 'Bn' too small?")
  }
  ## Argument 'slice.by'
  slice.by <- Arguments$getInteger(slice.by, c(1, Inf))
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## Core
  ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  ## initial sampling (under 1:1 treatment mechanism)
  ## (a) sampling
  obs <- getSample(ninit, tm=tm.ref,
                   piV=piV, Qbar=Qbar, Vbar=Vbar,
                   family=family)
  nobs <- nrow(obs)
  ## (b) declaring the 'TSMLCARA' object
  tsml.cara <- TSMLCARA(what=what,
                        flavor=flavor,
                        obs=obs,
                        tm.ref=tm.ref,
                        tm.model=tm.model,
                        tm.control=tm.control,
                        learnQ=learnQ,
                        Gmin=Gmin,
                        Gexploit=Gexploit,
                        Gexplore=Gexplore,
                        Qmin=Qmin,
                        conf.level=conf.level,
                        verbose=verbose)
  ## update of 'tsml.cara'
  update(tsml.cara, verbose=verbose)
  targetPsi(tsml.cara, verbose=verbose)
  ## if 'what=="MOR"', then compute and store the targeted data-adaptive parameters
  if (what=="MOR") {
    Qn <- getQ(tsml.cara)
    ruleQn <- ruleQbar(Qn)
    ##
    ## first data-adaptive parameter:
    ## mean reward under the current best estimate of the optimal rule
    ##
    truthn <- getSample(Bn, tm=oneOne, rule=ruleQn,
                        piV=piV, Qbar=Qbar, Vbar=Vbar, what,
                        family=family, slice.by=slice.by)[c("psi", "psi.sd")]
    ##
    ## second data-adaptive parameter:
    ## empirical cumulated regret
    ##
    W <- extractW(obs)
    rA <- as.numeric(ruleQn(W))
    eregretn <- mean(obs[, "Y"] - Qbar(cbind(A=rA, W)))
    ##
    ## third data-adaptive parameter:
    ## counterfactual cumulated regret
    ##
    col <- match(c("Y0", "Y1"), colnames(obs))
    rA <- col[rA+1]
    counterfactual <- as.numeric(obs[cbind(1:nrow(obs), rA)])
    cregretn <- mean(obs[, "Y"] - counterfactual)
  }
  ## successive updates of 'tsml.cara'
  while (nobs < nmax) {
    if (nobs+by>nmax) {
      by <- (nmax-nobs)
    }
    newObs <- getSample(by, tm=getGstar(tsml.cara),
                        piV=piV, Qbar=Qbar, Vbar=Vbar,
                        family=family)
    addNewSample(tsml.cara, newObs)
    nobs <- nrow(getObs(tsml.cara))
    update(tsml.cara, verbose=verbose)
    targetPsi(tsml.cara, verbose=verbose)
    if (what=="MOR") {
      Qn <- getQ(tsml.cara)
      ruleQn <- ruleQbar(Qn)
      ##
      ## first data-adaptive parameter:
      ## mean reward under the current best estimate of the optimal rule
      ##
      truthn <- rbind(truthn,
                      getSample(Bn, tm=oneOne, rule=ruleQn,
                                piV=piV, Qbar=Qbar, Vbar=Vbar, what,
                                family=family, slice.by=slice.by)[c("psi", "psi.sd")])
      ##
      ## second data-adaptive parameter:
      ## empirical cumulated regret
      ##
      obs <- getObs(tsml.cara)
      W <- extractW(obs)
      rA <- as.numeric(ruleQn(W))
      eregretn <- c(eregretn,
                    mean(obs[, "Y"] - Qbar(cbind(A=rA, W))))
      ##
      ## third data-adaptive parameter:
      ## counterfactual cumulated regret
      ##
      rA <- col[rA+1]
      counterfactual <- as.numeric(obs[cbind(1:nrow(obs), rA)])
      cregretn <- c(cregretn,
                    mean(obs[, "Y"] - counterfactual))
    }
  }
  if (what=="MOR") {
    truthn[, "psi.sd"] <- truthn[, "psi.sd"]/sqrt(Bn)
    colnames(truthn) <- c("psin", "psin.sd")
    rownames(truthn) <- NULL
    attr(tsml.cara, "truthn") <- truthn
    attr(tsml.cara, "eregretn") <- eregretn
    attr(tsml.cara, "cregretn") <- cregretn
  }
  return(tsml.cara)
### Returns a \code{TSMLCARA} object which summarizes the TSMLCARA undertaken
### procedure.
}, ex=function() {
  ##
  log <- Arguments$getVerbose(-1, timestamp=TRUE)
  set.seed(12345)
  ## ########################
  ## AVERAGE TREATMENT EFFECT
  ## ########################
  tm.model <- formula(A~.)
  psi.sd <- sqrt(getOptVar(n=1e5,
                           tm.model=tm.model,
                           piV=c(1/2, 1/3, 1/6),
                           family="gamma",
                           Qbar=Qbar1,
                           Vbar=Vbar1))
  truth <- c(psi=91/72, psi.sd=psi.sd)
  ## parametric example
  learnQ <- formula(Y~I(as.integer(A)):(U+V)+I(as.integer(1-A)):(U+V))
  ATE.param <- tsml.cara.rct(what="ATE",
                             flavor="parametric",
                             ninit=200,
                             by=100,
                             nmax=400,
                             tm.init=oneOne,
                             tm.ref=oneOne,
                             learnQ=learnQ,
                             tm.model=tm.model,
                             conf.level=0.95,
                             piV=c(1/2, 1/3, 1/6),
                             family="gamma",
                             Qbar=Qbar1,
                             Vbar=Vbar1)
  ATE.param
  ## Not run:
  plot(ATE.param, truth=truth)
  ## End(**Not run**)
  ## See the vignette for more examples...
})
############################################################################
## HISTORY:
## 2016-09-16
## o Created.
############################################################################
|
d305a6ca7bdae1de6825de396ed5f16d1d1a39cb
|
c1439351216e4cd99ba17f3f0cdc7290e4ba6fe3
|
/man/removeCerror.Rd
|
3b9b864d5ca305d02c597334ec9454857e07fe5c
|
[] |
no_license
|
cran/piecewiseSEM
|
cb751749c7ba0485eb81840b366dd8aae3dbe12d
|
c8264234681c9954c88c5926d477f5dd181112cf
|
refs/heads/master
| 2023-03-08T20:59:05.204323
| 2023-03-04T17:00:02
| 2023-03-04T17:00:02
| 48,085,794
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 310
|
rd
|
removeCerror.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/basisSet.R
\name{removeCerror}
\alias{removeCerror}
\title{Remove correlated errors from the basis set}
\usage{
removeCerror(b, modelList)
}
\description{
Remove correlated errors from the basis set
}
\keyword{internal}
|
897a439476c9153d0c603a1efb9093cfbab87ad2
|
ef79aa2916012d0db5bf74b02cdd21266d06a934
|
/man/jaccard.mean.Rd
|
0366530edf83be07fb3f6dd4cad6110f516790a7
|
[] |
no_license
|
yijuanhu/LDM
|
c39619ce3d1ec08bcd2d3e9f8a67f9eab345b02d
|
649c49ec530f926a8420b37555266d7efc3f0de5
|
refs/heads/main
| 2023-08-27T20:28:33.777430
| 2023-08-27T07:26:44
| 2023-08-27T07:26:44
| 126,239,042
| 22
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,428
|
rd
|
jaccard.mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LDM_fun.R
\name{jaccard.mean}
\alias{jaccard.mean}
\title{Expected value of the Jaccard distance matrix}
\usage{
jaccard.mean(
otu.table,
rarefy.depth = min(rowSums(otu.table)),
first.order.approx.only = FALSE
)
}
\arguments{
\item{otu.table}{the \code{n.obs} by \code{n.otu} matrix of read counts.}
\item{rarefy.depth}{rarefaction depth. The default is the minimum library size observed in the OTU table.}
\item{first.order.approx.only}{a logical value indicating whether to calculate the expected value
using the first order approximation by the delta method.
The default is FALSE, using the second order approximation.}
}
\value{
a list consisting of
\item{jac.mean.o1}{Expected Jaccard distance matrix by the first order approximation.}
\item{jac.mean.o2}{Expected Jaccard distance matrix by the second order approximation.}
\item{jac.mean.sq.o1}{Expected squared Jaccard distance matrix by the first order approximation.}
\item{jac.mean.sq.o2}{Expected squared Jaccard distance matrix by the second order approximation.}
}
\description{
This function computes the expected value of the Jaccard distance matrix over rarefaction replicates.
}
\examples{
data(throat.otu.tab5)
res.jaccard <- jaccard.mean( throat.otu.tab5 )
}
\author{
Yi-Juan Hu <yijuan.hu@emory.edu>, Glen A. Satten <gsatten@emory.edu>
}
\keyword{microbiome}
|
fa17a86d2a54b7d64dc4febd86dd731ea570748b
|
07ca789edc86a0df1ccfc4b7fe89eb4b416f6e78
|
/SCRIPTS/rspo3/robust_validation_plot.R
|
d55d754d6fcc3509c4e925bd66568942d5e45d15
|
[] |
no_license
|
niaid/h5n1-chi-010
|
1316b5cbcb34b9699ef0405d0d97d66b9bfbbf0d
|
35487afdd368bb91c9693d5b79750c98b326614c
|
refs/heads/main
| 2023-05-24T14:47:00.117636
| 2021-06-23T21:24:04
| 2021-06-23T21:24:04
| 379,733,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,739
|
r
|
robust_validation_plot.R
|
library(tidyverse)

# Load robust correlation validation results.
val_result <- read.table(file.path("RESULTS", "rspo3", "val_result_all_with_pre_z_norm.txt"), header = TRUE, sep = "\t")
# Per-subject mean and standard error of the average validation score.
# val_mean_sd <- val_result %>% group_by(subject) %>% summarize(z_mean = mean(z_score), z_sd = sd(z_score)/sqrt(length(z_score)))
val_mean_sd <- val_result %>% group_by(subject) %>% summarize(z_mean = mean(avg_score), z_sd = sd(avg_score)/sqrt(length(avg_score)))

# Load titre response data.
titre <- read.table(file.path("DATA_ORIGINAL", "Titres", "titre.txt"), header = TRUE, sep = "\t")
titre$TimePt <- tolower(titre$TimePt)
d28_titre <- titre[titre$TimePt %in% c("d28", "d29", "d31"), c("Sample.ID", "A.Indonesia")]
d28_titre$Sample.ID <- paste("H5N1_", str_pad(d28_titre$Sample.ID, 3, pad = "0"), sep = "")
# Censored values ("<x") are taken at their threshold.
d28_titre$A.Indonesia <- as.numeric(str_replace(d28_titre$A.Indonesia, "<", ""))

# Restrict titres to validated subjects, then merge BEFORE computing the
# correlation: the original computed cor() on two separately-ordered data
# frames, which silently assumes identical row order.
d28_titre <- d28_titre[d28_titre$Sample.ID %in% as.character(val_mean_sd$subject), ]
all_result <- merge(val_mean_sd, d28_titre, by.y = "Sample.ID", by.x = "subject")
corel_ <- cor(all_result$A.Indonesia, all_result$z_mean, method = "spearman")

all_result$A.Indonesia <- log(all_result$A.Indonesia)

scat_plot <- ggplot(all_result, aes(x = A.Indonesia, y = z_mean)) +
  geom_point() +
  geom_errorbar(aes(ymin = z_mean - z_sd, ymax = z_mean + z_sd), width = .1) +
  # geom_text(aes(label=subject),hjust=0, vjust=0) +
  scale_y_continuous(name = "mean z-score") +
  labs(x = "d28/A.Indonesia")

# An unconditional stop() here (debugging leftover) previously made the
# ggsave() below unreachable; it has been removed so the figure is written.
ggsave(file.path("FIGURES", "rspo3", "val_all_with_pre_z_norm.png"), plot = scat_plot, device = "png", width = 6, height = 4)
|
e77e103b850d1d8acd67cb350cd00996792ea01f
|
4dc3c38381cd3074c51b0c1746d4fe594f8306d7
|
/MEETUPS/CRUG/APPS/GUI_R_GWidgets.R
|
72b3c472bdc688903caae37011e1b44d2599726c
|
[] |
no_license
|
ParfaitG/WORKSHOPS
|
d77918d4ab2e27e9edbf670509797b1c6ab3701b
|
b81a2581132d120c61a041cfc0dfca000b968b47
|
refs/heads/master
| 2021-06-07T05:23:05.096122
| 2020-07-28T03:47:26
| 2020-07-28T03:47:26
| 135,520,592
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,143
|
r
|
GUI_R_GWidgets.R
|
# Disable any registered connection observer (e.g. an IDE connections pane)
# so DBI connections are not intercepted.
options(connectionObserver = NULL)
library(RSQLite, quietly = TRUE)
library(RGtk2, quietly = TRUE)
library(gWidgets2, quietly = TRUE)
library(gWidgets2RGtk2, quietly = TRUE)
# make gWidgets2 use the RGtk2 backend
options(guiToolkit="RGtk2")
# NOTE(review): hard-coded, machine-specific working directory — breaks on any
# other machine; consider a relative path or here::here().
setwd("/home/parfaitg/Documents/CRUG")
# Return the distinct CTA station names from the local SQLite database,
# prefixed with an empty string so the combo box starts blank.
getList <- function(){
  conn <- dbConnect(SQLite(), dbname = "Data/CTA_Data.db")
  # Release the connection even if the query errors (the original only
  # disconnected on the success path, leaking the handle on failure).
  on.exit(dbDisconnect(conn), add = TRUE)
  strSQL <- "SELECT DISTINCT [stationname] FROM Ridership r ORDER BY [stationname]"
  df <- dbGetQuery(conn, strSQL)
  datalist <- c("", df[[1]])
  return(datalist)
}
# Build and show the main GTK window: year/station/line/direction filters plus
# an OUTPUT button that queries the SQLite database and opens a results table.
# Returns a list with the gwindow handle (element 'win').
# NOTE(review): the 'makelist' parameter is never used in the body, and the
# function is called as mainWindow() below — consider dropping it.
mainWindow <- function(makelist){
  win <- gwindow("GUI GWidgets Menu", height = 100, width = 550, parent=c(500, 200))
  img <- gdkPixbufNewFromFile("/home/parfaitg/Documents/CRUG/Images/R_Logo.png")
  getToolkitWidget(win)$setIcon(img)
  tbl <- glayout(cont=win, spacing = 8, padding=20)
  box <- gtkHBox(TRUE, 5)
  font(tbl) <- list(size=14, family="Arial")
  # IMAGE AND TITLE
  tmp <- gimage(filename = "Images/R_CTA.gif", dirname = getwd(), container = tbl)
  tbl[1,1] <- tmp
  tmp <- glabel("   Ridership and Stations Data Filter   \n", container = tbl)
  font(tmp) <- list(size=16, family="Arial")
  tbl[1,2] <- tmp
  # YEAR
  tmp <- glabel("  Year    ", container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  tbl[2,1] <- tmp
  tmp <- gcombobox(c(2001:2018), selected = length(c(2001:2018)), editable = TRUE, container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  tbl[2,2] <- yearcbo <- tmp
  # STATION
  tmp <- glabel("  Station ", container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  tbl[3,1] <- tmp
  station_list <- getList()
  tmp <- gcombobox(station_list, selected = 1, editable = TRUE, container = tbl, font.attrs=list(size=14, family="Arial"))
  font(tmp) <- list(size=14, family="Arial")
  tbl[3,2] <- stationcbo <- tmp
  # RAIL LINE
  tmp <- glabel("  Line    ", container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  tbl[4,1] <- tmp
  tmp <- gcombobox(c("", "blue", "brown", "green", "orange", "pink", "purple", "purple exp", "red", "yellow"),
                   selected = 1, editable = TRUE, container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  # NOTE(review): 'linecbo' is captured here but its value is never read in the
  # OUTPUT handler below — the rail-line filter is not applied to the query.
  tbl[4,2] <- linecbo <- tmp
  # DIRECTION
  tmp <- glabel("  Direction", container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  tbl[5,1] <- tmp
  tmp <- gcombobox(c("", "N", "S", "E", "W"), selected = 1, editable = TRUE, container = tbl)
  font(tmp) <- list(size=14, family="Arial")
  tbl[5,2] <- directioncbo <- tmp
  # BUTTON
  tmp <- gbutton("OUTPUT", container = tbl)
  font(tmp) <- list(size=12, family="Arial")
  tbl[6,2] <- btnoutput <- tmp
  tbl[7,1] <- glabel("  ", container = tbl)
  # Click handler: run a parameterized join of Ridership and Stations filtered
  # by year / station / direction, then show the result in a new table window.
  addHandlerChanged(btnoutput, handler=function(...){
    yr <- svalue(yearcbo)
    st <- svalue(stationcbo)
    di <- svalue(directioncbo)
    # NOTE(review): this connection is not closed via on.exit(), so it leaks if
    # the query fails.
    conn <- dbConnect(SQLite(), dbname = "Data/CTA_Data.db")
    strSQL <- paste("SELECT r.station_id, '  ' || strftime('%m-%d-%Y', r.date, 'unixepoch') || '  ' As ride_date,",
                    "       '  ' || s.station_descriptive_name || '  ' as station_name,",
                    "       '  ' || r.rides || '  ' As rides, '  ' || s.direction_id || '  ' As direction",
                    "FROM Ridership r",
                    "INNER JOIN Stations s ON r.station_id = s.map_id",
                    "WHERE strftime('%m-%d-%Y', r.date, 'unixepoch') LIKE ?",
                    "  AND r.stationname = ?",
                    "  AND s.direction_id = ?")
    res <- dbSendQuery(conn, strSQL)
    # bound parameters (not string-concatenated) — safe against SQL injection
    dbBind(res, list(paste0("%", yr, "%"), st, di))
    df <- dbFetch(res)
    dbClearResult(res)
    dbDisconnect(conn)
    df <- setNames(df[c("ride_date", "direction", "rides", "station_name")],
                   c("    ride_date", "  direction  ", "    rides", "  station_name"))
    tblwin <- gWidgets2::gwindow("Output Table", height = 450, width = 600, parent=c(700, 200))
    tab <- gtable(df, chosencol = 2, container=tblwin)
    font(tab) <- list(size=12, family="Arial")
  })
  return(list(win=win))
}
# Launch the menu window and block until it is closed, keeping the script
# alive when run non-interactively (e.g. via Rscript).
m <- mainWindow()
while(isExtant(m$win)) Sys.sleep(1)
|
302839c2ccfaa033a9215cf2adb51b91aa997e33
|
9f06adddff3d1003c405a77a5fb57b9153c9ff61
|
/man/percent_to_k.Rd
|
3059824f63555774132d71f8f4576a3daae60e03
|
[] |
no_license
|
kimberlyroche/rulesoflife
|
c7372572b74a964db2fb585824bf6e1c23f7793a
|
2173f2404e22c7fd6c1bf0fdf94e56905503f41d
|
refs/heads/main
| 2023-05-15T08:41:16.997396
| 2023-04-29T16:00:08
| 2023-04-29T16:00:08
| 355,001,176
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 383
|
rd
|
percent_to_k.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{percent_to_k}
\alias{percent_to_k}
\title{Converts a percent to a relative count}
\usage{
percent_to_k(percent, n_features)
}
\arguments{
\item{percent}{percent}
\item{n_features}{total number of features}
}
\value{
relative count
}
\description{
Converts a percent to a relative count
}
|
9187ad7db968edeb655dcda168c2ae2dd74645e6
|
d83745053905580ccc87478db3b1a1dbaee9b80d
|
/scripts/script.R
|
87e79b8f6f276fac7ed9861a1d799081e5ec0912
|
[] |
no_license
|
pburgov/M3_Actividad_Colaborativa
|
ea14a93adb19a7a85a23d9dd286a6cfaa2cd51c2
|
3c0d337ebfb897c1ef3958b86dbff904a491f6ed
|
refs/heads/master
| 2021-08-07T02:59:36.526798
| 2017-11-07T10:32:08
| 2017-11-07T10:32:08
| 109,650,388
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,807
|
r
|
script.R
|
# Check and set the working directory
# NOTE(review): hard-coded, machine-specific path — breaks on other machines.
getwd()
pathToMain <- "/Users/pburgo/Documents/MBD/M3_Actividad_Colaborativa"
setwd(pathToMain)
# Takes a date string in YYYYmmdd or mmddYYYY format and returns it
# reformatted as "YYYY-mm-dd".
CustomDateFormatting <- function(x){
  # parse_date_time() (lubridate, loaded later in this script) tries the "mdY"
  # then "Ymd" orders; unparseable input yields NA
  x <- as.Date(parse_date_time(x, c("mdY", "Ymd")),"%Y-%m-%d", tz = "UTC")
  return(format(x, "%Y-%m-%d"))
}
# Takes an unformatted phone number and returns it formatted as
# "XXX-XXX-XXXX" (last 10 digits of a run of >= 10 consecutive digits).
# Entries without 10 consecutive digits are replaced by `invalid` (NA by
# default). Previously the `invalid` parameter was declared but never used and
# non-matching input was returned unchanged.
CustomPhoneFormatting <- function(phone, invalid = NA)
{
  # Hyphenate the trailing 10 digits as area code / exchange / line number;
  # leading digit pairs/singles (country code etc.) are dropped.
  formatted <- gsub("(\\d{2})*(\\d{1})*(\\d{3}){1}(\\d{3}){1}(\\d{4}){1}","\\3-\\4-\\5",formatted <- phone)
  # Vectorised: positions lacking a 10-digit run get the `invalid` sentinel.
  formatted[!grepl("\\d{10}", phone)] <- invalid
  return(formatted)
}
# Names of the directories where the original files, the clean dataset,
# scripts, etc. will be stored
folderData <- "datos"
folderScripts <- "scripts"
folderRawData <- paste0(folderData, "/","rawData")
folderCleanData <- paste0(folderData, "/","cleanData")
# Create the required directories (if they do not exist yet)
if (!file.exists(folderData)) {dir.create(folderData)}
if (!file.exists(folderScripts)) {dir.create(folderScripts)}
if (!file.exists(folderRawData)) {dir.create(folderRawData)}
if (!file.exists(folderCleanData)) {dir.create(folderCleanData)}
# Install (if needed) and load the libraries we are going to use
if (!"stringi" %in% installed.packages()) install.packages("stringi", depend = TRUE)
if (!"tidyr" %in% installed.packages()) install.packages("tidyr", depend = TRUE)
if (!"data.table" %in% installed.packages()) install.packages("data.table", depend = TRUE)
if (!"lubridate" %in% installed.packages()) install.packages("lubridate", depend = TRUE)
if (!"knitr" %in% installed.packages()) install.packages("knitr", depend = TRUE)
library(stringi)
library(tidyr)
library(data.table,warn.conflicts = FALSE)
library(lubridate, warn.conflicts = FALSE)
library(knitr)
# Open the connection to the data source and download the raw CSV
# NOTE(review): read.csv2 defaults to dec="," while sep="," is forced here —
# confirm the decimal separator is what the file actually uses.
fileURL <- "https://raw.githubusercontent.com/rdempsey/dataiku-posts/master/building-data-pipeline-data-science-studio/dss_dirty_data_example.csv"
con <- file(fileURL,"r")
dataToClean <- read.csv2(con, sep = ",", header = TRUE )
close(con)
# Save a timestamped copy of the original (raw) data
originalFileName <- paste0(folderRawData,"/dirtydata_",format(Sys.time(),"%Y-%m-%d_%H-%M-%S"),".csv")
originalFileName
write.csv2(as.data.frame(dataToClean), originalFileName)
# Reorder and rename the columns (column 15 moved next to the personal fields)
newOrder <- c(1:7,15,8:14)
setcolorder(dataToClean,newOrder)
names(dataToClean) <- c("name","address","city","state","zip","phone","email",
                        "created", "work","work.address","work.city",
                        "work.state","work.zipcode","work.phone","work.email")
kable(head(dataToClean))
# Keep only the first 8 columns.
# The work-related columns are dropped because cleaning them would just repeat
# the same operations
dataToClean <- dataToClean[ ,1:8]
# Split the name column into name (first name) and surname (last name)
# First trim the surrounding whitespace
dataToClean$name <- stri_trim_both(dataToClean$name)
dataToClean <- dataToClean %>% separate("name", c("name", "surname"), " " , remove = TRUE)
# Split the address column into address and flat (apartment).
# If the address were used for reverse geocoding, the apartment or suite
# number would not be relevant.
# Como no aparecen en todos los registros los separamos para homogeneizar la columna address.
# Primero removemos los espacios a los lados y usamos la regex addressPattern
dataToClean$address <- stri_trim_both(dataToClean$address)
addresPattern <- "(?=((Apt\\.))|(Suite))"
dataToClean <- dataToClean %>% separate(address, c("address", "flat"), addresPattern, remove = TRUE)
# Separamos la columna created en created (que llevará la fecha) y created.hour (que llevará el detalle de la hora).
# No todos los registros presentan la hora, por lo que vamos a prescindir de ella. Además a priori viendo los datos
# no parece que sea algo importante para conservar.
# Primero removemos los espacios a los lados.
dataToClean$created <- stri_trim_both(dataToClean$created)
dataToClean <- dataToClean %>% separate("created", c("created", "created.hour"), " ", remove = TRUE)
# Las fechas se encuentran en dos formatos distintos mm/dd/yyyy y yyyy-mm-dd
# Para homogeneizar los datos primero vamos a eliminar los '-' y los '/' quedándonos los formatos
# mmddyyyy e yyyymmdd. Usamos una regex reemplazando las ocurrencias con ''
# También se habría podido usar la regex
# datePatttern <- "([0-9]+)-([0-9]+)-([0-9]+)|([0-9]+)\\/([0-9]+)\\/([0-9]+)"
# Usando como reemplazo los grupos encontrados
# dateReplacement <- "\\1\\2\\3\\4\\5\\6"
datePatttern <- "([-]+)|([/]+)"
dateReplacement <- ""
dataToClean$created <- gsub(datePatttern,dateReplacement,dataToClean$created)
# Aplicamos la función CustomDateFormatting a la columna created
dataToClean$created <- sapply(dataToClean$created, CustomDateFormatting)
dataToClean$created <- as.Date(dataToClean$created)
kable(head(dataToClean))
# Separamos la columna phone en phone (que llevará el número de teléfono) y ext (que llevará la extensión).
# No todos los registros presentan la extensión, por lo que vamos a prescindir de ella.
# Primero removemos los espacios a los lados.
dataToClean$phone <- stri_trim_both(dataToClean$phone)
dataToClean <- dataToClean %>% separate("phone", c("phone", "ext"), "x", remove = TRUE)
# Los teléfonos se encuentran en distintos formatos
# Para homogeneizar los datos primero vamos a eliminar todo lo que no sea un dígito
phonePatttern <- "([^0-9]+)"
dataToClean$phone <- gsub(phonePatttern,dateReplacement,dataToClean$phone)
# Aplicamos la función CustomPhoneFormatting a la columna phone
dataToClean$phone <- sapply(dataToClean$phone, CustomPhoneFormatting)
kable(head(dataToClean))
# Finalmente nos vamos a quedar únicamente con las columnas name, surname,address, city, state, zip, phone, email y created
newOrder <- c(1:3,5:6,8,10:11,4,7,9,12)
setcolorder(dataToClean,newOrder)
dataToClean <- dataToClean[ ,1:8]
dim(dataToClean)
#Nos quedamos con los registros que carecen de NA
dataToClean <- dataToClean[complete.cases(dataToClean),]
dim(dataToClean)
# A mayores vamos a filtrar a aquellos registros que en los campos phone y email tiene cadena vacía.
# No son NA pero sí están vacios.
dataToClean <- dataToClean[!(dataToClean$phone == "" | dataToClean$email == ""), ]
dim(dataToClean)
# Ordenamos el dataset por el campo created en sentido ascendente
dataToClean <- dataToClean[order(dataToClean$created), ]
# Guardamos el archivo csv de los datos procesados
outputFileName <- paste0(folderCleanData,"/cleandata_",format(Sys.time(),"%Y-%m-%d_%H-%M-%S"),".csv")
outputFileName
write.csv2(as.data.frame(dataToClean), outputFileName)
|
138da0b0380dbeba0ba174ca6848cfabb855957a
|
666aeddc20c72fc498e31c733d23bc1180baaaf2
|
/man/opt_design.Rd
|
d8a529c230f878891c0702c6217e48a0e155ccb8
|
[] |
no_license
|
MatheMax/RegReSample
|
1af5ccb90e1c9f56ac9852526659814b93182e95
|
e5613c1898a473563fcc40dadcf148c20fc5ddda
|
refs/heads/master
| 2020-04-07T16:29:02.063078
| 2018-11-27T12:56:42
| 2018-11-27T12:56:42
| 158,530,765
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,161
|
rd
|
opt_design.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimization.R
\name{opt_design}
\alias{opt_design}
\title{Compute an optimal design}
\usage{
opt_design(alpha, beta, lambda, weighted.alternative = FALSE,
delta.mcr = 0, delta.alt = 0.3, tau = 0.1, n.max = Inf)
}
\arguments{
\item{alpha}{Maximal type one error rate}
\item{beta}{Maximal type two error rate}
\item{lambda}{Two-dimensional vector with penalties for CP and ||n_2^'||_1}
\item{weighted.alternative}{Should a weighted alternative be used?}
\item{delta.mcr}{Minimal clinically relevant effect size}
\item{delta.alt}{Point alternative effect size if weighted.alternative = F,
prior mean otherwise}
\item{tau}{Standard deviation of the prior density}
\item{n.max}{Maximal sample size per stage.
Smoothing splines are used to approximate n_2 and c_2 functions.
All integrals are approximated by Boole's Newton-Cotes formula
or by the \code{R}-routine \code{integrate}.}
}
\description{
\code{opt_design} returns a design that is optimal for
specified parameters and a desired mix of expected sample size,
its variability and conditional power and its variability
}
|
e330aa37d8233f088d984ec8d5a8322b993e1f76
|
7717b280fcd6fd9343a36f04c08398b153b37e40
|
/man/step_hdoutliers.Rd
|
90105f88f6a0311bf95efc27e20e07cdb06ac20d
|
[
"MIT"
] |
permissive
|
mattsq/straystep
|
b8bb1ad0205252e3650442bf15cfa9e678cbf9c9
|
69ea1291f1bdc82abbf303447a44d8664f22ebe1
|
refs/heads/master
| 2022-12-24T19:37:12.680766
| 2020-09-23T03:19:55
| 2020-09-23T03:19:55
| 296,797,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,724
|
rd
|
step_hdoutliers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step_hdoutliers.R
\name{step_hdoutliers}
\alias{step_hdoutliers}
\title{XXXX}
\usage{
step_hdoutliers(
recipe,
...,
role = NA,
trained = FALSE,
reference_colnames = NULL,
outlier_bounds = NULL,
outlier_cutoff_threshold = 0.01,
k_neighbours = 10,
knnsearchtype = "brute",
candidate_proportion = 0.5,
threshold_sample_size = 50,
options = list(normalize_method = "none"),
skip = FALSE,
id = recipes::rand_id("hdout")
)
}
\arguments{
\item{...}{One or more selector functions to choose which
variables will be used to compute the components. See
\code{\link[=selections]{selections()}} for more details. For the \code{tidy}
method, these are not currently used.}
\item{role}{For model terms created by this step, what analysis
role should they be assigned?. By default, the function assumes
that the new principal component columns created by the original
variables will be used as predictors in a model.}
\item{options}{A list of options to the default method for
\link{XXXX}.}
\item{prefix}{A character string that will be the prefix to the
resulting new variables.}
\item{ref_dist}{placeholder}
}
\value{
An updated version of \code{recipe} with the new step
added to the sequence of existing steps (if any). For the
\code{tidy} method, a tibble with columns \code{terms} (the
selectors or variables selected), \code{value} (the
loading), and \code{component}.
}
\description{
\code{XXX} creates a \emph{specification} of a recipe step
that will ....
}
\details{
Some text here
}
\examples{
x <- 1
}
\references{
Add reference
}
\concept{dwt}
\concept{preprocessing}
\concept{projection_methods}
\keyword{datagen}
|
176fd5cfb55594a7b34056408be43b021c798210
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googlemlv1.auto/man/GoogleCloudMlV1__Capability.Rd
|
e51e633d362f9bb8f67a04e1c74d2f323b137454
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 640
|
rd
|
GoogleCloudMlV1__Capability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_objects.R
\name{GoogleCloudMlV1__Capability}
\alias{GoogleCloudMlV1__Capability}
\title{GoogleCloudMlV1__Capability Object}
\usage{
GoogleCloudMlV1__Capability(type = NULL, availableAccelerators = NULL)
}
\arguments{
\item{type}{No description}
\item{availableAccelerators}{Available accelerators for the capability}
}
\value{
GoogleCloudMlV1__Capability object
}
\description{
GoogleCloudMlV1__Capability Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\concept{GoogleCloudMlV1__Capability functions}
|
4f1d9d6e3e6b7384d7399065e101a51cac7f2aa5
|
e738305b10944199874d5220cb24d067625a049d
|
/Workshop code 2. Working with data.R
|
30533b0daa4cf1a84e08771980df3d2e53cd214c
|
[] |
no_license
|
seanchrismurphy/A-Guided-Tour-of-R
|
01334c89968716cd32ed7dbbebd045c1e0272c2b
|
b0f93f603640a389b37ebc73bbcd505099454a24
|
refs/heads/master
| 2020-03-16T13:02:50.672414
| 2018-05-09T00:54:32
| 2018-05-09T00:54:32
| 132,680,212
| 26
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,074
|
r
|
Workshop code 2. Working with data.R
|
# Now that you're familiar with the R interface, let's get our hands on some data! #
# Before we do, we'll need to load up a few packages. The base version of R comes with a lot of functionality, but over the years
# people and organisations have built modules that improve over the basics in particular areas. In particular, a concerted effort
# has been made to create a set of packages known as the 'tidyverse', designed to make the process of cleaning, plotting and analyzing
# data more straight forward, clean, and user-friendly. We'll be using this set of packages to load, manipulate, and plot our data today.
# You should already have the tidyverse package installed, but if you don't, run the below line of code:
install.packages('tidyverse')
# Once installed, packages have to be activated each time you launch R. We can do this using the library command.
library(tidyverse)
library(readr)
# Now, the last step before we load in our own data is to change our working directory. This is the default folder where R will look for
# data. In RStudio, click 'Session', 'Set working directory', and 'Choose directory'. Then navigate to the folder with the data and code for
# today's course, and select this. You'll notice some code appear in the Console window - this is the function to change the working directory,
# which you can use instead of the dropdown menu once you're more used to R.
# Now let's read in our data. We'll use the read_csv function. Notice that we save the dataset into a variable in the same fashion as we did
# with simple numbers.
manylabs <- read_csv('manylabs.csv')
# This might feel a bit anticlimactic - nothing really
# happens! Or rather, something happens - we can see a
# manylabs object appear in the environment window in the
# upper-right hand corner. This will likely feel unfamiliar,
# if you're used to data appearing front-and-center in some
# other statistics packages. But remember that in R we
# interact with our data through functions, rather than
# directly. Let's take a look at what happens when we call
# the data itself, by running the following line:
manylabs
# You'll see that we have a number of variables, represented by their names at the top of the data frame, and their data type below (e.g. <chr>).
# The most common data types we'll encounter are character (string data), integer/double (numeric data), and factor (categorical data).
# This way of viewing data will likely feel uncomfortable at first, but you'll get used to it. Until then, you can also click on the data object
# in the environment window to open up a direct view on the dataset (SPSS-style), though we won't work with the data in that form.
# Below, you can test out some functions that give you different views on a dataset:
# View the column names
colnames(manylabs)
# View the last 6 rows of the dataset (this can go up to 20)
tail(manylabs, 6)
# See some data for all the variables, flipped horizontally to fit on the screen
glimpse(manylabs)
# Get some basic descriptive statistics for the data.
summary(manylabs)
# Now we're going to get familiar with the tools you'll need
# to explore, wrangle, and clean your data in R. We'll be
# using the dplyr package within the tidyverse for this.
# Almost all the data wrangling operations you might want to
# perform can be done primarily through five functions that
# take the form of 'verbs' - actions we want to take with
# our data. These are select, filter, arrange, mutate, and
# summarize. We'll work through them using the manylabs
# dataset.
### select ###
# select allows us to extract specific columns of our data to look at, and comes with some fairly powerful 'helper' functions that let us be
# quite flexible in which columns we choose
# Like all five of these functions, the first argument you need to give select is the name of the dataset in question. Then, additional arguments
# can be used to select variables. Each extra argument is used additively to select more columns to keep (or drop, in the case of the - symbol)
# Try running the following and see what happens.
select(manylabs, age, sex, citizenship)
### Assignment and saving our progress
# One key thing about R functions is that they will almost never directly change their input (i.e. our data) unless we explicitly save the result
# using the assignment operator (<-). When we use functions like we did above, we're just viewing our data in different ways.
# For instance, you can see that the manylabs dataset hasn't actually changed as a result of our selecting a subset of variables:
manylabs
# If we wanted to save our selection, we need to use assignment, like so:
manylabs_reduce <- select(manylabs, age, sex, citizenship)
# Notice that a new object has appeared in the upper right corner of the screen - this can be useful if you're performing multiple
# steps and need to save your progress along the way - more on that later.
# Back to selecting - you can use numbers indicating the indices of columns you'd like to select
select(manylabs, 1:3)
# Using the - sign, we can drop variables instead of selecting them
select(manylabs, -age)
# We can also use helper functions like starts_with, ends_with, contains, and matches to make our selections
select(manylabs, starts_with('exp'))
# And we can combine multiple methods additively
select(manylabs, starts_with('exp'), -exprace)
# Again, as you add more select statements together, they're combined additively - all will be implemented:
select(manylabs, 1:3, age, starts_with('gambfal'))
# We can also use the special everything() helper function. This selects all the variables we haven't already specified, so adding it to the end
# of a selection will keep all variables, but bring the variables you've select to the 'front' (left) of the dataset, for easy viewing.
select(manylabs, age, sex, everything())
### Exercises
# With the manylabs dataset:
# 1. select sex, gender, and race
# 2. select the first 10 columns
# 3. Select all variables that end with 'order'
# 4. Select all variables that contain 'flag', and all variables that start with 'sysjust'
# 5. Write a command to select expgender, lab_or_online, us_or_international, anchoring1, and anchoring2. Once you've verified that it works,
# use a different select statement to achieve the same result.
### The $ operator
# A way to select a single variable, (or just useful to quickly scroll through the variables to check
# their spelling!)
manylabs$age
### filter ###
# The filter functions works to select a specific subset of the rows in a dataset. Like all five of these functions, it takes a dataset as its first
# argument, and subsequent arguments are interpreted as logical tests on which to filter the dataset.
# For instance, assume we want to look at only the rows of our dataset containing male participants. We would use the following code:
filter(manylabs, sex == 'm')
# Note that the text at the top now tells us there are only 2060 rows in this filtered dataset, down from 6344.
## Remember - when testing for equality, we have to use the double equals sign (==) because the = sign means something different in R. This
## is easy to forget, but filter will give you a helpful error message if you make a mistake:
filter(manylabs, sex = 'm')
# We can use various logical operators: | for OR, ! for NOT, & for AND
# Keep rows where sex is male or female
filter(manylabs, sex == 'f' | sex == 'm')
# Keep rows where sex is female and age is above 18
filter(manylabs, sex == 'f' & age > 18)
# Keep rows where sex is not male
filter(manylabs, sex != 'm')
# This doesn't work:
filter(manylabs, sex == 'f' | 'm')
# Has to be written like this:
filter(manylabs, sex == 'f' | sex == 'm')
filter(manylabs, age > 18 & age < 75)
# If we want to filter with multiple options, we can use the special %in% operator rather than write multiple == tests.
filter(manylabs, sex %in% c('m', 'f'))
# One way to check what values are available to filter on is to use the count function, which gives a descriptive frequency table:
count(manylabs, expgender)
# Adding extra arguments turns this into a cross-table (though more than 2 variables gets unwieldy)
count(manylabs, expgender, lab_or_online)
# To test for missing data, we use the is.na() function, since we can't ask R whether something == NA.
# Find rows where experimenter gender is missing
filter(manylabs, is.na(expgender))
# Find rows where experimenter gender is not missing - notice any commonalities?
filter(manylabs, !is.na(expgender))
### Exercise
# Filter the manylabs dataset so that:
# 1. Only male experimenters are included:
# Hint: To see what values are in the variable, try:
count(manylabs, expgender)
# 2. US citizens are removed (participants with 'US' in the citizenship variable)
# 3. Only Brazillian (BR) or italian (IT) citizens are included
# 4. Only participants who missed 2 or more explicit measures (counted in the totexpmissed variable) are included
### chaining functions together with 'pipes' ###
# So far, we've taken each step one at a time - selecting and filtering. One way to bring these functions together is to perform
# multiple separate steps, where we save the results of each function as input to the next function.
manylabs_filter <- filter(manylabs, sex == 'm')
manylabs_filter <- select(manylabs_filter, 1:10)
manylabs_filter
# However, creating intermediate datasets can get messy, especially if we just want to look at the results of our filtering/selecting,
# rather than save them.
# Another way to do this is to directly wrap one function around the other, like we saw with sqrt(sqrt(16)). For instance:
select(filter(manylabs, sex == 'm'), 1:10)
# However, that quickly gets unwieldy - imagine if we wanted to perform 3, 4, or more data cleaning steps this way!
# A better option is to chain commands together using 'pipes', which look like this: %>%.
manylabs %>%
filter(sex == 'm') %>%
select(1:10)
mydat <- manylabs %>%
filter(sex == 'm') %>%
select(1:10)
# The pipe operator can be read as 'and then', and allows us to put together operations in the order we would say them. So the above would
# read "Take the manylabs data, and then filter it to include only male participants, and then select the first ten columns. Notice that we
# no longer need to tell each individual function (i.e. filter or select) that they should use the manylabs dataset - the pipe operator
# carries the dataset through, and each function knows that it's working with the output from the previous pipe. Don't worry if this seems
# confusing at first - it will become intuitive as we continue.
### Exercise
# 1. Write a command to select only the variables that start with 'sysjust', and make sure you get the columns you'd expect
# 2. Using pipes, first select the variables that start with 'sysjust', and then filter the dataset so only scores of 4 or higher (>=)
# on sysjust2 are included. You should have a dataset with 2 328 rows and 9 columns.
### arrange ###
# The arrange operator is pretty simple. It orders dataframes according to the variables you input, breaking ties successively using extra variables.
# Apparently we have some pretty young participants in this dataset!
arrange(manylabs, age, sex)
# To sort in reverse order, we use the desc() function.
arrange(manylabs, desc(age))
# These two are equivalent
arrange(manylabs, age) %>%
filter(recruitment == 'unisubjpool') %>%
select(age, citizenship, lab_or_online, recruitment)
manylabs %>%
arrange(age) %>%
filter(recruitment == 'unisubjpool') %>%
select(age, citizenship, lab_or_online, recruitment)
### mutate ###
# So far we've looked at ways to move around and select our existing variables. But often we want to create new helpful variables. That's where
# mutate comes in. It allows us to create new variables - usually as functions of existing ones. It works similarly to the commands we've looked
# at previously.
# For the next few steps, we'll use a subset of the manylabs dataset, to make our newly calculated columns easier to see
manylabs_r <- select(manylabs, 1:4, sunkDV, starts_with('flagdv'))
manylabs_r
# To create a new variable, we give it a name (we can use quotation marks here, but they're not necessary) and specify the operation to create it.
# For instance, if we wanted to reverse-score the flagdv1 variable, we could do so like this:
mutate(manylabs_r, flagdv1_rev = 8 - flagdv1) %>%
select(starts_with('flag'))
# We can also calculate a set of new variables at once:
mutate(manylabs_r, flagdv1_rev = 8 - flagdv1,
flagdv3_rev = 8 - flagdv3,
flagdv5_rev = 8 - flagdv5)
# Remember that the new variable won't appear in the dataset unless we assign it:
manylabs_r <- mutate(manylabs_r, flagdv1_rev = 8 - flagdv1)
# Here are some examples of using the if_else function to create TRUE/FALSE variables indicating
# whether each participant meets a certain criterion. This can be useful for later filtering.
manylabs_r <- manylabs_r %>%
mutate(adult_status = if_else(age >= 18, 'adult', 'junior'))
# We can now count how many participants are 18 or over
count(manylabs_r, adult_status)
# And of course, can do all that within a single piped statement
manylabs_r %>%
mutate(adult_status = if_else(age >= 18, 'adult', 'junior')) %>%
count(adult_status)
### Exercise
# 1. Using manylabs_r, create a variable, called age_months, corresponding to each participant's age in months.
# 2. Reverse-score sunkDV (it's on a 1 to 9 scale)
### summarise ###
# the summarise function is similar to mutate in that it calculates values, but rather than return a variable with the same number of rows as
# the original dataset, it calculates summaries (i.e. averages, medians, counts). By default, it will return only the summary variables, rather
# then the entire dataset, like mutate.
summarise(manylabs, age_av = mean(age, na.rm = TRUE))
# Notice the na.rm = TRUE argument - without this, if there are any NA values, the result of most summary functions will also be NA. Specifying this
# argument instead means we ignore them as if they are not there.
# While summarise can be useful to calculate summary statistics across an entire dataset, it becomes much more powerful when we combine it
# with group_by and pipes. group_by takes a dataframe, and groups it into sub-dataframes for each unique value of the grouping variable.
# After this, summarise will give us results by-group, allowing for us to easily perform a lot of powerful grouped calculations.
# Let's take the location variable, which gives us the site where the study was run. Grouping by location, we can see how age varies across testing
# sites to better characterise the sample.
manylabs %>%
group_by(location) %>%
summarise(mean_age = mean(age, na.rm = TRUE))
# We can combine this with arrange to view the sites with the youngest or oldest participants. Remember, we can use the variable we just calculated with mutate
# as input to arrange further down the pipe.
manylabs %>%
group_by(location) %>%
summarise(mean_age = mean(age, na.rm = TRUE)) %>%
arrange(desc(mean_age))
# A little trick if we want to be able to see all of the rows, is to add a function at the end of our pipe that converts from the specialised data format
# (tibble) we're using in dplyr, to the basic r data.frame. Tibbles refrain from printing more than 20 rows to stop us accidentally flooding our screen with
# data, but we don't always need that help!
manylabs %>% group_by(location) %>%
summarise(mean_age = mean(age, na.rm = TRUE)) %>%
arrange(desc(mean_age)) %>%
data.frame()
# We can add multiple summary functions together to get a full table of descriptive statistics, grouped by any characteristics we want!
# The special n() function will get the size of each group (the count function is actually a shortcut to this). This comes in handy for giving context to
# means that might be driven by small sample sizes.
manylabs %>%
group_by(location) %>%
summarise(mean_age = mean(age, na.rm = TRUE),
sd_age = sd(age, na.rm = TRUE),
sample_size = n()) %>%
arrange(desc(mean_age))
# The benefit of functions like this becomes clear when you want to look at summary statistics across multiple variables.
# Just change location to something else (like lab_or_online) and you've got a completely different table.
manylabs %>%
group_by(lab_or_online) %>%
summarise(mean_age = mean(age, na.rm = TRUE),
sd_age = sd(age, na.rm = TRUE),
sample_size = n()) %>%
arrange(desc(mean_age))
### Exercises
# Grouping by location, as in the first example above
# 1. Find the average system justifiction scale score (Sysjust) for each location
# 2. Add a command to also get the standard deviation of system justification at each location
# 3. arrange locations in order of highest to lowest system justification (remember the desc function)
# 4. Change the group_by command in your code to use sex instead of location. Notice how easily you can get different summaries!
### Advanced exercise
### Bringing everything together - this advanced example calculates some new descriptives not already in the dataset
### (whether participants are adult, male, and/or white), removes online testing sites, groups by loction, calculates
### the mean for each location on our new statistics, and then sorts them by gender. This allows is to quickly get a
### feel for the demographics at different locations. Feel free to play with this example and adapt it to your own data!
manylabs %>%
mutate(adult = if_else(age >= 18, 1, 0),
male = if_else(sex == 'm', 1, 0),
white = if_else(race == 6, 1, 0)) %>%
filter(lab_or_online == 0) %>%
group_by(location) %>%
select(adult, male, white) %>%
summarise_all(function(x) round(mean(x, na.rm = TRUE), 2)) %>%
arrange(desc(male)) %>%
as.data.frame()
|
e43fcc584698ad0cc207586bb9d15cf382b4f558
|
0a6c0442a585875b2e7d5edf738b98e4abae4a14
|
/plot3.R
|
7631e54178803ffc780fa33ad238764904058795
|
[] |
no_license
|
gesserta/ExData_Plotting1
|
665e611de64c7814385622a69e3178c3c20092cc
|
327d7900aab833ce908a57dd05225ccb770f7846
|
refs/heads/master
| 2020-12-25T09:17:48.788209
| 2014-06-06T22:48:48
| 2014-06-06T22:48:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 995
|
r
|
plot3.R
|
plot3 <- function() {
  ## Plot the three energy sub-metering series for 2007-02-01/02 and save the
  ## figure to plot3.png (480x480). Expects household_power_consumption.txt in
  ## the working directory.
  ## Fix: declare "?" as the missing-value marker up front — the raw UCI file
  ## uses "?" for missing readings, and without na.strings the later
  ## as.numeric() coercions emit "NAs introduced by coercion" warnings.
  epc <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    stringsAsFactors = FALSE, na.strings = "?")
  # Combine Date and Time into a single POSIXlt timestamp for the x-axis.
  epc$DateTime <- paste(epc$Date, epc$Time)
  epc$DateTime.c <- strptime(epc$DateTime, format = "%d/%m/%Y %H:%M:%S")
  epc$Date <- as.Date(as.character(epc$Date), "%d/%m/%Y")
  # Keep only the two days of interest.
  epc <- subset(epc, as.Date(Date) == '2007-02-01' | as.Date(Date) == '2007-02-02')
  epc$Global_active_power <- as.numeric(epc$Global_active_power)
  epc$Sub_metering_1 <- as.numeric(epc$Sub_metering_1)
  epc$Sub_metering_2 <- as.numeric(epc$Sub_metering_2)
  epc$Sub_metering_3 <- as.numeric(epc$Sub_metering_3)
  # Columns 7:9 of the raw file are Sub_metering_1..3, used for the legend.
  plot(epc$DateTime.c, epc$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(epc$DateTime.c, epc$Sub_metering_2, col = "red")
  lines(epc$DateTime.c, epc$Sub_metering_3, col = "blue")
  legend("topright", names(epc)[7:9], lty = "solid", col = c("black", "red", "blue"))
  # Copy the on-screen plot to a PNG file.
  # NOTE(review): dev.copy can shift legend/text sizing relative to the screen
  # device; opening png() before plotting is the more reliable pattern.
  dev.copy(png, file = "plot3.png", width = 480, height = 480)
  dev.off()
}
|
2e1c285cee24e25e021c0429be20d9e0e936a845
|
bdb594aad445bb6826d2fed16af0bdc355c80da8
|
/pathway.analysis.R
|
ffaa676716b3f76b7c0893f2e0c38167381f8b49
|
[
"MIT"
] |
permissive
|
lanagarmire/pretermBirth_metabolomics
|
ebe67342655fe9d79df33d9422a89a4727f69490
|
eec536ba5c4c3a35b29b913a852472d5b39ebc00
|
refs/heads/main
| 2023-06-18T23:10:53.137138
| 2021-07-15T12:45:52
| 2021-07-15T12:45:52
| 386,132,331
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,455
|
r
|
pathway.analysis.R
|
# Pathway-level analysis of preterm-birth metabolomics data using pathifier.
# NOTE(review): rm(list = ls()) and a hard-coded absolute setwd() path are
# discouraged in shared scripts; prefer project-relative paths.
rm(list = ls())
wkdir <- 'C:/Users/evely/Google Drive/summer intern/Preterm birth metabolomics project'
setwd(wkdir)
# Run pathifier (parallelized via foreach/doParallel below).
library(pathifier)
library(foreach)
library(doParallel)
organize_pathifier_result = function(pds, re_scale = T) {
  # Collect one score vector per pathifier result; each element of `pds`
  # carries a single named entry in its $scores list. When re_scale is TRUE
  # every vector is divided by its own maximum so scores fall in (0, 1].
  scaled <- lapply(pds, function(pd) {
    vals <- as.vector(pd$scores[[1]])
    if (re_scale) vals <- vals / max(vals)
    vals
  })
  names(scaled) <- vapply(pds, function(pd) names(pd$scores), character(1))
  # One column per pathway, one row per sample.
  as.data.frame(scaled)
}
comparedensity <- function(metabolitename = 'DI(2-ETHYLHEXYL)PHTHALATE', oridat = newdat){
  # Compare the distribution of a single metabolite between Case and Control
  # samples and return the FDR-adjusted Wilcoxon p-value.
  #
  # Args:
  #   metabolitename: column name of the metabolite to test.
  #   oridat: data frame with metabolite columns plus a Label column whose
  #           values are "Case"/"Control".
  # Returns: the $p.adj value reported by wilcox_test().
  cases <- subset(oridat, Label == 'Case')
  ctrls <- subset(oridat, Label == 'Control')
  casemet <- cases[,metabolitename]
  ctrlmet <- ctrls[,metabolitename]
  # NOTE(fix): the original built an intermediate `dat` data frame here that
  # was never used; it has been removed.
  wilp <- wilcox_test(casemet, ctrlmet, p.adjust.method = "fdr")$p.adj
  return(wilp)
}
# Load the quantile-normalized metabolite measurements and the
# metabolite-to-pathway mapping.
meta = readRDS("./quant.metabolitemeasurements.rds")
new.path_match = read.csv("./pathways.csv", header=T)
# Build the pathway -> member-metabolites list, keeping only pathways with
# more than two member metabolites.
# NOTE(fix): the original assigned into `pathways2`, which was never
# initialized, while everything downstream consumes `pathways`; it also
# matched pathway names with grep(), which breaks on names containing regex
# metacharacters (e.g. parentheses). Exact equality is used instead.
pathways = list()
for(i in unique(new.path_match$Pathway)){
  members <- which(new.path_match$Pathway == i)
  if(length(members) > 2){
    pathways[[as.character(i)]] = as.character(new.path_match$Metabolites[members])
  }
}
# Pathifier expects features x samples: drop the Label column and transpose.
sudo_data = t(meta[-1])
sudo_pathways = list()
sudo_pathways$genesets = pathways
sudo_pathways$geneset.names = names(pathways)
# Flag Control samples as "normal" reference samples.
normals = rep(TRUE, nrow(meta))
normals[meta$Label == "Case"] = FALSE
# Score each pathway in parallel on 7 workers; errors are captured per
# pathway via .errorhandling = "pass" and filtered out below.
cl <- makePSOCKcluster(7)
registerDoParallel(cl)
pathifier_result = foreach(i = 1:length(sudo_pathways$genesets),
                           .packages = "pathifier",
                           .errorhandling = "pass") %dopar% {
  quantify_pathways_deregulation(
    sudo_data,
    rownames(sudo_data),
    sudo_pathways$genesets[i],
    sudo_pathways$geneset.names[i],
    normals = normals,
    min_exp = -Inf,
    attempts = 10
  )
}
stopCluster(cl)
# Keep only successful results (error conditions have length 2:
# message + call).
new.pathi.out = list()
j=1
for(i in 1:length(pathifier_result)){
  if(length(pathifier_result[[i]]) != 2){
    new.pathi.out[[j]] = pathifier_result[[i]]
    j = j+1
  }
}
organize_pathifier_result = function(pds,re_scale=T) {
  # (Duplicate definition retained from the original script; it simply
  # re-binds the earlier identical helper.) Builds a data frame with one
  # column per pathway score vector, optionally rescaled to (0, 1] by the
  # per-pathway maximum.
  n <- length(pds)
  score.list <- vector("list", n)
  path.names <- character(n)
  for (k in seq_len(n)) {
    v <- as.vector(pds[[k]]$scores[[1]])
    if (re_scale) {
      v <- v / max(v)
    }
    score.list[[k]] <- v
    path.names[k] <- names(pds[[k]]$scores)
  }
  names(score.list) <- path.names
  as.data.frame(score.list)
}
# Collect the per-sample pathway deregulation scores and label rows by sample.
scores_pathifier = organize_pathifier_result(new.pathi.out)
rownames(scores_pathifier) = rownames(meta)
# Merge with externally computed pathway scores (samples x pathways).
pathwayscore = read.table("../pathscores.txt",sep = "\t", row.names = 1, header=T)
pathwayscore = t(pathwayscore)
final.pathscores = cbind(pathwayscore, scores_pathifier[,-1])
#final.pathscores = final.pathscores[,-2]
final.pathscores$Label = meta$Label
## wilcox. diff. test
# NOTE(review): the loop runs over columns 2:ncol, which includes the Label
# column itself (the last column) -- confirm this is intended.
pvals = matrix(NA, nrow=(ncol(final.pathscores)-1),ncol=1)
for(i in 2:ncol(final.pathscores)) {
pvals[(i-1),1] = comparedensity(metabolitename = colnames(final.pathscores)[i],
oridat = final.pathscores)
}
rownames(pvals) = colnames(final.pathscores)[-1]
#save(final.pathscores, file= "./final.pathscores.RData")
# Rank pathways by p-value.
ord.pval = pvals[order(pvals),]
names(ord.pval)= rownames(pvals)[order(pvals)]
# Keep the significantly different pathways for the heatmap.
sigdat = final.pathscores[,c("Label","Cell.signaling","Fatty.acid.metabolism",
"Lipid.metabolism",
"Lipid.peroxidation","Lipid.transport")]
datanno <- data.frame(group = meta$Label, stringsAsFactors = FALSE)
row.names(datanno) <- row.names(sigdat)
datanno$group <- factor(datanno$group, levels = c('Case', 'Control'), ordered = TRUE)
newmat <- t(sigdat[-1])
grp_col = list(group = c('Case'= "#F8766D", 'Control' = "#00BFC4"))
# Row-scaled heatmap, cases first; gaps_col = 31 separates the 31 cases
# from the controls.
pdf("final.heatmap_sig_pathway.pdf",10,7)
rownames(newmat) = gsub(".", " ", rownames(newmat), fixed = T)
pheatmap(newmat[,order(meta$Label,decreasing = T)], annotation_col = datanno,
color = colorRampPalette(colors = c('blue', 'black', 'yellow'))(100),
show_colnames = FALSE, cluster_cols = F,
annotation_colors = grp_col[1],
scale = 'row', show_rownames = T,gaps_col = 31)
dev.off()
# Drop the 4th pathway row before the second (annotated) heatmap.
newmat = newmat[-4,]
library(grid)
library(scales)
library(gtable)
# pheatmap2() is a local variant (sourced below) that returns the color
# matrix used to reconstruct per-cell values for the barplot.
source("./pheatmat2.r")
text = pheatmap2(newmat[,order(meta$Label,decreasing = T)], annotation_col = datanno,
color = colorRampPalette(colors = c('blue', 'black', 'yellow'))(100),
show_colnames = FALSE, cluster_cols = F,
main = 'Wilcox. Diff. Pathways', annotation_colors = grp_col[1],
scale = 'row', show_rownames = T,gaps_col = 31)
## barplot for figure 3A heatmap
# Invert the color mapping: map each cell's heatmap color back to its
# (row-scaled) numeric value via the shared 100-color palette.
vals = seq(-6,6, length.out = 100)
colors = colorRampPalette(colors = c('blue', 'black', 'yellow'))(100)
vals.mat = matrix(NA, nrow= nrow(text), ncol = ncol(text))
for(i in 1:nrow(text)) {vals.mat[i, ] = vals[match(text[i,], colors)] }
rownames(vals.mat) = rownames(text)
colnames(vals.mat) = colnames(text)
# NOTE(review): column 31 is included in both ranges here (1:31 and 31:100);
# if there are 31 cases the control slice should presumably be 32:100 --
# confirm before reuse.
case.means = rowMeans(vals.mat[,1:31])
ctrl.means = rowMeans(vals.mat[,31:100])
barinput = data.frame(values =c(case.means, ctrl.means),
names = c(rownames(vals.mat), rownames(vals.mat)),
group = c(rep("Case", length(case.means)), rep("Control", length(ctrl.means))))
path_levels <- rownames(text)
barinput$names <- factor(barinput$names, levels =rev(path_levels))
library(ggplot2)
pdf("final2.heatmap_sig_pathway.barplot.pdf",15,10)
ggplot(data = barinput) + geom_bar(aes(x=names,y=values,fill=group),
stat="identity",position="identity") +
scale_y_continuous() +coord_flip() +
theme(text = element_text(size=15)) + xlab("") +ylab("")
dev.off()
## get data for fig.3e
setwd("C:/Users/evely/Google Drive/summer intern/Preterm birth metabolomics project/quantile norm data analysis")
new.path_match = read.csv("./pathways.csv", header=T)
sig.pathway = read.csv("./edges_in_pathway.csv")
load("limma.final.sigdat.38.Rdata")
sig.pathway = unique(sig.pathway$Pathway)
sig.pathway = as.character(sig.pathway)
# Build a metabolite->pathway edge table for the network figure.
# NOTE(review): the edge matrix size (166) is hard-coded -- it must be at
# least the total number of (metabolite, pathway) pairs.
edges = matrix(NA, nrow=166, ncol=2)
j=1
for(i in 1:length(sig.pathway)){
temps = intersect(colnames(final.sigdat),
new.path_match$Metabolites[which(new.path_match$Pathway==sig.pathway[i])])
edges[j:(j+length(temps)-1), 1]=temps
edges[j:(j+length(temps)-1), 2]=rep(sig.pathway[i], length(temps))
j = j+length(temps)
}
# Harmonize a pathway name, drop duplicate edges, and export nodes/edges.
edges[which(edges[,2]=="Lipid metabolism pathway"),2] = "Lipid metabolism"
edges = unique(edges)
colnames(edges) = c("Metabolites", "Pathway")
nodes = c(edges[,1],edges[,2])
nodes = unique(nodes)
write.csv(edges, file="new.edges_in_pathway.csv")
write.csv(nodes, file = "new.nodes_inpathway.csv")
|
9567e74f3e7d88911be148d38b9ffe1172580bf7
|
ca3cee27c33debd51d59aa5d0a98bf3c2cefe4de
|
/temp/tsample-E-codes-dump.R
|
bb65528e33c4e9fc6cdcbc3d853a556e62152ca8
|
[] |
no_license
|
zmdg11/wkFocus
|
132294c28f27ad10d06fe6fb2c3553a1756ac6a8
|
e42d961b918b2d5aa0248adf0b4cd7e8b94708ef
|
refs/heads/master
| 2021-04-26T23:23:34.990177
| 2018-03-10T20:26:49
| 2018-03-10T20:26:49
| 123,984,811
| 0
| 0
| null | 2018-03-07T18:47:53
| 2018-03-05T22:05:44
|
HTML
|
UTF-8
|
R
| false
| false
| 322
|
r
|
tsample-E-codes-dump.R
|
# Inspect intercoder disagreements: rows whose code in agree_df$d is "E".
err_inx <- which(agree_df$d == "E")
err_inx
# Timestamps of the disagreeing rows.
err_t <- agree_df$t[err_inx]
err_t
# NOTE(review): t() on a list yields a 1-row matrix; presumably agree_list
# is data-frame-like here -- confirm its structure upstream.
tmp <- t(agree_list)
tmp <- as.data.frame(tmp)
tmp$t <- as.character(tmp$t)
err_t <- as.character(err_t)
# Keep only the disagreeing timestamps in the combined and per-coder data.
err_list <- filter(tmp, t %in% err_t)
c1_err <- filter(coder1_df, t %in% err_t)
c1_err
c2_err <- filter(coder2_df, t %in% err_t)
c2_err
|
72410339aa647dfb2b82529a1972a592bc30a6ac
|
840944dacec0eb78b5989a2d2e4f69898ac17967
|
/R/dplyr_custom_functions.R
|
3b473859cb3d52bae84e5a1c9737c38266f7eb10
|
[
"MIT"
] |
permissive
|
Sorenson-Impact/sorensonimpact
|
e5104516366aca205f9f5c7dccf8a23487006bca
|
78796d0a720037a866160ca62d8734d48a2aaff3
|
refs/heads/master
| 2021-11-13T16:06:01.147657
| 2021-11-04T16:40:13
| 2021-11-04T16:40:13
| 108,036,549
| 12
| 7
| null | 2020-01-28T18:02:53
| 2017-10-23T20:36:58
|
R
|
UTF-8
|
R
| false
| false
| 5,529
|
r
|
dplyr_custom_functions.R
|
#' Extract duplicate rows
#' @description
#' \lifecycle{defunct}
#' Extract all rows with duplicated values in the given columns
#' @importFrom magrittr "%>%"
#' @param ... Columns to evaluate for duplication. Works via \code{group_by()}.
#' @return Filtered dataframe with duplicates in given columns
#' @examples
#' \dontrun{
#' mtcars %>% duplicates(mpg)
#' }
#' @export
duplicates <- function(data, ...) {
# deprecate_stop() always raises an error, so the function is defunct and
# everything below this line is unreachable (kept only for reference).
lifecycle::deprecate_stop(when = "0.0.1.9034", what = "duplicates()", with = "janitor::get_dupes()")
columns <- rlang::enquos(...)
data %>%
dplyr::group_by(!!!columns) %>%
dplyr::filter(dplyr::n() > 1) %>%
dplyr::ungroup() %>%
dplyr::arrange(!!!columns)
}
#' Sum selected columns by row
#' @description
#' \lifecycle{experimental}
#' Row-wise sum of a tidy-selected set of columns, appended as a new column,
#' without resorting to the (slow) \code{rowwise()} idiom.
#' @importFrom magrittr "%>%"
#' @param ... Columns to sum.
#' @param sum_col Name of the sum column. Defaults to "sum".
#' @param na.rm Remove NAs? Passed on to \code{rowSums()}.
#' @return The input data frame with one extra column of row-wise sums.
#' @examples
#' \dontrun{
#' cars %>% sum_rowwise(speed, dist, na.rm = T, sum_col = "mysum"))
#' }
#' @export
sum_rowwise <- function(data, ..., sum_col = "sum", na.rm = FALSE) {
  selected <- rlang::enquos(...)
  # Compute the sums on the selected subset, then attach them to the
  # original data so all input columns are preserved.
  sums <- data %>%
    dplyr::select(!!!selected) %>%
    dplyr::transmute(!!sum_col := rowSums(., na.rm = na.rm))
  dplyr::bind_cols(data, sums)
}
#' Count the NAs in each column
#' @description
#' \lifecycle{maturing}
#' Count the missing values in every column of a data frame.
#' @importFrom magrittr "%>%"
#' @return A one-row tibble with the NA count for each column.
#' @export
col_sum_na <- function(data) {
  # Sum the NA indicator per column; map_dfc keeps the column names.
  data %>%
    purrr::map_dfc(~ sum(is.na(.x)))
}
#' Generate a frequency tibble
#' @description
#' \lifecycle{defunct}
#' Generate a frequency table with marginal values
#' @importFrom magrittr "%>%"
#' @param rows The primary rows of the table (use groups for additional)
#' @param cols The columns of the table
#' @param ... Additional grouping variables that will subdivide rows.
#' @return A tibble
#' @export
freq_tibble <- function(data, rows, cols, ...) {
# deprecate_stop() always errors, so this function is defunct; the tidy-eval
# implementation below is unreachable and retained only for reference.
lifecycle::deprecate_stop(when = "0.0.1.9034", what = "freq_tibble()", with = "janitor::tabyl()")
rows <- rlang::enquo(rows)
cols <- rlang::enquo(cols)
groups <- rlang::enquos(...)
# Ungrouped case: spread counts, add a row-wise Total column and a final
# Total row of column sums.
if(length(groups) == 0) {
data %>%
dplyr::count(!!rows, !!cols) %>%
tidyr::spread(!!cols, n, fill = 0) %>%
dplyr::mutate(Total := rowSums(dplyr::select(., -!!rows))) %>%
dplyr::bind_rows(dplyr::bind_cols(!!rlang::quo_name(rows) := "Total", dplyr::summarize_if(., is.numeric, sum)))
}
# Grouped case: same idea, but with per-group Subtotal rows plus a grand
# Total row.
else{
groupnum <- data %>% dplyr::distinct(!!!groups) %>% nrow()
data %>%
dplyr::count(!!rows, !!cols, !!!groups) %>%
tidyr::spread(!!cols, n, fill = 0) %>%
dplyr::mutate(Total := rowSums(dplyr::select(., -!!rows, -c(!!!groups)))) %>%
dplyr::group_by(!!!groups) %>%
dplyr::bind_rows(dplyr::bind_cols(!!rlang::quo_name(rows) := rep("Subtotal", groupnum), dplyr::summarize_if(., is.numeric, sum)),
dplyr::bind_cols(!!rlang::quo_name(rows) := "Total", dplyr::summarize_if(dplyr::ungroup(.), is.numeric, sum)))
}
}
# unmix <- function(data, col) {
# col <- rlang::enquo(col)
#
# numname <- paste(quo(col), "num", sep = "_")
# charname <- paste(quo_name(col), "char", sep = "_")
#
#
#
# data %>%
# mutate(numname = as.numeric(!!col),
# charname = case_when(is.na(!!quo(numname)) ~ !!enquo(numname)))
# }
# unmix(x, fu)
#' Tibble Preview
#' @description
#' \lifecycle{experimental}
#' Print a random sample of rows from a tibble with no columns hidden.
#' @importFrom magrittr "%>%"
#' @param rows Number of rows to sample (default 10).
#' @return A preview of a tibble.
#' @export
tp <- function(data, rows = 10) {
  sampled <- dplyr::sample_n(data, size = rows)
  # n = Inf / width = Inf force the tibble printer to show everything.
  print(sampled, n = Inf, width = Inf)
}
#' Ordered Factor case_when()
#' @description
#' \lifecycle{experimental}
#' Can replace `case_when()` syntax and outputs an ordered factor in the same order as the cases, useful for meaningful ordering in plots and tables. This is because for `case_when()` the arguments are evaluated in order, so you must proceed from the most specific to the most general. Tables and plots will therefor be ordered by the evaluation order.
#' @param ... A sequence of two-sided formulas. See ?dplyr::case_when for details
#' @return An ordered factor vector of length 1 or n, matching the length of the logical input or output vectors, with the type (and attributes) of the first RHS. Inconsistent lengths or types will generate an error.
#' @importFrom magrittr "%>%"
#' @export
fct_case_when <- function(...) {
# Capture the unevaluated call so the RHS of each `LHS ~ RHS` formula can be
# read off positionally; args[-1] drops the function name itself.
args <- as.list(match.call())
levels <- sapply(args[-1], function(f) f[[3]]) # extract RHS of formula
# Drop any NA level (e.g. a `TRUE ~ NA` fall-through case).
levels <- levels[!is.na(levels)]
# Evaluate case_when() normally, then impose the RHS order as factor levels.
ordered(dplyr::case_when(...), levels=levels)
}
#' Remove variables from tibble
#' @description
#' \lifecycle{maturing}
#' This is a simple negation of `dplyr::select`.
#' @param .data A data frame, data frame extension (e.g. a tibble), or a lazy data frame (e.g. from dbplyr or dtplyr). See Methods, below, for more details.
#' @param ... <tidy-select> One or more unquoted expressions separated by commas. Variable names can be used as if they were positions in the data frame, so expressions like x:y can be used to select a range of variables.
#' @return An object of the same type as .data, with the specified columns removed.
#' @importFrom magrittr "%>%"
#' @export
deselect <- function(.data, ...) {
# Negate the whole tidy-selection so every listed column is dropped.
dplyr::select(.data, -c(...))
}
|
e097de43a7ee239da956affc4c532a2aafc5312b
|
52a6cea02ee8ac8c53e1049a1df8c31494aaadd0
|
/perceptron_demo.R
|
c323e293862a57c5e10a9966e3ff940233e24e66
|
[
"MIT"
] |
permissive
|
ControlNet/ml-algorithms
|
ccd8ed592e8dfa90ca0a15b9f4aa7b1843e07cf0
|
16e37eae032250ecda7a12d84839d5ad72753635
|
refs/heads/main
| 2023-04-14T14:59:06.459439
| 2021-04-16T17:58:02
| 2021-04-16T17:58:02
| 358,675,339
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,789
|
r
|
perceptron_demo.R
|
library(MASS) # generates multivariate Gaussian samples
library(ggplot2)
library(reshape2)
# Perceptron() and its fit/predict methods are defined in this sourced file.
source("perceptron.R")
## Generative parameters
c0 <- '+1'; c1 <- '-1' # class labels
mu0 <- c(4.5, 0.5); p0 <- 0.60
mu1 <- c(1.0, 4.0); p1 <- 1 - p0
sigma <- matrix(c(1, 0, 0, 1), nrow=2, ncol=2, byrow = TRUE) # shared covariance matrix
sigma0 <- sigma; sigma1 <- sigma
### an example of nonshared covariance matrices
#sigma0 <- matrix(c(0.2, 0.2, 0.2, 0.2), nrow=2, ncol=2, byrow = TRUE); sigma1 <- matrix(c(1, 0, 0, 1), nrow=2, ncol=2, byrow = TRUE)
## Initialization
set.seed(123)
N <- 1000
data <- data.frame(x1=double(), x2=double(), label=factor(levels = c(c0,c1))) # empty data.frame
## Generate class labels (Step 1)
data[1:N,'label'] <- sample(c(c0,c1), N, replace = TRUE, prob = c(p0, p1))
## calculate the size of each class
N0 <- sum(data[1:N,'label']==c0); N1 <- N - N0
## Sample from the Gaussian distribution according to the class labels and statistics. (Steps 2 & 3)
data[data[1:N,'label']==c0, c('x1', 'x2')] <- mvrnorm(n = N0, mu0, sigma0)
data[data[1:N,'label']==c1, c('x1', 'x2')] <- mvrnorm(n = N1, mu1, sigma1)
## Split data to train and test datasets (50/50 random split)
train.len <- round(N/2)
train.index <- sample(1:N, train.len, replace = FALSE)
train.data <- data[train.index, c('x1', 'x2')]
test.data <- data[-train.index, c('x1', 'x2')]
train.label <- data[train.index, 'label']
test.label <- data[-train.index, 'label']
# Initialization
eta <- 0.01 # Learning rate
epsilon <- 0.001 # Stopping criterion
tau.max <- 100 # Maximum number of iterations
Phi <- as.matrix(cbind(1, train.data))
# NOTE(review): `T` shadows R's TRUE shorthand for the rest of the session;
# consider renaming (e.g. to `targets`). eval(parse(...)) just turns the
# '+1'/'-1' label strings into numeric +1/-1.
T <- ifelse(train.label == c0, eval(parse(text=c0)),eval(parse(text=c1))) # Convention for class labels
# Train a single-sample (batch_size = 1) perceptron, tracking test error and
# weights at every step for the plots below.
perceptron <- Perceptron()$fit(train.data, T, 1, batch_size = 1, validation_data = list(
  x = test.data, y = ifelse(test.label == c0, eval(parse(text=c0)),eval(parse(text=c1)))), learning_rate = 0.01,
  history_per_step = TRUE, shuffle = FALSE)
# perceptron$loss(test.data, ifelse(test.label == c0, eval(parse(text=c0)),eval(parse(text=c1))))
# perceptron$history
# Test error and weight trajectories over training steps.
ggplot(perceptron$history, aes(x= step, y=test_error)) + geom_line()
ggplot(data = perceptron$history, aes(x = step)) +
  geom_line(aes(y = w1, color = "w1")) +
  geom_line(aes(y = w2, color = "w2")) +
  geom_line(aes(y = b, color = "b")) +
  theme_minimal()
# Training points marked by true class (+/-), colored by whether the
# prediction matches, with the learned decision boundary overlaid.
ggplot(data=as.data.frame(Phi), aes(x=x1, y=x2, label=ifelse(T!=c1, '+', '-'),
                                    color = factor(perceptron$predict(train.data, perceptron$step_activation) == T))) +
  geom_text(alpha=0.75) +
  scale_color_discrete(guide = guide_legend(title = 'Prediction'))+
  geom_abline(intercept=perceptron$b[1], slope=-perceptron$w[1]/perceptron$w[2]) +
  ggtitle('Training Dataset and Decision Boundary') +
  theme_minimal()
|
a16b51446d55e4c19ea6cde0fdfc3659093a3359
|
4f8a5e8a24267857ea2cb81a514f1709eee18a4f
|
/R/svg.close.R
|
9452dda428c89dc58db37923cf39d34e052c6eb2
|
[] |
no_license
|
kashenfelter/svgViewR
|
c68518377bc9cf43159b298141ffbc21875c7b07
|
cf98a1d084ac04f8355ad81252d0943fdedea80e
|
refs/heads/master
| 2021-08-06T04:44:32.680098
| 2017-11-03T03:19:40
| 2017-11-03T03:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
svg.close.R
|
svg.close <- function(){
  # Close the currently open svgViewR connection.
  #
  # svg.new() stores the active connection descriptor in the "svg_glo_con"
  # option; error out if it was never opened.
  if(is.null(getOption("svg_glo_con"))) stop("svg.new has not been called yet.")
  # Get current connection
  file <- getOption("svg_glo_con")
  # Close, preserving the connection's layer/directory/debug settings
  svgviewr.new(file=file, conn.type='close', layers=file$layers, fdir=file$fdir, debug=file$debug)
  # NOTE(fix): the original ended with `ret = NULL`, relying on assignment's
  # invisible return; invisible(NULL) states the intent directly.
  invisible(NULL)
}
|
f311d6903edf0ec5ddfa48188f216c02504ad0a6
|
158754ef260ab3521fe71c70d391e0337f69f37a
|
/man/dgnorm.Rd
|
49a33066684b90933390bff46c63d4d29e779684
|
[] |
no_license
|
bmasch/salmonIPM
|
0ac41b05a0680a522f206f883d8e7966cf4c5038
|
374cd56323d1309fc0101372cef58abf9f6dca20
|
refs/heads/master
| 2021-05-14T18:02:40.901920
| 2017-12-04T19:11:53
| 2017-12-04T19:11:53
| 116,062,168
| 4
| 0
| null | 2018-01-02T22:09:35
| 2018-01-02T22:09:35
| null |
UTF-8
|
R
| false
| true
| 474
|
rd
|
dgnorm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dgnorm.r
\name{dgnorm}
\alias{dgnorm}
\title{Density function for generalized normal (Subbotin) distribution}
\usage{
dgnorm(x, mu = 0, sigma = 1, shape = 2)
}
\arguments{
\item{x}{variate}
\item{mu}{mean of the distribution}
\item{sigma}{standard deviation of the distribution}
\item{shape}{shape of the distribution}
}
\description{
Density function for a generalized normal distribution.
}
|
6f1236cdaef8781d5a59d256bbf2a794f086467b
|
079fb9926646dfb61bd59bb66388ff68b15fe4bb
|
/shared_code/heatmap.r
|
0f3c4f1d8f0774ddb49965998f9bcdee0d928df9
|
[] |
no_license
|
BioinformaticsArchive/chen_elife_2013
|
ed256f0574061349a2d9dcb22fe35f6b4b2aaa74
|
e3f04dccf2ce6b157f2721c536ffc1f0dc539b2f
|
refs/heads/master
| 2020-04-05T23:46:37.818338
| 2013-08-15T21:25:35
| 2013-08-15T21:25:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,596
|
r
|
heatmap.r
|
library(GenomicRanges)
library(lattice)
# Given a data frame of equal-width intervals (columns chr, region_start,
# region_end), return a matrix of per-base coverage values, one row per
# interval, with row names taken from id.column.
read_matrix <- function(loc.df, cov.object, id.column="fb_gene_id") {
  # All intervals must span the same width or rows would be ragged.
  widths <- unique(loc.df$region_end - loc.df$region_start)
  if (length(widths) > 1) stop("Provided intervals are not all the same length")
  intervals.rd <- RangedData(IRanges(start=loc.df$region_start, end=loc.df$region_end),
                             space=loc.df$chr, id=loc.df[, id.column])
  # Views over the coverage Rle per chromosome, then flatten to numeric.
  coverage.view <- RleViewsList(rleList=cov.object[names(intervals.rd)],
                                rangesList=ranges(intervals.rd))
  per.base <- viewApply(coverage.view, as.numeric)
  result <- matrix(unlist(sapply(per.base, sapply, as.numeric)),
                   nrow=nrow(loc.df), byrow=TRUE)
  rownames(result) <- as.character(as.data.frame(intervals.rd)$id)
  result
}
# given a data frame listing genes (columns chr, start, end and strand), returns a matrix
# of read values for an equal-sized region in each gene defined by upstream and downstream
get_enrichment_matrix <- function(cov.object, genes, id.column="fb_gene_id", upstream=200, downstream=800) {
# NOTE(review): this check requires BOTH strands to be present in `genes`;
# a single-strand gene list will error -- confirm that is intended.
if(!identical(sort(unique(genes$strand)), c(-1, 1))) stop("genes data frame should have a strand column with 1 and -1")
chr_lengths <- sapply(cov.object, length)
original.genes.count <- nrow(genes)
# Anchor the window at the TSS: upstream of `start` for + strand genes,
# upstream of `end` (mirrored) for - strand genes.
genes <- transform(genes, region_start = ifelse(strand == 1, start - upstream, end - downstream + 1),
region_end = ifelse(strand == 1, start + downstream - 1, end + upstream))
# Drop windows that fall off either end of their chromosome, and warn about
# how many were lost.
genes <- subset(genes, region_start > 0 & region_end <= chr_lengths[as.character(chr)])
if(nrow(genes) != original.genes.count) warning(original.genes.count - nrow(genes), " gene(s) were removed due to chromosome boundary")
niv <- which(genes$strand == -1)
piv <- which(genes$strand == 1)
reads.p <- NULL
reads.n <- NULL
if(length(piv) > 0) {
reads.p <- read_matrix(genes[piv, ], cov.object, id.column)
}
if(length(niv) > 0) {
reads.n <- read_matrix(genes[niv, ], cov.object, id.column)
# Reverse columns so - strand genes read 5' -> 3' like + strand genes.
reads.n <- reads.n[, ncol(reads.n):1]
}
# Note: rbind puts all + strand genes before all - strand genes, so row
# order does not match the input order when both strands are present.
reads <- rbind(reads.p, reads.n)
reads
}
# Given a named list of samples (coverage objects) and a named list of gene
# data frames, compute a read matrix for every sample x gene-list pair.
# Returns a nested list: result[[sample]][[genelist]].
get_enrichment_matrix_list <- function(samples, genes, ...) {
  sample.names <- names(samples)
  genelist.names <- names(genes)
  # Index-based iteration keeps progress messages without the original
  # closure-counter (`<<-`) bookkeeping.
  per_sample <- function(si) {
    message("get_enrichment_matrix_list() On sample: ", sample.names[si])
    inner <- lapply(seq_along(genes), function(gi) {
      message(" - gene list: ", genelist.names[gi])
      get_enrichment_matrix(samples[[si]], genes[[gi]], ...)
    })
    names(inner) <- genelist.names
    inner
  }
  result <- lapply(seq_along(samples), per_sample)
  names(result) <- sample.names
  result
}
# Normalize a read matrix into [0, 1]: clamp negatives to zero, scale by the
# given threshold, and cap anything above the threshold at one.
normalize_matrix <- function(reads, value.limit) {
  clipped <- reads
  clipped[which(clipped < 0)] <- 0
  scaled <- clipped / value.limit
  scaled[which(scaled > 1)] <- 1
  scaled
}
# Render a heatmap of the (already normalized) read matrix as a lattice
# levelplot with a white -> blue -> red palette. Row names are stripped so
# individual genes are not labelled.
generate_plot <- function(reads, plot.title, show.legend=TRUE) {
  palette.fn <- colorRampPalette(c("white", "blue", "red"), space = "rgb")
  message("plot: ", plot.title)
  rownames(reads) <- NULL
  levelplot(t(reads), main = plot.title, xlab = "", ylab = "",
            col.regions = palette.fn(120), useRaster = TRUE, cuts = 16,
            colorkey = show.legend)
}
# For each sample in sample.list (a list of matrix lists), pool all of that
# sample's matrices and return the requested quantile of the pooled values.
find_upper_threshold_for_samples <- function(sample.list, quantile.threshold=0.99) {
  lapply(sample.list, function(sample.matrices) {
    pooled <- c(do.call(rbind, sample.matrices))
    quantile(pooled, quantile.threshold, na.rm=T)
  })
}
# Apply normalize_matrix() to every matrix of every sample in sample.list,
# using the per-sample threshold from threshold.list (matched by name).
normalize_matrices_by_threshold <- function(sample.list, threshold.list) {
  for (sample.name in names(sample.list)) {
    message("apply_normalization_targets() sample: ", sample.name)
    for (genelist.name in names(sample.list[[sample.name]])) {
      threshold <- threshold.list[[sample.name]]
      message(" - target for ", genelist.name, ": ", threshold)
      sample.list[[sample.name]][[genelist.name]] <-
        normalize_matrix(sample.list[[sample.name]][[genelist.name]], threshold)
    }
  }
  sample.list
}
# Re-order the rows of each matrix in sample.list using the matching row
# order in order.list (matched by gene-list name); matrices without an entry
# in order.list are left untouched.
reorder_genelist <- function(sample.list, order.list) {
  for (s in names(sample.list)) {
    for (g in names(sample.list[[s]])) {
      if (!(g %in% names(order.list))) next
      message("Re-ordering ", g, " in ", s)
      sample.list[[s]][[g]] <- sample.list[[s]][[g]][order.list[[g]], ]
      # Guard against a partial ordering silently dropping rows.
      if (nrow(sample.list[[s]][[g]]) != length(order.list[[g]])) stop("reorder_genelist(): provided order does not have enough values for specified gene list")
    }
  }
  sample.list
}
# Row-bind a list of matrices, inserting the given `blanks` matrix between
# consecutive pieces (used to draw visual gaps in heatmaps).
rbind_matrices_with_blanks <- function(mlist, blanks) {
  combined <- NULL
  for (piece in mlist) {
    if (is.null(combined)) {
      combined <- piece
    } else {
      combined <- rbind(combined, blanks, piece)
    }
  }
  combined
}
# For each sample, merge its per-gene-list matrices into one matrix with
# `empty.rows.between` NA rows separating the gene lists. Column count is
# taken from the first matrix (all matrices share the same width).
combine_genelists <- function(sample.list, empty.rows.between) {
  n.cols <- ncol(sample.list[[1]][[1]])
  spacer <- matrix(NA, nrow = empty.rows.between, ncol = n.cols)
  lapply(sample.list, rbind_matrices_with_blanks, blanks = spacer)
}
# Reverse the column order of every matrix in a list (e.g. to flip 5'->3'
# orientation for minus-strand panels).
reverse_list_of_matrices <- function(mlist) {
  lapply(mlist, function(m) m[, rev(seq_len(ncol(m)))])
}
|
e45c3d68786fc402d76af382377a42a9cf566bdd
|
cd6a84f096cf47c8d0e6e727d6b5564b9caf8d96
|
/R/reliabilityIRT.R
|
ddb21cdcb487a3f02b795b5919c0369f5d4c71ca
|
[
"CC-BY-4.0"
] |
permissive
|
DevPsyLab/petersenlab
|
c14a24bafeb68c5f0eca886f6264391f97d66668
|
9ee5242aa04c09053ed70dd23d47312d9af25cb4
|
refs/heads/main
| 2023-09-04T02:48:36.288068
| 2023-09-01T05:15:51
| 2023-09-01T05:15:51
| 597,009,425
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,105
|
r
|
reliabilityIRT.R
|
#' @title
#' Reliability (IRT).
#'
#' @description
#' Estimate reliability in item response theory from test information.
#'
#' @details
#' Reliability at a given level of test information (the sum of all items'
#' information) is information / (information + varTheta).
#'
#' @param information Test information.
#' @param varTheta Variance of theta (defaults to 1, the usual scaling).
#'
#' @return
#' Reliability for that amount of test information.
#'
#' @family IRT
#'
#' @export
#'
#' @examples
#' # Calculate information for 4 items
#' item1 <- itemInformation(b = -2, a = 0.6, theta = -4:4)
#' item2 <- itemInformation(b = -1, a = 1.2, theta = -4:4)
#' item3 <- itemInformation(b = 1, a = 1.5, theta = -4:4)
#' item4 <- itemInformation(b = 2, a = 2, theta = -4:4)
#'
#' items <- data.frame(item1, item2, item3, item4)
#'
#' # Calculate test information
#' items$testInformation <- rowSums(items)
#'
#' # Estimate reliability
#' reliabilityIRT(items$testInformation)
#'
#' @seealso
#' \url{https://groups.google.com/g/mirt-package/c/ZAgpt6nq5V8/m/R3OEeEqdAQAJ}
reliabilityIRT <- function(information, varTheta = 1){
  # Vectorized over `information`; higher information -> reliability -> 1.
  rel <- information / (information + varTheta)
  rel
}
|
b7d41e75d30a37e1f873e06b7f7c94c808184df8
|
a70a98b37f9f88ef8b6e0ed930d0060b4777f134
|
/Code/SimulationFunctions.R
|
0eee11cd77f6306d8f2c9f0e11d711a0eaf117ec
|
[] |
no_license
|
PolCap/DemographicResilience
|
febf9139de3e3ab1db3f8b47631f5b4e49e9c1a4
|
037f217b8969244854877f114d747aa53ea26f2c
|
refs/heads/master
| 2023-04-11T10:34:51.527162
| 2022-03-08T12:30:59
| 2022-03-08T12:30:59
| 436,702,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
SimulationFunctions.R
|
# Simulate a random matrix population model (matrix A = survival/transition
# matrix U + fecundity matrix F).
#
# Args:
#   dimension: number of stages (must be >= 2; fecundity is written into
#              row 1, columns 2:dimension).
#   upd: if TRUE, zero the upper triangle of U (no retrogression/shrinkage).
# Returns: list(matrix_A, matrix_U, matrix_F).
random_matrices <- function(dimension=NA, upd=FALSE){
  # Validate input early; the original default (NA) produced a cryptic
  # runif() error, and dimension = 1 failed on out-of-bounds indexing.
  stopifnot(is.numeric(dimension), length(dimension) == 1, dimension >= 2)
  # Simulate transitions from 0 to 1 (RNG call order preserved).
  matUvector <- runif(dimension^2,0,1)
  # Transform into a matrix
  matU <- matrix(matUvector,nrow=dimension)
  # Zero transitions into stage 1 from later stages (row 1, columns 2:d);
  # matU[1,1] (stasis in stage 1) is kept.
  matU[1,2:dimension] <- 0
  # Simulate stage-specific survival vector
  surv <- runif(dimension,0,1)
  # Remove shrinkage (upper triangle) if upd is TRUE
  if(isTRUE(upd)){matU[upper.tri(matU)] <- 0}
  # Make sure that columns add to 1
  matU <- apply(matU, 2, function(x) x/sum(x))
  # Penalise by stage-specific survival
  for(i in 1:dimension) {matU[,i]<-matU[,i]*surv[i]}
  # Create a 0 matrix
  matF <- matU*0
  # Simulate random fecundity with a shared Poisson rate drawn in 1..100
  matF[1,2:dimension] <- rpois(dimension-1, lambda=sample.int(100, 1))
  # Create the Matrix A
  matA <- matU+matF
  matrices <- list("matrix_A" = matA,
                   "matrix_U" = matU,
                   "matrix_F" = matF)
  return(matrices)
}
|
176c02f6701f7c49b937d19a719a71ada8ada6b5
|
27b70fed2f828b777dba07a6a2ad881087cf7006
|
/ui.R
|
9777c9ad25bfde8cfc0bdbf1320faac8798f28a0
|
[] |
no_license
|
catwizard/data_product
|
79f3ae43f4af04d53efe921757766c7e36f88231
|
cbaf4e683822c01ff16c415b3104ed11584ebf3f
|
refs/heads/master
| 2020-05-25T10:15:07.873440
| 2015-06-28T02:25:36
| 2015-06-28T02:25:36
| 37,263,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,230
|
r
|
ui.R
|
library(shiny)
# Wage data (ISLR): 3000 observations with wage, year, age, education, etc.
library(ISLR); data(Wage); dataset <- Wage
# UI definition: three tabs -- variable exploration, a wage-range slider,
# and an About page. Inputs feed the matching outputs rendered in server.R
# ('plot' and 'wage').
fluidPage(
navbarPage("Wage Exploration",
tabPanel("Variables",
sidebarPanel(
HTML("Choose X, Y, Z and Facet to exploring the relationship"),
# Defaults are column positions in the Wage data frame.
selectInput('x', 'X', names(dataset), names(dataset)[[2]]),
selectInput('y', 'Y for Pairs', names(dataset), names(dataset)[[6]]),
selectInput('z', 'Z for Pairs', names(dataset), names(dataset)[[8]]),
selectInput('color', 'Facet', names(dataset), names(dataset)[[6]]),
radioButtons('type', 'Plot type',
c('Pairs','Points', 'Density'))
),
mainPanel(
plotOutput('plot'))
),
tabPanel("Wage Range",
sidebarPanel(
# Slider spans the observed wage range, starting at the median.
sliderInput('value', 'Wage and the median',
min=min(Wage$wage), max=max(Wage$wage),
value=median(Wage$wage))
),
mainPanel(
plotOutput('wage'))
),
tabPanel("About",
h3("About"),
p(),
HTML("This web aplication uses the Wage data from package ISLR in R,",
"which includes 3000 observations with varaibles wage, year, age, sex, maritl, race, education,",
"region, jobclass, health and other."),
p(),
HTML("This app is to exploring the relationship between wage and",
"other variables for prediction afterwards."),
p(),
HTML("R Code of this Shiny app with ui.R and Server.R is stored in"),
a(href="https://github.com/catwizard/data_product",
"author's GitHub Repository."),
HTML("A simple presentation created by RStudio Presentation is also in"),
a(href="http://catwizard.github.io/Assignment.html",
"author's GitHub Repository,"),
p(),
HTML("Finally, this is a course project of"),
a(href="https://www.coursera.org/course/devdataprod",
"Developing Data Production in Coursera"),
HTML("by Johns Hopkins University.")
)
))
|
33281f8a03280e446c471cdb3e25369d4019fc11
|
f5c81db2ecd5464b3ea09efb3e6a5a9d0484f8a6
|
/R/utility_functions.R
|
d91c9baac56791dfa3c8996c259856cf3460b13d
|
[] |
no_license
|
dstanley4/fastInteraction
|
5095e69682872890c5a90f1116fa5c389801eb62
|
8461d57bea2a0b868cf081404501024a710b722b
|
refs/heads/master
| 2023-06-08T21:50:54.188804
| 2023-06-04T18:18:09
| 2023-06-04T18:18:09
| 220,544,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,664
|
r
|
utility_functions.R
|
# Build the predicted-value surface for a 2-predictor interaction model:
# evaluate `lm_object` on a 5x5 grid spanning +/- 2 SD around the means of
# the predictor and moderator, and extract the two simple-slope lines at
# moderator = mean -/+ 1 SD.
calculate.surface <- function(lm_object, criterion, predictor, moderator, criterion.name, predictor.name, moderator.name) {
mx <- mean(predictor, na.rm = TRUE)
mm <- mean(moderator, na.rm = TRUE)
sdx <- sd(predictor, na.rm = TRUE)
sdm <- sd(moderator, na.rm = TRUE)
x.range <- c( (mx - 2*sdx), (mx + 2*sdx))
m.range <- c( (mm - 2*sdm), (mm + 2*sdm))
# 5 points each: mean -2SD, -1SD, mean, +1SD, +2SD.
x.seq <- seq((mx - 2*sdx), (mx + 2*sdx), by = sdx)
m.seq <- seq((mm - 2*sdm), (mm + 2*sdm), by = sdm)
length.x.seq <- length(x.seq)
# NOTE(review): this uses length(x.seq), presumably a typo for
# length(m.seq); harmless only because both sequences have 5 points.
length.m.seq <- length(x.seq)
# expand.grid varies x fastest within each moderator value.
new.data <- as.data.frame(expand.grid(x.seq, m.seq))
num_cases <- length(new.data[,1])
criterion.temp <- data.frame(criterion.temp = rep(NA, num_cases))
new.data <- cbind(criterion.temp,new.data)
names(new.data) <- c(criterion.name, predictor.name, moderator.name)
new.data[,1] = predict(object = lm_object, newdata = new.data)
surface.predicted.values <- matrix(rep(NA, length.x.seq*length.m.seq), length.x.seq, length.m.seq)
# NOTE(review): rows are named with x values and columns with m values, but
# the fill loop below indexes [m, x] -- the dimnames appear transposed
# relative to the contents. Downstream code only uses rows by position
# (rows = moderator levels), so results are unaffected; confirm intent.
rownames(surface.predicted.values) <- round(x.seq,2)
colnames(surface.predicted.values) <- round(m.seq,2)
cur_row <- 0
for (m in 1:length(m.seq)) {
for (x in 1:length(x.seq)) {
cur_row <- cur_row + 1
surface.predicted.values[m, x] <- new.data[cur_row,1]
}
}
# Rows 2 and 4 correspond to moderator = mean - 1 SD and mean + 1 SD.
line_data_sdym1 <- as.numeric(surface.predicted.values[2,])
line_data_sdyp1 <- as.numeric(surface.predicted.values[4,])
line1data <- data.frame(xx = x.seq, yy = rep((mm-sdm), 5), zz = line_data_sdym1)
line2data <- data.frame(xx = x.seq, yy = rep((mm+sdm), 5), zz = line_data_sdyp1)
output <- list(surface.predicted.values = surface.predicted.values,
line1data = line1data,
line2data = line2data,
x.seq = x.seq,
m.seq = m.seq)
return(output)
}
# Check whether `sub.name` is one of the data frame's column names.
# Returns FALSE for NULL input; prints a diagnostic when the name is
# non-NULL but not found.
is.valid.name <- function(sub.name, data.col.names) {
  if (is.null(sub.name)) {
    return(FALSE)
  }
  found <- any(sub.name == data.col.names)
  if (found == FALSE) {
    cat(sprintf("%s is not a valid column name.\n\n", as.character(sub.name)))
  }
  found
}
#' @export
print.fastintoutput <- function(x,...) {
  # S3 print method for fastInteraction results: regression (APA) table,
  # overall R2, simple-slope table, then the 2D and 3D interaction graphs.
  # The printed byte stream is unchanged from the original implementation.
  res <- x
  cat("\n\n")
  print(res$apa.table, row.names = FALSE, quote = FALSE)
  cat(sprintf("Regression overall R2: %s", res$Overall.R2.F))
  cat("\n\n")
  cat("Simple slope table\n")
  cat("------------------\n")
  cat("\n")
  print(res$simple.slope.table, row.names = FALSE, quote = FALSE, digits = 4)
  cat("\n")
  # Printing the graph objects renders them (Plots / Viewer panels).
  print(res$graph2D)
  print(res$graph3D)
  cat("\n")
  cat("3D graph - see Viewer panel in RStudio\n")
  cat("2D graph - see Plots panel in RStudio\n")
  cat("\n")
}
|
7fd7b5c0fefbcfcf6969df37021d542d67067f82
|
c0f1ad567a5f8ab8fb376242dc1a990d2ab6b3e8
|
/Propensión/SPViajes.R
|
f3f86fa13779001e46d5d2809dcc147b43d19a2d
|
[] |
no_license
|
RAS-WB-Uniandes-Urban-Cycling/proBikePolicies
|
edda6596b693f68b22c4ad50d6746833cef167e3
|
5c82094420a38421748bbb1f997550df4852fd17
|
refs/heads/master
| 2021-06-06T17:44:25.098109
| 2021-05-09T18:06:08
| 2021-05-09T18:06:08
| 135,209,976
| 0
| 2
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 9,916
|
r
|
SPViajes.R
|
# SPViajes.R -- trip-level preparation for the cycling-propensity analysis.
# Joins the 2015 Bogota mobility survey (Encuesta/Personas/Vehiculos/Viajes)
# with census-block (manzana) centroids and ZAT zones, builds desire lines
# for each trip, and renders interactive tmap views.
# NOTE(review): relies on hard-coded absolute Windows/OneDrive paths and on
# objects (Encuesta, Personas, Vehiculos, b, zatsMB) loaded elsewhere --
# this script is not runnable standalone.
library(tidyverse)
library(lubridate)
library(sf)
# Derive DANE-style sector (SecCodigo) and block (ManCodigo) codes from the
# survey's id_manzana string.
EncuestaM <- Encuesta %>% mutate(id_manzana=as.character(id_manzana),SecCodigo = paste0("00",str_sub(id_manzana,7,10)),ManCodigo = paste0(SecCodigo,0,str_sub(id_manzana,13,14)))
# Person-level table: demographics + household block codes + counts of
# motorized (NumVehiculosM) and non-motorized (NumVehiculosNM) vehicles.
Data <- Personas %>% select(moviliza_bicicleta,id_encuesta,numero_persona,sexo,edad,nivel_educativo,actividad_principal,actividad_economica,licenciaconduccion1) %>%
left_join(select(EncuestaM,id_encuesta,estrato,id_manzana,SecCodigo,ManCodigo,zat_hogar,vehic_personaidonea)) %>% left_join(group_by(Vehiculos,id_encuesta) %>% summarise(NumVehiculosM=n())) %>%
mutate(NumVehiculosM = ifelse(is.na(NumVehiculosM),0,NumVehiculosM),NumVehiculosNM=pmax(0,vehic_personaidonea-NumVehiculosM)) %>% filter(!is.na(moviliza_bicicleta))
# --- Census-block centroid layers (IDECA and DANE geodata) ----------------
ruta <- "C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/BASES DE DATOS/Mapas de Referencia IDECA/MR"
# IDECA 2014 blocks: centroid lon/lat per block, geometry dropped.
manz2014 <- st_read(paste0(ruta,"1214.gdb"),layer = "Manz",stringsAsFactors = F,quiet = T) %>% st_cast("MULTIPOLYGON") %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% select(ManCodigo,SecCodigo,LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
ruta2 <- "C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/BASES DE DATOS/Marco Geoestadistico Nacional - DANE/2012/11_BOGOTA/MGN/"
# NOTE(review): MNG_BOG is assigned three times with different id schemes;
# only the last assignment (2017 urban layer) survives -- confirm intent.
MNG_BOG <- st_read(paste0(ruta2,"MGN_Manzana.shp"),stringsAsFactors = F,quiet = T) %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% transmute(SecCodigo = paste0("00",SECU_SET_1),ManCodigo = paste0(SecCodigo,0,MANZ_CCDGO),LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
MNG_BOG <- st_read(paste0(ruta2,"MGN_Manzana.shp"),stringsAsFactors = F,quiet = T) %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% transmute(id_manzana = paste0(SETR_CLSE_,SECR_SETR_,CPOB_SECR_,SECU_SET_1,SECU_SECU_,MANZ_CCDGO),LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
ruta2 <- "C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/BASES DE DATOS/Marco Geoestadistico Nacional - DANE/2017/11_BOGOTA/URBANO/"
MNG_BOG <- st_read(paste0(ruta2,"MGN_URB_MANZANA.shp"),stringsAsFactors = F,quiet = T) %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% transmute(id_manzana = paste0(MPIO_CCDGO,CLAS_CCDGO,SETU_CCDGO,SECU_CCDGO,MANZ_CCDGO),LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
ruta3 <- "C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/BASES DE DATOS/Marco Geoestadistico Nacional - DANE/2012/25_CUNDINAMARCA/MGN/"
# Same pattern for Cundinamarca blocks; MNG_CUN is also reassigned (the
# second id_manzana-keyed version is the one kept).
MNG_CUN <- st_read(paste0(ruta3,"MGN_Manzana.shp"),stringsAsFactors = F,quiet = T) %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% transmute(SecCodigo = paste0("00",SECU_SET_1),ManCodigo = paste0(SecCodigo,0,MANZ_CCDGO),LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
MNG_CUN<- st_read(paste0(ruta3,"MGN_Manzana.shp"),stringsAsFactors = F,quiet = T) %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% transmute(id_manzana = paste0(SETR_CLSE_,SECR_SETR_,CPOB_SECR_,SECU_SET_1,SECU_SECU_,MANZ_CCDGO),LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
MNG_CUN_SEC <- st_read(paste0(ruta3,"MGN_Seccion_urbana.shp"),stringsAsFactors = F,quiet = T) %>% st_transform(4326) %>% st_centroid(.) %>% cbind(st_coordinates(.)) %>% transmute(id_manzana = paste0(SETR_CLSE_,SECR_SETR_,CPOB_SECR_,SECU_SET_1,SECU_SECU_,MANZ_CCDGO),LongMan=X,LatMan=Y)%>% st_set_geometry(NULL)
# --- Trips table and origin geocoding -------------------------------------
# Survey trips; column names lower-cased for consistency.
Viajes <- read_csv2("../../Bases de datos/EncuestaMovilidad2015/Encuesta/encuesta 2015 - viajes.csv",locale = locale(encoding = stringi::stri_enc_get())) %>% `names<-`(str_to_lower(names(.)))
# Trip origins with valid coordinates, as sf points (WGS84).
Orig <- Viajes %>% filter(!is.na(latitud_origen),latitud_origen>0) %>% st_as_sf(coords = c("longitud_origen","latitud_origen"),crs=4326)
zats <- st_read("../../Bases de datos/Bases.gdb",layer = "ZATs",stringsAsFactors = FALSE,quiet = TRUE) %>% st_transform(4326) %>% transmute(id = as.numeric(id),zat_hogar = as.numeric(zona_num_n)) %>% filter(!st_is_empty(.))
# Spatial join: tag each origin point with the ZAT polygon containing it.
OrigenViajes <- Orig %>% st_join(zats,join = st_within)
tmap_mode("view")
# NOTE(review): `b` and `zatsMB` are not defined in this script -- this map
# depends on objects created elsewhere in the session.
mapO <- tm_shape(name = "Bogotá",osmdata::getbb("Bogotá", format_out = "sf_polygon")) +
tm_borders(col = "black") + tm_shape(name = "Proporcion",zatsMB) +
tm_polygons(col = "Total", palette = "YlOrRd", style = "jenks",n = 5, id = "zat_hogar", title = "Proporción")+
tm_shape(name = "Origen",filter(b,`n()`==2))+tm_dots(col = "black") + tm_legend(legend.position = c("left", "bottom")) + tm_layout(title = "Total encuestados por zat", basemaps = c( leaflet::providers$Stamen.TonerHybrid),basemaps.alpha = 0.3)
# NOTE(review): `c` shadows base::c for the rest of the session -- rename.
c <- OrigenViajes %>% right_join(Data) %>% group_by(id_encuesta,numero_persona)
# --- Scratch experiments for building linestrings from O/D pairs ----------
Vp <- head(Viajes,2) %>% rowwise()%>%
mutate(geometry = list(rbind(c(longitud_origen,latitud_origen),c(longitud_destino,latitud_destino))))
Vp2 <- Vp %>% mutate(A = st_linestring(matrix(unlist(geometry),ncol = 2)))
Vp2 <- Vp %>% mutate(A = list(st_linestring(geometry)))
Vp2 <- Vp %>% mutate(A = st_sfc(st_linestring(geometry)))
l <- (lapply(Vp$geometry,FUN = st_linestring))
st_linestring(matrix(unlist(Vp$geometry),ncol = 2))
# --- Desire lines: straight O/D segments per trip -------------------------
lineasDeseo <- Viajes %>% unite("id",c("id_encuesta","numero_persona","numero_viaje"),remove = F) %>%
filter(!is.na(latitud_origen),latitud_origen>0,!is.na(longitud_origen),longitud_origen>-80,
!is.na(latitud_destino),latitud_destino>0,!is.na(longitud_destino),longitud_destino>-80) %>%
rowwise() %>% mutate(geometry = st_sfc(st_linestring(rbind(c(longitud_origen,latitud_origen),c(longitud_destino,latitud_destino))))) %>%
st_as_sf(crs = 4326)
st_write(lineasDeseo,"C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/RESULTADOS/PROPENSION/GEO-DATA/LineasViajes.shp")
# Manhattan (L-shaped) desire lines via an intermediate corner vertex;
# Distancia is the line length in the layer's units (st_length).
lineasDeseoManhattan <- Viajes %>% unite("id",c("id_encuesta","numero_persona","numero_viaje"),remove = F) %>%
filter(!is.na(latitud_origen),latitud_origen>0,!is.na(longitud_origen),longitud_origen>-80,
!is.na(latitud_destino),latitud_destino>0,!is.na(longitud_destino),longitud_destino>-80) %>%
rowwise() %>% mutate(geometry = st_sfc(st_linestring(rbind(c(longitud_origen,latitud_origen),c(longitud_destino,latitud_origen),c(longitud_destino,latitud_destino))))) %>%
st_as_sf(crs = 4326) %>% ungroup() %>% mutate(Distancia = as.numeric(st_length(.)))
st_write(lineasDeseoManhattan,"C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/RESULTADOS/PROPENSION/GEO-DATA/LineasViajesManhattan.shp")
# Route desire lines over the street network in 10k-trip batches (OSRM);
# batches 8-14 only -- presumably 1-7 were routed in an earlier run.
for (i in 8:14){
rutas <- line2route(lineasDeseo[(10000*(i-1)+1):(10000*i),],"route_osrm",l_id = "id")
st_write(rutas,paste0("C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/RESULTADOS/PROPENSION/GEO-DATA/rutas",i,".shp"))
}
#### Read back the Manhattan trip lines and restore column names ####
ViajesL1 <- st_read("C:/Users/pa.uriza274/Universidad de Los Andes/German Augusto Carvajal Murcia - UNIANDES - RAS - SDM/RESULTADOS/PROPENSION/GEO-DATA/LineasViajesManhattan.shp",stringsAsFactors = FALSE,quiet = TRUE) %>% st_set_geometry(NULL)
# Shapefile truncates field names to 10 chars; restore the full names.
names(ViajesL1) <- c("id","id_encuesta","numero_persona","numero_viaje","motivoviaje","municipio_destino","departamento_destino","tiempo_camino","hora_inicio",
"hora_fin","medio_predominante","zat_destino","zat_origen","municipio_origen","departamento_origen","latitud_origen","latitud_destino",
"longitud_origen","longitud_destino","diferencia_horas","factor_ajuste","ponderador_calibrado","dia_habil","dia_nohabil","pico_habil",
"pico_nohabil","valle_nohabil","valle_habil","pi_k_i","pi_k_ii","pi_k_iii","fe_total","factor_ajuste_transmilenio","ponderador_calibrado_viajes",
"Distancia","geometry")
# Rebuild the person-level table, attaching each person's mean Manhattan
# trip distance from their home ZAT and the last trip's origin point.
Data <- Personas %>% select(moviliza_bicicleta,id_encuesta,numero_persona,sexo,edad,nivel_educativo,actividad_principal,actividad_economica,licenciaconduccion1) %>%
left_join(select(Encuesta,id_encuesta,estrato,zat_hogar,vehic_personaidonea)) %>% left_join(group_by(Vehiculos,id_encuesta) %>% summarise(NumVehiculosM=n())) %>%
mutate(NumVehiculosM = ifelse(is.na(NumVehiculosM),0,NumVehiculosM),NumVehiculosNM=pmax(0,vehic_personaidonea-NumVehiculosM)) %>% filter(!is.na(moviliza_bicicleta)) %>%
inner_join((lineasDeseoManhattan %>% st_set_geometry(NULL) %>% filter(Distancia>0) %>% group_by(id_encuesta,numero_persona,numero_viaje,zat_origen) %>% transmute(longitud_origen,latitud_origen,DistProm = mean(Distancia,na.rm = T))),
by = c("id_encuesta","numero_persona","zat_hogar" = "zat_origen")) %>% group_by(id_encuesta,numero_persona) %>% top_n(1,wt = numero_viaje) %>%
rowwise() %>% mutate(geometry = st_sfc(st_point(c(longitud_origen,latitud_origen)))) %>% st_as_sf(crs = 4326)
# Interactive map of person origin points over ZAT counts.
mapO <- tm_shape(name = "Bogotá",osmdata::getbb("Bogotá", format_out = "sf_polygon")) +
tm_borders(col = "black") + tm_shape(name = "Proporcion",zatsMB) +
tm_polygons(col = "Total", palette = "YlOrRd", style = "jenks",n = 5, id = "zat_hogar", title = "Proporción")+
tm_shape(name = "Origen",Data)+tm_dots(col = "black",alpha = 0.2) + tm_legend(legend.position = c("left", "bottom")) + tm_layout(title = "Total encuestados por zat", basemaps = c( leaflet::providers$Stamen.TonerHybrid),basemaps.alpha = 0.3)
mapO
# Interactive map showing a sample of 50 Manhattan desire lines.
mapL <- tm_shape(name = "Bogotá",osmdata::getbb("Bogotá", format_out = "sf_polygon")) +
tm_borders(col = "black") + tm_shape(name = "Proporcion",zatsMB) +
tm_polygons(col = "Total", palette = "YlOrRd", style = "jenks",n = 5, id = "zat_hogar", title = "Proporción")+
tm_shape(name = "Origen",head(lineasDeseoManhattan,50))+tm_lines(col = "black") + tm_legend(legend.position = c("left", "bottom")) + tm_layout(title = "Total encuestados por zat", basemaps = c( leaflet::providers$Stamen.TonerHybrid),basemaps.alpha = 0.3)
mapL
|
5770322905e1277e526962634d9ae442dfdd70ef
|
95e564d41cea0c341c1b1f84f9c15938728d9ea8
|
/Initial Statistical Analysis.R
|
164c32f2c36dbe4a1b2492bfa802d42f4870942f
|
[] |
no_license
|
UVAHealthSystemCapstone/UVAHealthSystemCapstone
|
f838dbc41e6d9e8615e0d4fc79423a21d3d48229
|
d8c7158d2a7263d09a4bbe483f8d449e0ab87a62
|
refs/heads/master
| 2022-04-16T05:14:25.332396
| 2020-04-15T01:02:50
| 2020-04-15T01:02:50
| 211,167,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,081
|
r
|
Initial Statistical Analysis.R
|
# Initial statistical analysis: flag patient encounters as diabetic using
# ICD-9 / ICD-10 diagnosis codes, then fit simple logistic regressions of
# diabetic status on demographics (Carilion data first, UVA data below).
library(tidyverse)
library(dplyr)
library(ggplot2)
#load data (network share; creates weather_merged_car in the workspace)
load("//galen.storage.virginia.edu/ivy-hip-chru/ekd6bx/Carilion_ind_weather_merged")
individuals <- weather_merged_car
# Creating data frame with just diabetes patients (based on ICD codes)
library(stringr)
# ICD 9 Codes (3-digit category codes, then 4- and 5-digit code lists)
icd9_main <- as.character(c(249,250))
icd9_list <- as.character(c(2493,2498,7751,6480,2500,2535,5881,2491,2490,2492,7902,2503,7517,2499,
3572,3620,2509,7756,2502,3371,3535,5363,7750,6488,2714,2504,
2507,2506,2508,2505,2497,7318,2494,2750,2510,2501,2512,2496,
2495,2511,7071,2496,2495,2508,2497,2494,2498,2503,2491,2493,
3620,7750,2504,2507,7800,2505,2510,2501,2762,2511,7130,7135))
icd9_list2 <- as.character(c(36641,44381,58181,58381,79029,36201,36207,36202,36203,36204,36205,36206))
icd9_list3 <- 'V180|V771|V653'
# ICD-10 diabetes-related codes as one '|'-separated string.
# NOTE(review): this literal spans two physical source lines, so one code
# ("E09.329") is split by an embedded newline ("E09.\n329") and will never
# match a diagnosis after the strsplit below -- verify and rejoin.
icd10_codes <- "E23.2|O24.92|E10.618|E11.618|E13.36|E13.44|Z13.1|E13.42|E10.41|E10.44|E11.41|E11.44|E13.40|E13.41|E13.618|E13.21|E09.21|E10.42|E09.36|E10.36|E10.620|E11.36|E11.620|E13.620|E08.36|E08.41|E08.44|E08.61|E09.61|E09.620|E10.31|E10.35|E10.610|E11.31|E11.35|E13.31|E13.35|E11.40|E08.42|E08.620|E11.21|E13.610|N25.1|O24.4|E10.21|E11.610|O24|E08.355|E10.40|E11.42|E11|E10|E08.21|E13|E10.65|E11.65|E13.65|Z83.3|E10.43|E13.9|O24.439|E09.41|E09.44|E11.49|E13.319|E13.359|E13.49|E08.3553|E09.35|E09.355|E09.42|E09.52|E09.610|E10.355|E11.355|E13.29|E13.33|E13.34|E13.355|E13.3553|E13.3559|E13.43|E13.52|O24.93|E10.29|E11.29|Z86.32|E08.610|E09.29|E10.32|E11.32|E11.51|E13.32|E10.311|E10.51|E10.52|E11.311|E11.39|E11.52|E13.321|E09.9|O24.419|E11.43|E09.311|E09.351|E09.40|E09.51|E10.351|E10.359|E13.311|E13.341|E13.351|E13.39|E10.9|O24.32|O24.33|E10.39|E10.649|E13.3|E13.4|E13.5|O24.414|O24.424|O24.430|O24.434|E08.9|E10.10|E11.8|E13.01|E13.641|E13.649|E13.8|O24.429|O24.919|E08.31|E08.33|E08.34|E08.35|E08.40|E08.49|E08.618|E09.31|E09.33|E09.34|E09.3553|E09.618|E10.22|E10.319|E10.33|E10.34|E10.3551|E10.3552|E10.3553|E10.3559|E11.319|E11.321|E11.33|E11.34|E11.351|E11.3551|E11.3552|E11.3553|E11.3559|E13.3551|E13.3552|E13.37|E13.51|E08.29|E08.32|E09.32|E10.49|E11.359|E13.10|E09|E13.22|E10.69|E09.22|E11.22|E08.3213|E08.3219|E08.3313|E08.3319|E08.3413|E08.3419|E08.3513|E08.3519|E08.352|E08.354|E08.3543|E08.3551|E08.3552|E08.3559|E08.37|E13.354|E11.9|E10.321|E10.331|E10.341|E11.331|E11.341|E13.331|E11.649|E09.01|E09.11|E09.8|E10.11|E10.641|E11.01|E13.11|P70.0|E08.0|E08.1|E08.2|E08.3|E08.4|E08.5|E08.62|E08.63|E08.630|E08.64|E08.65|E08.8|E09.0|E09.2|E09.3|E09.4|E09.5|E09.62|E09.63|E09.65|E10.628|E13.6|E13.628|E13.638|E08|E08.39|E08.43|E09.39|O24.319|E13.329|E11.59|E10.630|E10.8|E11.630|E11.641|E11.628|E13.59|E09.319|E09.359|E08.3549|E08.22|E08.52|E11.69|E08.3211|E08.3212|E08.3291|E08.3293|E08.3299|E08.3311|E08.3393|E08.3411|E08.3412|E08.3491|E08.3493|E08.3499|E08.353|E08.37X3|E09.321|E09.
329|E09.3551|E09.3559|E10.3213|E10.354|E11.3213|E11.354|E13.3211|E13.3213|E13.3219|E13.3293|E13.3313|E13.3413|E13.3419|E13.3543|E10.59|E08.01|E08.11|E08.641|E13.621|E13.69|E08.6|E09.6|O24.311|O24.312|O24.313|O24.811|O24.812|O24.813|O24.819|E09.621|E09.630|E13.622|E10.329|E11.329|O24.415|O24.435|E09.43|E11.349|E09.331|E09.341|E10.339|E10.349|E11.339|E13.339|E13.349|E08.3541|E09.49|E09.641|E09.10|E10.622|E08.621|O24.9|E08.10|E08.59|E08.628|E08.638|E08.649|E08.69|E09.59|E09.628|E09.638|E09.69|O24.011|O24.012|O24.013|O24.019|O24.111|O24.112|O24.113|O24.119|E09.339|E09.349|E11.621|E10.621|E11.622|P70.1|O24.0|E09.649|E13.00|E08.622|O24.425|O24.1|E09.622|E09.00|E11.00|O24.3|O24.8|E08-E13|R73.03|E10.61|E11.61|E13.61|O24.41|O24.42|O24.43|O24.91|P70.2|E08.311|E08.351|E08.51|E10.3513|E10.37|E11.3513|E11.37|E13.3513|E10.1|E10.64|E11.0|E11.1|E11.64|E13.0|E13.1|E13.64|E08.319|E08.321|E08.329|E08.3292|E08.331|E08.3312|E08.339|E08.3391|E08.3392|E08.3399|E08.341|E08.349|E08.3492|E08.3511|E08.3512|E08.3521|E08.3522|E08.3523|E08.3529|E08.3531|E08.3532|E08.3533|E08.3539|E08.359|E08.3591|E08.3592|E08.3593|E08.3599|E08.37X1|E08.37X2|E08.37X9|E09.3211|E09.3212|E09.3213|E09.3219|E09.3291|E09.3292|E09.3293|E09.3299|E09.3311|E09.3312|E09.3313|E09.3319|E09.3391|E09.3392|E09.3393|E09.3399|E09.3411|E09.3412|E09.3413|E09.3419|E09.3491|E09.3492|E09.3493|E09.3499|E09.3511|E09.3512|E09.3513|E09.3519|E09.352|E09.3521|E09.3522|E09.3523|E09.3529|E09.353|E09.3531|E09.3532|E09.3533|E09.3539|E09.354|E09.3543|E09.3552|E09.3591|E09.3592|E09.3593|E09.3599|E09.37|E09.37X1|E09.37X2|E09.37X3|E09.37X9|E10.3211|E10.3212|E10.3219|E10.3291|E10.3292|E10.3293|E10.3299|E10.3311|E10.3312|E10.3313|E10.3319|E10.3391|E10.3392|E10.3393|E10.3399|E10.3411|E10.3412|E10.3413|E10.3419|E10.3491|E10.3492|E10.3493|E10.3499|E10.3511|E10.3512|E10.3519|E10.352|E10.3521|E10.3522|E10.3523|E10.3529|E10.353|E10.3531|E10.3532|E10.3533|E10.3539|E10.3541"
# Split the '|'-separated strings into individual code vectors.
icd10 <- unlist(strsplit(icd10_codes, "\\|"))
icd9_3 <- unlist(strsplit(icd9_list3, "\\|"))
# Drop the first character of the diagnosis code and keep the next 4 / 3
# characters, to compare against 4- and 3-digit ICD-9 codes respectively.
individuals$Principal.Diagnosis4 <- substr(individuals$Principal.Diagnosis, 2, 5)
individuals$Principal.Diagnosis3 <- substr(individuals$Principal.Diagnosis, 2, 4)
all_diabetes <- c(icd9_main, icd9_list, icd9_list2, icd9_3, icd10)
# diabetic = 1 if the full code or either truncated form matches any
# diabetes-related ICD code.
individuals$diabetic <- ifelse(individuals$Principal.Diagnosis %in% all_diabetes | individuals$Principal.Diagnosis4 %in% all_diabetes | individuals$Principal.Diagnosis3 %in% all_diabetes,1,0)
# Loading UVA data (early load; the UVA analysis proper is further below)
load("//galen.storage.virginia.edu/ivy-hip-chru/ekd6bx/UVA_ind_weather_merged")
# Creating diabetes subset for UVA data
# There are 14086 observations that have 0 in the dx column - ignore *for now
missing <- subset(weather_merged, dx == '0')
weather_merged2 <- subset(weather_merged, dx != '0')
# Univariate logistic regressions of diabetic status on race indicators.
# NOTE(review): the original comment said "t tests" but these are glm fits
# with a logit link (log odds ratios), one model per race dummy.
# Race
# Null hypothesis: There is no difference in proportion of a given race between populations of diabetes and non-diabetes patients
# Alternative hypothesis: There is a difference in proportion of a given race between populations of diabetes and non-diabetes patients
individuals$Black <- ifelse(individuals$Race == "Black", 1,0)
black <- glm(diabetic~Black, data = individuals, family=binomial(logit))
individuals$White <- ifelse(individuals$Race == "White", 1,0)
white <- glm(diabetic~White, data = individuals, family=binomial(logit))
individuals$Hispanic <- ifelse(individuals$Race == "HISPANIC", 1,0)
hispanic <- glm(diabetic~Hispanic, data = individuals, family=binomial(logit))
individuals$Asian <- ifelse(individuals$Race == "Asian", 1,0)
asian <- glm(diabetic~Asian, data = individuals, family=binomial(logit))
individuals$Biracial <- ifelse(individuals$Race == "BIRACIAL", 1,0)
biracial <- glm(diabetic~Biracial, data = individuals, family=binomial(logit))
individuals$PacIslander <- ifelse(individuals$Race == "Pac Islander", 1,0)
pacislander <- glm(diabetic~PacIslander, data = individuals, family=binomial(logit))
individuals$AmIndian <- ifelse(individuals$Race == "Am Indian", 1,0)
amindian <- glm(diabetic~AmIndian, data = individuals, family=binomial(logit))
individuals$Unknown <- ifelse(individuals$Race == "Unknown", 1,0)
unknown <- glm(diabetic~Unknown, data = individuals, family=binomial(logit))
individuals$Other <- ifelse(individuals$Race == "Other", 1,0)
other <- glm(diabetic~Other, data = individuals, family=binomial(logit))
individuals$Refused <- ifelse(individuals$Race == "Pt Refused", 1,0)
refused <- glm(diabetic~Refused, data = individuals, family=binomial(logit))
# Collect slope p-values, coefficients (log odds ratios) and Wald 95% CIs
# ([,x][2] indexes the slope row of each model's coefficient table).
# Have already created columns for all races, add these column names to this list
race_list = c("Black","White","Hispanic","Asian","Biracial", "PacIslander", "AmIndian", "Unknown","Other","Refused")
race_p_values = c(summary(black)$coefficients[,4][2],summary(white)$coefficients[,4][2],
summary(hispanic)$coefficients[,4][2],summary(asian)$coefficients[,4][2],
summary(biracial)$coefficients[,4][2],summary(pacislander)$coefficients[,4][2],
summary(amindian)$coefficients[,4][2],summary(unknown)$coefficients[,4][2],
summary(other)$coefficients[,4][2],summary(refused)$coefficients[,4][2])
race_coefficients = c(black$coefficients[2],white$coefficients[2],hispanic$coefficients[2],
asian$coefficients[2],biracial$coefficients[2],pacislander$coefficients[2],
amindian$coefficients[2], unknown$coefficients[2],other$coefficients[2],
refused$coefficients[2])
race_lower = c(confint.default(black)[,1][2],confint.default(white)[,1][2],confint.default(hispanic)[,1][2],
confint.default(asian)[,1][2],confint.default(biracial)[,1][2],confint.default(pacislander)[,1][2],
confint.default(amindian)[,1][2],confint.default(unknown)[,1][2],confint.default(other)[,1][2],
confint.default(refused)[,1][2])
race_upper = c(confint.default(black)[,2][2],confint.default(white)[,2][2],confint.default(hispanic)[,2][2],
confint.default(asian)[,2][2],confint.default(biracial)[,2][2],confint.default(pacislander)[,2][2],
confint.default(amindian)[,2][2],confint.default(unknown)[,2][2],confint.default(other)[,2][2],
confint.default(refused)[,2][2])
# Assemble a per-race summary table; the subset drops small/uninformative
# categories (rows 6,7,8,10: PacIslander, AmIndian, Unknown, Refused).
race <- tibble(race_list)
race$p_value <- race_p_values
race$coefficient <- race_coefficients
race$lower <- race_lower
race$upper <- race_upper
race_subset <- race[-c(6,7,8,10),]
# Log-odds-ratio point estimates with 95% CI error bars, all races.
ggplot(race, aes(x=race_list, y=coefficient, group=1, ymin = lower, ymax = upper)) +
geom_point(shape=21, size=3, fill="blue") +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1) + labs(x="Race",y="Log Odds Ratio")
# Same plot on the reduced set of races.
ggplot(race_subset, aes(x=race_list, y=coefficient, group=1, ymin = lower, ymax = upper)) +
geom_point(shape=21, size=3, fill="blue") +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1) + labs(x="Race",y="Log Odds Ratio") +
ggtitle("Log Odds Ratio Among Different Races for Carilion Data")
# Ethnicity: one multi-level factor model (reference level absorbed into
# the intercept; rows 2-5 of the coefficient table are the contrasts).
# Null hypothesis: There is no difference in proportion of a given ethnicity between populations of diabetes and non-diabetes patients
# Alternative hypothesis: There is a difference in proportion of a given ethnicity between populations of diabetes and non-diabetes patients
ethnicity <- glm(diabetic~Ethnicity, data = individuals, family=binomial(logit))
coefficients <- summary(ethnicity)$coefficients
ethnicity_list <- c("Hispanic", "Non-Hispanic","Pt Refused","Unknown")
ethnicity_table <- tibble(ethnicity_list)
ethnicity_table$estimate <- coefficients[-1,1]
ethnicity_table$p_value <- coefficients[-1,4]
ethnicity_table$lower <- c(confint.default(ethnicity)[,1][2],confint.default(ethnicity)[,1][3],
confint.default(ethnicity)[,1][4],confint.default(ethnicity)[,1][5])
ethnicity_table$upper <- c(confint.default(ethnicity)[,2][2],confint.default(ethnicity)[,2][3],
confint.default(ethnicity)[,2][4],confint.default(ethnicity)[,2][5])
ggplot(ethnicity_table, aes(x=ethnicity_list, y=estimate, ymin = lower, ymax = upper)) +
geom_point(shape=21, size=3, fill="blue") +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1) + labs(x="Ethnicity",y="Log Odds Ratio")+
ggtitle("Log Odds Ratio Among Different Ethnicities for Carilion Data")
# Gender: single logistic fit; results inspected interactively.
# Null hypothesis: There is no difference in proportion of a given gender between populations of diabetes and non-diabetes patients
# Alternative hypothesis: There is a difference in proportion of a given gender between populations of diabetes and non-diabetes patients
gender <- glm(diabetic~Gender, data = individuals, family=binomial(logit))
summary(gender)$coefficients
confint.default(gender)
# UVA Data -- same diabetic-flagging and race/ethnicity regressions as the
# Carilion section above, applied to the UVA dataset.
# NOTE(review): the load()/subset() lines duplicate those run earlier in
# this script; race codes here are single letters (B/W/H/A/O/U/I/N).
# Loading UVA data
load("//galen.storage.virginia.edu/ivy-hip-chru/ekd6bx/UVA_ind_weather_merged")
# Creating diabetes subset for UVA data
# There are 14086 observations that have 0 in the dx column - ignore *for now
missing <- subset(weather_merged, dx == '0')
weather_merged2 <- subset(weather_merged, dx != '0')
# Truncated forms of the dx code for 4- / 3-digit ICD-9 matching; the
# icd* vectors are defined earlier in this script.
weather_merged2$Principal.Diagnosis4 <- substr(weather_merged2$dx, 2, 5)
weather_merged2$Principal.Diagnosis3 <- substr(weather_merged2$dx, 2, 4)
all_diabetes <- c(icd9_main, icd9_list, icd9_list2, icd9_3, icd10)
weather_merged2$diabetic <- ifelse(weather_merged2$dx %in% all_diabetes | weather_merged2$Principal.Diagnosis4 %in% all_diabetes | weather_merged2$Principal.Diagnosis3 %in% all_diabetes,1,0)
unique(weather_merged2$race)
# Univariate logistic regressions on single-letter race codes
# (comment said "t tests"; these are glm fits with a logit link).
# Race
weather_merged2$Black <- ifelse(weather_merged2$race == "B", 1,0)
uva_black <- glm(diabetic~Black, data = weather_merged2, family=binomial(logit))
weather_merged2$White <- ifelse(weather_merged2$race == "W", 1,0)
uva_white <- glm(diabetic~White, data = weather_merged2, family=binomial(logit))
weather_merged2$Hispanic <- ifelse(weather_merged2$race == "H", 1,0)
uva_hispanic <- glm(diabetic~Hispanic, data = weather_merged2, family=binomial(logit))
weather_merged2$Asian <- ifelse(weather_merged2$race == "A", 1,0)
uva_asian <- glm(diabetic~Asian, data = weather_merged2, family=binomial(logit))
weather_merged2$Other <- ifelse(weather_merged2$race == "O", 1,0)
uva_other <- glm(diabetic~Other, data = weather_merged2, family=binomial(logit))
weather_merged2$Unknown <- ifelse(weather_merged2$race == "U", 1,0)
uva_unknown <- glm(diabetic~Unknown, data = weather_merged2, family=binomial(logit))
weather_merged2$I <- ifelse(weather_merged2$race == "I", 1,0)
uva_i <- glm(diabetic~I, data = weather_merged2, family=binomial(logit))
weather_merged2$N <- ifelse(weather_merged2$race == "N", 1,0)
uva_n <- glm(diabetic~N, data = weather_merged2, family=binomial(logit))
# Collect slope p-values, log-odds-ratio coefficients and Wald 95% CIs.
uva_race_list = c("Black","White","Hispanic","Asian","Other","Unknown","I","N")
uva_race_p_values = c(summary(uva_black)$coefficients[,4][2],summary(uva_white)$coefficients[,4][2],
summary(uva_hispanic)$coefficients[,4][2],summary(uva_asian)$coefficients[,4][2],
summary(uva_other)$coefficients[,4][2],summary(uva_unknown)$coefficients[,4][2],
summary(uva_i)$coefficients[,4][2],summary(uva_n)$coefficients[,4][2])
uva_race_coefficients = c(uva_black$coefficients[2],uva_white$coefficients[2],uva_hispanic$coefficients[2],
uva_asian$coefficients[2],uva_other$coefficients[2],uva_unknown$coefficients[2],
uva_i$coefficients[2],uva_n$coefficients[2])
uva_race_lower = c(confint.default(uva_black)[,1][2],confint.default(uva_white)[,1][2],confint.default(uva_hispanic)[,1][2],
confint.default(uva_asian)[,1][2],confint.default(uva_other)[,1][2],confint.default(uva_unknown)[,1][2],
confint.default(uva_i)[,1][2],confint.default(uva_n)[,1][2])
uva_race_upper = c(confint.default(uva_black)[,2][2],confint.default(uva_white)[,2][2],confint.default(uva_hispanic)[,2][2],
confint.default(uva_asian)[,2][2],confint.default(uva_other)[,2][2],confint.default(uva_unknown)[,2][2],
confint.default(uva_i)[,2][2],confint.default(uva_n)[,2][2])
# Summary table; subset drops rows 6-8 (Unknown, I, N).
uva_race <- tibble(uva_race_list)
uva_race$p_value <- uva_race_p_values
uva_race$coefficient <- uva_race_coefficients
uva_race$lower <- uva_race_lower
uva_race$upper <- uva_race_upper
uva_race_subset <- uva_race[-c(6,7,8),]
# Log-odds-ratio plots with 95% CI error bars (all races, then subset).
ggplot(uva_race, aes(x=uva_race_list, y=coefficient, group=1, ymin = lower, ymax = upper)) +
geom_point(shape=21, size=3, fill="blue") +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1) + labs(x="Race",y="Log Odds Ratio")
ggplot(uva_race_subset, aes(x=uva_race_list, y=coefficient, group=1, ymin = lower, ymax = upper)) +
geom_point(shape=21, size=3, fill="blue") +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1) + labs(x="Race",y="Log Odds Ratio") +
ggtitle("Log Odds Ratio Among Different Races for UVA Data")
# Ethnicity: single multi-level factor model, inspected interactively.
unique(weather_merged2$ethnicity)
uva_ethnicity <- glm(diabetic~ethnicity, data = weather_merged2, family=binomial(logit))
summary(uva_ethnicity)
|
6aa8e39514ea7cf69648be8cd38960df4bd7b0b2
|
1685e0fcd453743bf2806af06b9adb86aaaf705c
|
/server.R
|
36bf4975dbd16a7ed6350f432abfd97eee6a67b7
|
[] |
no_license
|
NZF85/Coursera-Developing-Data-Products
|
53e065af233a9e88461813135113efca0f4da06c
|
9779ff2215688945117b2530c38a869707ee3de9
|
refs/heads/master
| 2016-09-05T22:39:44.683525
| 2015-09-20T02:46:10
| 2015-09-20T02:46:10
| 42,778,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,213
|
r
|
server.R
|
library(shiny)
# Load data processing file -- defines `data` plus the groupBy*/plot*
# helper functions used in the reactives below.
source("data.R")
# Alphabetical list of countries, used to populate the checkbox control.
Country <- sort(unique(data$Country))
# Shiny server
shinyServer(
function(input, output) {
# Echo the dataset id chosen in the UI.
output$setid <- renderText({input$setid})
# Build the UN data URL; recomputed only when the "Add" button fires
# (isolate() prevents re-evaluation on every setid keystroke).
output$address <- renderText({
input$goButtonAdd
isolate(paste("http://data.un.org/Data.aspx?d=UNODC&f=tableCode%3a1",
input$setid, sep=""))
})
# getPage<-function(url) {
# return(tags$iframe(src = url,
# style="width:100%;",
# frameborder="0", id="iframe",
# height = "500px"))
# }
# Render a link that opens the given URL in a new browser tab.
openPage <- function(url) {
return(tags$a(href=url, "Click here!", target="_blank"))
}
output$inc <- renderUI({
input$goButtonDirect
isolate(openPage(paste("http://data.un.org/Data.aspx?d=UNODC&f=tableCode%3a1",
input$setid, sep="")))
})
# Initialize reactive values (current country selection)
values <- reactiveValues()
values$Country <- Country
# Create event type checkbox, pre-selected from values$Country
output$CountryControl <- renderUI({
checkboxGroupInput('Country', 'Country:',
Country, selected = values$Country)
})
# Add observer on select-all button (== 0 skips the initial run)
observe({
if(input$selectAll == 0) return()
values$Country <- Country
})
# Add observer on clear-all button
observe({
if(input$clearAll == 0) return()
values$Country <- c() # empty list
})
# Prepare dataset: each reactive filters/aggregates `data` by the
# timeline and rate sliders plus the selected countries (helpers from
# data.R).
dataTable <- reactive({
groupByCountry(data, input$timeline[1],
input$timeline[2], input$Rate[1],
input$Rate[2], input$Country)
})
dataTableByYear <- reactive({
groupByYearAgg(data, input$timeline[1],
input$timeline[2], input$Rate[1],
input$Rate[2], input$Country)
})
dataTableByRate <- reactive({
groupByYearRate(data, input$timeline[1],
input$timeline[2], input$Rate[1],
input$Rate[2], input$Country)
})
dataTableByRateAvg <- reactive({
groupByRateAvg(data, input$timeline[1],
input$timeline[2], input$Rate[1],
input$Rate[2], input$Country)
})
dataTableByRateCountryAvg <- reactive({
groupByRateCountryAvg(data, input$timeline[1],
input$timeline[2], input$Rate[1],
input$Rate[2], input$Country)
})
# Render data table
output$dTable <- renderDataTable({
dataTable()
} #, options = list(bFilter = FALSE, iDisplayLength = 50)
)
# Charts -- renderChart / plot* helpers are defined outside this file
# (presumably rCharts; confirm in data.R).
output$CountryByYear <- renderChart({
plotCountryCountByYear(dataTableByYear())
})
output$RateByYear <- renderChart({
plotRateByYear(dataTableByRate())
})
output$RateByYearAvg <- renderChart({
plotRateByYearAvg(dataTableByRateAvg())
})
output$RateByCountryAvg <- renderChart({
plotRateByCountryAvg(dataTableByRateCountryAvg())
})
} # end of function(input, output)
)
|
b6ebe427c50a266fe094f851cee343fba084643f
|
faf431062499cce160bf7ef6ee34737c5091ff3d
|
/finalAssigment/ui.R
|
92b20d9403c11d907cdd1775b20bd8716eff97ec
|
[] |
no_license
|
mcastrol/dataProductFinalAssignment
|
f8593832c9ac47bf0d3814083c8a2f140112e1a0
|
e1c8f14911dcf0eca63e0942d3880a41f19f7177
|
refs/heads/master
| 2020-03-11T12:56:48.216696
| 2018-04-18T06:38:41
| 2018-04-18T06:38:41
| 130,011,239
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,599
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for the wage regression analysis and prediction app.
shinyUI(fluidPage(
# Application title
titlePanel("Wage linear regression analysis and prediction"),
# Sidebar: predictor choice for the regression plot, plus the four
# input values used for the wage prediction.
sidebarLayout(
sidebarPanel(
selectInput("predictor", "Choose a predictor:", choices = c("year", "age", "education","race")),
sliderInput("year", "Year to predict", min = 2000, max = 2012, value = 2006),
sliderInput("age", "Age", min = 18, max = 80, value = 42),
selectInput("education", "Education", choices = c("<HS Grad", "Hs Grad", "Some College","Advanced Degree")),
selectInput("race", "Race", choices = c("White","Black", "Asian", "Other"))
),
# Main panel: regression plot, predicted wage, and usage instructions.
mainPanel(
plotOutput("regression"),
tags$b('Predicted Wage ($):'),
tags$b(textOutput("wagepredicted")),
br(),
br(),
h4('Instructions'),
helpText("This application is for see the linear regression of Wage respect to year, age, education and race"),
helpText("You have to choose a predictor and the app gives you the Adj.R2, interceptor, the slope and pvalue"),
helpText("Moreover, you can predict the wage ($) of a person by entering the year, age, education and race")
)
)
))
|
0023b4a4ce98a14e9ba98bebfb47075d1b22120e
|
07028f1e1126661a945e35ef3a7baa0f4745da0e
|
/cachematrix.R
|
7cb615f81bd6ce56f9fcd6abf02f4e036a59b33d
|
[] |
no_license
|
hpalenqueoroz/ProgrammingAssignment2
|
00934512b91b1339171bd471f0179949240c834d
|
50a7056e5e9d938a99dacf3b9ec2ad07df1fb644
|
refs/heads/master
| 2022-12-20T08:07:09.061590
| 2020-10-20T17:37:51
| 2020-10-20T17:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,249
|
r
|
cachematrix.R
|
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and a cached inverse:
##   set(y)        - replace the stored matrix and drop any cached inverse
##   get()         - return the stored matrix
##   setInverse(i) - store a computed inverse in the cache
##   getInverse()  - return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix invalidates the old inverse
  }

  get <- function() x

  setInverse <- function(inverse) cached_inverse <<- inverse

  getInverse <- function() cached_inverse

  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Return the inverse of a special matrix object created by
## makeCacheMatrix, computing it only on the first call.
##
## If the object already holds a cached inverse it is returned directly
## (after a "getting cached data" message); otherwise the inverse is
## computed with solve(), stored in the object's cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()

  ## Cache hit: skip the computation entirely.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  ## Cache miss: compute, store, and return the inverse of 'x'.
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
}
## TO USE.
## first you can source the .R file ("cachematrix.R" in this case)
## You must create a matrix (or use any stored matrix), in this
## case, we use "probe"
## > probe <- makeCacheMatrix(matrix(1:4,2,2))
## > probe$get()
## [,1] [,2]
## [1,] 1 3
## [2,] 2 4
## if we use probe$getInverse(), the result is NULL
## Now, to resolve the inverse of matrix probe:
## > cacheSolve(probe)
## Now the cache is stored and we can use
## > probe$getInverse()
## what if we want to probe with a 4*4 matrix:
## > probe$set(matrix(c(22,15,10,9,5,12,5,21,1,10,11,9,8,13,2,7),4,4))
## > probe$get()
## [,1] [,2] [,3] [,4]
## [1,] 22 5 1 8
## [2,] 15 12 10 13
## [3,] 10 5 11 2
## [4,] 9 21 9 7
## now probe$getInverse() give us NULL
## cacheSolve(probe) give us the inverse.
|
c61718cba976e3e265d9f15f484b0454981a81b2
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/igraph/tests/test-notable.R
|
40906ae84c2e0d078ab54313ed0bf50a5825f92b
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"GPL-2.0-or-later",
"MIT"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 936
|
r
|
test-notable.R
|
# Tests for igraph's "notable graph" constructors: make_graph() called
# with the name of a famous graph (e.g. "Levi", "Krackhardt kite").
context("Notable graphs")

test_that("notable graphs work with make_graph", {
  # make_graph(<name>) must build the same graph as the legacy
  # graph.famous() constructor.
  g <- make_graph("Levi")
  g2 <- graph.famous("Levi")
  expect_true(identical_graphs(g, g2))
})

test_that("make_graph for notable graphs is case insensitive", {
  g <- make_graph("Levi")
  g2 <- make_graph("levi")
  expect_true(identical_graphs(g, g2))
})

test_that("spaces are replaced in make_graph for notable graphs", {
  # Spaces in the graph name are treated like underscores.
  g <- make_graph("Krackhardt_Kite")
  g2 <- make_graph("Krackhardt kite")
  expect_true(identical_graphs(g, g2))
})

test_that("warnings are given for extra arguments in make_graph for notables", {
  # Extra constructor arguments are ignored for notable graphs, with a
  # warning, and the resulting graph is unchanged.
  g0 <- make_graph("Levi")
  expect_warning(g1 <- make_graph("Levi", n = 10))
  expect_warning(g2 <- make_graph("Levi", isolates = "foo"))
  expect_warning(g3 <- make_graph("Levi", directed = FALSE))
  expect_true(identical_graphs(g0, g1))
  expect_true(identical_graphs(g0, g2))
  expect_true(identical_graphs(g0, g3))
})
|
3e3eb97c49f057b349cc395b274b43fd5a97aec0
|
1c50623e94dd4bdf27ba0140002e367426261dc1
|
/RNASeqAna/man/edgeRAnaRPKM.Rd
|
19ac02d7f43436c8f4ccc5306688646aa53ed9e7
|
[] |
no_license
|
fxy1018/EdgeR_RNA_Analysis_R_Tools
|
cee772e8262a68ce0079cbeb6da8faf70dba60ac
|
5a47792a42effa183aeea1862d0dce1160c2feb2
|
refs/heads/master
| 2021-08-06T05:49:01.763325
| 2017-11-03T15:34:22
| 2017-11-03T15:34:22
| 104,395,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 627
|
rd
|
edgeRAnaRPKM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edgeRAnaRPKM.R
\name{edgeRAnaRPKM}
\alias{edgeRAnaRPKM}
\title{A edgeRAnaRPKM Function}
\usage{
edgeRAnaRPKM(files, dir, group, outprefix, spe)
}
\arguments{
\item{files}{a data frame which contains edgeR pathway analysis results}
\item{dir}{directory in which the count files are located}
\item{group}{experiment condition, which is a factor}
\item{outprefix}{the prefix of the output file}
\item{spe}{species identifier (listed in the usage above but previously undocumented; confirm its intended meaning against the function source)}
}
\description{
This function generate the fit model based on edgeR
}
\examples{
edgeRAnaRPKM(pathway, up=FALSE)
edgeRAnaRPKM()
}
\keyword{analysis}
\keyword{pathway}
|
30bce0c5ddd8ac20e8b0607f69ccb64ad2e31a91
|
eb4667b178e418d936c35569383e5cb0663f93ad
|
/R/multtest.gp.bin.R
|
f4e260ec39a25f2ba4208d2b47523ab4cfc2e7ee
|
[] |
no_license
|
cran/RVAideMemoire
|
21081d49de9999a7438c40de05ab67a145336a02
|
6a48aaa7facac606e954b06a9cc1ea46b387d575
|
refs/heads/master
| 2023-08-31T00:44:09.327145
| 2023-08-23T07:30:05
| 2023-08-23T09:30:39
| 17,692,998
| 7
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,249
|
r
|
multtest.gp.bin.R
|
# grDevices: n2mfrow
# car: Anova
# Column-by-column comparison of binary (0/1) variables between the
# groups defined by `fac`, with p values adjusted for multiplicity.
#
# tab      : data frame (or coercible) of binary response variables,
#            one column per variable
# fac      : grouping factor; at least 2 levels must remain after
#            dropping unused levels
# test     : "LRT" (binomial GLM likelihood-ratio test, via car::Anova)
#            or "Fisher" (exact test on the 2-way table)
# p.method : adjustment method passed to p.adjust()
# ordered  : if TRUE, result rows are sorted by adjusted p value
# ...      : forwarded to the chosen per-column test function
#
# Returns a classed list ("multtest.gp.bin") whose $tab combines the
# test results with per-group proportions and standard errors.
#
# NOTE(review): se() and .psignif() are internals of the enclosing
# package (not visible in this file) -- presumably a proportion
# standard error and a p-value significance-mark formatter; confirm
# against the package source.
multtest.gp.bin <- function(tab,fac,test=c("LRT","Fisher"),p.method="fdr",ordered=TRUE,...) {
  test <- match.arg(test)
  tab <- as.data.frame(tab)
  # Drop unused levels so nlevels() counts only groups actually present.
  fac <- droplevels(factor(fac))
  nlev <- nlevels(fac)
  if (nlev<2) {stop("at least 2 groups are needed")}
  # Per-group proportion of 1s for each variable (column-wise, NAs removed).
  gp.prop <- as.matrix(t(aggregate(as.matrix(tab)~fac,FUN=function(x) sum(na.omit(x))/length(na.omit(x))))[-1,])
  colnames(gp.prop) <- paste0("Prop.",abbreviate(levels(fac),1))
  mode(gp.prop) <- "numeric"
  # Matching per-group standard errors (se() comes from the package).
  gp.se <- as.matrix(t(aggregate(as.matrix(tab)~fac,FUN=function(x) se(sum(na.omit(x)),length(na.omit(x)))))[-1,])
  colnames(gp.se) <- paste0("SE.",abbreviate(levels(fac),1))
  mode(gp.se) <- "numeric"
  # Dispatch to the per-column test implementation.
  test.f <- switch(test,LRT=multtest.gp.bin.lrt,Fisher=multtest.gp.bin.fisher)
  tab.res <- test.f(tab,fac,...)
  tab.res$P.value <- p.adjust(tab.res$P.value,method=p.method)
  nc <- ncol(tab.res)
  # Placeholder column, created before cbind() so the significance-mark
  # column sits between the test results and the group summaries; it is
  # filled with .psignif() after rounding.
  tab.res[,nc+1] <- integer(ncol(tab))
  tab.res <- cbind(tab.res,gp.prop,gp.se)
  tab.res <- signif(tab.res,5)
  tab.res <- as.data.frame(tab.res)
  tab.res[,nc+1] <- .psignif(tab.res$P.value)
  colnames(tab.res)[nc+1] <- " "
  if (ordered) {tab.res <- tab.res[order(tab.res$P.value),]}
  res <- list(tab=tab.res,p.method=p.method,labs=levels(fac))
  class(res) <- c("multtest","multtest.gp.bin","multtest.gp","list")
  return(res)
}
# Column-wise binomial-GLM likelihood-ratio tests of each binary
# variable in `tab` against the grouping factor `fac`.
#
# tab : data frame of 0/1 response variables (one column per variable)
# fac : grouping factor
# ... : passed on to car::Anova()
#
# Returns a data frame with one row per column of `tab` and columns
# Chisq (LR statistic) and P.value (unadjusted).
multtest.gp.bin.lrt <- function(tab,fac,...) {
  nvar <- ncol(tab)
  lab <- colnames(tab)
  res <- data.frame(Chisq=integer(nvar),P.value=integer(nvar),row.names=lab)
  # seq_len() (rather than 1:ncol(tab)) avoids a spurious 1:0 iteration
  # when `tab` has no columns.
  for (i in seq_len(nvar)) {
    x <- tab[,i]
    mod <- glm(x~fac,family="binomial")
    test <- car::Anova(mod,test="LR",...)
    res[i,] <- test[1,c("LR Chisq","Pr(>Chisq)")]
  }
  return(res)
}
# Column-wise Fisher exact tests of each binary variable in `tab`
# against the grouping factor `fac`.
#
# tab : data frame of 0/1 response variables (one column per variable)
# fac : grouping factor
# ... : passed on to fisher.test()
#
# Returns a data frame with one row per column of `tab` and a single
# column P.value (unadjusted).
multtest.gp.bin.fisher <- function(tab,fac,...) {
  nvar <- ncol(tab)
  lab <- colnames(tab)
  res <- data.frame(P.value=integer(nvar),row.names=lab)
  # seq_len() (rather than 1:ncol(tab)) avoids a spurious 1:0 iteration
  # when `tab` has no columns.
  for (i in seq_len(nvar)) {
    x <- tab[,i]
    # 2-way contingency table; releveling puts the "1" (success) level first.
    tab.cont <- table(fac,relevel(factor(x),ref="1"))
    test <- fisher.test(tab.cont,...)
    res[i,] <- test$p.value
  }
  return(res)
}
# Barplot method (one panel per variable) for "multtest.gp.bin" objects:
# per-group proportions with +/- SE error bars and the adjusted p value
# printed in each panel.
#
# x      : object returned by multtest.gp.bin()
# signif : if TRUE, plot only variables with adjusted p <= alpha
# alpha  : significance threshold used when signif = TRUE
# vars   : optional row indices selecting which variables to plot
# xlab, ylab, titles, groups : labelling overrides
# ...    : forwarded to barplot()
plot.multtest.gp.bin <- function(x,signif=FALSE,alpha=0.05,vars=NULL,xlab="Group",ylab="Mean (+/- SE) proportion",
                                 titles=NULL,groups=NULL,...) {
  rows <- if (signif) {
    which(x$tab$P.value<=alpha)
  } else {
    1:nrow(x$tab)
  }
  # NOTE(review): when vars is NULL this keeps only the *count* of the
  # rows selected above (1:length(rows)), i.e. the first k rows of the
  # table. That matches the significant rows only when the table is
  # ordered by P.value (the default ordered=TRUE in multtest.gp.bin);
  # confirm intended behaviour for unordered tables.
  rows <- if (is.null(vars)) {
    1:length(rows)
  } else {
    vars
  }
  tab2 <- x$tab[rows,]
  n <- length(rows)
  # Locate the significance-mark column " "; the group means sit just
  # after it, followed by the group SEs (see multtest.gp.bin()$tab).
  nc <- which(colnames(tab2)==" ")
  col.m <- (nc+1):(nc+length(x$labs))
  col.s <- ((nc+1)+length(x$labs)):(nc+2*length(x$labs))
  labs <- if (is.null(groups)) {x$labs} else {groups}
  # NOTE(review): par(mfrow=...) is changed without restoring the
  # caller's graphical parameters on exit.
  par(mfrow=grDevices::n2mfrow(n))
  for (i in 1:n) {
    m <- unlist(tab2[i,col.m])
    names(m) <- labs
    s <- unlist(tab2[i,col.s])
    # Expand the y limits 30% beyond the error bars.
    ymin <- ifelse(any(m-s<0),1.3*min(m-s),0)
    ymax <- ifelse(any(m+s>0),1.3*max(m+s),0)
    g <- barplot(m,main=ifelse(is.null(titles),rownames(tab2)[i],titles[i]),xlab=xlab,ylab=ylab,
                 ylim=c(ymin,ymax),...)
    # Error bars drawn as flat-capped arrows at each bar centre.
    arrows(g,m-s,g,m+s,code=3,angle=90,length=0.06)
    ypval <- ifelse(any(m+s>0),1.2*max(m+s),1.2*min(m-s))
    P <- ifelse(tab2$P.value[i]<0.0001,"P < 0.0001",paste("P =",round(tab2$P.value[i],4)))
    text(mean(g),ypval,P)
  }
}
|
85875cc3b5675eb18dc46b6f58472b5d18432e2d
|
17d582790e37f4a1fa3cfcfc531fdf5c4f4086d4
|
/packrat/lib/x86_64-redhat-linux-gnu/3.5.1/lme4/tests/testthat/test-rank.R
|
2f9bcb44ddc267a25e5292b87ae72a263d0b6411
|
[] |
no_license
|
teyden/asthma-research
|
bcd02733aeb893074bb71fd58c5c99de03888640
|
09c1fb98d09e897e652620dcab1482a19743110f
|
refs/heads/master
| 2021-01-26T08:20:58.263136
| 2020-02-27T04:12:56
| 2020-02-27T04:12:56
| 243,374,255
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,906
|
r
|
test-rank.R
|
library("testthat")
library("lme4")
# Tests that lme4's fitters detect rank-deficient fixed-effect design
# matrices and honour the check.rankX control (drop / stop / ignore).
context("testing fixed-effect design matrices for full rank")

test_that("lmerRank", {
  set.seed(101)
  n <- 20
  # x and y are identical, so a model with both is rank deficient;
  # y2 differs from y in a single entry and is therefore full rank.
  x <- y <- rnorm(n)
  d <- data.frame(x,y,
                  z = rnorm(n),
                  r = sample(1:5, size=n, replace=TRUE),
                  y2 = y + c(0.001, rep(0,n-1)))
  expect_message(fm <- lmer( z ~ x + y + (1|r), data=d),
                 "fixed-effect model matrix is .*rank deficient")
  ## test reconstitution of full parameter vector (with NAs)
  expect_equal(names(fixef(fm,add.dropped=TRUE)),
               c("(Intercept)","x","y"))
  expect_equal(fixef(fm,add.dropped=TRUE)[1:2],
               fixef(fm))
  expect_equal(nrow(anova(fm)), 1L)
  # check.rankX="stop" errors instead of dropping the aliased column;
  # "ignore" passes the singular matrix through and fails downstream.
  expect_error(lmer( z ~ x + y + (1|r), data=d,
                     control=lmerControl(check.rankX="stop")),
               "rank deficient")
  expect_error(lmer( z ~ x + y + (1|r), data=d,
                     control=lmerControl(check.rankX="ignore")),
               "not positive definite")
  ## should work:
  expect_is(lmer( z ~ x + y2 + (1|r), data=d), "lmerMod")
  # Factorial design with one missing cell: the interaction model is
  # rank deficient; collapsing a:b into a single factor is not.
  d2 <- expand.grid(a=factor(1:4),b=factor(1:4),rep=1:10)
  n <- nrow(d2)
  d2 <- transform(d2,r=sample(1:5, size=n, replace=TRUE),
                  z=rnorm(n))
  d2 <- subset(d2,!(a=="4" & b=="4"))
  expect_error(lmer( z ~ a*b + (1|r), data=d2,
                     control=lmerControl(check.rankX="stop")),
               "rank deficient")
  expect_message(fm <- lmer( z ~ a*b + (1|r), data=d2),
                 "fixed-effect model matrix is rank deficient")
  d2 <- transform(d2, ab=droplevels(interaction(a,b)))
  ## should work:
  expect_is(fm2 <- lmer( z ~ ab + (1|r), data=d2), "lmerMod")
  # The dropped-column fit and the collapsed-factor fit must agree.
  expect_equal(logLik(fm), logLik(fm2))
  expect_equal(sum(anova(fm)[, "Df"]), anova(fm2)[, "Df"])
  expect_equal(sum(anova(fm)[, "Sum Sq"]), anova(fm2)[, "Sum Sq"])
})
test_that("glmerRank", {
  # Same rank-deficiency checks for the binomial GLMM interface.
  set.seed(111)
  n <- 100
  # x and y identical -> rank deficient; y2 is an independent draw and
  # keeps the design full rank.
  x <- y <- rnorm(n)
  d <- data.frame(x, y,
                  z = rbinom(n,size=1,prob=0.5),
                  r = sample(1:5, size=n, replace=TRUE),
                  y2 = ## y + c(0.001,rep(0,n-1)), ## too small: get convergence failures
                  ## FIXME: figure out how small a difference will still fail?
                  rnorm(n))
  expect_message(fm <- glmer( z ~ x + y + (1|r), data=d, family=binomial),
                 "fixed-effect model matrix is rank deficient")
  expect_error(glmer( z ~ x + y + (1|r), data=d, family=binomial,
                      control=glmerControl(check.rankX="stop")),
               "rank deficient.*rank.X.")
  expect_is(glmer( z ~ x + y2 + (1|r), data=d, family=binomial), "glmerMod")
})
test_that("nlmerRank", {
  set.seed(101)
  n <- 1000
  nblock <- 15
  x <- abs(rnorm(n))
  y <- rnorm(n)
  z <- rnorm(n,mean=x^y)
  r <- sample(1:nblock, size=n, replace=TRUE)
  d <- data.frame(x,y,z,r)
  ## save("d","nlmerRank.RData") ## see what's going on with difference in contexts
  # fModel2 adds a parameter c that is confounded with a (only a+c is
  # identifiable), used to probe rank handling in nlmer().
  fModel <- function(a,b) (exp(a)*x)^(b*y)
  fModf <- deriv(body(fModel), namevec = c("a","b"),
                 func = fModel)
  fModel2 <- function(a,b,c) (exp(a+c)*x)^(b*y)
  fModf2 <- deriv(body(fModel2), namevec = c("a","b","c"),
                  func = fModel2)
  ## should be OK: fails in test mode?
  nlmer(y ~ fModf(a,b) ~ a|r, d, start = c(a=1,b=1))
  ## FIXME: this doesn't get caught where I expected
  expect_error(nlmer(y ~ fModf2(a,b,c) ~ a|r, d, start = c(a=1,b=1,c=1)),"Downdated VtV")
})
test_that("ranksim", {
  # Simulation from a rank-deficient fit (x1 is constant and aliased
  # with the intercept) must give identical results with a fixed seed,
  # whether or not newdata is supplied.
  set.seed(101)
  x <- data.frame(id = factor(sample(10, 100, replace = TRUE)))
  x$y <- rnorm(nrow(x))
  x$x1 <- 1
  x$x2 <- ifelse(x$y<0, rnorm(nrow(x), mean=1), rnorm(nrow(x), mean=-1))
  m <- suppressMessages(lmer(y ~ x1 + x2 + (1 | id), data=x))
  expect_equal(simulate(m, nsim = 1, use.u = FALSE, newdata=x, seed=101),
               simulate(m, nsim = 1, use.u = FALSE, seed=101))
})
|
feaa13746a9a648f1e9ee8b1e568c0fc6b2f9d97
|
3f3e0d69fd9d9c8e9c9555756949568037971a8b
|
/Ch. 4/Results/Table_movements.r
|
9e46ab07bf72d8fe53b626186f20c399cf93d9e8
|
[] |
no_license
|
anasanz/MyScripts
|
28d5a6f244029674017d53d01f8c00307cb81ecb
|
d762b9582d99c6fc285af13150f95ffd2622c1a8
|
refs/heads/master
| 2021-05-10T08:56:54.228036
| 2021-03-08T07:11:51
| 2021-03-08T07:11:51
| 118,910,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,819
|
r
|
Table_movements.r
|
# NOTE(review): rm(list = ls()) and setwd() at the top of a script are
# discouraged (they clobber the user's session and working directory),
# but are kept as-is to preserve the script's behaviour.
rm(list = ls())
library(dplyr)
library(rgdal)
library(tidyr)
setwd("D:/PhD/Fourth chapter/Results/Analisis3_bottleneck_effect")
## -------------------------------------------------
## TABLE MOVEMENTS
## -------------------------------------------------
## -------------------------------------------------
## Distance between consecutive positions
## -------------------------------------------------
eudist <- read.table("D:/PhD/Fourth chapter/Results/Analisis3_bottleneck_effect/Euclidean_distances.txt", header = T, dec = ",", sep = "\t")
# Summary statistics: mean and SD of euclidean distance per period
mean_dist <- eudist %>%
  group_by(Period) %>%
  summarise(
    mean = mean(eu.dist, na.rm = TRUE),
    sd = sd(eu.dist, na.rm = TRUE),
  )
mean_dist <- as.data.frame(mean_dist)
## ---- Anova ----
d <- data.frame(y = eudist$eu.dist, period = eudist$Period)
m <- aov(y ~ period, data = d)
summary.lm(m)
par(mfrow = c(2,2))
plot(m) # Distribution is not very normal...
par(mfrow = c(1,1))
hist(d$y, breaks = 100)
m_dist <- summary(m)
tukey_dist <- TukeyHSD(m)
# Try with logarithmic transformation.
# NOTE: m_dist / tukey_dist below overwrite the untransformed results
# above; the log-scale ANOVA is the one reported in the final table.
eudist$log_eudist <- log(eudist$eu.dist + 0.00001)
## ---- Anova ----
d <- data.frame(y = eudist$log_eudist, period = eudist$Period)
m <- aov(y ~ period, data = d)
summary.lm(m)
par(mfrow = c(2,2))
plot(m) # Distribution improves
par(mfrow = c(1,1))
hist(d$y, breaks = 100)
m_dist <- summary(m)
tukey_dist <- TukeyHSD(m)
## -------------------------------------------------
## Proportion of change of field
## -------------------------------------------------
campos <- read.table("D:/PhD/Fourth chapter/Results/Analisis3_bottleneck_effect/uso_campos.txt", header = T, dec = ",", sep = "\t")
# Summary statistics
change_loc <- campos %>%
  group_by(Period) %>%
  summarise(
    mean = mean(prop.cambios, na.rm = TRUE),
    sd = sd(prop.cambios, na.rm = TRUE))
change_loc <- as.data.frame(change_loc)
## ---- Anova ----
d <- data.frame(y = campos$prop.cambios, period = campos$Period)
m <- aov(y ~ period, data = d)
m_change <- summary(m)
tukey_change <- TukeyHSD(m)
## -------------------------------------------------
## MCP
## -------------------------------------------------
mcp.hab <- read.table("D:/PhD/Fourth chapter/Results/Analisis3_bottleneck_effect/MCP_indiv_hab_avai.txt", header = T, dec = ",",
                      sep = "\t")
# Convert MCP area from m2 to hectares for the summary table.
mcp.hab$MCP.area_ha <- mcp.hab$MCP.area/10000
# Summary statistics
mcp <- mcp.hab %>%
  group_by(Period) %>%
  summarise(
    mean = mean(MCP.area_ha, na.rm = TRUE),
    sd = sd(MCP.area_ha, na.rm = TRUE))
mcp <- as.data.frame(mcp)
## ---- Anova ----
# NOTE(review): the ANOVA uses the raw MCP.area (m2), not MCP.area_ha;
# the F test is scale invariant so the result is the same either way.
d <- data.frame(y = mcp.hab$MCP.area, period = mcp.hab$Period)
m <- aov(y ~ period, data = d)
m_mcp <- summary(m)
tukey_mcp <- TukeyHSD(m)
## -------------------------------------------------
## FLYING
## -------------------------------------------------
d <- read.table("D:/PhD/Fourth chapter/GPS Cataluña/Ganga/Updated_24-2-2020/FINAL_ALLpos_no_regadio_ETRS89.txt", header = T, dec = ",",
                sep = "\t")
period <- c("Pre", "PreRep", "Rep")
# Percentage of flying positions (fly == 1) per period, pooled over all
# individuals. (Stored in `fly` but not used further below.)
fly <- list()
for (p in seq_along(period)){
  d_p <- d[which(d$period %in% period[p]), ]
  prop_fly <- (nrow(d_p[d_p$fly == 1, ])/nrow(d_p))*100
  fly[[p]] <- prop_fly
}
## ---- Nº flying positions per individual ----
# One row per individual (Logger_ID), one column per period.
data <- as.data.frame(matrix(NA, nrow = length(unique(d$Logger_ID)), ncol = 3))
rownames(data) <- unique(d$Logger_ID)
for (p in seq_along(period)){
  d_p <- d[which(d$period %in% period[p]), ]
  id <- unique(d_p$Logger_ID)
  for (i in seq_along(id)){
    d_p_id <- d_p[which(d_p$Logger_ID %in% id[i]), ]
    prop_fly <- round(((nrow(d_p_id[d_p_id$fly == 1, ]) + 0.0001)/nrow(d_p_id))*100, 3) # +0.00001 so that there is no error
    data[rownames(data) %in% id[i],p] <- prop_fly
  }
}
## ---- Mean and se ----
m_fly <- apply(data,2,mean, na.rm = TRUE)
sd_fly <- apply(data,2,sd, na.rm = TRUE)
mean_fly <- data.frame(Period = c("Pre", "PreRep", "Rep"), mean = m_fly, sd = sd_fly)
## ---- Anova ----
colnames(data) <- period
data2 <- gather(data, key = "Period", value = "Prop_fly")
data2$Period <- factor(data2$Period)
d <- data.frame(y = data2$Prop_fly, period = data2$Period)
d <- d[complete.cases(d), ]
m <- aov(y ~ period, data = d)
# BUG FIX: the ANOVA summary was printed but never assigned, so the
# reference to `m_flying` in the Join section below failed with
# "object 'm_flying' not found".
m_flying <- summary(m)
tukey_flying <- TukeyHSD(m)
## -------------------------------------------------
## Join
## -------------------------------------------------
# For each variable: keep the Tukey difference, a "lwr-upr" CI string,
# and the adjusted p value (columns 1, 5, 4 after adding CI).
mean_dist
m_dist
tukey_dist2 <- data.frame(tukey_dist$period)
tukey_dist2 <- round(tukey_dist2,2)
tukey_dist2$CI <- paste(tukey_dist2$lwr,"-",tukey_dist2$upr, sep = "")
tukey_dist2 <- tukey_dist2[,c(1,5,4)]
change_loc
m_change
tukey_change
tukey_change2 <- data.frame(tukey_change$period)
tukey_change2 <- round(tukey_change2,2)
tukey_change2$CI <- paste(tukey_change2$lwr,"-",tukey_change2$upr, sep = "")
tukey_change2 <- tukey_change2[,c(1,5,4)]
mcp
m_mcp
tukey_mcp
tukey_mcp2 <- data.frame(tukey_mcp$period)
tukey_mcp2 <- round(tukey_mcp2,2)
tukey_mcp2$CI <- paste(tukey_mcp2$lwr,"-",tukey_mcp2$upr, sep = "")
tukey_mcp2 <- tukey_mcp2[,c(1,5,4)]
mean_fly
m_flying
tukey_flying
tukey_flying2 <- data.frame(tukey_flying$period)
tukey_flying2 <- round(tukey_flying2,2)
tukey_flying2$CI <- paste(tukey_flying2$lwr,"-",tukey_flying2$upr, sep = "")
tukey_flying2 <- tukey_flying2[,c(1,5,4)]
# 1. Save mean+sd (drop the duplicated Period columns: 1, 4, 7, 10)
table1 <- cbind(mean_dist, change_loc, mcp, mean_fly)
table1 <- table1[,-c(1,4,7,10)]
table1 <- round(table1,2)
setwd("D:/PhD/Fourth chapter/Results/Analisis3_bottleneck_effect")
write.csv(table1, file = "Table_movement_est.csv")
#2. Save Tukey
table2 <- cbind(tukey_dist2, tukey_change2, tukey_mcp2, tukey_flying2)
setwd("D:/PhD/Fourth chapter/Results/Analisis3_bottleneck_effect")
write.csv(table2, file = "Table_movement_tukey.csv")
|
5e2f6da2e5818986cac4849ffa3a2d519aaad926
|
650f02c3d940eac1f33db33d0320e46aa44868cc
|
/code/04train_model.R
|
975373bbdb512049b6be72af44e56583403dd5e5
|
[
"MIT"
] |
permissive
|
lucasjamar/pokeML
|
4c9be2297956ac6ccec309b49c144706ce087279
|
060169314d89f478516865b05a27286794fe42da
|
refs/heads/master
| 2022-09-08T10:47:16.419354
| 2020-06-04T11:38:09
| 2020-06-04T11:38:09
| 267,602,922
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,670
|
r
|
04train_model.R
|
#' ---
#' title: "Model Training"
#' author: "Lucas Jamar"
#' ---
#' Using the optimal parameters found during our grid search, we train our final
#' [lightGBM](https://lightgbm.readthedocs.io/en/latest/)
#' L2 regressor of the difference between the true and expected portion of remaining
#' HP of player 1. Once again, Name_2 was removed from the list of features.
#' We then use our new model to make predictions on the combinations of submission
#' and available pokemons data and we compute some feature importance metrics.
#+ setup, include=FALSE
knitr::opts_chunk$set(eval = FALSE)
#' Read in the data with features
library(MLmetrics)
library(lightgbm)
library(data.table)
#' Load the engineered training data produced by the feature step.
train_dt <- data.table::fread("data/04_features/pokemon.csv")
#' Features to keep for modelling
#' (note: Name_2 is deliberately absent -- see the header narrative above).
feature_columns <- c(
  "Name_1",
  "Level_1",
  "Price_1",
  "HP_1",
  "Attack_1",
  "Defense_1",
  "Sp_Atk_1",
  "Sp_Def_1",
  "Speed_1",
  "Legendary_1",
  "Level_2",
  "Price_2",
  "HP_2",
  "Attack_2",
  "Defense_2",
  "Sp_Atk_2",
  "Sp_Def_2",
  "Speed_2",
  "Legendary_2",
  "WeatherAndTime",
  "MaxStrength_1",
  "MaxStrength_2",
  "Type_1",
  "Type_2",
  "WeatherInfluence_1",
  "WeatherInfluence_2",
  "Modifier_1",
  "Modifier_2",
  "Damage_1",
  "Damage_2",
  "Sp_Damage_1",
  "Sp_Damage_2",
  "MaxDamage_1",
  "MaxDamage_2",
  "ExpectedRounds_1_ToDefeat_2",
  "ExpectedRounds_2_ToDefeat_1",
  "ExpectedRounds",
  "ExpectedRemainingHP_1",
  "ExpectedRemainingHP_2",
  "RatioLevel",
  "RatioPrice",
  "RatioHP",
  "RatioAttack",
  "RatioDefense",
  "RatioSp_Atk",
  "RatioSp_Def",
  "RatioSpeed",
  "RatioWeatherInfluence",
  "RatioDamage",
  "RatioSp_Damage",
  "RatioMaxDamage",
  "RatioExpectedRounds_ToDefeat",
  "RatioExpectedRemainingHP",
  "RatioMaxStrength"
)
#' Separate available data from submission data (the Set column marks
#' submission rows).
submission_dt <- train_dt[Set == "submission"]
train_dt <- train_dt[Set != "submission"]
#' Separate target from features
train_result <- train_dt[, .(Set, BattleOutcome, PortionRemainingHP_1, ExpectedPortionRemainingHP_1, TrueMinusExpectedPortionRemainingHP_1)]
submission_result <- submission_dt[, .(ExpectedPortionRemainingHP_1)]
#' Keep only features for model training.
#' Drops, by reference (data.table `:=` semantics), every column of
#' `df` that is not listed in `columns`.
keep_columns <- function(df, columns) {
  unwanted <- colnames(df)[!colnames(df) %in% columns]
  df[, (unwanted) := NULL]
}
keep_columns(train_dt, feature_columns)
keep_columns(submission_dt, feature_columns)
#' Determine which features are categorical (character columns)
features <- colnames(train_dt)
categorical_features <- features[lapply(train_dt, class) == "character"]
#' Encode categorical variables using LGB encoder. The rules learned on
#' the training data are re-applied to the submission data so both
#' share the same integer encoding.
train_dt <- lightgbm::lgb.prepare_rules(data = train_dt)
rules <- train_dt$rules
submission_dt <- lightgbm::lgb.prepare_rules(data = submission_dt, rules = rules)
train_dt <- as.matrix(train_dt$data)
submission_dt <- as.matrix(submission_dt$data)
#' Transform train data to LGB dataset. The label is the residual:
#' true minus expected portion of player 1's remaining HP.
dtrain <- lightgbm::lgb.Dataset(
  label = train_result$TrueMinusExpectedPortionRemainingHP_1,
  data = train_dt,
  categorical_feature = categorical_features,
  free_raw_data = FALSE
)
valids <- list(train = dtrain)
#' Parameters chosen from grid search
parameter <- list(
  nthread = -1,
  boosting = "gbdt",
  num_iterations = 2111,
  learning_rate = 0.1,
  feature_fraction = 0.95,
  num_leaves = 30,
  seed = 12345
)
#' Train on full available data. compute_time starts here and is
#' reported after prediction, so it covers training plus prediction.
compute_time <- Sys.time()
lgb_pokemon <- lightgbm::lgb.train(
  data = dtrain,
  objective = "regression",
  valids = valids,
  params = parameter,
  eval = c("rmse", "mae")
)
#' Make predictions for training data followed by predictions for
#' submission data. The model predicts the residual, so the expected
#' portion is added back and the result clamped to the valid [0, 1] range.
train_result$PredictedPortionRemainingHP_1 <- predict(lgb_pokemon, train_dt)
train_result[, PredictedPortionRemainingHP_1 := PredictedPortionRemainingHP_1 + ExpectedPortionRemainingHP_1]
train_result[PredictedPortionRemainingHP_1 < 0, PredictedPortionRemainingHP_1 := 0]
train_result[PredictedPortionRemainingHP_1 > 1, PredictedPortionRemainingHP_1 := 1]
submission_result$PredictedPortionRemainingHP_1 <- predict(lgb_pokemon, submission_dt)
submission_result[, PredictedPortionRemainingHP_1 := PredictedPortionRemainingHP_1 + ExpectedPortionRemainingHP_1]
submission_result[PredictedPortionRemainingHP_1 < 0, PredictedPortionRemainingHP_1 := 0]
submission_result[PredictedPortionRemainingHP_1 > 1, PredictedPortionRemainingHP_1 := 1]
#' Save model & predictions
lightgbm::lgb.save(lgb_pokemon, "data/07_model_output/lgb_pokemon.model")
data.table::fwrite(train_result, "data/07_model_output/train_prediction.csv")
data.table::fwrite(submission_result, "data/07_model_output/submission_prediction.csv")
#' Save importance matrix
importance_matrix <- lightgbm::lgb.importance(model = lgb_pokemon)
data.table::fwrite(importance_matrix, "data/07_model_output/feature_importance.csv")
#' Calculate and print regression metrics (training data only -- these
#' are in-sample metrics, not a held-out evaluation).
results <- train_result[, .(RMSE = MLmetrics::RMSE(PredictedPortionRemainingHP_1, PortionRemainingHP_1),
                            MAE = MLmetrics::MAE(PredictedPortionRemainingHP_1, PortionRemainingHP_1),
                            R2 = MLmetrics::R2_Score(PredictedPortionRemainingHP_1, PortionRemainingHP_1),
                            compute_time = Sys.time() - compute_time)]
print(results)
#' Calculate and print regression metrics per class of BattleOutcome
results <- train_result[, .(RMSE = MLmetrics::RMSE(PredictedPortionRemainingHP_1, PortionRemainingHP_1),
                            MAE = MLmetrics::MAE(PredictedPortionRemainingHP_1, PortionRemainingHP_1),
                            R2 = MLmetrics::R2_Score(PredictedPortionRemainingHP_1, PortionRemainingHP_1)),
                        by = BattleOutcome]
print(results)
|
ab5303afd0beddccb93cbbc02d549f7b46eecbee
|
1c0ffc7bb3953258e728cd5640b6dc3467c3d0df
|
/tests/test-all.R
|
c9e52698ba250a89bad8507db59a4d9675d7512c
|
[] |
no_license
|
isabella232/aws.cloudtrail
|
47fe3ac177f0ae23e52bc058f24a15bc9d9eeccb
|
815ad6f2f4ab5a7994730db50f50c54b903e2e2d
|
refs/heads/master
| 2022-04-12T01:48:40.943779
| 2020-01-12T14:24:44
| 2020-01-12T14:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
test-all.R
|
# Run the aws.cloudtrail test suite, but only when AWS credentials are
# present in the environment (both variables non-empty); otherwise the
# whole suite is skipped.
library("testthat")

access_key <- Sys.getenv("AWS_ACCESS_KEY_ID")
secret_key <- Sys.getenv("AWS_SECRET_ACCESS_KEY")

have_credentials <- nzchar(access_key) && nzchar(secret_key)
if (have_credentials) {
  library("aws.cloudtrail")
  test_check("aws.cloudtrail")
}
|
e5cb48d773dd64e5029d11f00cb04fe011a64e45
|
0c383177e26bd40f4bced4a571e47e0b3062cc41
|
/man/has_file_search_pat.Rd
|
6477ecd7a2e195779cab08b7f6f2687d7c6b1256
|
[] |
no_license
|
charlotte-ngs/rmddochelper
|
3e8eca2b52f56e049137737c0398fc67214ebdba
|
8bffa21dd725c00cc8127f16d65382589024869a
|
refs/heads/master
| 2021-01-17T17:37:40.906734
| 2019-06-26T06:29:04
| 2019-06-26T06:29:04
| 60,537,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 712
|
rd
|
has_file_search_pat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odg_graphics.R
\name{has_file_search_pat}
\alias{has_file_search_pat}
\title{Check whether ps_pattern is found in ps_file}
\usage{
has_file_search_pat(ps_file, ps_pattern)
}
\arguments{
\item{ps_file}{name of file in which search is done}
\item{ps_pattern}{pattern for which we search for}
}
\value{
TRUE, if we have an exact match of ps_pattern in ps_file, FALSE otherwise
}
\description{
The check is done via grep for the pattern in the
file. First, the file content is read into a character
vector, then we search for the pattern. We are
only interested in exact matches, hence the argument
fixed=TRUE is specified for grep().
}
|
1523e9d94dda76060f90b1064a3157daa68fdbe7
|
9dbe86526c0a7edf52050c59146a7cc6d1b9a306
|
/R/HB/FLIC_clocklab_protocol.R
|
5078bb0db2d6ee3fadc11ffb2bdd4cad4b646cd9
|
[] |
no_license
|
austindreyer/FLIC
|
a7593fb90cc733bb7b9ade6eabf70e22d5a97772
|
67b03c2e05bd3987e2259013e0b09cb3184ced65
|
refs/heads/master
| 2021-04-15T10:22:49.987515
| 2020-01-31T19:32:04
| 2020-01-31T19:32:04
| 126,266,561
| 0
| 0
| null | 2018-05-17T19:32:41
| 2018-03-22T02:14:00
|
R
|
UTF-8
|
R
| false
| false
| 7,952
|
r
|
FLIC_clocklab_protocol.R
|
#### FLIC_clocklab_protocol ####
### Last updated 6/11/2019 ###
## The following protocol is also available in the FLIC_Protocols file in the
## "Protocols" folder of the OneDrive -> Feeding Project directory
# ClockLab Analysis
### 1 ###
# Go to the ClockLab software and open the text files created in R using txtclabdata() in the
# Feeding Project>FLIC_Data folder (just clicking on the first one will open all of them).
# NOTE: if asked, select open individual files
### 2 ###
# Create a new subfolder for your analysis in your personal “/Feeding Project/ClockLab_Analysis” folder
# (example: 18_0724_siftrpa1_FLIC).
### 3 ###
## Actogram settings ##
# Adjust the settings of both the actogram (open by default) and the periodogram (opened by selecting Analyses>Periodogram).
# For the actogram, Double Plot and Zero Lines (both found under Settings) should be selected. If the experiment
# included a temperature shift, the start date should be day 3 and the end date should be day 8. If not, the start
# date should be day 2 and the end date should be day 7. Both numbers under Hour should match the start of the day
# based on the entrainment schedule of the flies in the experiment. Also need to consider daylight savings time,
# making the start time one hour earlier than actual time if the experiment was run during daylight savings time,
# because the data laptops and incubators are not adjusted for daylight savings time.
# Other settings for actogram: Tau=24, Bin=30, Type=Scale, and Max=10. When actogram settings are set, use the
# "Snipping Tool" to take a screenshot of the left side of the panel, including the file name at the top
# of the window, and the settings themselves, for future reference should there be any discrepancies for what the
# settings were during analysis. This image should be saved in the folder created in step 2
## Periodogram settings ##
#For the periodogram, Analysis should be Chi Squared, Start=16, End=32, and Significance=0.01. When periodogram
# settings are set, use the "Snipping Tool" to take a screenshot of the left panel, including the file name at the top
# of the window, the settings, and the data for that particular fly for future reference. This image should be
# saved in the folder created in step 2
### 4 ###
# Export the analyzed data to Excel by pressing Export>Batch Export Table Data from the Periodogram window and selecting
# the text files of all the wells from the appropriate FLIC_Data folder by clicking the first text file, holding “Shift”,
# and clicking the last text file.
### 5 ###
# Open the FLIC_Data_analysis_template, which can be found in the OneDrive (Feeding Project). There are three tabs in the template
# file, from left to right: 1. date_genotype, 2. date_genotype.csv, 3. genotype_data. They are populated during the analysis first
# into the genotype_data tab, then the date_genotype tab, then the date_genotype.csv tab, and described below in that order
## genotype_data tab ##
# Copy the exported data to the genotype_data tab in its entirety. Save this file with the appropriate name (Ex: 18_0724_siftrpa1_FLIC_Data)
# to the subfolder within the ClockLab_Analysis made in step 2 above to avoid inadvertently overriding the template.
## date_genotype tab ##
# 1 #
# In the date_genotype tab, you start by filling in the genotypes column (column A), date (column B), and monitor type used for
# the experiment (column Q) as either “v2.1” or “Sable”.
# 2 #
# Using the OneNote entry of the experiment for reference, copy the data from the proper wells (only the columns from Filename to Chi^2 2)
# from the Periodogram_Table Data.csv file that was produced via Batch Export Table Data in ClockLab and place them under their
# respective genotypes. To copy data, it easiest to use the temporary Excel file output from ClockLab rather than the separate tab
# on the new analysis file. To select genotypes by each DFM, highlighting the appropriate columns for the first fly
# then press and hold "Ctrl" and highlight the next set of columns for the next fly, etc. Selecting multiple flies in this way
# can be cumbersome as it is difficult to undo an accidental selection of a wrong fly, so recommend only doing one DFM worth
# of flies at a time rather than large groups of flies to avoid reselecting multiple times. Once all flies of a given genotype from
# a DFM are selected, hit “C” (because you should already be holding down "Ctrl") to copy the files. Move to the
# genotype_data tab of the analysis file and then hit “Ctrl” + “V” to paste the rows, which will be pasted together with no row
# spaces between rows even if they were spaced out when copied. This is the advantage of using "Copy" rather than "Cut" to move
# data, "Cut" will not paste the rows together.
# Return to the temporary Excel file from ClockLab and with the recently copied rows still selected, hit “Delete” to remove the
# data from the file so you can confirm you are taking all the data as you go, and avoid confusion.
# Repeat for each DFM and each genotype, by the end you should have no rows left in the temporary Excel file from ClockLab,
# confirming successful migration of all data. Once finished, format all pasted cells to be font Arial size 10 with All Borders.
### 6 ###
# Once the data have been moved to the new analysis file, inspect the actogram data in ClockLab by using Ctrl+N to move forward
# and Ctrl+L to move backward in order to identify any dead flies, empty wells, or abnormalities in feeding behavior.
# Dead flies should be marked as “Dead” in the "Fly Status" column of the Excel sheet made in step 2, and their
# values in the Period and Power columns (columns N&O) should be deleted. Wells without any data should have their entire row of data
# deleted from column F to O, but leave the File Name column for future reference of the missing fly. These flies should also
# be marked as "Exclude" in the "Fly Status" column to avoid including them in Rhythm percentage calculation (column P). Wells
# that have strange data in actogram (e.g. data gets significantly stronger or weaker over the course of the experiment) should
# have a short description of the issue added in the "Notes" column (column R) and also be marked as "Exclude" with the values
# for the Period and Power columns removed.
# Each fly should be determined as either rhythmic or arrhythmic, indicated by a Y or N in the Rhythmic column (column P).
## FLIC v2.1 monitors ##
# Using the FLIC v2.1 monitors, the fly is considered rhythmic if its Power is 10 or greater, and it is considered
# arrhythmic if its Power is less than 10.
## FLIC Sable monitors ##
# Using the Sable monitors, the fly is considered rhythmic if its Power is 25 or greater, and it is considered arrhythmic
# if its Power is less than 25.
# If the fly’s Power is less than 0, change the value to 0. For any fly that is classified as arrhythmic, delete the Period value
# for that fly (column N) because an arrythmic fly should not have it's period included in the genotype mean.
## date_genotype.csv tab ##
# Copy all data from the main tab to the csv tab, excluding the empty rows, and those with labels and summary totals.
# and save the csv tab as a “CSV (Comma delimited) (*.csv)” file. Excel will warn you that only the active tab can be
# saved as a .csv, that is fine because we are saving the tab as a .csv to be read in to R for analysis later and
# only want that one tab's worth of data.
### 7 ###
# After the .csv tab has been saved, select the date_genotype tab and be sure to save the entire FLIC_Data file as an
# .xlsx file again to ensure all three tabs are saved for future reference. The Periodogram_Table Data file exported
# from ClockLab does not need to be saved.
# Copy the entire FLIC_Data file to the OneDrive (Feeding Project>Clocklab_Analysis).
|
46b548cadd26f34a37e4ac22c2f123ce77239297
|
69884df13130b6d843ab5eb51d9ad4bd6a38d705
|
/man/mod_mainDataOutput.Rd
|
99692d547ab94f7b72536272a2c6cacd1bb8a1f9
|
[] |
no_license
|
MalditoBarbudo/nfiApp
|
a1a625ec9207e3272ff3c373d453b95b430c8f42
|
baebe9fea0fb018cb002bf84803834928e089ae8
|
refs/heads/main
| 2023-06-08T01:38:34.382460
| 2023-04-28T06:56:28
| 2023-04-28T06:56:28
| 249,689,113
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 311
|
rd
|
mod_mainDataOutput.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_mainDataOutput.R
\name{mod_mainDataOutput}
\alias{mod_mainDataOutput}
\title{mod_mainDataOutput and mod_mainData}
\usage{
mod_mainDataOutput(id)
}
\arguments{
\item{id}{}
}
\description{
Shiny module to get the data as tbl_sql
}
|
14ffa4f6afdaca62ec1a14d07e712efd18dbe6f7
|
593f8cf964d4e1280f559a4f600938b95f3cd81f
|
/TucsonAZ.R
|
65c8e0633493886ea334e17b5f91a50bdae2619d
|
[
"MIT"
] |
permissive
|
QUAY17/stats_correlation
|
11a0c177341b3aaf2402435b601ca83bf75ac110
|
cfe22b371513f685874c9beeb55884d0ffed53ac
|
refs/heads/main
| 2023-07-03T09:00:11.372024
| 2021-08-12T03:10:12
| 2021-08-12T03:10:12
| 395,137,544
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95,860
|
r
|
TucsonAZ.R
|
month,day,year,temp,residual
1,1,1995,48.1,-3.8
1,2,1995,55.1,3.3
1,3,1995,51.8,0.1
1,4,1995,50.6,-1
1,5,1995,53.3,1.7
1,6,1995,47.2,-4.3
1,7,1995,48.1,-3.4
1,8,1995,51.8,0.4
1,9,1995,53.5,2.1
1,10,1995,56.7,5.4
1,11,1995,54.9,3.6
1,12,1995,54.4,3.1
1,13,1995,53.7,2.5
1,14,1995,52.4,1.2
1,15,1995,55.5,4.3
1,16,1995,50.9,-0.3
1,17,1995,43.7,-7.5
1,18,1995,39.5,-11.7
1,19,1995,47.7,-3.5
1,20,1995,52.7,1.5
1,21,1995,49.3,-1.9
1,22,1995,48.8,-2.5
1,23,1995,49.9,-1.4
1,24,1995,56.3,5
1,25,1995,59.6,8.2
1,26,1995,51.9,0.5
1,27,1995,48,-3.5
1,28,1995,46.9,-4.6
1,29,1995,46.9,-4.7
1,30,1995,51.1,-0.6
1,31,1995,51.7,0
2,1,1995,54.5,2.7
2,2,1995,58.6,6.7
2,3,1995,61.3,9.3
2,4,1995,65.9,13.8
2,5,1995,62,9.8
2,6,1995,58.6,6.3
2,7,1995,59.5,7.1
2,8,1995,61.5,9
2,9,1995,60.5,7.9
2,10,1995,56.6,3.8
2,11,1995,51.4,-1.5
2,12,1995,54.6,1.6
2,13,1995,58.1,4.9
2,14,1995,59.8,6.5
2,15,1995,55.3,1.8
2,16,1995,53.4,-0.2
2,17,1995,56.5,2.7
2,18,1995,60.2,6.2
2,19,1995,61.6,7.5
2,20,1995,67.4,13.1
2,21,1995,66.4,11.9
2,22,1995,56.3,1.6
2,23,1995,54.1,-0.8
2,24,1995,60.6,5.5
2,25,1995,60.2,4.9
2,26,1995,59.6,4.1
2,27,1995,62.1,6.4
2,28,1995,60.7,4.8
3,1,1995,57.9,1.8
3,2,1995,59.3,3
3,3,1995,59.6,3.1
3,4,1995,60.5,3.7
3,5,1995,65,8
3,6,1995,60.8,3.6
3,7,1995,55.1,-2.4
3,8,1995,57.5,-0.2
3,9,1995,61.1,3.1
3,10,1995,63.7,5.5
3,11,1995,65.5,7
3,12,1995,57.4,-1.3
3,13,1995,60.1,1.1
3,14,1995,64.5,5.3
3,15,1995,66.7,7.2
3,16,1995,70.9,11.1
3,17,1995,70.3,10.3
3,18,1995,69.8,9.5
3,19,1995,68.1,7.5
3,20,1995,67.7,6.8
3,21,1995,69,7.8
3,22,1995,65.3,3.9
3,23,1995,63.3,1.6
3,24,1995,56.8,-5.2
3,25,1995,53,-9.3
3,26,1995,52.4,-10.2
3,27,1995,54.9,-8
3,28,1995,57,-6.2
3,29,1995,58,-5.5
3,30,1995,58.1,-5.7
3,31,1995,58.2,-5.9
4,1,1995,62,-2.4
4,2,1995,64.3,-0.4
4,3,1995,63.5,-1.6
4,4,1995,65,-0.4
4,5,1995,69,3.3
4,6,1995,70.6,4.6
4,7,1995,68.2,1.9
4,8,1995,69.6,3
4,9,1995,66.8,-0.1
4,10,1995,57.1,-10.2
4,11,1995,59,-8.6
4,12,1995,65.4,-2.5
4,13,1995,72.1,3.9
4,14,1995,69,0.5
4,15,1995,68.6,-0.3
4,16,1995,64.5,-4.7
4,17,1995,53.4,-16.1
4,18,1995,55.7,-14.1
4,19,1995,49.6,-20.6
4,20,1995,50.1,-20.4
4,21,1995,57.7,-13.1
4,22,1995,57.8,-13.3
4,23,1995,64.3,-7.1
4,24,1995,68.5,-3.3
4,25,1995,71,-1.1
4,26,1995,73.2,0.8
4,27,1995,75.8,3.1
4,28,1995,74.9,1.9
4,29,1995,74.5,1.1
4,30,1995,79.3,5.6
5,1,1995,78.8,4.8
5,2,1995,77.9,3.6
5,3,1995,72.9,-1.7
5,4,1995,75,0.1
5,5,1995,73.5,-1.7
5,6,1995,62.7,-12.8
5,7,1995,54.9,-21
5,8,1995,60.8,-15.4
5,9,1995,66.2,-10.3
5,10,1995,70.8,-6
5,11,1995,76.1,-1
5,12,1995,78.5,1.1
5,13,1995,76.4,-1.3
5,14,1995,75.8,-2.1
5,15,1995,74.1,-4.1
5,16,1995,76,-2.5
5,17,1995,63.9,-14.9
5,18,1995,70.7,-8.4
5,19,1995,77.1,-2.3
5,20,1995,78.8,-0.9
5,21,1995,81.6,1.7
5,22,1995,80,-0.2
5,23,1995,74.6,-5.9
5,24,1995,72.7,-8
5,25,1995,73.9,-7.1
5,26,1995,72.8,-8.5
5,27,1995,75.6,-5.9
5,28,1995,78.3,-3.5
5,29,1995,78.4,-3.6
5,30,1995,80.1,-2.2
5,31,1995,81.3,-1.2
6,1,1995,83.8,1.1
6,2,1995,83.9,0.9
6,3,1995,77.3,-5.9
6,4,1995,80.6,-2.8
6,5,1995,83.4,-0.2
6,6,1995,83.8,-0.1
6,7,1995,81.1,-3
6,8,1995,76.2,-8.1
6,9,1995,73.3,-11.2
6,10,1995,78,-6.7
6,11,1995,84,-0.9
6,12,1995,90.2,5.1
6,13,1995,90.8,5.5
6,14,1995,89.9,4.5
6,15,1995,86.8,1.2
6,16,1995,82.8,-3
6,17,1995,75.2,-10.8
6,18,1995,77.6,-8.5
6,19,1995,84,-2.3
6,20,1995,85.5,-0.9
6,21,1995,86,-0.6
6,22,1995,84.5,-2.2
6,23,1995,85.4,-1.5
6,24,1995,88.8,1.8
6,25,1995,90.6,3.5
6,26,1995,89.9,2.6
6,27,1995,91.5,4.1
6,28,1995,91.1,3.6
6,29,1995,90.4,2.8
6,30,1995,90.9,3.2
7,1,1995,87.8,0
7,2,1995,87.7,-0.2
7,3,1995,86.8,-1.2
7,4,1995,83,-5.1
7,5,1995,87.1,-1.1
7,6,1995,89.5,1.3
7,7,1995,94.6,6.3
7,8,1995,94.8,6.4
7,9,1995,93.3,4.9
7,10,1995,93.7,5.2
7,11,1995,90.6,2.1
7,12,1995,85.9,-2.6
7,13,1995,82.9,-5.7
7,14,1995,86.7,-1.9
7,15,1995,81.8,-6.8
7,16,1995,82.2,-6.4
7,17,1995,84.3,-4.4
7,18,1995,84.2,-4.5
7,19,1995,86.7,-2
7,20,1995,86.8,-1.9
7,21,1995,89.5,0.8
7,22,1995,89.4,0.8
7,23,1995,89.1,0.5
7,24,1995,89.9,1.3
7,25,1995,90.3,1.7
7,26,1995,92.9,4.4
7,27,1995,95.8,7.3
7,28,1995,94.1,5.7
7,29,1995,94.9,6.5
7,30,1995,92.6,4.3
7,31,1995,88.8,0.6
8,1,1995,94.1,5.9
8,2,1995,94.2,6.1
8,3,1995,93.6,5.6
8,4,1995,92.5,4.6
8,5,1995,94.1,6.3
8,6,1995,95.1,7.4
8,7,1995,95.6,8
8,8,1995,91.5,4
8,9,1995,93.9,6.5
8,10,1995,91.4,4.1
8,11,1995,89.5,2.3
8,12,1995,77.5,-9.5
8,13,1995,86.7,-0.2
8,14,1995,82.2,-4.6
8,15,1995,75.3,-11.3
8,16,1995,78.6,-7.9
8,17,1995,80.5,-5.8
8,18,1995,82,-4.2
8,19,1995,78.9,-7.1
8,20,1995,80.1,-5.7
8,21,1995,76.5,-9.2
8,22,1995,85.3,-0.2
8,23,1995,85,-0.3
8,24,1995,78.2,-6.9
8,25,1995,82.2,-2.7
8,26,1995,82.4,-2.3
8,27,1995,83.4,-1.1
8,28,1995,82.3,-2
8,29,1995,84.6,0.5
8,30,1995,83.6,-0.3
8,31,1995,87.5,3.8
9,1,1995,91.6,8.1
9,2,1995,92.3,9.1
9,3,1995,89.5,6.5
9,4,1995,89.4,6.6
9,5,1995,86.9,4.4
9,6,1995,85.9,3.6
9,7,1995,82,-0.1
9,8,1995,84.9,3.1
9,9,1995,84.9,3.3
9,10,1995,79.2,-2.1
9,11,1995,83.5,2.5
9,12,1995,84.1,3.3
9,13,1995,85.1,4.6
9,14,1995,86.4,6.2
9,15,1995,79.9,-0.1
9,16,1995,76.9,-2.8
9,17,1995,81,1.6
9,18,1995,81.1,2
9,19,1995,79.5,0.6
9,20,1995,81.1,2.5
9,21,1995,83,4.7
9,22,1995,83.1,5.1
9,23,1995,83.1,5.4
9,24,1995,81.1,3.7
9,25,1995,80.7,3.6
9,26,1995,80.1,3.3
9,27,1995,81.3,4.8
9,28,1995,76.7,0.5
9,29,1995,75.9,0
9,30,1995,72.9,-2.7
10,1,1995,73.6,-1.7
10,2,1995,72.4,-2.6
10,3,1995,72.8,-1.9
10,4,1995,73.7,-0.7
10,5,1995,71.3,-2.8
10,6,1995,71.1,-2.6
10,7,1995,73.2,-0.2
10,8,1995,71.7,-1.4
10,9,1995,75.9,3.1
10,10,1995,74.4,1.9
10,11,1995,78.6,6.5
10,12,1995,79,7.2
10,13,1995,78,6.5
10,14,1995,78.2,7
10,15,1995,80,9.1
10,16,1995,77.9,7.4
10,17,1995,73.3,3.1
10,18,1995,73.5,3.6
10,19,1995,71.8,2.2
10,20,1995,74,4.7
10,21,1995,75.1,6.2
10,22,1995,72.8,4.2
10,23,1995,65.1,-3.2
10,24,1995,66.9,-1.1
10,25,1995,62.6,-5
10,26,1995,62.6,-4.7
10,27,1995,65,-2
10,28,1995,68.5,1.8
10,29,1995,71.7,5.3
10,30,1995,66.7,0.6
10,31,1995,60.8,-4.9
11,1,1995,61.1,-4.3
11,2,1995,61.2,-3.9
11,3,1995,61.6,-3.2
11,4,1995,62.9,-1.6
11,5,1995,60.1,-4.1
11,6,1995,58.2,-5.7
11,7,1995,62.7,-0.9
11,8,1995,63,-0.3
11,9,1995,63.9,0.9
11,10,1995,68.1,5.4
11,11,1995,63.2,0.8
11,12,1995,62.2,0.1
11,13,1995,62.6,0.8
11,14,1995,63.3,1.8
11,15,1995,63.2,2
11,16,1995,66.4,5.5
11,17,1995,63.2,2.5
11,18,1995,63.5,3.1
11,19,1995,61,0.9
11,20,1995,61.6,1.8
11,21,1995,64,4.4
11,22,1995,62.7,3.4
11,23,1995,60.5,1.5
11,24,1995,63.4,4.6
11,25,1995,65.5,7
11,26,1995,63.6,5.3
11,27,1995,57.9,-0.1
11,28,1995,51.1,-6.7
11,29,1995,51.3,-6.2
11,30,1995,56.1,-1.2
12,1,1995,59.1,2.1
12,2,1995,60.9,4.1
12,3,1995,59.6,3
12,4,1995,58.2,1.9
12,5,1995,59.1,3
12,6,1995,60.9,5
12,7,1995,61.5,5.8
12,8,1995,55.8,0.3
12,9,1995,55.1,-0.2
12,10,1995,57.5,2.4
12,11,1995,58.1,3.2
12,12,1995,59.2,4.5
12,13,1995,59.8,5.3
12,14,1995,53.4,-0.9
12,15,1995,50.6,-3.6
12,16,1995,50.1,-3.9
12,17,1995,44.6,-9.2
12,18,1995,45.9,-7.8
12,19,1995,47.2,-6.3
12,20,1995,45.1,-8.2
12,21,1995,47.3,-5.9
12,22,1995,43.4,-9.7
12,23,1995,43.4,-9.5
12,24,1995,49.4,-3.4
12,25,1995,50.8,-1.9
12,26,1995,52.3,-0.2
12,27,1995,49.8,-2.6
12,28,1995,50.2,-2.1
12,29,1995,48.8,-3.4
12,30,1995,48.8,-3.3
12,31,1995,49.1,-2.9
1,1,1996,49.9,-2
1,2,1996,44.3,-7.5
1,3,1996,43.4,-8.3
1,4,1996,50.4,-1.3
1,5,1996,51.2,-0.4
1,6,1996,50,-1.5
1,7,1996,51.5,0
1,8,1996,61.5,10.1
1,9,1996,57.8,6.4
1,10,1996,57.8,6.5
1,11,1996,54.7,3.4
1,12,1996,59.6,8.3
1,13,1996,62.1,10.9
1,14,1996,56.5,5.3
1,15,1996,55.6,4.4
1,16,1996,60.7,9.5
1,17,1996,63.8,12.6
1,18,1996,49.7,-1.5
1,19,1996,51.9,0.7
1,20,1996,51.4,0.2
1,21,1996,49.6,-1.6
1,22,1996,54.8,3.6
1,23,1996,44.8,-6.5
1,24,1996,42.6,-8.7
1,25,1996,44.7,-6.7
1,26,1996,49.3,-2.1
1,27,1996,51.5,0.1
1,28,1996,53,1.5
1,29,1996,52.2,0.6
1,30,1996,57,5.4
1,31,1996,60.3,8.6
2,1,1996,59.3,7.5
2,2,1996,52.7,0.8
2,3,1996,53.8,1.8
2,4,1996,56.2,4.1
2,5,1996,58.6,6.4
2,6,1996,59.5,7.2
2,7,1996,60.6,8.2
2,8,1996,62.3,9.8
2,9,1996,63.5,10.9
2,10,1996,63.1,10.4
2,11,1996,63.1,10.2
2,12,1996,62.9,9.9
2,13,1996,59.8,6.7
2,14,1996,54.7,1.4
2,15,1996,58.1,4.7
2,16,1996,60,6.4
2,17,1996,61,7.3
2,18,1996,62.5,8.6
2,19,1996,61.6,7.5
2,20,1996,65,10.7
2,21,1996,67.1,12.7
2,22,1996,63.1,8.5
2,23,1996,56.2,1.4
2,24,1996,56.9,1.9
2,25,1996,58.3,3.1
2,26,1996,48.3,-7.1
2,27,1996,44,-11.6
2,28,1996,46.7,-9.1
2,29,1996,48.1,-7.9
3,1,1996,49.7,-6.6
3,2,1996,49.5,-7
3,3,1996,56.3,-0.4
3,4,1996,57.8,0.9
3,5,1996,62.9,5.7
3,6,1996,57.2,-0.2
3,7,1996,54.9,-2.7
3,8,1996,63.8,5.9
3,9,1996,67.1,9
3,10,1996,69.7,11.3
3,11,1996,62.3,3.7
3,12,1996,64.5,5.6
3,13,1996,63.4,4.2
3,14,1996,53.8,-5.6
3,15,1996,49.9,-9.8
3,16,1996,55.3,-4.7
3,17,1996,60.5,0.3
3,18,1996,64.2,3.7
3,19,1996,65.2,4.4
3,20,1996,68.1,7
3,21,1996,68.8,7.4
3,22,1996,71.3,9.6
3,23,1996,69.4,7.4
3,24,1996,57.8,-4.4
3,25,1996,58.1,-4.4
3,26,1996,59.8,-3
3,27,1996,60.3,-2.8
3,28,1996,62.6,-0.8
3,29,1996,63.5,-0.2
3,30,1996,57.3,-6.7
3,31,1996,63.7,-0.7
4,1,1996,70.5,5.8
4,2,1996,71.5,6.5
4,3,1996,66.4,1.1
4,4,1996,64.1,-1.5
4,5,1996,58.3,-7.6
4,6,1996,61.6,-4.6
4,7,1996,63.2,-3.3
4,8,1996,68.7,1.8
4,9,1996,71.7,4.5
4,10,1996,75.4,7.9
4,11,1996,63.9,-3.9
4,12,1996,64.7,-3.4
4,13,1996,68.1,-0.4
4,14,1996,62.4,-6.4
4,15,1996,63.5,-5.6
4,16,1996,70.1,0.7
4,17,1996,71.9,2.1
4,18,1996,71.3,1.2
4,19,1996,68.3,-2.1
4,20,1996,63.6,-7.1
4,21,1996,67.9,-3.1
4,22,1996,68.1,-3.3
4,23,1996,73.5,1.8
4,24,1996,79.6,7.6
4,25,1996,79.8,7.5
4,26,1996,80,7.4
4,27,1996,83.7,10.7
4,28,1996,80.9,7.6
4,29,1996,76.2,2.6
4,30,1996,73.1,-0.8
5,1,1996,76,1.8
5,2,1996,78.6,4.1
5,3,1996,79.6,4.7
5,4,1996,79.2,4
5,5,1996,78.5,3
5,6,1996,80.1,4.3
5,7,1996,81.3,5.2
5,8,1996,80.8,4.4
5,9,1996,81.2,4.5
5,10,1996,78.8,1.8
5,11,1996,81.7,4.4
5,12,1996,88.6,11
5,13,1996,88.2,10.3
5,14,1996,86.2,8
5,15,1996,84.2,5.7
5,16,1996,82.9,4.2
5,17,1996,82.1,3.1
5,18,1996,83.5,4.2
5,19,1996,87.9,8.3
5,20,1996,84.9,5
5,21,1996,83.5,3.4
5,22,1996,82.5,2.1
5,23,1996,79.9,-0.8
5,24,1996,75.1,-5.8
5,25,1996,67,-14.2
5,26,1996,66.4,-15
5,27,1996,71.6,-10.1
5,28,1996,78.5,-3.4
5,29,1996,79.3,-2.9
5,30,1996,79.9,-2.5
5,31,1996,78.3,-4.4
6,1,1996,79.3,-3.6
6,2,1996,83.4,0.3
6,3,1996,88.9,5.5
6,4,1996,90.4,6.8
6,5,1996,91.2,7.4
6,6,1996,89.4,5.4
6,7,1996,89.6,5.4
6,8,1996,91.7,7.3
6,9,1996,90.8,6.2
6,10,1996,91.1,6.3
6,11,1996,89,4
6,12,1996,88.4,3.2
6,13,1996,85.4,0
6,14,1996,83,-2.6
6,15,1996,84.5,-1.3
6,16,1996,91,5.1
6,17,1996,92,5.9
6,18,1996,92,5.8
6,19,1996,96.2,9.8
6,20,1996,91.6,5
6,21,1996,93.6,6.9
6,22,1996,92.8,6
6,23,1996,95,8
6,24,1996,93.4,6.3
6,25,1996,89.5,2.3
6,26,1996,86.1,-1.3
6,27,1996,83.2,-4.3
6,28,1996,82.7,-4.9
6,29,1996,85.7,-2
6,30,1996,86.5,-1.3
7,1,1996,93.9,6
7,2,1996,94.6,6.6
7,3,1996,83.4,-4.7
7,4,1996,85,-3.1
7,5,1996,91.2,3
7,6,1996,90,1.7
7,7,1996,86.2,-2.1
7,8,1996,90.1,1.7
7,9,1996,79.7,-8.7
7,10,1996,84.3,-4.2
7,11,1996,85,-3.5
7,12,1996,88.1,-0.5
7,13,1996,89.5,0.9
7,14,1996,81.3,-7.3
7,15,1996,79.1,-9.5
7,16,1996,83.4,-5.3
7,17,1996,87.1,-1.6
7,18,1996,88.3,-0.4
7,19,1996,90.6,1.9
7,20,1996,93,4.3
7,21,1996,92.7,4.1
7,22,1996,92.2,3.6
7,23,1996,91.5,2.9
7,24,1996,92.7,4.1
7,25,1996,93,4.5
7,26,1996,82.9,-5.6
7,27,1996,88.2,-0.2
7,28,1996,81.2,-7.2
7,29,1996,88.5,0.2
7,30,1996,89.8,1.5
7,31,1996,93.5,5.3
8,1,1996,84.4,-3.7
8,2,1996,81.5,-6.5
8,3,1996,83.6,-4.3
8,4,1996,86.2,-1.7
8,5,1996,85.6,-2.2
8,6,1996,88.2,0.5
8,7,1996,89,1.4
8,8,1996,90.4,3
8,9,1996,83.2,-4.1
8,10,1996,84,-3.2
8,11,1996,86.9,-0.2
8,12,1996,89.4,2.5
8,13,1996,90.7,3.9
8,14,1996,90.9,4.2
8,15,1996,82.7,-3.8
8,16,1996,88.7,2.3
8,17,1996,84.6,-1.6
8,18,1996,86,0
8,19,1996,80.4,-5.5
8,20,1996,84.6,-1.1
8,21,1996,85.7,0.2
8,22,1996,86.2,0.9
8,23,1996,84.6,-0.6
8,24,1996,89.3,4.3
8,25,1996,90,5.2
8,26,1996,83.2,-1.4
8,27,1996,85.4,1
8,28,1996,81.3,-2.9
8,29,1996,83.2,-0.8
8,30,1996,84.1,0.4
8,31,1996,85.9,2.4
9,1,1996,86.8,3.5
9,2,1996,80.2,-2.9
9,3,1996,77,-5.8
9,4,1996,75.8,-6.8
9,5,1996,78.7,-3.7
9,6,1996,79.8,-2.3
9,7,1996,78.3,-3.6
9,8,1996,81.6,0
9,9,1996,84.2,2.8
9,10,1996,79,-2.1
9,11,1996,74.5,-6.3
9,12,1996,76.3,-4.3
9,13,1996,80.1,-0.2
9,14,1996,77.8,-2.2
9,15,1996,72.3,-7.5
9,16,1996,74.7,-4.8
9,17,1996,74.8,-4.4
9,18,1996,75.7,-3.2
9,19,1996,76.1,-2.6
9,20,1996,77.2,-1.2
9,21,1996,77.8,-0.3
9,22,1996,80.5,2.7
9,23,1996,79.5,2
9,24,1996,74.7,-2.5
9,25,1996,73.3,-3.6
9,26,1996,71,-5.6
9,27,1996,75.1,-1.2
9,28,1996,75,-1
9,29,1996,78.5,2.8
9,30,1996,80,4.6
10,1,1996,78.6,3.5
10,2,1996,79.4,4.6
10,3,1996,78.9,4.5
10,4,1996,79.5,5.4
10,5,1996,84,10.2
10,6,1996,84.5,11
10,7,1996,82.4,9.2
10,8,1996,80.8,7.9
10,9,1996,82,9.4
10,10,1996,82,9.8
10,11,1996,80.9,9
10,12,1996,79.7,8.1
10,13,1996,80.4,9.1
10,14,1996,75.8,4.9
10,15,1996,68.6,-2
10,16,1996,73,2.7
10,17,1996,72.2,2.2
10,18,1996,72.2,2.5
10,19,1996,76,6.7
10,20,1996,70.1,1.1
10,21,1996,59.9,-8.8
10,22,1996,55.5,-12.9
10,23,1996,60.8,-7.2
10,24,1996,58.1,-9.6
10,25,1996,62.8,-4.6
10,26,1996,52.4,-14.7
10,27,1996,45.7,-21.1
10,28,1996,51.7,-14.8
10,29,1996,49.9,-16.2
10,30,1996,57.5,-8.3
10,31,1996,61.4,-4.1
11,1,1996,57.6,-7.6
11,2,1996,66.4,1.5
11,3,1996,62.6,-2
11,4,1996,57.6,-6.7
11,5,1996,59.9,-4.1
11,6,1996,57.7,-6
11,7,1996,55.9,-7.5
11,8,1996,59.9,-3.2
11,9,1996,68.6,5.8
11,10,1996,69.3,6.8
11,11,1996,67.1,4.9
11,12,1996,67,5.1
11,13,1996,66.9,5.3
11,14,1996,67.2,5.9
11,15,1996,68.6,7.6
11,16,1996,58.9,-1.8
11,17,1996,54.2,-6.2
11,18,1996,58.8,-1.4
11,19,1996,62.2,2.3
11,20,1996,63.6,4
11,21,1996,63.6,4.2
11,22,1996,64.9,5.8
11,23,1996,60.1,1.3
11,24,1996,52.9,-5.7
11,25,1996,56,-2.3
11,26,1996,56.8,-1.3
11,27,1996,56.4,-1.4
11,28,1996,57.4,-0.2
11,29,1996,53.3,-4
11,30,1996,43.7,-13.4
12,1,1996,43.3,-13.6
12,2,1996,46.6,-10
12,3,1996,48.5,-7.9
12,4,1996,50.5,-5.7
12,5,1996,49.6,-6.4
12,6,1996,56.9,1.1
12,7,1996,56.8,1.3
12,8,1996,60.3,5
12,9,1996,65.4,10.3
12,10,1996,66.6,11.7
12,11,1996,58.3,3.5
12,12,1996,56.2,1.6
12,13,1996,56.9,2.5
12,14,1996,55.8,1.6
12,15,1996,51,-3
12,16,1996,49,-4.9
12,17,1996,46.3,-7.4
12,18,1996,41.1,-12.4
12,19,1996,42.5,-10.9
12,20,1996,43.7,-9.5
12,21,1996,51.7,-1.4
12,22,1996,55.3,2.3
12,23,1996,49.8,-3
12,24,1996,46.5,-6.2
12,25,1996,55.7,3.1
12,26,1996,51.1,-1.3
12,27,1996,55,2.7
12,28,1996,55.5,3.3
12,29,1996,53.9,1.8
12,30,1996,54.3,2.3
12,31,1996,56.5,4.6
1,1,1997,56,4.2
1,2,1997,58.4,6.6
1,3,1997,61.5,9.8
1,4,1997,58.6,7
1,5,1997,54.4,2.9
1,6,1997,45.4,-6.1
1,7,1997,37,-14.4
1,8,1997,36.3,-15.1
1,9,1997,39.4,-11.9
1,10,1997,42.4,-8.9
1,11,1997,48.5,-2.8
1,12,1997,52.9,1.7
1,13,1997,54.1,2.9
1,14,1997,51.4,0.2
1,15,1997,45.1,-6.1
1,16,1997,42.7,-8.5
1,17,1997,45.2,-6
1,18,1997,55.3,4.1
1,19,1997,55.7,4.5
1,20,1997,56,4.8
1,21,1997,57.7,6.5
1,22,1997,49.8,-1.5
1,23,1997,54,2.7
1,24,1997,53.7,2.4
1,25,1997,54.4,3
1,26,1997,55,3.6
1,27,1997,54.8,3.3
1,28,1997,55.8,4.3
1,29,1997,56.3,4.7
1,30,1997,59.6,7.9
1,31,1997,59.4,7.6
2,1,1997,56.9,5.1
2,2,1997,57,5.1
2,3,1997,55.5,3.5
2,4,1997,55.3,3.2
2,5,1997,59.8,7.6
2,6,1997,53.4,1.1
2,7,1997,43.7,-8.8
2,8,1997,52.8,0.2
2,9,1997,51,-1.7
2,10,1997,52.9,0.1
2,11,1997,53.8,0.8
2,12,1997,46.5,-6.6
2,13,1997,52.5,-0.7
2,14,1997,49.5,-3.9
2,15,1997,56.6,3.1
2,16,1997,63,9.3
2,17,1997,66.3,12.4
2,18,1997,61.1,7.1
2,19,1997,54.6,0.4
2,20,1997,57.6,3.2
2,21,1997,55.1,0.5
2,22,1997,50,-4.8
2,23,1997,52.4,-2.6
2,24,1997,55.7,0.6
2,25,1997,49.7,-5.6
2,26,1997,48,-7.6
2,27,1997,47.7,-8.1
2,28,1997,49,-7
3,1,1997,42.6,-13.6
3,2,1997,47.5,-8.9
3,3,1997,54,-2.6
3,4,1997,57.8,0.9
3,5,1997,55.4,-1.7
3,6,1997,64.2,6.9
3,7,1997,61,3.4
3,8,1997,64.4,6.6
3,9,1997,63.1,5
3,10,1997,69.8,11.5
3,11,1997,72.1,13.5
3,12,1997,70.6,11.8
3,13,1997,67.6,8.5
3,14,1997,67.8,8.4
3,15,1997,68.4,8.8
3,16,1997,68.9,9
3,17,1997,69.9,9.7
3,18,1997,64.9,4.4
3,19,1997,71.6,10.9
3,20,1997,73,12
3,21,1997,73.1,11.8
3,22,1997,71.6,10
3,23,1997,71.2,9.3
3,24,1997,64.2,2
3,25,1997,64.6,2.1
3,26,1997,60.5,-2.3
3,27,1997,62.9,-0.2
3,28,1997,66.6,3.2
3,29,1997,66.3,2.6
3,30,1997,66.3,2.3
3,31,1997,65.5,1.2
4,1,1997,65.1,0.5
4,2,1997,64.5,-0.4
4,3,1997,51.9,-13.3
4,4,1997,45.1,-20.4
4,5,1997,55.1,-10.7
4,6,1997,59.7,-6.4
4,7,1997,62.1,-4.4
4,8,1997,64.9,-1.9
4,9,1997,68.8,1.7
4,10,1997,64.8,-2.6
4,11,1997,59.4,-8.3
4,12,1997,60.8,-7.3
4,13,1997,61,-7.4
4,14,1997,61.3,-7.4
4,15,1997,65.5,-3.5
4,16,1997,70,0.7
4,17,1997,72.5,2.8
4,18,1997,68.8,-1.2
4,19,1997,72.8,2.5
4,20,1997,73.7,3.1
4,21,1997,76,5
4,22,1997,76.7,5.4
4,23,1997,78.9,7.3
4,24,1997,71.5,-0.4
4,25,1997,60.4,-11.8
4,26,1997,63,-9.6
4,27,1997,69.2,-3.7
4,28,1997,76.3,3.1
4,29,1997,74.3,0.8
4,30,1997,74.2,0.4
5,1,1997,76.1,2
5,2,1997,73.7,-0.8
5,3,1997,76,1.2
5,4,1997,82,6.9
5,5,1997,81,5.6
5,6,1997,82.9,7.2
5,7,1997,82.8,6.8
5,8,1997,81.8,5.5
5,9,1997,80.2,3.6
5,10,1997,80.4,3.5
5,11,1997,73.8,-3.4
5,12,1997,77.8,0.3
5,13,1997,77.4,-0.4
5,14,1997,82.5,4.4
5,15,1997,84.9,6.5
5,16,1997,86.3,7.6
5,17,1997,86.4,7.5
5,18,1997,83.2,4
5,19,1997,77.9,-1.6
5,20,1997,76.2,-3.6
5,21,1997,71.6,-8.5
5,22,1997,78.8,-1.5
5,23,1997,81,0.4
5,24,1997,77.2,-3.7
5,25,1997,76.4,-4.7
5,26,1997,76,-5.4
5,27,1997,79.3,-2.3
5,28,1997,83.3,1.4
5,29,1997,84.8,2.7
5,30,1997,86.8,4.4
5,31,1997,88.1,5.5
6,1,1997,87.6,4.8
6,2,1997,87.3,4.2
6,3,1997,87,3.7
6,4,1997,87.9,4.4
6,5,1997,87.4,3.7
6,6,1997,81.3,-2.7
6,7,1997,73.1,-11.1
6,8,1997,71.6,-12.8
6,9,1997,79.3,-5.3
6,10,1997,83.7,-1.1
6,11,1997,84.4,-0.6
6,12,1997,84.5,-0.7
6,13,1997,81.4,-4
6,14,1997,80.5,-5
6,15,1997,78.5,-7.2
6,16,1997,77.3,-8.6
6,17,1997,83.3,-2.7
6,18,1997,87.9,1.7
6,19,1997,90.3,3.9
6,20,1997,88.2,1.7
6,21,1997,85.2,-1.5
6,22,1997,86.5,-0.3
6,23,1997,86.3,-0.6
6,24,1997,85.4,-1.7
6,25,1997,86.6,-0.6
6,26,1997,87.8,0.5
6,27,1997,88.4,1
6,28,1997,88.2,0.6
6,29,1997,87.9,0.2
6,30,1997,89.3,1.5
7,1,1997,87.7,-0.2
7,2,1997,91.6,3.6
7,3,1997,93.4,5.4
7,4,1997,92.8,4.7
7,5,1997,91.6,3.4
7,6,1997,92.6,4.3
7,7,1997,92.7,4.4
7,8,1997,91.6,3.2
7,9,1997,89.1,0.7
7,10,1997,82.8,-5.7
7,11,1997,82.3,-6.2
7,12,1997,86.8,-1.8
7,13,1997,86.7,-1.9
7,14,1997,88.7,0.1
7,15,1997,93,4.4
7,16,1997,95.5,6.8
7,17,1997,94.2,5.5
7,18,1997,87.4,-1.3
7,19,1997,87.4,-1.3
7,20,1997,83.6,-5.1
7,21,1997,78.7,-9.9
7,22,1997,77.8,-10.8
7,23,1997,87.2,-1.4
7,24,1997,90.6,2
7,25,1997,93.3,4.8
7,26,1997,89.9,1.4
7,27,1997,86.2,-2.2
7,28,1997,83.2,-5.2
7,29,1997,86.9,-1.4
7,30,1997,80.6,-7.7
7,31,1997,88.1,-0.1
8,1,1997,92.5,4.4
8,2,1997,82,-6.1
8,3,1997,84.2,-3.8
8,4,1997,83.5,-4.4
8,5,1997,87.4,-0.4
8,6,1997,85.1,-2.6
8,7,1997,88.3,0.7
8,8,1997,83.5,-4
8,9,1997,76.8,-10.6
8,10,1997,81.2,-6
8,11,1997,87.5,0.4
8,12,1997,86.7,-0.3
8,13,1997,83.2,-3.6
8,14,1997,82.3,-4.4
8,15,1997,82,-4.6
8,16,1997,84.3,-2.1
8,17,1997,80.8,-5.4
8,18,1997,78.8,-7.3
8,19,1997,85.9,0
8,20,1997,88.2,2.5
8,21,1997,88.9,3.3
8,22,1997,87.6,2.2
8,23,1997,89.5,4.3
8,24,1997,85,0
8,25,1997,85.7,0.9
8,26,1997,87.4,2.8
8,27,1997,84.9,0.5
8,28,1997,85.8,1.6
8,29,1997,78.7,-5.3
8,30,1997,82.5,-1.3
8,31,1997,80.2,-3.4
9,1,1997,83.2,-0.2
9,2,1997,77.8,-5.3
9,3,1997,83.4,0.5
9,4,1997,89.3,6.6
9,5,1997,84.4,2
9,6,1997,79.7,-2.5
9,7,1997,81.9,0
9,8,1997,83.8,2.1
9,9,1997,84.1,2.7
9,10,1997,86.2,5
9,11,1997,86.9,6
9,12,1997,81.6,0.9
9,13,1997,85.6,5.2
9,14,1997,80.3,0.2
9,15,1997,84.6,4.8
9,16,1997,82.3,2.7
9,17,1997,86.4,7.1
9,18,1997,87.4,8.4
9,19,1997,85.3,6.6
9,20,1997,85.6,7.2
9,21,1997,82.9,4.7
9,22,1997,79.6,1.7
9,23,1997,79.1,1.5
9,24,1997,82.6,5.3
9,25,1997,83,6
9,26,1997,77.7,1
9,27,1997,78.7,2.3
9,28,1997,82.2,6.1
9,29,1997,82.2,6.4
9,30,1997,82.1,6.6
10,1,1997,85.3,10.1
10,2,1997,84.9,10.1
10,3,1997,83.2,8.7
10,4,1997,82.1,7.9
10,5,1997,81.2,7.3
10,6,1997,80.4,6.8
10,7,1997,78.8,5.5
10,8,1997,66.3,-6.7
10,9,1997,70.6,-2
10,10,1997,77.1,4.8
10,11,1997,73,1
10,12,1997,58.3,-13.4
10,13,1997,60.8,-10.6
10,14,1997,72.3,1.3
10,15,1997,75.2,4.5
10,16,1997,75.5,5.1
10,17,1997,75.4,5.3
10,18,1997,71.7,2
10,19,1997,71.2,1.8
10,20,1997,70.6,1.5
10,21,1997,68.7,-0.1
10,22,1997,63.7,-4.8
10,23,1997,59.3,-8.8
10,24,1997,65.9,-1.9
10,25,1997,56.2,-11.3
10,26,1997,55.2,-12
10,27,1997,58.8,-8.1
10,28,1997,59.9,-6.6
10,29,1997,63.4,-2.8
10,30,1997,62.4,-3.5
10,31,1997,63.4,-2.2
11,1,1997,65,-0.3
11,2,1997,66.8,1.8
11,3,1997,67.1,2.4
11,4,1997,64.9,0.6
11,5,1997,64.2,0.2
11,6,1997,65.5,1.8
11,7,1997,66.9,3.5
11,8,1997,65.5,2.4
11,9,1997,64.2,1.4
11,10,1997,61.5,-1
11,11,1997,65.8,3.6
11,12,1997,58,-3.9
11,13,1997,53.8,-7.9
11,14,1997,57.7,-3.7
11,15,1997,50.7,-10.4
11,16,1997,52.5,-8.3
11,17,1997,53.7,-6.8
11,18,1997,53.9,-6.3
11,19,1997,56.7,-3.3
11,20,1997,58.3,-1.4
11,21,1997,57.5,-1.9
11,22,1997,55.9,-3.3
11,23,1997,59.3,0.4
11,24,1997,61.4,2.8
11,25,1997,63.3,4.9
11,26,1997,65.9,7.8
11,27,1997,61.8,3.9
11,28,1997,49.2,-8.4
11,29,1997,52.6,-4.8
11,30,1997,52.1,-5.1
12,1,1997,48.5,-8.4
12,2,1997,48.2,-8.5
12,3,1997,47.8,-8.7
12,4,1997,51.7,-4.5
12,5,1997,53,-3
12,6,1997,55.1,-0.7
12,7,1997,54.8,-0.8
12,8,1997,55.2,-0.2
12,9,1997,46.5,-8.7
12,10,1997,41.9,-13.1
12,11,1997,39.8,-15
12,12,1997,42.8,-11.8
12,13,1997,42.9,-11.5
12,14,1997,54.2,-0.1
12,15,1997,52.1,-2
12,16,1997,51.7,-2.2
12,17,1997,50.5,-3.2
12,18,1997,54.5,0.9
12,19,1997,50.6,-2.8
12,20,1997,48.4,-4.9
12,21,1997,50.5,-2.6
12,22,1997,47.7,-5.3
12,23,1997,41.4,-11.5
12,24,1997,41.3,-11.4
12,25,1997,39.5,-13.1
12,26,1997,38.1,-14.4
12,27,1997,42.5,-9.9
12,28,1997,40.3,-12
12,29,1997,43.1,-9
12,30,1997,51.7,-0.3
12,31,1997,55.6,3.6
1,1,1998,56.7,4.8
1,2,1998,62.1,10.3
1,3,1998,59.4,7.7
1,4,1998,53.6,2
1,5,1998,50,-1.6
1,6,1998,44.7,-6.8
1,7,1998,42.3,-9.1
1,8,1998,42.8,-8.6
1,9,1998,47.1,-4.2
1,10,1998,51.4,0.1
1,11,1998,52.6,1.3
1,12,1998,46.8,-4.4
1,13,1998,50.1,-1.1
1,14,1998,48.3,-2.9
1,15,1998,48.6,-2.6
1,16,1998,51.3,0.1
1,17,1998,54.4,3.2
1,18,1998,55.3,4.1
1,19,1998,58,6.8
1,20,1998,52.5,1.3
1,21,1998,53.2,2
1,22,1998,47.2,-4.1
1,23,1998,46.7,-4.6
1,24,1998,51.3,0
1,25,1998,51.8,0.4
1,26,1998,53.8,2.4
1,27,1998,56.2,4.7
1,28,1998,55.7,4.2
1,29,1998,56.6,5
1,30,1998,53.3,1.6
1,31,1998,49.8,-1.9
2,1,1998,53.3,1.5
2,2,1998,53.1,1.2
2,3,1998,57.6,5.6
2,4,1998,52.1,0
2,5,1998,46.3,-5.9
2,6,1998,48.8,-3.5
2,7,1998,56.9,4.5
2,8,1998,53.6,1.1
2,9,1998,49.5,-3.2
2,10,1998,49,-3.8
2,11,1998,48.5,-4.4
2,12,1998,50.6,-2.5
2,13,1998,49.9,-3.3
2,14,1998,52.1,-1.3
2,15,1998,52.9,-0.6
2,16,1998,46.1,-7.6
2,17,1998,45.2,-8.6
2,18,1998,45.3,-8.7
2,19,1998,45.5,-8.7
2,20,1998,49.4,-4.9
2,21,1998,42.8,-11.7
2,22,1998,49.7,-5
2,23,1998,55.8,0.9
2,24,1998,59.8,4.7
2,25,1998,48.7,-6.6
2,26,1998,46.6,-8.9
2,27,1998,48.6,-7.1
2,28,1998,45.9,-10
3,1,1998,47.6,-8.5
3,2,1998,51.9,-4.5
3,3,1998,56.2,-0.4
3,4,1998,60.1,3.3
3,5,1998,62.6,5.6
3,6,1998,61.3,4
3,7,1998,50.7,-6.8
3,8,1998,45.8,-12
3,9,1998,51.1,-6.9
3,10,1998,56.8,-1.5
3,11,1998,65.9,7.4
3,12,1998,66.2,7.4
3,13,1998,63.3,4.3
3,14,1998,59.8,0.5
3,15,1998,48.8,-10.8
3,16,1998,54.9,-4.9
3,17,1998,58.1,-2
3,18,1998,58.6,-1.8
3,19,1998,59.1,-1.6
3,20,1998,61.2,0.3
3,21,1998,63,1.8
3,22,1998,63.8,2.3
3,23,1998,66.5,4.7
3,24,1998,68.9,6.8
3,25,1998,74.7,12.3
3,26,1998,59.8,-2.9
3,27,1998,52.8,-10.2
3,28,1998,59.5,-3.8
3,29,1998,47,-16.6
3,30,1998,43.2,-20.7
3,31,1998,47.2,-17
4,1,1998,52.9,-11.6
4,2,1998,45.1,-19.7
4,3,1998,53,-12.1
4,4,1998,57.9,-7.5
4,5,1998,57.8,-7.9
4,6,1998,58,-8.1
4,7,1998,54.8,-11.6
4,8,1998,53.5,-13.2
4,9,1998,56.3,-10.7
4,10,1998,63.5,-3.8
4,11,1998,68.9,1.2
4,12,1998,61.2,-6.8
4,13,1998,57.3,-11
4,14,1998,57.6,-11
4,15,1998,55.9,-13
4,16,1998,51.8,-17.5
4,17,1998,53,-16.6
4,18,1998,57.7,-12.2
4,19,1998,62.7,-7.5
4,20,1998,67,-3.6
4,21,1998,72.7,1.8
4,22,1998,77.4,6.2
4,23,1998,76.8,5.3
4,24,1998,73.4,1.6
4,25,1998,67.8,-4.4
4,26,1998,60.4,-12.1
4,27,1998,61.3,-11.5
4,28,1998,68.7,-4.4
4,29,1998,68.5,-4.9
4,30,1998,72,-1.7
5,1,1998,74,-0.1
5,2,1998,72.6,-1.8
5,3,1998,71,-3.7
5,4,1998,71.6,-3.4
5,5,1998,70,-5.3
5,6,1998,67.1,-8.5
5,7,1998,68.3,-7.6
5,8,1998,68.2,-8
5,9,1998,66.5,-10
5,10,1998,73,-3.8
5,11,1998,72.6,-4.5
5,12,1998,71.2,-6.2
5,13,1998,67.3,-10.4
5,14,1998,62.7,-15.3
5,15,1998,64.7,-13.6
5,16,1998,71.3,-7.3
5,17,1998,79.4,0.5
5,18,1998,79.3,0.1
5,19,1998,81.6,2.2
5,20,1998,82.9,3.2
5,21,1998,78.1,-1.9
5,22,1998,72.6,-7.7
5,23,1998,73.6,-6.9
5,24,1998,75.3,-5.5
5,25,1998,78,-3
5,26,1998,75.9,-5.4
5,27,1998,76.9,-4.7
5,28,1998,79.8,-2
5,29,1998,81.1,-1
5,30,1998,80.6,-1.7
5,31,1998,78.4,-4.1
6,1,1998,80.7,-2.1
6,2,1998,83.8,0.8
6,3,1998,83,-0.2
6,4,1998,72.8,-10.7
6,5,1998,74,-9.7
6,6,1998,79.1,-4.8
6,7,1998,82.3,-1.8
6,8,1998,73.7,-10.6
6,9,1998,76.3,-8.2
6,10,1998,76.2,-8.5
6,11,1998,77,-7.9
6,12,1998,80.7,-4.4
6,13,1998,78.4,-6.9
6,14,1998,77.4,-8.1
6,15,1998,79.4,-6.3
6,16,1998,84.5,-1.3
6,17,1998,81.8,-4.2
6,18,1998,81.6,-4.6
6,19,1998,84.8,-1.5
6,20,1998,88.2,1.7
6,21,1998,87.4,0.8
6,22,1998,84.4,-2.4
6,23,1998,88.8,1.9
6,24,1998,90.3,3.3
6,25,1998,90.6,3.4
6,26,1998,90.1,2.8
6,27,1998,88.4,1
6,28,1998,90.7,3.2
6,29,1998,92.3,4.7
6,30,1998,92.1,4.4
7,1,1998,91,3.2
7,2,1998,93.3,5.4
7,3,1998,91.6,3.6
7,4,1998,87.1,-1
7,5,1998,77.5,-10.7
7,6,1998,77.3,-10.9
7,7,1998,78.7,-9.6
7,8,1998,77.6,-10.8
7,9,1998,86.9,-1.5
7,10,1998,90.7,2.2
7,11,1998,93.3,4.8
7,12,1998,91.7,3.1
7,13,1998,92.4,3.8
7,14,1998,92.5,3.9
7,15,1998,94,5.4
7,16,1998,96,7.3
7,17,1998,94.1,5.4
7,18,1998,90.1,1.4
7,19,1998,83.8,-4.9
7,20,1998,78.6,-10.1
7,21,1998,77.5,-11.1
7,22,1998,74.3,-14.3
7,23,1998,76.3,-12.3
7,24,1998,80.6,-8
7,25,1998,85,-3.5
7,26,1998,86.5,-2
7,27,1998,89.2,0.7
7,28,1998,90.9,2.5
7,29,1998,86.9,-1.5
7,30,1998,86.4,-1.9
7,31,1998,83.4,-4.8
8,1,1998,86.1,-2.1
8,2,1998,81.4,-6.7
8,3,1998,87.8,-0.2
8,4,1998,88.4,0.5
8,5,1998,90.7,2.9
8,6,1998,83.4,-4.3
8,7,1998,87.1,-0.5
8,8,1998,82.3,-5.2
8,9,1998,85,-2.4
8,10,1998,82.5,-4.8
8,11,1998,86.4,-0.7
8,12,1998,81.6,-5.4
8,13,1998,84.5,-2.4
8,14,1998,78.1,-8.6
8,15,1998,80.9,-5.7
8,16,1998,89,2.6
8,17,1998,82.8,-3.5
8,18,1998,79.8,-6.3
8,19,1998,84.6,-1.4
8,20,1998,87.6,1.8
8,21,1998,85.2,-0.4
8,22,1998,87.1,1.7
8,23,1998,89.6,4.3
8,24,1998,87.8,2.7
8,25,1998,81,-3.9
8,26,1998,84.9,0.2
8,27,1998,88.6,4.1
8,28,1998,92,7.7
8,29,1998,84.6,0.5
8,30,1998,89.9,6
8,31,1998,88.1,4.5
9,1,1998,88.9,5.5
9,2,1998,86.9,3.7
9,3,1998,87.5,4.5
9,4,1998,77.1,-5.6
9,5,1998,77.5,-5
9,6,1998,79.6,-2.6
9,7,1998,81.7,-0.3
9,8,1998,83.4,1.7
9,9,1998,83.9,2.4
9,10,1998,81.1,-0.1
9,11,1998,83.2,2.2
9,12,1998,81.4,0.7
9,13,1998,84.6,4.1
9,14,1998,85.2,5
9,15,1998,86,6.1
9,16,1998,87.2,7.6
9,17,1998,85.8,6.4
9,18,1998,86.3,7.2
9,19,1998,85.1,6.3
9,20,1998,86.4,7.9
9,21,1998,83,4.8
9,22,1998,79.2,1.3
9,23,1998,78.3,0.7
9,24,1998,78.5,1.1
9,25,1998,77,-0.1
9,26,1998,75,-1.8
9,27,1998,77.3,0.8
9,28,1998,80.8,4.6
9,29,1998,80.6,4.8
9,30,1998,78.2,2.7
10,1,1998,74,-1.2
10,2,1998,73.2,-1.7
10,3,1998,75.4,0.8
10,4,1998,79.2,4.9
10,5,1998,70.3,-3.7
10,6,1998,68.3,-5.4
10,7,1998,76.1,2.7
10,8,1998,78.9,5.9
10,9,1998,77.4,4.7
10,10,1998,75.3,2.9
10,11,1998,73.5,1.4
10,12,1998,76.2,4.4
10,13,1998,81.2,9.8
10,14,1998,77,5.9
10,15,1998,70.2,-0.6
10,16,1998,64.1,-6.4
10,17,1998,60.6,-9.5
10,18,1998,65.2,-4.6
10,19,1998,69.3,-0.2
10,20,1998,73.5,4.3
10,21,1998,72.3,3.4
10,22,1998,69.8,1.3
10,23,1998,69.8,1.6
10,24,1998,71.8,3.9
10,25,1998,74,6.4
10,26,1998,72.4,5.1
10,27,1998,62.3,-4.6
10,28,1998,61,-5.6
10,29,1998,64.4,-1.9
10,30,1998,68.6,2.6
10,31,1998,58.2,-7.5
11,1,1998,53.5,-11.9
11,2,1998,60.7,-4.3
11,3,1998,59.4,-5.3
11,4,1998,59.5,-4.9
11,5,1998,60.1,-4
11,6,1998,60.1,-3.7
11,7,1998,60.6,-2.9
11,8,1998,60.5,-2.7
11,9,1998,60.8,-2.1
11,10,1998,48.9,-13.7
11,11,1998,54.2,-8.1
11,12,1998,53.9,-8.1
11,13,1998,51.1,-10.6
11,14,1998,56.5,-4.9
11,15,1998,58.6,-2.6
11,16,1998,64.8,3.9
11,17,1998,64.5,3.9
11,18,1998,63.8,3.5
11,19,1998,59.3,-0.7
11,20,1998,56.9,-2.9
11,21,1998,55.9,-3.6
11,22,1998,61.4,2.2
11,23,1998,62.7,3.7
11,24,1998,63.4,4.7
11,25,1998,59.9,1.5
11,26,1998,62.2,4
11,27,1998,64.4,6.5
11,28,1998,63.5,5.8
11,29,1998,54,-3.5
11,30,1998,53.3,-3.9
12,1,1998,58.6,1.6
12,2,1998,60.4,3.6
12,3,1998,48.8,-7.7
12,4,1998,53.5,-2.8
12,5,1998,54.8,-1.3
12,6,1998,41.3,-14.6
12,7,1998,37,-18.7
12,8,1998,41.2,-14.2
12,9,1998,42.4,-12.8
12,10,1998,42.1,-12.9
12,11,1998,45.5,-9.4
12,12,1998,48.9,-5.8
12,13,1998,56.8,2.3
12,14,1998,60.2,5.9
12,15,1998,65.8,11.7
12,16,1998,62.1,8.1
12,17,1998,63,9.2
12,18,1998,53.2,-0.4
12,19,1998,57.4,3.9
12,20,1998,58.4,5.1
12,21,1998,53.4,0.2
12,22,1998,42.9,-10.1
12,23,1998,38.7,-14.2
12,24,1998,42,-10.8
12,25,1998,42,-10.6
12,26,1998,46.9,-5.6
12,27,1998,50.4,-2
12,28,1998,51.1,-1.2
12,29,1998,49.2,-3
12,30,1998,49,-3.1
12,31,1998,49,-3
1,1,1999,58.3,6.4
1,2,1999,48.7,-3.1
1,3,1999,47.5,-4.2
1,4,1999,45.9,-5.8
1,5,1999,49.4,-2.2
1,6,1999,50.7,-0.8
1,7,1999,54.1,2.6
1,8,1999,53.2,1.8
1,9,1999,48.3,-3.1
1,10,1999,49,-2.3
1,11,1999,58,6.7
1,12,1999,59.8,8.5
1,13,1999,51.6,0.4
1,14,1999,52.8,1.6
1,15,1999,57.8,6.6
1,16,1999,55.4,4.2
1,17,1999,54.4,3.2
1,18,1999,56.2,5
1,19,1999,58.9,7.7
1,20,1999,58.2,7
1,21,1999,56.9,5.7
1,22,1999,49.9,-1.4
1,23,1999,50.4,-0.9
1,24,1999,61.5,10.2
1,25,1999,69,17.6
1,26,1999,53.1,1.7
1,27,1999,44.9,-6.6
1,28,1999,43.3,-8.2
1,29,1999,41.3,-10.3
1,30,1999,45.3,-6.3
1,31,1999,52.8,1.1
2,1,1999,52.2,0.4
2,2,1999,50,-1.9
2,3,1999,49.3,-2.7
2,4,1999,53.6,1.5
2,5,1999,57.4,5.2
2,6,1999,49.8,-2.5
2,7,1999,50.1,-2.3
2,8,1999,59.2,6.7
2,9,1999,59.6,7
2,10,1999,57.4,4.6
2,11,1999,41,-11.9
2,12,1999,54.9,1.9
2,13,1999,62,8.8
2,14,1999,62.4,9.1
2,15,1999,57.6,4.1
2,16,1999,52,-1.6
2,17,1999,54.2,0.4
2,18,1999,56.5,2.5
2,19,1999,62.1,8
2,20,1999,60.3,6
2,21,1999,61.3,6.8
2,22,1999,64.3,9.6
2,23,1999,53.5,-1.4
2,24,1999,58.3,3.3
2,25,1999,64.5,9.3
2,26,1999,59.3,3.9
2,27,1999,58.7,3
2,28,1999,60.5,4.6
3,1,1999,64.5,8.4
3,2,1999,64.7,8.4
3,3,1999,64,7.5
3,4,1999,63.6,6.9
3,5,1999,62.2,5.2
3,6,1999,63.2,6
3,7,1999,66.1,8.6
3,8,1999,52.2,-5.5
3,9,1999,56.4,-1.5
3,10,1999,59.2,1
3,11,1999,61.6,3.2
3,12,1999,54.5,-4.2
3,13,1999,54.1,-4.9
3,14,1999,61.8,2.6
3,15,1999,65.7,6.2
3,16,1999,57.9,-1.9
3,17,1999,46.5,-13.5
3,18,1999,50.1,-10.2
3,19,1999,60.9,0.3
3,20,1999,67.6,6.7
3,21,1999,67.9,6.7
3,22,1999,65,3.6
3,23,1999,66.5,4.8
3,24,1999,65.1,3.1
3,25,1999,66.1,3.8
3,26,1999,65.5,2.9
3,27,1999,53.9,-9
3,28,1999,61.3,-1.9
3,29,1999,64.3,0.8
3,30,1999,69.1,5.3
3,31,1999,70.9,6.8
4,1,1999,54.6,-9.8
4,2,1999,45.7,-19
4,3,1999,47,-18
4,4,1999,48.8,-16.6
4,5,1999,47,-18.7
4,6,1999,57.8,-8.2
4,7,1999,63,-3.3
4,8,1999,54.7,-11.9
4,9,1999,54.5,-12.4
4,10,1999,51.3,-16
4,11,1999,60.9,-6.7
4,12,1999,65.9,-2
4,13,1999,56.7,-11.5
4,14,1999,60.1,-8.4
4,15,1999,67.1,-1.8
4,16,1999,70,0.8
4,17,1999,67.1,-2.4
4,18,1999,71.5,1.7
4,19,1999,72.9,2.8
4,20,1999,73.9,3.4
4,21,1999,78.5,7.7
4,22,1999,76.4,5.3
4,23,1999,72.7,1.3
4,24,1999,64.9,-6.9
4,25,1999,62.8,-9.3
4,26,1999,65.6,-6.8
4,27,1999,71.8,-0.9
4,28,1999,76.4,3.4
4,29,1999,61.9,-11.4
4,30,1999,57,-16.7
5,1,1999,58,-16
5,2,1999,63,-11.3
5,3,1999,72,-2.6
5,4,1999,66,-8.9
5,5,1999,66.9,-8.3
5,6,1999,71,-4.5
5,7,1999,74.3,-1.5
5,8,1999,78.4,2.3
5,9,1999,77.7,1.2
5,10,1999,72,-4.8
5,11,1999,73,-4.1
5,12,1999,75.3,-2
5,13,1999,79.7,2.1
5,14,1999,79.8,1.9
5,15,1999,77.8,-0.4
5,16,1999,75.3,-3.2
5,17,1999,75.8,-3
5,18,1999,79.3,0.2
5,19,1999,78.4,-1
5,20,1999,77.2,-2.4
5,21,1999,78.7,-1.2
5,22,1999,78.6,-1.6
5,23,1999,75,-5.5
5,24,1999,76.3,-4.4
5,25,1999,76.8,-4.2
5,26,1999,79,-2.2
5,27,1999,82.3,0.8
5,28,1999,85.6,3.9
5,29,1999,84.9,2.9
5,30,1999,83.8,1.6
5,31,1999,80.4,-2.1
6,1,1999,78.7,-4
6,2,1999,79.9,-3.1
6,3,1999,76.2,-7
6,4,1999,72.9,-10.5
6,5,1999,66.8,-16.8
6,6,1999,73.4,-10.4
6,7,1999,80.8,-3.3
6,8,1999,80.5,-3.8
6,9,1999,79.3,-5.2
6,10,1999,79.2,-5.5
6,11,1999,81,-3.9
6,12,1999,81.1,-4
6,13,1999,84.2,-1.1
6,14,1999,86.8,1.4
6,15,1999,89.9,4.3
6,16,1999,85.4,-0.4
6,17,1999,87.3,1.3
6,18,1999,85.1,-1
6,19,1999,87.3,1
6,20,1999,91.2,4.8
6,21,1999,91,4.4
6,22,1999,88.1,1.4
6,23,1999,86.5,-0.4
6,24,1999,90.1,3.1
6,25,1999,91,3.9
6,26,1999,92.7,5.4
6,27,1999,92.4,5
6,28,1999,91.2,3.7
6,29,1999,90.7,3.1
6,30,1999,93.9,6.2
7,1,1999,94,6.2
7,2,1999,92.9,5
7,3,1999,84.2,-3.8
7,4,1999,87.2,-0.9
7,5,1999,89.4,1.2
7,6,1999,81.8,-6.4
7,7,1999,78.4,-9.9
7,8,1999,79,-9.4
7,9,1999,81.7,-6.7
7,10,1999,81.6,-6.9
7,11,1999,80.4,-8.1
7,12,1999,87.4,-1.1
7,13,1999,83.8,-4.8
7,14,1999,84.4,-4.2
7,15,1999,76.7,-11.9
7,16,1999,80.5,-8.1
7,17,1999,82.3,-6.4
7,18,1999,75.6,-13.1
7,19,1999,81.5,-7.2
7,20,1999,85.3,-3.4
7,21,1999,84.3,-4.4
7,22,1999,84.5,-4.1
7,23,1999,80,-8.6
7,24,1999,80.3,-8.3
7,25,1999,81.6,-7
7,26,1999,83.9,-4.6
7,27,1999,79.5,-9
7,28,1999,75.7,-12.7
7,29,1999,76.7,-11.7
7,30,1999,78.7,-9.6
7,31,1999,83.8,-4.4
8,1,1999,86.9,-1.3
8,2,1999,86.3,-1.8
8,3,1999,86.2,-1.8
8,4,1999,86.8,-1.1
8,5,1999,81.8,-6
8,6,1999,78.9,-8.8
8,7,1999,81.6,-6
8,8,1999,85.2,-2.3
8,9,1999,85.3,-2.1
8,10,1999,81.6,-5.7
8,11,1999,83.2,-4
8,12,1999,83.3,-3.7
8,13,1999,86.8,-0.1
8,14,1999,86.5,-0.3
8,15,1999,81.6,-5
8,16,1999,80.8,-5.7
8,17,1999,85,-1.3
8,18,1999,84.6,-1.6
8,19,1999,84.5,-1.5
8,20,1999,82.9,-2.9
8,21,1999,85.2,-0.5
8,22,1999,89.3,3.8
8,23,1999,88.2,2.9
8,24,1999,86.6,1.5
8,25,1999,88.9,4
8,26,1999,88.8,4.1
8,27,1999,83.3,-1.2
8,28,1999,78.2,-6.1
8,29,1999,80.7,-3.4
8,30,1999,82.6,-1.3
8,31,1999,83,-0.7
9,1,1999,75.2,-8.3
9,2,1999,81.1,-2.1
9,3,1999,82.1,-0.9
9,4,1999,80.6,-2.2
9,5,1999,82.4,-0.1
9,6,1999,85.8,3.5
9,7,1999,84.6,2.5
9,8,1999,83.5,1.7
9,9,1999,85.7,4.1
9,10,1999,76.7,-4.6
9,11,1999,82.6,1.5
9,12,1999,84.4,3.6
9,13,1999,89.3,8.8
9,14,1999,85.2,4.9
9,15,1999,81.9,1.9
9,16,1999,76,-3.7
9,17,1999,80.7,1.3
9,18,1999,81.9,2.7
9,19,1999,80,1.1
9,20,1999,75.6,-3
9,21,1999,82.6,4.3
9,22,1999,85,7
9,23,1999,78.6,0.9
9,24,1999,77.5,0.1
9,25,1999,79,1.9
9,26,1999,78.2,1.4
9,27,1999,80.2,3.7
9,28,1999,81,4.8
9,29,1999,81.7,5.8
9,30,1999,82.8,7.2
10,1,1999,79.9,4.6
10,2,1999,79.4,4.4
10,3,1999,78.2,3.5
10,4,1999,77.7,3.3
10,5,1999,82.8,8.7
10,6,1999,80.4,6.6
10,7,1999,73.3,-0.1
10,8,1999,72.7,-0.4
10,9,1999,80.4,7.6
10,10,1999,79.1,6.6
10,11,1999,81.8,9.6
10,12,1999,84.6,12.8
10,13,1999,78.2,6.7
10,14,1999,78.6,7.4
10,15,1999,77.4,6.5
10,16,1999,74.8,4.2
10,17,1999,68.5,-1.7
10,18,1999,63.5,-6.4
10,19,1999,67.9,-1.7
10,20,1999,72.3,3
10,21,1999,72.3,3.4
10,22,1999,74.1,5.5
10,23,1999,76.3,8
10,24,1999,73.6,5.6
10,25,1999,73.4,5.7
10,26,1999,71.5,4.2
10,27,1999,69.4,2.4
10,28,1999,69.4,2.7
10,29,1999,69.5,3.1
10,30,1999,68.4,2.3
10,31,1999,72.8,7
11,1,1999,68.7,3.3
11,2,1999,67.5,2.4
11,3,1999,71.8,7
11,4,1999,68.4,3.9
11,5,1999,68.4,4.2
11,6,1999,69.6,5.7
11,7,1999,73.4,9.8
11,8,1999,67.7,4.4
11,9,1999,63.8,0.8
11,10,1999,67.1,4.4
11,11,1999,70,7.6
11,12,1999,73.9,11.8
11,13,1999,74,12.2
11,14,1999,71.3,9.8
11,15,1999,69.7,8.5
11,16,1999,70.8,9.9
11,17,1999,69.2,8.5
11,18,1999,66.6,6.2
11,19,1999,63.7,3.6
11,20,1999,61.7,1.9
11,21,1999,59.1,-0.5
11,22,1999,55.3,-4
11,23,1999,46.9,-12.1
11,24,1999,47.4,-11.4
11,25,1999,48,-10.5
11,26,1999,53.4,-4.9
11,27,1999,58.9,0.9
11,28,1999,60,2.2
11,29,1999,61.9,4.4
11,30,1999,68.7,11.4
12,1,1999,65.7,8.7
12,2,1999,56.3,-0.5
12,3,1999,50,-6.6
12,4,1999,42.8,-13.6
12,5,1999,49.5,-6.6
12,6,1999,52.5,-3.4
12,7,1999,49.3,-6.4
12,8,1999,50.4,-5.1
12,9,1999,44.2,-11.1
12,10,1999,50.9,-4.2
12,11,1999,47,-7.9
12,12,1999,44.1,-10.6
12,13,1999,47,-7.5
12,14,1999,46.7,-7.6
12,15,1999,45.9,-8.3
12,16,1999,48.7,-5.3
12,17,1999,49.2,-4.6
12,18,1999,53.7,0
12,19,1999,54.9,1.4
12,20,1999,48.9,-4.5
12,21,1999,45.7,-7.5
12,22,1999,46.6,-6.5
12,23,1999,50.1,-2.8
12,24,1999,54.2,1.4
12,25,1999,51,-1.7
12,26,1999,52.1,-0.4
12,27,1999,55,2.6
12,28,1999,57.4,5.1
12,29,1999,55.9,3.7
12,30,1999,56.6,4.5
12,31,1999,55.8,3.8
1,1,2000,55.2,3.3
1,2,2000,42.9,-8.9
1,3,2000,38.9,-12.8
1,4,2000,45.2,-6.5
1,5,2000,44.1,-7.5
1,6,2000,42.8,-8.7
1,7,2000,46.9,-4.6
1,8,2000,46.2,-5.2
1,9,2000,46.2,-5.2
1,10,2000,49.8,-1.5
1,11,2000,51.8,0.5
1,12,2000,53.7,2.4
1,13,2000,54.9,3.7
1,14,2000,64.4,13.2
1,15,2000,62.1,10.9
1,16,2000,63.4,12.2
1,17,2000,61.7,10.5
1,18,2000,64.2,13
1,19,2000,62.9,11.7
1,20,2000,61.2,10
1,21,2000,58.9,7.7
1,22,2000,59.6,8.4
1,23,2000,56.3,5
1,24,2000,59.8,8.5
1,25,2000,61.5,10.2
1,26,2000,62.9,11.5
1,27,2000,53,1.6
1,28,2000,49.7,-1.8
1,29,2000,49.3,-2.3
1,30,2000,52.8,1.2
1,31,2000,55.2,3.5
2,1,2000,51.4,-0.4
2,2,2000,55.9,4
2,3,2000,57.4,5.4
2,4,2000,56.2,4.2
2,5,2000,58.6,6.5
2,6,2000,58.1,5.8
2,7,2000,58.5,6.1
2,8,2000,64.9,12.4
2,9,2000,63,10.4
2,10,2000,61.4,8.7
2,11,2000,58,5.1
2,12,2000,54.3,1.3
2,13,2000,55.8,2.7
2,14,2000,56.4,3.1
2,15,2000,60.1,6.7
2,16,2000,61.8,8.2
2,17,2000,60.2,6.5
2,18,2000,50.6,-3.3
2,19,2000,55.2,1.1
2,20,2000,63.6,9.3
2,21,2000,64.1,9.7
2,22,2000,55.6,1
2,23,2000,53.6,-1.2
2,24,2000,56.3,1.3
2,25,2000,45.7,-9.5
2,26,2000,50.5,-4.9
2,27,2000,59,3.4
2,28,2000,64.2,8.4
2,29,2000,55.2,-0.8
3,1,2000,56.7,0.5
3,2,2000,54.8,-1.7
3,3,2000,59.4,2.7
3,4,2000,63.9,7
3,5,2000,54.8,-2.4
3,6,2000,49.6,-7.8
3,7,2000,41,-16.6
3,8,2000,47.7,-10.2
3,9,2000,52.7,-5.4
3,10,2000,53,-5.4
3,11,2000,58.2,-0.4
3,12,2000,62.9,4
3,13,2000,64.5,5.3
3,14,2000,62.6,3.2
3,15,2000,64.9,5.2
3,16,2000,66.1,6.1
3,17,2000,65.5,5.3
3,18,2000,63.9,3.4
3,19,2000,64.2,3.4
3,20,2000,67.4,6.3
3,21,2000,48,-13.4
3,22,2000,51.2,-10.5
3,23,2000,55.5,-6.4
3,24,2000,61.4,-0.8
3,25,2000,64.7,2.2
3,26,2000,64.6,1.8
3,27,2000,65.5,2.4
3,28,2000,63.4,0
3,29,2000,61.5,-2.2
3,30,2000,65.3,1.3
3,31,2000,61.7,-2.6
4,1,2000,53.2,-11.5
4,2,2000,53.9,-11.1
4,3,2000,61.6,-3.7
4,4,2000,69.1,3.5
4,5,2000,74.6,8.7
4,6,2000,72.7,6.5
4,7,2000,71.6,5.1
4,8,2000,72.7,5.8
4,9,2000,78.9,11.7
4,10,2000,75.7,8.2
4,11,2000,69.9,2.1
4,12,2000,70.2,2.1
4,13,2000,73.8,5.4
4,14,2000,75.4,6.6
4,15,2000,65.3,-3.8
4,16,2000,67.6,-1.8
4,17,2000,73,3.3
4,18,2000,71.2,1.1
4,19,2000,63.4,-7
4,20,2000,66.7,-4
4,21,2000,75.6,4.6
4,22,2000,67.3,-4
4,23,2000,68.3,-3.4
4,24,2000,74.9,2.9
4,25,2000,76.9,4.6
4,26,2000,80,7.4
4,27,2000,80.2,7.3
4,28,2000,82.8,9.5
4,29,2000,77.7,4.1
4,30,2000,70.9,-3
5,1,2000,73.1,-1.1
5,2,2000,77.2,2.7
5,3,2000,80.9,6.1
5,4,2000,81.1,6
5,5,2000,82.1,6.6
5,6,2000,83,7.2
5,7,2000,79.4,3.3
5,8,2000,80.7,4.3
5,9,2000,79.3,2.6
5,10,2000,83.6,6.6
5,11,2000,82.9,5.6
5,12,2000,72.1,-5.5
5,13,2000,72.2,-5.7
5,14,2000,79,0.8
5,15,2000,84.4,6
5,16,2000,83.2,4.5
5,17,2000,69.3,-9.7
5,18,2000,70.9,-8.4
5,19,2000,76,-3.6
5,20,2000,79.1,-0.7
5,21,2000,84,3.9
5,22,2000,87.4,7
5,23,2000,84.5,3.8
5,24,2000,83.8,2.9
5,25,2000,82.8,1.6
5,26,2000,81.8,0.4
5,27,2000,85,3.3
5,28,2000,91.5,9.6
5,29,2000,92.7,10.5
5,30,2000,91.2,8.8
5,31,2000,89.4,6.7
6,1,2000,87.4,4.5
6,2,2000,85.9,2.8
6,3,2000,87.4,4
6,4,2000,86.9,3.3
6,5,2000,89.2,5.4
6,6,2000,92.7,8.7
6,7,2000,91.8,7.6
6,8,2000,86.5,2.1
6,9,2000,83.2,-1.4
6,10,2000,80.3,-4.5
6,11,2000,82.7,-2.3
6,12,2000,84.8,-0.4
6,13,2000,88.6,3.2
6,14,2000,90.2,4.6
6,15,2000,90.5,4.8
6,16,2000,91.1,5.2
6,17,2000,89.6,3.5
6,18,2000,81.5,-4.7
6,19,2000,81.3,-5.1
6,20,2000,75.6,-11
6,21,2000,82.1,-4.6
6,22,2000,83.4,-3.4
6,23,2000,76.8,-10.2
6,24,2000,77.9,-9.2
6,25,2000,86.5,-0.7
6,26,2000,89.2,1.8
6,27,2000,89.3,1.8
6,28,2000,82.8,-4.8
6,29,2000,81.9,-5.8
6,30,2000,79.4,-8.4
7,1,2000,82.5,-5.4
7,2,2000,81.9,-6.1
7,3,2000,83.5,-4.6
7,4,2000,87.3,-0.8
7,5,2000,86.4,-1.8
7,6,2000,85.9,-2.4
7,7,2000,85.9,-2.4
7,8,2000,80.2,-8.2
7,9,2000,82.4,-6
7,10,2000,86.6,-1.9
7,11,2000,88.4,-0.1
7,12,2000,89.8,1.2
7,13,2000,87.8,-0.8
7,14,2000,88.5,-0.1
7,15,2000,88.1,-0.5
7,16,2000,85.1,-3.6
7,17,2000,90.5,1.8
7,18,2000,92.5,3.8
7,19,2000,94.2,5.5
7,20,2000,93.4,4.7
7,21,2000,92.5,3.9
7,22,2000,88.9,0.3
7,23,2000,87.4,-1.2
7,24,2000,91,2.4
7,25,2000,91.3,2.8
7,26,2000,92.5,4
7,27,2000,92.7,4.3
7,28,2000,92,3.6
7,29,2000,92.1,3.8
7,30,2000,87.1,-1.2
7,31,2000,89,0.8
8,1,2000,86.4,-1.7
8,2,2000,89.9,1.9
8,3,2000,91.3,3.3
8,4,2000,92.1,4.2
8,5,2000,89.8,2
8,6,2000,82.1,-5.6
8,7,2000,83.7,-3.9
8,8,2000,80.4,-7
8,9,2000,86.1,-1.2
8,10,2000,87,-0.2
8,11,2000,87.6,0.5
8,12,2000,83.7,-3.2
8,13,2000,86.1,-0.7
8,14,2000,82.9,-3.8
8,15,2000,86.7,0.2
8,16,2000,90.5,4.1
8,17,2000,89.7,3.5
8,18,2000,79.8,-6.2
8,19,2000,86.1,0.2
8,20,2000,83.5,-2.2
8,21,2000,86.6,1.1
8,22,2000,85.7,0.3
8,23,2000,78.7,-6.5
8,24,2000,82.4,-2.6
8,25,2000,82.3,-2.5
8,26,2000,83.1,-1.5
8,27,2000,82.2,-2.2
8,28,2000,74.9,-9.3
8,29,2000,73.5,-10.5
8,30,2000,73.2,-10.5
8,31,2000,75.5,-8
9,1,2000,79.9,-3.4
9,2,2000,81.2,-1.9
9,3,2000,81.2,-1.6
9,4,2000,84.1,1.5
9,5,2000,88,5.6
9,6,2000,86.4,4.3
9,7,2000,86.8,4.9
9,8,2000,89.4,7.8
9,9,2000,87.9,6.5
9,10,2000,86.5,5.4
9,11,2000,80.5,-0.4
9,12,2000,84,3.4
9,13,2000,87,6.7
9,14,2000,88.4,8.3
9,15,2000,91.7,11.9
9,16,2000,88.1,8.6
9,17,2000,88.3,9.1
9,18,2000,85.8,6.8
9,19,2000,85.1,6.4
9,20,2000,83.9,5.5
9,21,2000,84.6,6.5
9,22,2000,84.9,7.1
9,23,2000,82.3,4.8
9,24,2000,76.5,-0.7
9,25,2000,78.4,1.5
9,26,2000,85.6,9
9,27,2000,83.1,6.8
9,28,2000,85.7,9.7
9,29,2000,84.4,8.7
9,30,2000,84.7,9.3
10,1,2000,83.5,8.4
10,2,2000,82.5,7.7
10,3,2000,83.7,9.2
10,4,2000,80.3,6.2
10,5,2000,81,7.2
10,6,2000,80.5,7
10,7,2000,81.5,8.3
10,8,2000,77.2,4.3
10,9,2000,64.3,-8.3
10,10,2000,67.3,-4.9
10,11,2000,66,-5.9
10,12,2000,60.6,-11
10,13,2000,64.3,-7
10,14,2000,64.2,-6.8
10,15,2000,66.2,-4.4
10,16,2000,67.4,-2.9
10,17,2000,70.1,0.1
10,18,2000,73.1,3.4
10,19,2000,71.4,2.1
10,20,2000,65.2,-3.8
10,21,2000,64.8,-3.9
10,22,2000,59.8,-8.6
10,23,2000,56.4,-11.7
10,24,2000,59.4,-8.3
10,25,2000,60.3,-7.1
10,26,2000,63.3,-3.8
10,27,2000,61.4,-5.4
10,28,2000,56.3,-10.2
10,29,2000,57.5,-8.6
10,30,2000,59.3,-6.5
10,31,2000,55.6,-9.9
11,1,2000,51.1,-14.1
11,2,2000,52.6,-12.3
11,3,2000,57,-7.6
11,4,2000,56.8,-7.5
11,5,2000,52.9,-11.1
11,6,2000,54.6,-9.1
11,7,2000,44.1,-19.3
11,8,2000,43.8,-19.3
11,9,2000,50.1,-12.7
11,10,2000,55.2,-7.3
11,11,2000,51.4,-10.8
11,12,2000,49.3,-12.6
11,13,2000,47.2,-14.4
11,14,2000,52.2,-9.1
11,15,2000,52.1,-8.9
11,16,2000,46.5,-14.2
11,17,2000,44.8,-15.7
11,18,2000,44.8,-15.4
11,19,2000,45.9,-14
11,20,2000,52.3,-7.3
11,21,2000,60.6,1.2
11,22,2000,62.6,3.5
11,23,2000,53.8,-5
11,24,2000,49.5,-9.1
11,25,2000,52,-6.3
11,26,2000,52.2,-5.9
11,27,2000,53.6,-4.2
11,28,2000,55.4,-2.2
11,29,2000,57.5,0.2
11,30,2000,58.6,1.5
12,1,2000,57.2,0.3
12,2,2000,59.6,3
12,3,2000,59.6,3.2
12,4,2000,56.5,0.3
12,5,2000,58.5,2.5
12,6,2000,55.2,-0.6
12,7,2000,57.6,2
12,8,2000,58.6,3.2
12,9,2000,54.7,-0.5
12,10,2000,54.8,-0.2
12,11,2000,52.9,-1.9
12,12,2000,55.2,0.6
12,13,2000,52.1,-2.3
12,14,2000,50.4,-3.8
12,15,2000,49.4,-4.6
12,16,2000,49.1,-4.8
12,17,2000,50.9,-2.8
12,18,2000,49.2,-4.3
12,19,2000,55.8,2.4
12,20,2000,47.1,-6.1
12,21,2000,50.4,-2.7
12,22,2000,53.3,0.3
12,23,2000,54.8,2
12,24,2000,55,2.3
12,25,2000,50.8,-1.8
12,26,2000,44.7,-7.8
12,27,2000,48.8,-3.5
12,28,2000,46.7,-5.5
12,29,2000,50.1,-2
12,30,2000,52.8,0.8
12,31,2000,50.4,-1.5
1,1,2001,52.3,0.5
1,2,2001,50.5,-1.3
1,3,2001,57.1,5.4
1,4,2001,59.2,7.6
1,5,2001,59.4,7.8
1,6,2001,58.9,7.4
1,7,2001,52.6,1.2
1,8,2001,58.3,6.9
1,9,2001,53.6,2.3
1,10,2001,47.6,-3.7
1,11,2001,50.2,-1.1
1,12,2001,48,-3.2
1,13,2001,41.5,-9.7
1,14,2001,42.5,-8.7
1,15,2001,44.6,-6.6
1,16,2001,44.3,-6.9
1,17,2001,39.9,-11.3
1,18,2001,39.6,-11.6
1,19,2001,41.4,-9.8
1,20,2001,46.9,-4.3
1,21,2001,48.6,-2.6
1,22,2001,56.5,5.2
1,23,2001,53.5,2.2
1,24,2001,52.3,1
1,25,2001,52.3,0.9
1,26,2001,47.5,-3.9
1,27,2001,49.2,-2.3
1,28,2001,40.3,-11.2
1,29,2001,42.7,-8.9
1,30,2001,42.7,-9
1,31,2001,42,-9.8
2,1,2001,45.6,-6.2
2,2,2001,51.1,-0.8
2,3,2001,50.3,-1.7
2,4,2001,56,3.9
2,5,2001,60.5,8.3
2,6,2001,62.1,9.8
2,7,2001,63.4,11
2,8,2001,47.5,-5.1
2,9,2001,39,-13.7
2,10,2001,48.2,-4.6
2,11,2001,51.6,-1.4
2,12,2001,50.8,-2.3
2,13,2001,57.1,3.9
2,14,2001,49.7,-3.7
2,15,2001,43,-10.5
2,16,2001,44.6,-9.1
2,17,2001,51.9,-2
2,18,2001,60.8,6.8
2,19,2001,56.9,2.7
2,20,2001,57.4,3
2,21,2001,56.5,1.9
2,22,2001,56.5,1.7
2,23,2001,55.5,0.6
2,24,2001,47.4,-7.7
2,25,2001,51,-4.3
2,26,2001,52.2,-3.3
2,27,2001,50,-5.8
2,28,2001,49.6,-6.4
3,1,2001,48.1,-8.1
3,2,2001,50,-6.4
3,3,2001,50.1,-6.5
3,4,2001,53.1,-3.8
3,5,2001,58.2,1.1
3,6,2001,64,6.7
3,7,2001,54.4,-3.2
3,8,2001,50.4,-7.4
3,9,2001,55.7,-2.4
3,10,2001,57,-1.3
3,11,2001,50.2,-8.4
3,12,2001,51.1,-7.7
3,13,2001,54.6,-4.5
3,14,2001,55.8,-3.6
3,15,2001,57.5,-2.1
3,16,2001,56.7,-3.2
3,17,2001,59,-1.2
3,18,2001,56.6,-3.8
3,19,2001,62.5,1.8
3,20,2001,68.8,7.8
3,21,2001,69.8,8.5
3,22,2001,70.6,9
3,23,2001,67.6,5.7
3,24,2001,66.2,4
3,25,2001,68.3,5.8
3,26,2001,68.9,6.2
3,27,2001,66.2,3.2
3,28,2001,65.6,2.3
3,29,2001,68.4,4.7
3,30,2001,69.7,5.7
3,31,2001,67.5,3.2
4,1,2001,69.7,5.1
4,2,2001,71.6,6.7
4,3,2001,68.8,3.6
4,4,2001,67.3,1.8
4,5,2001,65.8,0
4,6,2001,51.8,-14.3
4,7,2001,56.2,-10.3
4,8,2001,59.3,-7.5
4,9,2001,59.8,-7.3
4,10,2001,52.7,-14.7
4,11,2001,49.1,-18.6
4,12,2001,55,-13
4,13,2001,60,-8.4
4,14,2001,65.1,-3.6
4,15,2001,68.4,-0.6
4,16,2001,70.7,1.4
4,17,2001,74.8,5.1
4,18,2001,78,8
4,19,2001,78.9,8.6
4,20,2001,75.4,4.8
4,21,2001,67.1,-3.8
4,22,2001,52.7,-18.6
4,23,2001,60.4,-11.2
4,24,2001,71.9,0
4,25,2001,80.8,8.6
4,26,2001,79.5,7
4,27,2001,78.2,5.3
4,28,2001,77.2,4
4,29,2001,75.3,1.8
4,30,2001,75.6,1.8
5,1,2001,77.1,3
5,2,2001,78.2,3.8
5,3,2001,72.6,-2.2
5,4,2001,67.5,-7.6
5,5,2001,67.3,-8.1
5,6,2001,70.9,-4.8
5,7,2001,76,0
5,8,2001,82.2,5.9
5,9,2001,84.6,8
5,10,2001,83.6,6.7
5,11,2001,84.3,7.1
5,12,2001,84.6,7.1
5,13,2001,77.5,-0.3
5,14,2001,78.6,0.5
5,15,2001,79.3,0.9
5,16,2001,80.4,1.7
5,17,2001,82,3.1
5,18,2001,84.2,5
5,19,2001,77.4,-2.1
5,20,2001,77.6,-2.2
5,21,2001,80.3,0.3
5,22,2001,84.1,3.8
5,23,2001,86.2,5.6
5,24,2001,87.4,6.6
5,25,2001,87.9,6.8
5,26,2001,87.6,6.2
5,27,2001,88.7,7.1
5,28,2001,83.4,1.5
5,29,2001,78.8,-3.3
5,30,2001,81.9,-0.5
5,31,2001,88.2,5.6
6,1,2001,87.7,4.9
6,2,2001,91.4,8.3
6,3,2001,86.6,3.3
6,4,2001,82.6,-0.9
6,5,2001,79.9,-3.8
6,6,2001,82.9,-1.1
6,7,2001,85.7,1.5
6,8,2001,90.3,5.9
6,9,2001,88.8,4.2
6,10,2001,89.2,4.4
6,11,2001,89.3,4.3
6,12,2001,89.1,3.9
6,13,2001,84.3,-1
6,14,2001,81.5,-4
6,15,2001,81.5,-4.2
6,16,2001,86.4,0.5
6,17,2001,90,4
6,18,2001,91.5,5.3
6,19,2001,91.2,4.8
6,20,2001,90.9,4.4
6,21,2001,82.4,-4.3
6,22,2001,85.2,-1.6
6,23,2001,86.3,-0.6
6,24,2001,88.7,1.6
6,25,2001,84.5,-2.7
6,26,2001,78.1,-9.2
6,27,2001,85,-2.4
6,28,2001,89,1.4
6,29,2001,90.9,3.2
6,30,2001,93.1,5.3
7,1,2001,93.1,5.2
7,2,2001,92.6,4.7
7,3,2001,93.6,5.6
7,4,2001,87.7,-0.4
7,5,2001,83.2,-5
7,6,2001,75.9,-12.4
7,7,2001,81.7,-6.6
7,8,2001,81.7,-6.7
7,9,2001,82.6,-5.8
7,10,2001,82.7,-5.8
7,11,2001,82.9,-5.6
7,12,2001,83.3,-5.3
7,13,2001,88,-0.6
7,14,2001,90.2,1.6
7,15,2001,86.6,-2
7,16,2001,88.7,0
7,17,2001,81.8,-6.9
7,18,2001,83.9,-4.8
7,19,2001,81.7,-7
7,20,2001,82.5,-6.2
7,21,2001,83.7,-4.9
7,22,2001,85.4,-3.2
7,23,2001,86.7,-1.9
7,24,2001,87.9,-0.7
7,25,2001,85.2,-3.3
7,26,2001,81.8,-6.7
7,27,2001,85.4,-3.1
7,28,2001,88.7,0.3
7,29,2001,82.8,-5.5
7,30,2001,81.5,-6.8
7,31,2001,82.8,-5.4
8,1,2001,82.4,-5.7
8,2,2001,85.1,-3
8,3,2001,88.9,0.9
8,4,2001,86.6,-1.3
8,5,2001,81.3,-6.5
8,6,2001,86.8,-0.9
8,7,2001,90,2.4
8,8,2001,86.3,-1.2
8,9,2001,77.4,-10
8,10,2001,81.3,-5.9
8,11,2001,86,-1.1
8,12,2001,81.1,-5.9
8,13,2001,81.3,-5.5
8,14,2001,79.5,-7.2
8,15,2001,82.8,-3.8
8,16,2001,86,-0.4
8,17,2001,83.5,-2.8
8,18,2001,79.5,-6.6
8,19,2001,84.3,-1.6
8,20,2001,84.4,-1.4
8,21,2001,86.7,1.1
8,22,2001,86.5,1.1
8,23,2001,84.8,-0.4
8,24,2001,87,2
8,25,2001,88.8,4
8,26,2001,92.2,7.6
8,27,2001,91,6.6
8,28,2001,91.1,6.9
8,29,2001,88.2,4.2
8,30,2001,81,-2.8
8,31,2001,82.3,-1.3
9,1,2001,83.2,-0.2
9,2,2001,83.8,0.7
9,3,2001,87.5,4.6
9,4,2001,88.8,6.1
9,5,2001,88.9,6.5
9,6,2001,88.1,5.9
9,7,2001,85.3,3.4
9,8,2001,82.5,0.8
9,9,2001,81.9,0.5
9,10,2001,88.6,7.4
9,11,2001,89.6,8.7
9,12,2001,91.7,11
9,13,2001,82.7,2.3
9,14,2001,77.6,-2.5
9,15,2001,81.8,1.9
9,16,2001,82.5,2.9
9,17,2001,83.4,4.1
9,18,2001,82.8,3.8
9,19,2001,83.6,4.9
9,20,2001,84.4,5.9
9,21,2001,84.4,6.2
9,22,2001,84.1,6.2
9,23,2001,85.1,7.5
9,24,2001,86.5,9.2
9,25,2001,84.2,7.2
9,26,2001,84.1,7.4
9,27,2001,85,8.6
9,28,2001,85.3,9.2
9,29,2001,84.2,8.4
9,30,2001,81.3,5.8
10,1,2001,72.2,-3
10,2,2001,74.1,-0.8
10,3,2001,76.1,1.6
10,4,2001,72.2,-2
10,5,2001,74.2,0.3
10,6,2001,74.3,0.7
10,7,2001,71.2,-2.1
10,8,2001,67.7,-5.3
10,9,2001,67.2,-5.4
10,10,2001,69.4,-2.9
10,11,2001,68.5,-3.5
10,12,2001,68.4,-3.3
10,13,2001,65.3,-6.1
10,14,2001,68.6,-2.4
10,15,2001,71.3,0.6
10,16,2001,75.4,5
10,17,2001,78.7,8.6
10,18,2001,73.9,4.1
10,19,2001,71.8,2.4
10,20,2001,73.7,4.6
10,21,2001,73.9,5.1
10,22,2001,74.4,5.9
10,23,2001,72.1,4
10,24,2001,70.6,2.8
10,25,2001,71.4,3.9
10,26,2001,73.3,6.1
10,27,2001,81.1,14.2
10,28,2001,77.3,10.8
10,29,2001,72.7,6.5
10,30,2001,75,9.1
10,31,2001,78.2,12.6
11,1,2001,71.2,5.9
11,2,2001,69.3,4.3
11,3,2001,71.6,6.9
11,4,2001,80.2,15.8
11,5,2001,75.6,11.5
11,6,2001,67.7,4
11,7,2001,69.2,5.8
11,8,2001,67.1,4
11,9,2001,71.6,8.8
11,10,2001,73.2,10.7
11,11,2001,68,5.8
11,12,2001,66.6,4.6
11,13,2001,66.1,4.4
11,14,2001,57.4,-4
11,15,2001,57.3,-3.8
11,16,2001,61.8,1
11,17,2001,64,3.5
11,18,2001,63.9,3.6
11,19,2001,64.8,4.8
11,20,2001,66.6,6.9
11,21,2001,61.6,2.2
11,22,2001,61.4,2.2
11,23,2001,63.9,5
11,24,2001,52.2,-6.4
11,25,2001,60.7,2.3
11,26,2001,52.1,-6
11,27,2001,44.3,-13.6
11,28,2001,42.2,-15.4
11,29,2001,48.2,-9.2
11,30,2001,53.1,-4.1
12,1,2001,52.5,-4.4
12,2,2001,54.7,-2
12,3,2001,57.3,0.8
12,4,2001,51.2,-5.1
12,5,2001,42.1,-13.9
12,6,2001,45.3,-10.5
12,7,2001,47.8,-7.8
12,8,2001,51.8,-3.6
12,9,2001,55.3,0.1
12,10,2001,57.5,2.5
12,11,2001,45.9,-8.9
12,12,2001,39.8,-14.8
12,13,2001,37.2,-17.2
12,14,2001,42,-12.3
12,15,2001,47.4,-6.7
12,16,2001,38.4,-15.5
12,17,2001,41.9,-11.9
12,18,2001,45.3,-8.3
12,19,2001,50.1,-3.3
12,20,2001,58.4,5.1
12,21,2001,58.1,5
12,22,2001,45,-8
12,23,2001,43.9,-9
12,24,2001,44,-8.7
12,25,2001,43.2,-9.4
12,26,2001,44.1,-8.4
12,27,2001,45.5,-6.9
12,28,2001,48.3,-4
12,29,2001,55.5,3.3
12,30,2001,55.1,3
12,31,2001,52.8,0.8
1,1,2002,51.4,-0.5
1,2,2002,50.5,-1.3
1,3,2002,55,3.3
1,4,2002,50,-1.6
1,5,2002,46.1,-5.5
1,6,2002,46.2,-5.3
1,7,2002,50.5,-0.9
1,8,2002,57.1,5.7
1,9,2002,60,8.6
1,10,2002,60.3,9
1,11,2002,56.2,4.9
1,12,2002,54.4,3.2
1,13,2002,54.5,3.3
1,14,2002,55.1,3.9
1,15,2002,56.2,5
1,16,2002,61.8,10.6
1,17,2002,55.2,4
1,18,2002,48.5,-2.7
1,19,2002,43.7,-7.5
1,20,2002,45.8,-5.4
1,21,2002,44.5,-6.7
1,22,2002,50.2,-1.1
1,23,2002,48.6,-2.7
1,24,2002,38.6,-12.7
1,25,2002,49.1,-2.3
1,26,2002,55,3.6
1,27,2002,57.6,6.1
1,28,2002,57.6,6.1
1,29,2002,51.6,0
1,30,2002,42.4,-9.3
1,31,2002,35.3,-16.4
2,1,2002,43.9,-7.9
2,2,2002,54.5,2.6
2,3,2002,51.3,-0.7
2,4,2002,55.9,3.8
2,5,2002,52.4,0.2
2,6,2002,49.5,-2.8
2,7,2002,50.8,-1.6
2,8,2002,54.6,2.1
2,9,2002,51.9,-0.8
2,10,2002,53.6,0.8
2,11,2002,54.6,1.7
2,12,2002,55.5,2.4
2,13,2002,56.5,3.3
2,14,2002,59.9,6.6
2,15,2002,58.6,5.1
2,16,2002,62.8,9.1
2,17,2002,65.2,11.4
2,18,2002,58.2,4.2
2,19,2002,48.2,-6
2,20,2002,51.7,-2.6
2,21,2002,56.1,1.6
2,22,2002,64.4,9.7
2,23,2002,65.7,10.8
2,24,2002,64.2,9.1
2,25,2002,59.8,4.5
2,26,2002,59.8,4.3
2,27,2002,59.6,3.9
2,28,2002,63.5,7.6
3,1,2002,62.1,6
3,2,2002,48.4,-7.9
3,3,2002,42.5,-14.1
3,4,2002,53.3,-3.5
3,5,2002,54.9,-2.1
3,6,2002,57.3,0
3,7,2002,62.3,4.8
3,8,2002,61.6,3.8
3,9,2002,57.6,-0.4
3,10,2002,64.9,6.7
3,11,2002,62.9,4.4
3,12,2002,63.2,4.4
3,13,2002,67.2,8.2
3,14,2002,63.4,4.1
3,15,2002,52.3,-7.3
3,16,2002,49.9,-9.9
3,17,2002,51.7,-8.4
3,18,2002,51.7,-8.7
3,19,2002,46.9,-13.7
3,20,2002,58.8,-2.1
3,21,2002,71.6,10.4
3,22,2002,72,10.5
3,23,2002,69.1,7.3
3,24,2002,61.6,-0.5
3,25,2002,56.1,-6.3
3,26,2002,65.5,2.8
3,27,2002,65.9,2.9
3,28,2002,68.8,5.5
3,29,2002,62.2,-1.4
3,30,2002,62,-1.9
3,31,2002,67.8,3.6
4,1,2002,72.2,7.7
4,2,2002,74.1,9.3
4,3,2002,73,7.9
4,4,2002,73.3,7.9
4,5,2002,74.2,8.5
4,6,2002,72.8,6.7
4,7,2002,61.5,-4.9
4,8,2002,66.1,-0.6
4,9,2002,70.4,3.4
4,10,2002,73.5,6.2
4,11,2002,78,10.4
4,12,2002,77.2,9.2
4,13,2002,77.7,9.4
4,14,2002,78.8,10.2
4,15,2002,79.1,10.2
4,16,2002,69,-0.2
4,17,2002,70.1,0.5
4,18,2002,70.8,0.9
4,19,2002,70.4,0.2
4,20,2002,64.2,-6.3
4,21,2002,63,-7.9
4,22,2002,69.2,-2
4,23,2002,74.8,3.3
4,24,2002,75.6,3.8
4,25,2002,80.1,8
4,26,2002,75.7,3.2
4,27,2002,64.3,-8.5
4,28,2002,70,-3.1
4,29,2002,75.7,2.3
4,30,2002,77.5,3.8
5,1,2002,73.2,-0.8
5,2,2002,68.8,-5.6
5,3,2002,66.4,-8.3
5,4,2002,70.9,-4.1
5,5,2002,74.3,-1
5,6,2002,73.4,-2.2
5,7,2002,74.7,-1.2
5,8,2002,73.8,-2.4
5,9,2002,75.4,-1.1
5,10,2002,78.6,1.8
5,11,2002,79.7,2.6
5,12,2002,73.7,-3.7
5,13,2002,80,2.3
5,14,2002,85.4,7.4
5,15,2002,82.4,4.1
5,16,2002,78.8,0.2
5,17,2002,79.9,1
5,18,2002,81.8,2.7
5,19,2002,81.4,2
5,20,2002,80.2,0.5
5,21,2002,73.3,-6.7
5,22,2002,67.1,-13.1
5,23,2002,72.8,-7.7
5,24,2002,75.4,-5.4
5,25,2002,77.8,-3.2
5,26,2002,79.3,-2
5,27,2002,77.4,-4.2
5,28,2002,79.7,-2.1
5,29,2002,84.3,2.3
5,30,2002,86.7,4.4
5,31,2002,87.8,5.3
6,1,2002,88.6,5.8
6,2,2002,86.8,3.8
6,3,2002,84.3,1.1
6,4,2002,78.3,-5.2
6,5,2002,82.2,-1.5
6,6,2002,89,5.1
6,7,2002,91.1,7
6,8,2002,90.4,6.1
6,9,2002,89.2,4.7
6,10,2002,85.4,0.7
6,11,2002,83.3,-1.6
6,12,2002,87.5,2.4
6,13,2002,89.1,3.8
6,14,2002,90.5,5
6,15,2002,92.4,6.7
6,16,2002,92,6.2
6,17,2002,92.5,6.5
6,18,2002,92,5.8
6,19,2002,92,5.7
6,20,2002,92,5.5
6,21,2002,92,5.4
6,22,2002,90.5,3.7
6,23,2002,89.7,2.8
6,24,2002,91.5,4.5
6,25,2002,92.8,5.6
6,26,2002,96.3,9
6,27,2002,94.5,7.1
6,28,2002,92.1,4.6
6,29,2002,90.2,2.6
6,30,2002,92.1,4.4
7,1,2002,94.2,6.4
7,2,2002,93.3,5.4
7,3,2002,93.6,5.6
7,4,2002,91.7,3.6
7,5,2002,87.9,-0.3
7,6,2002,89.8,1.6
7,7,2002,90.2,1.9
7,8,2002,93.5,5.1
7,9,2002,91.4,3
7,10,2002,89.4,0.9
7,11,2002,89.6,1.1
7,12,2002,85.3,-3.3
7,13,2002,91.6,3
7,14,2002,91.7,3.1
7,15,2002,80.3,-8.3
7,16,2002,84.4,-4.3
7,17,2002,84.8,-3.9
7,18,2002,83.9,-4.8
7,19,2002,85.9,-2.8
7,20,2002,85.3,-3.4
7,21,2002,88.6,0
7,22,2002,87.7,-0.9
7,23,2002,79.7,-8.9
7,24,2002,85.5,-3.1
7,25,2002,85.9,-2.6
7,26,2002,87.5,-1
7,27,2002,76.7,-11.8
7,28,2002,81.3,-7.1
7,29,2002,84.5,-3.9
7,30,2002,84.4,-3.9
7,31,2002,82.7,-5.5
8,1,2002,85.2,-3
8,2,2002,86.3,-1.8
8,3,2002,81.5,-6.5
8,4,2002,76.1,-11.8
8,5,2002,78.8,-9
8,6,2002,74.7,-13
8,7,2002,81.4,-6.2
8,8,2002,87.6,0.1
8,9,2002,89.4,2
8,10,2002,85.5,-1.8
8,11,2002,89.4,2.3
8,12,2002,89,2
8,13,2002,90.4,3.5
8,14,2002,91.6,4.9
8,15,2002,90.4,3.8
8,16,2002,90.9,4.5
8,17,2002,86.1,-0.2
8,18,2002,89.5,3.4
8,19,2002,81.3,-4.7
8,20,2002,83.1,-2.7
8,21,2002,86.1,0.5
8,22,2002,85.7,0.3
8,23,2002,84.8,-0.5
8,24,2002,84.2,-0.9
8,25,2002,96.4,11.5
8,26,2002,88.5,3.8
8,27,2002,89.5,5
8,28,2002,91.5,7.2
8,29,2002,81.3,-2.8
8,30,2002,82.6,-1.3
8,31,2002,88.4,4.8
9,1,2002,88.3,4.9
9,2,2002,89.3,6.1
9,3,2002,89.4,6.4
9,4,2002,89.9,7.2
9,5,2002,90.6,8.1
9,6,2002,87.4,5.1
9,7,2002,75.6,-6.4
9,8,2002,75.7,-6.1
9,9,2002,73.1,-8.4
9,10,2002,76.9,-4.4
9,11,2002,75.6,-5.4
9,12,2002,79.2,-1.5
9,13,2002,79.5,-1
9,14,2002,79.6,-0.6
9,15,2002,84.3,4.4
9,16,2002,85.2,5.5
9,17,2002,85.9,6.5
9,18,2002,83.2,4.1
9,19,2002,79,0.2
9,20,2002,79.4,0.9
9,21,2002,79.8,1.6
9,22,2002,81.5,3.5
9,23,2002,87.6,9.9
9,24,2002,87.9,10.5
9,25,2002,86.2,9.1
9,26,2002,83.7,6.9
9,27,2002,83.8,7.3
9,28,2002,78.9,2.7
9,29,2002,78.1,2.2
9,30,2002,75.8,0.2
10,1,2002,77.5,2.3
10,2,2002,72.3,-2.6
10,3,2002,64.8,-9.8
10,4,2002,64.5,-9.8
10,5,2002,69.6,-4.4
10,6,2002,84.8,11.1
10,7,2002,74.2,0.8
10,8,2002,72.9,-0.1
10,9,2002,75.9,3.2
10,10,2002,77.2,4.8
10,11,2002,77.2,5.1
10,12,2002,74.5,2.7
10,13,2002,76.5,5.1
10,14,2002,75.7,4.6
10,15,2002,77.4,6.6
10,16,2002,75.1,4.6
10,17,2002,74.6,4.4
10,18,2002,63.8,-6
10,19,2002,64.3,-5.2
10,20,2002,67.5,-1.7
10,21,2002,71.3,2.4
10,22,2002,69.8,1.3
10,23,2002,69.8,1.6
10,24,2002,65.9,-2
10,25,2002,64.9,-2.7
10,26,2002,72.6,5.3
10,27,2002,60.1,-6.8
10,28,2002,56.8,-9.8
10,29,2002,58.9,-7.4
10,30,2002,61.8,-4.2
10,31,2002,63.6,-2.1
11,1,2002,65.3,-0.1
11,2,2002,64.6,-0.5
11,3,2002,61.3,-3.4
11,4,2002,59.5,-4.9
11,5,2002,56.1,-8
11,6,2002,61,-2.8
11,7,2002,66.2,2.7
11,8,2002,66,2.8
11,9,2002,66.2,3.3
11,10,2002,66.4,3.8
11,11,2002,58.5,-3.8
11,12,2002,60.4,-1.6
11,13,2002,66.2,4.5
11,14,2002,60.1,-1.4
11,15,2002,56.1,-5.1
11,16,2002,58.6,-2.3
11,17,2002,62.2,1.6
11,18,2002,57.9,-2.4
11,19,2002,56.3,-3.7
11,20,2002,60.1,0.3
11,21,2002,69.2,9.7
11,22,2002,66.1,6.9
11,23,2002,64.7,5.7
11,24,2002,58.9,0.2
11,25,2002,56,-2.5
11,26,2002,54.8,-3.4
11,27,2002,58,0
11,28,2002,57.6,-0.1
11,29,2002,58.8,1.3
11,30,2002,61.1,3.9
12,1,2002,56.4,-0.6
12,2,2002,54.6,-2.2
12,3,2002,54.1,-2.4
12,4,2002,49.3,-7
12,5,2002,52.8,-3.3
12,6,2002,58.4,2.5
12,7,2002,56.1,0.4
12,8,2002,51.3,-4.2
12,9,2002,52.7,-2.6
12,10,2002,54.7,-0.4
12,11,2002,49.2,-5.7
12,12,2002,49.4,-5.3
12,13,2002,49.7,-4.8
12,14,2002,53.3,-1
12,15,2002,55.4,1.3
12,16,2002,53.6,-0.4
12,17,2002,61.3,7.5
12,18,2002,48.4,-5.2
12,19,2002,42.7,-10.8
12,20,2002,46.8,-6.5
12,21,2002,45.2,-8
12,22,2002,45.4,-7.6
12,23,2002,42.7,-10.2
12,24,2002,41.3,-11.5
12,25,2002,41.5,-11.1
12,26,2002,40.6,-11.9
12,27,2002,44.2,-8.2
12,28,2002,56.5,4.2
12,29,2002,54.8,2.6
12,30,2002,48.9,-3.2
12,31,2002,46.1,-5.9
1,1,2003,48.5,-3.4
1,2,2003,50.8,-1
1,3,2003,58.2,6.5
1,4,2003,57.3,5.6
1,5,2003,56.4,4.8
1,6,2003,59.9,8.4
1,7,2003,62.3,10.8
1,8,2003,63.8,12.4
1,9,2003,54.9,3.5
1,10,2003,53.4,2.1
1,11,2003,53.6,2.3
1,12,2003,51.5,0.2
1,13,2003,52.9,1.7
1,14,2003,58.9,7.7
1,15,2003,65.5,14.3
1,16,2003,56.5,5.3
1,17,2003,68.2,17
1,18,2003,57.9,6.7
1,19,2003,58.9,7.7
1,20,2003,59.5,8.3
1,21,2003,53.7,2.5
1,22,2003,53.3,2
1,23,2003,56,4.7
1,24,2003,61.2,9.9
1,25,2003,59.4,8
1,26,2003,58.2,6.8
1,27,2003,64.4,12.9
1,28,2003,60.5,9
1,29,2003,58.9,7.3
1,30,2003,57.8,6.2
1,31,2003,60.3,8.6
2,1,2003,66.2,14.4
2,2,2003,65.7,13.8
2,3,2003,50.5,-1.5
2,4,2003,48.4,-3.7
2,5,2003,54.5,2.3
2,6,2003,47.5,-4.8
2,7,2003,47.9,-4.5
2,8,2003,52.7,0.2
2,9,2003,47.4,-5.2
2,10,2003,47.2,-5.6
2,11,2003,57.9,5
2,12,2003,59.6,6.6
2,13,2003,60.8,7.6
2,14,2003,54.6,1.3
2,15,2003,51,-2.5
2,16,2003,53.9,0.3
2,17,2003,58.4,4.6
2,18,2003,63,9.1
2,19,2003,56.3,2.2
2,20,2003,56.6,2.3
2,21,2003,49,-5.5
2,22,2003,54.7,0
2,23,2003,56.4,1.6
2,24,2003,61.1,6.1
2,25,2003,58.9,3.7
2,26,2003,53.8,-1.6
2,27,2003,50.7,-4.9
2,28,2003,48.5,-7.4
3,1,2003,48,-8.1
3,2,2003,48.6,-7.7
3,3,2003,45.1,-11.4
3,4,2003,53.1,-3.6
3,5,2003,51.8,-5.2
3,6,2003,50.5,-6.7
3,7,2003,56.6,-0.8
3,8,2003,60.1,2.4
3,9,2003,61.8,3.9
3,10,2003,65.3,7.1
3,11,2003,64.8,6.4
3,12,2003,65.7,7
3,13,2003,68.1,9.1
3,14,2003,69.4,10.2
3,15,2003,65.3,5.8
3,16,2003,63.9,4.1
3,17,2003,53.1,-6.9
3,18,2003,50.3,-10
3,19,2003,56.2,-4.4
3,20,2003,55.6,-5.3
3,21,2003,59.5,-1.6
3,22,2003,60,-1.4
3,23,2003,64,2.3
3,24,2003,67.5,5.5
3,25,2003,66.1,3.8
3,26,2003,66.8,4.2
3,27,2003,69,6.1
3,28,2003,60.8,-2.4
3,29,2003,58.4,-5.1
3,30,2003,62.7,-1.1
3,31,2003,69.3,5.2
4,1,2003,72.5,8.1
4,2,2003,73,8.3
4,3,2003,62.6,-2.4
4,4,2003,57.3,-8
4,5,2003,59.1,-6.6
4,6,2003,54.9,-11.1
4,7,2003,57.6,-8.7
4,8,2003,65.3,-1.3
4,9,2003,74.7,7.8
4,10,2003,72.9,5.7
4,11,2003,69.2,1.6
4,12,2003,71.3,3.4
4,13,2003,73,4.8
4,14,2003,73.3,4.8
4,15,2003,63.7,-5.1
4,16,2003,59.8,-9.4
4,17,2003,68,-1.5
4,18,2003,59.6,-10.2
4,19,2003,59.2,-10.9
4,20,2003,65.9,-4.6
4,21,2003,70.8,0
4,22,2003,67,-4.1
4,23,2003,58.5,-12.9
4,24,2003,63.1,-8.6
4,25,2003,68.1,-4
4,26,2003,72.2,-0.2
4,27,2003,75.3,2.6
4,28,2003,75.2,2.2
4,29,2003,74.2,0.9
4,30,2003,69.5,-4.2
5,1,2003,70.3,-3.7
5,2,2003,72.2,-2.1
5,3,2003,81.5,6.9
5,4,2003,69.7,-5.2
5,5,2003,66.5,-8.7
5,6,2003,69.9,-5.6
5,7,2003,73.2,-2.6
5,8,2003,68.1,-8
5,9,2003,70,-6.4
5,10,2003,68.2,-8.5
5,11,2003,69.4,-7.6
5,12,2003,76.1,-1.2
5,13,2003,80,2.4
5,14,2003,82.2,4.3
5,15,2003,81.9,3.7
5,16,2003,78.2,-0.3
5,17,2003,84.6,5.8
5,18,2003,83.4,4.3
5,19,2003,81,1.6
5,20,2003,82.6,3
5,21,2003,88.6,8.7
5,22,2003,87.7,7.5
5,23,2003,85.3,4.9
5,24,2003,88,7.3
5,25,2003,80.9,-0.1
5,26,2003,81.3,0.1
5,27,2003,86.2,4.7
5,28,2003,92.2,10.5
5,29,2003,90.6,8.6
5,30,2003,80.1,-2.1
5,31,2003,80.3,-2.2
6,1,2003,86.6,3.9
6,2,2003,89.4,6.5
6,3,2003,87.8,4.6
6,4,2003,95.7,12.3
6,5,2003,87.6,4
6,6,2003,86.7,2.9
6,7,2003,85.9,1.8
6,8,2003,95.9,11.6
6,9,2003,88.2,3.7
6,10,2003,83,-1.7
6,11,2003,81.4,-3.5
6,12,2003,82.7,-2.4
6,13,2003,81.9,-3.3
6,14,2003,84,-1.4
6,15,2003,95.8,10.2
6,16,2003,90.1,4.3
6,17,2003,90.8,4.8
6,18,2003,89.9,3.8
6,19,2003,88.3,2
6,20,2003,85.4,-1
6,21,2003,83.6,-3
6,22,2003,84.5,-2.2
6,23,2003,86.7,-0.2
6,24,2003,85.8,-1.2
6,25,2003,85.5,-1.6
6,26,2003,87.8,0.5
6,27,2003,91.7,4.3
6,28,2003,93.6,6.1
6,29,2003,92.7,5.1
6,30,2003,92.6,4.9
7,1,2003,92.3,4.5
7,2,2003,91.4,3.5
7,3,2003,90.9,2.9
7,4,2003,93.3,5.2
7,5,2003,91.5,3.3
7,6,2003,91.8,3.6
7,7,2003,93.1,4.8
7,8,2003,93.1,4.7
7,9,2003,93.2,4.8
7,10,2003,93.2,4.7
7,11,2003,94,5.5
7,12,2003,90.3,1.8
7,13,2003,89.4,0.8
7,14,2003,91.3,2.7
7,15,2003,95.7,7.1
7,16,2003,92,3.4
7,17,2003,94.3,5.6
7,18,2003,80.2,-8.5
7,19,2003,86.9,-1.8
7,20,2003,92.1,3.4
7,21,2003,88,-0.7
7,22,2003,90.7,2.1
7,23,2003,87.8,-0.8
7,24,2003,88.8,0.2
7,25,2003,89.1,0.5
7,26,2003,84,-4.5
7,27,2003,84.7,-3.8
7,28,2003,84.6,-3.8
7,29,2003,77.9,-10.5
7,30,2003,76.4,-11.9
7,31,2003,83.2,-5
8,1,2003,78,-10.2
8,2,2003,78.6,-9.5
8,3,2003,85.8,-2.2
8,4,2003,88.9,1
8,5,2003,90.2,2.4
8,6,2003,90.9,3.2
8,7,2003,93,5.4
8,8,2003,91.3,3.8
8,9,2003,90,2.6
8,10,2003,95,7.7
8,11,2003,93.8,6.6
8,12,2003,92.7,5.7
8,13,2003,91.8,4.9
8,14,2003,82.2,-4.6
8,15,2003,75.2,-11.4
8,16,2003,78.6,-7.9
8,17,2003,85.6,-0.7
8,18,2003,88.5,2.3
8,19,2003,80.4,-5.6
8,20,2003,81.1,-4.7
8,21,2003,84.1,-1.6
8,22,2003,90.2,4.7
8,23,2003,82.7,-2.6
8,24,2003,82.2,-2.9
8,25,2003,84.7,-0.2
8,26,2003,80.7,-4
8,27,2003,75.4,-9.1
8,28,2003,76.9,-7.4
8,29,2003,83.8,-0.3
8,30,2003,85.1,1.2
8,31,2003,88.7,5
9,1,2003,88.8,5.3
9,2,2003,89.8,6.5
9,3,2003,88.8,5.8
9,4,2003,87.3,4.5
9,5,2003,86.8,4.2
9,6,2003,84.9,2.6
9,7,2003,82.7,0.6
9,8,2003,83.8,2
9,9,2003,82.3,0.7
9,10,2003,79,-2.3
9,11,2003,79.8,-1.3
9,12,2003,80.1,-0.7
9,13,2003,81.2,0.7
9,14,2003,82.7,2.4
9,15,2003,84.2,4.2
9,16,2003,84.3,4.6
9,17,2003,84.6,5.2
9,18,2003,83.5,4.3
9,19,2003,83.5,4.6
9,20,2003,84.2,5.6
9,21,2003,83.5,5.2
9,22,2003,86.2,8.2
9,23,2003,83.2,5.5
9,24,2003,69.8,-7.6
9,25,2003,72.4,-4.7
9,26,2003,76.8,0
9,27,2003,78.8,2.3
9,28,2003,81,4.8
9,29,2003,83,7.1
9,30,2003,83.5,7.9
10,1,2003,84.6,9.3
10,2,2003,82.6,7.6
10,3,2003,81,6.3
10,4,2003,77.4,3
10,5,2003,76.9,2.8
10,6,2003,76.8,3
10,7,2003,71.7,-1.7
10,8,2003,71.1,-2
10,9,2003,75.2,2.4
10,10,2003,70.9,-1.6
10,11,2003,71.8,-0.4
10,12,2003,77.3,5.4
10,13,2003,76.5,5
10,14,2003,76.5,5.3
10,15,2003,76.5,5.6
10,16,2003,76.2,5.6
10,17,2003,77.3,7.1
10,18,2003,78.8,8.9
10,19,2003,78.2,8.6
10,20,2003,78.9,9.6
10,21,2003,78.8,9.8
10,22,2003,78.5,9.9
10,23,2003,76.5,8.2
10,24,2003,74.6,6.6
10,25,2003,72.4,4.7
10,26,2003,68.6,1.2
10,27,2003,64,-3
10,28,2003,65.4,-1.3
10,29,2003,67,0.6
10,30,2003,71.4,5.3
10,31,2003,70.7,4.9
11,1,2003,71.9,6.4
11,2,2003,66.4,1.3
11,3,2003,62.8,-2
11,4,2003,52.5,-12
11,5,2003,54.6,-9.6
11,6,2003,57.5,-6.4
11,7,2003,57.8,-5.8
11,8,2003,58.4,-4.9
11,9,2003,61.6,-1.4
11,10,2003,64.5,1.8
11,11,2003,62.1,-0.3
11,12,2003,63.3,1.2
11,13,2003,61.9,0.1
11,14,2003,59.8,-1.7
11,15,2003,57.6,-3.6
11,16,2003,55.5,-5.5
11,17,2003,52.9,-7.8
11,18,2003,53.2,-7.2
11,19,2003,62.4,2.3
11,20,2003,64.4,4.6
11,21,2003,62.1,2.5
11,22,2003,61.2,1.9
11,23,2003,45.8,-13.2
11,24,2003,48.8,-10
11,25,2003,54.6,-3.9
11,26,2003,56.9,-1.4
11,27,2003,53.2,-4.8
11,28,2003,55.8,-2
11,29,2003,58.6,1.1
11,30,2003,57,-0.3
12,1,2003,59,1.9
12,2,2003,57.9,1.1
12,3,2003,54.6,-2
12,4,2003,55.4,-1
12,5,2003,59.7,3.6
12,6,2003,65.9,10
12,7,2003,60.9,5.2
12,8,2003,60.2,4.7
12,9,2003,47.8,-7.5
12,10,2003,49.8,-5.3
12,11,2003,46.6,-8.3
12,12,2003,47.5,-7.2
12,13,2003,42.9,-11.6
12,14,2003,49.5,-4.9
12,15,2003,49.3,-4.9
12,16,2003,45.3,-8.7
12,17,2003,53.6,-0.2
12,18,2003,53.4,-0.3
12,19,2003,62.5,9
12,20,2003,61.7,8.3
12,21,2003,57.1,3.9
12,22,2003,51.9,-1.2
12,23,2003,57.5,4.6
12,24,2003,58.1,5.3
12,25,2003,50.1,-2.6
12,26,2003,52.5,0
12,27,2003,41.2,-11.2
12,28,2003,35.7,-16.6
12,29,2003,37.6,-14.6
12,30,2003,50,-2.1
12,31,2003,49.6,-2.4
1,1,2004,52.8,0.9
1,2,2004,52.5,0.7
1,3,2004,52.5,0.8
1,4,2004,46.9,-4.8
1,5,2004,41.6,-10
1,6,2004,49.2,-2.3
1,7,2004,56.2,4.7
1,8,2004,55,3.6
1,9,2004,56.2,4.8
1,10,2004,61.8,10.5
1,11,2004,63.2,11.9
1,12,2004,60,8.7
1,13,2004,58.9,7.7
1,14,2004,60.4,9.2
1,15,2004,53.7,2.5
1,16,2004,52.9,1.7
1,17,2004,53.4,2.2
1,18,2004,50.3,-0.9
1,19,2004,51.4,0.2
1,20,2004,53.7,2.5
1,21,2004,52,0.8
1,22,2004,50.6,-0.6
1,23,2004,49.6,-1.7
1,24,2004,50.3,-1
1,25,2004,49.1,-2.2
1,26,2004,44.8,-6.6
1,27,2004,44.2,-7.2
1,28,2004,46.2,-5.3
1,29,2004,47.3,-4.3
1,30,2004,49.3,-2.3
1,31,2004,53.9,2.2
2,1,2004,48.7,-3.1
2,2,2004,46.2,-5.7
2,3,2004,50.2,-1.8
2,4,2004,46.8,-5.2
2,5,2004,44.4,-7.7
2,6,2004,43.4,-8.8
2,7,2004,50.1,-2.3
2,8,2004,48.6,-3.9
2,9,2004,45.3,-7.3
2,10,2004,47.3,-5.4
2,11,2004,48.4,-4.4
2,12,2004,47.7,-5.3
2,13,2004,43,-10.1
2,14,2004,44.8,-8.5
2,15,2004,50.6,-2.8
2,16,2004,52.2,-1.4
2,17,2004,58.3,4.6
2,18,2004,65.3,11.4
2,19,2004,59.8,5.7
2,20,2004,54.6,0.4
2,21,2004,57.5,3.1
2,22,2004,54.6,0
2,23,2004,54.2,-0.6
2,24,2004,49,-6
2,25,2004,50.4,-4.8
2,26,2004,54.9,-0.5
2,27,2004,57.2,1.6
2,28,2004,48,-7.8
2,29,2004,46.3,-9.7
3,1,2004,50,-6.2
3,2,2004,56.5,0
3,3,2004,50.3,-6.4
3,4,2004,44.4,-12.5
3,5,2004,50.4,-6.7
3,6,2004,51.5,-5.9
3,7,2004,60.6,3
3,8,2004,72.4,14.5
3,9,2004,72.8,14.7
3,10,2004,72.6,14.2
3,11,2004,71.8,13.2
3,12,2004,64.8,5.9
3,13,2004,63.1,4
3,14,2004,62.5,3.1
3,15,2004,66.8,7.1
3,16,2004,68.7,8.7
3,17,2004,68.8,8.6
3,18,2004,69.2,8.7
3,19,2004,71.8,11
3,20,2004,73.6,12.5
3,21,2004,77,15.6
3,22,2004,74.5,12.9
3,23,2004,75.5,13.6
3,24,2004,73.5,11.3
3,25,2004,71.9,9.4
3,26,2004,72.2,9.4
3,27,2004,65.5,2.4
3,28,2004,66.8,3.4
3,29,2004,73.3,9.6
3,30,2004,77.4,13.4
3,31,2004,74.9,10.6
4,1,2004,66.1,1.5
4,2,2004,55.5,-9.4
4,3,2004,51.1,-14.2
4,4,2004,52.5,-13.1
4,5,2004,53.5,-12.4
4,6,2004,62.5,-3.7
4,7,2004,66.3,-0.2
4,8,2004,66.3,-0.5
4,9,2004,67.7,0.5
4,10,2004,67.4,-0.1
4,11,2004,62.1,-5.7
4,12,2004,60.8,-7.3
4,13,2004,67.4,-1
4,14,2004,71.6,2.8
4,15,2004,71.4,2.3
4,16,2004,71.6,2.2
4,17,2004,74.9,5.2
4,18,2004,62.7,-7.3
4,19,2004,65.1,-5.3
4,20,2004,65.7,-5
4,21,2004,66.9,-4.1
4,22,2004,72.2,0.9
4,23,2004,65.2,-6.5
4,24,2004,66.3,-5.7
4,25,2004,70.3,-2
4,26,2004,76.9,4.3
4,27,2004,80.7,7.8
4,28,2004,75.8,2.5
4,29,2004,77.4,3.8
4,30,2004,69.2,-4.7
5,1,2004,68.1,-6.1
5,2,2004,76.7,2.2
5,3,2004,78.7,3.9
5,4,2004,77.9,2.8
5,5,2004,80.3,4.9
5,6,2004,80.6,4.8
5,7,2004,81.3,5.2
5,8,2004,83.6,7.2
5,9,2004,82.3,5.6
5,10,2004,87.8,10.8
5,11,2004,81.5,4.2
5,12,2004,77.9,0.3
5,13,2004,73,-4.8
5,14,2004,76.3,-1.8
5,15,2004,80,1.6
5,16,2004,82.2,3.5
5,17,2004,82.5,3.5
5,18,2004,81.3,2
5,19,2004,82,2.4
5,20,2004,82,2.2
5,21,2004,80.5,0.4
5,22,2004,75.8,-4.6
5,23,2004,76.3,-4.3
5,24,2004,78.9,-2
5,25,2004,80.9,-0.3
5,26,2004,80.5,-0.9
5,27,2004,74.6,-7.1
5,28,2004,77,-4.9
5,29,2004,78.5,-3.7
5,30,2004,78.4,-4
5,31,2004,81.6,-1
6,1,2004,84.4,1.5
6,2,2004,87.4,4.3
6,3,2004,89.8,6.5
6,4,2004,90.9,7.3
6,5,2004,90.5,6.7
6,6,2004,89.7,5.7
6,7,2004,88.8,4.6
6,8,2004,86.2,1.8
6,9,2004,84.9,0.3
6,10,2004,76.5,-8.3
6,11,2004,78,-7
6,12,2004,81.2,-4
6,13,2004,83.9,-1.5
6,14,2004,95.2,9.6
6,15,2004,86.1,0.4
6,16,2004,85.9,0
6,17,2004,85.5,-0.6
6,18,2004,86.5,0.3
6,19,2004,87.2,0.8
6,20,2004,90.1,3.6
6,21,2004,86.7,0
6,22,2004,85.4,-1.4
6,23,2004,81.9,-5.1
6,24,2004,87.2,0.1
6,25,2004,91.5,4.3
6,26,2004,89.9,2.6
6,27,2004,89.9,2.4
6,28,2004,87.4,-0.2
6,29,2004,86.4,-1.3
6,30,2004,85,-2.8
7,1,2004,83.4,-4.5
7,2,2004,95,7
7,3,2004,87.7,-0.4
7,4,2004,86,-2.1
7,5,2004,87.4,-0.8
7,6,2004,88.4,0.1
7,7,2004,90.7,2.4
7,8,2004,87.7,-0.7
7,9,2004,81.3,-7.1
7,10,2004,88.5,0
7,11,2004,86.9,-1.6
7,12,2004,90.9,2.3
7,13,2004,87,-1.6
7,14,2004,79.9,-8.7
7,15,2004,83,-5.6
7,16,2004,89.7,1
7,17,2004,87.6,-1.1
7,18,2004,85.5,-3.2
7,19,2004,90.5,1.8
7,20,2004,89.8,1.1
7,21,2004,93.2,4.6
7,22,2004,93,4.4
7,23,2004,89.1,0.5
7,24,2004,81.9,-6.7
7,25,2004,87.1,-1.4
7,26,2004,91.2,2.7
7,27,2004,80.7,-7.7
7,28,2004,84.9,-3.5
7,29,2004,81.3,-7
7,30,2004,87.6,-0.7
7,31,2004,89.8,1.6
8,1,2004,86.8,-1.3
8,2,2004,82.2,-5.8
8,3,2004,87.1,-0.9
8,4,2004,86.3,-1.6
8,5,2004,87.1,-0.7
8,6,2004,84.7,-3
8,7,2004,86.1,-1.5
8,8,2004,90.5,3.1
8,9,2004,91,3.7
8,10,2004,88.6,1.4
8,11,2004,91.6,4.5
8,12,2004,86.9,-0.1
8,13,2004,88,1.2
8,14,2004,75.2,-11.5
8,15,2004,81.1,-5.4
8,16,2004,79.6,-6.8
8,17,2004,78.7,-7.5
8,18,2004,81.7,-4.4
8,19,2004,80.3,-5.6
8,20,2004,81.8,-3.9
8,21,2004,81,-4.5
8,22,2004,82.4,-3
8,23,2004,86.4,1.2
8,24,2004,81.3,-3.7
8,25,2004,78.4,-6.4
8,26,2004,84.6,0
8,27,2004,85.8,1.4
8,28,2004,86.9,2.7
8,29,2004,87.1,3.1
8,30,2004,87.3,3.5
8,31,2004,87.3,3.8
9,1,2004,88.1,4.8
9,2,2004,89.2,6.1
9,3,2004,87.1,4.2
9,4,2004,80.9,-1.7
9,5,2004,76.4,-6
9,6,2004,80.9,-1.2
9,7,2004,86.6,4.7
9,8,2004,85,3.4
9,9,2004,87.4,6
9,10,2004,86.4,5.3
9,11,2004,85.5,4.6
9,12,2004,87.1,6.5
9,13,2004,88.6,8.3
9,14,2004,87.5,7.4
9,15,2004,86.5,6.7
9,16,2004,85.3,5.8
9,17,2004,85.2,6
9,18,2004,82.5,3.5
9,19,2004,74.4,-4.3
9,20,2004,75.3,-3.1
9,21,2004,73.4,-4.7
9,22,2004,68.1,-9.7
9,23,2004,71.8,-5.7
9,24,2004,76.3,-0.9
9,25,2004,78,1.1
9,26,2004,76,-0.6
9,27,2004,78.2,1.9
9,28,2004,80.4,4.4
9,29,2004,80.1,4.4
9,30,2004,72.9,-2.5
10,1,2004,71.8,-3.3
10,2,2004,75,0.2
10,3,2004,77,2.5
10,4,2004,76.9,2.7
10,5,2004,74.8,1
10,6,2004,73.9,0.4
10,7,2004,74.6,1.4
10,8,2004,75.5,2.6
10,9,2004,79.9,7.3
10,10,2004,83,10.7
10,11,2004,72.8,0.9
10,12,2004,74.1,2.5
10,13,2004,73.2,1.9
10,14,2004,74.1,3.1
10,15,2004,73.8,3.1
10,16,2004,75.1,4.8
10,17,2004,75.8,5.8
10,18,2004,65.5,-4.2
10,19,2004,65.2,-4.2
10,20,2004,70.2,1.2
10,21,2004,73.4,4.7
10,22,2004,57,-11.4
10,23,2004,59.8,-8.3
10,24,2004,64.2,-3.6
10,25,2004,68.6,1.2
10,26,2004,68.6,1.5
10,27,2004,68.5,1.7
10,28,2004,66.8,0.3
10,29,2004,53.6,-12.6
10,30,2004,55.1,-10.7
10,31,2004,55.6,-9.9
11,1,2004,54.2,-11
11,2,2004,51,-13.9
11,3,2004,58,-6.6
11,4,2004,60.3,-4
11,5,2004,70.2,6.2
11,6,2004,70.4,6.7
11,7,2004,71,7.6
11,8,2004,65,1.9
11,9,2004,56.8,-6
11,10,2004,56.3,-6.2
11,11,2004,58.2,-4
11,12,2004,56.9,-5
11,13,2004,53.1,-8.5
11,14,2004,58,-3.3
11,15,2004,56.2,-4.8
11,16,2004,56.7,-4
11,17,2004,55.8,-4.7
11,18,2004,57.4,-2.8
11,19,2004,56.5,-3.4
11,20,2004,58.2,-1.4
11,21,2004,60.9,1.5
11,22,2004,56.6,-2.5
11,23,2004,46.2,-12.7
11,24,2004,48.1,-10.5
11,25,2004,48.9,-9.4
11,26,2004,50.4,-7.7
11,27,2004,53.1,-4.7
11,28,2004,60.2,2.6
11,29,2004,43,-14.4
11,30,2004,40.6,-16.5
12,1,2004,44.5,-12.4
12,2,2004,45,-11.7
12,3,2004,48.7,-7.7
12,4,2004,49.4,-6.8
12,5,2004,46.3,-9.7
12,6,2004,47.3,-8.5
12,7,2004,45.1,-10.5
12,8,2004,47,-8.4
12,9,2004,49,-6.2
12,10,2004,54.6,-0.4
12,11,2004,63.1,8.3
12,12,2004,60,5.4
12,13,2004,60.3,5.9
12,14,2004,60.3,6.1
12,15,2004,56.1,2.1
12,16,2004,50.1,-3.8
12,17,2004,55.9,2.2
12,18,2004,52.5,-1.1
12,19,2004,56.9,3.5
12,20,2004,56.7,3.4
12,21,2004,55,1.9
12,22,2004,48.1,-4.9
12,23,2004,44.7,-8.1
12,24,2004,44.3,-8.4
12,25,2004,42.4,-10.2
12,26,2004,50,-2.5
12,27,2004,53.7,1.4
12,28,2004,54.4,2.2
12,29,2004,63,10.9
12,30,2004,57.9,5.9
12,31,2004,53.6,1.7
1,1,2005,51.9,0
1,2,2005,53.8,2
1,3,2005,56.3,4.6
1,4,2005,53.1,1.5
1,5,2005,44.4,-7.2
1,6,2005,43,-8.5
1,7,2005,46.6,-4.8
1,8,2005,55.2,3.8
1,9,2005,55.1,3.8
1,10,2005,56.7,5.4
1,11,2005,59.8,8.5
1,12,2005,50.1,-1.1
1,13,2005,44.4,-6.8
1,14,2005,50.4,-0.8
1,15,2005,51.8,0.6
1,16,2005,54.6,3.4
1,17,2005,58.3,7.1
1,18,2005,58.4,7.2
1,19,2005,64.6,13.4
1,20,2005,63.8,12.6
1,21,2005,60.6,9.4
1,22,2005,55.7,4.4
1,23,2005,60.4,9.1
1,24,2005,59.5,8.2
1,25,2005,57.1,5.7
1,26,2005,57.2,5.8
1,27,2005,55.6,4.1
1,28,2005,52.6,1.1
1,29,2005,51.4,-0.2
1,30,2005,45.3,-6.4
1,31,2005,45.3,-6.5
2,1,2005,50.7,-1.1
2,2,2005,50.9,-1
2,3,2005,52,0
2,4,2005,54.4,2.3
2,5,2005,50.5,-1.7
2,6,2005,53.8,1.5
2,7,2005,55.5,3.1
2,8,2005,52.8,0.2
2,9,2005,53.2,0.5
2,10,2005,61.6,8.8
2,11,2005,57.5,4.6
2,12,2005,56.1,3
2,13,2005,53.8,0.6
2,14,2005,54.3,0.9
2,15,2005,58.5,5
2,16,2005,59.9,6.2
2,17,2005,59.5,5.6
2,18,2005,60.1,6.1
2,19,2005,56.5,2.3
2,20,2005,55.8,1.4
2,21,2005,55.8,1.2
2,22,2005,59.4,4.7
2,23,2005,56.6,1.7
2,24,2005,52.5,-2.6
2,25,2005,52.8,-2.5
2,26,2005,54.3,-1.2
2,27,2005,53.5,-2.2
2,28,2005,53,-3
3,1,2005,54.5,-1.7
3,2,2005,56,-0.4
3,3,2005,57.1,0.5
3,4,2005,57.1,0.2
3,5,2005,57,-0.1
3,6,2005,51.1,-6.2
3,7,2005,50.2,-7.4
3,8,2005,56.9,-0.9
3,9,2005,61.5,3.4
3,10,2005,64.6,6.3
3,11,2005,68.1,9.5
3,12,2005,68.8,10
3,13,2005,67.4,8.3
3,14,2005,65.7,6.4
3,15,2005,55.6,-4
3,16,2005,52.9,-7
3,17,2005,55.6,-4.6
3,18,2005,62.6,2.2
3,19,2005,59.3,-1.4
3,20,2005,59.7,-1.3
3,21,2005,56.6,-4.7
3,22,2005,58.7,-2.9
3,23,2005,65.6,3.7
3,24,2005,58.4,-3.7
3,25,2005,58.3,-4.1
3,26,2005,56.8,-5.9
3,27,2005,57.4,-5.6
3,28,2005,65.2,1.9
3,29,2005,60.5,-3.1
3,30,2005,58.5,-5.4
3,31,2005,56.9,-7.4
4,1,2005,61.5,-3.1
4,2,2005,67.8,2.9
4,3,2005,70.9,5.7
4,4,2005,69.4,3.9
4,5,2005,61.3,-4.5
4,6,2005,64.8,-1.3
4,7,2005,77.3,10.9
4,8,2005,76.6,9.8
4,9,2005,62.7,-4.4
4,10,2005,57.7,-9.7
4,11,2005,60.2,-7.5
4,12,2005,65.4,-2.6
4,13,2005,73.1,4.7
4,14,2005,73.7,5
4,15,2005,73.2,4.2
4,16,2005,75.3,6
4,17,2005,75.5,5.9
4,18,2005,74.4,4.4
4,19,2005,72.4,2.1
4,20,2005,66.8,-3.8
4,21,2005,69.3,-1.6
4,22,2005,71.4,0.1
4,23,2005,74.5,2.9
4,24,2005,65.4,-6.5
4,25,2005,62.6,-9.6
4,26,2005,64.7,-7.8
4,27,2005,69.6,-3.3
4,28,2005,70.1,-3.1
4,29,2005,64.8,-8.7
4,30,2005,64.6,-9.2
5,1,2005,71.3,-2.8
5,2,2005,70.6,-3.8
5,3,2005,71.6,-3.1
5,4,2005,75,-0.1
5,5,2005,78.7,3.3
5,6,2005,73.4,-2.3
5,7,2005,65.4,-10.6
5,8,2005,68.5,-7.8
5,9,2005,73.2,-3.4
5,10,2005,75.4,-1.5
5,11,2005,68.2,-9
5,12,2005,69.3,-8.2
5,13,2005,73.6,-4.2
5,14,2005,77.8,-0.3
5,15,2005,79.8,1.4
5,16,2005,82.2,3.6
5,17,2005,80.5,1.6
5,18,2005,75.9,-3.3
5,19,2005,79.6,0.1
5,20,2005,84.8,5
5,21,2005,89.9,9.9
5,22,2005,91.7,11.4
5,23,2005,91.3,10.7
5,24,2005,90.3,9.5
5,25,2005,87.4,6.3
5,26,2005,93.2,11.8
5,27,2005,84.4,2.8
5,28,2005,74,-7.9
5,29,2005,73.4,-8.7
5,30,2005,82.5,0.2
5,31,2005,78.6,-4
6,1,2005,82.7,-0.1
6,2,2005,83.5,0.4
6,3,2005,78.2,-5.1
6,4,2005,79.3,-4.2
6,5,2005,81.1,-2.6
6,6,2005,81.7,-2.2
6,7,2005,81.6,-2.6
6,8,2005,80.6,-3.8
6,9,2005,81.2,-3.4
6,10,2005,79.5,-5.3
6,11,2005,81.5,-3.5
6,12,2005,80.3,-4.8
6,13,2005,82.1,-3.2
6,14,2005,87.7,2.2
6,15,2005,90.8,5.1
6,16,2005,94.8,8.9
6,17,2005,86.6,0.6
6,18,2005,85.1,-1.1
6,19,2005,87.9,1.5
6,20,2005,91.5,5
6,21,2005,97.3,10.6
6,22,2005,95.5,8.7
6,23,2005,91.2,4.3
6,24,2005,88.1,1
6,25,2005,90.1,2.9
6,26,2005,89.9,2.6
6,27,2005,90.9,3.5
6,28,2005,93.7,6.2
6,29,2005,91.4,3.7
6,30,2005,92.2,4.4
7,1,2005,93.8,5.9
7,2,2005,92.8,4.9
7,3,2005,92,4
7,4,2005,92.6,4.5
7,5,2005,93.8,5.6
7,6,2005,95.1,6.8
7,7,2005,94.5,6.2
7,8,2005,92,3.6
7,9,2005,90.9,2.5
7,10,2005,90.1,1.6
7,11,2005,91.4,2.9
7,12,2005,92.4,3.8
7,13,2005,97.8,9.2
7,14,2005,93.2,4.6
7,15,2005,95.3,6.7
7,16,2005,94.7,6
7,17,2005,95.4,6.7
7,18,2005,93,4.3
7,19,2005,90.9,2.2
7,20,2005,89.8,1.1
7,21,2005,92.4,3.8
7,22,2005,88.4,-0.2
7,23,2005,83.5,-5.1
7,24,2005,80.8,-7.8
7,25,2005,85,-3.5
7,26,2005,87.2,-1.3
7,27,2005,89.9,1.4
7,28,2005,84.9,-3.5
7,29,2005,90.5,2.2
7,30,2005,83.1,-5.2
7,31,2005,79.1,-9.1
8,1,2005,81.2,-6.9
8,2,2005,83.3,-4.8
8,3,2005,78.5,-9.5
8,4,2005,83.1,-4.8
8,5,2005,83.3,-4.5
8,6,2005,84.8,-2.9
8,7,2005,82.3,-5.3
8,8,2005,78.6,-8.9
8,9,2005,78.6,-8.8
8,10,2005,77.9,-9.3
8,11,2005,80.8,-6.3
8,12,2005,84.9,-2.1
8,13,2005,81,-5.9
8,14,2005,73.9,-12.8
8,15,2005,76.7,-9.9
8,16,2005,81.2,-5.2
8,17,2005,85.5,-0.8
8,18,2005,84.5,-1.6
8,19,2005,84.9,-1
8,20,2005,84.9,-0.9
8,21,2005,88.9,3.3
8,22,2005,89.2,3.8
8,23,2005,75.8,-9.4
8,24,2005,77.5,-7.5
8,25,2005,83.4,-1.4
8,26,2005,86.2,1.6
8,27,2005,87.3,2.9
8,28,2005,87.3,3.1
8,29,2005,88.2,4.2
8,30,2005,89,5.2
8,31,2005,86.9,3.3
9,1,2005,85.3,1.9
9,2,2005,84.9,1.8
9,3,2005,85.1,2.2
9,4,2005,83.8,1.1
9,5,2005,86.7,4.3
9,6,2005,86.6,4.4
9,7,2005,89.5,7.5
9,8,2005,84.8,3.1
9,9,2005,83.5,2
9,10,2005,81,-0.2
9,11,2005,82.5,1.6
9,12,2005,83,2.3
9,13,2005,78.1,-2.3
9,14,2005,77.1,-3
9,15,2005,76.5,-3.4
9,16,2005,80.9,1.3
9,17,2005,81.4,2.1
9,18,2005,82.5,3.5
9,19,2005,81.9,3.1
9,20,2005,85.6,7.1
9,21,2005,85.5,7.3
9,22,2005,84,6.1
9,23,2005,85.5,7.9
9,24,2005,85.6,8.3
9,25,2005,84.6,7.6
9,26,2005,83.8,7.1
9,27,2005,85.8,9.4
9,28,2005,85.8,9.7
9,29,2005,81.5,5.7
9,30,2005,78,2.5
10,1,2005,78.4,3.2
10,2,2005,82,7.1
10,3,2005,82,7.4
10,4,2005,80.5,6.3
10,5,2005,81.1,7.2
10,6,2005,82.4,8.8
10,7,2005,78.3,5
10,8,2005,81.4,8.4
10,9,2005,75.3,2.6
10,10,2005,67.3,-5
10,11,2005,65.8,-6.2
10,12,2005,66.2,-5.5
10,13,2005,73.7,2.3
10,14,2005,74.8,3.7
10,15,2005,70.3,-0.4
10,16,2005,72.7,2.3
10,17,2005,77.4,7.3
10,18,2005,69.9,0.1
10,19,2005,64.1,-5.3
10,20,2005,63.7,-5.4
10,21,2005,70.1,1.3
10,22,2005,70.3,1.8
10,23,2005,69.9,1.7
10,24,2005,70.8,3
10,25,2005,74.1,6.6
10,26,2005,66,-1.2
10,27,2005,66.4,-0.5
10,28,2005,67.9,1.3
10,29,2005,66.3,0.1
10,30,2005,66.4,0.5
10,31,2005,64.8,-0.8
11,1,2005,72,6.7
11,2,2005,72.4,7.4
11,3,2005,70.1,5.4
11,4,2005,68,3.6
11,5,2005,65.3,1.2
11,6,2005,65.2,1.4
11,7,2005,69.1,5.6
11,8,2005,69.7,6.5
11,9,2005,72.1,9.2
11,10,2005,71.1,8.5
11,11,2005,67.6,5.3
11,12,2005,58.7,-3.3
11,13,2005,59.5,-2.2
11,14,2005,61.9,0.5
11,15,2005,61.7,0.6
11,16,2005,62.3,1.5
11,17,2005,63.3,2.8
11,18,2005,58.8,-1.5
11,19,2005,59,-1
11,20,2005,62.2,2.5
11,21,2005,62.5,3.1
11,22,2005,60.8,1.6
11,23,2005,63.6,4.7
11,24,2005,63.1,4.4
11,25,2005,60.9,2.5
11,26,2005,59.7,1.5
11,27,2005,54.2,-3.7
11,28,2005,40.8,-16.9
11,29,2005,44.7,-12.7
11,30,2005,52.9,-4.3
12,1,2005,54.2,-2.7
12,2,2005,57.6,0.9
12,3,2005,60.5,4
12,4,2005,51.4,-4.9
12,5,2005,45.4,-10.6
12,6,2005,43.5,-12.3
12,7,2005,45.8,-9.8
12,8,2005,47.9,-7.5
12,9,2005,52.9,-2.3
12,10,2005,52.6,-2.4
12,11,2005,56.7,1.9
12,12,2005,58.5,3.9
12,13,2005,56.2,1.8
12,14,2005,48.6,-5.7
12,15,2005,49.9,-4.2
12,16,2005,47.2,-6.7
12,17,2005,47,-6.8
12,18,2005,49.2,-4.4
12,19,2005,52.5,-0.9
12,20,2005,51,-2.3
12,21,2005,56.5,3.4
12,22,2005,61.4,8.4
12,23,2005,59.2,6.3
12,24,2005,58.2,5.5
12,25,2005,59,6.4
12,26,2005,58.9,6.4
12,27,2005,56.7,4.3
12,28,2005,55.1,2.8
12,29,2005,54.2,2
12,30,2005,52.5,0.4
12,31,2005,54.2,2.2
1,1,2006,58.7,6.8
1,2,2006,61.7,9.9
1,3,2006,62.4,10.7
1,4,2006,52.3,0.7
1,5,2006,54.5,2.9
1,6,2006,63,11.5
1,7,2006,62.9,11.4
1,8,2006,58.5,7.1
1,9,2006,51.7,0.3
1,10,2006,53.8,2.5
1,11,2006,54.2,2.9
1,12,2006,52.1,0.9
1,13,2006,55.3,4.1
1,14,2006,65.6,14.4
1,15,2006,59.1,7.9
1,16,2006,46.9,-4.3
1,17,2006,41.9,-9.3
1,18,2006,47.9,-3.3
1,19,2006,52.5,1.3
1,20,2006,45.1,-6.1
1,21,2006,45.9,-5.3
1,22,2006,45.3,-6
1,23,2006,50.8,-0.5
1,24,2006,61.2,9.9
1,25,2006,62.9,11.5
1,26,2006,48.7,-2.7
1,27,2006,49.6,-1.9
1,28,2006,48.3,-3.2
1,29,2006,49.9,-1.7
1,30,2006,52.5,0.8
1,31,2006,56.2,4.5
2,1,2006,52.9,1.1
2,2,2006,54.3,2.4
2,3,2006,56,4
2,4,2006,58.7,6.6
2,5,2006,60.1,7.9
2,6,2006,55.8,3.5
2,7,2006,60.1,7.7
2,8,2006,65.4,12.9
2,9,2006,67.3,14.6
2,10,2006,53.3,0.5
2,11,2006,55.8,2.9
2,12,2006,59.9,6.8
2,13,2006,59.3,6.1
2,14,2006,60.5,7.2
2,15,2006,65,11.5
2,16,2006,59.5,5.8
2,17,2006,56.6,2.8
2,18,2006,58.2,4.2
2,19,2006,53.8,-0.4
2,20,2006,49.9,-4.4
2,21,2006,50.5,-4
2,22,2006,50.2,-4.5
2,23,2006,52.8,-2.1
2,24,2006,56,0.9
2,25,2006,56.1,0.8
2,26,2006,61.6,6.1
2,27,2006,63,7.3
2,28,2006,66.3,10.4
3,1,2006,64.7,8.6
3,2,2006,58.2,1.9
3,3,2006,60.9,4.3
3,4,2006,60,3.2
3,5,2006,57.9,0.9
3,6,2006,62.6,5.3
3,7,2006,64.8,7.3
3,8,2006,57.8,0.1
3,9,2006,51.2,-6.8
3,10,2006,56.7,-1.5
3,11,2006,51.7,-6.8
3,12,2006,46.4,-12.3
3,13,2006,45.2,-13.8
3,14,2006,52.1,-7.2
3,15,2006,62.4,2.9
3,16,2006,60.4,0.6
3,17,2006,62.1,2
3,18,2006,61.6,1.2
3,19,2006,48.4,-12.2
3,20,2006,45.2,-15.7
3,21,2006,52.4,-8.8
3,22,2006,52,-9.5
3,23,2006,56.7,-5.1
3,24,2006,66.3,4.2
3,25,2006,65.8,3.4
3,26,2006,66.6,3.9
3,27,2006,69.6,6.6
3,28,2006,67,3.7
3,29,2006,66.8,3.2
3,30,2006,60.1,-3.8
3,31,2006,64.4,0.2
4,1,2006,66.3,1.8
4,2,2006,62.7,-2.1
4,3,2006,70.4,5.3
4,4,2006,70.8,5.4
4,5,2006,70.6,4.9
4,6,2006,57.8,-8.2
4,7,2006,59.8,-6.6
4,8,2006,68.1,1.4
4,9,2006,70.7,3.7
4,10,2006,74.4,7.1
4,11,2006,67.1,-0.5
4,12,2006,71.8,3.9
4,13,2006,75.1,6.8
4,14,2006,79.5,10.9
4,15,2006,66.9,-2
4,16,2006,67.1,-2.1
4,17,2006,70,0.4
4,18,2006,68.1,-1.8
4,19,2006,68.5,-1.7
4,20,2006,69.7,-0.8
4,21,2006,73.5,2.7
4,22,2006,77.9,6.7
4,23,2006,73.1,1.6
4,24,2006,64.2,-7.6
4,25,2006,67.2,-4.9
4,26,2006,72.4,-0.1
4,27,2006,67.4,-5.4
4,28,2006,58.8,-14.3
4,29,2006,66.1,-7.3
4,30,2006,74.3,0.6
5,1,2006,78.2,4.2
5,2,2006,81.9,7.5
5,3,2006,78.4,3.7
5,4,2006,77.3,2.3
5,5,2006,74.9,-0.4
5,6,2006,72.1,-3.5
5,7,2006,74.4,-1.5
5,8,2006,75.7,-0.5
5,9,2006,79.2,2.7
5,10,2006,76.3,-0.5
5,11,2006,81.5,4.4
5,12,2006,83,5.6
5,13,2006,80.6,2.9
5,14,2006,83.9,5.9
5,15,2006,84.4,6.1
5,16,2006,85.8,7.2
5,17,2006,84.2,5.3
5,18,2006,81.1,2
5,19,2006,85.6,6.2
5,20,2006,85.8,6.1
5,21,2006,86.6,6.6
5,22,2006,82.6,2.4
5,23,2006,72.9,-7.6
5,24,2006,79.2,-1.6
5,25,2006,84.4,3.4
5,26,2006,83.7,2.4
5,27,2006,81.9,0.4
5,28,2006,79,-2.8
5,29,2006,76.3,-5.7
5,30,2006,78.6,-3.7
5,31,2006,81.3,-1.2
6,1,2006,85.9,3.1
6,2,2006,92.3,9.3
6,3,2006,88,4.8
6,4,2006,92.1,8.7
6,5,2006,90.6,6.9
6,6,2006,88.3,4.4
6,7,2006,84.4,0.3
6,8,2006,84,-0.3
6,9,2006,88.6,4.1
6,10,2006,87.4,2.7
6,11,2006,87.4,2.5
6,12,2006,87.5,2.4
6,13,2006,89,3.7
6,14,2006,94.4,8.9
6,15,2006,89.8,4.2
6,16,2006,85.2,-0.6
6,17,2006,86.9,0.9
6,18,2006,89.3,3.1
6,19,2006,91.6,5.3
6,20,2006,92.2,5.7
6,21,2006,92.4,5.8
6,22,2006,84,-2.8
6,23,2006,89.2,2.3
6,24,2006,92.4,5.4
6,25,2006,93.9,6.7
6,26,2006,89.2,1.9
6,27,2006,84.5,-2.9
6,28,2006,84.6,-2.9
6,29,2006,87.9,0.3
6,30,2006,85.2,-2.5
7,1,2006,88.4,0.6
7,2,2006,87,-0.9
7,3,2006,87.7,-0.3
7,4,2006,88.1,0
7,5,2006,81.2,-7
7,6,2006,79.4,-8.8
7,7,2006,83.6,-4.7
7,8,2006,80.4,-8
7,9,2006,89.2,0.8
7,10,2006,91.9,3.4
7,11,2006,91.1,2.6
7,12,2006,92.5,3.9
7,13,2006,94.2,5.6
7,14,2006,93,4.4
7,15,2006,93.6,5
7,16,2006,91.1,2.4
7,17,2006,87.5,-1.2
7,18,2006,90.6,1.9
7,19,2006,88.3,-0.4
7,20,2006,90.5,1.8
7,21,2006,95.3,6.7
7,22,2006,99.4,10.8
7,23,2006,97.7,9.1
7,24,2006,91.8,3.2
7,25,2006,94.9,6.4
7,26,2006,83.8,-4.7
7,27,2006,80.3,-8.2
7,28,2006,76.8,-11.6
7,29,2006,76.6,-11.8
7,30,2006,77.7,-10.6
7,31,2006,77.5,-10.7
8,1,2006,82.1,-6.1
8,2,2006,84.2,-3.9
8,3,2006,83.2,-4.8
8,4,2006,77.8,-10.1
8,5,2006,81.6,-6.2
8,6,2006,84.3,-3.4
8,7,2006,86.3,-1.3
8,8,2006,86.8,-0.7
8,9,2006,81.4,-6
8,10,2006,83.9,-3.4
8,11,2006,84.6,-2.6
8,12,2006,81.7,-5.3
8,13,2006,81.9,-5
8,14,2006,81.4,-5.3
8,15,2006,78.8,-7.8
8,16,2006,79.2,-7.3
8,17,2006,80.7,-5.6
8,18,2006,82.8,-3.3
8,19,2006,85.2,-0.8
8,20,2006,85.2,-0.6
8,21,2006,85.9,0.3
8,22,2006,82.2,-3.3
8,23,2006,83.3,-2
8,24,2006,81.8,-3.3
8,25,2006,78.6,-6.3
8,26,2006,79.4,-5.3
8,27,2006,83.5,-1
8,28,2006,83.9,-0.4
8,29,2006,86.8,2.7
8,30,2006,85.3,1.4
8,31,2006,84.2,0.5
9,1,2006,86.5,3.1
9,2,2006,85.5,2.3
9,3,2006,79.7,-3.3
9,4,2006,78.2,-4.5
9,5,2006,76.8,-5.7
9,6,2006,78.8,-3.5
9,7,2006,75,-7
9,8,2006,74.2,-7.6
9,9,2006,75.4,-6.1
9,10,2006,76.6,-4.7
9,11,2006,78.5,-2.5
9,12,2006,77.3,-3.4
9,13,2006,74.5,-6
9,14,2006,78.3,-1.9
9,15,2006,77.9,-2
9,16,2006,74.1,-5.6
9,17,2006,74.3,-5.1
9,18,2006,74.8,-4.3
9,19,2006,76.4,-2.4
9,20,2006,78.2,-0.3
9,21,2006,76.4,-1.9
9,22,2006,76.7,-1.3
9,23,2006,74.9,-2.8
9,24,2006,75.9,-1.5
9,25,2006,81.4,4.3
9,26,2006,83.6,6.8
9,27,2006,81.8,5.3
9,28,2006,78.5,2.3
9,29,2006,77.4,1.5
9,30,2006,77.9,2.3
10,1,2006,78.3,3
10,2,2006,79.5,4.6
10,3,2006,79.7,5.1
10,4,2006,80,5.7
10,5,2006,81.5,7.5
10,6,2006,76.7,3
10,7,2006,75.1,1.7
10,8,2006,74.7,1.6
10,9,2006,69.2,-3.5
10,10,2006,66.3,-6.1
10,11,2006,68.8,-3.3
10,12,2006,74,2.2
10,13,2006,74.3,2.8
10,14,2006,75.7,4.6
10,15,2006,64.1,-6.7
10,16,2006,61.1,-9.4
10,17,2006,69.6,-0.6
10,18,2006,64.9,-5
10,19,2006,60.4,-9.1
10,20,2006,62.8,-6.4
10,21,2006,65.5,-3.4
10,22,2006,67.8,-0.8
10,23,2006,71.1,2.9
10,24,2006,71.7,3.8
10,25,2006,66.9,-0.7
10,26,2006,65.5,-1.8
10,27,2006,59.3,-7.7
10,28,2006,70.4,3.8
10,29,2006,66,-0.3
10,30,2006,62.8,-3.2
10,31,2006,63.3,-2.4
11,1,2006,63,-2.4
11,2,2006,62.4,-2.7
11,3,2006,65.9,1.1
11,4,2006,66.2,1.7
11,5,2006,64.5,0.4
11,6,2006,64.6,0.8
11,7,2006,66.7,3.2
11,8,2006,69.3,6.1
11,9,2006,69.3,6.4
11,10,2006,65.8,3.2
11,11,2006,66.8,4.5
11,12,2006,66.6,4.6
11,13,2006,58.4,-3.4
11,14,2006,61.1,-0.4
11,15,2006,60.7,-0.5
11,16,2006,61.9,1
11,17,2006,62.2,1.6
11,18,2006,64.1,3.8
11,19,2006,64.1,4
11,20,2006,72,12.2
11,21,2006,69.5,10
11,22,2006,65.4,6.2
11,23,2006,63.4,4.4
11,24,2006,62.7,4
11,25,2006,61.1,2.6
11,26,2006,59.1,0.9
11,27,2006,54.7,-3.3
11,28,2006,54.6,-3.1
11,29,2006,53.4,-4.1
11,30,2006,40,-17.2
12,1,2006,43.6,-13.4
12,2,2006,48.7,-8.1
12,3,2006,49.5,-7
12,4,2006,54.1,-2.2
12,5,2006,57.7,1.6
12,6,2006,53.3,-2.6
12,7,2006,56.5,0.8
12,8,2006,58.1,2.6
12,9,2006,58.4,3.1
12,10,2006,56.6,1.5
12,11,2006,50.6,-4.3
12,12,2006,46.6,-8.1
12,13,2006,48.2,-6.3
12,14,2006,52.2,-2.1
12,15,2006,55.5,1.4
12,16,2006,59.9,5.9
12,17,2006,60.2,6.4
12,18,2006,49.3,-4.3
12,19,2006,44.6,-8.9
12,20,2006,38.3,-15
12,21,2006,38.6,-14.6
12,22,2006,42.9,-10.1
12,23,2006,41.5,-11.4
12,24,2006,44.8,-8
12,25,2006,47,-5.6
12,26,2006,51.6,-0.9
12,27,2006,56.3,3.9
12,28,2006,46.3,-6
12,29,2006,42.1,-10.1
12,30,2006,41.9,-10.2
12,31,2006,44.3,-7.7
1,1,2007,46,-5.9
1,2,2007,45.8,-6
1,3,2007,53,1.3
1,4,2007,51.3,-0.4
1,5,2007,51.9,0.3
1,6,2007,43.3,-8.2
1,7,2007,43.5,-8
1,8,2007,52.7,1.3
1,9,2007,61.5,10.1
1,10,2007,61,9.7
1,11,2007,59.8,8.5
1,12,2007,60,8.7
1,13,2007,47.6,-3.6
1,14,2007,36.3,-14.9
1,15,2007,32.9,-18.3
1,16,2007,41.1,-10.1
1,17,2007,47,-4.2
1,18,2007,51.5,0.3
1,19,2007,50.7,-0.5
1,20,2007,43.6,-7.6
1,21,2007,41.4,-9.8
1,22,2007,32.5,-18.8
1,23,2007,39,-12.3
1,24,2007,45.9,-5.4
1,25,2007,51.4,0
1,26,2007,48.3,-3.1
1,27,2007,48.5,-3
1,28,2007,51.4,-0.1
1,29,2007,53.6,2
1,30,2007,53.3,1.7
1,31,2007,48,-3.7
2,1,2007,44.3,-7.5
2,2,2007,45.4,-6.5
2,3,2007,45.1,-6.9
2,4,2007,50.6,-1.5
2,5,2007,56.7,4.5
2,6,2007,62.8,10.5
2,7,2007,61.2,8.8
2,8,2007,61.1,8.6
2,9,2007,61.3,8.7
2,10,2007,62.4,9.7
2,11,2007,63.6,10.7
2,12,2007,56.9,3.9
2,13,2007,54.8,1.6
2,14,2007,49.2,-4.1
2,15,2007,46.2,-7.3
2,16,2007,50,-3.6
2,17,2007,58.9,5.1
2,18,2007,68.3,14.4
2,19,2007,62.1,8
2,20,2007,51.5,-2.8
2,21,2007,52.3,-2.2
2,22,2007,58.9,4.3
2,23,2007,58.2,3.4
2,24,2007,46.5,-8.5
2,25,2007,48.4,-6.8
2,26,2007,55.1,-0.3
2,27,2007,59.2,3.6
2,28,2007,57.9,2.1
3,1,2007,45.4,-10.7
3,2,2007,47,-9.3
3,3,2007,48.4,-8.1
3,4,2007,52.7,-4
3,5,2007,60.8,3.8
3,6,2007,61.6,4.4
3,7,2007,66,8.6
3,8,2007,71.6,13.9
3,9,2007,67.3,9.4
3,10,2007,66.8,8.6
3,11,2007,67.5,9.1
3,12,2007,67.9,9.2
3,13,2007,68.8,9.9
3,14,2007,70.4,11.2
3,15,2007,70.5,11
3,16,2007,72.8,13.1
3,17,2007,80.1,20.1
3,18,2007,74,13.7
3,19,2007,71.6,11
3,20,2007,70.4,9.6
3,21,2007,71.9,10.8
3,22,2007,65.1,3.7
3,23,2007,54,-7.7
3,24,2007,55.3,-6.7
3,25,2007,61.8,-0.5
3,26,2007,66.1,3.5
3,27,2007,69.7,6.8
3,28,2007,55.2,-8
3,29,2007,52.7,-10.8
3,30,2007,56.4,-7.4
3,31,2007,59.5,-4.6
4,1,2007,67.1,2.7
4,2,2007,71.6,6.9
4,3,2007,74.3,9.3
4,4,2007,73.3,8
4,5,2007,76,10.4
4,6,2007,77.2,11.2
4,7,2007,71.9,5.6
4,8,2007,74.1,7.5
4,9,2007,72.3,5.4
4,10,2007,70.3,3.1
4,11,2007,70.9,3.4
4,12,2007,71.2,3.3
4,13,2007,56.4,-11.8
4,14,2007,61.9,-6.6
4,15,2007,67.7,-1.1
4,16,2007,58.6,-10.6
4,17,2007,57.8,-11.7
4,18,2007,66.9,-2.9
4,19,2007,67.6,-2.5
4,20,2007,67.9,-2.5
4,21,2007,60.6,-10.2
4,22,2007,61,-10.1
4,23,2007,67.5,-3.9
4,24,2007,61.9,-9.8
4,25,2007,68,-4
4,26,2007,74.2,1.8
4,27,2007,74.7,2
4,28,2007,77.5,4.5
4,29,2007,75.5,2.2
4,30,2007,78.7,5.1
5,1,2007,79,5
5,2,2007,73.7,-0.6
5,3,2007,81,6.4
5,4,2007,75.4,0.5
5,5,2007,71.1,-4.1
5,6,2007,65.6,-9.9
5,7,2007,69.3,-6.5
5,8,2007,72.6,-3.5
5,9,2007,75.2,-1.2
5,10,2007,78.4,1.7
5,11,2007,83.2,6.2
5,12,2007,85.9,8.6
5,13,2007,85.3,7.7
5,14,2007,84.3,6.4
5,15,2007,82.9,4.7
5,16,2007,83.5,5
5,17,2007,81.9,3.1
5,18,2007,82.5,3.4
5,19,2007,82.6,3.3
5,20,2007,80.9,1.3
5,21,2007,79.5,-0.4
5,22,2007,77.9,-2.3
5,23,2007,76.6,-3.8
5,24,2007,82.2,1.5
5,25,2007,82.3,1.3
5,26,2007,81.9,0.7
5,27,2007,82.6,1.1
5,28,2007,82.2,0.5
5,29,2007,81.6,-0.4
5,30,2007,80.4,-1.8
5,31,2007,82.4,-0.1
6,1,2007,82.6,-0.1
6,2,2007,83.7,0.8
6,3,2007,84.1,0.9
6,4,2007,85.9,2.5
6,5,2007,87.9,4.3
6,6,2007,86.3,2.5
6,7,2007,77,-7
6,8,2007,79,-5.3
6,9,2007,82.4,-2.1
6,10,2007,83,-1.7
6,11,2007,81.6,-3.3
6,12,2007,73.4,-11.7
6,13,2007,83.1,-2.1
6,14,2007,88.4,3
6,15,2007,90,4.4
6,16,2007,91.8,6
6,17,2007,89.6,3.7
6,18,2007,89.1,3
6,19,2007,89.2,2.9
6,20,2007,90.9,4.5
6,21,2007,92.4,5.8
6,22,2007,93.6,6.9
6,23,2007,92,5.1
6,24,2007,90.8,3.8
6,25,2007,91.1,4
6,26,2007,92.2,4.9
6,27,2007,93.6,6.2
6,28,2007,94.3,6.8
6,29,2007,93.4,5.8
6,30,2007,92.8,5.1
7,1,2007,93.1,5.3
7,2,2007,93.7,5.8
7,3,2007,92.8,4.8
7,4,2007,93.9,5.8
7,5,2007,98.3,10.2
7,6,2007,96.2,8
7,7,2007,91.9,3.6
7,8,2007,89.3,1
7,9,2007,90.5,2.1
7,10,2007,90.5,2
7,11,2007,90,1.5
7,12,2007,91.5,3
7,13,2007,87,-1.6
7,14,2007,91.3,2.7
7,15,2007,91.4,2.8
7,16,2007,91.8,3.2
7,17,2007,90,1.3
7,18,2007,91.8,3.1
7,19,2007,88.4,-0.3
7,20,2007,82.3,-6.4
7,21,2007,81.4,-7.3
7,22,2007,80.7,-7.9
7,23,2007,83.2,-5.4
7,24,2007,79.1,-9.5
7,25,2007,77.4,-11.2
7,26,2007,79.2,-9.3
7,27,2007,82.3,-6.2
7,28,2007,78,-10.4
7,29,2007,76.1,-12.3
7,30,2007,83.1,-5.2
7,31,2007,80.4,-7.8
8,1,2007,79.4,-8.8
8,2,2007,80.5,-7.6
8,3,2007,85.3,-2.7
8,4,2007,85.2,-2.7
8,5,2007,79.3,-8.5
8,6,2007,74.3,-13.4
8,7,2007,78.5,-9.1
8,8,2007,84.2,-3.3
8,9,2007,88.3,0.9
8,10,2007,86.9,-0.4
8,11,2007,84.3,-2.9
8,12,2007,88.2,1.1
8,13,2007,91.3,4.4
8,14,2007,85.9,-0.9
8,15,2007,84.1,-2.5
8,16,2007,87.6,1.1
8,17,2007,86.3,0
8,18,2007,89,2.8
8,19,2007,87,1
8,20,2007,83,-2.9
8,21,2007,87.8,2.1
8,22,2007,90,4.5
8,23,2007,91.3,6
8,24,2007,87.4,2.3
8,25,2007,80.6,-4.3
8,26,2007,82.8,-1.9
8,27,2007,87.3,2.8
8,28,2007,88,3.7
8,29,2007,89.5,5.4
8,30,2007,91.8,7.9
8,31,2007,87.7,4
9,1,2007,90.5,7
9,2,2007,85,1.7
9,3,2007,88.6,5.6
9,4,2007,84.6,1.8
9,5,2007,84.3,1.7
9,6,2007,77,-5.3
9,7,2007,80.7,-1.4
9,8,2007,81.9,0.1
9,9,2007,86.2,4.6
9,10,2007,85.6,4.3
9,11,2007,88.8,7.7
9,12,2007,88.2,7.4
9,13,2007,86.8,6.3
9,14,2007,86.8,6.5
9,15,2007,87.3,7.3
9,16,2007,81.7,2
9,17,2007,78.3,-1.2
9,18,2007,79.2,0
9,19,2007,81.1,2.2
9,20,2007,80.8,2.2
9,21,2007,80.7,2.4
9,22,2007,84.4,6.4
9,23,2007,79.4,1.7
9,24,2007,74.9,-2.6
9,25,2007,74.8,-2.4
9,26,2007,77.8,0.9
9,27,2007,78.7,2.1
9,28,2007,81.6,5.3
9,29,2007,82,6
9,30,2007,78.4,2.8
10,1,2007,77.5,2.2
10,2,2007,75.6,0.6
10,3,2007,85.1,10.4
10,4,2007,81.4,7
10,5,2007,79.9,5.8
10,6,2007,71.3,-2.5
10,7,2007,63.6,-9.9
10,8,2007,69.2,-3.9
10,9,2007,76.7,3.9
10,10,2007,79.2,6.7
10,11,2007,78.4,6.2
10,12,2007,74.8,2.9
10,13,2007,71.7,0.2
10,14,2007,65.5,-5.7
10,15,2007,68.1,-2.8
10,16,2007,70.2,-0.4
10,17,2007,67.2,-3.1
10,18,2007,66.9,-3
10,19,2007,68.9,-0.7
10,20,2007,73.1,3.8
10,21,2007,70.9,1.9
10,22,2007,64.3,-4.3
10,23,2007,68.4,0.1
10,24,2007,76.6,8.6
10,25,2007,76.8,9.1
10,26,2007,72.3,4.9
10,27,2007,73.3,6.3
10,28,2007,74.6,7.9
10,29,2007,78.6,12.2
10,30,2007,77.7,11.6
10,31,2007,69.4,3.6
11,1,2007,67.5,2
11,2,2007,67.2,2
11,3,2007,68.4,3.6
11,4,2007,72.9,8.4
11,5,2007,71.9,7.7
11,6,2007,73.1,9.2
11,7,2007,72.4,8.8
11,8,2007,73,9.7
11,9,2007,69.6,6.6
11,10,2007,67.9,5.2
11,11,2007,64.6,2.2
11,12,2007,63.5,1.4
11,13,2007,64.4,2.6
11,14,2007,67,5.5
11,15,2007,69.9,8.6
11,16,2007,67.6,6.6
11,17,2007,63.7,3
11,18,2007,64.9,4.5
11,19,2007,69,8.9
11,20,2007,67.4,7.5
11,21,2007,66.5,6.9
11,22,2007,68.2,8.9
11,23,2007,60,0.9
11,24,2007,53.2,-5.6
11,25,2007,49.2,-9.3
11,26,2007,53.5,-4.8
11,27,2007,54.7,-3.3
11,28,2007,58.8,1
11,29,2007,60.7,3.2
11,30,2007,62.2,4.9
12,1,2007,58.8,1.7
12,2,2007,54.4,-2.4
12,3,2007,55.1,-1.5
12,4,2007,56.9,0.5
12,5,2007,61.2,5
12,6,2007,61.3,5.4
12,7,2007,65.1,9.4
12,8,2007,55.9,0.4
12,9,2007,50,-5.3
12,10,2007,49.4,-5.7
12,11,2007,46,-8.9
12,12,2007,44.4,-10.3
12,13,2007,45.4,-9.1
12,14,2007,45.5,-8.9
12,15,2007,43.2,-11
12,16,2007,47,-7
12,17,2007,53,-0.8
12,18,2007,50.2,-3.5
12,19,2007,49.2,-4.3
12,20,2007,47.6,-5.8
12,21,2007,49,-4.2
12,22,2007,39.2,-13.9
12,23,2007,40.3,-12.6
12,24,2007,49.3,-3.5
12,25,2007,48.7,-4
12,26,2007,44.6,-8
12,27,2007,39.7,-12.7
12,28,2007,36.6,-15.7
12,29,2007,40.5,-11.7
12,30,2007,43.1,-9
12,31,2007,46.2,-5.8
1,1,2008,49.6,-2.3
1,2,2008,56,4.2
1,3,2008,58.4,6.6
1,4,2008,60.5,8.8
1,5,2008,59.4,7.8
1,6,2008,60.1,8.6
1,7,2008,55.4,3.9
1,8,2008,51.6,0.2
1,9,2008,49,-2.4
1,10,2008,47.6,-3.7
1,11,2008,47.8,-3.5
1,12,2008,48.4,-2.9
1,13,2008,48.1,-3.1
|
8be7847c6dd09bcb220cff7559acbbb2d64e87b0
|
64178eeec231869fd9fd03f67cfbd8f59647a53b
|
/V2/analyse.r
|
0887e8063c48de601fed357413534cadd98d659e
|
[] |
no_license
|
drtrev/sigeng
|
30174e9ea5791186460505ce62bebcf6896297be
|
a14aee8ebc5cabce080edf73c0bc2d7a011bf2b1
|
refs/heads/master
| 2021-07-14T03:38:56.323884
| 2016-10-02T12:04:10
| 2016-10-02T12:04:10
| 6,940,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,588
|
r
|
analyse.r
|
load.packages <- function()
# Attach every package the analysis helpers in this file depend on.
# Call once at the start of a session before using analyse()/sim().
{
  library(lme4)    # lmer() for the mixed-model analysis
  library(ez)      # ezANOVA() for type-2/3 repeated-measures ANOVA
  library(nortest) # lillie.test() (Lilliefors/KS normality test)
  library(plyr)    # ddply()/aaply() used throughout
}
analyse <- function(dat, analysis)
# Run one complete analysis pipeline on `dat`, driven by a single row of the
# `analyses` design grid: apply outlier handling and normality checking in
# the configured order, then run the configured test (repeated-measures
# ANOVA type 2/3 or a likelihood-ratio mixed-model comparison).
#
# dat      long-format data with columns id, group, value, factor1, factor2
# analysis one-row data frame with (at least) diag.order, outliers,
#          outliers.notnormal, outliers.response, normality.func,
#          normality.on and analysis
#
# Returns list(dat = data with treated values and an `outlier` flag column,
#              analysis = the input row extended with the three p-values and
#                         a significance star).
{
  # Debug hook: flip to TRUE interactively to work on the first grid row.
  test <- FALSE; if (test) analysis <- analyses[1, ]

  outliers <- function(dat, analysis, norm = TRUE)
  # Dispatch to the configured outlier method; `norm` chooses between the
  # method for normal data and the fallback method for non-normal data.
  {
    outliers.sub <- function(dat, outliers.method, outliers.response)
    {
      test <- FALSE; if (test) outliers.response <- analysis$outliers.response

      outliers.response.func <- function(value, MEAN, SD, outliers.response)
      # Treatment of a detected outlier: clamp to mean +/- 2 SD ("SD2") or
      # drop it ("remove", i.e. set to NA).
      {
        value.out <- NULL
        if (outliers.response == "SD2") value.out <- ifelse(value > MEAN, MEAN + 2 * SD, MEAN - 2 * SD)
        if (outliers.response == "remove") value.out <- NA
        if (is.null(value.out)) stop(paste0("Error with outliers response; not found: ", outliers.response))
        cat("outliers.response:", as.character(outliers.response), "\n")
        cat("Outlier converted to", as.character(value.out), "\n")
        value.out
      }

      classical1 <- function(datgroup, outliers.response)
      # Classical criterion: outlier if more than 3.29 SD from the group mean.
      {
        classical1.sub <- function(value, MEAN, SD, outliers.response, debug.group)
        {
          if (value > MEAN + SD * 3.29 || value < MEAN - SD * 3.29)
          {
            cat("Outlier group:", debug.group, "id:", names(value), "value:", value, "\n")
            value <- outliers.response.func(value, MEAN, SD, outliers.response)
          }
          value
        }
        values <- datgroup$value
        names(values) <- seq_along(values)
        datgroup$value <- aaply(values, 1, classical1.sub, mean(values), sd(values), outliers.response, as.character(datgroup$group[1]))
        datgroup
      }

      classical2 <- function(datgroup, outliers.response)
      # Looser classical criterion: outlier if more than 3 SD from the mean.
      {
        classical2.sub <- function(value, MEAN, SD, outliers.response, debug.group)
        {
          if (value > MEAN + SD * 3 || value < MEAN - SD * 3)
          {
            cat("Outlier group:", debug.group, "id:", names(value), "value:", value, "\n")
            value <- outliers.response.func(value, MEAN, SD, outliers.response)
          }
          value
        }
        values <- datgroup$value
        names(values) <- seq_along(values)
        datgroup$value <- aaply(values, 1, classical2.sub, mean(values), sd(values), outliers.response, as.character(datgroup$group[1]))
        datgroup
      }

      boxplotout <- function(datgroup, outliers.response)
      # Tukey boxplot criterion: outlier if beyond 1.5 IQR from the quartiles.
      {
        boxplotout.sub <- function(value, q1, q3, MEAN, SD, outliers.response, debug.group)
        {
          iqr <- q3 - q1
          if (value > q3 + iqr * 1.5 || value < q1 - iqr * 1.5)
          {
            cat("Boxplot outlier group", debug.group, "id:", names(value), "value:", value, "\n")
            value <- outliers.response.func(value, MEAN, SD, outliers.response)
          }
          value
        }
        # test:
        #datgroup <- data.frame(group="A1", value=rnorm(100))
        #outliers.response <- "SD2"
        values <- datgroup$value
        names(values) <- seq_along(values)
        datgroup$value <- aaply(values, 1, boxplotout.sub, quantile(values, 1/4), quantile(values, 3/4), mean(values), sd(values), outliers.response, as.character(datgroup$group[1]))
        datgroup
      }

      dat2 <- NULL
      if (outliers.method == "classical1")
      {
        dat2 <- ddply(dat, .(group), classical1, outliers.response)
      }
      if (outliers.method == "classical2")
      {
        # BUG FIX: this branch previously called classical1(), so the
        # classical2 (3 SD) criterion was never actually applied.
        dat2 <- ddply(dat, .(group), classical2, outliers.response)
      }
      if (outliers.method == "boxplot")
      {
        dat2 <- ddply(dat, .(group), boxplotout, outliers.response)
      }
      #if (outliers.method=="robust")
      #{
      #  dat2 <- dat # TODO
      #}
      if (is.null(dat2)) stop(paste0("Outliers method not found: ", outliers.method))
      # Flag values changed by the outlier treatment.
      # NOTE(review): when a value is replaced by NA ("remove") this
      # comparison yields NA rather than TRUE -- confirm downstream use.
      dat2$outlier <- !(dat2$value == dat$value)
      dat2
    }

    if (norm)
    {
      dat <- outliers.sub(dat, analysis$outliers, analysis$outliers.response)
    }else{
      dat <- outliers.sub(dat, analysis$outliers.notnormal, analysis$outliers.response)
    }
    dat
  }

  normality <- function(dat, analysis)
  # Return TRUE when the data are considered normal, testing either each
  # group's values or the ANOVA residuals, per analysis$normality.on.
  {
    normality.sub <- function(values, normality.func)
    # Run the configured normality test on one vector; returns its p-value.
    {
      if (normality.func == "KS")
      {
        return(lillie.test(values)$p.value)
      }
      else if (normality.func == "Shapiro-Wilk")
      {
        return(shapiro.test(values)$p.value)
      }
      else
      {
        stop(paste0("normality.func not found: ", normality.func))
      }
    }

    if (analysis$normality.on == "groups")
    {
      normal.p <- ddply(dat, .(group), function(x) normality.sub(x$value, analysis$normality.func))
      # For now, normal if < half of the groups are significantly non-normal.
      if (sum(normal.p$V1 < .05) > 0) cat(paste0("Groups not-normal: ", sum(normal.p$V1 < .05), "\n"))
      if (sum(normal.p$V1 < .05) < nrow(normal.p) / 2)
      {
        normal <- TRUE
      }else{
        normal <- FALSE
      }
    }
    else if (analysis$normality.on == "resids")
    {
      # Remove participants with any NA (complete-case repeated measures).
      remove.ids <- dat[is.na(dat$value), "id"]
      dat <- dat[!(dat$id %in% remove.ids), ]
      dat$id <- factor(dat$id)
      aov1 <- aov(value ~ factor1*factor2 + Error(id/(factor1*factor2)), data = dat)
      # Test the residuals of each error stratum.
      pvals <- normality.sub(resid(aov1[[3]]), analysis$normality.func)             # factor1
      pvals <- c(pvals, normality.sub(resid(aov1[[4]]), analysis$normality.func))   # factor2
      pvals <- c(pvals, normality.sub(resid(aov1[[5]]), analysis$normality.func))   # factor1:2
      # Same again on projected residuals (proj() from MASS-style projections).
      aov1p <- proj(aov1)
      pvals2 <- normality.sub(aov1p[[3]][, "Residuals"], analysis$normality.func)           # factor1
      pvals2 <- c(pvals2, normality.sub(aov1p[[4]][, "Residuals"], analysis$normality.func)) # factor2
      pvals2 <- c(pvals2, normality.sub(aov1p[[5]][, "Residuals"], analysis$normality.func)) # factor1:2
      normal <- TRUE
      num.notnorm <- sum(pvals < .05)
      if (num.notnorm > 1)
      {
        cat(paste0(num.notnorm, " sets of resids are not normal\n"))
        normal <- FALSE
      }
      num.notnorm <- sum(pvals2 < .05)
      if (num.notnorm > 1)
      {
        cat(paste0(num.notnorm, " sets of projected resids are not normal\n"))
        normal <- FALSE
      }
    }else
    {
      # BUG FIX: `normality.on` was referenced as a bare (undefined) name
      # here, which raised "object not found" instead of this message.
      stop(paste0("normality.on not found: ", analysis$normality.on))
    }
    normal
  }

  analysis.anova.type2 <- function(dat)
  # Type-2 repeated-measures ANOVA; returns the three effect p-values.
  {
    # For within-subjects, if a participant has a missing value then remove
    # that participant entirely.
    remove.ids <- dat[is.na(dat$value), "id"]
    dat <- dat[!(dat$id %in% remove.ids), ]
    dat$id <- factor(dat$id)
    out <- ezANOVA(dat, dv = value, wid = id, within = .(factor1, factor2), type = 2)
    out$ANOVA$p
  }

  analysis.anova.type3 <- function(dat)
  # Type-3 repeated-measures ANOVA; returns the three effect p-values.
  {
    # TODO could use aov here, faster (and type 3)
    remove.ids <- dat[is.na(dat$value), "id"]
    dat <- dat[!(dat$id %in% remove.ids), ]
    dat$id <- factor(dat$id)
    out <- ezANOVA(dat, dv = value, wid = id, within = .(factor1, factor2), type = 3)
    out$ANOVA$p
  }

  analysis.lme <- function(dat)
  # Mixed model with random intercept per id; p-values from likelihood-ratio
  # tests of nested models (factor1, then factor2, then the interaction).
  {
    # TODO could also nest factors within ID, see Field's example
    # (participants/animal or similar), although that seems dubious.
    lm1 <- lmer(value ~ 1 + (1|id), data = dat)
    lm2 <- lmer(value ~ factor1 + (1|id), data = dat)
    lm3 <- lmer(value ~ factor1 + factor2 + (1|id), data = dat)
    lm4 <- lmer(value ~ factor1 + factor2 + factor1:factor2 + (1|id), data = dat)
    an1 <- anova(lm1, lm2, lm3, lm4)
    #qplot(resid(lm2))
    pvals <- an1$`Pr(>Chisq)`[2:4]
    pvals
  }

  # Diagnostics in the configured order.
  if (analysis$diag.order == "outliers-normality")
  {
    dat <- outliers(dat, analysis)
    norm <- normality(dat, analysis)
  }else{
    norm <- normality(dat, analysis)
    dat <- outliers(dat, analysis, norm)
    if (!norm) norm <- normality(dat, analysis) # may be normal after removing outliers
  }
  if (!norm)
  {
    # could transform data TODO Field
    # or just use robust test TODO Field
    cat("Would use robust test now.\n")
  }

  # Run the configured test.
  pvals <- NULL
  if (analysis$analysis == "anova.type2")
  {
    pvals <- analysis.anova.type2(dat)
  }
  if (analysis$analysis == "anova.type3")
  {
    pvals <- analysis.anova.type3(dat)
  }
  if (analysis$analysis == "lme")
  {
    pvals <- analysis.lme(dat)
  }
  if (is.null(pvals)) stop("Invalid analysis supplied")

  analysis$factor1.pval <- pvals[1]
  analysis$factor2.pval <- pvals[2]
  analysis$factor1.2.pval <- pvals[3] # interaction factor1:factor2
  analysis$star <- ""
  if (sum(pvals < .05) > 0) analysis$star <- "*"

  # Return dat (outliers marked) and the analysis row with p-values added.
  list(dat = dat, analysis = analysis)
}
test.normal.anova <- function()
# Exploratory check of the family-wise Type I error rate of the 2x2
# repeated-measures ANOVA pipeline on null (normal) data.
# Summary: it will be ~14% because there are three F tests per data set.
# NOTE(review): relies on sim(), generate.dat(), generate.dat.within() and
# generate.dat.between() defined elsewhere; meant to be stepped through
# interactively, not called from other code.
{
  # Single-row design grid exercising the standard pipeline.
  analyses.normal <- expand.grid(diag.order=factor("outliers-normality"),
  outliers=factor("classical1"),
  outliers.notnormal=factor("boxplot"),
  outliers.response=factor("SD2"),
  normality=factor("shapiro-wilk"),
  analysis=factor("anova.type3"))
  args(sim)
  # 100 simulated null data sets through the full pipeline.
  pvals.normal <- aaply(1:100, 1, function(x) sim(analyses.normal))
  sum(pvals.normal < .05) # 14%

  just.anova <- function(dat)
  # Bare within-subjects ANOVA with no diagnostics; returns the smallest of
  # the three effect p-values (so "any effect significant" is one number).
  {
    out <- ezANOVA(dat, dv=value, wid=id, within=.(factor1, factor2))
    min(out$ANOVA$p)
  }
  pvals.just.anova <- aaply(1:100, 1, function(x) just.anova(generate.dat()))
  sum(pvals.just.anova < .05)
  pvals.just.anova <- aaply(1:100, 1, function(x) just.anova(generate.dat.within()))
  sum(pvals.just.anova < .05) # 14%
  pvals.just.anova <- aaply(1:1000, 1, function(x) just.anova(generate.dat.within()))
  sum(pvals.just.anova < .05) / 1000 # 13%

  # Why is the Type I error above 5% for the ANOVA?  Because min() over
  # three tests inflates it; check the between-subjects case too.
  just.anova.between <- function(dat)
  # Bare between-subjects two-factor ANOVA; smallest of the three p-values.
  {
    out <- ezANOVA(dat, dv=value, wid=id, between=.(factor1, factor2))
    min(out$ANOVA$p)
  }
  pvals.just.anova <- aaply(1:100, 1, function(x) just.anova.between(generate.dat.between()))
  sum(pvals.just.anova < .05) # 12%

  generate.dat.between2 <- function(N=30)
  # Two-group null data, N participants per group (single factor), for a
  # sanity check that a single F test keeps the nominal 5% rate.
  {
    dat <- data.frame(id=factor(1:(N*2)), group=factor(c(rep("A", each=N), rep("B", each=N))), value=rnorm(N*2))
    dat
  }
  just.anova.between2 <- function(dat)
  # One-way between-subjects ANOVA on the two-group data.
  {
    out <- ezANOVA(dat, dv=value, wid=id, between=group)
    min(out$ANOVA$p)
  }
  pvals.just.anova <- aaply(1:100, 1, function(x) just.anova.between2(generate.dat.between2()))
  sum(pvals.just.anova < .05) # 5%
  length(pvals.just.anova)
}
##################################################
# Check whether generating data produces sphericity
##################################################
checkSphericity <- function(N=30)
# Sanity check: generate one simulated data set and run a repeated-measures
# ANOVA so that ez can report Mauchly's sphericity test for it.
{
  source("generateData.r")
  simulated <- generate.dat(N)
  #head(simulated)
  #args(ezANOVA)
  # Note: with only two levels per factor sphericity cannot be violated,
  # but the data can still be generated in two ways — with or without
  # per-participant id means.
  ezANOVA(simulated, dv = value, within = .(factor1, factor2), wid = id)
}
##################################################
# Simulate: run through analyses
##################################################
sim <- function(analyses, N=30)
# Generate one within-subjects null data set of size N and run every
# analysis in `analyses` on it; return the smallest p-value observed across
# all analyses and all three effects (simulating analytic flexibility).
#
# analyses  data frame of analysis specifications, one per row
# N         number of simulated participants
{
  dat <- generate.dat.within(N)
  # Collect the augmented analysis rows in a list and bind once, instead of
  # growing a data frame with rbind() inside the loop.
  rows <- lapply(seq_len(nrow(analyses)), function(i) analyse(dat, analyses[i, ])$analysis)
  out <- do.call(rbind, rows)
  # Smallest p-value over both main effects and the interaction.
  min(out$factor1.pval, out$factor2.pval, out$factor1.2.pval)
}
|
56d17a7fdb5d8c2a3316f93f39b0a064e4b4cca5
|
3375e9c749a9e096ae7a89dc53cf3188e3d1b599
|
/hospital-CA.R
|
d062b98b390a025431eeb5f3b272b37014d1196d
|
[] |
no_license
|
BattaLiu/referral-network
|
f57815b52c274694d075f14769e8c94eb558f67a
|
9c62449c2e904ec454b394b31ca16e3fdffeb15d
|
refs/heads/master
| 2022-05-25T16:21:06.180862
| 2016-11-28T06:12:57
| 2016-11-28T06:12:57
| 74,306,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,386
|
r
|
hospital-CA.R
|
# Dig into the hospital data of CA: load the raw hospital file and normalise
# the parent-organisation names so hospitals in the same system group together.
library(dplyr)
df.hospital.ca <- read.csv("/Users/batta/Dropbox/phd4/nursing-homes/read/Data/hospital-CA.csv")

# One row per (parent name, parent ZIP+4) combination, with a count.
system <- df.hospital.ca %>%
  group_by(PARENT_NAME, PARENT_ZIP_9) %>%
  tally

# First word of the parent name, and the 5-digit ZIP prefix.
system$initial.name <- gsub("^([[:alpha:]]+)([[:blank:]]|[[:punct:]]).*", "\\1", system$PARENT_NAME)
system$PARENT_ZIP_5 <- gsub("^([[:digit:]]{5}).*", "\\1", system$PARENT_ZIP_9)
system$LOWER_PARENT_NAME <- tolower(system$PARENT_NAME)
system$name <- system$LOWER_PARENT_NAME

# Strip punctuation and generic corporate words from the lower-case name.
# NOTE(review): these alternatives match as substrings, so e.g. "and" is
# also removed from inside longer words -- confirm that is acceptable.
pattern <- "[[:punct:]]|inc|incorporated|llc|central|and"
system$name <- gsub(pattern, "", system$name)

# Manual normalisation of known spelling variants.  The paired replacements
# (e.g. "avanti hospital(s)", "kaiser foundation hospital(s)") funnel both
# singular and plural spellings into one canonical form.
system$name <- gsub("avanti hospitals", "avanti hospital", system$name)
system$name <- gsub("avanti hospital", "avanti hospitals", system$name)
system$name <- gsub("crestwod", "crestwood", system$name)
system$name <- gsub("daughters of charity health systems", "daughters of charity health system", system$name)
system$name <- gsub("daughters of charity healthcare systems", "daughters of charity health system", system$name)
system$name <- gsub("dignith health", "dignity health", system$name)
# The parentheses here act as a regex group, which matches the punctuation-
# stripped text produced by the gsub(pattern, ...) step above.
system$name <- gsub("dignity health (formerly catholic healthcare west)", "dignity health", system$name)
system$name <- gsub("hca holdings", "hca", system$name)
system$name <- gsub("interhealth corporation dba pih health", "interhealth corp dba pih health", system$name)
system$name <- gsub("kaier", "kaiser", system$name)
system$name <- gsub("kaiser foundation hospitals", "kaiser foundation hospital", system$name)
system$name <- gsub("kaiser foundation hospital", "kaiser foundation hospitals", system$name)
# to be finished, stops at kaiser permanente

##############
# BUG FIX: each statement below previously re-derived tmp from initial.name,
# so every assignment discarded the previous one and only the final
# replacement ("county") ever took effect.  The replacements are now chained
# on system$tmp so they all apply in sequence.
# NOTE(review): the replacement order matters once chained (e.g. "Non" runs
# before "North") -- confirm the intended normalisation targets.
system$tmp <- gsub("Crestwod", "Crestwood", system$initial.name, fixed = TRUE)
system$tmp <- gsub("Dignith", "Dignity", system$tmp, fixed = TRUE)
system$tmp <- gsub("Los", "LAC", system$tmp, fixed = TRUE)
system$tmp <- gsub("none", "N/A", system$tmp, fixed = TRUE)
# system$tmp <- gsub("//N", "N///A", system$tmp, fixed = TRUE)
system$tmp <- gsub("NONE", "N/A", system$tmp, fixed = TRUE)
system$tmp <- gsub("Non", "N/A", system$tmp, fixed = TRUE)
#########
system$tmp <- gsub("North", "N/A", system$tmp, fixed = TRUE)
system$tmp <- gsub("city", "N/A", system$tmp, fixed = TRUE)
system$tmp <- gsub("county", "N/A", system$tmp, fixed = TRUE)
|
f91e0a8f67eb0fa3fe9cb8712feda994a1d0277f
|
708bcae7afadb711182594f8beafd655ab6d4884
|
/Rscript/3.1/part 1/complexity_bench.R
|
84c5c821141091dbe3bf84a020469e05b5619dac
|
[
"Unlicense"
] |
permissive
|
wzzlcss/wzzlcss.github.io
|
be7905b625090a0fc4e444803d5508cf8b5d7da5
|
5262f9313ecec37e0e04f45b65dfb2ceacc3b316
|
refs/heads/master
| 2021-07-15T12:27:10.974511
| 2020-06-19T05:46:17
| 2020-06-19T05:46:17
| 175,936,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,650
|
r
|
complexity_bench.R
|
# in order to run this script #
# please get a version of sgdnet on the batch-opt branch #
# and download optB.R to your working dir #
#------ Helper function ------#
library(lattice)
library(latticeExtra)
library(grid)
lattice.options(default.theme = list(fontsize = list(points = 4, text = 8)))
complexity_plot <- function(dataset, data, text) {
  # Lattice plot of epochs-to-convergence versus mini-batch size, one panel
  # per dataset/lambda combination.  A dashed red vertical line marks the
  # optimal batch size (taken from data[1, "optB"]) and `text` is drawn as
  # an in-panel annotation.  Returns the trellis object.
  xyplot(epoch ~ batch | dataset + lambda,
         data = data,
         type = "l",
         panel = function(...) {
           panel.xyplot(...)
           # Mark the theoretically optimal batch size.
           panel.abline(v = data[1, 'optB'],
                        col = "red", lwd = 1, lty = 2)
           # Annotate the panel with the supplied label.
           grid.text(text,
                     unit(0.3, 'npc'),
                     unit(0.9, 'npc'))
         },
         auto.key = TRUE)
}
opt_step_complexity <- function(datasets, family, alpha, lambda, batch_max, batch_freq) {
  # Measure how many epochs SAGA needs to converge as the mini-batch size
  # grows, using the *optimal* step size returned by opt() for this problem.
  # Requires the batch-opt branch of sgdnet (step size input enabled) and
  # optB.R in the working directory.
  #
  # Returns a data frame with columns dataset, lambda, epoch, batch, optB.
  source("optB.R")
  # Only the first dataset is benchmarked.
  x <- datasets[[1]]$x
  y <- datasets[[1]]$y
  batch_seq <- seq(from = 1, to = batch_max, by = batch_freq)
  batch_info <- opt(x, y, family, lambda)
  step <- batch_info$step
  opt_B <- batch_info$B
  # BUG FIX: the results frame was previously seeded with a column named
  # `npass` while the rows appended inside the loop used `epoch`, which
  # makes rbind() fail on mismatched column names.  Rows are now collected
  # in a list and bound once (this also avoids growing a data frame in a
  # loop).
  rows <- lapply(batch_seq, function(b) {
    saga <- sgdnet(x, y, family,
                   standardize = FALSE,
                   alpha = alpha, lambda = lambda,
                   batchsize = b,
                   stepsize = step,
                   maxit = 1e3)
    data.frame(dataset = names(datasets)[1],
               lambda = toString(lambda),
               epoch = saga$npasses,
               batch = b,
               optB = opt_B)
  })
  do.call(rbind, rows)
}
complexity <- function(datasets, family, alpha, lambda, batch_max, batch_freq) {
  # Same benchmark as opt_step_complexity(), but letting sgdnet use its
  # default (Defazio) step size: record the number of epochs SAGA needs for
  # each mini-batch size.  Requires optB.R in the working directory.
  #
  # Returns a data frame with columns dataset, lambda, epoch, batch, optB.
  source("optB.R")
  # Only the first dataset is benchmarked.
  x <- datasets[[1]]$x
  y <- datasets[[1]]$y
  batch_seq <- seq(from = 1, to = batch_max, by = batch_freq)
  batch_info <- opt(x, y, family, lambda)
  opt_B <- batch_info$B
  # BUG FIX: the results frame was previously seeded with a column named
  # `npass` while the rows appended inside the loop used `epoch`, which
  # makes rbind() fail on mismatched column names.  Rows are now collected
  # in a list and bound once.
  rows <- lapply(batch_seq, function(b) {
    saga <- sgdnet(x, y, family,
                   standardize = FALSE,
                   alpha = alpha, lambda = lambda,
                   batchsize = b,
                   maxit = 1e3)
    data.frame(dataset = names(datasets)[1],
               lambda = toString(lambda),
               epoch = saga$npasses,
               batch = b,
               optB = opt_B)
  })
  do.call(rbind, rows)
}
#------ Benchmark ------#
# Run the complexity benchmarks on the abalone data set and save the plots.
data("abalone")
abalone$x <- scale(abalone$x)
dataset <- list(abalone = abalone)

# Epochs vs batch size with the default (Defazio) step size.
data <- complexity(dataset, "gaussian", 0, 10, 1000, 10)
p <- complexity_plot(dataset, data, "Defazio step size")
png(filename="abalone_com.png", width = 3, height = 3, units = 'in', res = 300)
# BUG FIX: trellis objects are not auto-printed when a script runs via
# source()/Rscript, which left the png devices empty; print() explicitly.
print(p)
dev.off()

# Same benchmark with the optimal step size.
data <- opt_step_complexity(dataset, "gaussian", 0, 10, 1000, 10)
p <- complexity_plot(dataset, data, "optimal step size")
png(filename="abalone_com_step_opt.png", width = 3, height = 3, units = 'in', res = 300)
print(p)
dev.off()

# Optimal batch size and step size along part of a glmnet ridge path.
library(glmnet)
x <- scale(abalone$x)
y <- abalone$y
fit <- glmnet(x, y, alpha = 0)
lambda_seq <- fit$lambda[50:100]
B <- step <- rep(0, length(lambda_seq))
for (i in seq_along(lambda_seq)) {
  batch_info <- opt(x, y, "gaussian", lambda_seq[i])
  B[i] <- batch_info$B
  step[i] <- batch_info$step
}
data_batch <- list(optimal = B,
                   lambda = lambda_seq,
                   grp = rep("optimal batch", length(lambda_seq)))
data_step <- list(optimal = step,
                  lambda = lambda_seq,
                  grp = rep("optimal step size", length(lambda_seq)))
p1 <- xyplot(optimal ~ lambda | grp,
             data = data_batch,
             type = "l")
p2 <- xyplot(optimal ~ lambda | grp,
             data = data_step,
             type = "l")
png(filename="abalone_batch.png", width = 6, height = 2, units = 'in', res = 300)
print(p1)
dev.off()
png(filename="abalone_step.png", width = 6, height = 2, units = 'in', res = 300)
print(p2)
dev.off()
|
16e35ac0885697a597d956ea1d1587960d37ff42
|
631b105158e5fb7317b1c3f6b607bfb5147339db
|
/base-scripts/all_varType_counting.R
|
64229ffa4b328227e4dca6a1aedf39055d491aeb
|
[] |
no_license
|
Amfgcp/neoseq_shiny
|
c314e15b37b547615e8943fa793e61d776c5e7b5
|
e528a0b08dda967dbae198c532dbea27bb0cd58b
|
refs/heads/master
| 2023-02-08T12:19:03.797815
| 2020-12-31T23:31:22
| 2020-12-31T23:31:22
| 295,021,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,231
|
r
|
all_varType_counting.R
|
library(dplyr)
library(ggplot2)

# Count, per sample, the occurrences of each relevant variant type among
# selected & expressed variants in the NIC*.pep.txt result files, then plot
# a stacked bar chart.

# Data handling
# Hardcoded pep results folder
#pep.info <- list("path" = paste0('O:/ImmunoGenomics/ngsdata', '/results'))
pep.info <- list("path" = paste0('/mnt/patharchief/ImmunoGenomics/ngsdata', '/results'))
# List all .pep.txt files
pep.info$samples = list.files(pep.info$path, pattern = "^NIC.*pep.txt$")
pep.info$n = length(pep.info$samples)
# all_varType_count <- tribble()
# Accumulates one row per (sample, variant type) with its occurrence count.
variantTYPES_gathered <- tibble()
for (i in 1:pep.info$n) {
  pep_path = paste0(pep.info$path, "/", pep.info$samples[i])
  # Read file and filter by selected & expressed variants
  pepr <- read.table(pep_path, header = TRUE, sep = "\t") %>%
    dplyr::select(varID, selection, Expressed, variantTYPE) %>%
    filter(selection=='yes', Expressed=='yes') %>%
    dplyr::select(varID, variantTYPE)
  # Split comma-separated variantTYPE values into one row per type per
  # varID, then drop duplicate rows so a given type is counted at most once
  # per variant.
  pepr_split <- pepr %>%
    mutate(variantTYPE = strsplit(as.character(variantTYPE), ",")) %>%
    tidyr::unnest(variantTYPE) %>%
    unique()
  # Discard variant types that are not of interest for this plot.
  pepr_relevant <- pepr_split %>%
    subset(variantTYPE != 'coding_sequence_variant' &
           variantTYPE != 'NMD_transcript_variant' &
           variantTYPE != 'splice_region_variant' &
           variantTYPE != '5_prime_UTR_variant' &
           variantTYPE != '3_prime_UTR_variant' &
           variantTYPE != 'intron_variant' &
           variantTYPE != 'non_coding_transcript_variant' &
           variantTYPE != 'non_coding_transcript_exon_variant')
  # Count occurrences of each remaining variant type.
  pepr_count <- pepr_relevant %>%
    dplyr::select(variantTYPE) %>%
    group_by(variantTYPE) %>%
    mutate(occurrences = n()) %>%
    unique()
  # Derive the sample name from the file name.
  # NOTE(review): "." in the sub() pattern is a regex wildcard; if a literal
  # ".25Lpep.txt" suffix is intended, fixed = TRUE would be safer.
  pepr_count$sample <- rep(sub(".25Lpep.txt", "", pep.info$samples[i]), nrow(pepr_count))
  # Accumulate this sample's counts.
  # all_varType_count <- data.table::rbindlist(list(all_varType_count, pepr_count))[, lapply(.SD, sum, na.rm = TRUE), by = variantTYPE]
  variantTYPES_gathered = rbind(variantTYPES_gathered, as_tibble(pepr_count))
}

# Exclude the NIC12/NIC13 samples from the plot.
subset <- variantTYPES_gathered[!(variantTYPES_gathered$sample %in% c("NIC12", "NIC13")),]

# Stacked bar chart: occurrences per variant type, filled by sample.
bar_plot <- ggplot(data=subset, aes(x=variantTYPE, y=occurrences, fill=sample)) +
  geom_bar(stat="identity") +
  ylab("Number of Occurrences") +
  xlab("Variant Type") +# + coord_cartesian(ylim = c(0, 750)) +
  theme_bw() +
  theme(axis.text.x=element_text(angle=90, vjust = 1, size=6),
        # remove the vertical grid lines
        panel.grid.major.x = element_blank(),
        panel.spacing = unit(0.02, "lines"),
        panel.border = element_blank(),
        strip.text = element_text(size=12))
# legend.position="top")
bar_plot
ggsave("~/Dropbox/Presentations/neoseq/typeVar_Nics_MMRp.jpeg", width = 6, height = 4)
|
2f71da7e292b515cf1741d00204bab57ec9c283d
|
77949294d765a96e46fc2c71deb3da2fb82bf12f
|
/案例演示/3、图形初阶/1、使用图形.R
|
0eab254aa2d696095082275ecc0f3cb8d4858fa1
|
[] |
no_license
|
ocxz/RLange
|
7715cb8c2b6b21995ac948f65036dbb043634026
|
9e68822e1eb8aed048f6b917db91121707c84097
|
refs/heads/master
| 2020-08-07T23:00:35.700567
| 2019-10-14T07:28:48
| 2019-10-14T07:28:48
| 213,614,549
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 681
|
r
|
1、使用图形.R
|
# Saving graphics
# Output devices include pdf(), png(), jpeg(), ...
png("F:/R语言/案例演示/3、图形初阶/mypng.png")
# Attach the mtcars data set
attach(mtcars)
# Scatter plot of mpg (y axis) against wt (x axis)
plot(wt, mpg)
# Add the best-fit regression line
abline(lm(mpg~wt))
# Add a title to the scatter plot
title("Regression of MPG On Weight")
# Detach the data set
detach(mtcars)
# Close the output device (writes the png file)
dev.off()

# Open a new plotting window
dev.new()
# Attach the mtcars data set
attach(mtcars)
# Scatter plot of mpg (y axis) against wt (x axis)
plot(wt, mpg)
# Add the best-fit regression line
abline(lm(mpg~wt))
# Add a title to the scatter plot
title("Regression of MPG On Weight")
# Detach the data set
detach(mtcars)
|
da705a25847670ddcde85a2e9cda4fef3d9353ed
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pmr/examples/leisure.white.Rd.R
|
c9c8c7f56f3c8af7ad486dccd289e6cee0227ba6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
leisure.white.Rd.R
|
library(pmr)
### Name: leisure.white
### Title: leisure.white
### Aliases: leisure.white
### Keywords: datasets

### ** Examples

# Load the leisure.white ranking data set shipped with the pmr package.
data(leisure.white)
## maybe str(leisure.white) ; plot(leisure.white) ...
|
16a340582eb61e3dff93651f9886ae2e59e41f6b
|
363b2ecc5154498e9587c9213bde894684775637
|
/s.r
|
fee48975ae061f559a45e2b2193d53073b630e16
|
[] |
no_license
|
kennybob/RepData_PeerAssessment1
|
f0bd21492fd60377da7696cd9414f9c4bf635eb0
|
a4341405d943afa3cb871ade9647083fc37f0ad7
|
refs/heads/master
| 2020-03-29T17:41:36.228222
| 2015-04-12T03:00:45
| 2015-04-12T03:00:45
| 33,689,006
| 0
| 0
| null | 2015-04-09T19:45:04
| 2015-04-09T19:45:03
| null |
UTF-8
|
R
| false
| false
| 2,255
|
r
|
s.r
|
getData <- function() {
  # Load the activity data, extracting it from the zip archive first when
  # the csv is not already present in the working directory.
  #
  # BUG FIX: the original tested !file.exists("activity.zip"), so it only
  # tried to unzip when the *archive* was missing (and never when the csv
  # was missing).  The check now looks for the csv we actually need.
  if (!file.exists("activity.csv")) {
    unzip("activity.zip")
  }
  read.csv("activity.csv")
}
get_mean_per_day <- function(data) {
  # Summarise daily activity: draw a histogram of the total steps taken per
  # day and print the mean and median of those daily totals to the console.
  daily_totals <- tapply(data$steps, data$date, sum)
  hist(daily_totals)
  cat("mean: ", mean(daily_totals, na.rm = TRUE), "\n")
  cat("median: ", median(daily_totals, na.rm = TRUE), "\n")
}
## Main Processing
## Fetch the data ##
act_df <- getData()

# What is the mean total number of steps taken per day?
get_mean_per_day(act_df)

# What is the average daily activity pattern?
# Mean steps per 5-minute interval, averaged across all days.
mean_Steps_per_interval <- tapply(act_df$steps, act_df$interval, mean, na.rm = TRUE)
df <- as.data.frame(mean_Steps_per_interval)
df$interval = rownames(df)
# NOTE(review): df$interval is character here (it comes from rownames);
# plot() below relies on implicit coercion -- confirm the x-axis ordering.
plot(df$interval, df$mean_Steps_per_interval, type ="l")
# Interval with the maximum average number of steps.
df[df$mean_Steps_per_interval==max(df$mean_Steps_per_interval),]

# Imputing missing values: count NAs per column.
sum(is.na(act_df[1]))
sum(is.na(act_df[2]))
sum(is.na(act_df[3]))

## Fill in the blanks: replace each missing steps value with the mean for
## its interval.
library(plyr)
mergeDf <- join(act_df,df)
mergeDf[is.na(mergeDf[1]),]$steps = mergeDf[is.na(mergeDf[1]),]$mean_Steps_per_interval

## Now recreate the histogram as before, on the imputed data.
get_mean_per_day(mergeDf)

# Are there differences in activity patterns between weekdays and weekends?
library(lubridate)
# Add a Weekday/Weekend factor derived from the date.
mergeDf_wk <- mutate(mergeDf, wkend=factor(weekdays(ymd(as.character(mergeDf$date))) %in% c("Saturday", "Sunday"), labels = c("Weekday","Weekend")))
## weekdays
get_mean_per_day(mergeDf_wk[mergeDf_wk$wkend=="Weekday",])
## weekends
get_mean_per_day(mergeDf_wk[mergeDf_wk$wkend=="Weekend",])

library(lattice)
# xyplot scatterplot y~x|A
# Make a panel plot containing a time series plot (i.e. type = "l")
# of the 5-minute interval (x-axis)
# and the average number of steps taken, averaged across all
# weekday days or weekend days (y-axis).
# Average steps per interval, separately for Weekday/Weekend.
agg <- aggregate( mergeDf_wk[,"steps"], mergeDf_wk[,c("interval","wkend")], FUN = mean )
big_join <- join(mergeDf_wk,agg)
# Panel function combining raw points with a loess smooth.
# NOTE(review): panel.smoother is defined but not passed to xyplot() below.
panel.smoother <- function(x, y) {
  panel.xyplot(x, y) # show points
  panel.loess(x, y)  # show smoothed line
}
big_join = transform(big_join, interval)
# `x` is the aggregate mean-steps column produced by aggregate() above.
xyplot(x~interval|wkend, data=big_join, layout=c(1,2), xlab = "Interval", ylab = "Average steps per interval", type = "o")
|
56dc1bccdbdb4001388cdf6b8bf5d9098321d484
|
75ed4d67d5f2315344c824183847dd3f382e3e1b
|
/plot1.R
|
561c70ada34056ec5e4e0d9aa2f74e57f6572c66
|
[] |
no_license
|
wanyx2015/ExData_Plotting2
|
75c29e848c6afdeca6e49ea0fc5e58b6e7fc8bc2
|
37285e34af487885d2d8388a57871e1b7521f338
|
refs/heads/master
| 2021-01-10T01:38:09.657336
| 2015-12-27T03:40:26
| 2015-12-27T03:40:26
| 48,618,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 892
|
r
|
plot1.R
|
##
## Question 1: Have total emissions from PM2.5 decreased in the United States from
## from 1999 to 2008? Using the base plotting system, make a plot showing the total
## PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008.
##
library(dplyr)

# preparing the data
# NOTE: the original script called rm(list = ls()) here; wiping the caller's
# global environment from inside a script is an anti-pattern and was removed.
NEI <- readRDS("summarySCC_PM25.rds")
# Source classification codes: not needed for this plot but loaded for
# parity with the other plot scripts in the assignment.
SCC <- readRDS("Source_Classification_Code.rds")

# calculate the sum of emissions group by year
data <- summarize(group_by(NEI, year), emissions = sum(Emissions))

# Let us plot
png("./plot1.png")
# set xaxt = "n" to disable the default x-axis (it would omit 1999)
plot(data$year, data$emissions/1000, type = "b",
     xaxt = "n", xlab = "Year", ylab = "PM2.5 Emissions (thousands of tons)",
     main = "Total US PM2.5 Emissions (1999 ~ 2008)",
     lwd = 2, col = "blue")
# customize the x-axis with a tick at every survey year
axis(side = 1, at = data$year)
dev.off()
|
e62be065dc619a3163419ced37d950033ee9e41f
|
5dd5fbe1e7fc1854b507b7dec048a2e0d5232510
|
/man/plot_return_residual_cox.Rd
|
90182b22ea7ce0c664f291f9a4362910a76b727a
|
[] |
no_license
|
cran/packDAMipd
|
33fc903b293f9fd63fd587925cd898287a09b7cf
|
bd61a85bf1171c1f97625486279de0fbdb6f3538
|
refs/heads/master
| 2023-03-18T21:22:37.105435
| 2021-03-03T08:20:14
| 2021-03-03T08:20:14
| 312,234,163
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,041
|
rd
|
plot_return_residual_cox.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_parameter_estimation_survival.R
\name{plot_return_residual_cox}
\alias{plot_return_residual_cox}
\title{Plotting and return the residuals after cox proportional hazard model}
\usage{
plot_return_residual_cox(
param_to_be_estimated,
indep_var,
covariates,
fit,
dataset
)
}
\arguments{
\item{param_to_be_estimated}{parameter to be estimated}
\item{indep_var}{independent variable}
\item{covariates}{covariates}
\item{fit}{fit object from coxph method}
\item{dataset}{data used for cox ph model}
}
\value{
plot and the residuals
}
\description{
Plotting and return the residuals after cox proportional hazard model
}
\examples{
\donttest{
data_for_survival <- survival::lung
surv_estimated <- use_coxph_survival("status", data_for_survival, "sex",
covariates = c("ph.ecog"), "time")
plot_return_residual_cox("status", "sex", covariates = c("ph.ecog"),
surv_estimated$fit,data_for_survival )
}
}
|
0b31186af3065833257094fca9d0671cac7edf55
|
c0fb7f572c90e5314c319d688649cc17224c4e88
|
/split_data.R
|
217c3a149c19c3e61ca35d312839ced0176b5994
|
[] |
no_license
|
toshkaexe/prediction-in-R
|
915bdca459bade8b5460bde6108868563353e5eb
|
50ed898c3e02ef2f07e5f7b78b842d816decfc3b
|
refs/heads/master
| 2021-01-20T06:44:25.597576
| 2017-08-29T15:01:46
| 2017-08-29T15:01:46
| 101,512,062
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 2,183
|
r
|
split_data.R
|
#ï..created_at
file_Name <- "C:/Users/azeltser/Desktop/Projekt/prediction_07082017/scanned_beacons.csv"
#ScannedBeacons <- read.csv(file = "C:/Users/azeltser/Desktop/Projekt/prediction_07082017/scanned_beacons.csv",head=TRUE,sep=",")
ScannedBeacons <- read.csv(file = file_Name, head=TRUE, sep=",")
ScannedBeacons
summary(ScannedBeacons)
head(ScannedBeacons)
#i..created_at
#id
#pos_id
#Document.name
#name of rows
nameofRows <- colnames(ScannedBeacons)
ScannedBeacons$Document.name
#ScannedBeacons<- data.frame(Time, id, poi_id, Document.name)
Hours <- format(as.POSIXct(strptime(ScannedBeacons$ï..created_at, "%Y-%m-%d %H:%M:%S",tz="")) ,format = "%H:%M:%S")
Dates <- format(as.POSIXct(strftime(ScannedBeacons$ï..created_at, "%Y-%m-%d %H:%M:%S",tz="")) ,format = "%Y-%m-%d")
ScannedBeacons$Dates <- Dates
ScannedBeacons$Hours <- Hours
# ScannedBeacons$col.name
ScannedBeacons
write.table(ScannedBeacons, file = "C:/Users/azeltser/Desktop/Projekt/prediction_07082017/11day_hours_scanned_beacons.csv",col.names=TRUE, sep=",", quote = FALSE)
hist(ScannedBeacons$Dates)
datetime <- ScannedBeacons$Hours
datetime
write.csv(ScannedBeacons, file = "C:/Users/azeltser/Desktop/Projekt/prediction_07082017/1__datatime_beacons.csv")
write.table(ScannedBeacons, file = "C:/Users/azeltser/Desktop/Projekt/prediction_07082017/1111111datatime_beacons.csv", na="",col.names=TRUE, sep=",", quote = FALSE)
heure <- as.integer(substr(datetime, 12, 13))
conversion <- data.frame(datetime=datetime, heure=heure,
period=cut(heure, c(-Inf, 7, 10, 12, 17, Inf),
labels=c("night", "morning", "noon", "afternoon", "evening", "tonight")))
#night <- interval(hms("00:00:00"), hms("05:00:00"))
night <- interval(as.POSIXct("00:00:00", format = "%H:%M:%S"), as.POSIXct("5:00:00", format = "%H:%M:%S"))
morning <- interval(hms("05:01:00"), hms("12:00:00"))
day <- interval(hms("12:01:00"), hms("17:00:00"))
tonight <- interval(hms("17:01:00"), hms("23:59:00"))
night
morning
day
tonight
as.POSIXct("00:00:00", format = "%H:%M:%S")
as.POSIXct("00:00:00", format = "%H:%M:%S")
|
a67c59a483818c85b5e8034d399638fa1f5c8f2b
|
c1b3f39f75bc72a4e5273ca9892a3e154508f918
|
/R/01_model_inputs_functions.R
|
f41e3ae863bd495e881ed515c0e46f8e57017c0c
|
[
"MIT"
] |
permissive
|
W-Mohammed/cdx2cea
|
4d6eaa58c6ab69601db0ee06b76c4fd39f2999da
|
ba40e252744a671cdca589973de3fb9d8366c3b1
|
refs/heads/master
| 2023-09-02T02:34:31.364339
| 2021-11-02T16:36:19
| 2021-11-02T16:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,651
|
r
|
01_model_inputs_functions.R
|
#' Base-case initial parameter set
#'
#' \code{load_params_init} generates the initial values of the CDX2 CEA model
#'
#' @param n_age_init Initial age of the cohort.
#' @param n_age_max Oldest age of the cohort.
#' @param n_cycles_year Number of cycles per year
#' @param d_c Discount factor for costs
#' @param d_e Discount factor for effectiveness
#' @param index_pce Personal consumption expenditures (PCE) price index
#' @param p_CDX2neg Proportion of CDX2-negative patients
#' @param r_DieMets Cancer mortality rate
#' @param r_RecurCDX2pos Rate of recurrence in CDX2 positive patients
#' @param hr_RecurCDX2neg Hazard ratio of recurrence in CDX2 negative vs
#' positive patients
#' @param hr_Recurr_CDXneg_Rx Hazard ratio for disease recurrence among patients
#' with CDX2-negative under chemotherapy versus CDX2-negative patients without
#' chemotherapy.
#' @param hr_Recurr_CDXpos_Rx Hazard ratio for disease recurrence among patients
#' with CDX2-positive under chemotherapy versus CDX2-positive patients without
#' chemotherapy.
#' @param p_Mets Proportion of recurrence being metastatic
#' @param c_Chemo Cost of chemotherapy
#' @param c_ChemoAdmin Cost of chemotherapy administration
#' @param c_CRCStg2_init Initial costs in CRC Stage II (minus chemo and
#' chemotherapy administration)
#' @param c_CRCStg2_cont Continuing costs in CRC Stage II
#' @param c_CRCStg4_cont Continuing costs in CRC Stage IV
#' @param ic_DeathCRCStg2 Increase in cost when dying from cancer while in
#' Stage II
#' @param ic_DeathOCStg2 Increase in cost when dying from Other Causes (OC)
#' while in Stage II
#' @param c_Test Cost of IHC staining
#' @param u_Stg2 Utility for CRC Stage II patients
#' @param u_Stg2Chemo Utility for CRC Stage II patients under chemotherapy
#' @param u_Mets Utility for metastatic recurrence state
#' @return
#' List of all parameters
#' @export
load_params_init <- function(
  # Initial and final ages
  n_age_init = 65,
  n_age_max = 100,
  # Number of cycles per year (monthly cycles)
  n_cycles_year = 12,
  # Discount factors (annual)
  d_c = 0.03,
  d_e = 0.03,
  # Personal consumption expenditures (PCE) price index to inflate cancer costs
  index_pce = 0.018,
  # Proportion of CDX2-negative patients obtained from Step 3 of Figure 1 in page 213
  p_CDX2neg = 0.07174887892376682, # (23+25)/((23+25) + (389+232))
  # Proportion of recurrence being metastatic (CALIBRATED)
  p_Mets = 0.980840626,
  # Cancer mortality rate (CALIBRATED)
  r_DieMets = 0.03870286,
  # Rate of recurrence in CDX2 positive patients (CALIBRATED)
  r_RecurCDX2pos = 0.003328773,
  # Hazard ratio of recurrence in CDX2 negative vs positive patients (CALIBRATED)
  hr_RecurCDX2neg = 3.601069078,
  # # Hazard ratio for disease recurrence among patients with CDX2-negative
  # # under chemo versus CDX2-negative patients without chemotherapy. From:
  # # André et al. JCO 2015 Table 1, Stage III DFS: 0.79 [0.67, 0.94]
  # hr_Recurr_CDXneg_Rx = 0.79,
  ## Hazard ratio for disease recurrence among patients with CDX2-negative
  # under chemo versus CDX2-negative patients without chemotherapy. From:
  # QUASAR. Lancet 2007 Figure 3, Stage II RR [99% CI]: 0.82 [0.63, 1.08]
  hr_Recurr_CDXneg_Rx = 0.82,
  # Hazard ratio for disease recurrence among patients with CDX2-positive
  # under chemo versus CDX2-positive patients without chemotherapy. From: [TO BE ADDED]
  hr_Recurr_CDXpos_Rx = 1.00,
  ### State rewards
  ## Costs
  # Cost of chemotherapy
  c_Chemo = 1576,
  # Cost of chemotherapy administration
  c_ChemoAdmin = 315,
  # Initial costs in CRC Stage II (minus chemo and chemo admin) in 2004 USD
  # NOTE(review): subtracts (1391 + 315) although c_Chemo defaults to 1576 --
  # confirm which chemotherapy cost the 1391 figure refers to.
  c_CRCStg2_init = (32039 - (1391+315)),
  # Continuing costs in CRC Stage II in 2004 USD
  c_CRCStg2_cont = 1722,
  # Continuing costs in CRC Stage IV in 2004 USD
  c_CRCStg4_cont = 7629,
  # Increase in cost when dying from cancer while in Stage II in 2004 USD
  ic_DeathCRCStg2 = 41500,
  # Increase in cost when dying from Other Causes (OC) while in Stage II in 2004 USD
  ic_DeathOCStg2 = 8969,
  # Cost of IHC staining
  c_Test = 112,
  ## Utilities
  u_Stg2 = 0.74, # Ness 1999, Outcome state "A" from table 3
  u_Stg2Chemo = 0.67, # Ness 1999, Outcome state "BC" from table 4
  u_Mets = 0.25 # Ness 1999, Outcome state "FG" from table 3
){
  # Number of cycles
  n_cycles <- (n_age_max - n_age_init)*n_cycles_year # Time horizon, number of monthly cycles
  # Inflation factor based on PCE from 2004 USD to 2020 USD
  # NOTE(review): the exponent 16 corresponds to 2004 -> 2020, but several
  # inline comments below say "2018 USD" -- confirm the intended target year.
  inf_pce <- (1 + index_pce)^16
  # Inflate costs (chemo and test costs use a 2-year inflation factor)
  c_Chemo <- c_Chemo*(1 + index_pce)^2 # Cost of chemotherapy
  c_ChemoAdmin <- c_ChemoAdmin*(1 + index_pce)^2 # Cost of chemotherapy administration
  c_CRCStg2_init <- (c_CRCStg2_init*inf_pce)/n_cycles_year # Initial costs in CRC Stage II (minus chemo and chemo admin) inflated from 2004 USD to 2018 USD using price index from PCE
  c_CRCStg2_cont <- (c_CRCStg2_cont*inf_pce)/n_cycles_year # Continuing costs in CRC Stage II inflated from 2004 USD to 2018 USD using price index from PCE
  c_CRCStg4_cont <- (c_CRCStg4_cont*inf_pce)/n_cycles_year # Continuing costs in CRC Stage IV inflated from 2004 USD to 2018 USD using price index from PCE
  ic_DeathCRCStg2 <- ic_DeathCRCStg2*inf_pce # 92851, # Increase in cost when dying from cancer while in Stage II inflated from 2004 USD to 2018 USD using price index from PCE
  ic_DeathOCStg2 <- ic_DeathOCStg2*inf_pce # Increase in cost when dying from Other Causes (OC) while in Stage II inflated from 2004 USD to 2018 USD
  c_Test <- c_Test*(1 + index_pce)^2 # Cost of IHC staining
  ### Create list of initial parameters (infladjusted values replace the raw defaults)
  l_params_init <- list(
    # Initial and final ages
    n_age_init = n_age_init,
    n_age_max = n_age_max,
    # Number of cycles
    n_cycles = n_cycles,
    # Inflation factor based on PCE
    inf_pce = inf_pce,
    # Number of cycles per year
    n_cycles_year = n_cycles_year,
    # Discount factors
    d_c = d_c,
    d_e = d_e,
    # Personal consumption expenditures (PCE) price index
    index_pce = index_pce,
    # Disease parameters
    p_CDX2neg = p_CDX2neg,
    r_DieMets = r_DieMets,
    r_RecurCDX2pos = r_RecurCDX2pos,
    hr_RecurCDX2neg = hr_RecurCDX2neg,
    hr_Recurr_CDXneg_Rx = hr_Recurr_CDXneg_Rx,
    hr_Recurr_CDXpos_Rx = hr_Recurr_CDXpos_Rx,
    p_Mets = p_Mets,
    # Costs
    c_Chemo = c_Chemo,
    c_ChemoAdmin = c_ChemoAdmin,
    c_CRCStg2_init = c_CRCStg2_init,
    c_CRCStg2_cont = c_CRCStg2_cont,
    c_CRCStg4_cont = c_CRCStg4_cont,
    ic_DeathCRCStg2 = ic_DeathCRCStg2,
    ic_DeathOCStg2 = ic_DeathOCStg2,
    c_Test = c_Test,
    # Utilities
    u_Stg2 = u_Stg2,
    u_Stg2Chemo = u_Stg2Chemo,
    u_Mets = u_Mets
  )
  return(l_params_init)
}
#' Load mortality data
#'
#' \code{load_mort_data} is used to load age-specific mortality from .csv file
#' into vector.
#'
#' @param file String with the location and name of the file with mortality
#' data. If \code{NULL}, \code{v_r_mort_by_age} will be used as default
#' @return
#' A vector with mortality by age.
#' @export
load_mort_data <- function(file = NULL){
  # Use the bundled life table unless the user supplies a .csv file
  if (is.null(file)) {
    df_r_mort_by_age <- all_cause_mortality
  } else {
    df_r_mort_by_age <- read.csv(file = file)
  }
  # Keep only the age column and the total (all-cause) mortality rate;
  # note the result is a two-column data frame despite the "v_" prefix
  # used by callers.
  dplyr::select(df_r_mort_by_age, .data$Age, .data$Total)
}
#' Load all parameters
#'
#' \code{load_all_params} loads all parameters for the decision model from
#' multiple sources and creates a list.
#'
#' @param l_params_init List with the initial set of parameters. If
#'   \code{NULL}, the defaults from \code{\link{load_params_init}} are used.
#' @param file_init String with the location and name of the file with the
#'   initial set of parameters. Currently unused; retained for backward
#'   compatibility.
#' @param file_mort String with the location and name of the file with
#'   mortality data; passed to \code{\link{load_mort_data}}.
#' @return
#' A list of all parameters used for the decision model.
#' @export
load_all_params <- function(l_params_init = NULL,
                            file_init = NULL,
                            file_mort = NULL){ # User defined
  #### Load initial set of parameters ####
  # Fall back to the package defaults when the user supplies nothing
  if (is.null(l_params_init)) {
    l_params_init <- load_params_init()
  }

  #### All-cause age-specific mortality ####
  v_r_mort_by_age <- load_mort_data(file = file_mort)

  # Evaluate the derived quantities with the initial parameters in scope
  l_params_all <- with(as.list(l_params_init), {
    #### General setup ####
    v_names_str <- c("No Treat", "Test & treat") # CEA strategies
    n_str <- length(v_names_str)                 # Number of strategies
    # One label per (age, within-year cycle) pair, e.g. "65.1" ... "99.12"
    v_age_names <- paste(rep(n_age_init:(n_age_max - 1), each = n_cycles_year),
                         1:n_cycles_year,
                         sep = ".")
    # Vector with the health states of the model
    v_names_states <- c("CDX2pos", "CDX2neg",
                        "Mets", "Dead_OC", "Dead_C")
    n_states <- length(v_names_states) # number of health states
    # Within-cycle correction (WCC)
    v_wcc <- darthtools::gen_wcc(n_cycles = n_cycles,
                                 # method = "Simpson1/3") # vector of wcc
                                 method = "half-cycle")   # vector of wcc

    # Filter mortality for the modelled ages only
    v_r_mort_by_age <- v_r_mort_by_age %>%
      filter(Age >= n_age_init & Age < n_age_max) %>%
      dplyr::select(Total) %>%
      as.matrix()

    # Convert annual mortality rates to monthly rates, one per cycle
    v_r_mort_by_age_month <- rep(v_r_mort_by_age,
                                 each = n_cycles_year) / n_cycles_year

    #### Create list with all derived parameters ####
    list(
      v_names_str = v_names_str,
      n_str = n_str,
      n_age_init = n_age_init,
      n_cycles = n_cycles,
      v_age_names = v_age_names,
      v_names_states = v_names_states,
      n_states = n_states,
      v_r_mort_by_age_month = v_r_mort_by_age_month,
      v_wcc = v_wcc
    )
  })
  l_params_all <- c(l_params_all,
                    l_params_init) # Add initial set of parameters
  return(l_params_all)
}
#' Update parameters
#'
#' \code{update_param_list} replaces entries in a list of model parameters
#' with new values for selected parameters.
#'
#' @param l_params_all List with all parameters of the decision model.
#' @param params_updated Named list or named vector with the parameters
#'   whose values should be updated.
#' @return
#' A list with all parameters, with the selected entries updated.
#' @export
update_param_list <- function(l_params_all, params_updated){
  # modifyList() needs a list; coerce a named vector into one first
  if (typeof(params_updated) != "list") {
    params_updated <- split(unname(params_updated), names(params_updated))
  }
  # Overwrite matching entries, leaving all other parameters untouched
  modifyList(l_params_all, params_updated)
}
|
86864437e527d5fdf152d742d0c932a358cfbf37
|
a71b7fe35d652d86f136823cd1801eb51d902839
|
/highway.R
|
31bc91b6244b92d95152d38a429aefcc6116d6c6
|
[] |
no_license
|
StaThin/data
|
9efd602022db768b927c3338e5ce7483f57e3469
|
d7f6c6b5d4df140527c269b032bb3b0be45ceeeb
|
refs/heads/master
| 2023-03-29T18:40:09.694794
| 2023-03-15T09:32:42
| 2023-03-15T09:32:42
| 29,299,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,637
|
r
|
highway.R
|
# Highway accident data: 39 road segments with 12 variables.
# NOTE(review): column meanings inferred from the Italian names (inc =
# accident rate, lun = length, traf = traffic, camion = trucks, velo =
# speed limit, lar/lar0 = widths, accessi = access points, corsie = lanes,
# tipo = road type a-d) -- confirm against the original codebook.
"highway" <-
structure(list(inc = c(4.58, 2.86, 3.02, 2.29, 1.61, 6.87, 3.85,
6.12, 3.29, 5.88, 4.2, 4.61, 4.8, 3.85, 2.69, 1.99, 2.01, 4.22,
2.76, 2.55, 1.89, 2.34, 2.83, 1.81, 9.23, 8.6, 8.21, 2.93, 7.48,
2.57, 5.77, 2.9, 2.97, 1.84, 3.78, 2.76, 4.27, 3.05, 4.12), lun = c(4.99,
16.11, 9.75, 10.65, 20.01, 5.97, 8.57, 5.24, 15.79, 8.26, 7.03,
13.28, 5.4, 2.96, 11.75, 8.86, 9.78, 5.49, 8.63, 20.31, 40.09,
11.81, 11.39, 22, 3.58, 3.23, 7.73, 14.41, 11.54, 11.1, 22.09,
9.39, 19.49, 21.01, 27.16, 14.03, 20.63, 20.06, 12.91), traf = c(69,
73, 49, 61, 28, 30, 46, 25, 43, 23, 23, 20, 18, 21, 27, 22, 19,
9, 12, 12, 15, 8, 5, 5, 23, 13, 7, 10, 12, 9, 4, 5, 4, 5, 2,
3, 1, 3, 1), camion = c(8, 8, 10, 13, 12, 6, 8, 9, 12, 7, 6,
9, 14, 8, 7, 9, 9, 11, 8, 7, 13, 8, 9, 15, 6, 6, 8, 10, 7, 8,
8, 10, 13, 12, 10, 8, 11, 11, 10), velo = c(55, 60, 60, 65, 70,
55, 55, 55, 50, 50, 60, 50, 50, 60, 55, 60, 60, 50, 55, 60, 55,
60, 50, 60, 40, 45, 55, 55, 45, 60, 45, 55, 55, 55, 55, 50, 55,
60, 55), lar = c(12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 13, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 11, 13, 12, 10, 12, 12, 11, 12, 12), lar0 = c(10,
10, 10, 10, 10, 10, 8, 10, 4, 5, 10, 2, 8, 10, 10, 10, 10, 6,
6, 10, 8, 10, 8, 7, 2, 2, 8, 6, 3, 7, 3, 1, 4, 8, 3, 4, 4, 8,
3), super = c(1.2, 1.43, 1.54, 0.94, 0.65, 0.34, 0.47, 0.38,
0.95, 0.12, 0.29, 0.15, 0, 0.34, 0.26, 0.68, 0.2, 0.18, 0.14,
0.05, 0.05, 0, 0, 0, 0.56, 0.31, 0.13, 0, 0.09, 0, 0, 0, 0, 0,
0.04, 0.07, 0, 0, 0), svin = c(0, 0, 0, 0, 0, 1.84, 0.7, 0.38,
1.39, 1.21, 1.85, 1.21, 0.56, 0, 0.6, 0, 0.1, 0.18, 0, 0.99,
0.12, 0, 0.09, 0, 2.51, 0.93, 0.52, 0.07, 0.09, 0, 0.14, 0, 0,
0.1, 0.04, 0, 0, 0, 0), accessi = c(4.6, 4.4, 4.7, 3.8, 2.2,
24.8, 11, 18.5, 7.5, 8.2, 5.4, 11.2, 15.2, 5.4, 7.9, 3.2, 11,
8.9, 12.4, 7.8, 9.6, 4.3, 11.1, 6.8, 53, 17.3, 27.3, 18, 30.2,
10.3, 18.2, 12.3, 7.1, 14, 11.3, 16.3, 9.6, 9, 10.4), corsie = c(8,
4, 4, 6, 4, 4, 4, 4, 4, 4, 4, 4, 2, 4, 4, 4, 4, 2, 2, 4, 4, 2,
2, 2, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), tipo = structure(c(1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4), .Label = c("a",
"b", "c", "d"), class = "factor")), .Names = c("inc", "lun",
"traf", "camion", "velo", "lar", "lar0", "super", "svin", "accessi",
"corsie", "tipo"), class = "data.frame", row.names = c("1", "2",
"3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14",
"15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25",
"26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36",
"37", "38", "39"))
|
bb25bdaec49128c35fa2634460dd58577a19d3b7
|
c353229e39ed2709bcc73fc918269ed88a2588ce
|
/tests/testthat/test-annoplot_accessors.R
|
11ebe3e31ff01740e44447630bee054a2ed8ea54
|
[] |
no_license
|
amytildazhang/annoplots
|
282236f21fd848fce2e643a147c52201de970f55
|
865301c0d8a35e85dac6a33f347c0f29f0bce2ba
|
refs/heads/main
| 2023-08-14T20:22:35.773416
| 2021-09-13T23:27:09
| 2021-09-13T23:27:09
| 406,158,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,207
|
r
|
test-annoplot_accessors.R
|
# Accessor tests for annoplot objects; the test_ap / test_apfade fixtures
# are created by the sourced script below.
source("../../data-raw/test-objects.R")

test_that("Can access base plots", {
  expect_silent(ap_plots(test_ap))
  expect_silent(ap_plots(test_apfade))

  # ap_plots() should return a list whose elements are ggplot ("gg") objects
  ps <- ap_plots(test_ap)[[1]]
  expect_s3_class(ps, "gg")
})

test_that("'highlight' returns plots", {
  # expect_silent(ap_highlight(test_ap, 1:10)) # currently not silent because of join in filter
  ps <- ap_highlight(test_ap, 1:10)[[1]]
  expect_s3_class(ps, "gg")
})

test_that("Can access public values", {
  # Every public field should be retrievable without warnings or messages
  for (valname in c("app_df", "plots", "annotype", "annotation_function",
                    "nano_plots", "data", "filter_points", "hover_cols", "id_col",
                    "mutate", "n_plots", "plot_dims")) {
    expect_silent(ap_get(test_ap, valname))
  }
})

# Setter tests below were left unfinished (the vals list has a dangling
# assignment) and are kept commented out.
#
# test_that("Can set public values", {
#
#
#   vals <- list("app_df" = radon, "plots" = list(p_radon),
#                "annotation_function" = )
#   for (valname in c("app_df", "plots", "annotype", "annotation_function",
#                     "nano_plots", "data", "filter_points", "hover_cols", "id_col",
#                     "mutate", "n_plots", "plot_dims")) {
#     expect_silent(ap_get(test_ap, valname))
#   }
#
# })
|
1df40dd89396ad0990a07f85a7a511ccd150c10c
|
9cb71c08fb66a2b5bd6d24b50737392e33663e78
|
/scripts/data_transform.R
|
21752f4095e89f43a6aa73c874c38a5b7f801827
|
[] |
no_license
|
lulzzz/agro
|
d49562a6af10bd61f14952e4011d195c7f77b1a2
|
b6c88bf052a68daeb8fc7423221b61190ab4d015
|
refs/heads/master
| 2021-01-21T10:46:18.846667
| 2015-10-13T20:46:06
| 2015-10-13T20:46:06
| 83,484,832
| 4
| 1
| null | 2017-02-28T22:16:58
| 2017-02-28T22:16:58
| null |
UTF-8
|
R
| false
| false
| 144
|
r
|
data_transform.R
|
# NOTE(review): require() returns FALSE instead of erroring when tidyr is
# missing -- library(tidyr) would fail fast. spread() is also superseded by
# pivot_wider() in tidyr >= 1.0.
require(tidyr)

# creating ndvi average value time series for all fields:
# columns 1, 2 and 4 of veg_sample (defined elsewhere; presumably field id,
# date and mean NDVI -- confirm) are reshaped wide so each date becomes a
# column of that field's average NDVI.
ndvi_avg <- spread(veg_sample[, c(1,2,4)], key = date, value = ndvi_avg)
|
e256bea79c599dde12b22a8c136387bb8885edc0
|
7584c4b6119cf7985b1ea152f03de0a2619fe13b
|
/man/root.Rd
|
4a9bad5e1dbc92fd4efc83756dc65db58db8c987
|
[] |
no_license
|
blueraleigh/macroevolution
|
2e380d14d91c7312d6ce1298d808f32b4b1becbd
|
bfa0644f4941940d7812106914add06fd5540656
|
refs/heads/master
| 2021-12-09T20:12:57.472284
| 2021-11-10T22:43:14
| 2021-11-10T22:43:14
| 213,418,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 283
|
rd
|
root.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treeio.R
\name{root}
\alias{root}
\title{Root node index}
\usage{
root(phy)
}
\arguments{
\item{phy}{An object of class \code{tree}.}
}
\value{
The index of the root node
}
\description{
Root node index
}
|
3338eddbc3fe37e9eeda5ce594af65a3cdaeb3b6
|
3f4cedffbce92b6bb385b5c6b597531d5b1b868d
|
/R/merge_duplicate_alerts.R
|
dabc3d69e3fd04dfb2cb2fa6ae7aa4c488eacf86
|
[
"MIT"
] |
permissive
|
theagent/promedr
|
32f35896039e46b7cded6bec9b24d3c0b65d4522
|
0782feab24307774327e1967f513a0abcf63c520
|
refs/heads/master
| 2021-04-21T07:08:29.291426
| 2020-03-09T15:40:53
| 2020-03-09T15:40:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,450
|
r
|
merge_duplicate_alerts.R
|
##' Merge rows with duplicate alerts
##'
##' Each record in ProMED and HealthMap data feeds is (in principle)
##' associated with
##' a unique alert-id. Occasionally, we get multiple rows that have
##' the same alert-id. In such instances, we want to merge these rows
##' into a single row in a meaningful way. For the meta-data associated
##' with the records e.g., the URL, it would be useful to retain all
##' of them, especially if these columns are not being used in the
##' analysis downstream. For others, e.g., the longitude and latitude,
##' we expect them to be the same across the set of records, but if they
##' are not, we want to retain one of them. Finally, for numeric columns
##' (particularly cases) we want a summary statistic like median or
##' mean.
##' This function merges the records with the user picking which
##' columns should be merged in which way i.e., whether all values
##' or only one of them should be retained.
##' The only exception is the column called cases, which is always
##' summarised using a mathematical function specified by the arg rule.
##'
##' @param df data frame containing duplicate alerts. Must contain a
##' column called cases.
##' @param keep_all character vector. Names of columns for which values
##' in all rows should be retained (collapsed into one string).
##' @param keep_first character vector. Names of columns for which
##' only the first value should be retained.
##' @param use_rule columns that should be summarised using rule.
##' These should all be numeric.
##' @param rule any valid R function that accepts a numeric vector
##' and returns a number. Defaults to median
##' @param sep separator used to paste multiple values
##' from a column
##' @return data.frame with a single row
##' @author Sangeeta Bhatia
##' @examples ## Made-up data
##' made_up <- data.frame(
##'    country = rep("singapore", 3),
##'    cases = c(3, 7, 9),
##'    alert_id = rep(letters[1], 3),
##'    longitude = c(103.8, 103.8, 103.8),
##'    latitude = c(1.4, 1.5, 1.4)
##' )
##' ##Alert-ids in this data.frame are duplicated. Merging the rows then
##' merged <- merge_duplicate_alerts(
##'   made_up,
##'   keep_all = c("country", "alert_id"),
##'   keep_first = c("longitude", "latitude"))
##' @importFrom stats median
##' @export
merge_duplicate_alerts <- function(df,
                                   keep_all,
                                   keep_first,
                                   use_rule = c("cases"),
                                   rule = stats::median,
                                   sep = " / ") {

    all_cols <- colnames(df)

    ## Sanity check: at least one column should have identical values
    ## across all rows, else there is little point in merging.
    unique_vals <- vapply(
        all_cols, function(x) length(unique(df[[x]])), integer(1)
    )
    if (! any(unique_vals == 1)) {
        ## BUG FIX: the message was assigned to a misspelled 'masg' while
        ## warning() was called with the then-undefined 'msg'.
        msg <- "None of the columns in the data have identical values
          across rows. Will merge anyway but check that you
          really want to merge rows."
        warning(msg)
    }

    ## Columns without an explicit rule fall through to keep_first
    ## behaviour (the template row below already carries the first value).
    missing <- which(! all_cols %in% c(keep_all, keep_first, use_rule))
    if (length(missing) > 0) {
        msg <- "A merging rule should be specified for all columns."
        msg <- paste(msg, "No rule specified for following columns: ")
        msg <- paste(msg, paste(all_cols[missing], collapse = ", "),
                     " Defaults to keep_first.")
        warning(msg)
    }
    ## cases really should be dealt with via 'rule'. Warn if the user
    ## asks for keep_first on cases.
    if ("cases" %in% keep_first) {
        msg <- "You have chosen to retain only the first value in cases"
        warning(msg)
    }
    ## This is an error since cases would no longer be numeric and would
    ## cause problems in downstream analysis.
    if ("cases" %in% keep_all) {
        msg <- "You have chosen to retain all values in column cases."
        msg <- paste(
            msg, "This will make column cases non-numeric."
        )
        stop(msg, call. = FALSE)
    }

    common <- intersect(keep_first, keep_all)
    if (length(common) > 0) {
        msg <- paste(
            "Columns", paste(common, collapse = ", "),
            "are in both keep_first and keep_all."
        )
        msg <- paste(
            msg, "Only first value will be retained for these columns."
        )
        warning(msg)
    }

    ## All use_rule columns must be numeric for 'rule' to make sense.
    are_numeric <- vapply(
        use_rule, function(x) is.numeric(df[[x]]), logical(1)
    )
    if (! all(are_numeric)) {
        msg <- "All columns specified using use_rule should be numeric."
        msg <- paste(
            msg, "Not numeric: ",
            paste(use_rule[! are_numeric], collapse = ", ")
        )
        stop(msg, call. = FALSE)
    }

    ## Template for the merged (single-row) output.
    out <- df[1, ]

    ## keep_all: collapse every value into one 'sep'-separated string.
    for (column in keep_all) {
        out[[column]] <- paste(
            df[[column]], collapse = sep
        )
    }

    ## keep_first: retain the first value, warning when values differ.
    for (column in keep_first) {
        ## BUG FIX: the uniqueness check previously inspected the one-row
        ## template 'out', so it could never trigger; check the input rows.
        vals <- unique(df[[column]])
        if (length(vals) > 1) {
            ## BUG FIX: the message previously overwrote its first part.
            msg <- paste(
                "Not all values in", column,
                "are identical. Retaining only first"
            )
            warning(msg)
        }
        out[[column]] <- df[[column]][1]
    }

    ## use_rule: summarise each numeric column with 'rule'.
    for (column in use_rule) {
        out[[column]] <- rule(df[[column]], na.rm = TRUE)
    }
    out
}
|
59a50a2878356936a3542945bc922573444a008c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gsscopu/examples/sscopu.Rd.R
|
cb29e86f4fd8a3c25a8b4abc0718fdc0285d04f7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
sscopu.Rd.R
|
# Auto-extracted example code for gsscopu::sscopu (taken from the package's
# Rd file); exercises copula-density fitting on simulated 2-D data.
library(gsscopu)
### Name: sscopu
### Title: Estimating Copula Density Using Smoothing Splines
### Aliases: sscopu sscopu2
### Keywords: smooth models distribution

### ** Examples
## simulate 2-D data
x <- matrix(runif(200),100,2)
## fit copula density
fit <- sscopu(x)
## "same fit"
fit2 <- sscopu2(x,id=fit$id)
## symmetric fit
fit.s <- sscopu(x,sym=TRUE,id=fit$id)
## Kendall's tau and Spearman's rho
summary(fit); summary(fit2); summary(fit.s)
## clean up
## Not run: rm(x,fit,fit2,fit.s)
|
c73398882972c25dda137584f4a896b05e359552
|
809619e09165bb59d4b068eb8bad833d0a30c411
|
/man/result_inspector.Rd
|
bd190d1c93d6dbe056aaa789a422520858fd9e69
|
[] |
no_license
|
cran/GWASinspector
|
2910c12799e24c0c7e9f34df871f7d19c658c36a
|
5fabba85bf8d9ce8eb30c51344be4cb4a59489fe
|
refs/heads/master
| 2023-05-24T16:53:12.048188
| 2023-05-15T17:30:02
| 2023-05-15T17:30:02
| 236,609,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 725
|
rd
|
result_inspector.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/result_inspector.R
\name{result_inspector}
\alias{result_inspector}
\title{Displays a brief report after running the QC pipeline}
\usage{
result_inspector(inspector)
}
\arguments{
\item{inspector}{An instance of \linkS4class{Inspector} class. Check \code{\link{setup_inspector}} for more details.}
}
\value{
A data.table containing a brief report about the results.
}
\description{
This function displays a brief report about the results of running the Inspector algorithm on a set of GWAS result files.
The full report including plots, cleaned files and summary statistics are generated and saved in the output folder during the algorithm run.
}
|
403c8f4edd725e165f551acea9cfc30676aa6d4c
|
04f349102910e5052ea34d3e7744e4d79a2fbb4f
|
/R/cof_lv_ugb.R
|
25c5a488fd803da9423350a23e9fa4f1262c26fa
|
[
"MIT"
] |
permissive
|
scoultersdcoe/CNAIM
|
f0728b00f0d0628e554975c78d767ee2c472fb3b
|
5c77ce4c50ef92fd05b9bb44b33fdca18302d020
|
refs/heads/master
| 2023-08-23T22:54:59.450292
| 2021-03-12T15:52:54
| 2021-03-12T15:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,973
|
r
|
cof_lv_ugb.R
|
#' @title Financial cost of Failure for LV UGB
#' @description This function calculates financial consequences of failure
#' (cf. section 7.3, page 75, CNAIM, 2017). Financial consequences
#' of failure is used in
#' the derivation of consequences of failure see \code{\link{cof}}().
#' @param lv_asset_category String The type of LV asset category
#' @return Numeric. Financial consequences of failure for LV UGB
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' financial_cof_lv_ugb(lv_asset_category = "LV UGB")
financial_cof_lv_ugb <- function(lv_asset_category){

  # Bind the NSE column name used below to silence R CMD check notes.
  # (A previously unused lookup of the health-index asset category has
  # been removed; it scanned gb_ref without its result ever being used.)
  `Asset Register Category` = NULL

  # Reference cost of failure table 16 --------------------------------------
  reference_costs_of_failure_tf <- dplyr::filter(gb_ref$reference_costs_of_failure,
                                                 `Asset Register Category` ==
                                                   lv_asset_category)

  # Reference financial cost of failure -------------------------------------
  fcost <- reference_costs_of_failure_tf$`Financial - (GBP)`

  # Type financial factor (fixed at 1 for LV UGB) ---------------------------
  type_financial_factor <- 1

  # Access financial factor (fixed at 1 for LV UGB) -------------------------
  access_financial_factor <- 1

  # Financial consequences factor -------------------------------------------
  fc_factor <- type_financial_factor * access_financial_factor

  # Financial consequences of failure ---------------------------------------
  return(fc_factor * fcost)
}
#' @title Safety cost of Failure for LV UGB
#' @description This function calculates safety consequences of failure
#' (cf. section 7.3, page 75, CNAIM, 2017). Safety consequences
#' of failure is used in
#' the derivation of consequences of failure see \code{\link{cof}}().
#' @param lv_asset_category String The type of LV asset category
#' @param location_risk String. Location safety factor criteria for LV UGB
#' (cf. section D1.2.1, page 162, CNAIM, 2017).
#' @param type_risk String. Asset type safety factor criteria for LV UGB
#' setting (cf. table 214, page 164, CNAIM, 2017).
#' @return Numeric. Safety consequences of failure for LV UGB
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' safety_cof_lv_ugb(lv_asset_category = "LV UGB", location_risk = "Default", type_risk = "Default")
safety_cof_lv_ugb <- function(lv_asset_category,
                              location_risk,
                              type_risk){

  # Bind the NSE column name used below to silence R CMD check notes.
  # (A previously unused lookup of the health-index asset category has
  # been removed; its result was never used.)
  `Asset Register Category` = NULL

  reference_costs_of_failure_tf <- dplyr::filter(gb_ref$reference_costs_of_failure,
                                                 `Asset Register Category` ==
                                                   lv_asset_category)

  # Reference safety cost of failure ----------------------------------------
  scost <- reference_costs_of_failure_tf$`Safety - (GBP)`

  # Map the user-facing defaults onto the row/column labels of the
  # safety-consequence lookup table
  if (location_risk == "Default") location_risk <- "Medium (Default)"
  if (location_risk == "Medium") location_risk <- "Medium (Default)"
  if (type_risk == "Default") type_risk <- "Medium"

  safety_conseq_factor_sg_tf_oh <- gb_ref$safety_conseq_factor_sg_tf_oh

  # Locate the factor: row by location risk, column by type risk
  row_no <- which(safety_conseq_factor_sg_tf_oh$
                    `Safety Consequence Factor - Switchgear, Transformers & Overhead Lines...2` ==
                    location_risk)

  col_no <- grep(type_risk, colnames(safety_conseq_factor_sg_tf_oh))

  safety_consequence_factor <- safety_conseq_factor_sg_tf_oh[row_no, col_no]

  # Safety consequence of failure -------------------------------------------
  safety_cof <- safety_consequence_factor * scost

  return(safety_cof)
}
#' @title Environmental cost of Failure for LV UGB
#' @description This function calculates environmental consequences of failure
#' (cf. section 7.3, page 75, CNAIM, 2017). Environmental consequences
#' of failure is used in
#' the derivation of consequences of failure see \code{\link{cof}}().
#' @param lv_asset_category String The type of LV asset category
#' @return Numeric. Environmental consequences of failure for LV UGB
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' environmental_cof_lv_ugb(lv_asset_category = "LV UGB")
environmental_cof_lv_ugb <- function(lv_asset_category){

  # Bind the NSE column name used below to silence R CMD check notes.
  # (A previously unused lookup of the health-index asset category has
  # been removed; its result was never used.)
  `Asset Register Category` = NULL

  reference_costs_of_failure_tf <- dplyr::filter(gb_ref$reference_costs_of_failure,
                                                 `Asset Register Category` ==
                                                   lv_asset_category)

  # Reference environmental cost of failure ---------------------------------
  ecost <- reference_costs_of_failure_tf$`Environmental - (GBP)`

  # Type, size and location environmental factors are all fixed at 1
  # for LV UGB
  type_environmental_factor <- 1
  size_environmental_factor <- 1
  location_environmental_factor <- 1

  environmental_consequences_factor <- (type_environmental_factor *
                                        size_environmental_factor *
                                        location_environmental_factor)

  # Environmental consequences ----------------------------------------------
  environmental_cof <- environmental_consequences_factor * ecost

  return(environmental_cof)
}
#' @title Network cost of Failure for LV UGB
#' @description This function calculates network cost of failure for
#' all asset categories exclusive the assets EHV and 132kV transformers.
#' (cf. section 7.6, page 83, CNAIM, 2017). Network cost of failure
#' is used in the derivation of consequences of failure see \code{\link{cof}}().
#' @param lv_asset_category String The type of LV asset category
#' @param no_customers Numeric. The number of customers
#' fed by an individual asset.
#' @param kva_per_customer Numeric. If the asset has an exceptionally high
#' demand per customer type in kVA per customer. A setting of \code{"Default"}
#' results in a multiplication factor of 1 (cf. table 18, page 86, CNAIM, 2017).
#' @return Numeric. Network cost of failure.
#' @source DNO Common Network Asset Indices Methodology (CNAIM),
#' Health & Criticality - Version 1.1, 2017:
#' \url{https://www.ofgem.gov.uk/system/files/docs/2017/05/dno_common_network_asset_indices_methodology_v1.1.pdf}
#' @export
#' @examples
#' network_cof_lv_ugb(lv_asset_category = "LV UGB",
#' no_customers = 750, kva_per_customer = 51)
network_cof_lv_ugb <- function(lv_asset_category,
                               no_customers,
                               kva_per_customer = "Default") {

  # Bind NSE column names used below to silence R CMD check notes
  `Asset Register Category` = `Health Index Asset Category` = `Asset Category` = NULL

  reference_costs_of_failure_tf <- dplyr::filter(gb_ref$reference_costs_of_failure,
                                                 `Asset Register Category` ==
                                                   lv_asset_category)

  # Reference network-performance cost of failure ---------------------------
  ncost <- reference_costs_of_failure_tf$`Network Performance - (GBP)`

  # Customer factor ---------------------------------------------------------
  ref_nw_perf_cost_fail_lv_hv <- gb_ref$ref_nw_perf_cost_fail_lv_hv
  ref_nw_perf_cost_fail_lv_hv_tf <- dplyr::filter(ref_nw_perf_cost_fail_lv_hv,
                                                  `Asset Category` ==
                                                    lv_asset_category)

  ref_no_cust <-
    ref_nw_perf_cost_fail_lv_hv_tf$`Reference Number of Connected Customers`

  # Find the customer-number adjustment band that contains kva_per_customer;
  # "Default" short-circuits to an adjustment of 1.
  # NOTE(review): 1:nrow(...) would iterate c(1, 0) on an empty table, and
  # adj_cust_no is undefined when no band matches -- seq_len() and an
  # explicit fallback would be safer; confirm the table always covers the
  # expected kVA range.
  customer_no_adjust_lv_hv_asset <- gb_ref$customer_no_adjust_lv_hv_asset

  for (n in 1:nrow(customer_no_adjust_lv_hv_asset)){
    if (kva_per_customer == 'Default'){
      adj_cust_no <- 1
      break
    } else if (kva_per_customer >= as.numeric(
      customer_no_adjust_lv_hv_asset$Lower[n]) &
      kva_per_customer < as.numeric(
        customer_no_adjust_lv_hv_asset$Upper[n])){
      adj_cust_no <-
        customer_no_adjust_lv_hv_asset$
        `No. of Customers to be used in the derivation of Customer Factor`[n]
      break
    }
  }

  # The adjustment value is stored as text (e.g. "500"); pull out the
  # digits and convert to numeric
  adj_cust_no <-
    adj_cust_no %>% stringr::str_match_all("[0-9]+") %>% unlist %>% as.numeric

  customer_factor <- (adj_cust_no * no_customers) / ref_no_cust

  # Customer sensitivity factor ---------------------------------------------
  customer_sensitivity_factor <- 1 # See section 7.6.2.2, p. 86 in CNAIM (2017)

  # Network performance consequence factor ----------------------------------
  network_performance_consequence_factor <- customer_factor *
    customer_sensitivity_factor

  # Network performance cost of failure -------------------------------------
  network_cof <- network_performance_consequence_factor * ncost

  return(network_cof)
}
|
3609108c3e61f8cd0b420d4932797bfb31fdb138
|
55cd81bfa7426eab1472c7bda8a98552b16941f3
|
/R/helpers.R
|
31c90735f62cc22277dc1b295a1130b78afb5f32
|
[] |
no_license
|
vonshick/ETLtool
|
d42e082ffca39286866efa7f667bf83a133a9e07
|
a68a9639f2bcb15e1aca6bbb08662e879cea0dfc
|
refs/heads/master
| 2020-08-07T15:17:15.195025
| 2019-10-08T09:58:24
| 2019-10-08T09:58:24
| 213,503,334
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,334
|
r
|
helpers.R
|
#' Drop nested lists and row-bind the remaining elements
#'
#' Removes every element of `list` that is itself a list, then collapses the
#' remaining named elements into a single-row tibble.
#'
#' @param list A named list that may contain nested lists.
#' @return A tibble built from the non-list elements of `list`.
#' @importFrom dplyr bind_rows %>%
drop_sublists <- function(list) {
  # Filter() keeps element names, so bind_rows() still sees named columns.
  # This replaces the previous map2() + `<<-` accumulation, which mutated a
  # variable in the enclosing scope as a side effect.
  Filter(Negate(is.list), list) %>%
    bind_rows()
}
# Suffix every column name of `data` with "_<new_part>" and return the
# renamed data frame.
rename_columns <- function(data, new_part) {
  suffixed <- paste0(names(data), "_", new_part)
  names(data) <- suffixed
  data
}
#' @importFrom dplyr %>%
#' @export
get_header <- function(data, new_part) {
  # Flatten the record by discarding its nested lists, then tag every
  # remaining column name with `new_part`.
  flat <- drop_sublists(data)
  rename_columns(flat, new_part)
}
#' List the full paths of all JSON files in the configured directory
#'
#' Scans the directory named by the `JSON_DIRECTORY` environment variable and
#' returns the full paths of files whose names end in ".json".
#'
#' @return A character vector of file paths.
#' @importFrom tibble tibble
#' @importFrom dplyr filter pull %>%
#' @importFrom stringr str_ends fixed
#' @export
get_json_files_paths <- function() {
  tibble(files = list.files(path = Sys.getenv("JSON_DIRECTORY"), full.names = TRUE)) %>%
    # fixed() matches ".json" literally. With the previous bare ".json"
    # pattern the "." was a regex wildcard, so names such as "data_json"
    # were wrongly accepted.
    filter(str_ends(files, fixed(".json"))) %>%
    pull(files)
}
#' @importFrom RPostgres Postgres dbConnect
# Open a PostgreSQL connection configured entirely from environment variables
# (DB_HOST, DB_USER, DB_PASSWORD, DB_NAME, DB_PORT, DB_SCHEMA). The
# search_path option pins unqualified table names to DB_SCHEMA. The body
# ends in an assignment, so the connection is returned invisibly; callers
# are responsible for closing it with dbDisconnect().
create_database_connection <- function() {
  connection <- dbConnect(
    Postgres(),
    host = Sys.getenv("DB_HOST"),
    user = Sys.getenv("DB_USER"),
    password = Sys.getenv("DB_PASSWORD"),
    dbname = Sys.getenv("DB_NAME"),
    port = Sys.getenv("DB_PORT"),
    options = paste("-c search_path=", Sys.getenv("DB_SCHEMA"), sep = "")
  )
}
|
adf8df5088ffc21641f36a6129ed3d33e18a6e82
|
2a83dfd6f09f9977ba2fd2d97fbb606ebe5494c4
|
/rmbl2019/man/mbl_load_data.Rd
|
83de548d0bb505cdce543e4a7a4fc4916cf39da8
|
[] |
no_license
|
tomsing1/mbl2019
|
c9fa801ecddadd38798b4c60c9c8c6e10152e2fa
|
ff42c461f6a0f8db66ce31f97b9e412dfbd4606c
|
refs/heads/master
| 2020-06-20T05:11:49.935341
| 2019-07-29T05:35:12
| 2019-07-29T05:35:12
| 197,006,213
| 0
| 0
| null | 2019-07-29T05:35:13
| 2019-07-15T13:40:03
|
R
|
UTF-8
|
R
| false
| true
| 523
|
rd
|
mbl_load_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mbl_load_data.R
\name{mbl_load_data}
\alias{mbl_load_data}
\title{Load DGEList with expression data from aws.s3}
\usage{
mbl_load_data(organism = c("mouse", "fly", "fish", "planaria", "worm"),
dataset = c("pre_mbl", "mbl"))
}
\arguments{
\item{organism}{either "mouse", "fly", "planaria", "worm", or "fish"}
\item{dataset}{either "pre_mbl" or "mbl"}
}
\value{
a DGElist object
}
\description{
Load DGEList with expression data from aws.s3
}
|
349b6ea30dbb9636e1c3ce70db0c4c12629e566b
|
200477836bf1e3ec08131092653e46fd26259136
|
/SherryPlot/demo_cellmarker.R
|
4da1dd30f058b79db87ffd6063f03e986579ceee
|
[] |
no_license
|
SherryDong/create_plot_by_R_base
|
0d8b4fc074e40993b977008c0c5f27e8702b24bb
|
22a52779e5ec17a15767ef84024f594d0c3b7459
|
refs/heads/master
| 2023-08-31T05:30:04.611672
| 2023-08-22T01:21:28
| 2023-08-22T01:21:28
| 195,830,801
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,206
|
r
|
demo_cellmarker.R
|
# demo_cellmarker.R -- build a combined table of brain cell-type marker genes
# from four sources (PanglaoDB, a published list from PMID 34390642, and two
# collaborator-curated lists, "LKY" and "CY") and save it as an .RData file.
# NOTE(review): depends on objects/functions defined elsewhere in the project
# (feature.data, list2df_narrow, read.xlsx) and on absolute local paths, so
# it only runs on the original author's machine as written.
## markers
panglodb <- read.delim('D:/写写文章/GD-Driver/BrainCortexDriver_project/data/PanglaoDB_markers_27_Mar_2020.tsv')
# Keep brain, protein-coding, non-mouse-only markers that also appear in the
# project's expression data (feature.data$geneSymbol).
panglodb_brain <- panglodb[which(panglodb$organ=='Brain'&panglodb$gene.type=='protein-coding gene'&panglodb$species!='Mm'&panglodb$official.gene.symbol%in%feature.data$geneSymbol),]
genes_of_interest_marker_panglodb <- data.frame(celltype=panglodb_brain$cell.type,
                                                markers=panglodb_brain$official.gene.symbol,
                                                weight=1)
#### from paper PMID34390642
genes_of_interest_list <- list(
  # area
  Cortex=c('FOXG1'),Cerebellum=c('ZIC2'),allocortex=c('NRP1'),
  SubplateMarkers=c('NR4A2','CRYM','NEFL','SERPINI1'), # SP
  SubplateNeurons=c('NR4A2','CRYM','ST18','CDH18'), #
  MedialGanglionicEminence=c('LHX6','SST'), # MGE (interneurons)
  CaudalGanglionicEminence=c('SP8','NR2F2'), # CGE (interneurons)
  PallialSubpallialBoundary=c('MEIS2','ETV1','PAX6'), # PSB (interneurons)
  # cell type
  RadialGlia=c('SOX9','HES1','SOX2','PAX6','GFAP','VIM','NES','ATP1A2'), # RG
  OPC_Oligodendrocyte=c('SOX10','NKX2-2','MBP'), #OPC_Oligo
  ExcitatoryNeurons=c('NEUROD2','NEUROD6','RBFOX1'), # eN
  MicroGlia=c('AIF1','CCL3','C1QC','CX3CR1','PTPRC'), # MG
  IntermediateProgenitorCell=c('EOMES','PPP1R17','NEUROG1'), # IPC
  InterNeurons=c('DLX2','GAD2','GAD1'), # IN (cortical interneurons)
  EndotheliaCells=c('CLDN5','PECAM1'), # EC
  Astrocyte=c('AQP4','APOE','AGT'), #
  #
  NeuronalIntermediateProgenitorCell=c('EOMES','PPP1R17','PENK','NEUROG1','NEUROD2'), # nIPC
  Pericytes=c('FOXC2','PDGFRB'), # Peric
  LeptomeningealCells=c('COL1A1','LUM'), # VLMC
  RedBloodCells=c('HEMGN'), # RBC
  CiliatedEpendymalCells=c('FOXJ1'), #
  Astroglia=c('GFAP','HOPX','EGFR','ASCL1','AQP4'),
  CorticoGenesis=c('SOX9','EOMES','NEUROD2','DLX2'),
  GlutamatergicNeurons=c('NEUROD2','TBR1','BCL11B','CTIP2','SATB2','SLC17A7','VGLUT1'), # GluN
  CyclingCells=c('TOP2A','MKI67','CLSPN','AURKA'),
  VentricularRadialGlia=c('FBXO32','CTGF','HMGA2'), # vRG
  OuterRadialGlia=c('MOXD1','HOPX','FAM107A','MT3'), # oRG
  EarlyRadialGlia=c('NPY','FGFR3'), # EarlyRG
  LateRadialGlia=c('CD9','GPX3','TNC'), # LateRG
  TruncatedRadialGlia=c('CRYAB','NR4A1','FOXJ1'), # tRG
  MultipotentGlialProgenitorCells=c('ASCL1','OLIG1','PDGFRA','EGFR'), # mGPC
  GABAergicNeurons=c('DLX2'), #
  ##
  StemCell=c("OLIG2","SOX10","NKX2-2","NKX6-2","PAX7","DBX2","EMX1"),
  OligodendrocyteProgenitorCells=c("OLIG2","OLIG1","SOX10","SOX9","CSPG4","CNP"), # OPC
  MatureOligodendrocyte=c("CNP","CLDN11","MAG","MAL","PLP1","SMARCA4","GEMIN2","CD9","MYT1"),
  Myelin=c("CNP","MBP","GALC","CD9","MAG","MOBP","MOG","MAL","PLP1","MYT1"),
  Vasculogenesis=c("VEGFA"),
  Angiogenesis=c("VEGFA","VEGFB","VEGFC","NRP1","NRP2"),
  NeuronStemstage=c("FOXG1","OTX2","DLX2","PAX6","EMX1","HES5","LHX2","EMX2","NKX2-1","SIX3","GSX2","EOMES","MEIS2","DLX1","ISL1","ASCL1"),
  Neuronalprecursor=c("SOX2","SOX9","NHLH1","EBF2","NEUROG1","NEUROD4","DCX"),
  PostMitoticNeuronalMarker=c("TUBB3","MAP2","MAPT","CUX1","CUX2","SATB2","CDH10","DKK3","TBR1","FEZF2","SST","NPY","PROX1"),
  # GSE104276
  PFC_Microglia=c('PTPRC','P2RY12'),
  PFC_NPCs=c('PAX6','SFPR1'),
  PFC_OPCs=c('OLIG1','PDGFRA','COL20A1','PMP2'),
  PFC_ExcitatoryNeurons=c('NEUROD2','RBFOX1'),
  PFC_InterNeurons=c('GAD1','PDE4DIP'),
  PFC_Astrocytes=c('GFAP','AQP4','SLCO1C1'),
  # Organoid single-cell genomic atlas uncovers human-specific features of brain development
  Organoid_CorticalEN=c('MAP2','ENO2','FOXG1','NEUROD6','NEUROD2','SLC17A7'),
  Organoid_MGECGEIN=c('MAP2','ENO2','FOXG1','DLX5','DLX2','DLX1','GAD1','GAD2','SLC32A1'),
  Organoid_LGEIN=c('MAP2','ENO2','FOXG1','DLX5','DLX2','GAD2','SLC32A1','ISL1','EBF1'),
  Organoid_DiencephalonEN=c('MAP2','ENO2','EBF1','NHLH2','SLC17A6','LHX9','GBX2','SHOX2'),
  Organoid_MesencephalonEN=c('MAP2','ENO2','EBF1','NHLH2','SLC17A6','LHX9','TFAP2B'),
  Organoid_MesencephalonIN=c('MAP2','ENO2','GAD1','GAD2','SLC32A1','GATA3','OTX2','SOX14'),
  # target
  CTDNEP1='CTDNEP1',DYRK1A='DYRK1A',ALMS1='ALMS1')
genes_of_interest <- unique(unlist(genes_of_interest_list))
# Convert the named list to a two-column long table (celltype, marker).
genes_of_interest_marker <- list2df_narrow(genes_of_interest_list)
colnames(genes_of_interest_marker) <-c('celltype','markers')
genes_of_interest_marker$weight <- 1;
## cell marker from LKY
tmp1 <- read.delim('D:/写写文章/GD-Driver/BrainCortexDriver_project/data/cell_marker.txt',
                   header = F)
tmp2 <- list()
# Lines containing "$" start a new cell-type section; other lines list that
# section's comma-separated markers after a "]" delimiter.
for(i in tmp1$V1){
  if(grepl('\\$',i)){ct=i;}else{tmp2[[gsub('\\$(.*)','\\1',ct)]]<-unique(unlist(strsplit(gsub(".*\\] (.*)","\\1",i),',')))}
}
genes_of_interest_marker_LKY <- list2df_narrow(tmp2)
colnames(genes_of_interest_marker_LKY) <-c('celltype','markers')
genes_of_interest_marker_LKY$weight <- 1;
genes_of_interest_marker_LKY$celltype<-gsub('\\`','',genes_of_interest_marker_LKY$celltype)
## cell marker from CY
tmp1 <- read.xlsx('D:/写写文章/GD-Driver/BrainCortexDriver_project/data/Single cell gene list_CY.xlsx')
tmp2 <- list()
# Rows with a non-NA first column name a cell type; subsequent rows carry
# one marker each (column 2), upper-cased to match human gene symbols.
for(i in 1:nrow(tmp1)){
  if(is.na(tmp1[i,1])==F){ct=gsub('^ ','',tmp1[i,1])}else{
    tmp2[[ct]] <- c(tmp2[[ct]],toupper(tmp1[i,2]))
  }
}
genes_of_interest_marker_CY <- list2df_narrow(tmp2)
colnames(genes_of_interest_marker_CY) <-c('celltype','markers')
genes_of_interest_marker_CY$weight <- 1;
##
# Tag each table with its provenance, stack them, and prefix every cell-type
# label with its source so labels from different sources stay distinct.
genes_of_interest_marker_panglodb$source <- 'panglodb'
genes_of_interest_marker$source <- 'PMID'
genes_of_interest_marker_LKY$source <- 'LKY'
genes_of_interest_marker_CY$source <- 'CY'
genes_of_interest_marker_combine <- unique(rbind(genes_of_interest_marker_panglodb,
                                                 genes_of_interest_marker,
                                                 genes_of_interest_marker_LKY,
                                                 genes_of_interest_marker_CY))
genes_of_interest_marker_combine$celltype <- sprintf('%s-%s',genes_of_interest_marker_combine$source,
                                                     genes_of_interest_marker_combine$celltype)
save(genes_of_interest_marker_combine,file='D:/analysis_eng/SherryPlot/data/genes_of_interest_marker_combine-Brain.RData')
|
40128ab711ff465747e528c41c9ede36d1b8dd85
|
47b1491bdcd0335f6df4cbc750af9b7fcac9c72c
|
/code/functions/format_training.R
|
5a40140e24b7b969cd581856b2504264f71e5f94
|
[
"CC-BY-4.0"
] |
permissive
|
UKRN-Open-Research/survey-reports
|
772c63e508a84cbf4f6c19830a75361a9d42862e
|
701b4d6067df94bafca1fad9b2530b2129958e1c
|
refs/heads/main
| 2023-04-05T03:13:10.905127
| 2021-04-14T15:12:46
| 2021-04-14T15:12:46
| 340,357,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,804
|
r
|
format_training.R
|
#' Reshape survey responses to long format, split by training preference
#'
#' @param df Data frame of survey responses with columns OpenAccess,
#'   OpenCode, DataSharing, Preprints, Preregistration, Primary, Unit and
#'   randomID.
#' @param training Either "Yes" (keep respondents who said training would be
#'   useful) or "No" (keep everyone else).
#' @return A long-format data frame with columns Primary, Unit, randomID,
#'   OR_Area, Response (recoded to a short familiarity label) and Training.
format_training_familiarity <- function(df, training){
  # Fail loudly on bad input; the previous version only print()ed a message
  # and silently returned nothing.
  if (!training %in% c("Yes", "No")) {
    stop("please select either training = \"Yes\" or \"No\"", call. = FALSE)
  }
  training_msg <- "I would find training on this useful"
  # Pivot the five open-research areas into (OR_Area, Response) pairs, then
  # keep (or exclude) the rows mentioning the training request.
  df <- df %>%
    select(OpenAccess, OpenCode, DataSharing, Preprints, Preregistration, Primary, Unit, randomID) %>%
    pivot_longer(cols = OpenAccess:Preregistration, names_to = "OR_Area", values_to = "Response")
  if (training == "Yes") {
    df <- df %>%
      filter(grepl(training_msg, Response)) %>%
      mutate(Training = "Yes")
  } else {
    df <- df %>%
      filter(!grepl(training_msg, Response)) %>%
      mutate(Training = "No")
  }
  # Recode the verbose questionnaire text to a short familiarity label.
  # Order matters: each grep() only sees elements not yet replaced.
  df$Response <- replace(df$Response, grep("I already engage with this practice", df$Response), "Already engage")
  df$Response <- replace(df$Response, grep("I am familiar with this concept", df$Response), "Familiar")
  df$Response <- replace(df$Response, grep("I am NOT familiar with this concept", df$Response), "Not familiar")
  if (training == "Yes") {
    df$Response <- replace(df$Response, grep(training_msg, df$Response), "Unknown familiarity")
  } else {
    df$Response <- replace(df$Response, grep("I think my organisation already provides sufficient training on this", df$Response), "Unknown familiarity")
    # Remaining NA responses carry no familiarity information.
    df$Response <- as.character(df$Response)
    df$Response[is.na(df$Response)] <- "Unknown familiarity"
  }
  df
}
# Keep only the respondents who said their organisation already provides
# sufficient training, one row per open-research area, with the
# TrainingProvided column recoded to "Yes".
format_training_provided <- function(df){
  provides_msg <- "I think my organisation already provides sufficient training on this"
  long <- df %>%
    select(OpenAccess, OpenCode, DataSharing, Preprints, Preregistration, Primary, Unit, randomID) %>%
    pivot_longer(cols = OpenAccess:Preregistration,
                 names_to = "OR_Area", values_to = "TrainingProvided") %>%
    filter(grepl(provides_msg, TrainingProvided))
  long$TrainingProvided <- replace(long$TrainingProvided,
                                   grep(provides_msg, long$TrainingProvided), "Yes")
  long
}
|
cafdd5f4834a2cfec422031b5296b3e942050e34
|
f0afd5a4bdfc8f61d748a2944d75aef9fa361f4a
|
/plot2.R
|
3ab59547fac31ff8a607749b2d0bff5bfec6c922
|
[] |
no_license
|
KunalSharma2209/ExData_Plotting1
|
1604aaa5bba26850d7192ff0fcf08e5abf5f9924
|
0012db3f6b2a3377f59a3024da78bc1b50978000
|
refs/heads/master
| 2020-12-23T23:25:24.311297
| 2020-02-05T11:22:08
| 2020-02-05T11:22:08
| 237,307,718
| 0
| 0
| null | 2020-01-30T21:18:46
| 2020-01-30T21:18:45
| null |
UTF-8
|
R
| false
| false
| 1,328
|
r
|
plot2.R
|
# plot2.R -- line plot of Global Active Power over 1-2 Feb 2007 from the UCI
# "Individual household electric power consumption" data set, saved as
# plot2.png (480x480).
### Writing code and setting your working directory
getwd()
dir()
setwd("~/R/ExploratoryWeek1Assignment")
library(lubridate)
### Read in the data
# NOTE(review): in this data set missing values are coded as "?", so
# read.delim may import the measurement columns as character/factor --
# confirm Global_active_power is numeric before plotting.
power_data <- read.delim("household_power_consumption.txt", header=TRUE, sep=";")
head(power_data)
power_data[1,]
### Add a variable to the data table, combining the date and time into one
# Column 10 is new: a POSIXct timestamp parsed from the Date + Time strings.
power_data[,10] <- dmy_hms(paste(power_data$Date, power_data$Time))
names(power_data) <- c(names(power_data)[1:9],"Date_time")
head(power_data)
power_data$Date <- dmy(power_data$Date)
power_data$Time <- hms(power_data$Time)
### Use this newly created variable to form a condensed data table showing only data for the two days of interest
power_data_subset1 <- power_data[date(power_data[,10])==dmy("1/2/2007"),]
head(power_data_subset1)
nrow(power_data_subset1)
power_data_subset2 <- power_data[date(power_data[,10])==dmy("2/2/2007"),]
head(power_data_subset2)
nrow(power_data_subset2)
# Stack the two single-day subsets back into one table.
power_data_condensed <- rbind(power_data_subset1, power_data_subset2)
head(power_data_condensed)
nrow(power_data_condensed)
### Plot 2
png(filename = "plot2.png", width=480, height=480)
plot(power_data_condensed$Date_time, power_data_condensed$Global_active_power, type="l",
     ylab = "Global Active Power (kilowatts)", xlab="")
dev.off()
|
8715357c7a94001b0f07693f40f47b2b71f41559
|
18b5b5ea60ef362374e8ed60e651a2cffc4e221c
|
/inst/rmarkdown/templates/flexible-webframework/skeleton/plan_skeleton.R
|
948759b681517d64f58e985e3a03e0410c10aa02
|
[
"MIT"
] |
permissive
|
tpemartin/webtemplate
|
a6bfc468e8d3d64f7ca042676a6d247198e4c5b0
|
061eff584dbc0e8659cb1ca6a00119e4afe76658
|
refs/heads/master
| 2022-12-30T17:43:34.926760
| 2020-10-10T13:20:39
| 2020-10-10T13:20:39
| 292,805,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,393
|
r
|
plan_skeleton.R
|
# plan_skeleton------------
# A drake plan: each named element below is a *target* whose value is the
# result of evaluating its braced block. drake tracks the dependencies
# between targets (e.g. outputWeblayout uses frameworkChoice and body) and
# rebuilds only what is out of date.
plan_skeleton=drake::drake_plan(
  # > plan begins -----------
  # >> frameworkChoice--------------
  # Which front-end frameworks to include (materialize, jQuery, Google login).
  frameworkChoice = {
    menu <- get_menu()
    myChoice <- list(
      menu$materialize(),
      menu$jQuery(),
      menu$googleLogin()
    )
    myChoice
  },
  # >> body--------------
  # The page skeleton: header / division / main / footer placeholders.
  body = {
    tagList(
      tags$header(
        "Title"),
      tags$div(class="division"),
      tags$main(
        "Main content"
      ),
      tags$footer(
        "footer"
      )
    )
  },
  # >> myStyle--------------
  # NOTE(review): unlike myJs/myAfterbodyHtml below, this block does not end
  # with `template`, so the target's value is the htmlDependency (the value
  # of the last assignment), not the template object -- confirm intended.
  myStyle={
    template <- webtemplate::menu_itemTemplate()
    template$dep <-
      htmltools::htmlDependency(
        name="myStyle",
        version="0.1",
        src="css/myStyle.css"
      )
  },
  # >> myJs--------------
  # Template carrying a script to inject after the body.
  myJs = {
    template <- webtemplate::menu_itemTemplate()
    template$afterbodyJs <-
      htmltools::includeScript(
        "js/myJs.js"
      )
    template
  },
  # >> myAfterbodyHtml--------------
  # Template carrying raw HTML to inject after the body.
  myAfterbodyHtml = {
    template <- webtemplate::menu_itemTemplate()
    template$afterbodyHtml <-
      htmltools::includeHTML(
        "html/myJsWithScriptTag.html"
      )
    template
  },
  # >> outputWeblayout--------------
  # Assemble the final page from the chosen frameworks and body, and write
  # it to template.html.
  outputWeblayout = {
    bodyLayout <- generate_template(
      frameworkChoice,
      .body=body
    )
    save_html(bodyLayout, file="template.html")
  }
  # > plan ends ------------
)
# make plan -----------------
# make plan -----------------
# Build every target of plan_skeleton using the drake cache at `cachePath`.
# NOTE(review): the options() call hard-codes the default path instead of
# using `cachePath`, so a caller-supplied path is only partially respected
# -- confirm whether that is intentional.
mk_plan_skeleton = function(cachePath="/Users/martin/Github/webtemplate/.template"){
  # no params in the frontmatter
  library(htmltools);
  library(dplyr);library(webtemplate)
  library(drake)
  # Point the RStudio drake addins at the same on-disk storr cache.
  options(rstudio_drake_cache = storr::storr_rds("/Users/martin/Github/webtemplate/.template", hash_algorithm = "xxhash64"))
  make(plan_skeleton, cache=drake::drake_cache(path=cachePath))
  ###{r hijackHtml, afterMake=T}
  # Placeholder hook: post-processing of template.html after the build.
  hijackHtml = {
    # if you need to modify template.html after it was made, put code here.
  }
  ###
}
# Render an interactive dependency graph of plan_skeleton against the drake
# cache at `cachePath` (useful for seeing which targets are out of date).
vis_plan_skeleton <- function(cachePath="/Users/martin/Github/webtemplate/.template"){
  # no params in the frontmatter
  library(htmltools);
  library(dplyr);library(webtemplate)
  drake::vis_drake_graph(plan_skeleton, cache=drake::drake_cache(path=cachePath))
}
# Convenience handles bound to the plan's cache: `readd` loads a built
# target from the cache, `clean` removes targets (all of them when t=NULL).
meta_plan_skeleton=
  list(
    cachePath="/Users/martin/Github/webtemplate/.template",
    readd=function(t) {
      drake::readd(t,cache=drake::drake_cache(path="/Users/martin/Github/webtemplate/.template"))},
    clean=function(t=NULL) {
      drake::clean(t,cache=drake::drake_cache(path="/Users/martin/Github/webtemplate/.template"))})
|
afb9ce3d9b2abfd6fa896dbb97f70cf41219f4de
|
a320c7353d59c30a3d7bc2c303c5b6793449ae7d
|
/run_analysis.R
|
b6a2a95fd91e2d8000c4f679f08021c2c22b4746
|
[] |
no_license
|
mitsmis/ProgrammingAssignment4
|
85595e61935eb913391f23238eb1b71c2dbdbc0e
|
e231a0cfdd8b9295331c3821f1d610ccb74baab9
|
refs/heads/master
| 2021-01-09T20:01:17.082824
| 2016-07-17T20:19:51
| 2016-07-17T20:19:51
| 63,536,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,516
|
r
|
run_analysis.R
|
# run_analysis.R -- build a tidy data set from the UCI HAR (Human Activity
# Recognition) raw files: merge train/test, keep mean()/std() features,
# label activities, rename variables, and write per-subject/activity means.
# Data was downloaded the old fashioned way and placed in the
# appropriate folder in order to complete the analysis.

# Attach plyr up front: revalue() is called further down when relabelling
# the activity factor. (The original script only attached plyr *after* that
# call, so revalue() failed in a fresh R session.)
library(plyr)

# (features) Read and bind the test and training datasets together.
x.train <- read.table('./UCI HAR Dataset/train/X_train.txt')
x.test <- read.table('./UCI HAR Dataset/test/X_test.txt')
features <- rbind(x.train, x.test)

# Examine the properties of 'features' to see if it worked.
str(features)

# (subjects) Read and bind the test and training datasets together.
subj.train <- read.table('./UCI HAR Dataset/train/subject_train.txt')
subj.test <- read.table('./UCI HAR Dataset/test/subject_test.txt')
subjects <- rbind(subj.train, subj.test)
str(subjects)

# (activities) Read and bind the test and training label files together.
y.train <- read.table('./UCI HAR Dataset/train/y_train.txt')
y.test <- read.table('./UCI HAR Dataset/test/y_test.txt')
activities <- rbind(y.train, y.test)
str(activities)

# Add variable headers to the data; feature names come from features.txt.
names(subjects) <- c("subject")
names(activities) <- c("activity")
featuresName <- read.table('./UCI HAR Dataset/features.txt',
                           head = FALSE)
names(features) <- featuresName$V2

# Combine the cleaned-up disparate data sets into one data set.
combSubAct <- cbind(subjects, activities)
data <- cbind(features, combSubAct)

# Extract only the measurements on the mean and standard deviation for each
# measurement: feature names containing a literal "mean()" or "std()".
extractFeatureNames <- featuresName$V2[grep("mean\\(\\)|std\\(\\)", featuresName$V2)]
subNames <- c(as.character(extractFeatureNames), "subject", "activity")
subdata <- subset(data, select = subNames)
str(subdata)

# Bring in 'activity_labels.txt' and convert the numeric activity codes to
# descriptive factor levels.
actLabels <- read.table('./UCI HAR Dataset/activity_labels.txt',
                        head = FALSE)
subdata$activity <- as.factor(subdata$activity)
subdata$activity <- revalue(subdata$activity, c("1" = "walking",
                                                "2" = "walking_upstairs",
                                                "3" = "walking_downstairs",
                                                "4" = "sitting",
                                                "5" = "standing",
                                                "6" = "laying"))
str(subdata)

# Rename variables to better describe what they measure, as in the UCI
# Machine Learning Repository documentation:
#   leading "t" -> "time", leading "f" -> "freq",
#   "BodyBody" -> "Body" (fix the duplicated token),
#   "Acc" -> "Acceleration", "Gyro" -> "AngularVelocity".
names(subdata) <- gsub("^t", "time", names(subdata))
names(subdata) <- gsub("^f", "freq", names(subdata))
names(subdata) <- gsub("BodyBody", "Body", names(subdata))
names(subdata) <- gsub("Acc", "Acceleration", names(subdata))
names(subdata) <- gsub("Gyro", "AngularVelocity", names(subdata))

# Snapshot the final variable names for inspection.
namesdf <- as.data.frame(names(subdata))

# Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
subdata2 <- aggregate(. ~ subject + activity, subdata, mean)

# Sort the data so rows for the same subject appear together.
subdata2 <- subdata2[order(subdata2$subject, subdata2$activity), ]

# Write the tidy data to file.
write.table(subdata2, file = "tidydata.txt", row.names = FALSE)
|
ec3d0ed286543b281ddce3c672835c11575b0a5e
|
1f59624d8e1d90a8232367d901574faf84ee35d6
|
/Plot1.R
|
f17cc67d08e9360ace5a0f7c8f6f76919795128e
|
[] |
no_license
|
riveraor6/ExData_Plotting2
|
d3ceac9eeee1d3ab5072cd599bcdf7d203bbc790
|
ea2b53cfe17e6e484f24e87b022632f97d4753c4
|
refs/heads/master
| 2020-12-25T18:19:38.342239
| 2014-10-27T01:39:18
| 2014-10-27T01:39:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,209
|
r
|
Plot1.R
|
## Plot1.R -- total PM2.5 emissions per year (1999-2008) from the NEI data,
## drawn as a line plot and saved to plot1.png.

## Setting libraries (ggplot2/plyr/data.table were attached by the original
## script; kept even though only base graphics are used below).
library(plyr)
library(ggplot2)
library(data.table)

## Set the working directory and remember it for unzip(exdir = ...).
## FIX: setwd() returns the *previous* directory, so the original
## `dir <- setwd(...)` made unzip() extract into the old location.
setwd("D:/Users/Mad Labs PR/Documents/Exploratory_2")
dir <- getwd()

url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
filename1 <- "exdata-data-NEI_data.zip"
filename2 <- "Source_Classification_Code.rds"
filename3 <- "summarySCC_PM25.rds"

## Download the data if the zip does not exist in the working directory.
## FIX: paste("./", filename1) used the default sep = " " and produced
## "./ exdata-data-NEI_data.zip" (note the space), so the unzip(filename1)
## below could never find the downloaded file.
if (!file.exists(filename1)) {
  download.file(url, paste0("./", filename1))
  unzip(filename1, overwrite = TRUE, exdir = dir)
}

## Unzip the archive if the rds files are not in the working directory.
if (!file.exists(filename2)) {
  unzip(filename1, overwrite = TRUE, exdir = dir)
}

## Read the data.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

## Aggregate emissions per year.
## FIX: naming the grouping column (`year = year`) gives the result a "year"
## column; the unnamed `by = list(year)` produced a column called "Group.1",
## so Yearly_Emissions$year was NULL and the custom axis() drew no ticks.
Yearly_Emissions <- with(NEI, aggregate(Emissions, by = list(year = year), sum))

## Plot 1
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
plot(Yearly_Emissions, type = "l", col = "dark red", ylab = "Total PM2.5 Emmision From All Sources Between 1999-2008", xlab = "Year", main = "Annual Emissions")
axis(1, at = as.integer(Yearly_Emissions$year), las = 1)
dev.off()
|
38db7befe04fe63160329eb88696e7c262c74a95
|
38d3251ce2d0a946da522729eb2b6476bfb5d612
|
/hw04/code/clean-data-script.R
|
4983882765d2f1a91781fe0b2185cee387c47353
|
[] |
no_license
|
jennyhdw/stat133-hws-fall17
|
411b243fbf0c1f00a421d28e20110ba101d586b0
|
0bf7616d745b457b7abe74458891fc7407eb829d
|
refs/heads/master
| 2021-08-23T07:10:41.070354
| 2017-12-04T02:31:29
| 2017-12-04T02:31:29
| 103,677,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,648
|
r
|
clean-data-script.R
|
# ==============================================================
# Title: Cleaning Data
# Description: Data preparation
# Input(s): rawscores.csv
# Output(s): cleaned data
# Author: Jenny Huang
# Date: 11-09-2017
# ===============================================================

# packages
library(readr) # importing data

# source functions (provides summary_stats, print_stats, rescale100,
# score_homework, score_quiz, score_lab)
source('functions.R')

# importing data
rawscores <- read.csv("../data/rawdata/rawscores.csv",
                      colClasses = c(rep("real",16)))

# sinking the structure and column summaries of the raw data frame
sink(file = '../output/summary-rawscores.txt')
str(rawscores)
for (i in seq_len(ncol(rawscores))) {
  print(summary_stats(rawscores[, i]))
}
for (i in seq_len(ncol(rawscores))) {
  print(print_stats(rawscores[, i]))
}
sink()

# Replace all missing values with zero. is.na() on the whole (all-numeric)
# data frame yields a logical matrix, so this single assignment replaces the
# original nested row/column loops.
rawscores[is.na(rawscores)] <- 0

# Rescale the four quizzes (columns 11:14) to a 0-100 scale; their raw
# maxima are 12, 18, 20 and 20 points respectively.
quiz_max <- c(12, 18, 20, 20)
for (q in seq_along(quiz_max)) {
  col <- 10 + q
  for (i in seq_len(nrow(rawscores))) {
    rawscores[i, col] <- rescale100(rawscores[i, col], 0, quiz_max[q])
  }
}

# adding rescaled tests (EX1 out of 80 points, EX2 out of 90 points)
rawscores$Test1 <- rescale100(rawscores$EX1, 0, 80)
rawscores$Test2 <- rescale100(rawscores$EX2, 0, 90)

# adding Homework (columns 1:9 hold homework scores; drop the lowest)
rawscores$Homework <- c(0)
for (i in seq_len(nrow(rawscores))) {
  rawscores[i, 19] <- round(score_homework(as.numeric(rawscores[i, 1:9]),
                                           drop = TRUE), digits = 2)
}

# adding Quiz (columns 11:14; drop the lowest)
rawscores$Quiz <- c(0)
for (i in seq_len(nrow(rawscores))) {
  rawscores[i, 20] <- round(score_quiz(as.numeric(rawscores[i, 11:14]),
                                       drop = TRUE), digits = 2)
}

# adding lab score (column 10 holds the raw lab attendance)
rawscores$Lab <- c(0)
for (i in seq_len(nrow(rawscores))) {
  rawscores[i, 21] <- score_lab(rawscores[i, 10])
}

# adding overall score:
#   10% Lab + 30% Homework + 15% Quiz + 20% Test1 + 25% Test2
rawscores$Overall <- c(0)
for (i in seq_len(nrow(rawscores))) {
  rawscores[i, 22] <- 0.1*rawscores[i, 21] +
    0.3*rawscores[i, 19] + 0.15*rawscores[i, 20] + 0.2*rawscores[i, 17] +
    0.25*rawscores[i, 18]
}

# adding Grade from the overall score (column 22); half-open bands
rawscores$Grade <- c("")
for (i in seq_len(nrow(rawscores))) {
  if (rawscores[i,22]<50){rawscores[i,23] <- "F"}
  if (rawscores[i,22]>= 50 & rawscores[i,22]<60){rawscores[i,23] <- "D"}
  if (rawscores[i,22]>= 60 & rawscores[i,22]<70){rawscores[i,23] <- "C-"}
  if (rawscores[i,22]>= 70 & rawscores[i,22]<77.5){rawscores[i,23] <- "C"}
  if (rawscores[i,22]>= 77.5 & rawscores[i,22]<79.5){rawscores[i,23] <- "C+"}
  if (rawscores[i,22]>= 79.5 & rawscores[i,22]<82){rawscores[i,23] <- "B-"}
  if (rawscores[i,22]>= 82 & rawscores[i,22]<86){rawscores[i,23] <- "B"}
  if (rawscores[i,22]>= 86 & rawscores[i,22]<88){rawscores[i,23] <- "B+"}
  if (rawscores[i,22]>= 88 & rawscores[i,22]<90){rawscores[i,23] <- "A-"}
  if (rawscores[i,22]>= 90 & rawscores[i,22]<95){rawscores[i,23] <- "A"}
  if (rawscores[i,22]>= 95 & rawscores[i,22]<=100){rawscores[i,23] <- "A+"}
}

# sinking per-column summary text files for the derived columns 17:22
for (i in (17:22)){
  filepath <- file.path("../output/",paste0(names(rawscores[i]),"-stats.txt",
                                            collapse =""))
  sink(file = filepath)
  print(summary_stats(rawscores[,i]))
  print(print_stats(rawscores[,i]))
  sink()
}

# sinking the structure of the cleaned data frame
sink(file = '../output/summary-cleanscores.txt')
str(rawscores)
sink()

# writing the clean data frame to disk
write.csv(rawscores,file = "../data/cleandata/cleanscores.csv",
          row.names = FALSE)
|
b49f28b26ece7b901b2f553e4951e76efa1a4f95
|
f8ef7a9663ea5cdce9776314719eafc44bba24fb
|
/man/BF.Rd
|
ce44187fdb797b83bd347af610de7f28a25c5d44
|
[] |
no_license
|
jomulder/BFpack
|
cd00253f44f520ab8a8952807cb361a4c17a0c57
|
e0bd669ddb8ebfdc3b8e7f56b77eeafd0ab7cc1a
|
refs/heads/master
| 2023-08-16T10:42:12.051035
| 2023-08-15T09:45:29
| 2023-08-15T09:45:29
| 134,589,180
| 13
| 4
| null | 2021-11-23T15:55:07
| 2018-05-23T15:23:20
|
R
|
UTF-8
|
R
| false
| true
| 10,789
|
rd
|
BF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BF.gaussian.R, R/BF.lm.R, R/BF_methods.R,
% R/BFttest.R
\name{BF.default}
\alias{BF.default}
\alias{BF.lm}
\alias{BF}
\alias{BF.t_test}
\title{Bayes factors for Bayesian exploratory and confirmatory hypothesis
testing}
\usage{
\method{BF}{default}(x, hypothesis = NULL, prior.hyp = NULL, complement = TRUE, Sigma, n, ...)
\method{BF}{lm}(x, hypothesis = NULL, prior.hyp = NULL, complement = TRUE, BF.type = 2, ...)
\method{BF}{t_test}(x, hypothesis = NULL, prior.hyp = NULL, complement = TRUE, BF.type = 2, ...)
}
\arguments{
\item{x}{An R object containing the outcome of a statistical analysis.
An R object containing the outcome of a statistical analysis. Currently, the
following objects can be processed: t_test(), bartlett_test(), lm(), aov(),
manova(), cor_test(), lmer() (only for testing random intercep variances),
glm(), coxph(), survreg(), polr(), zeroinfl(), rma(), ergm(), or named vector objects.
In the case \code{x} is a named vector, the arguments \code{Sigma} and \code{n}
are also needed. See vignettes for elaborations.}
\item{hypothesis}{A character string containing the constrained (informative) hypotheses to
evaluate in a confirmatory test. The default is NULL, which will result in standard exploratory testing
under the model \code{x}.}
\item{prior.hyp}{A vector specifying the prior probabilities of the hypotheses.
The default is NULL which will specify equal prior probabilities.}
\item{complement}{a logical specifying whether the complement should be added
to the tested hypothesis under \code{hypothesis}.}
\item{Sigma}{An approximate posterior covariance matrix (e.g,. error covariance
matrix) of the parameters of interest. This argument is only required when \code{x}
is a named vector.}
\item{n}{The (effective) sample size that was used to acquire the estimates in the named vector
\code{x} and the error covariance matrix \code{Sigma}. This argument is only required when \code{x}
is a named vector.}
\item{...}{Parameters passed to and from other functions.}
\item{BF.type}{An integer that specified the type of Bayes factor (or prior) that is used for the test.
Currently, this argument is only used for models of class 'lm' and 't_test',
where \code{BF.type=2} implies an adjusted fractional Bayes factor with a 'fractional prior mean' at the null value (Mulder, 2014),
and \code{BF.type=1} implies a regular fractional Bayes factor (based on O'Hagan (1995)) with a 'fractional prior mean' at the MLE.}
}
\value{
The output is an object of class \code{BF}. The object has elements:
\itemize{
\item BFtu_exploratory: The Bayes factors of the constrained hypotheses against
the unconstrained hypothesis in the exploratory test.
\item PHP_exploratory: The posterior probabilities of the constrained hypotheses
in the exploratory test.
\item BFtu_confirmatory: The Bayes factors of the constrained hypotheses against
the unconstrained hypothesis in the confirmatory test using the \code{hypothesis}
argument.
\item PHP_confirmatory: The posterior probabilities of the constrained hypotheses
in the confirmatory test using the \code{hypothesis} argument.
\item BFmatrix_confirmatory: The evidence matrix which contains the Bayes factors
between all possible pairs of hypotheses in the confirmatory test.
\item BFtable_confirmatory: The \code{Specification table} (output when printing the
\code{summary} of a \code{BF} for a confirmatory test) which contains the different
elements of the extended Savage Dickey density ratio where
\itemize{
\item The first column `\code{complex=}' quantifies the relative complexity of the
equality constraints of a hypothesis (the prior density at the equality constraints in the
extended Savage Dickey density ratio).
\item The second column `\code{complex>}' quantifies the relative complexity of the
order constraints of a hypothesis (the prior probability of the order constraints in the extended
Savage Dickey density ratio).
\item The third column `\code{fit=}' quantifies the relative fit of the equality
constraints of a hypothesis (the posterior density at the equality constraints in the extended
Savage Dickey density ratio).
\item The fourth column `\code{fit>}' quantifies the relative fit of the order
constraints of a hypothesis (the posterior probability of the order constraints in the extended
Savage Dickey density ratio)
\item The fifth column `\code{BF=}' contains the Bayes factor of the equality constraints
against the unconstrained hypothesis.
\item The sixth column `\code{BF>}' contains the Bayes factor of the order constraints
against the unconstrained hypothesis.
\item The seventh column `\code{BF}' contains the Bayes factor of the constrained hypothesis
against the unconstrained hypothesis.
\item The eighth column `\code{PHP}' contains the posterior probabilities of the
constrained hypotheses.
}
\item prior: The prior probabilities of the constrained hypotheses in a confirmatory test.
\item hypotheses: The tested constrained hypotheses in a confirmatory test.
\item estimates: The unconstrained estimates.
\item model: The input model \code{x}.
\item call: The call of the \code{BF} function.
}
}
\description{
The \code{BF} function can be used for hypothesis testing and
model
selection using the Bayes factor. By default exploratory hypothesis tests are
performed of whether each model parameter equals zero, is negative, or is
positive.
Confirmatory hypothesis tests can be executed by specifying hypotheses with
equality and/or order constraints on the parameters of interest.
}
\details{
The function requires a fitted modeling object. Current analyses
that are supported: \code{\link[bain]{t_test}},
\code{\link[BFpack]{bartlett_test}},
\code{\link[stats]{aov}}, \code{\link[stats]{manova}},
\code{\link[stats]{lm}}, \code{mlm},
\code{\link[stats]{glm}}, \code{\link[polycor]{hetcor}},
\code{\link[lme4]{lmer}}, \code{\link[survival]{coxph}},
\code{\link[survival]{survreg}},
\code{\link[pscl]{zeroinfl}}, \code{\link[metafor]{rma}} and \code{\link[MASS]{polr}}.
For testing parameters from the results of t_test(), lm(), aov(),
manova(), and bartlett_test(), hypothesis testing is done using
adjusted fractional Bayes factors (computed using minimal fractions).
For testing measures of association (e.g., correlations) via \code{cor_test()},
Bayes factors are computed using joint uniform priors under the correlation
matrices. For testing intraclass correlations (random intercept variances) via
\code{lmer()}, Bayes factors are computed using uniform priors for the intraclass
correlations. For all other tests, approximate adjusted fractional Bayes factors
(with minimal fractions) are computed using Gaussian approximations, similar as
a classical Wald test.
}
\section{Methods (by class)}{
\itemize{
\item \code{BF(default)}: S3 method for a named vector 'x'
\item \code{BF(lm)}: S3 method for an object of class 'lm'
\item \code{BF(t_test)}: BF S3 method for an object of class 't_test'
}}
\examples{
\dontshow{
# EXAMPLE 1. One-sample t test
ttest1 <- t_test(therapeutic, mu = 5)
print(ttest1)
# confirmatory Bayesian one sample t test
BF1 <- BF(ttest1, hypothesis = "mu = 5")
summary(BF1)
# exploratory Bayesian one sample t test
BF(ttest1)
# EXAMPLE 2. ANOVA
aov1 <- aov(price ~ anchor * motivation,data = tvprices)
BF1 <- BF(aov1, hypothesis = "anchorrounded = motivationlow;
anchorrounded < motivationlow")
summary(BF1)
# EXAMPLE 3. Logistic regression
fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity +
tattoos, family = binomial(), data = wilson)
BF1 <- BF(fit, hypothesis = "ztrust > zfWHR > 0;
ztrust > 0 & zfWHR = 0")
summary(BF1)
# EXAMPLE 4. Correlation analysis
set.seed(123)
cor1 <- cor_test(memory[1:20,1:3])
BF1 <- BF(cor1)
summary(BF1)
BF2 <- BF(cor1, hypothesis = "Wmn_with_Im > Wmn_with_Del > 0;
Wmn_with_Im = Wmn_with_Del = 0")
summary(BF2)
# EXAMPLE 5. Bayes factor testing on a named vector
# A Poisson regression model is used to illustrate the computation
# of Bayes factors with a named vector as input
poisson1 <- glm(formula = breaks ~ wool + tension,
data = datasets::warpbreaks, family = poisson)
# extract estimates, error covariance matrix, and sample size:
estimates <- poisson1$coefficients
covmatrix <- vcov(poisson1)
samplesize <- nobs(poisson1)
# compute Bayes factors on equal/order constrained hypotheses on coefficients
BF1 <- BF(estimates, Sigma = covmatrix, n = samplesize, hypothesis =
"woolB > tensionM > tensionH; woolB = tensionM = tensionH")
summary(BF1)
}
\donttest{
# EXAMPLE 1. One-sample t test
ttest1 <- bain::t_test(therapeutic, mu = 5)
print(ttest1)
# confirmatory Bayesian one sample t test
BF1 <- BF(ttest1, hypothesis = "mu = 5")
summary(BF1)
# exploratory Bayesian one sample t test
BF(ttest1)
# EXAMPLE 2. ANOVA
aov1 <- aov(price ~ anchor * motivation, data = tvprices)
# check the names of the model parameters
names(aov1$coefficients)
BF1 <- BF(aov1, hypothesis = "anchorrounded = motivationlow;
anchorrounded < motivationlow;
anchorrounded > motivationlow")
summary(BF1)
# EXAMPLE 3. Logistic regression
fit <- glm(sent ~ ztrust + zfWHR + zAfro + glasses + attract + maturity +
tattoos, family = binomial(), data = wilson)
BF1 <- BF(fit, hypothesis = "ztrust > (zfWHR, zAfro) > 0;
ztrust > 0 & zfWHR=zAfro= 0")
summary(BF1)
# EXAMPLE 4. Correlation analysis
set.seed(123)
cor1 <- cor_test(memory[1:20,1:3])
BF1 <- BF(cor1)
summary(BF1)
BF2 <- BF(cor1, hypothesis = "Wmn_with_Im > Wmn_with_Del > 0;
Wmn_with_Im = Wmn_with_Del = 0")
summary(BF2)
# EXAMPLE 5. Bayes factor testing on a named vector
# We illustrate the computation of Bayes factors using a named vector
# as input on a Poisson regression model
poisson1 <- glm(formula = breaks ~ wool + tension,
data = datasets::warpbreaks, family = poisson)
# extract estimates, error covariance matrix, and sample size,
# from fitted object
estimates <- poisson1$coefficients
covmatrix <- vcov(poisson1)
samplesize <- nobs(poisson1)
# compute Bayes factors on equal/order constrained hypotheses on coefficients
BF1 <- BF(estimates, Sigma = covmatrix, n = samplesize, hypothesis =
"woolB > tensionM > tensionH; woolB = tensionM = tensionH")
summary(BF1)
}
}
\references{
Mulder, J., D.R. Williams, Gu, X., A. Tomarken,
F. Böing-Messing, J.A.O.C. Olsson-Collentine, Marlyne Meyerink, J. Menke,
J.-P. Fox, Y. Rosseel, E.J. Wagenmakers, H. Hoijtink., and van Lissa, C.
(2021). BFpack: Flexible Bayes Factor Testing of Scientific Theories
in R. Journal of Statistical Software. <DOI:10.18637/jss.v100.i18>
}
|
e1e88c9292a697fb65d66e524e562a07a2fbc17f
|
2656a078cef4fbb1a32b78dc946e945b5424ba35
|
/man/lesmis.Rd
|
0d61bda7212b1e7a5bc797c96d1213d63f8a4123
|
[] |
no_license
|
fanatichuman/geomnet
|
fa5ccf2049eae8f6131160815442fff779cb221f
|
6aff9ca74a581e773e8b7f502863c8bef483e473
|
refs/heads/master
| 2021-01-24T20:25:07.135518
| 2016-07-20T16:00:24
| 2016-07-20T16:00:24
| 64,494,837
| 1
| 0
| null | 2016-07-29T16:27:48
| 2016-07-29T16:27:47
|
R
|
UTF-8
|
R
| false
| true
| 1,523
|
rd
|
lesmis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{lesmis}
\alias{lesmis}
\title{Coappearance network of characters in Les Miserables (undirected)}
\format{A list of two data frames:
\itemize{
\item the edges data set consists of three variables of length 254:
\itemize{
\item from: Character 1
\item to: Character 2
\item degree: number of times they appear together in a chapter of Les Miserables
}
\item the vertices data set consists of two variables with information on 77 characters:
\itemize{
\item id: Character ID number
\item label: Character name
}
}}
\usage{
lesmis
}
\description{
A list of two datasets, vertices and edges, containing data on characters and their coapperance in chapters in Victor Hugo's Les Miserables.
The variables are as follows:
}
\examples{
# prep the data
lesmisnet <- merge(lesmis$edges, lesmis$vertices, by.x = "from",
by.y = "label", all = TRUE)
lesmisnet$degree[is.na(lesmisnet$degree)] <- 0
# create plot
ggplot(data = lesmisnet, aes(from_id = from, to_id = to,
linewidth = degree / 5 + 0.1 )) +
geom_net(aes(size = degree, alpha = degree),
colour = "grey30", ecolour = "grey60",
layout = "fruchtermanreingold", label = TRUE, vjust = -0.75) +
scale_alpha(range = c(0.3, 1)) +
theme_net()
}
\references{
D. E. Knuth, The Stanford GraphBase: A Platform for Combinatorial Computing, Addison-Wesley, Reading, MA (1993).
}
\keyword{datasets}
|
1c58e94adc6a07fb73aa1a02f049475ba7ab9d3a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RPPairwiseDesign/R/PPrect.R
|
cebfc25c058fed3717dbe7d78649bcd097628376
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 605
|
r
|
PPrect.R
|
## Construct a PBIB design based on the rectangular association scheme from
## an n x l array of treatments.
##
## The s = n * l treatments are laid out row-wise in an n x l rectangle A.
## For the treatment in cell (i, j) three associate classes are formed:
##   1st: the l treatments in the same row         (block size k1 = l)
##   2nd: the n - 1 treatments in the same column  (block size k2 = n - 1)
##   3rd: all remaining treatments                 (k3 = (n - 1) * (l - 1))
##
## Args:
##   n: number of rows of the treatment rectangle; must be >= 3 so that
##      deleting one row still leaves a matrix (the original code crashed
##      with a cryptic subscript error for n = 2).
##   l: number of columns of the treatment rectangle; must be >= 2.
##
## Returns a list with:
##   RPPBD: the design, one row per treatment; the three associate-class
##          blocks are separated by literal "||" columns (printed unquoted).
##   v, b, r: number of treatments, blocks (v * 3) and replications.
##   K:     block sizes of the three associate classes.
##   lamda: concurrence parameters lambda_1..lambda_3 of the scheme.
PPrect <- function(n, l) {
  # Validate early: smaller inputs break the matrix subsetting below.
  stopifnot(n >= 3, l >= 2)
  M <- 3                      # number of associate classes / blocks per unit
  s <- n * l                  # total number of treatments
  # Treatments 1..s arranged row-wise in the n x l rectangle.
  A <- matrix(seq_len(s), ncol = l, byrow = TRUE)
  # Collect the three associate classes for every cell (i, j). Rows are
  # accumulated in pre-sized lists instead of growing vectors with c().
  first <- vector("list", s)
  second <- vector("list", s)
  third <- vector("list", s)
  idx <- 0L
  for (i in seq_len(n)) {
    for (j in seq_len(l)) {
      idx <- idx + 1L
      first[[idx]] <- A[i, ]                 # same-row treatments
      second[[idx]] <- A[-i, ][, j]          # same-column treatments
      third[[idx]] <- as.vector(A[-i, -j])   # everything else
    }
  }
  f <- (n - 1) * (l - 1)      # width of the third-class block
  AA <- matrix(unlist(first), ncol = l, byrow = TRUE)
  BB <- matrix(unlist(second), ncol = n - 1, byrow = TRUE)
  CC <- matrix(unlist(third), ncol = f, byrow = TRUE)
  # Glue the three blocks together with "||" separator columns (coerces the
  # whole design to character, as in the original output format).
  Q <- Reduce("cbind", list(AA, "||", BB, "||", CC))
  # Design parameters of the rectangular association scheme.
  k1 <- l
  k2 <- (n - 1)
  k3 <- (n - 1) * (l - 1)
  lam1 <- l + (n - 1) * (l - 2)
  lam2 <- (n - 2) + (n - 2) * (l - 1)
  lam3 <- (n - 2) * (l - 2)
  return(list(RPPBD = noquote(Q), v = s, b = s * M, r = s,
              K = c(k1, k2, k3), lamda = c(lam1, lam2, lam3)))
}
|
12c76877c95189305b806d8e0e929b9243e1a525
|
be429607b3bcbf89d8ced76bd2a3aa4ee8e64301
|
/man/dt_stat.Rd
|
8fdc921f77860c1477254348dda4a7a6f8035243
|
[] |
no_license
|
lshreyas/ehR
|
b66034d860a5a61b6ad7b38d22a667633c855c01
|
a39e1d6c13851be7eb0b123b1a1c6c12329a6e84
|
refs/heads/master
| 2021-01-18T16:07:00.081227
| 2017-03-30T01:59:13
| 2017-03-30T01:59:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 593
|
rd
|
dt_stat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dt_stat.R
\name{dt_stat}
\alias{dt_stat}
\title{Count number of observations in a data.table over a given timeframe/a given id list.}
\usage{
dt_stat(id_list, empi_list, date_list, dt, dt_date_var, timeframe_day = 365,
buffer_day = 0, timeframe_hour = 24, buffer_hour = 0, exp_list = NA,
exp_list_agg = NA, mode = "day", mode_direction = "backward")
}
\arguments{
\item{TBC}{}
}
\value{
TBC
}
\description{
Count number of observations in a data.table over a given timeframe/a given id list.
}
\examples{
TBC
}
|
ba4293a34bcf97e3a4d539917250b34e7e45a1e0
|
4b3e218c2081e897d23e40da99f7767c0ca710af
|
/man/shinydivajs.Rd
|
89d2dd2ebb41d3d59665c0b8352196545c8139de
|
[] |
no_license
|
byzheng/shinydivajs
|
9f410e81f6c77e4ded1f6960ab8050363371c4cd
|
54f3b3b6301a5a7d9adccd2dd483f7fd5020a90a
|
refs/heads/master
| 2021-01-20T22:10:15.739909
| 2016-09-24T11:54:15
| 2016-09-24T11:54:15
| 53,198,295
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,110
|
rd
|
shinydivajs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/divajs.R
\name{shinydivajs}
\alias{shinydivajs}
\title{a JavaScript book image viewer}
\usage{
shinydivajs(objectData, inGrid = FALSE, enableAutoTitle = TRUE,
enableFullscreen = TRUE, enableLinkIcon = TRUE, width = NULL,
height = NULL)
}
\arguments{
\item{objectData}{URL to the JSON file that provides the object dimension data}
\item{inGrid}{Whether the viewer should display the document pages in grid view}
\item{enableAutoTitle}{Shows the title within a div of id diva-title}
\item{enableFullscreen}{Enable or disable fullscreen icon (mode still available)}
\item{enableLinkIcon}{Controls the visibility of the link icon}
\item{width}{Width in pixels (optional, defaults to automatic sizing)}
\item{height}{Height in pixels (optional, defaults to automatic sizing)}
\item{iipServerURL}{The URL to the IIPImage installation}
}
\value{
Interactive Diva.js document viewer widget
}
\description{
R interface to a JavaScript book image viewer designed to
present multi-page documents at multiple resolutions using
\href{https://github.com/DDMAL/diva.js}{diva.js} JavaScript library.
}
|
e1cdcd0d6182daaf51b6c820a781586f5379a8cd
|
8c84b89f304133224eac4c4819b3826c9ff84958
|
/man/switch.norm.funcs.Rd
|
bdf3248628f45bf478a5a12927fe7c934c9af431
|
[] |
no_license
|
QihangYang/PRECISION.array
|
7ca6010838e6dc0e65fec57014e736d1dc91f67c
|
a413b8ad068c548486d5e8fe20c7fa831aaeb10a
|
refs/heads/main
| 2023-03-27T11:27:55.657639
| 2021-03-15T15:08:53
| 2021-03-15T15:08:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 829
|
rd
|
switch.norm.funcs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/switch.norm.funcs.R
\name{switch.norm.funcs}
\alias{switch.norm.funcs}
\title{Switch Normalization Functions}
\usage{
switch.norm.funcs(norm.list = c("NN", "MN", "QN", "VSN"), norm.funcs = NULL)
}
\arguments{
\item{norm.list}{Switch all the built-in normalization methods into function names, including "NN", "MN", "QN", and "VSN".}
\item{norm.funcs}{New functions that user can create by themselves.}
}
\value{
A list that transforms the normalization method name into function name.
}
\description{
Switch the normalization method name into function name, for running in the normalization procedure
}
\details{
switch.norm.funcs
}
\examples{
switch.norm.funcs(norm.list = c("NN", "MN", "QN"), norm.funcs = NULL)
}
\keyword{names}
\keyword{switch}
|
46da8178685ce57c2616194ddeceddea65b7db95
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/btergm/R/interpretation.R
|
0eee87ca6f2517d00fdb479c01f314007a0cc7f6
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,123
|
r
|
interpretation.R
|
# interpretation function for ergm objects
#
# Computes micro-level probability interpretations from a fitted ergm:
#   type = "tie":  probability of the single tie i -> j conditional on the
#                  rest of the network (logit of change statistic x coefs).
#   type = "dyad": joint probabilities of the four states of the dyad (i, j)
#                  in a directed network, returned as a 2 x 2 matrix.
#   type = "node": probabilities of every 0/1 tie combination between one
#                  node and a set of partner nodes.
# Arguments:
#   object       - a fitted ergm object.
#   formula      - model formula; defaults to the one stored in 'object'.
#   coefficients - coefficient vector used for the linear predictor.
#   target       - the response network; by default re-evaluated from the
#                  left-hand side of the formula.
#   i, j         - node indices ('i' or 'j' may be a vector for type="node").
# Returns a named probability (tie), a 2 x 2 probability matrix (dyad), or
# a matrix with one row per tie combination (node).
# NOTE(review): relies on ergm internals ergm.getmodel()/ergm.getglobalstats(),
# whose API has changed across ergm versions -- confirm against the ergm
# version this package is built for.
interpret.ergm <- function(object, formula = object$formula,
    coefficients = coef(object), target = eval(parse(text =
    deparse(formula[[2]]))), type = "tie", i, j, ...) {
  # tie and dyad probabilities are defined for a single receiver node only
  if (length(j) > 1 && (type == "tie" || type == "dyad")) {
    stop(paste("For computing dyadic or tie probabilities, only a single 'j'",
        "node can be specified."))
  }
  # extract response network and adjust formula
  nw <- target
  dir <- is.directed(nw)
  # dyad probabilities need reciprocal ties, i.e. a directed network;
  # fall back to the tie probability otherwise
  if (dir == FALSE && type == "dyad") {
    type <- "tie"
    warning(paste("Dyadic probabilities not available for undirected",
        "networks. Reporting tie probability instead."))
  }
  # disassemble formula and preprocess rhs
  tilde <- deparse(formula[[1]])
  lhs <- "nw"  # rebind the response to the local working copy 'nw'
  rhs <- paste(deparse(formula[[3]]), collapse = "")
  rhs <- gsub("\\s+", " ", rhs)
  # curved terms (gwdegree/gwidegree/gwodegree, gwdsp, gwesp, gwnsp) have no
  # fixed statistic vector, so the change-statistic approach below fails
  if (grepl("(gw.{0,2}degree)|(gwdsp)|(gwesp)|(gwnsp)", rhs)) {
    stop("The interpretation functions do not work with curved model terms.")
  }
  # reassemble formula
  f <- paste(lhs, tilde, rhs)
  form <- as.formula(f)
  if (type == "tie") {
    # probability of i->j given the rest of the network: inverse logit of
    # the change statistic times the coefficient vector
    nw[i, j] <- 0 # set zero and compute stats
    mod <- ergm.getmodel(form, nw, drop = FALSE)
    stat0 <- ergm.getglobalstats(nw, mod)
    nw[i, j] <- 1 # set one and compute stats
    mod <- ergm.getmodel(form, nw, drop = FALSE)
    stat1 <- ergm.getglobalstats(nw, mod)
    # compute change statistics and ratio
    chgstat <- stat1 - stat0
    if (length(chgstat) != length(coefficients)) {
      stop(paste("Number of coefficients and statistics differ.",
          "Did you fit a curved model? Curved models with non-fixed",
          "parameters are currently not supported."))
    }
    lp <- t(chgstat) %*% cbind(coefficients)
    results <- c(1 / (1 + exp(-lp)))  # inverse logit
    names(results) <- "i->j = 1"
  } else if (type == "dyad") {
    # linear predictor for each of the four possible dyad states (xi = tie
    # i->j present?, xj = tie j->i present?)
    eta_mat <- matrix(NA, 2, 2)
    for (xi in 0:1) {
      for (xj in 0:1) {
        nw[i, j] <- xi
        nw[j, i] <- xj
        mod <- ergm.getmodel(form, nw, drop = FALSE)
        stat <- ergm.getglobalstats(ergm.getnetwork(form), mod)
        if (length(stat) != length(coefficients)) {
          stop(paste("Number of coefficients and statistics differ.",
              "Did you fit a curved model? Curved models with non-fixed",
              "parameters are currently not supported."))
        }
        eta_mat[xi + 1, xj + 1] <- t(coefficients) %*% cbind(stat)
      }
    }
    # normalize the four linear predictors into probabilities
    # (multinomial-logit form; each row/column state competes with all four)
    prob_mat <- matrix(NA, 2, 2)
    for (xi in 0:1) {
      for (xj in 0:1) {
        etas <- c(eta_mat) - eta_mat[xi + 1, xj + 1]
        prob_mat[xi + 1, xj + 1] <- 1 / (sum(exp(etas)))
      }
    }
    rownames(prob_mat) <- c("i->j = 0", "i->j = 1")
    colnames(prob_mat) <- c("j->i = 0", "j->i = 1")
    results <- prob_mat
  } else if (type == "node") {
    # exactly one of i/j must contain several nodes; if 'i' holds several,
    # swap i and j so the single node is always on the 'i' side below
    m <- length(i)
    n <- length(j)
    if (m == 1 && n > 1) {
      labels <- c("Sender", "Receiver")
    } else if (m > 1 && n == 1) {
      labels <- c("Receiver", "Sender")
      j.old <- j
      j <- i
      i <- j.old
      m <- length(i)
      n <- length(j)
    } else {
      stop("Either 'i' or 'j' must contain more than one node.")
    }
    # enumerate all 2^n 0/1 tie vectors towards the n partner nodes:
    # all-zero, all-one, then every combination with l = 1..n-1 ties set
    vecs <- rbind(rep(0, n), rep(1, n))
    base <- rep(0, n)
    for (l in 1:(n - 1)) {
      places <- t(combn(1:n, l))
      for (k in 1:nrow(places)) {
        veci <- base
        veci[places[k, ]] <- 1
        vecs <- rbind(vecs, veci)
      }
    }
    # linear predictor for each tie combination
    eta <- numeric(nrow(vecs))
    for (l in 1:nrow(vecs)) {
      nw[i, j] <- vecs[l, ]
      mod <- ergm.getmodel(form, nw, drop = FALSE)
      stat <- ergm.getglobalstats(ergm.getnetwork(form), mod)
      if (length(stat) != length(coefficients)) {
        stop(paste("Number of coefficients and statistics differ.",
            "Did you fit a curved model? Curved models with non-fixed",
            "parameters are currently not supported."))
      }
      eta[l] <- t(coefficients) %*% cbind(stat)
    }
    # convert linear predictors to probabilities over all combinations
    prob <- numeric(nrow(vecs))
    for (l in 1:nrow(vecs)) {
      prob[l] <- 1 / sum(exp(eta - eta[l]))
    }
    colnames(vecs) <- paste(labels[2], j)
    rownames(vecs) <- rep(paste(labels[1], i), nrow(vecs))
    results <- cbind(prob, vecs)
    colnames(results)[1] <- "probability"
  } else {
    stop("'type' argument undefined or not recognized.")
  }
  return(results)
}
# interpretation method for btergm objects
#
# Temporal analogue of interpret.ergm(): computes tie / dyad / node
# probabilities for each requested time step 't' of a btergm (or mtergm)
# fit. 'i' and 'j' may be given once (recycled across time steps) or as
# lists with one element per time step; for type "tie"/"dyad" each 'j'
# element must be a single node. Returns a named vector (tie) or a named
# list of matrices (dyad/node), one entry per time step.
# NOTE(review): depends on tergmprepare() building 'env$form' so that it
# references env$networks[[i]] -- this appears to be why the loop below
# deliberately reuses the name 'i' as the time index (after saving the
# original arguments into node_i/node_j) and why parent.env(env) is set to
# this call frame. Confirm against tergmprepare() before refactoring.
interpret.btergm <- function(object, formula = getformula(object),
    coefficients = coef(object), target = NULL, type = "tie", i, j,
    t = 1:object@time.steps, ...) {
  env <- tergmprepare(formula = formula, offset = FALSE, blockdiag = FALSE,
      verbose = FALSE)
  # make objects of this call frame (loop indices, node_i, node_j) visible
  # to the formula evaluated inside 'env'
  parent.env(env) <- environment()
  # extract response networks and adjust formula
  if (!is.null(target)) {
    env$networks <- target
  }
  # prepare i and j
  # recycle a scalar/vector 'i' across all time steps; warn when the node
  # set changes size over time, because a fixed index may then point at
  # different actors at different time steps
  if (!is.list(i)) {
    i <- rep(list(i), length(env$networks))
    num.actors <- numeric()
    for (k in t) {
      num.actors[k] <- nrow(as.matrix(env$networks[[k]]))
    }
    if (length(table(num.actors)) > 1) {
      warning(paste("'i' does not vary across time steps, but the number of",
          "actors does. 'i' can be provided as a list or as a name."))
    }
  }
  # same recycling and check for 'j'
  if (!is.list(j)) {
    j <- rep(list(j), length(env$networks))
    num.actors <- numeric()
    for (k in t) {
      num.actors[k] <- nrow(as.matrix(env$networks[[k]]))
    }
    if (length(table(num.actors)) > 1) {
      warning(paste("'j' does not vary across time steps, but the number of",
          "actors does. 'j' can be provided as a list or as a name."))
    }
  }
  # tie and dyad probabilities are defined for a single 'j' per time step
  for (l in 1:length(j)) {
    if (length(j[[l]]) > 1 && (type == "tie" || type == "dyad")) {
      stop(paste("For computing dyadic or tie probabilities, only a single 'j'",
          "node can be specified per time step."))
    }
  }
  # save the node arguments before 'i' is reused as the time index below
  node_i <- i
  node_j <- j
  if (type == "tie") {
    # per time step: inverse logit of change statistic x coefficients
    results <- numeric()
    for (i in t) {
      env$networks[[i]][node_i[[i]], node_j[[i]]] <- 0
      stat0 <- summary(remove.offset.formula(env$form), response = NULL)
      env$networks[[i]][node_i[[i]], node_j[[i]]] <- 1
      stat1 <- summary(remove.offset.formula(env$form), response = NULL)
      chgstat <- stat1 - stat0
      if (length(chgstat) != length(coefficients)) {
        stop(paste("Number of coefficients and statistics differ.",
            "Did you fit a curved model? Curved models with non-fixed",
            "parameters are currently not supported."))
      }
      lp <- t(chgstat) %*% cbind(coefficients)
      result <- c(1 / (1 + exp(-lp)))
      names(result) <- "i->j = 1"
      # indexed by time-step value; gaps left for skipped steps are
      # removed after the loop
      results[i] <- result
    }
    results <- results[!is.na(results)]
    names(results) <- paste("t =", t)
  } else if (type == "dyad") {
    # per time step: 2 x 2 probability matrix over the four dyad states,
    # as in interpret.ergm()
    results <- list()
    for (i in t) {
      eta_mat <- matrix(NA, 2, 2)
      for (xi in 0:1) {
        for (xj in 0:1) {
          env$networks[[i]][node_i[[i]], node_j[[i]]] <- xi
          env$networks[[i]][node_j[[i]], node_i[[i]]] <- xj
          stat <- summary(remove.offset.formula(env$form), response = NULL)
          if (length(stat) != length(coefficients)) {
            stop(paste("Number of coefficients and statistics differ.",
                "Did you fit a curved model? Curved models with non-fixed",
                "parameters are currently not supported."))
          }
          eta_mat[xi + 1, xj + 1] <- t(coefficients) %*% cbind(stat)
        }
      }
      # normalize linear predictors into probabilities
      prob_mat <- matrix(NA, 2, 2)
      for (xi in 0:1) {
        for (xj in 0:1) {
          etas <- c(eta_mat) - eta_mat[xi + 1, xj + 1]
          prob_mat[xi + 1, xj + 1] <- 1 / (sum(exp(etas)))
        }
      }
      rownames(prob_mat) <- c("i->j = 0", "i->j = 1")
      colnames(prob_mat) <- c("j->i = 0", "j->i = 1")
      results[[i]] <- prob_mat
    }
    results <- results[!sapply(results, is.null)]
    names(results) <- paste("t =", t)
  } else if (type == "node") {
    # per time step: probabilities of all tie combinations between one
    # node and several partner nodes, as in interpret.ergm()
    results <- list()
    for (i in t) {
      m <- length(node_i[[i]])
      n <- length(node_j[[i]])
      if (m == 1 && n > 1) {
        labels <- c("Sender", "Receiver")
      } else if (m > 1 && n == 1) {
        # swap so the single node is always on the node_i side
        labels <- c("Receiver", "Sender")
        j.old <- node_j[[i]]
        node_j[[i]] <- node_i[[i]]
        node_i[[i]] <- j.old
        m <- length(node_i[[i]])
        n <- length(node_j[[i]])
      } else {
        stop(paste("Either 'i' or 'j' must contain more than one node per",
            "time step."))
      }
      # enumerate all 2^n 0/1 tie vectors towards the n partner nodes
      vecs <- rbind(rep(0, n), rep(1, n))
      base <- rep(0, n)
      for (l in 1:(n - 1)) {
        places <- t(combn(1:n, l))
        for (r in 1:nrow(places)) {
          veci <- base
          veci[places[r, ]] <- 1
          vecs <- rbind(vecs, veci)
        }
      }
      # linear predictor for each tie combination
      eta <- numeric(nrow(vecs))
      for (l in 1:nrow(vecs)) {
        ik <- node_i[[i]]
        jk <- node_j[[i]]
        env$networks[[i]][ik, jk] <- vecs[l, ]
        stat <- summary(remove.offset.formula(env$form), response = NULL)
        if (length(stat) != length(coefficients)) {
          stop(paste("Number of coefficients and statistics differ.",
              "Did you fit a curved model? Curved models with non-fixed",
              "parameters are currently not supported."))
        }
        eta[l] <- t(coefficients) %*% cbind(stat)
      }
      # convert linear predictors to probabilities over all combinations
      prob <- numeric(nrow(vecs))
      for (l in 1:nrow(vecs)) {
        prob[l] <- 1 / sum(exp(eta - eta[l]))
      }
      colnames(vecs) <- paste(labels[2], node_j[[i]])
      rownames(vecs) <- rep(paste(labels[1], node_i[[i]]), nrow(vecs))
      result <- cbind(prob, vecs)
      colnames(result)[1] <- "probability"
      results[[i]] <- result
    }
    results <- results[!sapply(results, is.null)]
    names(results) <- paste("t =", t)
  } else {
    stop("'type' argument undefined or not recognized.")
  }
  return(results)
}
# register generic methods with ergm and btergm objects
# S4 dispatch: 'interpret' on an ergm fit uses interpret.ergm; btergm and
# mtergm fits both share the interpret.btergm implementation.
setMethod("interpret", signature = className("ergm", "ergm"),
    definition = interpret.ergm)
setMethod("interpret", signature = className("btergm", "btergm"),
    definition = interpret.btergm)
setMethod("interpret", signature = className("mtergm", "btergm"),
    definition = interpret.btergm)
|
c17b723895bd07c9cee154f898ea39133d7ce423
|
43348304944732564b0dc500472a95b0ccd7c47d
|
/cachematrix.R
|
696632646e99c1537d30d2ae8eac52e36f7f8161
|
[] |
no_license
|
amsdias/ProgrammingAssignment2
|
7aa700d7d8bef392e54a70eb49578a7b6591e25c
|
fab4ad6014082515c88d9ee1ec88455c3ea036d3
|
refs/heads/master
| 2021-01-24T15:33:30.019542
| 2015-07-25T17:24:14
| 2015-07-25T17:24:14
| 39,217,658
| 0
| 0
| null | 2015-07-16T20:01:18
| 2015-07-16T20:01:16
| null |
UTF-8
|
R
| false
| false
| 1,093
|
r
|
cachematrix.R
|
## The following two functions are used to cache the inverse of a matrix
## This is useful since it is a costly operation
## Build a special "matrix" wrapper able to cache its inverse.
##
## The returned value is a list of four accessor functions closing over the
## matrix and its cached inverse:
##   set(y)        -- replace the matrix and invalidate the cached inverse
##   get()         -- return the current matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # the old inverse no longer applies
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## If an inverse is already cached (and the matrix has not been replaced via
## set(), which clears the cache), the cached value is returned and a
## message is emitted; otherwise the inverse is computed with solve(),
## stored in the cache, and returned.
##
## Args:
##   x:   a cache object produced by makeCacheMatrix().
##   ...: additional arguments forwarded to solve() (e.g. tol).
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  # Bug fix: forward ... to solve(); previously the extra arguments
  # accepted by cacheSolve were silently ignored.
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
434b48daa05df78e094ac6b74fff8d18e87de967
|
e98ca256c242b0e333e4631905f49fdc0aeb8def
|
/calc.rmseP.R
|
e986e7dc21533f3a9bb7deb3a67c8d90330b28e0
|
[] |
no_license
|
xime377/RedEdge-calibration
|
635268b2003a3f8897cf3dd1f1d903b826c29c7a
|
1026cb2732114915c5d6a70436dbb1ac7ad98dea
|
refs/heads/master
| 2021-01-23T01:08:47.344638
| 2018-02-01T12:57:27
| 2018-02-01T12:57:27
| 85,881,512
| 6
| 3
| null | 2017-03-30T10:35:57
| 2017-03-22T22:17:13
| null |
UTF-8
|
R
| false
| false
| 911
|
r
|
calc.rmseP.R
|
## Helpers for computing the relative RMSE of predictions.

#' Element-wise relative (fractional) prediction error
#'
#' @param Pred predicted values
#' @param Ref reference (observed) values
#'
#' @return (Pred - Ref) / Ref, element-wise
EP <- function(Pred, Ref) {
  (Pred - Ref) / Ref
}
#' Element-wise squared relative error
#'
#' @param Pred predicted values
#' @param Ref reference (observed) values
#'
#' @return the square of the relative error from EP()
seP <- function(Pred, Ref) {
  EP(Pred, Ref)^2
}
#' Mean squared relative error
#'
#' @param Pred predicted values
#' @param Ref reference (observed) values
#'
#' @return the mean of the squared relative errors from seP()
mseP <- function(Pred, Ref) {
  mean(seP(Pred, Ref))
}
#' Root mean squared relative error
#'
#' @param Pred predicted values
#' @param Ref reference (observed) values
#'
#' @return the square root of the mean squared relative error from mseP()
rmseP <- function(Pred, Ref) {
  sqrt(mseP(Pred, Ref))
}
##################################################
|
5f7e42c26b0631ca39302f79c9f9e4d4a1eacc29
|
b7955a94f382c890f12ee2506583273548045ec7
|
/Combination.r
|
03c2c7caa7d1bcb7a6160f2377ea10be4bd9f687
|
[] |
no_license
|
DSSAT/glue
|
7e9e590ccff4e0204700b7409dde23fb49cfbabb
|
139a26d4b8ef752145a04ec81bb552c3a5568dd9
|
refs/heads/develop
| 2023-08-17T05:28:52.363786
| 2021-08-27T20:01:27
| 2021-08-27T20:01:27
| 120,203,785
| 1
| 12
| null | 2023-08-11T20:17:17
| 2018-02-04T16:50:51
|
R
|
UTF-8
|
R
| false
| false
| 463
|
r
|
Combination.r
|
## Combine the per-measurement likelihood columns of 'LikelihoodMatrix'
## into a single likelihood by multiplying columns 2..ncol element-wise
## (column 1 is skipped; with exactly two columns the second is returned
## unchanged).
Combination <- function(LikelihoodMatrix) {
  n_cols <- dim(LikelihoodMatrix)[2]
  # start from the first likelihood column
  combined <- LikelihoodMatrix[2]
  if (n_cols != 2) {
    # fold the remaining likelihood columns into the running product
    for (k in 3:n_cols) {
      combined <- combined * LikelihoodMatrix[k]
    }
  }
  combined
}
|
3d3fd4aa96958620aeb68babdbae21f82e85f7ea
|
f79cd4e052c5cbb24e7ef3e4bec1c39f9ce4e413
|
/BEMTOOL-ver2.5-2018_0901/bmtgui/biological/assessment/VITpaths_females/add.VITpaths_females.r
|
d9a66775cc57b36b728249abc182971c37cefc1c
|
[] |
no_license
|
gresci/BEMTOOL2.5
|
4caf3dca3c67423af327a8ecb1e6ba6eacc8ae14
|
619664981b2863675bde582763c5abf1f8daf34f
|
refs/heads/master
| 2023-01-12T15:04:09.093864
| 2020-06-23T07:00:40
| 2020-06-23T07:00:40
| 282,134,041
| 0
| 0
| null | 2020-07-24T05:47:24
| 2020-07-24T05:47:23
| null |
UTF-8
|
R
| false
| false
| 1,245
|
r
|
add.VITpaths_females.r
|
# BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# add elements to the list of female VIT growth-path files
# ------------------------------------------------------------------------------
#
# Reads the global `matrix_VITpath` (the Year/File table filled in by the
# GUI) and appends one named entry per row -- list(Year = ..., File = ...) --
# to the global `VITpaths_females_list`. Both globals are updated via `<<-`,
# matching the convention used throughout the BEMTOOL GUI code.
add.VITpaths_females <- function() {
  if (is.null(matrix_VITpath)) {
    return(invisible(NULL))
  }
  # Keep only the first two columns (Year, File) of the GUI table.
  VITpaths_femaless <<- matrix_VITpath[, c(1, 2)]
  n_rows <- nrow(VITpaths_femaless)
  if (n_rows != 0) {
    for (row_idx in seq_len(n_rows)) {
      entry <- as.list(VITpaths_femaless[row_idx, ])
      names(entry) <- c("Year", "File")
      VITpaths_females_list <<- c(VITpaths_females_list, list(entry))
    }
  }
  invisible(NULL)
}
|
c964ed4e18bcb187e202c283d51b512e44002214
|
b8883d2e0019778f2dd66919d39baad425e7dbe6
|
/R/detect titles.R
|
753b225247fc18f6a2acf003e88c4bb940e706c0
|
[
"MIT"
] |
permissive
|
marissasmith8/Citation-Network-Analysis
|
e952fb5431b33efea6d898c95dbc1624fe909a30
|
e2af77670fd9c21415dfe97fd288962aaf1e7e95
|
refs/heads/master
| 2023-03-29T05:06:40.350490
| 2021-03-29T15:03:00
| 2021-03-29T15:03:00
| 347,930,282
| 0
| 0
|
MIT
| 2021-03-17T11:40:53
| 2021-03-15T10:44:26
|
HTML
|
UTF-8
|
R
| false
| false
| 1,998
|
r
|
detect titles.R
|
# Interactive script: reconciles the "Full References" and "Clean References"
# sheets of a citation workbook, aligns them row-by-row, and builds rebus
# regex patterns to extract article titles from the full reference strings.
library(readxl)
library(tidyverse)
library(rebus)
# First column of each sheet. The trailing space in the sheet names is
# intentional and must match the workbook exactly.
full <- read_xlsx("Refs full (08.04.19).xlsx", sheet = "Full References ")[,1]
clean <- read_xlsx("Refs full (08.04.19).xlsx", sheet = "Clean References ")[,1]
# Quick interactive sanity checks on missingness (results printed, not stored).
table(is.na(full))
table(is.na(clean))
# Columns 2-19 of the clean sheet hold one reference name each.
refs <- colnames(read_xlsx("Refs full (08.04.19).xlsx", sheet = "Clean References "))[2:19]
# Section headers: the row after each NA marks the start of a new reference
# group; "WHO 2014" is prepended because the first group has no NA before it.
cleannames <- c("WHO 2014", clean[(which(is.na(clean$Reference))+1),][["Reference"]])
fullnames <- c("WHO 2014", full[(which(is.na(full$`Full Reference`))+1),][["Full Reference"]])
# Cross-check (printed for inspection): is each expected reference present in
# the full and clean sheets, and what are its group header names?
tibble(refs, full = refs %in% full$`Full Reference`,
clean = refs %in% clean$Reference,
refname = cleannames,
fullname = fullnames)
# Drop the group-header rows so only actual citations remain.
full2 <- full[which(!full$`Full Reference` %in% fullnames), ]
clean2 <- clean[which(!clean$Reference %in% cleannames), ]
# NOTE(review): hard-coded split at rows 1356/1357 inserts one reference that
# is missing from the full sheet; these indices must be re-verified whenever
# the workbook changes.
full3 <- rbind(full2[1:1356,], tibble("Full Reference" = "National Institute for Health and Care Excellence (2013)"), full2[1357:2870,])
# Pair full and clean references positionally; na.omit drops unmatched rows.
allrefs <- tibble(full = full3$`Full Reference`, clean = clean2$Reference) %>% na.omit()
# NOTE(review): "[az]?" matches only a literal "a" or "z" after the year;
# presumably "[a-z]?" (disambiguating suffix like "2014a") was intended —
# confirm against the workbook before relying on addTerm.
updatedTb <- allrefs %>%
mutate(year = str_match(allrefs$clean, "(\\d{4})([az]?)")[,2],
addTerm = str_match(allrefs$clean, "(\\d{4})([az]?)")[,3],
firstWrdFull = str_match(allrefs$full, "^\\s?([A-Za-z]*)")[,2],
firstWrdClean = str_match(allrefs$clean, "^\\s?([A-Za-z]*)")[,2])
# Title after an author/year block: a period, optional space, then roughly
# four words (allowing ":" or "-" joins) captured as the candidate title.
title_pattern <- "\\. ?" %R%
capture(one_or_more(WRD) %R%
optional(or(":", "-")) %R%
one_or_more(WRD) %R%
SPC %R%
one_or_more(WRD) %R%
optional(or(":", "-")) %R%
one_or_more(WRD))
# Variant for references that START with the title (no leading author block).
title_pattern2 <- START %R% capture(one_or_more(WRD) %R%
optional(or(":", "-")) %R%
optional(one_or_more(WRD)) %R%
SPC %R%
one_or_more(WRD) %R%
optional(or(":", "-")) %R%
optional(one_or_more(WRD))) %R%
optional(".")
# Visual spot-check of the first 100 rows: if the full reference starts with
# the clean reference's first word (author-led), use title_pattern, else
# assume a title-led reference and use title_pattern2.
str_view(updatedTb[1:100,]$full,
pattern = ifelse(str_detect(updatedTb[1:100,]$full, START %R% updatedTb[1:100,]$firstWrdClean),
title_pattern,
title_pattern2))
|
e37d90ff868fbe471be3981e115bfe34eca3276e
|
dda08ebff68da583ec11f861cf1d0e75293fd2c5
|
/tests/testthat/tests.seqtest.R
|
f2e33cb9a6f8d7c2a4f5d16a29d3985c7ab177c6
|
[] |
no_license
|
lnalborczyk/ESTER
|
314f65f1a52d925f475cff2b0b54cbbf85fc5e0a
|
eee73e59b3e62caa936d64d563b6fa9d69e593b7
|
refs/heads/master
| 2021-01-11T17:03:10.560357
| 2018-05-19T08:57:35
| 2018-05-19T08:57:35
| 69,504,922
| 1
| 2
| null | 2017-01-26T14:34:38
| 2016-09-28T21:23:41
|
R
|
UTF-8
|
R
| false
| false
| 36
|
r
|
tests.seqtest.R
|
# Legacy testthat context() header naming this test file's scope; the actual
# expectations for the seqtest output are not visible in this chunk.
context("Tests for seqtest output")
|
3349e3a4c4548de8b5db99399b3b097d4c341aac
|
f94f004442845afd0ca9ff5e2881bb57044f8ec0
|
/tests/testthat/test-gather.R
|
d2712d54118909c5247197c33a63743a275da146
|
[] |
no_license
|
jreisner/biclustermd
|
f715ce4f0673c5a7206651d4fc7be459960d2303
|
975af747fce1adf46feba4ec65c05debf6cc8b17
|
refs/heads/master
| 2021-07-23T19:13:22.140337
| 2021-06-17T14:10:44
| 2021-06-17T14:10:44
| 127,488,714
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,008
|
r
|
test-gather.R
|
context("gather.biclustermd")

# Each test fits the model fresh so the tests remain fully independent.

test_that("test that gather() returns all entries in the data provided to biclustermd()", {
  fit <- biclustermd(synthetic)
  tidied <- gather(fit)
  # One output row per cell of the input matrix.
  expect_equal(nrow(tidied), prod(dim(fit$data)))
})

test_that("test that gather() retains all row names", {
  fit <- biclustermd(synthetic)
  tidied <- gather(fit)
  expect_equal(sort(unique(tidied$row_name)), sort(rownames(fit$data)))
})

test_that("test that gather() retains all column names", {
  fit <- biclustermd(synthetic)
  tidied <- gather(fit)
  expect_equal(sort(unique(tidied$col_name)), sort(colnames(fit$data)))
})

test_that("test that gather()$value matches the sparsity of the provided data", {
  fit <- biclustermd(synthetic)
  tidied <- gather(fit)
  # Proportion of NAs must be preserved by the reshape.
  expect_equal(mean(is.na(tidied$value)), mean(is.na(fit$data)))
})

test_that("test that gather() contains the correct number of biclusters", {
  fit <- biclustermd(synthetic)
  tidied <- gather(fit)
  expect_equal(max(tidied$bicluster_no), prod(dim(fit$A)))
})
|
39a193a62224a27436270f5f1b0691ca8dec6a9c
|
d36827d1ae6c78b62ab2b67b2e6b48c7f9f9b4ef
|
/Legacy/Oldtrain/pr_roc_start.R
|
1d94ad51f6151e379d391f28f29aff27989abc74
|
[] |
no_license
|
agawes/CNN-iPSC
|
c6c3f944d32b30ecfd01bbd478712d2e63244c76
|
a6485ea11d3cdb7c1fb5dc3abaa3a0cdfb6c34ae
|
refs/heads/master
| 2020-09-07T12:39:59.019667
| 2019-10-04T09:48:02
| 2019-10-04T09:48:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 811
|
r
|
pr_roc_start.R
|
# Plot ROC curves (FPR vs TPR) for the eight cell-type classes of the CNN
# test set. Expects files roc1.txt ... roc8.txt (tab-separated, no header;
# V1 = FPR, V2 = TPR) in the directory set below.
library(precrec)
library(ggplot2)
library(grid)
library(tidyr)
library(plyr)
library(dplyr)
# NOTE(review): hard-coded user-specific path; kept so the script's behavior
# is unchanged, but consider parameterizing the directory instead of setwd().
setwd("~/Oxford 2.0/Scripts/CNN_project/Data/better_train/iter1")
# NOTE(review): "roc*" is interpreted as a REGEX ("ro" + zero or more "c"),
# not a shell glob; kept as-is so the matched file set does not change.
temp <- list.files(pattern="roc*")
# seq_along() is safe when no files match (1:length(temp) would give c(1, 0)).
for (i in seq_along(temp)) assign(temp[i], read.delim(temp[i], header = FALSE))
# Label each per-class ROC table with its cell-type name.
roc1.txt$name <- "BLC"
roc2.txt$name <- "DE"
roc3.txt$name <- "EN"
roc4.txt$name <- "EP"
roc5.txt$name <- "GT"
roc6.txt$name <- "PE"
roc7.txt$name <- "PF"
roc8.txt$name <- "iPSC"
# Stack all classes into one long table for a single faceless overlay plot.
roc_all.txt <- dplyr::bind_rows(roc1.txt, roc2.txt, roc3.txt, roc4.txt, roc5.txt, roc6.txt, roc7.txt, roc8.txt)
ggplot(data=roc_all.txt, aes(x=V1, y=V2, group=name, color=name)) +
  geom_line() +
  scale_color_brewer(palette="Paired")+
  labs(title="ROC plots of model test set",x="FPR", y = "TPR") +
  theme_minimal()
|
227d38acc193d8a603dc9b432dd38a83335a1c55
|
ec1f284fa56cb3270ad64d22615aa63b8da47f6e
|
/doc/cours/summerSchool/2016CSSS/nonlinear.R
|
d79ace805bd73e0aad298daf3f4d60272c14d081
|
[] |
no_license
|
simoncarrignon/phd
|
681d9935d5bf66b9b51a0e852b851a6487deb425
|
542eeaf080b2e779adc86ac8899c503c647d0204
|
refs/heads/master
| 2021-01-25T20:59:48.073803
| 2019-01-28T15:28:21
| 2019-01-28T15:28:21
| 39,514,198
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
nonlinear.R
|
# Logistic-map demo: x_{i+1} = R * x_i * (1 - x_i).

# Iterate the logistic map for n steps.
#
# The original body (`res = R*x[n-1](1-x[n-1])`) referenced the global `x`
# instead of `x0` and tried to CALL the numeric `x[n-1]` as a function, so it
# errored at runtime. This version implements the clearly intended recurrence
# with the same signature.
#
# x0: starting value in [0, 1]
# R:  growth-rate parameter
# n:  number of trajectory points to return (>= 1; n < 1 gives numeric(0))
# Returns the length-n trajectory starting at x0.
f <- function(x0, R, n) {
  if (n < 1) return(numeric(0))
  traj <- numeric(n)
  traj[1] <- x0
  for (i in seq_len(n - 1)) {
    traj[i + 1] <- R * traj[i] * (1 - traj[i])
  }
  traj
}

x <- 1:10

# One step of the logistic map, vectorized over x.
fq2 <- function(x, r) {
  return(r * x * (1 - x))
}

x <- seq(0, 1, .001)
plot(x, fq2(x, 3.1))
# was line(x, x): stats::line() fits a Tukey resistant line and draws nothing;
# lines() adds the identity reference line intended here.
lines(x, x)
fq2(x, 3.1)
|
f6c4ec33fd5d71e13e9f229bf28b60d179a1a3d7
|
5bb2c8ca2457acd0c22775175a2722c3857a8a16
|
/man/get_pvalue.Rd
|
c27840a130b6444d4fd29366713e2e6d02f2491a
|
[] |
no_license
|
IQSS/Zelig
|
d65dc2a72329e472df3ca255c503b2e1df737d79
|
4774793b54b61b30cc6cfc94a7548879a78700b2
|
refs/heads/master
| 2023-02-07T10:39:43.638288
| 2023-01-25T20:41:12
| 2023-01-25T20:41:12
| 14,958,190
| 115
| 52
| null | 2023-01-25T20:41:13
| 2013-12-05T15:57:10
|
R
|
UTF-8
|
R
| false
| true
| 356
|
rd
|
get_pvalue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{get_pvalue}
\alias{get_pvalue}
\title{Extract p-values from a Zelig estimated model}
\usage{
get_pvalue(object)
}
\arguments{
\item{object}{an object of class Zelig}
}
\description{
Extract p-values from a Zelig estimated model
}
\author{
Christopher Gandrud
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.