blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d571753bca092f40ace60045f386ec199d2bc249 | b30a6a9d69305509e197bd36d5307578a05ad46f | /PARMskew.R | 0ec47826450515b54e229258e6aa781e3c57dc98 | [] | no_license | amwootte/analysisscripts | 49b4d6736d1701805a960425f96d01e7397ef852 | 9ab5dd1a7659664daf652c0138510e5a3644ee62 | refs/heads/master | 2022-07-20T05:09:10.418987 | 2022-07-06T15:02:10 | 2022-07-06T15:02:10 | 116,304,534 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,778 | r | PARMskew.R | source("/data2/3to5/I35/scripts/analysisfunctions.R")
# --- Libraries and input files -------------------------------------------
# ncdf4: NetCDF I/O; maps/fields/sp: state outlines and surface() plotting.
library(ncdf4)
library(maps)
library(fields)
library(sp)
setwd("/home/woot0002/")
# Daily historical (1981-2005) PARM-downscaled files over the I35 land
# domain: diurnal temperature range (dtr), tasmax and tasmin.
PARMdtrfile = "dtr_day_I35dtrdetrp1-PARM-B10P01K00_historical_r6i1p1_I35Land_19810101-20051231.nc"
PARMtmaxfile = "tasmax_day_I35txdetrp1-PARM-B10P01K00_historical_r6i1p1_I35Land_19810101-20051231.nc"
PARMtminfile = "tasmin_day_I35tndetrp1-PARM-Bd10P01K00_historical_r6i1p1_I35Land_19810101-20051231.nc"
# EDQM-downscaled and PRISM observational counterparts used for comparison.
EDQMtminfile = "/data2/3to5/I35/tasmin/EDQM/tasmin_day_I35tnp1-EDQM-A10P01K00_historical_r6i1p1_I35Land_19810101-20051231.nc"
EDQMtmaxfile = "/data2/3to5/I35/tasmax/EDQM/tasmax_day_I35txp1-EDQM-A10P01K00_historical_r6i1p1_I35Land_19810101-20051231.nc"
PRISMtminfile = "/data2/3to5/I35/tasmin/PRISM/tasmin_day_prism_historical_r0i0p0_SCCSC0p1_19810101-20051231.nc"
PRISMtmaxfile = "/data2/3to5/I35/tasmax/PRISM/tasmax_day_prism_historical_r0i0p0_SCCSC0p1_19810101-20051231.nc"
# Read PARM dtr plus the lon/lat coordinate vectors (arrays indexed
# [lon, lat, time] based on how they are subset below).
test = nc_open(PARMdtrfile)
PARMdtr = ncvar_get(test,"dtr")
lat = ncvar_get(test,"lat")
lon = ncvar_get(test,"lon")
nc_close(test)
test = nc_open(PARMtmaxfile)
PARMtmax = ncvar_get(test,"tasmax")
nc_close(test)
test = nc_open(PARMtminfile)
PARMtmin = ncvar_get(test,"tasmin")
nc_close(test)
# Daily calendar for 1981-2005. If the data have fewer time steps than the
# full calendar, assume a no-leap calendar and drop Feb 29 entries.
# NOTE(review): histdates2 is only created inside this branch but is used
# unconditionally later -- confirm the inputs are always no-leap.
histdates= seq(as.Date("1981-01-01"),as.Date("2005-12-31"),by="day")
if(length(histdates)>dim(PARMdtr)[3]){
histdates2 = histdates[-which(substr(histdates,6,10)=="02-29")]
}
# Read EDQM tasmax/tasmin and derive the EDQM diurnal temperature range.
test = nc_open(EDQMtmaxfile)
EDQMtmax = ncvar_get(test,"tasmax")
nc_close(test)
test = nc_open(EDQMtminfile)
EDQMtmin = ncvar_get(test,"tasmin")
nc_close(test)
EDQMdtr = EDQMtmax-EDQMtmin
# Read PRISM observational tasmax/tasmin and derive its dtr.
test = nc_open(PRISMtmaxfile)
PRISMtmax = ncvar_get(test,"tasmax")
nc_close(test)
test = nc_open(PRISMtminfile)
PRISMtmin = ncvar_get(test,"tasmin")
nc_close(test)
PRISMdtr = PRISMtmax-PRISMtmin
# Sanity checks on the delivered PARM dtr: recompute dtr as tmax - tmin,
# floor the delivered dtr at 0.01 (avoids zero/negative ranges), and print
# the range of the disagreement between the two versions.
PARMdtrcheck = PARMtmax - PARMtmin
PARMdtr2 = ifelse(PARMdtr<0.01,0.01,PARMdtr)
PARMtmincheck = PARMtmax - PARMdtr2
range(PARMdtrcheck-PARMdtr2,na.rm=TRUE)
# e1071 provides skewness().
library(e1071)
# One lon x lat skewness matrix per product/variable combination, computed
# over the full daily time series at each grid cell.
PARMdtrskew = PARMdtr2skew = EDQMdtrskew = PRISMdtrskew = PARMtmaxskew = EDQMtmaxskew = PRISMtmaxskew = PARMtminskew = EDQMtminskew = PRISMtminskew = matrix(NA,nrow=length(lon),ncol=length(lat))
# Cells that are NA on day 1 of PARM tmax are treated as outside the domain
# and left NA in every output matrix.
for(r in 1:length(lon)){
for(c in 1:length(lat)){
if(is.na(PARMtmax[r,c,1])==FALSE){
PARMdtrskew[r,c] = skewness(PARMdtr[r,c,],na.rm=TRUE)
PARMdtr2skew[r,c] = skewness(PARMdtr2[r,c,],na.rm=TRUE)
EDQMdtrskew[r,c] = skewness(EDQMdtr[r,c,],na.rm=TRUE)
PRISMdtrskew[r,c] = skewness(PRISMdtr[r,c,],na.rm=TRUE)
PARMtmaxskew[r,c] = skewness(PARMtmax[r,c,],na.rm=TRUE)
EDQMtmaxskew[r,c] = skewness(EDQMtmax[r,c,],na.rm=TRUE)
PRISMtmaxskew[r,c] = skewness(PRISMtmax[r,c,],na.rm=TRUE)
PARMtminskew[r,c] = skewness(PARMtmin[r,c,],na.rm=TRUE)
EDQMtminskew[r,c] = skewness(EDQMtmin[r,c,],na.rm=TRUE)
PRISMtminskew[r,c] = skewness(PRISMtmin[r,c,],na.rm=TRUE)
}
}
message("Finished calcs for row ",r," / ",length(lon))
}
###### Map the full-period skewness fields --------------------------------
# One shared colour scale across all ten maps so the panels are comparable.
range(c(PARMdtrskew,PARMdtr2skew,EDQMdtrskew,PRISMdtrskew,PARMtminskew,EDQMtminskew,PRISMtminskew,PARMtmaxskew,EDQMtmaxskew,PRISMtmaxskew),na.rm=TRUE)
rawcolorbar = colorramp(c(PARMdtrskew,PARMdtr2skew,EDQMdtrskew,PRISMdtrskew,PARMtminskew,EDQMtminskew,PRISMtminskew,PARMtmaxskew,EDQMtmaxskew,PRISMtmaxskew),colorchoice="bluetored",type="difference",Blimit=50,use_fixed_scale = TRUE, fixed_scale = c(-1.3,1.3))
#testsfc1 = list(x=(lon-360),y=lat,z=PARMdtrskew)
#surface(testsfc1,type="I",main="PARM dtr skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
#map("state",add=TRUE)
# Each map below: convert lon from 0-360 to -180/180, draw the skewness
# surface with the shared colour bar, then overlay state boundaries.
testsfc1 = list(x=(lon-360),y=lat,z=PARMdtr2skew)
surface(testsfc1,type="I",main="PARM dtr (post-processed) skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=EDQMdtrskew)
surface(testsfc1,type="I",main="EDQM dtr skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PRISMdtrskew)
surface(testsfc1,type="I",main="PRISM dtr skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PARMtmaxskew)
surface(testsfc1,type="I",main="PARM tmax skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=EDQMtmaxskew)
surface(testsfc1,type="I",main="EDQM tmax skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PRISMtmaxskew)
surface(testsfc1,type="I",main="PRISM tmax skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PARMtminskew)
surface(testsfc1,type="I",main="PARM tmin skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=EDQMtminskew)
surface(testsfc1,type="I",main="EDQM tmin skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PRISMtminskew)
surface(testsfc1,type="I",main="PRISM tmin skew",zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
# Mask: cells where both PARM tmin and tmax skewness are negative; the
# tmin - tmax skewness difference is mapped only over those cells.
mask = ifelse(PARMtminskew<0 & PARMtmaxskew<0,1,0)
PARMskewdiff = ifelse(mask==1,PARMtminskew-PARMtmaxskew,NA)
testsfc1 = list(x=(lon-360),y=lat,z=mask)
surface(testsfc1,type="I",main="PARM skew mask")
map("state",add=TRUE)
diffcolorbar = colorramp(c(PARMskewdiff),colorchoice="bluetored",type="difference",Blimit=50,use_fixed_scale = TRUE,fixed_scale = c(-0.55,0.3))
testsfc1 = list(x=(lon-360),y=lat,z=PARMskewdiff)
surface(testsfc1,type="I",main="PARM skew difference (tmin-tmax)",zlim=diffcolorbar[[1]],col=diffcolorbar[[3]],breaks=diffcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
# Locate the most negative difference cell and count cells <= 0 vs non-NA.
which(PARMskewdiff==min(PARMskewdiff,na.rm=TRUE),arr.ind = TRUE)
nrow(which(PARMskewdiff<=0,arr.ind = TRUE))
nrow(which(is.na(PARMskewdiff)==FALSE,arr.ind = TRUE))
# Single-cell diagnostics at lon index i=5, lat index j=117 -- presumably
# the cell identified by the which(...) just above; confirm before reuse.
i=5
j=117
PARMdtr2vals = PARMdtr2[i,j,]
PARMtmaxvals = PARMtmax[i,j,]
PARMtminvals = PARMtmin[i,j,]
PRISMtmaxvals = PRISMtmax[i,j,]
PRISMtminvals = PRISMtmin[i,j,]
PRISMdtrvals = PRISMdtr[i,j,]
EDQMtmaxvals = EDQMtmax[i,j,]
EDQMtminvals = EDQMtmin[i,j,]
EDQMdtrvals = EDQMdtr[i,j,]
# Empirical quantiles at this cell for percentiles 1%-99%.
PARMtmaxquant = PARMtminquant = PARMdtrquant = c()
EDQMtmaxquant = EDQMtminquant = EDQMdtrquant = c()
PRISMtmaxquant = PRISMtminquant = PRISMdtrquant = c()
probsin = seq(0.01,0.99,by=0.01)
for(p in 1:length(probsin)){
PARMtmaxquant[p] = quantile(PARMtmaxvals,probs=probsin[p],na.rm=TRUE)
PARMtminquant[p] = quantile(PARMtminvals,probs=probsin[p],na.rm=TRUE)
PARMdtrquant[p] = quantile(PARMdtr2vals,probs=probsin[p],na.rm=TRUE)
EDQMtmaxquant[p] = quantile(EDQMtmaxvals,probs=probsin[p],na.rm=TRUE)
EDQMtminquant[p] = quantile(EDQMtminvals,probs=probsin[p],na.rm=TRUE)
EDQMdtrquant[p] = quantile(EDQMdtrvals,probs=probsin[p],na.rm=TRUE)
PRISMtmaxquant[p] = quantile(PRISMtmaxvals,probs=probsin[p],na.rm=TRUE)
PRISMtminquant[p] = quantile(PRISMtminvals,probs=probsin[p],na.rm=TRUE)
PRISMdtrquant[p] = quantile(PRISMdtrvals,probs=probsin[p],na.rm=TRUE)
}
# Quantile anomalies (relative to the mean quantile) so tmax/tmin/dtr can
# share one x-axis in the comparison plot.
PARMtmaxquantanom = PARMtmaxquant-mean(PARMtmaxquant)
PARMtminquantanom = PARMtminquant-mean(PARMtminquant)
PARMdtrquantanom = PARMdtrquant-mean(PARMdtrquant)
plot(probsin~PARMtmaxquantanom,xlim=range(c(PARMtmaxquantanom,PARMtminquantanom,PARMdtrquantanom)),lwd=2,type="l",col="red")
lines(probsin~PARMtminquantanom,lwd=2,col="blue")
lines(probsin~PARMdtrquantanom,lwd=2,col="black")
plot(probsin~PARMdtrquant,lwd=2,type="l")
########## Monthly skewness -----------------------------------------------
# NOTE: this overwrites the annual skewness matrices above with
# lon x lat x 12 arrays (one layer per calendar month).
PARMdtr2skew = EDQMdtrskew = PRISMdtrskew = PARMtmaxskew = EDQMtmaxskew = PRISMtmaxskew = PARMtminskew = EDQMtminskew = PRISMtminskew = array(NA,dim=c(length(lon),length(lat),12))
for(m in 1:12){
# Time indices belonging to calendar month m. NOTE(review): relies on
# histdates2 existing, which is only created when the data are no-leap.
monidx = which(as.numeric(substr(histdates2,6,7))==m)
for(r in 1:length(lon)){
for(c in 1:length(lat)){
if(is.na(PARMtmax[r,c,1])==FALSE){
PARMdtr2skew[r,c,m] = skewness(PARMdtr2[r,c,monidx],na.rm=TRUE)
EDQMdtrskew[r,c,m] = skewness(EDQMdtr[r,c,monidx],na.rm=TRUE)
PRISMdtrskew[r,c,m] = skewness(PRISMdtr[r,c,monidx],na.rm=TRUE)
PARMtmaxskew[r,c,m] = skewness(PARMtmax[r,c,monidx],na.rm=TRUE)
EDQMtmaxskew[r,c,m] = skewness(EDQMtmax[r,c,monidx],na.rm=TRUE)
PRISMtmaxskew[r,c,m] = skewness(PRISMtmax[r,c,monidx],na.rm=TRUE)
PARMtminskew[r,c,m] = skewness(PARMtmin[r,c,monidx],na.rm=TRUE)
EDQMtminskew[r,c,m] = skewness(EDQMtmin[r,c,monidx],na.rm=TRUE)
PRISMtminskew[r,c,m] = skewness(PRISMtmin[r,c,monidx],na.rm=TRUE)
}
}
message("Finished calcs for row ",r," / ",length(lon))
}
message("Finished calcs for month ",m)
}
#### Monthly skewness maps written to a multi-page PDF --------------------
# Fresh colour scale spanning all monthly fields (no fixed limits here).
rawcolorbar = colorramp(c(PARMdtr2skew,EDQMdtrskew,PRISMdtrskew,PARMtminskew,EDQMtminskew,PRISMtminskew,PARMtmaxskew,EDQMtmaxskew,PRISMtmaxskew),colorchoice="bluetored",type="difference",Blimit=30)
# Three panels per page (PARM / EDQM / PRISM), nine maps per month.
pdf("monthlyskewness.pdf",onefile=TRUE,width=15,height=5)
par(mfrow=c(1,3))
for(m in 1:12){
testsfc1 = list(x=(lon-360),y=lat,z=PARMdtr2skew[,,m])
surface(testsfc1,type="I",main=paste("PARM dtr (post-processed) skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=EDQMdtrskew[,,m])
surface(testsfc1,type="I",main=paste("EDQM dtr skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PRISMdtrskew[,,m])
surface(testsfc1,type="I",main=paste("PRISM dtr skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PARMtmaxskew[,,m])
surface(testsfc1,type="I",main=paste("PARM tmax skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=EDQMtmaxskew[,,m])
surface(testsfc1,type="I",main=paste("EDQM tmax skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PRISMtmaxskew[,,m])
surface(testsfc1,type="I",main=paste("PRISM tmax skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PARMtminskew[,,m])
surface(testsfc1,type="I",main=paste("PARM tmin skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=EDQMtminskew[,,m])
surface(testsfc1,type="I",main=paste("EDQM tmin skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
testsfc1 = list(x=(lon-360),y=lat,z=PRISMtminskew[,,m])
surface(testsfc1,type="I",main=paste("PRISM tmin skew for month: ",m,sep=""),zlim=rawcolorbar[[1]],col=rawcolorbar[[3]],breaks=rawcolorbar[[2]],xlab="Longitude",ylab="Latitude")
map("state",add=TRUE)
}
dev.off()
# Single-cell monthly diagnostics. NOTE(review): i/j are reassigned here
# (j=5, i=110) in the opposite roles from the earlier section (i=5, j=117),
# so this targets a different grid cell -- confirm this is intentional.
j=5
i=110
# Print the 12 monthly skewness values at this cell.
PARMdtr2skew[i,j,]
PARMtmaxskew[i,j,]
PARMtminskew[i,j,]
# Monthly quantile profiles (1%-99%) at the same cell, one column per month.
probsin = seq(0.01,0.99,by=0.01)
PARMtmaxquant = PARMtminquant = PARMdtrquant = matrix(NA,nrow=length(probsin),ncol=12)
EDQMtmaxquant = EDQMtminquant = EDQMdtrquant = matrix(NA,nrow=length(probsin),ncol=12)
PRISMtmaxquant = PRISMtminquant = PRISMdtrquant = matrix(NA,nrow=length(probsin),ncol=12)
for(m in 1:12){
monidx = which(as.numeric(substr(histdates2,6,7))==m)
for(p in 1:length(probsin)){
PARMtmaxquant[p,m] = quantile(PARMtmax[i,j,monidx],probs=probsin[p],na.rm=TRUE)
PARMtminquant[p,m] = quantile(PARMtmin[i,j,monidx],probs=probsin[p],na.rm=TRUE)
PARMdtrquant[p,m] = quantile(PARMdtr2[i,j,monidx],probs=probsin[p],na.rm=TRUE)
EDQMtmaxquant[p,m] = quantile(EDQMtmax[i,j,monidx],probs=probsin[p],na.rm=TRUE)
EDQMtminquant[p,m] = quantile(EDQMtmin[i,j,monidx],probs=probsin[p],na.rm=TRUE)
EDQMdtrquant[p,m] = quantile(EDQMdtr[i,j,monidx],probs=probsin[p],na.rm=TRUE)
PRISMtmaxquant[p,m] = quantile(PRISMtmax[i,j,monidx],probs=probsin[p],na.rm=TRUE)
PRISMtminquant[p,m] = quantile(PRISMtmin[i,j,monidx],probs=probsin[p],na.rm=TRUE)
PRISMdtrquant[p,m] = quantile(PRISMdtr[i,j,monidx],probs=probsin[p],na.rm=TRUE)
}
}
# Compare May (column 5, solid) vs September (column 9, dashed) profiles.
plot(probsin~PARMtmaxquant[,5],xlim=range(c(PARMtmaxquant[,c(5,9)],PARMtminquant[,c(5,9)])),lwd=2,type="l",col="red")
lines(probsin~PARMtminquant[,5],lwd=2,col="blue")
lines(probsin~PARMtmaxquant[,9],lwd=2,col="red",lty=2)
lines(probsin~PARMtminquant[,9],lwd=2,col="blue",lty=2)
plot(probsin~PARMdtrquant[,5],xlim=range(c(PARMdtrquant[,c(5,9)])),lwd=2,type="l",col="black")
lines(probsin~PARMdtrquant[,9],lwd=2,col="black",lty=2)
# Histograms of the daily values for May and September at this cell.
m=5
monidx = which(as.numeric(substr(histdates2,6,7))==m)
hist(PARMtmax[i,j,monidx],breaks=20)
hist(PARMtmin[i,j,monidx],breaks=20)
hist(PARMdtr2[i,j,monidx],breaks=20)
m=9
monidx = which(as.numeric(substr(histdates2,6,7))==m)
hist(PARMtmax[i,j,monidx],breaks=20)
hist(PARMtmin[i,j,monidx],breaks=20)
hist(PARMdtr2[i,j,monidx],breaks=20)
|
1f6f5edf7ceec1be4fe6f0738bc132c2a319ed29 | c33b1ac130ab154cc300800fe6bbd887328f3c50 | /man/lm_each_subsample.Rd | 389527841af8d2659da19cad3f43bf5843a16670 | [
"MIT"
] | permissive | zheng003/blblm | a71d244fbff63997ae60be6cf7c68668846685bd | 24485772f7e0e16915538279a9069843546a21ce | refs/heads/master | 2022-11-02T03:15:32.818662 | 2020-06-11T11:39:45 | 2020-06-11T11:39:45 | 270,378,179 | 0 | 0 | null | 2020-06-07T17:19:36 | 2020-06-07T17:19:36 | null | UTF-8 | R | false | true | 409 | rd | lm_each_subsample.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{lm_each_subsample}
\alias{lm_each_subsample}
\title{compute the estimates}
\usage{
lm_each_subsample(formula, data, n, B)
}
\arguments{
\item{formula}{A formula, e.g. V1 ~ V2}
\item{data}{dataframe}
\item{n}{total number of rows in data}
\item{B}{number of bootstraps}
}
\value{
list
}
\description{
compute the estimates
}
|
fde09347272fc8cc54dc6c88d02c85d35168f31f | fd1019e277941bf5ddb0c193c4a7c844eb3e9baf | /man/CCMouse.Rd | 000ccf7c5df6c1c17c3449b91da4b7300cac14aa | [] | no_license | cran/gontr | 0bfbe5de33fe4cc41195863ee99183b7dda25466 | bb027d123d8d28fe4082b7c6155d6a07feb21406 | refs/heads/master | 2023-01-05T23:14:06.832606 | 2020-11-05T08:30:02 | 2020-11-05T08:30:02 | 310,512,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 600 | rd | CCMouse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc.R
\docType{data}
\name{CCMouse}
\alias{CCMouse}
\title{CC GO-terms reduced GO-DAG for Mouse}
\format{
A list with 3 objects:
\describe{
\item{v}{GO-terms in the respective nodes.}
\item{dat.d}{Data frame object that shows how the respective nodes are connected.}
\item{df}{Skeleton of the reduced GO-DAGs.}
}
}
\source{
<https://www.nature.com/articles/s41598-020-73326-3>
}
\usage{
CCMouse
}
\description{
A data set with a list containing information about the reduced GO-CC DAG for Mouse.
}
\keyword{datasets}
|
33925bb68f9b3452eafa4ad8c11922833caf6e16 | b6541fbb8c42e341c91e8c89006fe61236d1e99d | /plot1.R | 6b588fc13dff8f480c64ec6632243b09175e7cc7 | [] | no_license | jfausto/ExData_Plotting1 | fcb715eabd932613ca981d84d79d954f705ce77b | 0011331876072679860914f2c8101aa993d7f75e | refs/heads/master | 2021-01-21T16:00:32.652048 | 2016-04-18T09:51:12 | 2016-04-18T09:51:12 | 56,434,390 | 0 | 0 | null | 2016-04-17T12:08:08 | 2016-04-17T12:08:08 | null | UTF-8 | R | false | false | 465 | r | plot1.R | ## Coursera - JHU - Exploratory Graphs
## Week 1. Assignment
## Date: 4/17/2016
## IMPORTANT NOTE #####
## DataSetLoad.R must be executed before to get the pwcFeb07 data frame
## (this script reads no files itself; all data come from that source()).
source('DataSetLoad.R')
## Plot 1: histogram of Global Active Power (kilowatts) from pwcFeb07.
hist(pwcFeb07$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Copy the on-screen plot to a 480x480 PNG, then close the PNG device.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
22cf6c222f9d62a05e5c53e764e106c0bad49629 | 44fb6d163b5c56abfdf1500e27a2efec03fcc1db | /02 Shiny/server.R | 6e03d751510fef9166c7ea48615bd9f126b2d6f0 | [] | no_license | rmelendez94/DV_SProject | 10e5d37eb4071ff9830a98fe970cc26d0767bbef | 03ffac3dcb814b51809a0af87456dc8339ce88e8 | refs/heads/master | 2021-01-10T05:14:47.471972 | 2015-11-21T20:30:38 | 2015-11-21T20:30:38 | 46,313,173 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,308 | r | server.R | # server.R
require(jsonlite)
require(RCurl)
require(ggplot2)
require(dplyr)
require(tidyr)
require(shiny)
require(leaflet)
require(shinydashboard)
require(DT)
shinyServer(function(input, output) {
df <- data.frame(fromJSON(getURL(URLencode('skipper.cs.utexas.edu:5001/rest/native/?query="select * from BNKMKTG"'),httpheader=c(DB='jdbc:oracle:thin:@sayonara.microlab.cs.utexas.edu:1521:orcl', USER='C##cs329e_rm46926', PASS='orcl_rm46926', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE), ))
output$scatterPlot <- renderPlot({
# Scatter plot of call DURATION vs. consumer price index, coloured by the
# campaign outcome Y, with one linear fit line per outcome group.
# Start your code here.
# Here is the scatter plot
# Map the UI selection to an outcome level that is filtered OUT below.
# The sentinel 'all' matches no value of Y, so every record is kept.
if (input$OutcomeSelectionFilter == 1)
ofilter = 'no'
else if (input$OutcomeSelectionFilter == 2)
ofilter = 'yes'
else
ofilter = 'all'
dfs <- df %>% select(DURATION, Y, CONS_PRICE_IDX) %>% filter(Y != ofilter)
# as.numeric(as.character(...)) converts columns that may have been read
# as factors into numeric values before plotting.
plot1 <- ggplot() +
coord_cartesian() +
scale_x_continuous() +
scale_y_continuous() +
labs(title='Portuguese Bank Marketing Campaign Effectiveness\nScatter Plot') +
labs(x="Duration", y=paste("Consumer Price Index")) +
layer(data=dfs,
mapping=aes(x=as.numeric(as.character(DURATION)), y=as.numeric(as.character(CONS_PRICE_IDX)), color=Y),
stat="identity",
stat_params=list(),
geom="point",
geom_params=list(alpha=.8),
position=position_jitter(width=0, height=0)
) +
layer(data=dfs,
mapping=aes(x=as.numeric(as.character(DURATION)), y=as.numeric(as.character(CONS_PRICE_IDX)), color=Y),
stat="smooth",
stat_params=list(method= lm, se= FALSE),
geom="smooth",
geom_params=list(alpha= .8),
position=position_jitter(width=0, height=0)
)
# End your code here.
return(plot1)
})
output$barPlot <- renderPlot({
# Horizontal bar chart of mean CAMPAIGN contacts per outcome (Y), faceted
# by the previous-campaign outcome (POUTCOME), with a reference line whose
# statistic (avg/min/max/sum over each facet) comes from the UI.
# Start your code here.
# Here is the bar chart
plottitle = "Portuguese Bank Marketing Campaign Effectiveness\nBar Chart:"
dfb <- df %>% group_by(POUTCOME, Y) %>% summarise(AVG_CAMPAIGN = mean(CAMPAIGN))
# Compute one reference value per POUTCOME according to the selection.
if (input$ReferenceLine == 1) {
# Window Avg
subtitle = "AVG_CAMPAIGN, WINDOW_AVG_CAMPAIGN"
dfb1 <- dfb %>% ungroup %>% group_by(POUTCOME) %>% summarise(measure_value = mean(AVG_CAMPAIGN))}
else if (input$ReferenceLine == 2) {
# Window Min
subtitle = "AVG_CAMPAIGN, WINDOW_MIN_CAMPAIGN"
dfb1 <- dfb %>% ungroup %>% group_by(POUTCOME) %>% summarise(measure_value = min(AVG_CAMPAIGN))}
else if (input$ReferenceLine == 3) {
# Window Max
subtitle = "AVG_CAMPAIGN, WINDOW_MAX_CAMPAIGN"
dfb1 <- dfb %>% ungroup %>% group_by(POUTCOME) %>% summarise(measure_value = max(AVG_CAMPAIGN))}
else {
# Window Sum
subtitle = "AVG_CAMPAIGN, WINDOW_SUM_CAMPAIGN"
dfb1 <- dfb %>% ungroup %>% group_by(POUTCOME) %>% summarise(measure_value = sum(AVG_CAMPAIGN))}
# Attach the per-facet reference value to every bar row.
dfb <- inner_join(dfb, dfb1, by="POUTCOME")
#spread(dfb, Y, AVG_CAMPAIGN) %>% View
plot2 <- ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_continuous() +
facet_wrap(~POUTCOME, ncol=1) +
labs(title=paste(plottitle,subtitle,sep = " ")) +
labs(x=paste("Y (OUTCOME)"), y=paste("AVG_CAMPAIGN")) +
layer(data=dfb,
mapping=aes(x=Y, y=AVG_CAMPAIGN, color=Y, fill=Y),
stat="identity",
stat_params=list(),
geom="bar",
geom_params=list(width=.25),
position=position_identity()
) + coord_flip() +
layer(data=dfb,
mapping=aes(x=Y, y=measure_value, label=round(measure_value, 4)),
stat="identity",
stat_params=list(),
geom="text",
geom_params=list(colour="black", hjust=1.5, size=3.5),
position=position_identity()
) +
layer(data=dfb,
mapping=aes(yintercept = measure_value),
geom="hline",
geom_params=list(colour="red")
)
# End your code here.
return(plot2)
})
output$crosstabPlot <- renderPlot({
# Crosstab of mean call DURATION by EDUCATION x Y x HOUSING, with tiles
# coloured by a KPI derived from the yes/no outcome ratio per education
# level; the Low/Medium thresholds come from the UI sliders KPI1/KPI2.
# Start your code here.
# Here is the Crosstab and KPI
KPI_Low_Max_value = input$KPI1
KPI_Medium_Max_value = input$KPI2
#df %>% group_by(JOB) %>% summarize() %>% View()
# Ratio = (# yes outcomes) / (# no outcomes) within each EDUCATION level;
# the KPI buckets that ratio into High/Medium/Low using the thresholds.
dfc <- df %>% mutate(Yyes = ifelse(Y == 'yes', 1, 0), Yno = ifelse(Y == 'no', 1, 0)) %>% group_by(EDUCATION) %>% mutate(Ratio = sum(Yyes)/sum(Yno)) %>% ungroup() %>% group_by(EDUCATION, Y, HOUSING) %>% summarize(AVG_DURATION = round(mean(DURATION),1), Ratio = mean(Ratio)) %>% mutate(KPI = ifelse(Ratio <= KPI_Low_Max_value, '03 Low', ifelse(Ratio <= KPI_Medium_Max_value, '02 Medium', '01 High')))
#spread(dfc, Y, AVG_DURATION) %>% View
# Order education levels from least to most schooling for the facets.
dfc$EDUCATION <- factor(dfc$EDUCATION, levels = c("illiterate", "basic4y", "basic6y", "basic9y", "highschool", "universitydegree", "professionalcourse", "unknown"))
plot3 <- ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_discrete() +
scale_fill_manual(values = c("green","yellow","red")) +
facet_grid(.~EDUCATION) +
labs(title='Portuguese Bank Marketing Campaign Effectiveness\nCrosstab\nAVG_DURATION') +
labs(x=paste("EDUCATION/Y"), y=paste("HOUSING")) +
layer(data=dfc,
mapping=aes(x=Y, y=HOUSING, label=AVG_DURATION),
stat="identity",
stat_params=list(),
geom="text",
geom_params=list(colour="black", size=2.8),
position=position_identity()
) +
layer(data=dfc,
mapping=aes(x=Y, y=HOUSING, fill=KPI),
stat="identity",
stat_params=list(),
geom="tile",
geom_params=list(alpha=0.50),
position=position_identity()
)
# End your code here.
return(plot3)
})
output$blendedPlot <- renderPlot({
# Blended bar chart: average salary per job type (JOBTYPE table) as bar
# length, shaded by the total CAMPAIGN contacts per job (BNKMKTG table).
# The two measures are fetched in one SQL UNION ALL via the REST endpoint.
# Start your code here.
# Here is the blended bar chart
plottitle = "Portuguese Bank Marketing Campaign Effectiveness\nBlending\nAVG_SALARY:"
dfbl <-
data.frame(fromJSON(getURL(
URLencode(
gsub(
"\n", " ", 'skipper.cs.utexas.edu:5001/rest/native/?query=
"""select JOB_TYPE as job_name, \\\'AVERAGE_SALARY\\\' as measure_names,
sum(AVERAGE_SALARY) as measure_values
from JOBTYPE
group by JOB_TYPE
union all
select JOB as job_name, \\\'CAMPAIGN\\\' as measure_names, sum(CAMPAIGN) as measure_values from BNKMKTG
group by JOB;"""'
)
), httpheader = c(
DB = 'jdbc:oracle:thin:@sayonara.microlab.cs.utexas.edu:1521:orcl', USER =
'C##cs329e_rm46926', PASS = 'orcl_rm46926', MODE = 'native_mode', MODEL = 'model', returnDimensions = 'False', returnFor = 'JSON'
), verbose = TRUE
))); #View(dfbl)
# Rearranges measure_names into usable columns
ndfbl <- spread(dfbl, MEASURE_NAMES, MEASURE_VALUES) %>% arrange(desc(AVERAGE_SALARY))
# Creates an ordered column of job type to be used for ordering in ggplot
ndfbl$ORDERED_JOBS <- reorder(ndfbl$JOB_NAME, ndfbl$AVERAGE_SALARY)
plot4 <- ggplot() +
coord_cartesian() +
scale_x_discrete() +
scale_y_continuous(limits = c(0,100000)) +
scale_fill_gradient(low = "grey90", high = "darkgreen", na.value = "grey90", guide = "colourbar") +
labs(title = 'Portuguese Bank Marketing Campaign Effectiveness\nBlending\nAVG_SALARY') +
labs(x = paste("JOB TYPE"), y = paste("AVERAGE SALARY")) +
theme(panel.background=element_rect(fill='grey100')) +
layer(
data = ndfbl,
mapping = aes(x = ORDERED_JOBS, y = AVERAGE_SALARY, fill = CAMPAIGN),
stat = "identity",
stat_params = list(),
geom = "bar",
geom_params = list(width=.5),
position = position_identity()
) +
layer(
data = ndfbl,
mapping = aes(
x = ORDERED_JOBS, y = AVERAGE_SALARY, label = round(CAMPAIGN)
),
stat = "identity",
stat_params = list(),
geom = "text",
geom_params = list(colour = "black", hjust = -0.1),
position = position_identity()
) + coord_flip()
# End your code here.
return(plot4)
})
})
|
a40b75bec2d727fbe641e5cdd324ffcbe35f1fca | 61e4c8a154719f5f85f8220cbb2a50a4d756b97b | /R_basic/reveiw_day3.R | 5caa29f82b899d090a4a3a2eb679ae21789bc1bf | [] | no_license | HeekyungKim6424/study_R | d1b152ffccc31633d11d6e3a5e74eb3f170323e6 | 20fff8cf34a8890fbe3b1e54a81ceed52aa8ea2b | refs/heads/main | 2023-02-24T08:28:35.111068 | 2021-01-25T19:43:00 | 2021-01-25T19:43:00 | 328,738,209 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 413 | r | reveiw_day3.R | #세번째날 복습!!
# Day 3 review exercises. (A bit late, but better late than never!)
news <- read.csv("news_ecommerce_lda_k10.csv")
head(news, 2)
# Q1. How many distinct press outlets contributed articles to `news`?
length(unique(news$press))
nrow(news)
# Q2. Report the number of news articles per year.
# Fixes vs. the original: `for num in ...` is not valid R syntax; `nrow()`
# on a logical vector returns NULL, so sum() is used to count matches; and
# paste() inside a loop must be print()ed for its result to be displayed.
for (num in sort(unique(news$year))) {
  count <- sum(news$year == num, na.rm = TRUE)
  print(paste(num, count))
}
# For a year-by-month breakdown (if a month column exists):
# table(news$year, news$month)
|
d495bdb653a8b738403dff15a8532be51b59302f | ddd877e4cf2df00596dbcd4c01411b48ebf6da3c | /1_data_and_scripts/Fig3_data_and_code/Fig3F/0_code_Fig4C.R | 8a20f25f6c46d2cc0496b3bbbecb1084582a9d1d | [
"MIT"
] | permissive | PLeeLab/methane_oxidation_genetic_trait | 5b5ee719d82df2316de0fdfdfca6281094748fd4 | 0d76cf1276d6a26f95ce930dad1cb2955bacc976 | refs/heads/master | 2020-04-10T20:42:34.643222 | 2019-07-25T07:59:57 | 2019-07-25T07:59:57 | 161,276,280 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,261 | r | 0_code_Fig4C.R | #install.packages('seqinr')
#install.packages("robustbase")
#install.packages("gplots")
#install.packages("reshape")
#install.packages("ggplot2")
#install.packages("ggpubr")
#install.packages("RColorBrewer")
#install.packages("ggrepel")
# seqinr: sequence I/O and codon-usage stats; robustbase: colMedians();
# reshape: melt(); remaining packages support the figures below.
library('seqinr')
library("robustbase")
library("gplots")
library("reshape")
library("ggplot2")
library("ggpubr")
library("RColorBrewer")
library("ggrepel")
# Gene metadata table and the matching CDS nucleotide sequences; the
# equality check below verifies one metadata row per CDS.
genes_metadata <- read.delim(file = "QC_data_CH4_IV.txt")
ALL_query_CDSs <- read.fasta(file = "query_ALL_clean.fasta", seqtype = 'DNA')
dim(genes_metadata)[1] == length(ALL_query_CDSs)
# Relative synonymous codon usage (RSCU), one row per CDS after transpose;
# NAs (codons absent from a CDS) are set to 0.
rscu_all <- sapply(ALL_query_CDSs, uco, index = "rscu")
rscu_all <- t(rscu_all)
rscu_all[is.na(rscu_all)] <- 0
rscu_all <- rscu_all[,-c(15, 49,51,57,59)] # removing Met, Trp and Stop codons
#----------------- Amino acid usage
# Per-CDS amino acid composition, normalised by protein length (codons).
AA_matrix <- matrix(nrow = length(ALL_query_CDSs), ncol = length(aaa()))
for (j in 1:length(ALL_query_CDSs)) {
prot_temp <- AAstat(seqinr::translate(ALL_query_CDSs[[j]]), plot = FALSE)$Compo/ (length(ALL_query_CDSs[[j]])/3)
AA_matrix[j,] <- prot_temp
}
colnames(AA_matrix) <- aaa()
rownames(AA_matrix) <- names(ALL_query_CDSs)
# Join the amino acid fractions onto the gene metadata by CDS id, then
# reshape to long format (columns 23:43 hold the amino acid columns).
AA_df <- as.data.frame(AA_matrix)
AA_df$CDS <- rownames(AA_matrix)
AA_integrated_df <- merge(x = genes_metadata, y = AA_df, by="CDS", sort = FALSE)
AA_melted <- melt(data = AA_integrated_df, measure.vars = 23:43)
colnames(AA_melted)[c(23, 24)] <- c("AA", "Fraction")
# Drop stop codons; nFraction converts fractions back to residue counts.
AA_melted <- AA_melted[AA_melted$AA != "Stp",]
AA_melted$nFraction <- AA_melted$Fraction*(AA_melted$Length/3)
#----------------- RSCU data frame
# Join the RSCU matrix onto the gene metadata by CDS id.
df_rscu_all <- as.data.frame(rscu_all)
df_rscu_all$CDS <- rownames(rscu_all)
df_rscu_integrated_all <- merge(x = genes_metadata, y = df_rscu_all, by="CDS")
# Helper: translate a codon string (e.g. "gca") to its one-letter amino acid.
s2c_n_translate_fun <- function(x) {translate(s2c(x))}
# Rename RSCU columns (23:81) to "Aaa_CODON", e.g. "Ala_GCA", so the amino
# acid can later be recovered from the column name.
colnames(df_rscu_integrated_all)[23:81] <- paste(aaa(sapply(X = colnames(df_rscu_integrated_all)[23:81], FUN = s2c_n_translate_fun)),
"_",
toupper(colnames(df_rscu_integrated_all)[23:81]),
sep = "")
# New function
# Sort a data frame by one or more columns (stand-in for plyr::sort_df).
#
# data       data frame to sort
# vars       column names to sort by (default: all columns)
# decreasing logical; sort in descending order?
#
# Returns `data` with its rows reordered, or unchanged when `vars` is empty.
# Fixes vs. the original: `decreasing = F` used the reassignable shortcut
# `F`, and the sort keys were handed to order() as a single data.frame
# argument (order() cannot sort a data frame); the columns are now passed
# as one vector argument each.
sort_df = function (data, vars = names(data), decreasing = FALSE){
if (length(vars) == 0 || is.null(vars))
return(data)
keys <- unname(as.list(data[, vars, drop = FALSE]))
ord <- do.call("order", c(keys, list(decreasing = decreasing)))
data[ord, , drop = FALSE]
}
# Column-wise standard deviation of a matrix (or anything apply() accepts);
# extra arguments such as na.rm = TRUE are forwarded to sd().
colSdApply <- function(x, ...) {
  apply(x, 2, sd, ...)
}
# linear model RSCU vs AA usage
# For one taxonomic group and gene category, regress the per-amino-acid
# median usage fraction on the median RSCU of that amino acid's most-used
# codon, and return a ggplot with error bars, a fitted line, and the
# regression P value in the subtitle. Prebiotic vs modern amino acids
# (per Longo et al. 2013, PNAS) get distinct point shapes/fills.
# Relies on the globals df_rscu_integrated_all and AA_integrated_df
# built above (RSCU columns 23:81; AA fraction columns 24:43).
linear_model_rscu_aafraction <- function(pick_group = "Ia", pick_gene = "pmo/amo", pick_colour = "blue"){
# Median and SD of each codon's RSCU across the selected CDSs.
sub_medians <- colMedians(as.matrix(df_rscu_integrated_all[df_rscu_integrated_all$group==pick_group & (df_rscu_integrated_all$gene_category == pick_gene), c(23:81)]))
sub_sds <- colSdApply(x = as.matrix(df_rscu_integrated_all[df_rscu_integrated_all$group==pick_group & (df_rscu_integrated_all$gene_category == pick_gene), c(23:81)]))
#sub_medians_ordered <- sub_medians[order(names(sub_medians))]
# Split "Aaa_CODON" column names back into amino acid and codon parts.
pmo_rscu_df <- data.frame(aaa_codon = names(sub_medians), rscu_median = sub_medians, rscu_sd = sub_sds)
pmo_rscu_df$aa <- gsub(pattern = "_...", replacement = "", x = pmo_rscu_df$aaa_codon)
pmo_rscu_df$codon <- gsub(pattern = "..._", replacement = "", x = pmo_rscu_df$aaa_codon)
rownames(pmo_rscu_df) <- NULL
# Keep only the highest-RSCU codon per amino acid (sort desc, dedupe).
pmo_rscu_df <- sort_df(data = pmo_rscu_df, vars = "rscu_median", decreasing = TRUE)
pmo_rscu_df <- pmo_rscu_df[!duplicated(pmo_rscu_df[,c('aa')]),]
# aa fraction
# Column 8 is gene_category; 24:43 are the amino acid fraction columns.
pmo_aa_fraction <- AA_integrated_df[AA_integrated_df$group == pick_group, ]
pmo_aa_fraction <- pmo_aa_fraction[, c(8, 24:43)]
pmo_aa_fraction <- pmo_aa_fraction[pmo_aa_fraction$gene_category == pick_gene, ]
pmo_aa_fraction_vector <- as.numeric(x = colMedians(x = as.matrix(pmo_aa_fraction[,c(2:21)])))
names(pmo_aa_fraction_vector) <- colnames(pmo_aa_fraction[c(2:21)])
pmo_aa_fraction_df <- data.frame(aa = names(pmo_aa_fraction_vector), aa_fraction_median = pmo_aa_fraction_vector)
rownames(pmo_aa_fraction_df) <- NULL
pmo_aa_fraction_df$aa_fraction_sd <- colSdApply(x = pmo_aa_fraction[,c(2:21)])
# rscu and aa fraction together with plot
pmo_final_df <- merge(x = pmo_rscu_df, y = pmo_aa_fraction_df, by = "aa")
prebiotic_amino_acids <- c("Ala", "Asp", "Glu", "Gly", "Ile", "Leu", "Pro", "Ser", "Thr", "Val")# Longo, et al. (2013) PNAS, 110(6), 2135-2139.
pmo_final_df$prebiotic <- ifelse(test = pmo_final_df$aa %in% prebiotic_amino_acids, yes = "prebiotic",no = "modern")
plot_model <- ggplot(data = pmo_final_df, mapping = aes(x = rscu_median, y = aa_fraction_median, label=aa)) +
geom_errorbar(aes(ymin=aa_fraction_median-aa_fraction_sd, ymax=aa_fraction_median+aa_fraction_sd), width=.1, colour="grey") +
geom_errorbarh(aes(xmin=rscu_median-rscu_sd, xmax=rscu_median+rscu_sd), height = .002, colour="grey") +
geom_point(aes(fill=prebiotic, shape=prebiotic), size=2) +
scale_shape_manual(values=c(21, 24))+
scale_fill_manual(values = c("white", "black")) +
geom_smooth(method = "lm", colour = pick_colour, lwd=2) +
geom_text_repel()+
#xlim(c(0,max(pmo_final_df$rscu_median)))+
ggtitle(label = "", subtitle = paste(pick_group, pick_gene, "| P =", signif(summary(lm(formula = aa_fraction_median ~ rscu_median, data = pmo_final_df))$coef[2,4], 2))) +
xlab(label = "RSCU") +
ylab(label = "Amino acid usage fraction") +
theme_bw()+
theme(strip.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
NULL
return(plot_model)
}
# ---- Fig 4C + supplements: RSCU-vs-amino-acid-usage regression panels ----
# One figure per group; one panel per gene category.  The 2 x 2 grid is kept
# even when a figure has fewer than four panels so that panel sizes stay
# comparable across figures.

fig4c <- ggarrange(
  linear_model_rscu_aafraction(pick_group = "Ia", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  linear_model_rscu_aafraction(pick_group = "Ia", pick_gene = "mmo", pick_colour = "#377EB8"),
  linear_model_rscu_aafraction(pick_group = "Ia", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  linear_model_rscu_aafraction(pick_group = "Ia", pick_gene = "xox", pick_colour = "#FF7F00"),
  ncol = 2, nrow = 2)
ggsave(filename = "Fig4C.pdf", plot = fig4c, device = "pdf",
       width = 11, height = 9, useDingbats = FALSE)

fig4cs1 <- ggarrange(
  linear_model_rscu_aafraction(pick_group = "Ib", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  linear_model_rscu_aafraction(pick_group = "Ib", pick_gene = "mmo", pick_colour = "#377EB8"),
  linear_model_rscu_aafraction(pick_group = "Ib", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  linear_model_rscu_aafraction(pick_group = "Ib", pick_gene = "xox", pick_colour = "#FF7F00"),
  ncol = 2, nrow = 2)
ggsave(filename = "Fig4CS1.pdf", plot = fig4cs1, device = "pdf",
       width = 11, height = 9, useDingbats = FALSE)

fig4cs3 <- ggarrange(
  linear_model_rscu_aafraction(pick_group = "IIa", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  linear_model_rscu_aafraction(pick_group = "IIa", pick_gene = "mmo", pick_colour = "#377EB8"),
  linear_model_rscu_aafraction(pick_group = "IIa", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  ncol = 2, nrow = 2)
ggsave(filename = "Fig4CS3.pdf", plot = fig4cs3, device = "pdf",
       width = 11, height = 9, useDingbats = FALSE)

fig4cs4 <- ggarrange(
  linear_model_rscu_aafraction(pick_group = "IIb", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  linear_model_rscu_aafraction(pick_group = "IIb", pick_gene = "mmo", pick_colour = "#377EB8"),
  linear_model_rscu_aafraction(pick_group = "IIb", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  ncol = 2, nrow = 2)
ggsave(filename = "Fig4CS4.pdf", plot = fig4cs4, device = "pdf",
       width = 11, height = 9, useDingbats = FALSE)

fig4cs5 <- ggarrange(
  linear_model_rscu_aafraction(pick_group = "III", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  linear_model_rscu_aafraction(pick_group = "III", pick_gene = "xox", pick_colour = "#FF7F00"),
  ncol = 2, nrow = 2)
ggsave(filename = "Fig4CS5.pdf", plot = fig4cs5, device = "pdf",
       width = 11, height = 9, useDingbats = FALSE)
# Prebiotic AA enrichment
prebiotic_AA_enrichment <- function(pick_group = "Ia", pick_gene = "pmo/amo", pick_colour = "blue"){
  # Compare median amino-acid usage fractions between "prebiotic" and
  # "modern" amino acids for one group x gene-category combination.
  # NOTE(review): pick_colour is accepted but not used anywhere in this
  # plot; confirm whether a coloured layer was intended.

  # Subset the global AA_integrated_df: rows of the requested group, then
  # gene_category (column 8) plus the 20 amino-acid fraction columns (24:43),
  # then rows of the requested gene category.
  aa_sub <- AA_integrated_df[AA_integrated_df$group == pick_group, ]
  aa_sub <- aa_sub[, c(8, 24:43)]
  aa_sub <- aa_sub[aa_sub$gene_category == pick_gene, ]

  # Per-amino-acid median and sd of the usage fractions across sequences.
  med_vec <- as.numeric(x = colMedians(x = as.matrix(aa_sub[, 2:21])))
  names(med_vec) <- colnames(aa_sub[2:21])
  enrich_df <- data.frame(aa = names(med_vec), aa_fraction_median = med_vec)
  rownames(enrich_df) <- NULL
  enrich_df$aa_fraction_sd <- colSdApply(x = aa_sub[, 2:21])

  # Flag the ten amino acids thought to be available prebiotically.
  prebiotic_amino_acids <- c("Ala", "Asp", "Glu", "Gly", "Ile", "Leu", "Pro", "Ser", "Thr", "Val") # Longo, et al. (2013) PNAS, 110(6), 2135-2139.
  enrich_df$prebiotic <- ifelse(test = enrich_df$aa %in% prebiotic_amino_acids, yes = "prebiotic", no = "modern")

  # Boxplot + labelled points, with a t-test comparing the two classes.
  ggplot(data = enrich_df, mapping = aes(x = prebiotic, y = aa_fraction_median, label = aa)) +
    geom_boxplot(outlier.alpha = 0) +
    geom_point(aes(shape = prebiotic, fill = prebiotic), size = 2) +
    stat_compare_means(comparisons = list(c("modern", "prebiotic")), method = "t.test") +
    scale_shape_manual(values = c(21, 24)) +
    scale_fill_manual(values = c("white", "black")) +
    geom_text_repel() +
    ggtitle(label = "", subtitle = paste(pick_group, pick_gene)) +
    xlab(label = "Amino acid") +
    ylab(label = "Median amino acid usage fraction") +
    theme_classic2() +
    theme(strip.background = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(), legend.position = "none")
}
# ---- Fig 4H supplements: prebiotic vs modern amino-acid enrichment ----
# One figure per group; one boxplot panel per gene category.  ncol = 6 is
# kept for every figure so panels have the same width even when a group has
# fewer than six gene categories.

fig4hs1 <- ggarrange(
  prebiotic_AA_enrichment(pick_group = "Ia", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  prebiotic_AA_enrichment(pick_group = "Ia", pick_gene = "mmo", pick_colour = "#377EB8"),
  prebiotic_AA_enrichment(pick_group = "Ia", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  prebiotic_AA_enrichment(pick_group = "Ia", pick_gene = "xox", pick_colour = "#FF7F00"),
  prebiotic_AA_enrichment(pick_group = "Ia", pick_gene = "pxm", pick_colour = "#FF7F00"),
  prebiotic_AA_enrichment(pick_group = "Ia", pick_gene = "hao", pick_colour = "#FF7F00"),
  ncol = 6, nrow = 1)
ggsave(filename = "Fig4HS1.pdf", plot = fig4hs1, device = "pdf",
       width = 15, height = 5, useDingbats = FALSE)

fig4hs2 <- ggarrange(
  prebiotic_AA_enrichment(pick_group = "Ib", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  prebiotic_AA_enrichment(pick_group = "Ib", pick_gene = "mmo", pick_colour = "#377EB8"),
  prebiotic_AA_enrichment(pick_group = "Ib", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  prebiotic_AA_enrichment(pick_group = "Ib", pick_gene = "xox", pick_colour = "#FF7F00"),
  ncol = 6, nrow = 1)
ggsave(filename = "Fig4HS2.pdf", plot = fig4hs2, device = "pdf",
       width = 15, height = 5, useDingbats = FALSE)

fig4hs4 <- ggarrange(
  prebiotic_AA_enrichment(pick_group = "IIa", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  prebiotic_AA_enrichment(pick_group = "IIa", pick_gene = "mmo", pick_colour = "#377EB8"),
  prebiotic_AA_enrichment(pick_group = "IIa", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  ncol = 6, nrow = 1)
ggsave(filename = "Fig4HS4.pdf", plot = fig4hs4, device = "pdf",
       width = 15, height = 5, useDingbats = FALSE)

fig4hs5 <- ggarrange(
  prebiotic_AA_enrichment(pick_group = "IIb", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  prebiotic_AA_enrichment(pick_group = "IIb", pick_gene = "mmo", pick_colour = "#377EB8"),
  prebiotic_AA_enrichment(pick_group = "IIb", pick_gene = "mxa", pick_colour = "#4DAF4A"),
  ncol = 6, nrow = 1)
ggsave(filename = "Fig4HS5.pdf", plot = fig4hs5, device = "pdf",
       width = 15, height = 5, useDingbats = FALSE)

fig4hs6 <- ggarrange(
  prebiotic_AA_enrichment(pick_group = "III", pick_gene = "pmo/amo", pick_colour = "#984EA3"),
  prebiotic_AA_enrichment(pick_group = "III", pick_gene = "xox", pick_colour = "#FF7F00"),
  ncol = 6, nrow = 1)
ggsave(filename = "Fig4HS6.pdf", plot = fig4hs6, device = "pdf",
       width = 15, height = 5, useDingbats = FALSE)
|
d432829fcb57340def919d51a065d398fb67feb7 | e05e6718bdc2c25571e54670405cb1fae39dc2f9 | /R code/Modular Structure.R | f42bc88aaeef20461111fec07459e3f2f0be31f0 | [] | no_license | angelayuan/Development-Emotion-Connectome | 49fcd01e3c6edc2614ac52256c18d884bfa9325d | 9d25fcc08c1a8728b016088f036d2f52f308fcac | refs/heads/master | 2020-03-22T16:29:22.347836 | 2018-07-18T00:23:52 | 2018-07-18T00:23:52 | 140,330,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,466 | r | Modular Structure.R | setwd('/Users/zhangyuan/Google Drive/Files_PNC_Analysis')
# ---- Setup: subject info, plotting/IO packages, condition list ----
# Subject table; Subj IDs kept as character so leading zeros survive.
sub <- read.table('SubInfo_2018.txt',header = TRUE, sep = '\t',
colClasses=c("Subj"="character"),stringsAsFactors=FALSE)
library(corrplot)   # matrix heatmap plots
library(R.matlab)   # readMat() for the MATLAB modularity outputs
library(ade4)       # dist.binary() for binary (dice-type) distances
# The four emotion conditions analysed throughout this script.
cons = c('fear','anger','sad','happy')
# Pre-allocated results frame (one row per condition).
# NOTE(review): this `age` frame is overwritten again before the
# dissimilarity-vs-age loop further below; this first copy only feeds the
# commented-out Q-vs-age analysis.
age <- data.frame(coef=numeric(length(cons)), p=numeric(length(cons)),
cond=character(length(cons)),stringsAsFactors=FALSE)
# for(c in 1:length(cons)){
# f = paste('BS_Conn_50ROIs_2018/Modularity/Q_',cons[c],'_759s.mat',sep="")
# cmat <- readMat(f)
# Q <- cmat$all.Qs
# modules <- cmat$all.mod
#
# cdata <- cbind(sub, Q)
# cdata$Sex <- as.factor(cdata$Sex)
# cdata$Race <- as.factor(cdata$Race)
# cdata$Agebin <- as.factor(cdata$Agebin)
# cdata$Agebin <- factor(cdata$Agebin,levels(cdata$Agebin)[c(3,1,2)])
# cdata$GGroup <- as.factor(cdata$GGroup)
# cdata$DGroup <- as.factor(cdata$DGroup)
#
# if(cons[c]=='fear' | cons[c] == 'anger'){
# f <- paste("scale(Q) ~ ","scale(Age) + GGroup + Sex + Race + scale(Motion)", sep="")
#
# }else if(cons[c]=='sad' | cons[c] == 'happy'){
# f <- paste("scale(Q) ~ ","scale(Age) + DGroup + Sex + Race + scale(Motion)", sep="")
# }
# fit <- lm(as.formula(f), data = cdata)
#
# age$coef[c] = summary(fit)$coefficients['scale(Age)',1]
# age$p[c] = summary(fit)$coefficients['scale(Age)',4]
# age$cond[c] = cons[c]
# }
#
# save(age,file='BS_Conn_50ROIs_2018/Modularity/Q_vs_Age.RData')
# Co-occurrence template matrix: entry (i, j) is 1 when ROIs i and j belong
# to the same a-priori network (three networks partition the 50 ROIs),
# 0 otherwise.
# Bug fix: the original created this matrix twice (duplicated line) and
# sized it from dim(modules)[2], but `modules` is only defined inside the
# commented-out block above / the loop below, so the script errored when run
# top-to-bottom.  Every other matrix in this file is 50 x 50 (50-ROI atlas),
# so the size is made explicit here.
n_roi <- 50
template_mat = matrix(0,nrow=n_roi,ncol=n_roi)
template_mat[c(5:6,17:18,23:24,27:34,41:42,45:50),c(5:6,17:18,23:24,27:34,41:42,45:50)] = 1
template_mat[c(19:22,25:26,35:40,43:44),c(19:22,25:26,35:40,43:44)] = 1
template_mat[c(1:4,7:16),c(1:4,7:16)] = 1
# roi_names <- read.table('Beta_50ROIs/roiname_list_old.txt',header = FALSE, sep = '\t',stringsAsFactors=FALSE)
# dimnames(template_mat) = list(roi_names$V1,roi_names$V1)
# net <- c(5:6,17:18,23:24,27:34,41:42,45:50, 19:22,25:26,35:40,43:44, 1:4,7:16)
# corrplot(template_mat[net,net], method="square", type="full",
#          tl.col="black", tl.srt=20, tl.cex=1,outline = FALSE)
# ---- Per-subject dissimilarity between each condition's modular partition
# ---- and the a-priori template, one column per condition.
# NOTE(review): `modules` is referenced here before the loop below defines
# it, so this initialisation relies on the commented-out block above having
# been run interactively; the 759 in the .mat filenames suggests the row
# count should be 759 — confirm before batch-running this script.
# NOTE(review): the name `dist` masks stats::dist(); harmless here because
# only ade4::dist.binary() is called afterwards.
dist = data.frame(fear_dice=numeric(dim(modules)[1]),anger_dice=numeric(dim(modules)[1]),
sad_dice=numeric(dim(modules)[1]),happy_dice=numeric(dim(modules)[1]))
for(c in 1:length(cons)){
# Load this condition's module assignments (subjects x ROIs).
f = paste('BS_Conn_50ROIs_2018/Modularity/Q_',cons[c],'_759s.mat',sep="")
cmat <- readMat(f)
modules <- cmat$all.mod
for(s in 1:dim(modules)[1]){
# Build the subject's co-occurrence matrix: co[i,j] = 1 when ROIs i and j
# were assigned to the same module for this subject.
v = modules[s,]
co = matrix(0,nrow=dim(modules)[2],ncol=dim(modules)[2])
for(i in 1:dim(modules)[2]){
m = v[i]
ind = which(v==m)
co[i,ind] = 1
}
# Compare the lower triangles of the template and the subject matrix with
# a binary distance (ade4 method = 5).
A = template_mat[lower.tri(template_mat,diag=FALSE)]
B = co[lower.tri(co,diag=FALSE)]
dist[s,c] = as.numeric(dist.binary(rbind(A,B),method=5))
# # plot
# col0 <- colorRampPalette(c("#053061","#2166AC", "#4393C3","#92C5DE","#D1E5F0","#FFFFFF",
#                            "#FDDBC7","#F4A582","#D6604D","#B2182B","#67001F"))
# roi_names <- read.table('Beta_50ROIs/roiname_list_old.txt',header = FALSE, sep = '\t',stringsAsFactors=FALSE)
# num_rois <- length(roi_names$V1)
# dimnames(co) = list(roi_names$V1,roi_names$V1)
# net <- c(5:6,17:18,23:24,27:34,41:42,45:50, 19:22,25:26,35:40,43:44, 1:4,7:16)
# corrplot(co[net,net], method="square", type="full",
#          diag = TRUE, col=col0(50),
#          tl.col="black", tl.srt=20, tl.cex=1, #tl.pos='n',
#          cl.cex = 1, is.corr=FALSE, cl.lim = c(0, 1), cl.length=5,
#          outline = FALSE)
# order_v = order(v)
# corrplot(co[order_v,order_v], method="square", type="full",
#          diag = TRUE, col=col0(50),
#          tl.col="black", tl.srt=20, tl.cex=1, #tl.pos='n',
#          cl.cex = 1, is.corr=FALSE, cl.lim = c(0, 1), cl.length=5,
#          outline = FALSE)
}
}
# ---- Dissimilarity vs. age ----
# One linear model per condition: standardized dissimilarity ~ age plus
# covariates (group covariate differs between fear/anger and sad/happy).
# cdata does not depend on the loop index, so it is built once here (the
# original rebuilt it and re-factored the covariates on every iteration).
cdata <- cbind(sub, dist)
cdata$Sex <- as.factor(cdata$Sex)
cdata$Race <- as.factor(cdata$Race)
cdata$Agebin <- as.factor(cdata$Agebin)
cdata$Agebin <- factor(cdata$Agebin, levels(cdata$Agebin)[c(3, 1, 2)])
cdata$GGroup <- as.factor(cdata$GGroup)
cdata$DGroup <- as.factor(cdata$DGroup)

# One row of results per condition.
age <- data.frame(coef = numeric(length(cons)), p = numeric(length(cons)),
                  cond = character(length(cons)), stringsAsFactors = FALSE)
for (c in seq_along(cons)) {
  # Column 10 + c is the dice column for condition c: the dissimilarity
  # columns sit right after the subject-info columns.
  # TODO(review): confirm `sub` really has 10 columns.
  if (cons[c] == 'fear' | cons[c] == 'anger') {
    f <- paste("scale(", colnames(cdata)[10 + c], ") ~ ", "scale(Age) + GGroup + Sex + Race + scale(Motion)", sep = "")
  } else if (cons[c] == 'sad' | cons[c] == 'happy') {
    f <- paste("scale(", colnames(cdata)[10 + c], ") ~ ", "scale(Age) + DGroup + Sex + Race + scale(Motion)", sep = "")
  }
  fit <- lm(as.formula(f), data = cdata)
  # Keep the standardized age effect and its p-value.
  age$coef[c] <- summary(fit)$coefficients['scale(Age)', 1]
  age$p[c] <- summary(fit)$coefficients['scale(Age)', 4]
  age$cond[c] <- cons[c]
}
save(age, file = 'BS_Conn_50ROIs_2018/Modularity/Dissimilarity_vs_Age.RData')
# ---- Figure panels: module-assignment matrices ----
# Each matrix codes network membership as 1/2/3 so corrplot renders three
# coloured blocks; rows/cols are reordered by `net` so each network is a
# contiguous block.
# NOTE(review): roi_names is read three times below but never used in these
# corrplot calls (no dimnames are set), so the plots have index labels only.

# Template partition (a-priori networks).
template_mat = matrix(0,nrow=50,ncol=50)
template_mat[c(5:6,17:18,23:24,27:34,41:42,45:50),c(5:6,17:18,23:24,27:34,41:42,45:50)] = 1
template_mat[c(19:22,25:26,35:40,43:44),c(19:22,25:26,35:40,43:44)] = 2
template_mat[c(1:4,7:16),c(1:4,7:16)] = 3
net <- c(5:6,17:18,23:24,27:34,41:42,45:50, 19:22,25:26,35:40,43:44, 1:4,7:16)
corrplot(template_mat[net,net], method="square", type="full",
is.corr = FALSE,
tl.col="black", tl.srt=20, tl.cex=1,outline = FALSE)
# Children (CH) partition for fear and anger: ROIs 13-14 move to network 2.
roi_names <- read.table('Beta_50ROIs/roiname_list_old.txt',header = FALSE, sep = '\t',stringsAsFactors=FALSE)
ch_mat = matrix(0,nrow=50,ncol=50)
ch_mat[c(5:6,17:18,23:24,27:34,41:42,45:50),c(5:6,17:18,23:24,27:34,41:42,45:50)] = 1
ch_mat[c(13,14,19:22,25:26,35:40,43:44),c(13,14,19:22,25:26,35:40,43:44)] = 2
ch_mat[c(1:4,7:12,15,16),c(1:4,7:12,15,16)] = 3
net <- c(5:6,17:18,23:24,27:34,41:42,45:50, 19:22,25:26,35:40,43:44, 1:4,7:16)
corrplot(ch_mat[net,net], method="square", type="full",
is.corr = FALSE,
tl.col="black", tl.srt=20, tl.cex=1,outline = FALSE)
# Children (CH) partition for sad and happy: ROI 34 additionally moves from
# network 1 to network 2 (ch_mat is deliberately reused/overwritten here).
roi_names <- read.table('Beta_50ROIs/roiname_list_old.txt',header = FALSE, sep = '\t',stringsAsFactors=FALSE)
ch_mat = matrix(0,nrow=50,ncol=50)
ch_mat[c(5:6,17:18,23:24,27:33,41:42,45:50),c(5:6,17:18,23:24,27:33,41:42,45:50)] = 1
ch_mat[c(13,14,19:22,25:26,34:40,43:44),c(13,14,19:22,25:26,34:40,43:44)] = 2
ch_mat[c(1:4,7:12,15,16),c(1:4,7:12,15,16)] = 3
net <- c(5:6,17:18,23:24,27:34,41:42,45:50, 19:22,25:26,35:40,43:44, 1:4,7:16)
corrplot(ch_mat[net,net], method="square", type="full",
is.corr = FALSE,
tl.col="black", tl.srt=20, tl.cex=1,outline = FALSE)
# Adults (ADU) partition for sad: ROIs 11-12 move to network 1.
roi_names <- read.table('Beta_50ROIs/roiname_list_old.txt',header = FALSE, sep = '\t',stringsAsFactors=FALSE)
adu_mat = matrix(0,nrow=50,ncol=50)
adu_mat[c(5:6,11,12,17:18,23:24,27:34,41:42,45:50),c(5:6,11,12,17:18,23:24,27:34,41:42,45:50)] = 1
adu_mat[c(13,14,19:22,25:26,35:40,43:44),c(13,14,19:22,25:26,35:40,43:44)] = 2
adu_mat[c(1:4,7:10,15,16),c(1:4,7:10,15,16)] = 3
net <- c(5:6,17:18,23:24,27:34,41:42,45:50, 19:22,25:26,35:40,43:44, 1:4,7:16)
corrplot(adu_mat[net,net], method="square", type="full",
is.corr = FALSE,
tl.col="black", tl.srt=20, tl.cex=1,outline = FALSE)
|
449d176fa053fbdd00129682d4eb9360c7085d91 | db1e3f2b4e55ad254acfe196f82c963e1b13fbb7 | /train_two_online.R | 3bd5cd435154fa0b49e53528d5be77c570a0d983 | [] | no_license | willtom0334/-o2o- | a4dca28d50ecf8d3867ca22a79c72134bd9c8520 | 3d521bb69a3d5d55f2fd94d8144fba0df2da5cab | refs/heads/master | 2020-04-17T16:22:54.623207 | 2019-02-06T06:40:59 | 2019-02-06T06:40:59 | 166,737,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,055 | r | train_two_online.R | ###############线上集的处理######## Train & Revised ####################################
# ---- Session setup ----
# NOTE(review): rm(list = ls()) wipes the workspace and setwd() hard-codes a
# machine-specific path; both make this script non-portable.  library()
# would also fail loudly where require() silently returns FALSE when a
# package is missing.  Kept as-is here (documentation-only pass).
rm(list = ls())
require(dplyr)
require(reshape)
require(tidyr)
require(sqldf)
require(snowfall)
require(smbinning)
setwd('C:/Users/Administrator/Desktop/tc')
# JVM home for the sqldf/RJava stack on this Windows box.
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.7.0_79\\jre')
# Raise the Windows memory cap (MB).
memory.limit(102400)
# Start a 2-worker snowfall cluster (parallel helpers used elsewhere).
sfInit( parallel=TRUE, cpus=2 )
# ---- Load the O2O online-behaviour log and normalise column types ----
online <- read.csv("E:/R 3.4.2 for Windows/O2O_tc/ccf_online_stage1_train.csv")
######## Data format conversion ###########
online$User_id <- as.character(online$User_id)
online$Merchant_id <- as.character(online$Merchant_id)
online$Coupon_id <- as.character((online$Coupon_id))
online$Discount_rate <- as.character(online$Discount_rate)
online$Date_received <- as.character(online$Date_received)
online$Date_received <- as.Date(online$Date_received,'%Y%m%d')
online$Date <- as.character(online$Date)
online$Date <- as.Date(online$Date,'%Y%m%d')
online$weekday_r <- weekdays(online$Date_received)
######## Flag coupon type and purchase type ############
online$Discount_fac <- NA
# Money-off coupons look like "X:Y" (spend X, get Y off).
# NOTE(review): the character class includes 'z' alongside digits —
# presumably to tolerate a placeholder token in the raw data; confirm.
online$Discount_fac[grepl("^[z0-9]{1,4}\\:[z0-9]{1,4}$",online$Discount_rate) == T] <- 1
online$Discount_fac[online$Discount_rate == "null"] <- 2
online$Discount_fac[online$Discount_rate == "fixed"] <- 3
# 1: money-off coupon, 2: plain consumption with no coupon, 3: time-limited ("fixed") coupon
online$buy_fac <- NA
online$buy_fac[is.na(online$Date) == T & is.na(online$Date_received) == F] <- 2
online$buy_fac[is.na(online$Date) == F & online$Discount_rate == 'null' ] <- 3
online$buy_fac[is.na(online$Date) == F & online$Discount_rate == 'fixed' ] <- 4
online$buy_fac[is.na(online$Date) == F & online$Discount_rate != 'null' &
online$Discount_rate != 'fixed'] <- 1
# 1: coupon received and used, 2: coupon received but unused, 3: purchase with no coupon, 4: time-limited purchase (also via coupon)
######## Split into the train / revised time windows #########
online <- tbl_df(online)
# Train window (2016-01-01 .. 2016-04-30): rows where both dates fall inside,
# plus coupon-received-only rows, plus purchase-only rows.
train_online_1 <- filter(online,Date>='2016-01-01' & Date<='2016-04-30' &
Date_received>='2016-01-01' & Date_received<='2016-04-30')
train_online_2 <- filter(online,Date_received>='2016-01-01' & Date_received<='2016-04-30',is.na(Date))
train_online_3 <- filter(online,Date>='2016-01-01' & Date<='2016-04-30',is.na(Date_received))
train_online <- rbind(train_online_1,train_online_2,train_online_3)
# Sanity check: print the realised date ranges.
print(c(min(train_online$Date_received,na.rm = T), max(train_online$Date_received,na.rm = T)))
print(c(min(train_online$Date,na.rm = T), max(train_online$Date,na.rm = T)))
# Offline feature table built earlier; drop the row-number column written by
# write.csv.
train <- read.csv("E:/R 3.4.2 for Windows/O2O_tc/train_two.csv")
train <- train[-1]
# Revised window (2016-02-16 .. 2016-06-15), same three-way construction.
revised_online_1 <- filter(online,Date>='2016-02-16' & Date<='2016-06-15' &
Date_received>='2016-02-16' & Date_received<='2016-06-15')
revised_online_2 <- filter(online,Date_received>='2016-02-16' & Date_received<='2016-06-15',is.na(Date))
revised_online_3 <- filter(online,Date>='2016-02-16' & Date<='2016-06-15',is.na(Date_received))
revised_online <- rbind(revised_online_1,revised_online_2,revised_online_3)
print(c(min(revised_online$Date_received,na.rm = T), max(revised_online$Date_received,na.rm = T)))
print(c(min(revised_online$Date,na.rm = T), max(revised_online$Date,na.rm = T)))
revised <- read.csv("E:/R 3.4.2 for Windows/O2O_tc/revised_two.csv")
revised <- revised[-1]
# Free the intermediates.
rm(revised_online_1,revised_online_2,revised_online_3)
rm(train_online_1,train_online_2,train_online_3)
rm(online)
gc()
################## Online feature engineering starts here (train set) ###################
#---- Count of non-time-limited coupons the user received online: get_count_on_notfixed ----
midd <- train_online%>%select(User_id,Coupon_id)%>%
filter(Coupon_id != 'null'&Coupon_id != 'fixed')%>%
group_by(User_id)%>%summarise(get_count_on_notfixed=n())
train <- merge(train,midd,by = 'User_id',all.x = T)
train$get_count_on_notfixed[is.na(train$get_count_on_notfixed) == T] <- 0
#---- Count of non-time-limited coupons received online but never used: get_no_buy_on ----
midd <- train_online%>%select(User_id,buy_fac)%>%filter(buy_fac == 2)%>%
group_by(User_id)%>%summarise(get_no_buy_on=n())
train <- merge(train,midd,by = 'User_id',all.x = T)
train$get_no_buy_on[is.na(train$get_no_buy_on) == T] <- 0
#---- Count of time-limited ("fixed") coupons received online: get_count_on_fixed ----
midd <- train_online%>%select(User_id,Coupon_id)%>%filter(Coupon_id == 'fixed')%>%
group_by(User_id)%>%summarise(get_count_on_fixed=n())
train <- merge(train,midd,by = 'User_id',all.x = T)
train$get_count_on_fixed[is.na(train$get_count_on_fixed) == T] <- 0
#---- Count of coupons received AND used online (fixed included): get_buy_on ----
midd <- train_online%>%select(User_id,buy_fac)%>%filter(buy_fac == 1 | buy_fac == 4)%>%
group_by(User_id)%>%summarise(get_buy_on=n())
train <- merge(train,midd,by = 'User_id',all.x = T)
train$get_buy_on[is.na(train$get_buy_on) == T] <- 0
#---- Count of plain online purchases with no coupon at all: nodiscount_buy_on ----
midd <- train_online%>%select(User_id,buy_fac)%>%filter(buy_fac == 3)%>%
group_by(User_id)%>%summarise(nodiscount_buy_on = n())
train <- merge(train,midd,by = 'User_id',all.x = T)
train$nodiscount_buy_on[is.na(train$nodiscount_buy_on) == T] <- 0
#---- Online share of coupons received: online / (online + offline): get_count_rate_on ----
train$get_count_rate_on <- (train$get_count_on_notfixed + train$get_count_on_fixed) /
(train$get_count_on_notfixed + train$get_count_on_fixed + train$get_count_off)
train$get_count_rate_on[is.na(train$get_count_rate_on)] <- 0
#---- Online share of coupon purchases: online / (online + offline): get_buy_rate_on ----
train$get_buy_rate_on <- train$get_buy_on / (train$get_buy_on + train$get_buy_off)
train$get_buy_rate_on[is.na(train$get_buy_rate_on)] <- 0
#---- Online share of ALL purchases: online / (online + offline): get_buy_rate_on_all ----
train$get_buy_rate_on_all <- (train$get_buy_on + train$nodiscount_buy_on)/
(train$get_buy_on + train$nodiscount_buy_on + train$get_buy_off + train$nodiscount_buy_off)
train$get_buy_rate_on_all[is.na(train$get_buy_rate_on_all)] <- 0
#---- Online share of received-but-unused coupons: online / (online + offline): get_no_buy_rate_on ----
train$get_no_buy_rate_on <- train$get_no_buy_on / (train$get_no_buy_on + train$get_no_buy_off)
train$get_no_buy_rate_on[is.na(train$get_no_buy_rate_on)] <- 0
#---- Historical flag: user bought ONLY online: is_all_on_buy ----
train$is_all_on_buy <- ifelse((train$get_buy_on+train$nodiscount_buy_on)!= 0 &
(train$nodiscount_buy_off+train$get_buy_off)== 0,1,0)
#---- Historical flag: user bought ONLY offline: is_all_off_buy ----
train$is_all_off_buy <- ifelse((train$get_buy_on+train$nodiscount_buy_on)== 0 &
(train$nodiscount_buy_off+train$get_buy_off)!= 0,1,0)
#---- Historical flag: user never bought online OR offline: is_all_not_buy ----
train$is_all_not_buy <- ifelse((train$get_buy_on+train$nodiscount_buy_on) == 0 &
(train$nodiscount_buy_off+train$get_buy_off)== 0,1,0)
#---- Historical flag: user bought both online AND offline: is_all_buy ----
train$is_all_buy <- ifelse((train$get_buy_on+train$nodiscount_buy_on) != 0 &
(train$nodiscount_buy_off+train$get_buy_off)!= 0,1,0)
#---- Historical flag: online purchases were all coupon-free ("null"): is_null_buy_on ----
train$is_null_buy_on <- ifelse(train$nodiscount_buy_on != 0 & train$get_buy_on ==0,1,0 )
#---- Historical flag: offline purchases were all coupon-free ("null"): is_null_buy_off ----
train$is_null_buy_off <- ifelse(train$nodiscount_buy_off != 0 & train$get_buy_off ==0,1,0 )
#---- Historical flag: coupon-free purchases only, both channels: is_null_buy_all ----
train$is_null_buy_all <- ifelse( train$is_null_buy_on == 1 & train$is_null_buy_off == 1,1,0 )
#---- Historical flag: the user's only purchases were "fixed" (time-limited): is_fixed_buy ----
train$is_fixed_buy <- ifelse((train$get_buy_on -train$get_count_on_fixed + train$nodiscount_buy_on) == 0 &
train$get_count_on_fixed != 0 &
(train$nodiscount_buy_off+train$get_buy_off)== 0,1,0)
##---- Historical flag: never redeemed a coupon online: is_all_getnobuy_on ----
train$is_all_getnobuy_on <- ifelse(train$get_buy_on == 0,1,0)
##---- Historical flag: never redeemed a coupon offline: is_all_getnobuy_off ----
train$is_all_getnobuy_off <- ifelse(train$get_buy_off == 0,1,0)
##---- Historical flag: never redeemed a coupon anywhere: is_all_getnobuy ----
train$is_all_getnobuy <- ifelse(train$get_buy_on == 0 & train$get_buy_off == 0,1,0)
#---- Whether the current row's discount rate was ever redeemed online by this user: Discount_is_buy_on ----
# Collapse each user's redeemed discount rates into one comma-joined string,
# then row-wise test whether the row's Discount_rate appears in it.
# NOTE(review): apply() coerces the data frame to a character matrix; this
# works because both fields are character, but it is fragile.
midd <- train_online%>%select(User_id,Discount_rate,buy_fac)%>%
filter(buy_fac == 1)%>%group_by(User_id)
midd <- midd[c(1:2)]
midd <- distinct(midd,.keep_all = T)
midd <- midd%>%select(User_id,Discount_rate)%>%group_by(User_id)%>%
summarise(items = paste(Discount_rate, collapse=',') )
train <- merge(train,midd,by = 'User_id',all.x = T)
grepFun <- function(train){
grepl(train['Discount_rate'],train['items'],fixed=TRUE)
}
train$Discount_is_buy_on <- apply(train,1,grepFun)
train$Discount_is_buy_on[train$Discount_is_buy_on == TRUE] <- 1
train$Discount_is_buy_on[train$Discount_is_buy_on == FALSE] <- 0
train <- subset(train,select = -c(items))
rm(grepFun,train_online,midd)
#=====================================
# Same online feature pipeline as above, applied to the `revised` window.
# NOTE(review): this is a near-verbatim duplicate of the train-side block;
# it could be factored into one function applied to both data sets.
#---- Count of non-time-limited coupons received online: get_count_on_notfixed ----
midd <- revised_online%>%select(User_id,Coupon_id)%>%
filter(Coupon_id != 'null'&Coupon_id != 'fixed')%>%
group_by(User_id)%>%summarise(get_count_on_notfixed=n())
revised <- merge(revised,midd,by = 'User_id',all.x = T)
revised$get_count_on_notfixed[is.na(revised$get_count_on_notfixed) == T] <- 0
#---- Count of non-time-limited coupons received online but never used: get_no_buy_on ----
midd <- revised_online%>%select(User_id,buy_fac)%>%filter(buy_fac == 2)%>%
group_by(User_id)%>%summarise(get_no_buy_on=n())
revised <- merge(revised,midd,by = 'User_id',all.x = T)
revised$get_no_buy_on[is.na(revised$get_no_buy_on) == T] <- 0
#---- Count of time-limited ("fixed") coupons received online: get_count_on_fixed ----
midd <- revised_online%>%select(User_id,Coupon_id)%>%filter(Coupon_id == 'fixed')%>%
group_by(User_id)%>%summarise(get_count_on_fixed=n())
revised <- merge(revised,midd,by = 'User_id',all.x = T)
revised$get_count_on_fixed[is.na(revised$get_count_on_fixed) == T] <- 0
#---- Count of coupons received AND used online (fixed included): get_buy_on ----
midd <- revised_online%>%select(User_id,buy_fac)%>%filter(buy_fac == 1 | buy_fac == 4)%>%
group_by(User_id)%>%summarise(get_buy_on=n())
revised <- merge(revised,midd,by = 'User_id',all.x = T)
revised$get_buy_on[is.na(revised$get_buy_on) == T] <- 0
#---- Count of plain online purchases with no coupon at all: nodiscount_buy_on ----
midd <- revised_online%>%select(User_id,buy_fac)%>%filter(buy_fac == 3)%>%
group_by(User_id)%>%summarise(nodiscount_buy_on = n())
revised <- merge(revised,midd,by = 'User_id',all.x = T)
revised$nodiscount_buy_on[is.na(revised$nodiscount_buy_on) == T] <- 0
#---- Online share of coupons received: online / (online + offline): get_count_rate_on ----
revised$get_count_rate_on <- (revised$get_count_on_notfixed + revised$get_count_on_fixed) /
(revised$get_count_on_notfixed + revised$get_count_on_fixed + revised$get_count_off)
revised$get_count_rate_on[is.na(revised$get_count_rate_on)] <- 0
#---- Online share of coupon purchases: online / (online + offline): get_buy_rate_on ----
revised$get_buy_rate_on <- revised$get_buy_on / (revised$get_buy_on + revised$get_buy_off)
revised$get_buy_rate_on[is.na(revised$get_buy_rate_on)] <- 0
#---- Online share of ALL purchases: online / (online + offline): get_buy_rate_on_all ----
revised$get_buy_rate_on_all <- (revised$get_buy_on + revised$nodiscount_buy_on)/
(revised$get_buy_on + revised$nodiscount_buy_on + revised$get_buy_off + revised$nodiscount_buy_off)
revised$get_buy_rate_on_all[is.na(revised$get_buy_rate_on_all)] <- 0
#---- Online share of received-but-unused coupons: get_no_buy_rate_on ----
revised$get_no_buy_rate_on <- revised$get_no_buy_on / (revised$get_no_buy_on + revised$get_no_buy_off)
revised$get_no_buy_rate_on[is.na(revised$get_no_buy_rate_on)] <- 0
#---- Historical flag: user bought ONLY online: is_all_on_buy ----
revised$is_all_on_buy <- ifelse((revised$get_buy_on+revised$nodiscount_buy_on)!= 0 &
(revised$nodiscount_buy_off+revised$get_buy_off)== 0,1,0)
#---- Historical flag: user bought ONLY offline: is_all_off_buy ----
revised$is_all_off_buy <- ifelse((revised$get_buy_on+revised$nodiscount_buy_on)== 0 &
(revised$nodiscount_buy_off+revised$get_buy_off)!= 0,1,0)
#---- Historical flag: user never bought online OR offline: is_all_not_buy ----
revised$is_all_not_buy <- ifelse((revised$get_buy_on+revised$nodiscount_buy_on) == 0 &
(revised$nodiscount_buy_off+revised$get_buy_off)== 0,1,0)
#---- Historical flag: user bought both online AND offline: is_all_buy ----
revised$is_all_buy <- ifelse((revised$get_buy_on+revised$nodiscount_buy_on) != 0 &
(revised$nodiscount_buy_off+revised$get_buy_off)!= 0,1,0)
#---- Historical flag: online purchases were all coupon-free ("null"): is_null_buy_on ----
revised$is_null_buy_on <- ifelse(revised$nodiscount_buy_on != 0 & revised$get_buy_on ==0,1,0 )
#---- Historical flag: offline purchases were all coupon-free ("null"): is_null_buy_off ----
revised$is_null_buy_off <- ifelse(revised$nodiscount_buy_off != 0 & revised$get_buy_off ==0,1,0 )
#---- Historical flag: coupon-free purchases only, both channels: is_null_buy_all ----
revised$is_null_buy_all <- ifelse( revised$is_null_buy_on == 1 & revised$is_null_buy_off == 1,1,0 )
#---- Historical flag: the user's only purchases were "fixed" (time-limited): is_fixed_buy ----
revised$is_fixed_buy <- ifelse((revised$get_buy_on -revised$get_count_on_fixed + revised$nodiscount_buy_on) == 0 &
revised$get_count_on_fixed != 0 &
(revised$nodiscount_buy_off+revised$get_buy_off)== 0,1,0)
##---- Historical flag: never redeemed a coupon online: is_all_getnobuy_on ----
revised$is_all_getnobuy_on <- ifelse(revised$get_buy_on == 0,1,0)
##---- Historical flag: never redeemed a coupon offline: is_all_getnobuy_off ----
revised$is_all_getnobuy_off <- ifelse(revised$get_buy_off == 0,1,0)
##---- Historical flag: never redeemed a coupon anywhere: is_all_getnobuy ----
revised$is_all_getnobuy <- ifelse(revised$get_buy_on == 0 & revised$get_buy_off == 0,1,0)
#---- Whether the current row's discount rate was ever redeemed online by this user: Discount_is_buy_on ----
midd <- revised_online%>%select(User_id,Discount_rate,buy_fac)%>%
filter(buy_fac == 1)%>%group_by(User_id)
midd <- midd[c(1:2)]
midd <- distinct(midd,.keep_all = T)
midd <- midd%>%select(User_id,Discount_rate)%>%group_by(User_id)%>%
summarise(items = paste(Discount_rate, collapse=',') )
revised <- merge(revised,midd,by = 'User_id',all.x = T)
grepFun <- function(revised){
grepl(revised['Discount_rate'],revised['items'],fixed=TRUE)
}
revised$Discount_is_buy_on <- apply(revised,1,grepFun)
revised$Discount_is_buy_on[revised$Discount_is_buy_on == TRUE] <- 1
revised$Discount_is_buy_on[revised$Discount_is_buy_on == FALSE] <- 0
revised <- subset(revised,select = -c(items))
rm(grepFun,revised_online,midd)
# Persist both enriched feature tables.
write.csv(revised,"E:/R 3.4.2 for Windows/O2O_tc/2019.1.19/revised_two_1.csv")
write.csv(train,"E:/R 3.4.2 for Windows/O2O_tc/2019.1.19/train_two_1.csv")
|
0bd15638bc3225068b89db58b78dea0eeddcd312 | 798ccd069fc99bb33a0e476d6f8043659c319c3b | /man/sampleDirs.Rd | bfe12bdfb549532574d3763d661e5913b1b1e4b6 | [
"MIT"
] | permissive | microsud/bcbioRNASeq | 076beefe9a928f7ac17ca785f23e060a0a181f58 | ebccb143522938ad87388e1f1d734862b1d81d6d | refs/heads/master | 2021-07-07T06:51:34.259240 | 2017-09-28T14:28:28 | 2017-09-28T14:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 599 | rd | sampleDirs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/methods-sampleDirs.R
\docType{methods}
\name{sampleDirs}
\alias{sampleDirs}
\alias{sampleDirs,bcbioRNADataSet-method}
\title{Sample Directories}
\usage{
sampleDirs(object)
\S4method{sampleDirs}{bcbioRNADataSet}(object)
}
\arguments{
\item{object}{Object.}
}
\value{
Named character vector containing sample directory paths.
}
\description{
This method will be used to access folders where sample information is kept.
}
\examples{
data(bcb)
sampleDirs(bcb) \%>\% basename
}
\author{
Michael Steinbaugh
}
|
206997c8c869b2bf1e07359ae7544cc1504136ba | 0416dd359c092e40d6a8e625810f058bff1adf78 | /R/mod_progression.R | cc6052188777cc6c7b6a681503dbba7ad56ab7b6 | [
"MIT"
] | permissive | ericvenot/suivicovid | d8bd28626380c44f915053b844ff9c4c3dbe3b4d | 33639e28056eeee28c87f91226defaa9588e6820 | refs/heads/master | 2022-07-04T16:49:01.258843 | 2020-05-14T07:43:14 | 2020-05-14T07:43:14 | 253,522,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,614 | r | mod_progression.R | #' progression UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
#' @importFrom lubridate date
mod_progression_ui <- function(id){
  ns <- NS(id)

  # One full-width row: a title, the deaths-progression plot placeholder,
  # and a short footnote.  The plot output id is namespaced so it matches
  # the server module.
  row <- fluidRow(
    h2("Progression du nombre de d\u00E9c\u00E8s journaliers pour les pays avec plus de 100 d\u00E9c\u00E8s"),
    plotOutput(ns("progression"), height = 800),
    h2("(Rem: package ggplot)")
  )
  tagList(row)
}
#' progression Server Function
#'
#' @noRd
mod_progression_server <- function(input, output, session,r){
  ns <- session$ns  # golem template boilerplate; not used in this module

  # Fix: the original wrapped this assignment in observe({...}).  renderPlot()
  # is itself reactive (it re-executes whenever r$covid, r$ordre or r$hier
  # change), so assigning an output inside an observer is unnecessary and a
  # documented Shiny anti-pattern; the observer had no reactive dependencies
  # of its own and only ran once.
  output$progression <- renderPlot({
    req(r$covid)  # wait until the data set has been loaded
    r$covid %>%
      filter(pays %in% r$ordre) %>%          # keep only the selected countries
      mutate(date = date(x = date)) %>%      # coerce to Date for the x scale
      ggplot() +
      aes(x = date, y = morts, group = pays) +
      theme_minimal() +
      geom_line(aes(color = pays), size = 1) +
      labs(title = (paste0("Nombre de morts par jour (mise a jour du ", r$hier, ")")),
           x = "Pays", y = "Nombre de deces"
      ) +
      theme(legend.position = "bottom",
            plot.title = element_text(hjust = 0.5),
            axis.title.x = element_text(face = "bold"),
            axis.title.y = element_text(face = "bold")
      ) +
      scale_x_date(NULL,
                   breaks = scales::breaks_width("5 days"),
                   labels = scales::label_date_short()
      )
  })
}
## To be copied in the UI
# mod_progression_ui("progression_ui_1")
## To be copied in the server
# callModule(mod_progression_server, "progression_ui_1")
|
66a7b65af535be00cd17b58b4421693a9a6c482e | 6e2b27cd75e5e76022510b1682c9d888c9fb646d | /man/ocrpng.Rd | 401689527682b0c00e9d87365fe1372cc077e39b | [
"MIT"
] | permissive | nrode/CORproject | 3a57cabaafe0062483450af140fea2edaf8d938f | 49e1b2b77296ba36608962a97cee3fc805ffc981 | refs/heads/main | 2023-01-08T09:35:53.002989 | 2020-11-06T16:50:15 | 2020-11-06T16:50:15 | 310,327,274 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 366 | rd | ocrpng.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ocrpng.R
\name{ocrpng}
\alias{ocrpng}
\title{Title}
\usage{
ocrpng(
impagepath = "data/test.png",
n.col = NULL,
lang = "eng",
header = TRUE,
cleaning = FALSE,
outcsv = "/output/fitnessOCR.csv"
)
}
\arguments{
\item{cleaning}{}
}
\value{
}
\description{
Title
}
\examples{
}
|
0d8e1048e088869f54da90896614351b640e125c | 6ca362731c11fc3c6db6ff2bb0852be4c3f9fba3 | /code/spare_R_codes.R | 21246bbab1028ca68ac0bcf91c603569f12841f7 | [] | no_license | willofbigdata/real_estate | 50d817b2e85cbc3a80edfb3f0f25bcde7d93877e | 47ee4c967bc25bf8ea650b36560dcef163799347 | refs/heads/master | 2021-01-22T03:49:19.779268 | 2017-07-30T23:18:56 | 2017-07-30T23:18:56 | 92,406,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,847 | r | spare_R_codes.R |
# The center-and-spread data frame used for model-building
# ***** This approach requires all categories to have at least 2 observations
# since this approach uses the standard deviation.
# NOTE(review): `source` is used throughout this script as a data frame,
# shadowing base::source() — consider renaming it.
# Obtain the mean and stdev values of lat and lng from each category
cns <- data.frame(uniq_cat_id=sort(unique(source$cat_id)),
                  mean_lng = tapply(source$lng,source$cat_id,mean),
                  mean_lat = tapply(source$lat,source$cat_id,mean),
                  sd_lng = tapply(source$lng,source$cat_id,sd),
                  sd_lat = tapply(source$lat,source$cat_id,sd)
)
# Re-express mean and stdev as vectors (interleaved lat/lng, one pair per
# category, row-major via t()) to be fed into the apply below
ll_mean <- c(t(data.matrix(cns[,c('mean_lat','mean_lng')])))
ll_sd <- c(t(data.matrix(cns[,c('sd_lat','sd_lng')])))
# Use apply to acquire the standardized difference
# in lat and lng between each point and the center
# of each category
# NOTE(review): this divides the SQUARED difference by the sd (not the
# variance), so it is not a conventional z-score — confirm intended scaling.
ll_mat <- data.matrix(source[,c('lat','lng')])
ll_cols <- t(apply(ll_mat,1,function(e) ((e - ll_mean)^2)/ll_sd))
# Name the columns of ll_cols: lat_std_<cat> / lng_std_<cat> per category
colnames(ll_cols) <-
  paste(c("lat_std","lng_std"),rep(sapply(cns$uniq_cat_id,deparse),each=2),sep="_")
# Column-bind source with ll_cols (adds the per-category features)
source <- cbind(source,ll_cols)
summary(source)
# Add other columns that might be helpful for making predictions.
# Distance of neighbouring elements
# dists <- dist(matrix(c(x,y),ncol=2), method ="minkowski", p=2)
# data.frame(uniq_cat_id=unique(source$cat_id))
# # The neighbourhood characteristic approach
# # For each observation p and category k
# # compute the k-t neighbouring characteristics value nc(i)
#
# set.seed(1)
# dists_2 <- dist(matrix(1:10,ncol=2))
# dists_2 <- as.matrix(dists_2)
# cat_vec <- sample(1:5,size=5,replace=TRUE)
# cats <- sort(unique(cat_vec))
#
# cat_vec_true <- cat_vec == 1
#
# # Find the zero-one matrix which expresses whether each observation
# # belong to each category or not.
# # row: observation
# # col: category
#
# n_cats <- length(cats)
# cats_matrix <- matrix(rep(cat_vec,n_cats),ncol=n_cats)
# cats_matrix <- t(apply(cats_matrix,1,function(e) e == cats))
#
# # For each column, count how many (except self) is TRUE
# count_others <- function(cats_matrix){
# nrows <- nrow(cats_matrix)
# anti_iden <- 1 - diag(nrows)
# return(anti_iden %*% cats_matrix)
# }
#
# count_matrix <- count_others(cats_matrix)
#
# c_matrix <- (dists_2 %*% cats_matrix) / count_matrix
# c_matrix <- exp(-c_matrix)
# c_matrix <- ifelse(is.nan(c_matrix),0,c_matrix)
# Gaussian discriminant analysis (MASS::lda) on location plus the first three
# neighbourhood-characteristic features; trained on `source`, scored on `test`.
cat_lda <- lda(cat_id ~ .,data=source[,c("cat_id","lat","lng",
                                         "nc_1","nc_2","nc_3")])
cat_lda_vals <- predict(cat_lda,test,type="response")
# NOTE(review): `g=cat_id` references a bare name; this presumably should be
# `g = test$cat_id` — confirm.
ldahist(data = cat_lda_vals$x[,2], g=cat_id) # visualize contributions
cat_lda_pred <- cat_lda_vals$class
cat_lda
# misclassification error and accuracy on the test set
mce <- mean(test$cat_id != cat_lda_pred)
accuracy <- 1 - mce
accuracy
# Multinomial regression (nnet::multinom) on the same predictors
mnlg_fit <- multinom(cat_id ~ lat + lng + nc_1 + nc_2 + nc_3
                     ,data = train)
# z scores (Wald statistics: coefficient / standard error)
z <- summary(mnlg_fit)$coefficients / 
  summary(mnlg_fit)$standard.errors
print(z)
# 2-tailed z test
p <- (1 - pnorm(abs(z), 0, 1)) * 2
print(p)
# McFadden-style pseudo-R^2; pR2() comes from the pscl package
pR2(mnlg_fit)
# generate sample: two-component univariate Gaussian mixture
# (means 2 and 5, unit sd, mixing weights 0.2 / 0.8)
# NOTE(review): `sample` shadows base::sample().
sample_size <- 1000
weight <- 0.2
sample <- ifelse(runif(sample_size,0,1)<weight,1,2)
sample_1_count <- sum(sample==1)
sample_2_count <- sum(sample==2)
# replace the component labels with draws from each component distribution
sample[which(sample==1)] <- rnorm(sample_1_count,mean=2,1)
sample[which(sample==2)] <- rnorm(sample_2_count,mean=5,1)
hist(sample,breaks=100)
# sample <- 0.5 * rnorm(sample_size,mean=-5000,0.00001) + 0.5 * rnorm(sample_size,5000,0.00005)
hist(sample,breaks=100)
# import
library(mixtools)
# wait = faithful$waiting
# mixmdl = normalmixEM(wait)
# plot(mixmdl,which=2)
# lines(density(wait), lty=2, lwd=2)
# test on simulated data set: fit a 2-component normal mixture by EM and
# overlay the kernel density estimate on the mixture plot
mix_own <- normalmixEM(sample)
plot(mix_own,which=2)
lines(density(sample), lty=2, lwd=2)
summary(mix_own)
# Test out multivariate normal distributions
# Try 2-dimensional distributions
library(mixtools)
library(MASS)
library(ggplot2)
# generate sample: 2-D Gaussian mixture; column 1 holds the component label,
# columns 2-3 hold the coordinates
sample_size <- 1000
weight <- 0.6
# Define the distribution parameters
# each sigma must be a positive-definite symmetric matrix
mu_1 <- c(0,0)
sigma_1 <- matrix(c(1,0.2,0.2,1),nrow=2)
mu_2 <- c(2,2)
sigma_2 <- matrix(c(1,-0.3,-0.3,1)/2,nrow=2)
draw <- ifelse(runif(sample_size,0,1)<weight,1,2)
sample <- matrix(draw,nrow=sample_size,ncol=3)
sample_1_count <- sum(draw == 1)
sample_2_count <- sum(draw == 2)
# fill coordinates per component with MASS::mvrnorm draws
sample[which(sample[,1] == 1),c(2,3)] <- mvrnorm(sample_1_count,mu_1,sigma_1)
sample[which(sample[,1] == 2),c(2,3)] <- mvrnorm(sample_2_count,mu_2,sigma_2)
sample <- as.data.frame(sample)
# Perform the fit: 2-component multivariate normal mixture via EM
mv_result <- mvnormalmixEM(sample[,c(2,3)])
summary(mv_result)
# NOTE(review): the next line is a standalone ggplot layer that is never
# added to any plot; as written it only prints the layer object.
geom_raster(aes(fill = density))
plot_gen <- ggplot(data = sample,aes(x=V2,y=V3)) + 
  geom_point(aes(colour = V1,size=0.5))
plot_gen
# posterior membership probabilities, plus their density under component 1
mv_result_pos <- mv_result$posterior
mv_pos_den <- dmvnorm(mv_result$posterior,
                      mv_result$mu[[1]],
                      mv_result$sigma[[1]])
mv_result_pos <- cbind(mv_result_pos,mv_pos_den)
mv_result_pos <- data.frame(mv_result_pos)
# plot_gen_2 <- ggplot(data = mv_result_pos,aes(x=comp.1,y=comp.2,z=density)) + 
#   geom_contour()
# plot_gen_2
plot(mv_result,which=2,alpha=c(0:50)/50)
# NOTE(review): density() on a data.frame will error; this probably needs a
# single numeric column of `sample` — confirm.
lines(density(sample), lty=2, lwd=2)
# ellipse(mv_result$mu[[1]],mv_result$sigma[[1]],alpha=c(0.01,0.02),
#         newplot = FALSE,type="l",lwd=0.01)
# Neighbourhood-characteristic adjustment over a lat/lng bin grid.
# Inputs (n_bin_lat, n_bin_lng, nc_list, block_lng_batch, ...) are expected
# to be defined earlier in the analysis session.
height <- n_bin_lat
width <- n_bin_lng
n_cat <- nc_list$n_cat
lng_gap <- nc_list$gap[1]
lat_gap <- nc_list$gap[2]
# Distance discount rate
r_lng <- 10^3
r_lat <- 10^3
# exclude self?
exclude_self <- TRUE
# Prepare the batches used to calculate the
# number of blocks relative to each lng and lat bin.
# NOTE(review): both batches call block_lng_batch(); the second looks like a
# copy-paste slip and may need block_lat_batch() — confirm.
lng_batch <- block_lng_batch(n_bin_lat,n_bin_lng)
lat_batch <- block_lng_batch(n_bin_lat,n_bin_lng)
if(exclude_self){
  self_lng <- ifelse(lng_batch == 0,1,0)
  self_lat <- ifelse(lat_batch == 0,1,0)
}
# Transform to compute horizontal and vertical distances.
lng_batch <- lng_gap * lng_batch
lat_batch <- lat_gap * lat_batch
# Scale using the natural exponent (exponential distance decay).
# Subtract self from the calculation if required.
lng_batch <- exp(-r_lng * lng_batch)
lat_batch <- exp(-r_lat * lat_batch)
if(exclude_self){
  lng_batch <- lng_batch - self_lng
  lat_batch <- lat_batch - self_lat
}
# Obtain adjusted neighbourhood characteristics
# for lng and lat. (1:n_cat+5) selects columns 6..(5+n_cat) of nc_frame,
# i.e. the per-category characteristic columns.
nc_frame <- nc_list$nc_frame[,(1:n_cat+5)]
nc_frame <- as.matrix(nc_frame)
nc_lng_adj <- lng_batch %*% nc_frame
nc_lat_adj <- lat_batch %*% nc_frame
# Compute the adjusted nc as a weighted average
# of nc_lng_adj and nc_lat_adj; row[2]/row[3] index the lng/lat bin IDs
# from nc_list$bin_map.
weight_lng <- 0.5
weight_lat <- 1 - weight_lng
nc_adj <- apply((nc_list$bin_map),1,function(row)
  weight_lng * nc_lng_adj[row[2],] +
    weight_lat * nc_lat_adj[row[3],])
nc_adj <- t(nc_adj)
nc_adj <- as.data.frame(nc_adj)
nc_adj <- cbind(nc_list$bin_map,nc_adj)
# Plot neighbourhood characteristics
plot_nc <- ggplot(data = nc_adj,
                  aes(x=lng_bin_ID,y=lat_bin_ID)) +
  geom_tile(aes(fill = V2),colour = "white") +
  scale_fill_gradient(low = "white",high = "steelblue")
plot_nc
# Plot adjusted neighbourhood characteristics
plot_nc <- ggplot(data = nc_sample,aes(x=n_bin_lng,
                                       y=n_bin_lat)) +
  geom_point(aes(colour = V1, shape = factor(cat_id)),size=0.5) +
  scale_colour_gradient(low = "white",high = "red") +
  scale_shape_manual(values=seq(0,length(unique(dataset$label))))
# plot_nc
# Plot neighbourhood characteristics percentages
plot_nc_per <- ggplot(data = sample_dataset,aes(x=lng,y=lat)) +
  geom_point(aes(colour = nc_9_per, shape = factor(cat_id)),size=0.5) +
  scale_colour_gradient(low = "white",high = "red") +
  scale_shape_manual(values=seq(0,length(unique(dataset$label))))
# plot_nc_per
eaceac1f0ae4ca2456957a201511540345e3f913 | e9cdaf3e58814c042f94059614fa91c63d6d1ca9 | /R/transpose_df.R | 63a3ada4aff049462683b9fa71acc7bab7680ab5 | [
"MIT"
] | permissive | trendlock/brush | 45f3337d2cbf90a2a244ecf6ce1491aa351d46fe | dd32a4785f6add08d3370283fd5abe93a0132e17 | refs/heads/master | 2021-09-04T20:24:36.511755 | 2018-01-10T22:28:14 | 2018-01-10T22:28:14 | 115,588,529 | 0 | 0 | MIT | 2018-01-10T22:13:27 | 2017-12-28T05:31:33 | R | UTF-8 | R | false | false | 87 | r | transpose_df.R |
#' Transpose a data frame
#'
#' Swaps the rows and columns of `df` and returns the result as a tibble.
#' Because the transpose goes through `t()` (a matrix), mixed column types
#' are coerced to a common type along the way.
#'
#' @param df A data frame to transpose.
#' @return A tibble holding the transpose of `df`.
#' @export
transpose_df <- function(df) {
  as_tibble(t(df))
}
|
c43b1c0cb9c32aa44eb533841c435d5ef42f9e21 | a3b8be8f07b3f32002e7a6433e3da61f6f55f512 | /man/spatial_rj_neighborhoods.Rd | 254a84d9bd4c025cda6e2bf7bfb0fbea45429b72 | [
"MIT"
] | permissive | mralbu/coronavirusbrazil | 20e077a22e0df40397f93a845d2c41d9aebc0bf1 | 9e0eeaf848d2a31370b5deff48a6e363cc19c54c | refs/heads/master | 2021-05-21T23:07:12.653930 | 2021-03-21T02:36:17 | 2021-03-21T02:36:17 | 252,846,329 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 941 | rd | spatial_rj_neighborhoods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{spatial_rj_neighborhoods}
\alias{spatial_rj_neighborhoods}
\title{Spatial Dataset of Rio de Janeiro cases by neighborhood}
\format{A spatial data.frame object (sf)
\describe{
\item{neighborhood}{Rio de Janeiro Neighborhood}
\item{cases}{Confirmed Cases}
\item{lat}{Latitude}
\item{lon}{Longitude}
\item{AP}{AP}
\item{objectId}{objectId}
\item{geoms}{Simple Features geometries}
}}
\source{
\href{http://painel.saude.rj.gov.br/monitoramento/covid19.html}{Secretaria de Saúde - RJ}
}
\usage{
spatial_rj_neighborhoods
}
\description{
Spatial summary of the Coronavirus (COVID-19) cases in Rio de Janeiro neighborhoods
}
\details{
The spatial dataset contains cases of covid-19 in Rio de Janeiro neighborhoods
}
\examples{
data(spatial_rj_neighborhoods)
}
\keyword{COVID19}
\keyword{coronavirus_br}
\keyword{datasets}
|
ae451dcfec47f140b2c56c6ea6e391ccf06b10ed | 2b0e7454e2c87076f4f97d35000bf3426b7d9aaa | /man/TS_split.Rd | 33cb216a6100fe8c72ba434ee99a3363dc5f2dfe | [] | no_license | raphael210/QDataGet | 52df9d791d7d1d8933555dbdfa9d81e42558a5ee | 83531020e180fe8d07fdfa4a75413fd2b95cd6b4 | refs/heads/master | 2020-04-12T06:29:33.198718 | 2019-02-01T07:50:14 | 2019-02-01T07:50:14 | 64,194,185 | 0 | 5 | null | 2017-03-16T03:29:45 | 2016-07-26T06:00:12 | R | UTF-8 | R | false | true | 560 | rd | TS_split.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct01_frameBuildingFuncs.R
\name{TS_split}
\alias{TS_split}
\title{TS_split}
\usage{
TS_split(TS, by)
}
\arguments{
\item{by}{a vector, with the same length of TS}
}
\value{
a list is TS
}
\description{
split TS to lists, which are usually TSFs, TSFRs
}
\examples{
tsfr <- getSectorID(tsfr,sectorAttr = defaultSectorAttr("ind",336))
TSFRs1 <- TS_split(tsfr,by=tsfr$sector)
MC.chart.IC(TSFRs1)
TSFRs2 <- TS_split(tsfr,by=cut.Date2(tsfr$date,breaks = "2 year"))
MC.chart.IC(TSFRs2)
}
|
9a44607336c10ac19c675b5835cc203bc649beb5 | 45965a51c28dca7ca4ec5a8205b8d11fc6de8e6b | /DT_C5.R | 9a8108acd03cfe262739f358c77dbf4745af2cb4 | [] | no_license | Dhrupad101/Data | 955be72b195c0a25167a395778d9b5d66e2a60f5 | 58687de5fbe9d0a19c759397694d1b19d38006bb | refs/heads/master | 2020-12-09T16:18:46.087646 | 2020-08-23T23:19:55 | 2020-08-23T23:19:55 | 233,357,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 485 | r | DT_C5.R | data("iris")
# Decision-tree classification of the iris data with C5.0, using a
# stratified train/test split from caret.
# One-time setup: install C50 only when it is not already available
# (caret is assumed to be installed).
if (!requireNamespace("C50", quietly = TRUE)) install.packages("C50")
library(C50)
library(caret)
# create partitions: stratified 65% training / 35% testing split on Species
inTraining <- createDataPartition(iris$Species, p = 0.65, list = FALSE)
training <- iris[inTraining, ]
testing <- iris[-inTraining, ]
# model: Species is the dependent variable.
# (Fixed: the original formula `training$Species ~ .` made `.` expand to ALL
# columns of `data` — including Species itself — leaking the response into
# the predictors.)
model <- C5.0(Species ~ ., data = training)
summary(model)
# prediction on the held-out set; column 5 is the Species label, so drop it
head(testing)
pred <- predict(model, testing[, -5])
# confusion matrix and test-set accuracy
a <- table(testing$Species, pred)
sum(diag(a)) / sum(a) # accuracy on the test set
plot(model)
|
a82bc1a676cf66d27d85ec7c3244ccc6b9041c99 | 5788fc4ed667bcbb31cf6a47cec65d0e62db367f | /server.R | 42b6efc63b27f48bd4c593d048dadf7e380a1cb7 | [] | no_license | zhoutongfu/DevelopingDataProject | 0650a591f83343b468dbb6a60236ff84d503a0a4 | 04048d5431d6a09919fdaaa6b2842f425dc23c37 | refs/heads/master | 2016-09-01T22:56:09.005178 | 2015-06-19T00:31:53 | 2015-06-19T00:31:53 | 37,634,710 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,076 | r | server.R | library(shiny)
library(png)
source("dataFile.R")
# Draw a grayscale picture matrix on the current graphics device.
# image() interprets matrices with an unusual orientation, so the rows are
# reversed and the matrix transposed before plotting to get the expected
# viewing angle.
showPic <- function(pic) {
  flipped <- pic[rev(seq_len(nrow(pic))), ]
  image(t(flipped), col = gray((0:32) / 32), asp = 1,
        xlim = c(0, 1), ylim = c(0, 1))
}
# Rank-n approximation of a grayscale picture matrix via truncated SVD:
# keep the n largest singular values/vectors and reconstruct the matrix
# from them. With n = min(dim(pic)) the original is recovered up to
# floating-point rounding.
#
# pic: numeric matrix (the image); n: number of singular components (n >= 1).
compressedPic <- function(pic, n) {
  p.svd <- svd(pic)
  keep <- seq_len(n)
  # drop = FALSE keeps u/v as matrices even when n == 1; the original
  # `[, 1:n]` collapsed to a vector there and only worked by accidental
  # vector/matrix conformability in %*%
  p.svd$u[, keep, drop = FALSE] %*%
    diag(p.svd$d[keep], nrow = n, ncol = n) %*%
    t(p.svd$v[, keep, drop = FALSE])
}
# Server logic: let the user pick a picture, choose how many SVD components
# to keep, and compare the rank-reduced image with the original.
shinyServer(function(input, output,session) {
  # Reactive: the picture matrix selected in the UI. `swift`, `warriors` and
  # `mars` are defined elsewhere — presumably loaded by dataFile.R, which is
  # sourced at the top of this file (TODO confirm).
  picChoice <- reactive({
    switch(input$picture,
           "Taylor Swift" = swift,
           "Warriors" = warriors,
           "Bruno Mars" = mars)
  })
  observe({
    # update sliderInput max value: the number of SVD components can be at
    # most min(rows, cols) of the selected picture; reset the slider to 1
    # whenever the picture changes
    updateSliderInput(session, "components", max= min(dim(picChoice())), value = 1 )
  })
  # Rank-n SVD approximation of the currently selected picture.
  output$compressedPlt <- renderPlot({
    n <- input$components
    compressed <- compressedPic(picChoice(),n)
    showPic(compressed)
  })
  # The unmodified picture, for side-by-side comparison.
  output$original <- renderPlot({
    showPic(picChoice())
  })
})
|
6a2a6e6cb23014ebfa868d55f38f76dbd263d295 | 4201e9b754760dc35fc0aeef9df5a8b9d801c47f | /bin/R-3.5.1/src/library/base/man/formatc.Rd | a29ae4240296e6063da32f7679275d78584f2330 | [
"MIT",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.1-only",
"LGPL-3.0-only",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lifebit-ai/exomedepth | cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e | 5a775ae5e2a247aeadc5208a34e8717c7855d080 | refs/heads/master | 2020-03-27T12:55:56.400581 | 2018-10-11T10:00:07 | 2018-10-11T10:00:07 | 146,578,924 | 0 | 0 | MIT | 2018-08-29T09:43:52 | 2018-08-29T09:43:51 | null | UTF-8 | R | false | false | 11,704 | rd | formatc.Rd | % File src/library/base/man/formatc.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2017 R Core Team
% Distributed under GPL 2 or later
\name{formatC}
\title{Formatting Using C-style Formats}
\alias{formatC}
\alias{prettyNum}
\alias{.format.zeros}
\description{
Formatting numbers individually and flexibly, \code{formatC()} using
\code{C} style format specifications.
\code{prettyNum()} is used for \dQuote{prettifying} (possibly
formatted) numbers, also in \code{\link{format.default}}.
\code{.format.zeros()}, an auxiliary function of \code{prettyNum()}
re-formats the zeros in a vector \code{x} of formatted numbers.
}
\usage{
formatC(x, digits = NULL, width = NULL,
format = NULL, flag = "", mode = NULL,
big.mark = "", big.interval = 3L,
small.mark = "", small.interval = 5L,
decimal.mark = getOption("OutDec"),
preserve.width = "individual", zero.print = NULL,
drop0trailing = FALSE)
prettyNum(x, big.mark = "", big.interval = 3L,
small.mark = "", small.interval = 5L,
decimal.mark = getOption("OutDec"), input.d.mark = decimal.mark,
preserve.width = c("common", "individual", "none"),
zero.print = NULL, drop0trailing = FALSE, is.cmplx = NA,
\dots)
.format.zeros(x, zero.print, nx = suppressWarnings(as.numeric(x)))
}
\arguments{
\item{x}{an atomic numerical or character object, possibly
\code{\link{complex}} only for \code{prettyNum()}, typically a
vector of real numbers. Any class is discarded, with a warning.}
\item{digits}{the desired number of digits after the decimal
point (\code{format = "f"}) or \emph{significant} digits
(\code{format = "g"}, \code{= "e"} or \code{= "fg"}).
Default: 2 for integer, 4 for real numbers. If less than 0,
the C default of 6 digits is used. If specified as more than 50, 50
will be used with a warning unless \code{format = "f"} where it is
limited to typically 324. (Not more than 15--21 digits need be
accurate, depending on the OS and compiler used. This limit is
just a precaution against segfaults in the underlying C runtime.)
}
\item{width}{the total field width; if both \code{digits} and
\code{width} are unspecified, \code{width} defaults to 1,
otherwise to \code{digits + 1}. \code{width = 0} will use
\code{width = digits}, \code{width < 0} means left
justify the number in this field (equivalent to \code{flag = "-"}).
If necessary, the result will have more characters than
\code{width}. For character data this is interpreted in characters
(not bytes nor display width).
}
\item{format}{equal to \code{"d"} (for integers), \code{"f"},
\code{"e"}, \code{"E"}, \code{"g"}, \code{"G"}, \code{"fg"} (for
reals), or \code{"s"} (for strings). Default is \code{"d"} for
integers, \code{"g"} for reals.
\code{"f"} gives numbers in the usual
\code{xxx.xxx} format; \code{"e"} and \code{"E"} give \code{n.ddde+nn} or
\code{n.dddE+nn} (scientific format); \code{"g"} and \code{"G"} put
\code{x[i]} into scientific format only if it saves space to do so.
\code{"fg"} uses fixed format as \code{"f"}, but \code{digits} as
the minimum number of \emph{significant} digits. This can lead
to quite long result strings, see examples below. Note that unlike
\code{\link{signif}} this prints large numbers with
more significant digits than \code{digits}. Trailing zeros are
\emph{dropped} in this format, unless \code{flag} contains
\code{"#"}.}
\item{flag}{for \code{formatC}, a character string giving a
format modifier as in Kernighan and Ritchie (1988, page 243) or the
C+99 standard.
\code{"0"} pads leading zeros; \code{"-"} does left adjustment,
others are \code{"+"}, \code{" "}, and \code{"#"}; on some
platform--locale combination, \code{"'"} activates
\dQuote{thousands' grouping} for decimal conversion, and versions of
\file{glibc} allow \code{"I"} for integer conversion to use the locale's
alternative output digits, if any.
There can be more than one of these, in any order. Other characters
used to have no effect for \code{character} formatting, but signal
an error since \R 3.4.0.
}
\item{mode}{\code{"double"} (or \code{"real"}), \code{"integer"} or
\code{"character"}.
Default: Determined from the storage mode of \code{x}.}
\item{big.mark}{character; if not empty used as mark between every
\code{big.interval} decimals \emph{before} (hence \code{big}) the
decimal point.}
\item{big.interval}{see \code{big.mark} above; defaults to 3.}
\item{small.mark}{character; if not empty used as mark between every
\code{small.interval} decimals \emph{after} (hence \code{small}) the
decimal point.}
\item{small.interval}{see \code{small.mark} above; defaults to 5.}
\item{decimal.mark}{the character to be used to indicate the numeric
decimal point.}
\item{input.d.mark}{if \code{x} is \code{\link{character}}, the
character known to have been used as the numeric decimal point in
\code{x}.}
\item{preserve.width}{string specifying if the string widths should
be preserved where possible in those cases where marks
(\code{big.mark} or \code{small.mark}) are added. \code{"common"},
the default, corresponds to \code{\link{format}}-like behavior
whereas \code{"individual"} is the default in
\code{formatC()}. Value can be abbreviated.}
\item{zero.print}{logical, character string or \code{NULL} specifying
if and how \emph{zeros} should be formatted specially. Useful for
pretty printing \sQuote{sparse} objects.}
\item{drop0trailing}{logical, indicating if trailing zeros,
i.e., \code{"0"} \emph{after} the decimal mark, should be removed;
also drops \code{"e+00"} in exponential formats.}
\item{is.cmplx}{optional logical, to be used when \code{x} is
\code{"\link{character}"} to indicate if it stems from
\code{\link{complex}} vector or not. By default (\code{NA}),
\code{x} is checked to \sQuote{look like} complex.}
\item{\dots}{arguments passed to \code{format}.}
\item{nx}{numeric vector of the same length as \code{x}, typically the
numbers of which the character vector \code{x} is the pre-format.}
}
\value{
A character object of same size and attributes as \code{x} (after
discarding any class), in the current locale's encoding.
Unlike \code{\link{format}}, each number is formatted individually.
Looping over each element of \code{x}, the C function
\code{sprintf(\dots)} is called for numeric inputs (inside the C
function \code{str_signif}).
\code{formatC}: for character \code{x}, do simple (left or right)
padding with white space.
}
\details{
If you set \code{format} it overrides the setting of \code{mode}, so
\code{formatC(123.45, mode = "double", format = "d")} gives \code{123}.
The rendering of scientific format is platform-dependent: some systems
use \code{n.ddde+nnn} or \code{n.dddenn} rather than \code{n.ddde+nn}.
\code{formatC} does not necessarily align the numbers on the decimal
point, so \code{formatC(c(6.11, 13.1), digits = 2, format = "fg")} gives
\code{c("6.1", " 13")}. If you want common formatting for several
numbers, use \code{\link{format}}.
\code{prettyNum} is the utility function for prettifying \code{x}.
\code{x} can be complex (or \code{\link{format}(<complex>)}), here. If
\code{x} is not a character, \code{format(x[i], ...)} is applied to
each element, and then it is left unchanged if all the other arguments
are at their defaults. Use the \code{input.d.mark} argument for
\code{prettyNum(x)} when \code{x} is a \code{character} vector not
resulting from something like \code{format(<number>)} with a period as
decimal mark.
Because \code{\link{gsub}} is used to insert the \code{big.mark}
and \code{small.mark}, special characters need escaping. In particular,
to insert a single backslash, use \code{"\\\\\\\\"}.
The C doubles used for \R numerical vectors have signed zeros, which
\code{formatC} may output as \code{-0}, \code{-0.000} \dots.
There is a warning if \code{big.mark} and \code{decimal.mark} are the
same: that would be confusing to those reading the output.
}
\note{
The default for \code{decimal.mark} in \code{formatC()} was changed in
\R 3.2.0: for use within \code{\link{print}} methods in packages which might
be used with earlier versions: use \code{decimal.mark = getOption("OutDec")}
explicitly.
}
\author{
\code{formatC} was originally written by Bill Dunlap for S-PLUS, later
much improved by Martin Maechler.
It was first adapted for \R by Friedrich Leisch and since much
improved by the R Core team.
%% The utilities \code{prettyNum()} and \code{.format.zeros()} are by
%% Martin Maechler
}
\references{
Kernighan, B. W. and Ritchie, D. M. (1988)
\emph{The C Programming Language.} Second edition. Prentice Hall.
}
\seealso{
\code{\link{format}}.
\code{\link{sprintf}} for more general C-like formatting.
}
\examples{
xx <- pi * 10^(-5:4)
cbind(format(xx, digits = 4), formatC(xx))
cbind(formatC(xx, width = 9, flag = "-"))
cbind(formatC(xx, digits = 5, width = 8, format = "f", flag = "0"))
cbind(format(xx, digits = 4), formatC(xx, digits = 4, format = "fg"))
formatC( c("a", "Abc", "no way"), width = -7) # <=> flag = "-"
formatC(c((-1:1)/0,c(1,100)*pi), width = 8, digits = 1)
## note that some of the results here depend on the implementation
## of long-double arithmetic, which is platform-specific.
xx <- c(1e-12,-3.98765e-10,1.45645e-69,1e-70,pi*1e37,3.44e4)
## 1 2 3 4 5 6
formatC(xx)
formatC(xx, format = "fg") # special "fixed" format.
formatC(xx[1:4], format = "f", digits = 75) #>> even longer strings
formatC(c(3.24, 2.3e-6), format = "f", digits = 11, drop0trailing = TRUE)
r <- c("76491283764.97430", "29.12345678901", "-7.1234", "-100.1","1123")
## American:
prettyNum(r, big.mark = ",")
## Some Europeans:
prettyNum(r, big.mark = "'", decimal.mark = ",")
(dd <- sapply(1:10, function(i) paste((9:0)[1:i], collapse = "")))
prettyNum(dd, big.mark = "'")
## examples of 'small.mark'
pN <- stats::pnorm(1:7, lower.tail = FALSE)
cbind(format (pN, small.mark = " ", digits = 15))
cbind(formatC(pN, small.mark = " ", digits = 17, format = "f"))
cbind(ff <- format(1.2345 + 10^(0:5), width = 11, big.mark = "'"))
## all with same width (one more than the specified minimum)
## individual formatting to common width:
fc <- formatC(1.234 + 10^(0:8), format = "fg", width = 11, big.mark = "'")
cbind(fc)
## Powers of two, stored exactly, formatted individually:
pow.2 <- formatC(2^-(1:32), digits = 24, width = 1, format = "fg")
## nicely printed (the last line showing 5^32 exactly):
noquote(cbind(pow.2))
## complex numbers:
r <- 10.0000001; rv <- (r/10)^(1:10)
(zv <- (rv + 1i*rv))
op <- options(digits = 7) ## (system default)
(pnv <- prettyNum(zv))
stopifnot(pnv == "1+1i", pnv == format(zv),
pnv == prettyNum(zv, drop0trailing = TRUE))
## more digits change the picture:
options(digits = 8)
head(fv <- format(zv), 3)
prettyNum(fv)
prettyNum(fv, drop0trailing = TRUE) # a bit nicer
options(op)
## The ' flag :
doLC <- FALSE # R warns, so change to TRUE manually if you want see the effect
if(doLC)
oldLC <- Sys.setlocale("LC_NUMERIC", "de_CH.UTF-8")
formatC(1.234 + 10^(0:4), format = "fg", width = 11, flag = "'")
## --> ..... " 1'001" " 10'001" on supported platforms
if(doLC) ## revert, typically to "C" :
Sys.setlocale("LC_NUMERIC", oldLC)
}
\keyword{character}
\keyword{print}
|
448e317b4421526cee482c4fa2b8021494dae716 | 29585dff702209dd446c0ab52ceea046c58e384e | /TreePar/R/pnshift.R | e3ffcb672cfc5fd7691fced14f693a2906d37705 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 273 | r | pnshift.R | pnshift<-function(n,time,t,lambda,mu,rho=1){
i <- inter(time,t)
rho1<-rho
rho<-lambda*0+1
rho[1]<-rho1
probext<-q2(i,time,t,lambda,mu,rho)
if (n==0){res<-probext} else {
res<-1-probext
Finv<- 1/Ffuncshift(time,t,lambda,mu,rho1)
res<-res*Finv*(1-Finv)^(n-1)}
res
} |
32a17f31acb096020d6d42cfffc9cac9b629dd1c | 4a73e641f5fa27ad41bd07d4be32d2f01b1c9983 | /grafico_temperatura_tiempo2.R | 76c06b9138b54ef36aea5b3a38c8b807b8cf6e98 | [] | no_license | smansilla16/PROYECTO_STAT_NT_2019 | b4869672fef5f1d6f2671223fc915dda266a096d | e9db1671577f741e1b22f5dcbc29efdbfd5dbb15 | refs/heads/master | 2020-05-29T15:29:37.019709 | 2019-07-02T00:11:21 | 2019-07-02T00:11:21 | 189,223,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,569 | r | grafico_temperatura_tiempo2.R | source("carga_datos_clima_consumo.R", encoding = "UTF-8")
# Chart: yearly boxplots of temperature, with historical reference lines.
# (User-facing strings below are intentionally in Spanish.)
grafico.titulo <- "¿Hay alguna tendencia de la temperatura con el paso de los años?"
# Caption for the chart, parameterized by the configured date range `ajustes$rango`.
grafico.descripcion <- paste("Grafico de caja para la temperatura respecto al tiempo",
                             "para el período comprendido entre el", ajustes$rango[1],
                             "y el", ajustes$rango[2], ". Se muestran, respecto a la temperatura
                             media histórica diaria, la máxima (rojo), mediana (negro) y mínima (azul) temperatura.")
# Analyst's observation displayed alongside the chart.
grafico.observacion <- paste("No podemos distinguir una relación entre la temperatura y el año. si tomamos los
                             últimos 10 años completos del registro (2007-2017), 6/10 años, han tenido una mediana
                             de temperatura mayor a la mediana de la temperatura histórica. De todas maneras esto no
                             permite sacar ningún tipo de conclusión acerca de cambio climático en Uruguay.")
# One boxplot per year of temperature, clipped at the end of the configured
# range; dashed lines mark the median (black), minimum (blue) and maximum
# (red) temperature.
# NOTE(review): the reference lines use consumoEE.datos3$temp_c while the
# boxplots come from clima.datos — confirm mixing the two sources is intended.
# NOTE(review): the y-axis label says "media anual" while the boxplots group
# per-observation values by year — consider rewording the label.
grafico <- 
  clima.datos %>% select(Fecha, temp_c) %>%
  filter(Fecha <= ajustes$rango[2]) %>%
  ggplot(aes(x=format(Fecha,'%Y'),y=temp_c)) +
  geom_boxplot() +
  geom_hline(yintercept=median(consumoEE.datos3$temp_c), linetype="dashed", color = "black") +
  geom_hline(yintercept=min(consumoEE.datos3$temp_c), linetype="dashed", color = "blue") +
  geom_hline(yintercept=max(consumoEE.datos3$temp_c), linetype="dashed", color = "red") +
  labs(x="Año",y="Temperatura media anual (ºC)") +
  theme(axis.text.x = element_text(angle = 45,vjust = 0.5)) 
ff0c02ecd636f7ee24ff77db4c73f186b72ab4a6 | 15f6d542ed3978587d34dc6c77100b26eb492fea | /R/access.R | 06ce0c08d6592663413db4c63cba7f6f1b92f9b6 | [] | no_license | statnet/networkDynamic | a1d508f120bd8c19092e451c85f957e23125cb3d | 2ec10feff699bcf8a9471e689bb48587742eec01 | refs/heads/master | 2023-02-21T14:44:07.474175 | 2023-02-16T07:52:48 | 2023-02-16T07:52:48 | 95,258,091 | 10 | 3 | null | 2023-02-05T15:38:00 | 2017-06-23T21:34:18 | R | UTF-8 | R | false | false | 41,866 | r | access.R | # File networkDynamic/R/access.R
# Part of the statnet package, http://statnetproject.org
#
# This software is distributed under the GPL-3 license. It is free,
# open source, and has the attribution requirements (GPL Section 7) in
# http://statnetproject.org/attribution
#
# Copyright 2012 the statnet development team
######################################################################
# This file contains various routines for accessing network class objects with
# dynamic extensions.
#
# Contents:
#
# activate.edges
# activate.vertices
# add.edges.active
# add.vertices.active
# deactivate.edges
# deactivate.vertices
# delete.edge.activity
# delete.vertex.activity
# get.edgeIDs.active
# get.edges.active
# get.neighborhood.active
# get.change.times
# is.active
# is.adjacent.active
# network.dyadcount.active
# network.edgecount.active
# network.naedgecount.active
# network.size.active
# insert.spell
# delete.spell
#
######################################################################
#Function to activate the selected edges at the appropriate time. If already
#active, activation has no effect; otherwise, it inserts an onset time at
#the appropriate mark. Edges without an "active" attribute are given one.
#
# Arguments:
#   x        - a network object; on return it is also written back into the
#              caller's variable (see the on.exit at the bottom)
#   onset, terminus, length, at
#            - activity spell specification; exactly one of {at,
#              onset+terminus, onset+length, length+terminus} may be given,
#              or none at all, which activates over (-Inf, Inf)
#   e        - numeric vector of edge IDs to activate (defaults to all edges)
activate.edges <- function(x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                        e=seq_along(x$mel)){
  xn <- substitute(x) # needed for proper assignment in calling environment
  # checks for proper inputs, translations into onset and terminus
  if(!is.network(x))
    stop("activate.edges requires an argument of class network.\n")
  if(!is.null(at)) {
    if(!is.vector(at) || !is.numeric(at))
      stop("Activation times must be a numeric vector in activate.edges.\n")
    # 'at' is mutually exclusive with the other three spell arguments
    if(!(is.null(onset) && is.null(terminus) && is.null(length)))
      stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
  } else {
    if(!is.null(onset) && (!is.vector(onset) || !is.numeric(onset)))
      stop("Onset times must be a numeric vector in activate.edges.\n")
    if(!is.null(terminus) && (!is.vector(terminus) || !is.numeric(terminus)))
      stop("Terminus times must be a numeric vector in activate.edges.\n")
    if(!is.null(length) && (!is.vector(length) || !is.numeric(length) || any(length < 0)))
      stop("Interval lengths must be a non-negative numeric vector in activate.edges.\n")
    # with onset given, exactly one of terminus/length must accompany it;
    # without onset, terminus and length must be given together (or neither)
    if(!is.null(onset)) {
      if(!xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    } else {
      if(xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    }
  }
  if(!is.vector(e) || !is.numeric(e))
    stop("Edge ID's, e, must be a numeric vector in activate.edges.\n")
  if(length(x$mel)>0) {
    # valid edge IDs lie in [1, x%n%"mnext" - 1]
    if((min(e,Inf) < 1) || (max(e,-Inf) > x%n%"mnext"-1))
      stop("Illegal edge in activate.edges.\n")
    # preliminaries
    e <- e[!sapply(x$mel[e], is.null)] #Filter out non-edges
    # Normalize every accepted spell specification into parallel
    # onset/terminus vectors, one entry per edge (recycled to length(e)).
    if(!is.null(at)) {
      # point event: zero-length spell at 'at'
      onset <- terminus <- rep(at, length=length(e))
    } else if (!is.null(onset)) {
      onset <- rep(onset, length=length(e))
      if(!is.null(terminus))
        terminus <- rep(terminus, length=length(e))
      else if (!is.null(length))
        terminus <- onset + rep(length, length=length(e))
    } else {
      if (is.null(terminus)) {
        # no spell information at all: activate for all time
        onset <- rep(-Inf, length=length(e))
        terminus <- rep(Inf, length=length(e))
      } else {
        terminus <- rep(terminus, length=length(e))
        onset <- terminus - rep(length, length=length(e))
      }
    }
    if(any(onset>terminus))
      stop("Onset times must precede terminus times in activate.edges.\n")
    # insert the spells via the compiled backend
    x <- .Call(ActivateEdges_R, x, onset, terminus, e, FALSE)
  }
  set.nD.class(x)
  # emulate call-by-reference: when x was passed as a simple assignable name,
  # write the modified network back into the caller's variable on exit
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
#Function to activate the selected vertices at the appropriate time. If already
#active, activation has no effect; otherwise, it inserts an onset time at
#the appropriate mark. Vertices without an "active" attribute are given one.
activate.vertices <- function(x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                              v=seq_len(network.size(x))) {
  # Capture the unevaluated argument so the modified network can be assigned
  # back into the caller's variable on exit (R's copy-on-modify semantics
  # would otherwise discard the changes; see on.exit() at the bottom).
  xn <- substitute(x) # needed for proper assignment in calling environment
  # checks for proper inputs: the activation spell must be given by exactly
  # one of {at, onset+terminus, onset+length, length+terminus}, or by none
  # at all (in which case vertices are activated over (-Inf, Inf))
  if(!is.network(x))
    stop("activate.vertices requires an argument of class network.\n")
  if(!is.null(at)) {
    if(!is.vector(at) || !is.numeric(at))
      stop("Activation times must be a numeric vector in activate.vertices.\n")
    # 'at' is mutually exclusive with the interval-style arguments
    if(!(is.null(onset) && is.null(terminus) && is.null(length)))
      stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
  } else {
    if(!is.null(onset) && (!is.vector(onset) || !is.numeric(onset)))
      stop("Onset times must be a numeric vector in activate.vertices.\n")
    if(!is.null(terminus) && (!is.vector(terminus) || !is.numeric(terminus)))
      stop("Terminus times must be a numeric vector in activate.vertices.\n")
    if(!is.null(length) && (!is.vector(length) || !is.numeric(length) || any(length < 0)))
      stop("Interval lengths must be a non-negative numeric vector in activate.vertices.\n")
    if(!is.null(onset)) {
      # onset given: exactly one of terminus/length must close the interval
      if(!xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    } else {
      # no onset: either both terminus+length, or neither (meaning (-Inf, Inf))
      if(xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    }
  }
  if(!is.vector(v) || !is.numeric(v))
    stop("Vertex ID's, v, must be a numeric vector in activate.vertices.\n")
  if((min(v,Inf) < 1) || (max(v,-Inf) > network.size(x)))
    stop("Illegal vertex in activate.vertices.\n")
  # preliminaries: drop ids of deleted vertices, then expand the spell
  # specification into per-vertex onset/terminus vectors (recycled to length(v))
  v <- v[!sapply(x$val[v], is.null)] #Filter out non-vertices
  if(!is.null(at)) {
    # point spell [at, at]
    onset <- terminus <- rep(at, length=length(v))
  } else if (!is.null(onset)) {
    onset <- rep(onset, length=length(v))
    if(!is.null(terminus))
      terminus <- rep(terminus, length=length(v))
    else if (!is.null(length))
      terminus <- onset + rep(length, length=length(v))
  } else {
    if (is.null(terminus)) {
      # no spell arguments at all: activate for all time
      onset <- rep(-Inf, length=length(v))
      terminus <- rep(Inf, length=length(v))
    } else {
      # terminus+length form: work backwards to the onset
      terminus <- rep(terminus, length=length(v))
      onset <- terminus - rep(length, length=length(v))
    }
  }
  if(any(onset>terminus))
    stop("Onset times must precede terminus times in activate.vertices.\n")
  # choosing to ignore activation requests of (Inf,Inf) or (-Inf, -Inf)
  ignore <- (onset==Inf) | (terminus==-Inf)
  if(any(ignore)){
    onset<-onset[!ignore]; terminus<-terminus[!ignore]; v<-v[!ignore]
  }
  if(length(v) > 0) {
    # get current active matrices and insert spells; duplicate ids in v each
    # insert their own spell into the same vertex's matrix
    uniqueV<-unique(v)
    active <- lapply(x$val[uniqueV], "[[", "active")
    infMat<-matrix(c(-Inf,Inf),1,2)
    for(i in 1:length(v)){
      index<-which(uniqueV==v[i])
      # an already always-active (-Inf, Inf) matrix cannot be extended further
      if(!(identical(active[[index]], infMat)))
        active[[index]] <- insert.spell(active[[index]], onset[i], terminus[i])
    }
    set.vertex.attribute(x, "active", active, uniqueV)
  }
  set.nD.class(x)
  # write the modified network back into the caller's variable when x was
  # passed as an assignable expression (e.g. a plain name)
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
#Function to deactivate the selected edges at the appropriate time. If already
#inactive, activation has no effect; otherwise, it inserts a termination time at
#the appropriate mark. Edges without an "active" attribute are given one.
deactivate.edges<-function(x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                           e=seq_along(x$mel)){
  # Capture the unevaluated argument so the modified network can be assigned
  # back into the caller's variable on exit.
  xn <- substitute(x) # needed for proper assignment in calling environment
  # checks for proper inputs: the spell to remove must be given by exactly
  # one of {at, onset+terminus, onset+length, length+terminus}, or by none
  # at all (in which case edges are deactivated for all time)
  if(!is.network(x))
    stop("deactivate.edges requires an argument of class network.\n")
  if(!is.null(at)) {
    if(!is.vector(at) || !is.numeric(at))
      stop("Deactivation times must be a numeric vector in deactivate.edges.\n")
    # 'at' is mutually exclusive with the interval-style arguments
    if(!(is.null(onset) && is.null(terminus) && is.null(length)))
      stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
  } else {
    if(!is.null(onset) && (!is.vector(onset) || !is.numeric(onset)))
      stop("Onset times must be a numeric vector in deactivate.edges.\n")
    if(!is.null(terminus) && (!is.vector(terminus) || !is.numeric(terminus)))
      stop("Terminus times must be a numeric vector in deactivate.edges.\n")
    if(!is.null(length) && (!is.vector(length) || !is.numeric(length) || any(length < 0)))
      stop("Interval lengths must be a non-negative numeric vector in deactivate.edges.\n")
    if(!is.null(onset)) {
      if(!xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    } else {
      if(xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    }
  }
  if(!is.vector(e) || !is.numeric(e))
    stop("Edge ID's, e, must be a numeric vector in deactivate.edges.\n")
  if(length(x$mel) > 0) {
    if((min(e,Inf) < 1) || (max(e,-Inf) > x%n%"mnext"-1))
      stop("Illegal edge in deactivate.edges.\n")
    # preliminaries: drop ids of deleted edges, then expand the spell
    # specification into per-edge onset/terminus vectors
    e <- e[!sapply(x$mel[e], is.null)] #Filter out non-edges
    if(length(e)==0) return(invisible(set.nD.class(x)))
    if(!is.null(at)) {
      onset <- terminus <- rep(at, length=length(e))
    } else if (!is.null(onset)) {
      onset <- rep(onset, length=length(e))
      if(!is.null(terminus))
        terminus <- rep(terminus, length=length(e))
      else if (!is.null(length))
        terminus <- onset + rep(length, length=length(e))
    } else {
      if (is.null(terminus)) {
        # no spell arguments: total deactivation, encoded as the (Inf, Inf) pair
        onset <- terminus <- rep(Inf, length=length(e))
      }else {
        # terminus+length form: work backwards to the onset
        terminus <- rep(terminus, length=length(e))
        onset <- terminus - rep(length, length=length(e))
      }
    }
    if(any(onset>terminus))
      stop("Onset times must precede terminus times in deactivate.edges.\n")
    #Get existing activity attributes and update as needed
    active<-lapply(lapply(x$mel[e],"[[","atl"),"[[","active")
    for(i in seq_along(active)){
      if(is.infinite(onset[i]) && is.infinite(terminus[i])){
        # total deactivation: store the 'null spell' (Inf, Inf)
        active[[i]]<-matrix(c(Inf, Inf),1,2)
      }else if(is.null(active[[i]])){
        # edge had no activity matrix, i.e. was implicitly always active:
        # build the complement of the removed interval directly
        if(is.infinite(onset[i]))
          active[[i]]<-matrix(c(terminus[i],Inf),1,2)
        else if (is.infinite(terminus[i]))
          active[[i]]<-matrix(c(-Inf,onset[i]),1,2)
        else
          # rows are (-Inf, onset) and (terminus, Inf)
          active[[i]]<-matrix(c(-Inf,terminus[i],onset[i],Inf),2,2)
      }else if(!all(active[[i]]==Inf) && !all(active[[i]]==-Inf)){
        # ordinary case: cut the interval out of the existing spell matrix
        active[[i]] <- delete.spell(active[[i]], onset[i], terminus[i])
      }
    }
    set.edge.attribute(x=x,attrname="active",value=active,e=e)
  }
  set.nD.class(x)
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
#Function to deactivate the selected vertices at the appropriate time. If
#already inactive, activation has no effect; otherwise, it inserts a termination
#time at the appropriate mark. Vertices without an "active" attribute are given
#one.
deactivate.vertices<-function(x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                              v=seq_len(network.size(x)), deactivate.edges=FALSE){
  # Capture the unevaluated argument so the modified network can be assigned
  # back into the caller's variable on exit.
  xn <- substitute(x) # needed for proper assignment in calling environment
  # checks for proper inputs: the spell to remove must be given by exactly
  # one of {at, onset+terminus, onset+length, length+terminus}, or by none
  # at all (in which case vertices are deactivated for all time)
  if(!is.network(x))
    stop("deactivate.vertices requires an argument of class network.\n")
  if(!is.null(at)) {
    if(!is.vector(at) || !is.numeric(at))
      stop("Deactivation times must be a numeric vector in deactivate.vertices.\n")
    if(!(is.null(onset) && is.null(terminus) && is.null(length)))
      stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
  } else {
    if(!is.null(onset) && (!is.vector(onset) || !is.numeric(onset)))
      stop("Onset times must be a numeric vector in deactivate.vertices.\n")
    if(!is.null(terminus) && (!is.vector(terminus) || !is.numeric(terminus)))
      stop("Terminus times must be a numeric vector in deactivate.vertices.\n")
    if(!is.null(length) && (!is.vector(length) || !is.numeric(length) || any(length < 0)))
      stop("Interval lengths must be a non-negative numeric vector in deactivate.vertices.\n")
    if(!is.null(onset)) {
      if(!xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    } else {
      if(xor(is.null(terminus),is.null(length)))
        stop("Spells must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    }
  }
  if(!is.vector(v) || !is.numeric(v))
    stop("Vertices, v, must be a numeric vector in deactivate.vertices.\n")
  if((min(v,Inf) < 1) || (max(v,-Inf) > network.size(x)))
    stop("Illegal vertex in deactivate.vertices.\n")
  # preliminaries: drop ids of deleted vertices
  v <- v[!sapply(x$val[v], is.null)] #Filter out non-vertices
  if(length(v) > 0) {
    # expand the spell specification into per-vertex onset/terminus vectors
    if(!is.null(at)) {
      onset <- terminus <- rep(at, length=length(v))
    } else if (!is.null(onset)) {
      onset <- rep(onset, length=length(v))
      if(!is.null(terminus))
        terminus <- rep(terminus, length=length(v))
      else if (!is.null(length))
        terminus <- onset + rep(length, length=length(v))
    } else {
      if (is.null(terminus)) {
        # no spell arguments: total deactivation, encoded as the (Inf, Inf) pair
        onset <- terminus <- rep(Inf, length=length(v))
      }else {
        terminus <- rep(terminus, length=length(v))
        onset <- terminus - rep(length, length=length(v))
      }
    }
    if(any(onset>terminus))
      stop("Onset times must precede terminus times in deactivate.vertices.\n")
    #Get existing activity attributes and update as needed
    active<-lapply(x$val[v],"[[","active")
    for(i in seq_along(active)){
      if(is.infinite(onset[i]) && is.infinite(terminus[i])){
        # total deactivation: store the 'null spell' (Inf, Inf)
        active[[i]]<-matrix(c(Inf, Inf),1,2)
      }else if(is.null(active[[i]])){
        # vertex had no activity matrix, i.e. was implicitly always active:
        # build the complement of the removed interval directly
        if(is.infinite(onset[i]))
          active[[i]]<-matrix(c(terminus[i],Inf),1,2)
        else if (is.infinite(terminus[i]))
          active[[i]]<-matrix(c(-Inf,onset[i]),1,2)
        else
          # rows are (-Inf, onset) and (terminus, Inf)
          active[[i]]<-matrix(c(-Inf,terminus[i],onset[i],Inf),2,2)
      }else if(!all(active[[i]]==Inf) && !all(active[[i]]==-Inf)){
        # ordinary case: cut the interval out of the existing spell matrix
        active[[i]] <- delete.spell(active[[i]], onset[i], terminus[i])
      }
    }
    set.vertex.attribute(x=x,attrname="active",value=active,v=v)
  }
  # deactivate the associated edges, if user wants
  # NOTE(review): if length(v) > 0 above, onset/terminus have already been
  # recycled to length(v); they are then passed below as whole vectors and
  # recycled again against each vertex's incident edges -- confirm this
  # spell-to-edge matching is the intended behavior.
  if (deactivate.edges) {
    e = NULL
    for (vert in v) {
      e = c(e, get.edgeIDs.active(x, v=vert, onset=onset, terminus=terminus,
                                  length=length, at=at, neighborhood="combined"))
    }
    if (length(e) > 0) {
      deactivate.edges(x, onset=onset, terminus=terminus, length=length, at=at, e=unique(e))
    }
  }
  set.nD.class(x)
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
# adds new edges, active at the given time
add.edges.active <- function(x, tail, head, names.eval=NULL, vals.eval=NULL, onset=NULL, terminus=NULL, length=NULL, at=NULL, ...) {
  # Add edges from tail[i] to head[i] and activate them over the given spell;
  # the spell arguments are passed straight through to activate.edges.
  # Modifies x in the calling frame (same NSE write-back pattern as
  # activate.edges).
  xn <- substitute(x) # needed for proper assignment in calling environment
  if(!is.network(x))
    stop("add.edges.active requires an argument of class network.\n")
  if(!is.numeric(tail) || !is.numeric(head))
    stop("The vertex ID's given in 'tail' and 'head' must be a numeric vector in add.edges.active.\n")
  if(min(tail) < 1 || max(tail) > network.size(x))
    stop("Illegal vertex in 'tail' vector in add.edges.active .\n")
  if(min(head) < 1 || max(head) > network.size(x))
    stop("Illegal vertex in 'head' vector in add.edges.active .\n")
  # recycle tail and head to a common length when they differ
  n = max(length(tail), length(head))
  if(length(tail) != length(head)) {
    tail = rep(tail, length=n)
    head = rep(head, length=n)
  }
  add.edges(x, tail, head,names.eval,vals.eval)
  # the n new edges received ids mnext-n .. mnext-1; activate exactly those
  activate.edges(x, onset, terminus, length, at, e=seq(x%n%"mnext"-n, x%n%"mnext"-1))
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
# adds new vertices, active at the given time
add.vertices.active <- function(x, nv, vattr=NULL, last.mode=TRUE, onset=NULL, terminus=NULL, length=NULL, at=NULL,...) {
  # Add 'nv' new vertices to the network and activate them over the given
  # spell; the spell arguments are passed straight through to
  # activate.vertices. Modifies x in the calling frame.
  if(!is.network(x))
    stop("add.vertices.active requires an argument of class network.\n")
  if(!is.numeric(nv))
    # fixed typo in the error message: was "add.verices.active"
    stop("The number of vertices given in 'nv' must be numeric in add.vertices.active.\n")
  # capture the unevaluated argument so the modified network can be assigned
  # back into the caller's variable on exit
  xn <- substitute(x) # needed for proper assignment in calling environment
  if (nv>0){
    add.vertices(x, nv,vattr,last.mode)
    # the nv new vertices occupy the last nv ids; activate exactly those
    activate.vertices(x, onset, terminus, length, at, v=seq(x%n%"n"-nv+1, x%n%"n"))
  } else {
    # nothing to add; still ensure the object carries the networkDynamic class
    if(!is.networkDynamic(x)){
      x<-set.nD.class(x)
    }
  }
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
# ------------- get.change.times ---------
# pulls out all of the times at which activity changes
# TODO: may be problems with the 'null' (Inf,Inf) spell
get.change.times <- function (x, vertex.activity=TRUE,edge.activity=TRUE, ignore.inf=TRUE,vertex.attribute.activity=TRUE,edge.attribute.activity=TRUE,network.attribute.activity=TRUE) {
  # Collect the sorted, unique set of time points at which anything in the
  # network changes: vertex/edge activity spell boundaries and, optionally,
  # the change times of dynamic (TEA) vertex, edge, and network attributes.
  # The logical arguments toggle which sources are included; ignore.inf
  # drops infinite endpoints (e.g. from always-active spells).
  if(!is.network(x))
    stop("get.change.times requires an argument of class network.\n")
  # scalar conditions below use && / || (the original used the vectorized
  # & and | operators in scalar if() tests)
  if(!is.logical(vertex.activity) || !is.logical(edge.activity))
    stop("get.change.times requires that vertex.activity and edge.activity be logicals.\n")
  times <- numeric(0)
  if(edge.activity && network.edgecount(x)>0){
    # spell matrices unlist to their onset/terminus values
    spls<-get.edge.attribute(x$mel, "active",unlist=FALSE)
    spls<-.removeNullSpells(spls)
    times <- c(times,unlist(spls) )
  }
  if(vertex.activity && network.size(x)>0){
    if("active"%in%list.vertex.attributes(x)){
      spls<-get.vertex.attribute(x, "active",unlist=FALSE)
      spls<-.removeNullSpells(spls)
      times <- c(times, unlist(spls))
    }
  }
  if(vertex.attribute.activity && network.size(x)>0){
    # each TEA value is a list whose second element holds the spell times
    attrs<-list.vertex.attributes.active(x,onset=-Inf,terminus=Inf,dynamic.only=TRUE)
    for(attr in attrs){
      vals <- get.vertex.attribute.active(x,sub('.active','',attr),onset=-Inf,terminus=Inf,return.tea=TRUE)
      vals <- vals[!is.na(vals)]
      times<-c(times, unique(unlist(sapply(vals,'[[',2,simplify=FALSE))))
    }
  }
  if(edge.attribute.activity && network.edgecount(x)>0){
    attrs<-list.edge.attributes.active(x,onset=-Inf,terminus=Inf,dynamic.only=TRUE)
    for(attr in attrs){
      vals<-get.edge.attribute.active(x,sub('.active','',attr),onset=-Inf,terminus=Inf,return.tea=TRUE)
      vals<-vals[!is.na(vals)]
      times<-c(times, unique(unlist(sapply(vals,'[[',2,simplify=FALSE))))
    }
  }
  if(network.attribute.activity){
    attrs<-list.network.attributes.active(x,onset=-Inf,terminus=Inf,dynamic.only=TRUE)
    for(attr in attrs){
      times<-c(times, unique(as.vector(get.network.attribute.active(x,sub('.active','',attr),onset=-Inf,terminus=Inf,return.tea=TRUE)[[2]])))
    }
  }
  if(ignore.inf){
    times <- sort(unique(times[!is.infinite(times)]))
  } else {
    times <- sort(unique(times))
  }
  return(times)
}
#Variant of get.edgeIDs with dynamic query support
get.edgeIDs.active<-function(x,v,onset=NULL,terminus=NULL,length=NULL, at=NULL,
                             alter=NULL,neighborhood=c("out", "in", "combined"),
                             rule=c("any","all","earliest","latest"),na.omit=TRUE,active.default=TRUE){
  # Dynamic-aware variant of get.edgeIDs: return the ids of edges incident
  # on vertex v that are active during the query spell.
  if(missing(v)){
    stop("'v' parameter must be specified with a vertex id to indicate which vertex to search for incident edges")
  }
  rule <- match.arg(rule)
  # start from the static incident-edge ids, then keep only the active ones
  candidate.ids <- get.edgeIDs(x=x, v=v, alter=alter,
                               neighborhood=neighborhood, na.omit=na.omit)
  if(length(candidate.ids) == 0){
    return(integer(0))
  }
  keep <- is.active(x=x, onset=onset, terminus=terminus, length=length, at=at,
                    e=candidate.ids, v=NULL, rule=rule,
                    active.default=active.default)
  if(!any(keep)){
    return(integer(0))
  }
  candidate.ids[keep]
}
#Variant of get.edges with dynamic query support. (Note: not safe in the long
#run...)
get.edges.active<-function(x,v,onset=NULL,terminus=NULL,length=NULL, at=NULL,
                           alter=NULL,neighborhood=c("out", "in", "combined"),
                           rule=c("any","all","earliest","latest"),na.omit=TRUE,active.default=TRUE){
  # Dynamic-aware variant of get.edges: return the edge structures (elements
  # of x$mel) incident on vertex v that are active during the query spell.
  if(missing(v)){
    stop("'v' parameter must be specified with vertex id to indicate which vertex to search for incident edges")
  }
  rule <- match.arg(rule)
  # static incident-edge ids first, then filter down to the active subset
  edge.ids <- get.edgeIDs(x=x, v=v, alter=alter,
                          neighborhood=neighborhood, na.omit=na.omit)
  if(length(edge.ids) == 0){
    return(list())
  }
  keep <- is.active(x=x, onset=onset, terminus=terminus, length=length, at=at,
                    e=edge.ids, v=NULL, rule=rule,
                    active.default=active.default)
  if(!any(keep)){
    return(list())
  }
  x$mel[edge.ids][keep]
}
#Variant of get.neighborhood with dynamic query support. Slow, most likely.
get.neighborhood.active<-function(x,v,onset=NULL,terminus=NULL,length=NULL, at=NULL,
                                  type=c("out", "in", "combined"),rule=c("any","all","earliest","latest"),na.omit=TRUE,active.default=TRUE){
  # Dynamic-aware variant of get.neighborhood: the ids of vertices adjacent
  # to v via edges active during the query spell. Slow, most likely.
  rule<-match.arg(rule)
  # validate 'type' once up front; the original re-ran match.arg() inside
  # each directed branch and never validated it for undirected networks
  type<-match.arg(type)
  # get active edges and assemble neighborhood in question
  if(!is.directed(x)){
    # undirected: 'type' is irrelevant; collect both endpoints of each edge
    el<-get.edges.active(x=x,v=v,onset=onset,terminus=terminus,length=length, at=at,
                         alter=NULL, neighborhood="out", rule=rule,na.omit=na.omit,
                         active.default=active.default)
    if(length(el)>0){
      neigh<-sort(unique(c(sapply(el,"[[","inl"),sapply(el,"[[","outl"))))
      #Loop check: keep v in its own neighborhood only if a self-loop exists
      if(!any(sapply(el,function(z){(v%in%z[["inl"]])&&(v%in%z[["outl"]])})))
        neigh<-neigh[neigh!=v]
    }else
      neigh<-integer(0)
  }else{
    if(type=="out"){ # directed out neighborhood: heads of active out-edges
      el<-get.edges.active(x=x,v=v,onset=onset,terminus=terminus,length=length, at=at,
                           alter=NULL, neighborhood="out",rule=rule,na.omit=na.omit,
                           active.default=active.default)
      if(length(el)>0)
        neigh<-sort(unique(sapply(el,"[[","inl")))
      else
        neigh<-integer(0)
    }else if(type=="in"){ # directed in neighborhood: tails of active in-edges
      el<-get.edges.active(x=x,v=v,onset=onset,terminus=terminus,length=length, at=at,
                           alter=NULL, neighborhood="in",rule=rule,na.omit=na.omit,
                           active.default=active.default)
      if(length(el)>0){
        neigh<-sort(unique(sapply(el,"[[","outl")))
      }else
        neigh<-integer(0)
    }else{ # combined: union of the in and out neighborhoods
      out.el<-get.edges.active(x=x,v=v,onset=onset,terminus=terminus,length=length,at=at,
                               alter=NULL, neighborhood="out",rule=rule,na.omit=na.omit,
                               active.default=active.default)
      if(length(out.el)>0)
        neigh<-sort(unique(sapply(out.el,"[[","inl")))
      else
        neigh<-integer(0)
      in.el<-get.edges.active(x=x,v=v,onset=onset,terminus=terminus,length=length,at=at,
                              alter=NULL, neighborhood="in",rule=rule,na.omit=na.omit,
                              active.default=active.default)
      if(length(in.el)>0)
        neigh<-sort(unique(c(neigh,sapply(in.el,"[[","outl"))))
    }
  }
  neigh
}
# wrapper functions to return activity matrices of edges and vertices
# THESE WERE NOT BEING USED, see version in utilities.R
#Function to assess activity of edges (e) or vertices (v) at a given point
#or in a given interval. If an interval is specified, then rule=="any"
#returns TRUE for elements active at any time in the interval. The rule=="all"
#setting returns TRUE for elements active during the entire interval. Unless
#given either e or v, the function returns NA.
#
#Note that there are a lot of complications here surrounding Inf values. If
#an activity spell starts at time Inf, it can never match anything (including
#query onsets of Inf). If an activity spell starts at finite time and ends
#at Inf, however, it _does_ match an onset/terminus of Inf. By turns, a
#spell which begins at time -Inf should match -Inf onset times. All this is
#very annoying, and makes me wish that I'd just outlawed infinity. But that's
#how things are.
is.active<-function(x,onset=NULL,terminus=NULL,length=NULL, at=NULL, e=NULL,v=NULL, rule=c("any","all","earliest","latest"),active.default=TRUE){
  # Query whether the given edges (e) or vertices (v) are active during the
  # query spell; returns a logical vector. rule="any" matches elements active
  # at any point in the interval, rule="all" requires activity over the whole
  # interval. Elements with no "active" attribute get active.default.
  # checks for proper inputs
  if(!is.network(x))
    stop("is.active requires an argument of class network.\n")
  if(!is.null(at)) {
    if(!is.vector(at) || !is.numeric(at))
      stop("Singular time points given by the 'at' argument must be a numeric vector in is.active.\n")
    # 'at' is mutually exclusive with the interval-style arguments
    if(!(is.null(onset) && is.null(terminus) && is.null(length)))
      stop("Query intervals must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
  } else {
    if(!is.null(onset) && (!is.vector(onset) || !is.numeric(onset)))
      stop("Onset times must be a numeric vector in is.active.\n")
    if(!is.null(terminus) && (!is.vector(terminus) || !is.numeric(terminus)))
      # fixed typo in the error message: was "vector is.active"
      stop("Terminus times must be a numeric vector in is.active.\n")
    if(!is.null(length) && (!is.vector(length) || !is.numeric(length) || any(length < 0)))
      stop("Interval lengths must be a non-negative numeric vector in is.active.\n")
    if(!is.null(onset)) {
      if(!xor(is.null(terminus),is.null(length)))
        stop("Query intervals must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    } else {
      # no onset: both terminus and length are required (a query spell must
      # always be specified, unlike in the activate.* functions)
      if(is.null(terminus) || is.null(length))
        stop("Query intervals must be specified by exactly 1 of {at, onset+terminus, onset+length, length+terminus}")
    }
  }
  if(length(e)*length(v)>0)
    stop("Either edges or vertices must be specified (not both) in is.active.\n")
  # scalar && replaces the original vectorized & in this if() condition
  if(!is.null(v) && length(v)>0){
    if(!is.vector(v) || !is.numeric(v))
      stop("Vertex ID's, v, must be a numeric vector in is.active.\n")
    if((min(v) < 1) || (max(v) > network.size(x)))
      stop("Vertex ID's, v, must be in the range from 1 to the size of the network in is.active.\n")
  }
  if(!is.null(e)){
    if(!is.vector(e) || !is.numeric(e))
      stop("Edge ID's, e, must be a numeric vector in is.active.\n")
    if((min(e,Inf) < 1) || (max(e,-Inf) > x%n%"mnext"-1))
      stop("Edge ID's in is.active e argument must be in the range from 1 to the number of edges in the network.\n")
  }
  # vertices or edges?
  if(length(e)){
    origelen<-length(e)
    e <- e[!sapply(x$mel[e], is.null)] # filter out non-edges
    # if e were omitted due to null edges, give warning
    if (length(e)< origelen){
      warning("Some edge IDs in the e argument correspond to deleted edges and will be ignored. Indices of values returned will not correspond to elements of e.")
    }
  }
  if(length(v))
    v <- v[!sapply(x$val[v], is.null)] # filter out non-vertices TODO: can this happen?
  if(length(e)+length(v)==0)
    return(logical(0))
  # pull the activity spell matrices for the chosen element set
  if(length(e)){
    active<-lapply(lapply(x$mel[e],"[[","atl"),"[[","active")
    ev <- e
  } else {
    active<-lapply(x$val[v],"[[","active")
    ev <- v
  }
  # preliminaries: expand the spell specification to one query interval per
  # element (recycled to length(ev))
  rule<-match.arg(rule)
  if(!is.null(at)) {
    onset <- terminus <- rep(at, length=length(ev))
  } else if (!is.null(onset)) {
    onset <- rep(onset, length=length(ev))
    if(!is.null(terminus))
      terminus <- rep(terminus, length=length(ev))
    else if (!is.null(length))
      terminus <- onset + rep(length, length=length(ev))
  } else {
    terminus <- rep(terminus, length=length(ev))
    onset <- terminus - rep(length, length=length(ev))
  }
  if(any(onset>terminus))
    stop("Onset times must precede terminus times in is.active.\n")
  # the actual interval matching is done in C
  # return(.Call('IsActiveInVector', onset, terminus, active, (match.arg(rule) == 'all'), active.default, get("debug.output", envir=.GlobalEnv)))
  return(.Call(IsActiveInVector_R, onset, terminus, active, (match.arg(rule) == 'all'), active.default, FALSE))
}
#Variant of is.adjacent for networks with dynamic extensions. Slow, but will
#get the job done.
is.adjacent.active<-function(x,vi,vj,onset=NULL,terminus=NULL,length=NULL, at=NULL,
                             rule=c("any","all","earliest","latest"),na.omit=FALSE,active.default=TRUE){
  # Dynamic-aware adjacency test: TRUE iff at least one edge from vi to vj
  # is active during the query spell.
  rule <- match.arg(rule)
  # all edges from vi to vj, regardless of activity
  edge.ids <- get.edgeIDs(x=x, v=vi, alter=vj, neighborhood="out", na.omit=na.omit)
  if(length(edge.ids) == 0){
    return(FALSE)
  }
  # TRUE when any of those edges matches the activity query
  any(is.active(x=x, onset=onset, terminus=terminus, length=length, at=at,
                e=edge.ids, v=NULL, rule=rule, active.default=active.default))
}
#Variant network.dyadcount which uses only active vertices.
network.dyadcount.active<-function (x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                                    rule=c("any","all","earliest","latest"), na.omit = TRUE, active.default=TRUE, ...) {
  # Count the dyads (potential edges) among the vertices active during the
  # query spell; with na.omit=TRUE, dyads carried by missing (NA) edges in
  # the extracted active network are subtracted.
  if (!is.network(x))
    stop("network.dyadcount.active requires an argument of class network.")
  rule<-match.arg(rule)
  if (is.bipartite(x)) {
    bip = x%n%"bipartite"
    # count active vertices in each mode; plain if/else replaces the
    # original scalar uses of the vectorized ifelse() function
    if (bip >= 0) {
      nactor <- sum(is.active(x=x,onset=onset,terminus=terminus,length=length,at=at,
                              v=seq_len(bip),rule=rule, active.default=active.default))
    } else {
      nactor <- 0
    }
    if (x%n%"n">0 && bip<x%n%"n") {
      nevent <- sum(is.active(x=x,onset=onset,terminus=terminus,length=length,at=at,
                              v=(bip+1):(x%n%"n"),rule=rule, active.default=active.default))
    } else {
      nevent <- 0
    }
    if (is.directed(x))
      dyads <- nactor * nevent * 2
    else
      dyads <- nactor * nevent
  } else {
    if (x%n%"n">0) {
      nodes <- network.size.active(x=x,onset=onset,terminus=terminus,length=length,at=at,
                                   rule=rule, active.default=active.default)
    } else {
      nodes <- 0
    }
    if (is.directed(x))
      dyads <- nodes * (nodes - 1)
    else
      dyads <- nodes * (nodes - 1)/2
  }
  if (na.omit && x%n%"mnext" > 1 && dyads > 0) {
    # note that I've removed a code block that would replace the block below.
    # it handles the count of missing edges through the design attribute, rather
    # than through the network.naedgecount.active function. The code chunk
    # can be found in the v0.1 tag. --alc
    # second note, you cannot just count up the number of na edges and subtract
    # this from 'dyads', since 'dyads' is counted over the smaller active network,
    # and 'na.edgecount' counts active missing edges over the full network.
    # you can have edges that are active, whose head/tail nodes are not, and this
    # leads to incorrect counts.
    # given that we're supposed to upload to cran tomorrow, I'm using the quick
    # fix: this will be slow. We (I) should fix this later.
    xextracted = network.extract(x=x,onset=onset,terminus=terminus,length=length,at=at,
                                 rule=rule,active.default=active.default)
    na.edgecount = network.naedgecount.active(x=xextracted,onset=onset,terminus=terminus,
                                              length=length,at=at,rule=rule, active.default=active.default)
    dyads <- dyads - na.edgecount
  }
  dyads
}
#Variant network.edgecount which counts only active edges. Not long-run safe.
network.edgecount.active<-function (x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                                    rule=c("any","all","earliest","latest"), na.omit = TRUE, active.default=TRUE,...){
  # Count the edges active during the query spell; with na.omit=TRUE, edges
  # flagged as missing (edge attribute "na") are excluded from the count.
  # Not long-run safe.
  rule <- match.arg(rule)
  if(x%n%"mnext" <= 1){
    # no edges have ever been added to this network
    return(0)
  }
  act <- is.active(x=x, onset=onset, terminus=terminus, length=length, at=at,
                   e=valid.eids(x), v=NULL, rule=rule,
                   active.default=active.default)
  if(na.omit){
    sum(act*(1-(x%e%"na")))
  } else {
    sum(act)
  }
}
#Variant network.naedgecount which counts only active edges. Not safe.
network.naedgecount.active<-function (x, onset=NULL, terminus=NULL, length=NULL, at=NULL,
                                      rule=c("any","all","earliest","latest"), active.default=TRUE,...){
  # Count the active edges that are flagged as missing (edge attribute "na").
  # Not safe.
  # Validate 'rule' here for consistency with network.edgecount.active; the
  # original passed the whole default vector down and relied on the
  # match.arg() call inside is.active().
  rule<-match.arg(rule)
  if(x%n%"mnext">1) {
    act<-is.active(x=x,onset=onset,terminus=terminus,length=length, at=at,
                   e=valid.eids(x),v=NULL,rule=rule, active.default=active.default)
    sum(act*(x%e%"na"))
  }else{
    0
  }
}
#Network size which counts only active vertices - don't use for other purposes!
network.size.active<-function(x,onset=NULL,terminus=NULL,length=NULL, at=NULL,
                              rule=c("any","all","earliest","latest"),active.default=TRUE,...){
  # Count the vertices active during the query spell. Intended only as a
  # dynamic 'network size' helper - don't use for other purposes!
  rule <- match.arg(rule)
  vertex.active <- is.active(x=x, onset=onset, terminus=terminus, length=length,
                             at=at, e=NULL, v=seq_len(network.size(x)),
                             rule=rule, active.default=active.default)
  sum(vertex.active)
}
#--------------------------------------------------------------
# this function removes the activity matrices for a given
# set of edges.
#
# @param
# x: a networkDynamic or network object
# e: the edges whose spell matrices are to be deleted;
# default=all
#
# @return:
# the networkDynamic object without the spell matrices of 'e'
#------------------------------------------------------------------
delete.edge.activity <- function(x, e=seq_along(x$mel)) {
  # Remove the activity ("active") spell matrices from the given edges,
  # leaving the edges themselves in place. Modifies x in the calling frame.
  xn <- substitute(x) # needed for proper assignment in calling environment
  # NOTE(review): the error messages below refer to "remove.activity"
  # although the function is named delete.edge.activity -- confirm the
  # intended wording before changing user-visible strings.
  if(!is.network(x))
    stop("The remove.activity function requires that x be a network object.\n")
  if(!is.vector(e) || !is.numeric(e))
    stop("Edge ID's, e, must be a numeric vector in remove.activity.\n")
  if((min(e,Inf) < 1) || (max(e,-Inf) > x%n%"mnext"-1))
    stop("Illegal edge in remove.activity argument e.\n")
  # if deleting all edges, can use network's delete.edge.attribute
  # function, otherwise need to manually remove activity matrices.
  # NOTE(review): the length comparison treats any e of full length as
  # "all edges"; duplicated ids in e could trigger this branch -- confirm.
  if (length(e) == length(x$mel)) {
    delete.edge.attribute(x, "active")
  } else {
    # keep the spell matrices of every edge NOT listed in e, clear the
    # attribute wholesale, then restore the kept matrices
    leave.active = setdiff(seq_along(x$mel), e)
    leave.active = leave.active[!sapply(x$mel[leave.active], is.null)] # filter out non-edges
    left.activity = lapply(lapply(x$mel[leave.active], "[[", "atl"), "[[", "active")
    delete.edge.attribute(x, "active")
    set.edge.attribute(x, "active", left.activity, leave.active)
  }
  # write the modified network back into the caller's variable
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
#--------------------------------------------------------------
# this function removes the activity matrices for a given
# set of vertices.
#
# @param
# x: a networkDynamic or network object
# v: the vertices whose spell matrices are to be deleted;
# default=all
#
# @return:
# the networkDynamic object without the spell matrices of 'v'
#------------------------------------------------------------------
delete.vertex.activity <- function(x, v=seq_len(network.size(x))) {
  # Remove the activity ("active") spell matrices from the given vertices,
  # leaving the vertices themselves in place. Modifies x in the calling frame.
  xn <- substitute(x) # needed for proper assignment in calling environment
  # NOTE(review): the error messages below refer to "remove.activity"
  # although the function is named delete.vertex.activity -- confirm the
  # intended wording before changing user-visible strings.
  if(!is.network(x))
    stop("The remove.activity function requires that x be a network object.\n")
  if(!is.vector(v) || !is.numeric(v))
    stop("Vertex ID's, v, must be a numeric vector in remove.activity.\n")
  if((min(v,Inf) < 1) || (max(v,-Inf) > network.size(x)))
    stop("Illegal vertex in remove.activity argument v.\n")
  # if deleting all vertices, can use network's delete.vertex.attribute
  # function, otherwise need to manually remove activity matrices.
  if (length(v) == network.size(x)) {
    delete.vertex.attribute(x, "active")
  } else {
    # keep the spell matrices of every vertex NOT listed in v, clear the
    # attribute wholesale, then restore the kept matrices
    leave.active = setdiff(seq_along(x$val), v)
    leave.active = leave.active[!sapply(x$val[leave.active], is.null)] #Filter out non-vertices
    left.activity = lapply(x$val[leave.active], "[[", "active")
    delete.vertex.attribute(x, "active")
    set.vertex.attribute(x, "active", left.activity, leave.active)
  }
  # write the modified network back into the caller's variable
  if(.validLHS(xn, parent.frame()))
    on.exit(eval.parent(call('<-',xn, x)))
  invisible(x)
}
#--------------------------------------------------------------
# this is a helper function to insert a single valid spell.
# valid means that (i) onset <= terminus and (ii) onset != Inf
# and (iii) terminus != -Inf
#
# @param
# spells : the 2x(number of spells) matrix of current
# spells
# onset : the onset time of the spell to be inserted;
# default=-Inf
# terminus: the terminus time of the spell to be inserted;
# default=Inf
#
# @return:
# the updated spells
#------------------------------------------------------------------
insert.spell<-function(spells, onset=-Inf, terminus=Inf){
  # Insert a single valid spell [onset, terminus] into a spell matrix (see
  # the header comment above for the validity conditions), merging with any
  # overlapping or adjacent spells. The work is done in C by InsertSpell_R.
  # Removed: ~44 lines of the retired pure-R implementation that followed an
  # unconditional return() and were therefore unreachable dead code (the
  # original comment read "forget all the below, do it in C").
  .Call(InsertSpell_R, spells, onset, terminus, FALSE)
}
#--------------------------------------------------------------
# this is a helper function to delete a single valid spell.
# valid means that (i) onset <= terminus and (ii) onset != Inf
# and (iii) terminus != -Inf
#
# @param
# spells : the 2x(number of spells) matrix of current
# spells, assumed not null
# onset : the onset time of the spell to be deleted;
# default=-Inf
# terminus: the terminus time of the spell to be deleted;
# default=Inf
#
# @return:
# the updated spells
#------------------------------------------------------------------
delete.spell<-function(spells, onset=-Inf, terminus=Inf){
  # Remove the interval [onset, terminus] from a spell matrix (rows are
  # [onset, terminus] pairs in increasing order; assumed non-NULL),
  # truncating spells that partially overlap it and splitting a spell the
  # interval falls strictly inside. A zero-length query (onset == terminus)
  # deletes only an exactly matching point spell; points interior to an
  # interval are left alone. Returns the (Inf, Inf) 'null spell' when
  # nothing remains.
  # case where no work is needed: the query lies entirely after the last
  # spell or entirely before the first...
  ns = NROW(spells)
  if(onset > spells[ns,2] || (onset==spells[ns,2] && onset!=spells[ns,1]))
    return(spells)
  if(terminus < spells[1,1] || (terminus==spells[1,1] && onset!=terminus))
    return(spells)
  # ...or falls exactly into the gap between two consecutive spells
  afton<-which(onset>=spells[,2])
  befon<-which(terminus<=spells[,1])
  if(length(afton)*length(befon)!=0 && max(afton)==min(befon)){
    afton = max(afton)
    if(!(spells[afton,1]==spells[afton,2] && onset==spells[afton,1]))
      return(spells)
  }
  # deactivation of points (disallowed for points in intervals)
  if (onset==terminus){
    pton = which(onset==spells[,1])
    if(length(pton)>0 && spells[max(pton),1]==spells[max(pton),2]){
      if(ns==1)
        return(matrix(c(Inf,Inf),1,2))   # last spell removed: null spell
      else
        return(spells[-max(pton),,drop=FALSE])
    }else{
      return(spells)
    }
  }
  # deactivation of intervals: erow indexes the last earlier row kept
  # intact, lrow the first later row kept intact
  if(onset<=spells[1,1])
    erow = 0 # the row number of the earlier rows to save
  else
    erow = max(which(onset>spells[,1]))
  if(terminus>spells[ns,2] || (terminus==spells[ns,2] &&
                               spells[ns,2]!=spells[ns,1])){
    lrow = ns+1 # the row number of the later rows to save
  } else if (terminus==spells[ns,2] &&
             spells[ns,2]==spells[ns,1]) {
    # when last spell is a matching point interval, keep it
    # (scalar && replaces the original vectorized & in this condition)
    lrow <- ns
  } else {
    lrow = min(which(terminus<spells[,2]))
    if(lrow>1 && spells[lrow-1,2]==terminus && spells[lrow-1,1]==terminus)
      lrow = lrow-1
  }
  # erow and lrow adjustments (truncations); when the query is strictly
  # interior to one spell, that spell is split in two via 'splitspell'
  splitspell=NULL
  if(lrow==erow)
    splitspell = matrix(spells[lrow,],1,2)
  if(erow!=0 && onset < spells[erow,2])
    spells[erow,2] = onset
  if(lrow!=(ns+1) && terminus > spells[lrow,1]){
    if(lrow==erow){ # divided activation interval
      splitspell[1,1]=terminus
      lrow=lrow+1
    }else{
      spells[lrow,1] = terminus
    }
  }
  # reassemble the surviving rows
  if(erow==0 && lrow==(ns+1))
    matrix(c(Inf,Inf),1,2)
  else if(erow==0)
    spells[(lrow:ns),,drop=FALSE]
  else if(lrow==(ns+1) && !is.null(splitspell))
    rbind(spells[(1:erow),,drop=FALSE], splitspell)
  else if(lrow==(ns+1))
    spells[(1:erow),,drop=FALSE]
  else if (erow+1==lrow && is.null(splitspell))
    spells
  else if (erow+1==lrow)
    rbind(spells[(1:erow),,drop=FALSE],
          splitspell,
          spells[(lrow:ns),,drop=FALSE])
  else
    spells[-((erow+1):(lrow-1)),,drop=FALSE]
}
# Helper: replace every "null" spell -- the (Inf, Inf) sentinel matrix --
# in a list of spell matrices with NULL, leaving all other entries intact.
.removeNullSpells <- function(x){
  replace.one <- function(spell){
    keep <- is.null(spell) || any(is.na(spell)) ||
      spell[1, 1] != Inf || spell[1, 2] != Inf
    if (keep) spell else NULL
  }
  lapply(x, replace.one)
}
|
6755b002831bffe4089f144b10a8c4485b1007a3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/gamlss/examples/Rsq.Rd.R | 5044c13a41ab8ad6cbf5c79321aa1c991dab85a3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 247 | r | Rsq.Rd.R | library(gamlss)
### Name: Rsq
### Title: Generalised (Pseudo) R-squared for GAMLSS models
### Aliases: Rsq
### Keywords: regression
### ** Examples
data(aids)  # example data shipped with the gamlss package
m1 <- gamlss(y~x+qrt, data=aids, family=NBI)  # negative binomial (NBI) fit on time and quarter
Rsq(m1)  # generalised pseudo R-squared of the fitted model
Rsq(m1, type="both")  # report both available R-squared definitions
rm(m1)  # tidy up the example workspace
|
da8998d3f549019da40b5d75e575a715b8b294cf | c87a3703d379a70af9082758d70beebb7a51d6c6 | /cachematrix.R | 3fefa9cb450251e986b9c0c576c820711fdc7110 | [] | no_license | StewartJepson/ProgrammingAssignment2 | 59aadcf2e212e49c861691c94c6f5de14fc3cb29 | 27b27fdb39a29b9aa0128d0e962f9bcdb2db28e3 | refs/heads/master | 2020-04-10T23:45:09.566674 | 2018-12-13T17:30:55 | 2018-12-13T17:30:55 | 161,363,655 | 0 | 0 | null | 2018-12-11T16:37:33 | 2018-12-11T16:37:32 | null | UTF-8 | R | false | false | 1,621 | r | cachematrix.R | ## The following functions cache inverse of a matrix
## makeCacheMatrix: build a special "matrix" object -- a list of closures
## that store a matrix together with a cached copy of its inverse. The
## cache starts empty and is invalidated whenever the matrix is replaced.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until someone stores it via setI()
  set <- function(y) {
    x <<- y
    inv <<- NULL  # matrix changed: drop any previously cached inverse
  }
  get <- function() x
  setI <- function(inverse) inv <<- inverse
  getI <- function() inv
  # Expose the four accessors as a named list (the object's "methods").
  list(set = set,
       get = get,
       setI = setI,
       getI = getI)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix(). The inverse is computed at most once -- if a cached
## value exists it is returned directly; otherwise it is computed with
## solve(), stored back on the object, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getI()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)  # extra arguments are forwarded to solve()
  x$setI(inverse)                 # remember the result for next time
  inverse
}
|
cc7e96a084f424e2b815d3b76de200789dbde733 | 5996e5ed35ef4e0cf75774c2ec837fb757462a48 | /man/meanByClassHelper1.Rd | b6762382f931cc3eb39f097c2bbb32046bc7fe30 | [] | no_license | interconnectDiabetes/dsBaseClient | 26045ac34ef4140e92b94bad5de77b726fafc67f | dfc52f174a8387a59aa4002b52b120186647867a | refs/heads/master | 2021-01-21T16:10:22.057430 | 2015-05-01T12:56:06 | 2015-05-01T12:56:06 | 34,659,157 | 0 | 0 | null | 2015-04-27T10:00:32 | 2015-04-27T10:00:31 | null | UTF-8 | R | false | false | 837 | rd | meanByClassHelper1.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/meanByClassHelper1.R
\name{meanByClassHelper1}
\alias{meanByClassHelper1}
\title{Generates subset tables}
\usage{
meanByClassHelper1(dtsource, tables, variable, categories)
}
\arguments{
\item{dtsource}{one or more opal objects obtained after logging in to opal servers;}
\item{tables}{a character vector, the tables to breakdown}
\item{variable}{a character, the variable to subset on}
\item{categories}{a character vector, the classes in the variables to subset on}
}
\value{
a character vector containing the names of the new subset tables.
}
\description{
This is an internal function.
}
\details{
This function is called by the function 'ds.meanByClass' to break down
the initial table by the specified categorical variables.
}
\author{
Gaye, A.
}
\keyword{internal}
|
38ba59dcc8aa5fe87758142d721c9f3ff244c4fe | 657a828bb718736bdeaf56aa29b636f8e55ff743 | /man/LogLoss.Rd | 4f13ff021321b8275a711f00f057c6b450c0c213 | [] | no_license | Then-Terence/LXR | c8e53ed336cb850e1a9f68999af6493603e62f64 | 10d740c2b76a7f306359a34ccefda1a163c7e66d | refs/heads/master | 2023-04-04T17:03:13.198230 | 2021-03-11T19:23:03 | 2021-03-11T19:23:03 | 177,258,586 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 440 | rd | LogLoss.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LogLoss.R
\name{LogLoss}
\alias{LogLoss}
\title{Logarithmic Loss}
\usage{
LogLoss(Target, Prediction)
}
\arguments{
\item{Target}{A vector of the binary dependent variable.}
\item{Prediction}{A vector of the predictions.}
}
\description{
This function computes the logarithmic loss for binary predictions.
}
\examples{
LogLoss(Data[, Y], Model$fitted.values)
}
|
82d3388f20c1b02557483d6f7844e4ff81c48c1d | d7ff71e8ffb07419aad458fb2114a752c5bf562c | /man/alignment_col1_all_named.Rd | ba03141f173ff53cebb74900a58a3bd791b381ef | [
"MIT"
] | permissive | r-lib/styler | 50dcfe2a0039bae686518959d14fa2d8a3c2a50b | ca400ad869c6bc69aacb2f18ec0ffae8a195f811 | refs/heads/main | 2023-08-24T20:27:37.511727 | 2023-08-22T13:27:51 | 2023-08-22T13:27:51 | 81,366,413 | 634 | 79 | NOASSERTION | 2023-09-11T08:24:43 | 2017-02-08T19:16:37 | R | UTF-8 | R | false | true | 481 | rd | alignment_col1_all_named.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/detect-alignment-utils.R
\name{alignment_col1_all_named}
\alias{alignment_col1_all_named}
\title{Checks if all arguments of column 1 are named}
\usage{
alignment_col1_all_named(relevant_pd_by_line)
}
\arguments{
\item{relevant_pd_by_line}{A list with parse tables of a multi-line call,
excluding first and last column.}
}
\description{
Checks if all arguments of column 1 are named
}
\keyword{internal}
|
dc482dba2eca672990a541a49d1a1317be29aa57 | 5cb215dd1d269b4471b91efea988d842bf55de40 | /auto-man/synSetProvenance.Rd | 3080c678a7b7375415e21c14c01dda7d0f2500b1 | [
"Apache-2.0"
] | permissive | Sage-Bionetworks/synapser | 0d308dba0a4a993a1e8f609c25c75b072de78cdc | c9ed6ca9fb5247d56167ff8812ddc780de013127 | refs/heads/master | 2023-06-24T23:10:43.914336 | 2023-06-14T22:33:35 | 2023-06-14T22:33:35 | 34,292,371 | 31 | 16 | Apache-2.0 | 2023-09-10T04:16:43 | 2015-04-20T23:33:04 | R | UTF-8 | R | false | false | 584 | rd | synSetProvenance.Rd | %
% Auto-generated file, do not modify.
% Instead, copy this file to the man/ folder, remove this warning, and edit freely.
% Use Git to identify changes in this file which suggest where to change your edited copy.
%
\name{synSetProvenance}
\alias{synSetProvenance}
\docType{methods}
\title{
synSetProvenance
}
\description{
Stores a record of the code and data used to derive a Synapse entity.
}
\usage{
synSetProvenance(entity, activity)
}
\arguments{
\item{entity}{ An Entity or Synapse ID to modify\cr
}
\item{activity}{ an Activity object}
}
\value{
An updated Activity object
}
|
e92b1c086af9107df5c527a27b57b922ee47f72f | 8cbf3effff17bb3d7b71f61d825c5396287aa97b | /R/miscellany.R | 016dc31bbbfa5163ee01e7ae06e8151a3316b279 | [] | no_license | hamedbh/HBHtools | a8df8c5efd3a4df005380a2982bf2acea53be869 | a6d55ed3d04210d858a486be030c98a80528e8eb | refs/heads/master | 2021-05-15T15:34:24.625179 | 2017-10-20T06:53:44 | 2017-10-20T06:53:44 | 107,390,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,681 | r | miscellany.R | #' A Function to Convert Decimal Time to Days, Hours, Minutes
#'
#' Converts decimal amounts of time into whole days, hours, and minutes.
#' NOTE: a "day" here is a 7.4-hour working day (encoded by the `* 7.4` /
#' `%/% 7.4` arithmetic below), not a 24-hour calendar day.
#'
#' @param days Number of (7.4-hour) days, must be numeric and non-negative
#' @param hours Number of hours, must be numeric and non-negative
#' @return A named list of day, hour, and minute amounts for the decimal time
#'   given. A summary sentence is also printed to the console.
#' @export
#' @examples
#' convert_decimal_time(days = 3, hours = 2.5)
#' # "Total time is 3 days, 2 hours, and 30 minutes."
#'
#' convert_decimal_time(days = 1.5)
#' # "Total time is 1 days, 3 hours, and 42 minutes."
#'
#' convert_decimal_time(hours = 6.25)
#' # "Total time is 0 days, 6 hours, and 15 minutes."
convert_decimal_time <- function(days = 0, hours = 0.0) {
    stopifnot(is.numeric(days),
              is.numeric(hours),
              days >= 0,
              hours >= 0)
    full_days <- 0L
    full_hrs <- 0L
    full_mins <- 0L
    # Fold all supplied days into hours at 7.4 hours per day.
    # (The original's `(as.integer(days) + (days %% 1))` is just `days`.)
    if (days > 0) {
        hours <- hours + days * 7.4
    }
    if (hours > 0) {
        full_days <- hours %/% 7.4
        full_hrs <- (hours %% 7.4) %/% 1
        # TODO(review): rounding can yield 60 minutes (e.g. hours = 6.999);
        # a carry into the hour count is not performed here.
        full_mins <- round(((hours %% 7.4) %% 1) * 60)
    }
    # Bug fix: the original built this string with paste0() but discarded
    # it, despite the documentation promising console output. message()
    # actually emits the summary.
    message("Total time is ",
            full_days,
            " days, ",
            full_hrs,
            " hours, and ",
            full_mins,
            " minutes.")
    list(days = full_days,
         hours = full_hrs,
         mins = full_mins)
}
|
f99181e9a31adc118e3f382f3b65c0eda64bcf7a | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkStatusIconSetFromIconName.Rd | 1ac424ace52e0ea6e00eba3b06c9478430f615ce | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 531 | rd | gtkStatusIconSetFromIconName.Rd | \alias{gtkStatusIconSetFromIconName}
\name{gtkStatusIconSetFromIconName}
\title{gtkStatusIconSetFromIconName}
\description{Makes \code{status.icon} display the icon named \code{icon.name} from the
current icon theme.
See \code{\link{gtkStatusIconNewFromIconName}} for details.}
\usage{gtkStatusIconSetFromIconName(object, icon.name)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkStatusIcon}}}
\item{\verb{icon.name}}{an icon name}
}
\details{Since 2.10}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
df6a90cf981a5b076962caab7d78514310f908e8 | d8fe878b71c231644cb5e364f94fa5efb3b68df2 | /plot.R | 734a0ced7acbf63872896ae8986618d7f7543f10 | [] | no_license | alanct/ExData_Plotting1 | 3648d7a26cc84a8260467d243c71d45fe8115586 | d8c4918327567bf489db64305390871cc3d38fcc | refs/heads/master | 2020-04-06T04:19:03.491370 | 2014-09-07T23:25:13 | 2014-09-07T23:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,225 | r | plot.R | data = read.csv("household_power_consumption.txt", header = T")
# NOTE(review): the read.csv() call above this block should pass sep = ";"
# and na.strings = "?" for this dataset (it is semicolon-separated with
# missing readings coded as "?") -- confirm against the raw file; otherwise
# the measurement columns will not be numeric.

# Build one timestamp per observation.
# Bug fix: the original converted with as.Date(), which silently discards
# the "%H:%M:%S" part of the format, collapsing every reading of a day onto
# a single x value in the time-series plots below. as.POSIXct() keeps the
# minute-level resolution.
data$DateTime <- paste(data$Date, data$Time)
data$DateTime <- as.POSIXct(data$DateTime, format = "%d/%m/%Y %H:%M:%S", tz = "UTC")

library(dplyr)

# Keep the two days 2007-02-01 and 2007-02-02 (strict "< Feb 3" upper bound
# so the whole of the second day is included).
subsetted <- filter(data,
                    DateTime >= as.POSIXct("2007-02-01", tz = "UTC"),
                    DateTime < as.POSIXct("2007-02-03", tz = "UTC"))

# Plot 1: histogram of global active power.
png(filename = "plot1.png")
hist(subsetted$Global_active_power, main = "Global Active Power", ylab = "Frequency",
     xlab = "Global Active Power (kilowatts)", col = "red", breaks = 13,
     ylim = c(0, 1200), xlim = c(0, 6), xaxp = c(0, 6, 3))
dev.off()

# Plot 2: global active power over time.
png(filename = "plot2.png")
plot(subsetted$DateTime, subsetted$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()

# Plot 3: the three sub-metering series overlaid, with a legend.
png(filename = "plot3.png")
plot(subsetted$DateTime, subsetted$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering")
points(subsetted$DateTime, subsetted$Sub_metering_2, type = "l", col = "red")
points(subsetted$DateTime, subsetted$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()

# Plot 4: 2x2 panel -- active power, voltage, sub metering, reactive power.
png(filename = "plot4.png")
par(mfrow = c(2, 2))
# plot 1 (NW)
plot(subsetted$DateTime, subsetted$Global_active_power, type = "l",
     ylab = "Global Active Power", xlab = "")
# plot 2 (NE)
plot(subsetted$DateTime, subsetted$Voltage, type = "l", ylab = "Voltage",
     xlab = "datetime")
# plot 3 (SW)
plot(subsetted$DateTime, subsetted$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "", col = "black")
points(subsetted$DateTime, subsetted$Sub_metering_2, type = "l", col = "red")
points(subsetted$DateTime, subsetted$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# plot 4 (SE)
plot(subsetted$DateTime, subsetted$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power", ylim = c(0, 0.5))
dev.off()
|
bc6ecd742851b8554ad70df08295fa5a7700a0ed | f786383d6162f8fa082a83b03633dc6a0d64fb26 | /plot2.r | 85041b4486e9bfba66b48bfdf2196368ceb01780 | [] | no_license | macmcadams/ExData_Plotting1 | e54449d072a94cf77c0f4d675c7eba7144600d95 | 5ea68897ef261e66099ec8c6088e803fbd01cc26 | refs/heads/master | 2021-01-21T00:22:23.157166 | 2016-02-27T00:33:56 | 2016-02-27T00:33:56 | 52,625,904 | 0 | 0 | null | 2016-02-26T18:58:42 | 2016-02-26T18:58:41 | null | UTF-8 | R | false | false | 771 | r | plot2.r | ####Load necessary library
library(data.table)
####Make sure to have the household_power_consumption.txt file in your working directory
###Load and Transform data
powerdata <- fread("household_power_consumption.txt", header = TRUE, sep = ";", na.strings="?")
powerdata$Date2 <- as.Date(powerdata$Date, "%d/%m/%Y")
twodays <- powerdata[Date2 >= "2007-02-01" & Date2 <= "2007-02-02",]
###combine date and time
twodays$fulltime <- as.POSIXct(paste(twodays$Date2, twodays$Time), format="%Y-%m-%d %H:%M:%S")
##Plot 2
par(mfrow = c(1,1))
png("plot2.png", width=480, height=480, res = 100)
plot(twodays$fulltime, twodays$Global_active_power, type="o", col="black",
ylab = "Global Active Power (kilowatts)", xlab = "", lty = "solid", pch = '.')
dev.off() |
33978f63cd60c61f75012f82b0bd3a25bf06e1be | 9c83ac4aaa39d6a9431907b43ecd44ca43aadd82 | /R/GMM.R | 32adf909b4b35c6d24ea70f8b6ae1d1492a09b37 | [] | no_license | jkaufy/rpackage | 20a722749b8b773754f0d09c00da96f226a5952e | ae3a1c09380028e6995984465ad5dab6a3e435b1 | refs/heads/main | 2023-08-11T02:12:11.856164 | 2021-10-03T20:41:42 | 2021-10-03T20:41:42 | 411,527,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,829 | r | GMM.R | #' Perform Gaussian Mixture Model on a data matrix.
#'
#' This function fits a Gaussian mixture model to a data matrix via EM and
#' returns the log-likelihood attained at convergence.
#'
#' @param data.mat A numeric data matrix (nxp)
#' @param K The number of clusters
#'
#' @return A list with one element, \code{loglik}: the log-likelihood at convergence
#'
#' @export
#'
#' @examples
#' data.mat <- as.matrix(iris[, c("Petal.Length", "Petal.Width")])
#' K <- 3
#' log.likely <- GMM(data.mat, K)
#'
GMM <- function(data.mat, K)
{
  # EM for a K-component Gaussian mixture with diagonal ("constrained")
  # covariance matrices, starting from random soft assignments.
  # Random responsibilities, normalised so each row sums to 1.
  rand.mat <- matrix(runif(nrow(data.mat)*K), nrow=nrow(data.mat), ncol=K)
  prob.mat <- rand.mat/rowSums(rand.mat)
  old.log.likely <- 0
  new.log.likely <- 1
  round.decimal <- 3
  # Iterate until the log-likelihood is stable to 3 decimal places.
  while(round(old.log.likely, digits = round.decimal) !=
        round(new.log.likely, digits = round.decimal))
  {
    old.log.likely = new.log.likely
    # M step: weighted prior, mean and diagonal covariance per component.
    cluster.param.list <- list()
    for (cluster in 1:K)
    {
      prob.vec <- prob.mat[, cluster]
      mean.vec <- colSums(data.mat * prob.vec)/sum(prob.vec)
      mean.mat <- matrix(mean.vec, nrow(data.mat), ncol(data.mat), byrow = TRUE)
      diff.mat <- data.mat - mean.mat
      # Diagonal covariance: per-feature responsibility-weighted variances.
      constrained.cov.mat <- diag(colSums(diff.mat^2*prob.vec) / sum(prob.vec))
      this.cluster.params <- list(
        prior.weight = mean(prob.vec),
        mean.vec = mean.vec,
        cov.mat = constrained.cov.mat)
      cluster.param.list[[cluster]] <- this.cluster.params
    }
    # E step: Gaussian density of each observation under each component.
    # NOTE(review): prior.weight is computed above but never multiplied in
    # here, so responsibilities and the likelihood treat components as
    # equally weighted -- confirm whether that is intended.
    density.mat <- matrix(NA, nrow(data.mat), K)
    for(cluster in 1:K){
      params <- cluster.param.list[[cluster]]
      density.mat[,cluster] <- mvtnorm::dmvnorm(
        data.mat, params$mean.vec, params$cov.mat
      )
    }
    total.density.vec <- rowSums(density.mat)
    new.log.likely <- sum(log(total.density.vec))
    # New responsibilities: per-row normalisation (row totals recycle down
    # the columns of density.mat).
    prob.mat <- density.mat/total.density.vec
  }
  return(list(
    loglik = new.log.likely))
}
|
7fe5ff931fdf0d50527d29a22e67aa10186ae55b | 2a7862d20fc9a6819e849907bf0e76733ac270ef | /7.Check.clf.test.apcluster.ordered.by.names.R | 5f62ebc9b7329cce8417e9557e2e2ee91a516a28 | [] | no_license | LucasMS/HMA-LMA-SMP2017 | da5090cd82ba2372b505d56232b6ae0ab90d159d | 12fe422eda6244ecc065831a09bf6dfba4178913 | refs/heads/master | 2020-04-07T08:56:21.509665 | 2018-11-23T13:31:44 | 2018-11-23T13:31:44 | 158,233,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,189 | r | 7.Check.clf.test.apcluster.ordered.by.names.R | library('pheatmap')
ncol=7#11
# Apply an alpha (opacity) channel to a vector of colour names/codes and
# return the corresponding hex colour strings.
add.alpha <- function(col, alpha=1){
  if (missing(col)) {
    stop("Please provide a vector of colours.")
  }
  rgb.frac <- sapply(col, col2rgb) / 255  # 3 x n matrix of RGB fractions
  apply(rgb.frac, 2, function(channel) {
    rgb(channel[1], channel[2], channel[3], alpha = alpha)
  })
}
# Colour ramp for the heatmap: dark red -> white -> dark blue (alpha on the
# endpoints), interpolated in Lab space to `ncol` colours.
cols <-c(add.alpha("dark red", alpha=0.7),"white", add.alpha("dark blue", 0.7))
rampcols <- colorRampPalette(colors = cols, space="Lab")(ncol)
# Read the 10 prediction tables and accumulate their 1st and 3rd columns
# into d1P and d1C respectively, one column per run.
for (i in 1:10){
  # paste() with sep = i splices the run number between the two path
  # pieces, i.e. "./Prediction/Prediction<i>.tsv".
  dat=paste('./Prediction/Prediction', '.tsv', sep=as.character(i))
  dat=read.delim(dat, row.names = 1)
  data.m=as.matrix(dat)
  data.m=data.m[,c(1,3)]
  colnames(data.m)
  if (i == 1){
    # First run initialises the accumulator data frames.
    d1P=data.frame(data.m[,1])
    colnames(d1P)[i]=as.character(i)
    d1C=data.frame(data.m[,2])
    colnames(d1C)[i]=as.character(i)
  }
  if (i != 1){
    # Subsequent runs are appended as extra columns.
    d1P=cbind(d1P, data.m[,1])
    colnames(d1P)[i]=as.character(i)
    d1C=cbind(d1C,data.m[,2])
    colnames(d1C)[i]=as.character(i)
  }
}
# Average the 10 runs row-wise; the two columns are labelled Phylum / Class.
data.m=data.frame(Phylum=rowMeans(d1P), Class=rowMeans(d1C))
##
data.m=as.matrix(data.m)
# library(mclust)
# Ys = data.frame((data.m))
#
# fit <- Mclust(Ys)
#
# plot.Mclust(fit, "BIC")
#
# fit$G
#
# Y.pca = princomp(Ys)
#
# plot(Y.pca$scores[,1:2], col = fit$classification, pch = 19)
#
#
# require(vegan)
# fit <- cascadeKM(data.m, 1, 10, iter = 1000)
# plot(fit, sortg = TRUE, grpmts.plot = TRUE)
# calinski.best <- as.numeric(which.max(fit$results[2,]))
# cat("Calinski criterion optimal number of clusters:", calinski.best, "\n")
library(apcluster)
#Create distance, The choice is the standard similarity measure used in the papers of Frey and Dueck — negative squared distances.
d=negDistMat(data.m, r=2)
#Create apreresult object. Also, without a q set, the exemplars preferences were set to the median, which is expected to result in a moderate number of clusters in comparison of a small number of clusters that results when the exemplar preferences are set to their minimum (q=0).
apres.d=apcluster(d)
apres.d
#Vizualize clusters
plot(apres.d, data.m)
#Vizualize details. It uses the median rule to determine input preferences (number of clusters)
apres.d=apcluster(d, details=T)
#Check how it was done. Net similrity has to converge and the objective of the alorithm is to inclrease net similarity
plot(apres.d)
#See heatmap with similarity matrix
heatmap(apres.d, d)
#It is a bit tricky to find the best number of clusters. Exemplar based agglomerative clustering (aggExCluster) on affinity propagation results provides an additional tool for finding the right number of clusters. The heatmao plot for affinity propagation results uses aggECcluster internally to arrange clusters ;). So, I will recover the clusters from there.
#Aggregate the clusters
aggres.ad=aggExCluster(d, apres.d)
aggres.ad
pdf('R7prediction.clusters.pdf')
plot(aggres.ad, ylab= 'Negative squared Euclidean distances', main=NA)
dev.off()
aggres.ad@order
#Aggregate the samples
#Now, get the clusters created by affinity propagation and order according to the results of agglomerative clustering (which made an hierarchical cluster of the AP clusters)
# Build a sample -> AP-cluster lookup, with clusters listed in the order
# chosen by the agglomerative result (aggres.ad@order) and samples sorted
# alphabetically within each cluster.
nomes <- data.frame(sp=character(),
                    cluster=numeric(),
                    stringsAsFactors=FALSE)
for (i in aggres.ad@order){
  #for (z in names(apres.d@clusters[[i]])){ #order by cluster
  for (z in sort(names(apres.d@clusters[[i]]))){ #order by names
    datinha <- data.frame(sp=z,
                          cluster=i,
                          stringsAsFactors=FALSE)
    # NOTE(review): rbind in a loop grows the frame row by row -- fine for
    # small inputs, quadratic for large ones.
    nomes=rbind(nomes, datinha)
  }
}
# Row annotation for pheatmap: cluster id per sample name.
annotation_row=data.frame(Cluster=as.character(nomes$cluster))
rownames(annotation_row)=nomes$sp
## Now, use this to plot: reorder the data rows to match the cluster order.
data.m=data.m[match(nomes$sp, rownames(data.m)),]
pdf('R7prediction.pdf', width=6, height=15)
par(mfrow=c(1,2))
pheatmap(data.m, color = rampcols, cluster_rows = F, cluster_cols = F, cex=0.9,border_color="white",annotation_row = annotation_row, cellwidth = 10)
plot(aggres.ad, horiz=T, main=NA,showSamples=T, ticks=3, cex.axis=0.7)
dev.off()
#pdf('R7.clusters.pdf', width=3, height=2)
#plot(aggres.ad, main=NA, las=2, cex.axis=0.3, ticks=3)
#dev.off()
write.table(nomes, "R7prediction.ap.clusters.tsv", sep="\t", quote=F, col.names = F, row.names = F)
|
48213941fff672b32c1448b30c22e117a89023a5 | 27f3313a686b12e132502c6781d376f24d278595 | /ARIMA.r | cb02a66f7debe06704ca1fac72247495de89a33c | [
"MIT"
] | permissive | mstrickland256/Basics | d30f0059d208ceec09a29e4b317e952bb9d2515a | 7025e8bc03c40d65e71bcab783d2162be828ace7 | refs/heads/master | 2021-08-23T04:00:03.932561 | 2017-12-03T04:23:15 | 2017-12-03T04:23:15 | 112,898,566 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,484 | r | ARIMA.r | library(stats)
library(forecast)
library(ggplot2)
# Strip data down to a single column, no label; read it and pull the first
# (only) column out as a plain vector.
snap <- read.csv("xxx.csv")
series <- snap[[1]]
# Estimate the dominant seasonal period of the series.
crackle <- findfrequency(series)
# Build the time series with the detected period.
# Bug fix: the original called ts(crackle, frequency = 30), turning the
# single detected-frequency NUMBER into a "series" and hard-coding the
# period, instead of building the series from the data with the detected
# period. There must be at least two full periods in the data.
pop <- ts(series, frequency = crackle)
# Four panels: the original data plus the decomposed trend, seasonal swings
# and extracted "noise". (The original line was missing its closing paren.)
plot(decompose(pop))
# Auto ARIMA automatically seeks out the best model based on AICc etc. --
# fitted to the series itself, not to the frequency estimate as before.
tick <- auto.arima(pop, d=NA, D=NA, max.p=5, max.q=5,
        max.P=2, max.Q=2, max.order=5, max.d=2, max.D=1,
        start.p=2, start.q=2, start.P=1, start.Q=1,
        stationary=FALSE, seasonal=TRUE,
        ic=c("aicc", "aic", "bic"), stepwise=TRUE, trace=FALSE,
        approximation=(length(pop)>100 | frequency(pop)>12), xreg=NULL,
        test=c("kpss","adf","pp"), seasonal.test=c("ocsb","ch"),
        allowdrift=TRUE, allowmean=TRUE, lambda=NULL, biasadj=FALSE,
        parallel=FALSE, num.cores=2)
# Plot the fitted model's forecast.
plot(forecast(tick))
|
16b21dd4fc14500d2563119edac0973df4c3246c | 29585dff702209dd446c0ab52ceea046c58e384e | /snn/R/mybnn.R | 12ff723ef4d39cde1cf1b71cedfabec8f8195750 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 781 | r | mybnn.R | mybnn <-
function(train,test,ratio){
	#implement Bagged NN with given re-sampling ratio for new tests. test can be matrix or a vector
	# Reference: Hall and Samworth (2005)
	# train: matrix whose last column is the class label and whose earlier
	#        columns are the d predictors; test: one observation (vector)
	#        or a matrix of observations, one per row; ratio: resampling
	#        ratio of the bagging scheme.
	n = dim(train)[1]
	# Weight of the i-th nearest neighbour: a geometric series in (1-ratio),
	# normalised so the n weights sum to 1.
	weight = rep(0,n)
	for(i in 1:n){
		weight[i] = ratio*(1-ratio)^(i-1)/(1-(1-ratio)^n)
	}
	# Coerce `test` to a matrix with one observation per row.
	if(is.vector(test) == TRUE){
		if(dim(train)[2] - 1 == 1){
			# d = 1 case
			test.mat = as.matrix(test)
		}else{
			# d > 1 case
			test.mat = t(as.matrix(test))
		}
	}else{
		test.mat = test
	}
	if(dim(test.mat)[2] != (dim(train)[2]-1)) stop("training data and test data have different dimensions")
	# Classify each test row with the weighted-NN classifier mywnn().
	label = apply(test.mat,1,function(x) mywnn(train,x,weight))
	return(label)
}
|
8930c3ebc799fd78544a477299bc6a08dae64840 | 948c597c35bafb944d142eecc039885e759eab7c | /101519TA/p42a.R | 33c23aade4051a92ad5897eda5f389bd3e535ab3 | [] | no_license | ding05/time_series | 3e216ee8531f1349412b4e683b7af6f859d77c1d | 1b6b7cddc467564b8601e818378730b5d594c809 | refs/heads/master | 2022-08-19T05:30:18.485788 | 2019-11-01T00:44:12 | 2019-11-01T00:44:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 164 | r | p42a.R | co2_t = co2[7: 462]
co2_t = co2_t - ts1 - ts2
plot(co2_t, main = 'Residuals after Removing the Estimated Trend
and Seasonal Components', ylab = 'Residuals') |
e8d7bb35fa6a5e3062f9b1ad2ab6502b672f32be | 8d024367b4c01d59f965e4db7249328e686680f2 | /R/Storm.data.R | 73c5bee2def434f52a834c9ccea070b281df257e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nicholasjclark/BBS.occurrences | 78b972101488d3f365f03a5f0756d1c0e1216f2d | e6dc0eb6ec73b9f6b458a64990a9b249c1dddaaf | refs/heads/master | 2021-05-25T11:10:32.808761 | 2020-07-14T07:02:52 | 2020-07-14T07:02:52 | 127,217,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 660 | r | Storm.data.R | #' Descriptions and locations of damaging storms in the USA
#'
#' A dataset on storm events was accessed from NOAA and processed using regex
#' to categorise storms into categories. The entire workflow is shown in the \code{Appendix 6} script.
#' @format A \code{data.frame} containing 15 variables describing
#' location, severity and timing of the top third most damaging weather events recorded in the
#' NOAA storms database. Variables
#' include property damage, crop damage and total damage (all in US dollars), Year, Month,
#' Storm.type and geographical coordinates
#'
#' @docType data
#' @usage data(Storm.data)
#' @keywords datasets
#'
"Storm.data"
|
1bdfbcfc83b3b58a965ded1ca1cb0a6442b1ea95 | 2ff4101baa9f59309035bc319d3582e776e33633 | /uvoz/iso_kratice.R | 5ba11c9c751f0caf08ba45cc5254816d43287707 | [
"MIT"
] | permissive | Charisa/APPR-2015-16 | deba4c05a361e60dbcdf5630ad57fe42f008d5fd | 04256b447f9cdcc1cfe97391c006c6a34a4a1a05 | refs/heads/master | 2021-01-18T18:14:07.588532 | 2016-03-01T22:51:45 | 2016-03-01T22:51:45 | 45,528,619 | 0 | 0 | null | 2015-11-04T09:30:32 | 2015-11-04T09:30:32 | null | UTF-8 | R | false | false | 825 | r | iso_kratice.R | # Uvožena tabela z ISO kraticami.
ISO <- read.csv("https://raw.githubusercontent.com/umpirsky/country-list/master/data/en_US/country.csv", encoding = "UTF-8")
colnames(ISO)[2] <- "Country" # Rename the column "value" to
colnames(ISO)[1] <- "CODE"    # "Country" and "id" to "CODE".
write.csv2(ISO, 'podatki/ISO.csv', fileEncoding = "UTF-8", row.names = FALSE)
# ISO codes with 'three letters'.
iso <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv", encoding = "UTF-8")
iso <- iso[c(1,3)]
colnames(iso)[1] <- "Country" # Rename the first kept column to "Country"
colnames(iso)[2] <- "CODE"    # and the second to "CODE".
write.csv2(iso, 'podatki/iso.csv', fileEncoding = "UTF-8", row.names = FALSE)
e3a2713feb7736f7379c4b324ac22957393b49c2 | 79373c11ae265c87f917203fecdd974b1eca95a7 | /man/immgenAnno.Rd | 0c4c18b4a30470e744d068c35fabbb40a8f28061 | [] | no_license | vjcitn/pcmp | 274c8311db39894698cbfbf16604c42674212215 | 9ce0c6647f2811b6a9ae1223082ac294865d487d | refs/heads/master | 2020-04-02T05:16:50.097591 | 2019-01-28T15:25:14 | 2019-01-28T15:25:14 | 154,063,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 418 | rd | immgenAnno.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{immgenAnno}
\alias{immgenAnno}
\title{annotation of immgen mouse immune cell resource}
\format{data.frame}
\source{
\url{https://gist.github.com/nachocab/3d9f374e0ade031c475a}
}
\usage{
immgenAnno
}
\description{
annotation of immgen mouse immune cell resource
}
\examples{
head(immgenAnno)
}
\keyword{datasets}
|
b5a5154ec0dabc98dfe7c26433d8462527d3bab1 | 5a5bc9e1b0d59859b4e213b092e19afe232819e1 | /R/test.dates.R | 231f9d573d7642d54992bd3a6ae47045ec782a3c | [] | no_license | jrmosedale/microclimates | bf469e07b688e9342c0a8d767db84ee428e778f3 | ae2e61969631506c523bd618c9106a61b00355dd | refs/heads/master | 2021-04-30T15:18:19.091728 | 2018-02-12T11:31:16 | 2018-02-12T11:31:16 | 121,236,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,802 | r | test.dates.R |
# Create arrays of two years including a leap year for testing JD functions etc
days<-c(1:31,1:29,1:31,1:30,1:31,1:30,1:31,1:31,1:30,1:31,1:30,1:31,1:31,1:28,1:31,1:30,1:31,1:30,1:31,1:31,1:30,1:31,1:30,1:31)
months<-c(rep(1,31),rep(2,29),rep(3,31),rep(4,30),rep(5,31),rep(6,30),rep(7,31),rep(8,31),rep(9,30),rep(10,31),rep(11,30),rep(12,31),rep(1,31),rep(2,28),rep(3,31),rep(4,30),rep(5,31),rep(6,30),rep(7,31),rep(8,31),rep(9,30),rep(10,31),rep(11,30),rep(12,31))
years<-c(rep(1980,366),rep(1981,365))
length(days)==length(months)
length(months)==length(years)
# Functions to be tested
JDdoy<-function(DOY,year)
{
month<-ifelse(DOY==365,12,floor(DOY*12/365)+1)
day<-DOY%%(365/12)
a<-(14-month)/12
y<-year+4800-a
m<-month+12*a-3
JDN<-day+(153*m+2)/5+365*y+y/4-y/100+y/400-32045
JDN<-JDN-1.5
JDN
}
JD<-function(day,month,year){
a<-(14-month)/12
y<-year+4800-a
m<-month+12*a-3
JDN<-as.integer(floor((153*m+2)/5) + 365*y + floor(y/4) - floor(y/100) + floor(y/400) - 32045 + day)
JDN
}
JDres<-JD(days,months,years)
JDdoyres<-c(JDdoy(1:366,rep(1980,366)),JDdoy(1:365,rep(1981,365)))
# From http://stackoverflow.com/questions/27757994/julian-dates-in-r-chron-versus-us-naval-observatory
a <- floor((14 - months) / 12)
y <- years + 4800 - a
m <- months + 12 * a - 3
julian2 <- days + floor((153*m + 2)/5) + 365*y + floor(y/4) - 32083
# Test chron package - doesn't work for negative origins
library("chron")
options(chron.origin = c(month=1, day=1, year= ))
julres <- julian(months,days,years)
# Corrected functions
# Functions to be tested
JDdoy<-function(DOY,year)
{
month<-ifelse(DOY>=365,12,floor(DOY*12/365)+1)
day<-DOY%%(365/12)
a<-(14-month)/12
y<-year+4800-a
m<-month+12*a-3
JDN<-day+(153*m+2)/5+365*y+y/4-y/100+y/400-32045
JDN<-JDN-1.5
JDN
}
|
abf1536944e3d44289731e66b1bccd4cb6b0a461 | 140ea3cff1547e58e831d929992621344449c126 | /man/imaks.Rd | 3a9f6d52cc1c7948dd7ecefee33003f0851debeb | [] | no_license | cran/hdeco | 59dbd54e7b428c3636bdb1b085c6bc3d964a59b7 | 527f9fda0e104d3aca08e719414998e1dfcc5384 | refs/heads/master | 2021-01-19T07:47:26.585594 | 2009-02-25T00:00:00 | 2009-02-25T00:00:00 | 17,672,567 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 979 | rd | imaks.Rd | \name{imaks}
\alias{imaks}
\title{Categorical map drawing tool with set colour scheme}
\description{A tool for drawing categorical data (matrices) in their proper orientation and with a set colour palette. This is especially useful for drawing multiple images that require identical palettes even though some classes may not exist on all images.}
\usage{imaks(BE = demoimage1, ncolours=NULL, LENG=4)}
\arguments{
\item{BE}{Matrix: this is the input image that may or may not have an attribute cim that contains the title text.}
\item{ncolours}{Integer: the total number of colours in the image. This can be automatically determined if set to NULL.}
\item{LENG}{integer: used to control labelling.}
}
\value{The result is a graphic with the title read from attribute 'cim' (if it exists), drawn with the specified colour palette. This function is suitable for use with categorical maps.}
\author{Sandor Kabos and Tarmo Remmel}
\examples{
data(demoimage1)
imaks(demoimage1)}
\keyword{aplot}
|
537caa6dd708bb17af96db9c9cf73b4181835ecd | f518e9ef5191d05805e272d1ddf6b6add056022f | /code/build-jupyter-notebook.R | 79e516f0e21facdb32f4e95f7fd1e2d132787339 | [] | no_license | MingHacker/leetcode-cookbook | 186a7ceee8348e3d664ab8ed20a25d7c19502d18 | 272e08e8b362b3c4853c048fb53412f2db95b9b0 | refs/heads/master | 2023-07-31T15:57:34.422059 | 2021-09-16T14:39:26 | 2021-09-16T14:39:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 223 | r | build-jupyter-notebook.R | library(tidyverse)
# read_lines("notebook.md") %>%
# str_replace("^## \\d+\\. ", "### ") %>%
# write_lines("notebook.md")
# read_lines("Ex50.md") %>%
# str_replace("^## ", "# ") %>%
# write_lines("Ex50.md")
|
846a790eea58a4605401eae90873fc03ccbab57b | aa08000402f12817a0bba300e654c6e2c6136404 | /man/isotopic_information.Rd | 20dd85c092d27c50c3dae80787c26b201815f773 | [] | no_license | judechang/IsotopicLabelling | c6e413db5a61819233885fb0d6c5d7aa4291d9da | ddd5803d8d3902ee8c99bb49324bfeabf8930008 | refs/heads/master | 2021-12-10T05:12:18.607497 | 2016-07-22T09:30:28 | 2016-07-22T09:30:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,397 | rd | isotopic_information.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isotopic_information.R
\name{isotopic_information}
\alias{isotopic_information}
\title{Get useful isotopic information}
\usage{
isotopic_information(compound, charge = 1, labelling)
}
\arguments{
\item{compound}{Character vector specifying the chemical formula of the compound of interest,
with X being the element with unknown isotopic distribution (to be fitted)}
\item{charge}{Natural number, denoting the charge state of the target adduct (1,2,3,...). If not provided, it is 1 by default}
\item{labelling}{Character, either "H" or "C", specifying the labelling element}
}
\value{
A list with the following elements:
\item{compound}{The same as input}
\item{target}{Named vector with the exact masses of all the possible isotopologues
arising from the labelling isotope.
M+0 is the monoisotopic mass (sum of the masses of the atoms using the lightest isotope for each element, X included);
in M+1 one light isotope is replaced by its heaviest counterpart, and so forth}
\item{isotopes}{Table containing the natural isotopic abundances of the elements present in compound (numbers between 0 and 1).
The two isotopes of element X are given NA value}
\item{nX}{The number of X atoms. In other words, the number of atoms with unknown isotopic distribution}
\item{nTOT}{The total number of atoms of the labelling element (either H+X or C+X)}
}
\description{
This function gathers essential isotopic information required by the other functions of the
\code{\link{IsotopicLabelling}} package.
}
\details{
The specified compound is not the neutral molecular species of interest,
but the adduct observed by ESI-MS (such as protonated or sodiated species).
In the chemical formula, the element with unknown abundance should be denoted by X.
For example, the proton adduct of TAG 52:2, C55H103O6, should be written X55H103O6 for
^13C labelling experiments, and C55X102HO6 for ^2H labelling experiments.
Note that in this last case only 102 hydrogen atoms have unknown isotopic distribution,
since the one giving rise to the adduct comes from the solvent,
and is considered to have fixed natural abundance.
}
\examples{
info <- isotopic_information(compound="X40H77NO8P", charge=1, labelling="C")
# This is the case for [PC 32:2+H]+ in a ^13C-labelling experiment
}
\author{
Ruggero Ferrazza
}
\keyword{manip}
|
017db50d21bcfd6cca604a028aac7e8ac32f764f | 9995a101f4ed015258c3b2dc2044e7ff31e321e0 | /src/conditions.R | 544a6606aec675dad30a365a9e07e6e9e45a017c | [] | no_license | tvsiddhu/R_Code | fb1b9a3546f92af3f902e8bddfc181d6882a3340 | dcc7153cba94b67ec6baacbaa007c4409a3e6977 | refs/heads/master | 2022-11-11T12:07:45.138395 | 2020-07-06T07:21:00 | 2020-07-06T07:21:00 | 272,169,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 560 | r | conditions.R | # Title : conditional statement execution
# Objective : learning if and other conditional statements
# Created by: sid
# Created on: 14/Jun/20
# Clear any leftover value before drawing a fresh one (rm() only warns when
# the variable does not exist yet).
rm(random_variable)
# Draw a single observation from the standard normal distribution.
random_variable <- rnorm(1)
# The value itself is printed in every case, so print it once up front and
# then report which side of zero it landed on.
print(random_variable)
if (random_variable < 0) {
  print("<<-- this variable is less than zero")
} else if (random_variable > 0) {
  print("<<-- this variable is greater than zero")
} else {
  print("<<-- this variable is equal than zero")
}
|
8aff68d200624a2e4efb9bcbd0f17cefeaf3d92b | 2db887f51a48018a48ce823cc6a9aa6dac44a439 | /tests/testthat.R | a510ba8fa6ce858f258d1c97b7a7f3990922e950 | [
"MIT"
] | permissive | bertcarnell/impact | 99c682d749e86ae1e7433aeeeff28a725e792893 | 55d501afeee24a87c3061391f207713eb3249f57 | refs/heads/master | 2020-05-09T18:57:00.188174 | 2019-09-03T17:01:32 | 2019-09-03T17:01:32 | 181,361,147 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
# Standard testthat entry point: load the package under test and run every
# test under tests/testthat/ for the 'impact' package.
library(impact)
test_check("impact")
|
4182a12de05d6aa2f7c0bf11d82581ffb4d43328 | b2220ea82036616e44d6fcea4d0a4b401c729f1f | /plot3.R | 06bc19bdff9fe7b155fd0b557e0b626d3a5b1693 | [] | no_license | EngSarahHassan/ExData_Plotting1 | 858e21a3fab102193ddfc97091c06031e313c04d | 1b2b741d58a0f925ec4727b317570d2aad056c73 | refs/heads/master | 2022-11-06T12:48:19.981536 | 2020-06-21T05:27:02 | 2020-06-21T05:27:02 | 273,829,126 | 0 | 0 | null | 2020-06-21T03:45:49 | 2020-06-21T03:45:48 | null | UTF-8 | R | false | false | 1,023 | r | plot3.R | #Load the data
# Read the full household power-consumption file; fields are semicolon
# separated and "?" marks missing values. Assumes ./data/ holds the file.
electricPowerConsumptionDF<-read.csv("data/household_power_consumption.txt",header = TRUE,sep = ";",na.strings = "?")
# Parse Date (stored day/month/year) and keep only 2007-02-01 and 2007-02-02.
electricPowerConsumptionDF$Date<-as.Date(electricPowerConsumptionDF$Date,format="%d/%m/%Y")
electricPowerConsumptionDF<-subset(electricPowerConsumptionDF, Date==as.Date("2007-02-02")|Date==as.Date("2007-02-01"))
#Plot energy sub-metering over the two days, written to plot3.png.
# Combine Date and Time into POSIXlt timestamps for the x axis.
dtime<-strptime(paste(electricPowerConsumptionDF$Date,electricPowerConsumptionDF$Time),"%Y-%m-%d %H:%M:%S")
png(filename = "plot3.png",width = 480,height = 480,bg = "transparent")
# Draw an empty frame first (type="n"), then add one line per sub-meter.
plot(x=dtime,y=electricPowerConsumptionDF$Sub_metering_1,type="n",xlab="",ylab = "Energy sub metering")
points(dtime,electricPowerConsumptionDF$Sub_metering_1,col="black",type="l")
points(dtime,electricPowerConsumptionDF$Sub_metering_2,col="red",type="l")
points(dtime,electricPowerConsumptionDF$Sub_metering_3,col="blue",type="l")
legend("topright",col=c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty = c(1, 1, 1))
# Close the device so the PNG is flushed to disk.
dev.off()
86ce0bc596f2129291c0ba265b09fadaee0b0c68 | cd94ae361315380160c53aba76e55bad57c1ccdb | /man/theme_ojo.Rd | af6634e1c82b3e4c646c1c2f0462c6c90e925e90 | [] | no_license | rcgentzler/ojodb | c931836ff88b8ece481143c0752c16149d9851c1 | 7ba3700458023c8d8e39f6f6194692c277072df1 | refs/heads/master | 2023-01-20T20:48:12.975911 | 2020-11-20T19:48:53 | 2020-11-20T19:48:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 463 | rd | theme_ojo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_ojo.R
\name{theme_ojo}
\alias{theme_ojo}
\title{Style a ggplot in the OJO style}
\usage{
theme_ojo()
}
\description{
Add OJO styling to a ggplot
}
\examples{
\dontrun{
ggplot(ojo_example, aes(file_year, n_cases, color = court)) +
geom_line(size = 1.5) +
theme_ojo() +
ojo_colors() +
scale_x_continuous(breaks = 2010:2019,
limits = c(NA, 2019))
}
}
|
b30abd69e91a4fce9107c328631b3bf00d60c998 | 7705703255e840bda0e0b24d1f9fb64b00bc4bab | /Allard _ Spatial Analysis .R | 8d3b897273126b6e7fb25fe3798a95db1a17b1a5 | [
"MIT"
] | permissive | gallard28/SouthernSettlers | bdb782d9be9e81a15854c0bf6954effec70e5aa4 | 148055752a23eabe1a41187c82db56a0b828c4fd | refs/heads/master | 2020-03-20T15:39:39.885234 | 2018-11-06T13:48:48 | 2018-11-06T13:48:48 | 137,518,331 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 867 | r | Allard _ Spatial Analysis .R | ####Title: Spatial Analysis
####Author: Grant A. Allard and Marcos Segantini
####Date:
#Libraries
library(dplyr)
library(stringr)
library(readr)
# NOTE(review): require() only warns when a package is missing; library()
# would fail fast here.
require(tidyr)
require(ggplot2)
library(leaflet)
#Load Data
# expects inventor_df.RData in the working directory, defining 'inventor_df'
load('inventor_df.RData')
#subset data for NZ and UY inventors
inventor_UY_NZ_df<-inventor_df[inventor_df$country=="UY" | inventor_df$country=="NZ",]
# quick interactive sanity checks of the subset
names(inventor_UY_NZ_df)
head(inventor_df[inventor_df$country=="NZ",])
#Visualize data ###
#Count of inventors per country
country_table<-inventor_UY_NZ_df %>%
  group_by(country) %>%
  count()
country_table
#Set palette: colours mapped to the factor levels of the domain (NZ, UY)
pal<- colorFactor(c("white","blue"), domain= c("NZ", "UY"))
#Sample Map: dark base map with one semi-transparent circle per inventor,
# coloured by country. Assumes 'longitude'/'latitude' columns exist --
# TODO confirm against the loaded data frame.
map<-leaflet(inventor_UY_NZ_df) %>%
  addProviderTiles(providers$CartoDB.DarkMatter) %>%
  addCircleMarkers(lng= ~longitude, lat= ~latitude, color= ~pal(country), stroke=FALSE, fillOpacity = 0.5)
map
|
2565f8574b9bb3ec5ec0c5849f494576cbfdce9d | 033c440e58943c1b4092eb30398ed50c839093f2 | /man/select_genes_with_l2fc.Rd | ca3e8e13c1f33afba1ba27b21214dc7c01e11a00 | [
"MIT"
] | permissive | Ylefol/TimeSeriesAnalysis | 462c9e0cf6f430ca2d92182a613e71768739453e | 2a612f23d451ec90dac354fd11a13b69ea680370 | refs/heads/master | 2023-07-06T10:05:29.277659 | 2023-06-29T07:50:57 | 2023-06-29T07:50:57 | 522,526,978 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 763 | rd | select_genes_with_l2fc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/differential_expression.R
\name{select_genes_with_l2fc}
\alias{select_genes_with_l2fc}
\title{Select genes based on L2FC/FC}
\usage{
select_genes_with_l2fc(time_object, custom_l2fc_thresh = NULL)
}
\arguments{
\item{time_object}{A timeseries object}
\item{custom_l2fc_thresh}{A value indicating the log2FoldChange threshold, can be NULL}
}
\value{
A vector of significant genes whose absolute log2FoldChange is greater
than the input custom threshold.
}
\description{
Function which goes through all differential gene expression experiments, both
conditional and temporal, and extracts all significant genes that have an
absolute log2FoldChange greater than the custom_l2fc_thresh
}
|
0ab3a9349c5c39ffd2ebddc35363e126c7c6df81 | b253ea9ffd151dbfe884a01592d910d8244b3984 | /Scripts/clean_data/get_controls.R | 3581bf8d3ecfbdcb0e3d7dbea1331621db309494 | [] | no_license | benmbrew/LFS | 4d0bb1b6df268813b69c298cc310f1f3fc65a9b6 | 1384fc1e5a32e15853bfbbf3df62999fbe1b97de | refs/heads/master | 2021-03-12T19:08:05.281863 | 2019-10-03T15:09:48 | 2019-10-03T15:09:48 | 47,150,327 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,841 | r | get_controls.R | ########################################
# this script will read in and preprocess idat files for controls,
# producing a cleaned methylation matrix joined with clinical data
##########
# load libraries
##########
library(tidyverse)
library(data.table)
library(GenomicRanges)
library(biovizBase)
library(GEOquery)
library(IlluminaHumanMethylation450kmanifest)
library(preprocessCore)
library(IlluminaHumanMethylationEPICanno.ilm10b2.hg19)
##########
# initialize folders
##########
# NOTE(review): absolute cluster-specific paths; adjust before running elsewhere
home_folder <- '/home/benbrew/hpf/largeprojects/agoldenb/ben/Projects'
project_folder <- paste0(home_folder, '/LFS')
data_folder <- paste0(project_folder, '/Data')
methyl_data <- paste0(data_folder, '/methyl_data')
clin_data <- paste0(data_folder, '/clin_data')
idat_data <- paste0(methyl_data, '/controls')
model_data <- paste0(data_folder, '/model_data')
##########
# source all_functions.R script
##########
# provides cleanIdMap, preprocessMethod, findIds, getIdName, cleanIds,
# removeOutlier and the other helpers called below
source(paste0(project_folder, '/Scripts/predict_age/all_functions.R'))
##########
# fixed variables
##########
# normalization method handed to preprocessMethod() and used in the
# output file name
method = 'noob'
##########
# read in clinical data
##########
clin <- read.csv(paste0(clin_data, '/clinical_two.csv'), stringsAsFactors = F)
# clean clinical ids: strip plate letters and separators so they match the
# methylation array ids
clin$ids <- gsub('A|B|_|-', '', clin$blood_dna_malkin_lab_)
##########
# Controls batch1
##########
id_map <- read.csv(paste0(methyl_data, '/ids_map_controls.csv'), stringsAsFactors = F)
##########
# clean idmap
##########
id_map <- cleanIdMap(id_map)
##########
# read in idat files for the controls set
##########
rgControls <- read.metharray.exp(idat_data)
###########
# remove outliers (previously determined) from rgset before normalization
###########
#
# rgControls <- remove_outliers(rgSet = rgControls,
#                              id_map = id_map,
#                              method = 'funnorm',
#                              type = 'controls')
# ##########
# apply preprocessing method (M values)
##########
betaControls <- preprocessMethod(rgControls, preprocess = method, only_m_values = T)
# free the large raw intensity object as soon as it is no longer needed
rm(rgControls)
###########
# id functions
###########
# Controls
betaControls <- findIds(betaControls, id_map = id_map)
# get id name (only Controls)
betaControls <- getIdName(betaControls)
# clean ids
betaControls <- cleanIds(betaControls)
# remove 'ch' probes (presumably non-CpG sites) from the columns
betaControls <- betaControls[, !grepl('ch', colnames(betaControls))]
##########
# join data
##########
# previously selected 450k feature set
cg_sites <- readRDS(paste0(model_data, '/four_fifty_feats.rda'))
intersect_cg_cites <- intersect(cg_sites, colnames(betaControls))
# subset data by columns of interest and cg_sites
betaControls <- betaControls[, c('ids',
                                 'sentrix_id',
                                 intersect_cg_cites)]
# inner join with the clinical data on the cleaned ids
betaControls <- inner_join(clin, betaControls, by = 'ids')
# remove NAs from tm_donor
betaControls <- betaControls[!is.na(betaControls$tm_donor_),]
# remove duplicates
betaControls <- betaControls[!duplicated(betaControls$tm_donor_),]
# get cg_sites
cg_sites <- colnames(betaControls)[grepl('cg', colnames(betaControls))]
# saveRDS(cg_sites, paste0(model_data, '/four_fifty_feats.rda'))
# subset data by columns of interest and cg_sites
betaControls <- betaControls[, c('ids',
                                 'p53_germline',
                                 'cancer_diagnosis_diagnoses',
                                 'age_diagnosis',
                                 'age_sample_collection',
                                 'gender',
                                 'sentrix_id',
                                 'family_name',
                                 cg_sites)]
##########
# remove outliers
##########
betaControls <- removeOutlier(betaControls,
                              cases = F,
                              controls = T,
                              val = F)
##########
# save unscaled data
##########
saveRDS(betaControls, paste0(model_data, paste0('/', method, '_', 'beta_controls_m.rda')))
dfbd78b8bdf688919e8c5b01c2510f02e9cb2117 | cd09ef10f84c7451b9ed6eedb0f754d06c1fd394 | /Image_segmentaion Rcode.R | b1a9d3af250534961420da8516532fdbb765d943 | [] | no_license | AVJdataminer/RFNC | a5262b259b514640ad61c88b053b3ee91888b6e9 | 12ca5e3a6edb6d1361f8e674bcb638e5d343ec8d | refs/heads/master | 2021-01-20T22:02:14.882718 | 2019-05-17T16:06:10 | 2019-05-17T16:06:10 | 101,795,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 381 | r | Image_segmentaion Rcode.R | ##build an image classification for cars
#source("http://bioconductor.org/biocLite.R")
#biocLite("EBImage")
# EBImage provides readImage/display/watershed/colorLabels
require(EBImage)
# NOTE(review): machine-specific working directories below; the image and
# the shiny app are expected at these local paths.
setwd("C:/Users/")
imgcol = readImage("p90081936.jpg")
display(imgcol)
# watershed segmentation; each detected segment receives its own label
logo_label = watershed(imgcol)
# render the labelled segments with distinct colours
display( colorLabels(logo_label) )
require(shiny)
setwd("C:/Users/cra987/Dropbox/Data Projects/Vitamin D and Sunny time")
# launch the 'Vit_D' shiny app located in the new working directory
runApp('Vit_D')
|
0a0ce4827f48c97c1374fde2746e18b93f0c0e78 | 2d6f2dd52bfe260517aa56531a9523715232f451 | /R/rlmer.R | fad90092f26e40f7459cced0da1c5fd684fae1a0 | [] | no_license | kollerma/robustlmm | e0c795c47281d5a98022d49ba1aa61d1ad85bcb3 | 7f7f38c95a557a7f05b835c666c224838174cf99 | refs/heads/master | 2023-04-06T11:40:31.984963 | 2023-03-24T21:07:49 | 2023-03-24T21:07:49 | 8,647,011 | 23 | 10 | null | 2021-05-24T10:30:02 | 2013-03-08T08:38:54 | R | UTF-8 | R | false | false | 31,966 | r | rlmer.R | ##' Robust estimation of linear mixed effects models, for hierarchical nested
##' and non-nested, e.g., crossed, datasets.
##'
##' \describe{ \item{Overview:}{
##'
##' This function implements the Robust Scoring Equations estimator for linear
##' mixed effect models. It can be used much like the function
##' \code{\link[lme4]{lmer}} in the package \code{lme4}. The supported models
##' are the same as for \code{\link[lme4]{lmer}} (gaussian family only). The
##' robust approach used is based on the robustification of the scoring
##' equations and an application of the Design Adaptive Scale approach.
##'
##' Example analyses and theoretical details on the method are available in the
##' vignette (see \code{vignette("rlmer")}).
##'
##' Models are specified using the \code{formula} argument, using the same
##' syntax as for \code{\link[lme4]{lmer}}. Additionally, one also needs to
##' specify what robust scoring or weight functions are to be used (arguments
##' starting with \code{rho.}). By default a smoothed version of the Huber
##' function is used. Furthermore, the \code{method} argument can be used to
##' speed up computations at the expense of accuracy of the results. }
##'
##' \item{Computation methods:}{
##'
##' Currently, there are two different methods available for fitting models.
##' They only differ in how the consistency factors for the Design Adaptive
##' Scale estimates are computed. Available fitting methods for theta and
##' sigma.e: \itemize{
##'
##' \item \code{DAStau} (default): For this method, the consistency factors are
##' computed using numerical quadrature. This is slower but yields more accurate
##' results. This is the direct analogue to the DAS-estimate in robust linear
##' regression.
##'
##' \item \code{DASvar}: This method computes the consistency factors using a
##' direct approximation which is faster but less accurate. For complex models
##' with correlated random effects with more than one correlation term, this is
##' the only method available.
##'
##' } }
##'
##' \item{Weight functions:}{
##'
##' The tuning parameters of the weight functions \dQuote{rho} can be used to
##' adjust robustness and efficiency of the resulting estimates (arguments
##' \code{rho.e}, \code{rho.b}, \code{rho.sigma.e} and \code{rho.sigma.b}).
##' Better robustness will lead to a decrease of the efficiency. With the default
##' setting, \code{setting = "RSEn"}, the tuning parameters are set to yield
##' estimates with approximately 95\% efficiency for the fixed effects. The
##' variance components are estimated with a lower efficiency but better
##' robustness properties.
##'
##' One has to use different weight functions and tuning parameters for simple
##' variance components and for such including correlation parameters. By
##' default, they are chosen appropriately to the model at hand. However, when
##' using the \code{rho.sigma.e} and \code{rho.sigma.b} arguments, it is up to
##' the user to specify the appropriate function. See
##' \code{\link{asymptoticEfficiency}} for methods to find tuning parameters
##' that yield a given asymptotic efficiency. \itemize{
##'
##' \item For simple variance components and the residual error scale use the
##' function \code{\link{psi2propII}} to change the tuning parameters. This is
##' similar to Proposal 2 in the location-scale problem (i.e., using the
##' squared robustness weights of the location estimate for the scale estimate;
##' otherwise the scale estimate is not robust).
##'
##' \item For multi-dimensional blocks of random effects modeled, e.g.,
##' a model with correlated random intercept and slope, (referred to as
##' block diagonal case below), use the \code{\link{chgDefaults}} function to
##' change the tuning parameters. The parameter estimation problem is
##' multivariate, unlike the case without correlation where the problem was
##' univariate. For the employed estimator, this amounts to switching from
##' simple scale estimates to estimating correlation matrices. Therefore
##' different weight functions have to be used. Squaring of the weights (using
##' the function \code{\link{psi2propII}}) is no longer necessary. To yield
##' estimates with the same efficiency, the tuning parameters for the
##' block diagonal are larger than for the simple case. Tables of tuning parameters
##' are given in Table 2 and 3 of the vignette (\code{vignette("rlmer")}).
##'
##' } }
##'
##' \item{Recommended tuning parameters:}{
##'
##' For a more robust estimate, use \code{setting = "RSEn"} (the default). For
##' higher efficiency, use \code{setting = "RSEa"}. The settings described in
##' the following paragraph are used when \code{setting = "RSEa"} is specified.
##'
##' For the smoothed Huber function the tuning parameters to get approximately
##' 95\% efficiency are \eqn{k=1.345}{k=1.345} for \code{rho.e} and
##' \eqn{k=2.28}{k=2.28} for \code{rho.sigma.e} (using the squared version). For
##' simple variance components, the same can be used for \code{rho.b} and
##' \code{rho.sigma.b}. For variance components including correlation
##' parameters, use \eqn{k=5.14}{k=5.14} for both \code{rho.b} and
##' \code{rho.sigma.b}. Tables of tuning parameters are given in Table 2 and 3 of
##' the vignette (\code{vignette("rlmer")}). }
##'
##' \item{Specifying (multiple) weight functions:}{
##'
##' If custom weight functions are specified using the argument \code{rho.b}
##' (\code{rho.e}) but the argument \code{rho.sigma.b} (\code{rho.sigma.e}) is
##' missing, then the squared weights are used for simple variance components
##' and the regular weights are used for variance components including
##' correlation parameters. The same tuning parameters will be used when
##' \code{setting = "RSEn"} is used. To get
##' higher efficiency either use \code{setting = "RSEa"} (and only set arguments
##' \code{rho.e} and \code{rho.b}). Or specify the tuning parameters by hand
##' using the \code{\link{psi2propII}} and \code{\link{chgDefaults}} functions.
##'
##' To specify separate weight functions \code{rho.b} and \code{rho.sigma.b} for
##' different variance components, it is possible to pass a list instead of a
##' psi_func object. The list entries correspond to the groups as shown by
##' \code{VarCorr(.)} when applied to the model fitted with \code{lmer}. A set
##' of correlated random effects count as just one group. }
##'
##' \item{\code{lmerNoFit}:}{
##'
##' The \code{lmerNoFit} function can be used to get trivial starting values.
##' This is mainly used to verify the algorithms to reproduce the fit by
##' \code{\link{lmer}} when starting from trivial initial values. } }
##'
##' @title Robust Scoring Equations Estimator for Linear Mixed Models
##' @param formula a two-sided linear formula object describing the
##' fixed-effects part of the model, with the response on the left of a
##' \code{~} operator and the terms, separated by \code{+} operators, on the
##' right. The vertical bar character \code{"|"} separates an expression for
##' a model matrix and a grouping factor.
##' @param data an optional data frame containing the variables named in
##' \code{formula}. By default the variables are taken from the environment
##' from which \code{lmer} is called.
##' @param ... Additional parameters passed to lmer to find the initial
##' estimates. See \code{\link[lme4]{lmer}}.
##' @param method method to be used for estimation of theta and sigma, see
##' Details.
##' @param setting a string specifying suggested choices for the arguments
##' \code{rho.e}, \code{rho.sigma.e}, \code{rho.b} and \code{rho.sigma.b}.
##' Use \code{"RSEn"} (the default) or \code{"RSEa"}. Both use
##' \code{\link{smoothPsi}} for all the \dQuote{rho} arguments. For
##' \code{rho.sigma.e}, squared robustness weights are used (see
##' \code{\link{psi2propII}}). \code{"RSEn"} uses the same tuning parameter as
##' for \code{rho.e}, which leads to higher robustness but lower efficiency.
##' \code{"RSEa"} adjusts the tuning parameter for higher asymptotic efficiency
##' which results in lower robustness (\code{k = 2.28} for default \code{rho.e}).
##' For diagonal random effects covariance matrices, \code{rho.sigma.b} is
##' treated exactly as \code{rho.sigma.e}. For block diagonal random effects
##' covariance matrices (with correlation terms), regular robustness weights
##' are used for \code{rho.sigma.b}, not squared ones, as they're not needed.
##' But the tuning parameters are adjusted for both \code{rho.b} and
##' \code{rho.sigma.b} according to the dimensions of the blocks (for both
##' \code{"RSEn"} or \code{"RSEa"}). For a block of dimension 2 (e.g.,
##' correlated random intercept and slope) \code{k = 5.14} is used.
##' @param rho.e object of class psi_func, specifying the functions to use for
##' the huberization of the residuals.
##' @param rho.b object of class psi_func or list of such objects (see Details),
##' specifying the functions to use for the huberization of the random
##' effects.
##' @param rho.sigma.e object of class psi_func, specifying the weight functions
##' to use for the huberization of the residuals when estimating the variance
##' components, use the \code{\link{psi2propII}} function to specify squared
##' weights and custom tuning parameters.
##' @param rho.sigma.b (optional) object of class psi_func or list of such
##' objects, specifying the weight functions to use for the huberization of
##' the random effects when estimating the variance components (see Details).
##' Use \code{\link{psi2propII}} to specify squared weights and custom tuning
##' parameters or \code{\link{chgDefaults}} for regular weights for variance
##' components including correlation parameters.
##' @param rel.tol relative tolerance used as criteria in the fitting process.
##' @param max.iter maximum number of iterations allowed.
##' @param verbose verbosity of output. Ranges from 0 (none) to 3 (a lot of
##' output)
##' @param doFit logical scalar. When \code{doFit = FALSE} the model is not fit
##' but instead a structure with the model matrices for the random-effects
##' terms is returned (used to speed up tests). When \code{doFit = TRUE}, the
##' default, the model is fit immediately.
##' @param init optional lmerMod- or rlmerMod-object to use for starting values,
##' a list with elements \sQuote{fixef}, \sQuote{u}, \sQuote{sigma},
##' \sQuote{theta}, or a function producing an lmerMod object.
##' @return object of class rlmerMod.
##' @seealso \code{\link[lme4]{lmer}}, \code{vignette("rlmer")}
##' @author Manuel Koller, with thanks to Vanda Lourenço for improvements.
##' @keywords models
##' @examples
##' ## dropping of VC
##' system.time(print(rlmer(Yield ~ (1|Batch), Dyestuff2, method="DASvar")))
##'
##' \dontrun{
##' ## Default method "DAStau"
##' system.time(rfm.DAStau <- rlmer(Yield ~ (1|Batch), Dyestuff))
##' summary(rfm.DAStau)
##' ## DASvar method (faster, less accurate)
##' system.time(rfm.DASvar <- rlmer(Yield ~ (1|Batch), Dyestuff,
##' method="DASvar"))
##' ## compare the two
##' compare(rfm.DAStau, rfm.DASvar)
##'
##' ## Fit variance components with higher efficiency
##' ## psi2propII yields squared weights to get robust estimates
##' ## this is the same as using rlmer's argument `setting = "RSEa"`
##' rlmer(diameter ~ 1 + (1|plate) + (1|sample), Penicillin,
##' rho.sigma.e = psi2propII(smoothPsi, k = 2.28),
##' rho.sigma.b = psi2propII(smoothPsi, k = 2.28))
##'
##' ## use chgDefaults for variance components including
##' ## correlation terms (regular, non squared weights suffice)
##' ## this is the same as using rlmer's argument `setting = "RSEa"`
##' rlmer(Reaction ~ Days + (Days|Subject), sleepstudy,
##' rho.sigma.e = psi2propII(smoothPsi, k = 2.28),
##' rho.b = chgDefaults(smoothPsi, k = 5.14, s=10),
##' rho.sigma.b = chgDefaults(smoothPsi, k = 5.14, s=10))
##' }
##'
##' @importFrom lme4 lmer
##' @importFrom stats getCall
##' @export
rlmer <- function(formula, data, ..., method = c("DAStau", "DASvar"),
                  setting, rho.e, rho.b, rho.sigma.e, rho.sigma.b,
                  rel.tol = 1e-8, max.iter = 40 * (r + 1)^2, verbose = 0,
                  doFit = TRUE, init)
{
    ## Capture the call and the caller's frame so .rlmerInit can rebuild
    ## and evaluate an equivalent lmer() call for the starting values.
    lcall <- match.call()
    pf <- parent.frame()
    method <- match.arg(method)
    ## Build the rlmerMod object: initial classical fit plus resolution of
    ## all rho / rho.sigma weight-function defaults.
    lobj <- .rlmerInit(lcall, pf, formula, data, method, rho.e, rho.b, rho.sigma.e,
                       rho.sigma.b, rel.tol, max.iter, verbose, init, setting, ...)$obj
    ## Switch the predictor module to the DAS variant and initialize its
    ## rho functions and precomputed matrices.
    lobj@pp <- as(lobj@pp, "rlmerPredD_DAS")
    lobj@pp$method <- lobj@method
    lobj@pp$initRho(lobj)
    lobj@pp$initMatrices(lobj)
    ## required for max.iter:
    ## 'r' is referenced by the lazily evaluated default of 'max.iter'
    ## (40 * (r + 1)^2), so it must be bound before .rlmer() forces it.
    r <- len(lobj, "theta")
    return(.rlmer(lobj, rel.tol, max.iter, verbose, doFit))
}
## Internal workhorse for rlmer(): computes the initial (classical) fit and
## resolves all rho / rho.sigma weight-function defaults.
##
## Arguments mirror rlmer(); additionally:
##   lcall: the call captured by rlmer() via match.call(); it is rewritten
##          into an equivalent lmer() call for the starting fit and stored
##          in the returned object's @call slot.
##   pf:    the caller's frame, in which the rewritten lmer() call (and the
##          arguments it references) is evaluated.
##   init:  missing/NULL, a list of starting values, a fitting function, or
##          a merMod / rlmerMod object (see ?rlmer).
##
## Returns a list with components 'obj' (the initialized rlmerMod object)
## and 'init' (the object the starting values were taken from).
.rlmerInit <- function(lcall, pf, formula, data, method, rho.e, rho.b, rho.sigma.e,
                       rho.sigma.b, rel.tol, max.iter, verbose, init,
                       setting = c("RSEn", "RSEa"), ...) {
    if (missing(init) || is.null(init) || is.list(init)) {
        ## No usable starting object: rebuild the call as an lmer() call
        ## (dropping all rlmer-only arguments) and evaluate it in the
        ## caller's frame to obtain a classical REML fit.
        lcall2 <- lcall
        lcall2[setdiff(names(formals(rlmer)), names(formals(lmer)))] <- NULL
        lcall2$doFit <- NULL
        lcall2$REML <- TRUE
        lcall2[[1]] <- as.name("lmer")
        linit <- eval(lcall2, pf)
        if (!missing(init) && is.list(init)) {
            ## check sanity of input
            stopifnot(length(init$fixef) == length(fixef(linit)),
                      length(init$u) == length(getME(linit, "u")),
                      length(init$sigma) == length(sigma(linit)),
                      length(init$theta) == length(getME(linit, "theta")))
            ## convert object to rlmerMod
            linit <- as(linit, "rlmerMod")
            ## set all of the initial parameters, but do not fit yet
            setFixef(linit, unname(init$fixef))
            setSigma(linit, init$sigma)
            setTheta(linit, init$theta, fit.effects=FALSE, update.sigma=FALSE)
            setU(linit, init$u)
        }
        init <- linit
    } else if (is.function(init)) {
        ## user-supplied fitting function, e.g. lmerNoFit
        init <- do.call(init,list(formula=formula, data=data, REML=TRUE, ...))
    } else if (!is(init, "merMod") && !is(init, "rlmerMod")) {
        ## BUGFIX: added the missing space between the two message parts
        ## (previously rendered as "aborting.Expecting").
        stop("Unsuitable init object, aborting. ",
             "Expecting no, list (see ?rlmer), rlmerMod or merMod object")
    }
    lobj <- as(init, "rlmerMod")
    lobj@call <- lcall
    if (any(lobj@resp$weights == 0))
        stop("Observations with zero weights are not allowed.")
    ## Resolve the 'setting' shortcut; explicitly supplied rho arguments
    ## take precedence over the setting's defaults (with a warning).
    if (!missing(setting)) {
        if (!missing(rho.e) || !missing(rho.sigma.e) ||
            !missing(rho.b) || !missing(rho.sigma.b)) {
            overridden <- c()
            if (!missing(rho.e)) {
                overridden <- c(overridden, "'rho.e'")
            }
            if (!missing(rho.sigma.e)) {
                overridden <- c(overridden, "'rho.sigma.e'")
            }
            if (!missing(rho.b)) {
                overridden <- c(overridden, "'rho.b'")
            }
            if (!missing(rho.sigma.b)) {
                overridden <- c(overridden, "'rho.sigma.b'")
            }
            if (length(overridden) > 1) {
                args <- "Arguments "
            } else {
                args <- "Argument "
            }
            overridden <- paste(overridden, collapse = ", ")
            setting <- match.arg(setting)
            warning("Argument 'setting' specified together with ",
                    overridden, ". ", args, overridden,
                    " will override defaults of \"", setting, "\".")
        }
        setting <- match.arg(setting)
    } else {
        setting <- "RSEn"
    }
    ## Default weight functions: smoothed Huber psi for the residuals; for
    ## setting "RSEa" the scale estimates are retuned for higher asymptotic
    ## efficiency.
    if (missing(rho.e)) {
        rho.e <- smoothPsi
    }
    adjustForEfficiency <- setting == "RSEa"
    if (missing(rho.sigma.e)) {
        rho.sigma.e <- psi2propII(rho.e, adjust = adjustForEfficiency)
    }
    ## Per-block defaults: one rho function per random-effects block type.
    ## A non-list argument is recycled across all block types.
    if (missing(rho.b)) {
        rho.b <- lapply(lobj@dim, getDefaultRhoB, rho = rho.e)
    } else if (!is.list(rho.b)) {
        rho.b <- rep.int(list(rho.b), length(lobj@dim))
        ## TODO warn if low asymptotic efficiency?
    }
    ## Scale weight functions per block: scalar blocks get squared weights
    ## (Proposal 2 analogue); correlated blocks reuse the location weights.
    if (missing(rho.sigma.b)) {
        rho.sigma.b <- list()
        for (bt in seq_along(lobj@dim)) {
            if (lobj@dim[bt] == 1) {
                rho.sigma.b[[bt]] <- psi2propII(rho.b[[bt]], adjust = adjustForEfficiency)
            } else {
                rho.sigma.b[[bt]] <- rho.b[[bt]]
            }
        }
    } else if (!is.list(rho.sigma.b)) {
        rho.sigma.b <- rep.int(list(rho.sigma.b), length(lobj@dim))
        ## TODO warn if low asymptotic efficiency?
    }
    ## set arguments only relevant to rlmerMod
    lobj@rho.b <- rho.b
    lobj@rho.sigma.b <- rho.sigma.b
    if (!isTRUE(chk <- validObject(lobj))) stop(chk)
    lobj@rho.e <- rho.e
    lobj@rho.sigma.e <- rho.sigma.e
    ## DAStau only supports blocks of dimension <= 2; fall back otherwise.
    ## (Use the scalar short-circuit operator '&&' here: both operands are
    ## length one, and the sapply() is then skipped for method != "DAStau".)
    if (method == "DAStau" && any(sapply(lobj@idx, nrow) > 2)) {
        warning("Method 'DAStau' does not support blocks of size larger than 2. ",
                "Falling back to method 'DASvar'.")
        method <- "DASvar"
    }
    lobj@method <- method
    return(list(obj = lobj, init = init))
}
## Pick the default rho function for a random-effects block of the given
## dimension. Scalar blocks keep 'rho' unchanged; multi-dimensional blocks
## get a copy of 'rho' with a larger tuning constant k so the multivariate
## estimator matches the asymptotic efficiency of the location estimator
## implied by 'rho'.
getDefaultRhoB <- function(dimension, rho) {
    if (dimension == 1) {
        return(rho)
    }
    ## Precomputed tuning constants for the default huber / smoothed psi
    ## functions, block dimensions 2 through 7.
    knownTuningConstants <- c(5.14, 5.55, 5.91, 6.25, 6.55, 6.84)
    if (isDefaultHuberOrSmoothPsi(rho) && dimension < 8) {
        k <- knownTuningConstants[[dimension - 1]]
    } else {
        ## Otherwise solve numerically for the tuning constant that yields
        ## the same asymptotic efficiency as the location estimate.
        targetEfficiency <- asymptoticEfficiency(rho, "location")
        k <- findTuningParameter(targetEfficiency, rho, "tau", dimension)
    }
    chgDefaults(rho, k = k)
}
## Internal fitting driver shared by rlmer(): ensures usable starting
## values for theta, runs the actual fit while collecting warnings, and
## returns the model with updated robustness weights.
.rlmer <- function(lobj, rel.tol, max.iter, verbose, doFit) {
    ## doFit = FALSE: only set up the matrices and weights, skip fitting.
    if (!doFit) {
        lobj@pp$updateMatrices()
        return(updateWeights(lobj))
    }
    ## do not start with theta == 0
    ## (variance components whose lower bound is 0 are bumped to 1 so the
    ## algorithm does not start on the boundary of the parameter space)
    if (any(theta(lobj)[lobj@lower == 0] == 0)) {
        if (verbose > 0)
            cat("Setting variance components from 0 to 1\n")
        theta0 <- theta(lobj)
        theta0[lobj@lower == 0 & theta0 == 0] <- 1
        setTheta(lobj, theta0, fit.effects = TRUE, update.sigma = TRUE)
    } else {
        ## set theta at least once
        ## (triggers setTheta's side effects without refitting the effects)
        setTheta(lobj, theta(lobj), fit.effects = FALSE)
    }
    if (verbose > 0) {
        cat("\nrlmer starting values:\n")
        cat("sigma, theta: ", .sigma(lobj), ", ", theta(lobj), "\n")
        cat("coef: ", .fixef(lobj), "\n")
        if (verbose > 1)
            cat("b.s: ", b.s(lobj), "\n")
    }
    ## Run the fit, muffling warnings but recording their messages so they
    ## can be attached to the result's @optinfo slot below.
    curWarnings <- list()
    lobj <- withCallingHandlers(.rlmer.fit(lobj, rel.tol, max.iter, verbose),
                                warning = function(w) {
                                    curWarnings <<- append(curWarnings,list(conditionMessage(w)))
                                    invokeRestart("muffleWarning")
                                })
    lobj@optinfo$warnings <- curWarnings
    if (verbose > 0) {
        cat("sigma, theta: ", .sigma(lobj), ", ", theta(lobj), "\n")
        cat("coef: ", .fixef(lobj), "\n")
        if (verbose > 1)
            cat("b.s: ", b.s(lobj), "\n")
    }
    return(updateWeights(lobj))
}
## Dispatch to the appropriate DAS fitting routine: the non-diagonal variant
## when the random-effects covariance factor U_b has off-diagonal structure,
## the (simpler) diagonal variant otherwise. Both take the same arguments
## (note the argument order: verbose before max.iter).
.rlmer.fit <- function(lobj, rel.tol, max.iter, verbose) {
    fitter <- if (isDiagonal(.U_b(lobj))) {
        rlmer.fit.DAS
    } else {
        rlmer.fit.DAS.nondiag
    }
    fitter(lobj, verbose, max.iter, rel.tol)
}
## DAS method
## Fit a robust linear mixed-effects model whose random-effects covariance
## factor U_b is NOT diagonal, using the DAS approach. An EM-style loop
## alternates between computing the expected covariance T of the standardized
## random effects (via method "DASvar" or "DAStau") and updating the
## per-blocktype covariance parameters theta until the block-wise estimating
## equations are satisfied.
##
## lobj:      rlmerMod-like object (modified in place via setTheta/updateSigma)
## verbose:   integer verbosity level (0 = silent)
## max.iter:  maximum number of outer iterations
## rel.tol:   relative convergence tolerance (its square root is used below)
## method:    "DASvar" or "DAStau"
## checkFalseConvergence: if TRUE, flag blocks whose parameters converged
##            while the estimating equations remain unsatisfied
## returns:   lobj with @optinfo set
rlmer.fit.DAS.nondiag <- function(lobj, verbose, max.iter, rel.tol, method=lobj@method,
                                  checkFalseConvergence = TRUE) {
    if (!.isREML(lobj))
        stop("can only do REML when using averaged DAS-estimate for sigma")
    ## Prepare for DAStau: precompute the 4d Gauss-Hermite quadrature grid
    ## once (vectorized over all four dimensions).
    if (method == "DAStau") {
        ghZ <- as.matrix(expand.grid(lobj@pp$ghz, lobj@pp$ghz, lobj@pp$ghz, lobj@pp$ghz))
        ghZ12 <- ghZ[, 1:2]
        ghZ34 <- ghZ[, 3:4]
        ghw <- apply(as.matrix(expand.grid(lobj@pp$ghw, lobj@pp$ghw, lobj@pp$ghw, lobj@pp$ghw)), 1, prod)
    } else {
        ghZ <- ghw <- c()
    }
    ## fit model using EM algorithm
    conv <- FALSE
    convBlks <- rep(FALSE, length(lobj@blocks))
    iter <- 0
    rel.tol <- sqrt(rel.tol)
    ## compute kappa (consistency constants, one per block type)
    kappas <- .kappa_b(lobj)
    ## zero pattern for T matrix
    nzT <- as.matrix(crossprod(.bdiag(lobj@blocks[lobj@ind])) == 0)
    expectedEntriesLength <- prod(dim(nzT)) - sum(nzT)
    q <- lobj@pp$q
    ## false convergence indicator, one flag per block type
    fc <- rep(FALSE, length(lobj@blocks))
    ## Make sure `diff` (reported as optinfo$val below) is always bound:
    ## previously an early `break` before the first convergence test left it
    ## undefined and `val = diff` captured the base::diff function instead.
    diff <- NA
    if (verbose > 0) {
        theta0 <- theta(lobj)
        if (verbose > 1) {
            coef0 <- .fixef(lobj)
            b.s0 <- b.s(lobj)
            sigma0 <- .sigma(lobj)
        }
    }
    ## iterate
    while(!conv && (iter <- iter + 1) < max.iter) {
        if (verbose > 0) cat("---- Iteration", iter, " ----\n")
        thetatilde <- theta(lobj)
        sigma <- .sigma(lobj)
        ## get expected value of cov(\tvbs)
        q <- len(lobj, "b")
        T <- switch(method,
                    DASvar=lobj@pp$Tb(),
                    DAStau=calcTau.nondiag(lobj, ghZ12, ghZ34, ghw, .S(lobj), kappas, max.iter,
                                           rel.tol = rel.tol, verbose = verbose),
                    stop("Non-diagonal case only implemented for DASvar and DAStau"))
        T <- as(T, "CsparseMatrix")
        ## enforce the structural zero pattern on T
        T[nzT] <- 0
        ## apply chol to non-zero part only
        idx <- !.zeroB(lobj)
        ## stop if all random effects are zero
        if (!any(idx)) break
        Tidx <- T[idx, idx]
        if (any(diag(Tidx) == 0.0)) {
            ## partially dropped block: can't handle yet.
            idxZeroes <- which(idx)[which(diag(Tidx) == 0.0)]
            blockAffected <- which(sapply(lobj@idx, function(cand) any(cand == idxZeroes)))
            stop("Covariance matrix for random effects block ", blockAffected,
                 " with grouping factor ", names(lobj@cnms)[blockAffected],
                 " is singular. Please simplify the model and run again.")
        }
        L <- t(chol(Tidx))
        T.bs <- numeric(q) ## set the others to zero
        T.bs[idx] <- forwardsolve(L, b.s(lobj)[idx])
        ## compute robustness weights
        db <- .dk(lobj, sigma, FALSE, T.bs)[lobj@k]
        wbsEta <- wbsDelta <- numeric(q)
        for (type in seq_along(lobj@blocks)) {
            s <- lobj@dim[type]
            lidx <- as.vector(lobj@idx[[type]])
            if (s > 1) {
                ## for eta, we would actually need a little smaller
                ## tuning constants than for delta to get the same efficiency
                wbsEta[lidx] <- lobj@rho.sigma.b[[type]]@wgt(db[lidx])
                wbsDelta[lidx] <- (lobj@rho.sigma.b[[type]]@psi(db[lidx]) -
                                   lobj@rho.sigma.b[[type]]@psi(db[lidx] - s*kappas[type]))/s
            } else {
                lw <- lobj@rho.sigma.b[[type]]@wgt(db[lidx])
                wbsEta[lidx] <- lw
                wbsDelta[lidx] <- lw*kappas[type] ## adding kappa to wbsDelta in 1d case
            }
        }
        WbDelta <- Diagonal(x=wbsDelta)
        T <- as(WbDelta %*% T, "CsparseMatrix")
        bs <- sqrt(wbsEta) * b.s(lobj)
        Tstart <- 1
        ## cycle block types
        for(type in seq_along(lobj@blocks)) {
            bidx <- lobj@idx[[type]]
            s <- nrow(bidx)
            K <- ncol(bidx)
            Tend <- Tstart + s * s * K - 1
            if (convBlks[type]) {
                Tstart <- Tend + 1
                next
            }
            if (verbose > 5) {
                cat("Tau for blocktype ", type, ":", as.vector(T[bidx[,1],bidx[,1]]), "\n")
            }
            ## catch dropped vc; advance Tstart so the T entries of the
            ## remaining block types stay aligned. (A duplicate of this check
            ## used to run before Tend was computed and skipped WITHOUT
            ## advancing Tstart, misaligning all subsequent blocks.)
            if (all(abs(bs[bidx]) < 1e-7)) {
                if (verbose > 1)
                    cat("Block", type, "dropped (all = 0), stopping iterations.\n")
                Ubtilde <- lobj@blocks[[type]]
                pat <- Ubtilde != 0
                Lind <- Ubtilde[pat]
                thetatilde[Lind] <- 0
                convBlks[type] <- TRUE
                Tstart <- Tend + 1
                next
            }
            ## right hand side: mean of the s x s diagonal blocks of T.
            ## Fast path reads the contiguous slice of T@x directly when the
            ## sparse storage matches the expected zero pattern.
            if (length(T@x) == expectedEntriesLength) {
                rhs <- matrix(rowSums(matrix(T@x[Tstart:Tend], s * s)), s)
                if (isTRUE(getOption("robustlmm.check_rhs_optimisation"))) {
                    check <- matrix(0, s, s)
                    for (k in 1:K) {
                        check <- check + as.matrix(T[bidx[,k],bidx[,k]])
                    }
                    stopifnot(all.equal(rhs, check))
                }
            } else {
                rhs <- matrix(0, s, s)
                for (k in 1:K) {
                    rhs <- rhs + as.matrix(T[bidx[,k],bidx[,k]])
                }
            }
            rhs <- rhs / K
            Tstart <- Tend + 1
            ## left hand side: weighted empirical covariance of the spherical
            ## random effects of this block type
            lbs <- matrix(bs[bidx], ncol(bidx), nrow(bidx), byrow=TRUE)
            lhs <- crossprod(lbs / sigma) / K
            if (verbose > 2) {
                cat("LHS:", as.vector(lhs), "\n")
                cat("RHS:", as.vector(rhs), "\n")
                cat("sum(abs(LHS - RHS)):", sum(abs(lhs - rhs)), "\n")
            }
            diff <- abs(rhs - lhs)
            if (all(diff < rel.tol * max(diff, rel.tol))) {
                if (verbose > 1)
                    cat("Estimating equations satisfied for block", type,
                        ", stopping iterations.\n")
                convBlks[type] <- TRUE
                next
            }
            ## transformation that maps the current covariance onto the one
            ## implied by the estimating equation
            deltaT <- backsolve(lchol(rhs), lchol(lhs))
            if (verbose > 1) cat("deltaT:", c(deltaT), "\n")
            ## get old parameter estimates for this block
            Ubtilde <- lobj@blocks[[type]]
            pat <- Ubtilde != 0
            Lind <- Ubtilde[pat]
            diagLind <- diag(Ubtilde)
            Ubtilde[pat] <- thetatilde[Lind]
            ## update Ubtilde by deltaT
            thetatilde[Lind] <- tcrossprod(Ubtilde, deltaT)[pat]
            ## FIXME: check boundary conditions?
            ## check if varcomp is dropped
            if (all(thetatilde[diagLind] < 1e-7)) {
                thetatilde[Lind] <- 0
                convBlks[type] <- TRUE
                next
            }
            ## check if this block is converged
            diff <- abs(thetatilde[Lind] - theta(lobj)[Lind])
            if (verbose > 3)
                cat("criterion:", sum(diff), ">=",
                    rel.tol * max(diff, rel.tol), ":",
                    sum(diff) < rel.tol * max(diff, rel.tol), "\n")
            if (sum(diff) < rel.tol * max(diff, rel.tol)) {
                convBlks[type] <- TRUE
                ## check if estimating equations are satisfied
                if (checkFalseConvergence) {
                    if (verbose > 3)
                        cat("checking estimating equations:", sum(abs(lhs - rhs)),
                            ">", sqrt(rel.tol), ":", sum(abs(lhs - rhs)) > sqrt(rel.tol), "\n")
                    if (sum(abs(lhs - rhs)) > sqrt(rel.tol))
                        fc[type] <- TRUE
                }
                next
            }
        }
        ## set theta, refit effects, then update sigma without refitting
        setTheta(lobj, thetatilde, fit.effects = TRUE,
                 update.sigma = FALSE)
        updateSigma(lobj, fit.effects = FALSE)
        if (verbose > 0) {
            cat("delta theta:", format(theta0 - thetatilde, nsmall=20, scientific=FALSE),
                "\n")
            theta0 <- thetatilde
            if (verbose > 1) {
                cat(sprintf("delta coef: %.12f\n", sum(abs(coef0 - .fixef(lobj)))))
                cat(sprintf("delta u: %.12f\n", sum(abs(b.s0 - b.s(lobj)))))
                cat(sprintf("delta sigma: %.12f\n", abs(sigma0 - .sigma(lobj))))
                coef0 <- .fixef(lobj)
                b.s0 <- b.s(lobj)
                sigma0 <- .sigma(lobj)
                if (verbose > 2) {
                    cat("theta:", format(thetatilde, nsmall=20, scientific=FALSE), "\n")
                    cat("coef: ", .fixef(lobj),"\n")
                    cat("b.s: ", b.s(lobj), "\n")
                    cat("sigmae: ", .sigma(lobj), "\n")
                }
            }
        }
        if (all(convBlks)) conv <- TRUE
    }
    optinfo <- list(optimizer = "rlmer.fit.DAS.nondiag",
                    conv = list(opt = 0),
                    feval = iter,
                    warnings = list(),
                    val = diff)
    if (iter == max.iter) {
        warning(wt <- "iterations did not converge, returning unconverged estimate.")
        optinfo$warnings <- list(wt)
        optinfo$conv$opt <- 1
    }
    if (any(fc)) {
        warning(wt <- "algorithm converged, but estimating equations are not satisfied.")
        optinfo$warnings <- c(optinfo$warnings, list(wt))
        optinfo$conv$opt <- 2
    }
    lobj@optinfo <- optinfo
    return(lobj)
}
## DAS method
## Fit a robust linear mixed-effects model with diagonal random-effects
## covariance factor using the DAS approach: repeatedly update theta (and,
## within updateThetaTau, the effects and sigma) until the change in theta is
## below the relative tolerance or all components are zero.
##
## lobj:     rlmerMod-like object (modified in place)
## verbose:  integer verbosity level (0 = silent)
## max.iter: maximum number of iterations
## rel.tol:  relative convergence tolerance
## returns:  lobj with @optinfo set
rlmer.fit.DAS <- function(lobj, verbose, max.iter, rel.tol) {
    if (!.isREML(lobj))
        stop("can only do REML when using averaged DAS-estimate for sigma")
    ## fit
    converged <- FALSE
    theta0 <- theta(lobj)
    if (verbose > 1) {
        coef0 <- .fixef(lobj)
        b.s0 <- b.s(lobj)
        sigma0 <- .sigma(lobj)
    }
    iter <- 0
    ## Last observed absolute change in theta; NA until the first iteration
    ## completes. (Previously `val = diff` below picked up the base::diff
    ## function since no local `diff` was ever assigned in this scope.)
    crit <- NA_real_
    while (!converged && iter < max.iter) {
        iter <- iter + 1
        if (verbose > 0) cat("Iteration", iter, "\n")
        ## fit theta
        updateThetaTau(lobj, max.iter, rel.tol/10, verbose)
        theta1 <- theta(lobj)
        crit <- sum(abs(theta0 - theta1))
        if (verbose > 0) {
            cat(sprintf("delta theta: %.12f\n", crit))
            if (verbose > 1) {
                cat(sprintf("delta coef: %.12f\n", sum(abs(coef0 - .fixef(lobj)))))
                cat(sprintf("delta u: %.12f\n", sum(abs(b.s0 - b.s(lobj)))))
                cat(sprintf("delta sigma: %.12f\n", abs(sigma0 - .sigma(lobj))))
                coef0 <- .fixef(lobj)
                b.s0 <- b.s(lobj)
                sigma0 <- .sigma(lobj)
                if (verbose > 2) {
                    cat("theta: ", theta(lobj),"\n")
                    cat("coef: ", .fixef(lobj),"\n")
                    cat("b.s: ", b.s(lobj), "\n")
                    cat("sigmae: ", .sigma(lobj), "\n")
                }
            }
        }
        ## all zero or change smaller than relative tolerance
        ## all zero: we can't get out of this anyway, so we have to stop.
        converged <- all(theta1 == 0) || crit < 200*rel.tol*sum(abs(theta0))
        if (verbose > 1)
            cat(sprintf("Criterion: %.12f, %.12f", crit,
                        crit / rel.tol / sum(abs(theta0)) / 200), "\n")
        theta0 <- theta1
    }
    optinfo <- list(optimizer = "rlmer.fit.DAS",
                    conv = list(opt = 0),
                    feval = iter,
                    warnings = list(),
                    val = crit)
    if (iter == max.iter) {
        warning(wt <- "iterations did not converge, returning unconverged estimate.")
        optinfo$warnings <- list(wt)
        optinfo$conv$opt <- 1
    }
    lobj@optinfo <- optinfo
    return(lobj)
}
|
1eca0040cb7e4f60f0631d5d8f6567772ecc45a4 | 608369a849cf5244ac51d9180d142bceeba0a7ba | /R/qs-package.R | 85a018320b40db072bbca7cac47730a3ac3039e1 | [] | no_license | traversc/qs | 437d72b46b184d99e01e90d7cd00f3e5aaa155e5 | 792ae169ba0959568c69c18598d90faba33be623 | refs/heads/master | 2023-04-11T14:53:37.867427 | 2023-03-31T18:00:55 | 2023-03-31T18:00:55 | 163,551,739 | 361 | 30 | null | 2021-11-12T17:30:47 | 2018-12-30T00:54:43 | C | UTF-8 | R | false | false | 147 | r | qs-package.R | #' @useDynLib qs, .registration = TRUE
#' @keywords internal
#' @import RApiSerialize
#' @import stringfish
#' @importFrom Rcpp evalCpp
"_PACKAGE"
|
77ef3026d277eacc7fd5cbdba0c0664dd0c8e67d | e04b27d7341137d7b98842a1448fa914d4bb9ecc | /analysis/eulangs.R | 9a362508d615fc1b4248d76b6611411114dcea18 | [] | no_license | hannes/whocareseu | 9ad88e7b72241d455d8f5ff166a17b580a772bb4 | 6cea28f8014ebbe54de4dae8455c9fffbf0fa629 | refs/heads/master | 2022-03-05T17:49:16.243806 | 2014-01-15T12:58:42 | 2014-01-15T12:58:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,869 | r | eulangs.R | # Geonames localized country name retrieval for Europe, <hannes@muehleisen.org>, 2014-01-13
## Fetch the country table (English names) from the geonames API and keep
## only the European countries of interest.
## NOTE(review): username=demo is heavily rate-limited — confirm a real
## geonames account before rerunning this script.
gncountryInfoCSV <- "http://api.geonames.org/countryInfoCSV?username=demo&"
# get country data from geonames API
countries <- read.csv(url(paste0(gncountryInfoCSV,"lang=en")),sep="\t",stringsAsFactors=F)
# ISO country codes for the countries in Europe
eucountrycodes <- scan(text="BE BG CZ DK DE EE IE GR ES FR HR IT CY LV LT LU HU MT NL AT PL PT RO SI SK FI SE GB NO CH",sep=" ", what=character())
#only countries in europe are interesting
eucountries <- countries[countries$iso.alpha2 %in% eucountrycodes,]
## Extract the set of two-letter primary language codes from one or more
## comma-separated locale strings (e.g. "en-GB,fr" -> c("en", "fr")).
## Region subtags after "-" are discarded; codes longer than two characters
## are dropped. Returns a sorted character vector of unique codes.
## Uses vapply (not sapply) so the result is a character vector even for
## empty input, where the original errored in sort().
cleanuplangs <- function(langstrs) {
  tokens <- unlist(strsplit(langstrs, ",", fixed = TRUE))
  primary <- vapply(strsplit(tokens, "-", fixed = TRUE), `[[`, character(1), 1)
  langcodes <- sort(unique(primary))
  langcodes[nchar(langcodes) == 2]
}
## Collect all language codes spoken in the EU countries, then build the
## cross product (country, language, other country) with localized names.
langcodes <- cleanuplangs(eucountries$languages)
cat(paste0("'",langcodes,"'",collapse=", "))
# this is dirty, I know, but this gets the alternate names for holland and england, which are non-official, but used a lot
#select langcode,'GB' as country,altname from alternatenames where altid=6269131 and langcode in ('bg', 'br', 'ca', 'co', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'eu', 'fi', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl', 'hr', 'hu', 'it', 'lb', 'lt', 'lv', 'mt', 'nb', 'nl', 'nn', 'no', 'oc', 'pl', 'pt', 'rm', 'ro', 'ru', 'sc', 'se', 'sh', 'sk', 'sl', 'sr', 'sv', 'tr') union all select langcode,'NL' as country,altname from alternatenames where altid=4996248 and langcode in ('bg', 'br', 'ca', 'co', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'eu', 'fi', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl', 'hr', 'hu', 'it', 'lb', 'lt', 'lv', 'mt', 'nb', 'nl', 'nn', 'no', 'oc', 'pl', 'pt', 'rm', 'ro', 'ru', 'sc', 'se', 'sh', 'sk', 'sl', 'sr', 'sv', 'tr');
## Hand-curated alternate names ("England", "Holland") per language,
## extracted from the geonames alternatenames table with the query above.
altnamestr <- "lang,iso.alpha2,name
es,GB,Inglaterra
it,GB,Inghilterra
eu,GB,Ingalaterra
en,GB,England
fr,GB,Angleterre
ca,GB,Anglaterra
sv,GB,England
nl,GB,Engeland
br,GB,Bro-Saoz
pt,GB,Inglaterra
hr,GB,Engleska
sk,GB,Anglicko
ga,GB,Sasana
lt,GB,Anglija
oc,GB,Anglatèrra
rm,GB,Engalterra
mt,GB,Ingilterra
gl,GB,Inglaterra
el,GB,Αγγλία
tr,GB,İngiltere
gd,GB,Sasainn
sr,GB,Енглеска
fo,GB,Ongland
sc,GB,Inghilterra
fy,GB,Ingelân
sl,GB,Anglija
se,GB,Englánda
fi,GB,Englanti
et,GB,Inglismaa
lv,GB,Anglija
cs,GB,Anglie
cy,GB,Lloegr
bg,GB,Англия
ru,GB,Англия
en,NL,Holland
fr,NL,Holland
de,NL,Holland
nl,NL,Holland
no,NL,Holland
pt,NL,Holland
ru,NL,Голландия
sr,NL,Холанд
de,GB,England"
altnames <- read.csv(text=altnamestr,sep=",")
# get the names of the countries in all the languages
countrynames <- do.call("rbind",lapply(langcodes,function(lang) {
  langcountry <- read.csv(url(paste0(gncountryInfoCSV,"lang=",lang)),sep="\t",stringsAsFactors=F)
  langcountry$lang <- lang
  langcountry[langcountry$iso.alpha2 %in% eucountrycodes,c("lang","iso.alpha2","name")]
}))
countrynames <- rbind(countrynames,altnames)
# remove bracketed things
countrynames$name <- gsub(" \\(.+\\)","",countrynames$name)
# time for some reshaping
## For each country: one row per language spoken there (English always added).
countriesandlangs <- do.call("rbind",apply(eucountries[,c("iso.alpha2","languages")],1,function(row) {
  ret <- data.frame(lang=c("en",cleanuplangs(row[[2]])),stringsAsFactors=F)
  ret$iso.alpha2 <- row[[1]]
  ret[,c(2,1)]
}))
## Cross with all other EU countries: (country, lang, othercountry) triples.
countrylangcountrycombos <- do.call("rbind",apply(countriesandlangs,1,function(row) {
  ret <- data.frame(othercountry=eucountrycodes,stringsAsFactors=F)
  ret$country <- row[[1]]
  ret$lang <- row[[2]]
  ret[,c(2,3,1)]
}))
row.names(countrylangcountrycombos) <- NULL
finallist <- merge(countrylangcountrycombos,countrynames,by.x=c("lang","othercountry"),by.y=c("lang","iso.alpha2"))[,c(1,3,2,4)]
# go to adwords keyword planner, "get search volumne for a list of keywords", set country, keyword ideas, download, excel format
# generate the input for the adwords thing
invisible(lapply(eucountrycodes,function(country) {
  cname <- eucountries[eucountries$iso.alpha2 == country,]$name
  cat(paste0(cname," (",country,"): ", paste0(sort(unique(finallist[finallist$country==country,]$name)),collapse=", ")),"\n\n")
  NULL
}))
## Deliberate manual breakpoint: the remainder of the script assumes the
## AdWords CSVs have been downloaded into eulangs-results/. Comment the stop()
## out (or run the rest interactively) once they exist.
stop("Now go ask Google (https://adwords.google.com/ko/KeywordPlanner/)")
# assume we have everything, load the CSVs
googleadwordsfreqs <- do.call("rbind",lapply(eucountrycodes,function(country) {
  gdata <- read.csv(paste0("eulangs-results/",country,".csv"),sep="\t",stringsAsFactors=F,fileEncoding="UTF-16")[,c(2,4)]
  gdata$country <- country
  gdata
}))
# map keywords back to countries
cckeywordcount <- merge(googleadwordsfreqs,finallist,by.x=c("country","Keyword"),by.y=c("country","name"),all.x=T)
row.names(cckeywordcount) <- NULL
# remove duplicate entries for same other country with same keyword ("Malta")
cckeywordcount <- cckeywordcount[!duplicated(cckeywordcount[,c("country","Keyword","othercountry")]),]
# aggregate avg. monthly searches by othercountry by summation
final <-aggregate(cckeywordcount$Avg..monthly.searches, by=list(cckeywordcount$country,cckeywordcount$othercountry),FUN=sum,na.rm=T)
names(final) <- c("sourcecountry","targetcountry","monavgsearches")
final <- final[order(final$sourcecountry),]
row.names(final) <- NULL
final$sourcecountry <- factor(final$sourcecountry)
final$targetcountry <- factor(final$targetcountry)
final <- final[final$targetcountry != final$sourcecountry,]
## Normalize: per-source-country search totals -> percentage share per target.
final <- merge(final,aggregate(final$monavgsearches, by=list(final$sourcecountry),FUN=sum),by.x=c("sourcecountry"),by.y=c("Group.1"))
final$searchratio <- round((final$monavgsearches/final$x)*100)
final$x <- final$monavgsearches <- NULL
write.csv(final,"results.csv",row.names=F)
## Nested list target -> source -> ratio, for the JS visualization.
bytarget <- lapply(split(final,final$targetcountry),function(group) {
  lapply(split(group,group$sourcecountry),function(group) {
    group$searchratio
  })
})
library(rjson)
bytargetjson <- paste0("var bytarget = ",toJSON(bytarget))
write(bytargetjson,"bytarget.js")
# that's all
bbce107dac0084adc5e06b0beb9e0b7e9977e22a | 659ad54e6a8c297af8cce28eee53b175e21aef99 | /Sentiment Analysis (1).R | 466b72f7cc2a996ed1237d5c58ab107d32e8d749 | [
"MIT"
] | permissive | yashchoubey/BabyDestination | 425e1a11aa673efdfc4f2258e1813a551babe4ef | a5ef377850a4d6ead5b3bc18e81d2d85b70edf9e | refs/heads/master | 2020-03-20T15:11:01.693869 | 2018-07-27T05:48:31 | 2018-07-27T05:48:31 | 137,505,932 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,853 | r | Sentiment Analysis (1).R | ###for reading file
## Sentiment analysis of an exported WhatsApp chat: strip sender/time stamps,
## clean and stem the text, count word frequencies, plot a word cloud and an
## NRC sentiment bar chart.
library(readtext)
conn = file("C:/Users/baby destination/Documents/R/win-library/3.4/WhatsApp Chat with BD Moms-To-Be(Pregnant).txt", 'r')
linn <-readLines(conn)
linesData <- ""
## Keep only the message text: lines matching "<date> - <sender>: <msg>"
## contribute their <msg> part; non-matching lines (continuations, system
## messages) are dropped entirely.
for (i in 1:length(linn)){
  data <- linn[i]
  dataProcessed <- gsub('(.* - .*: )(.*)', '\\1', data)
  if(dataProcessed == data) {
    dataProcessed <- ""
  } else {
    dataProcessed <- gsub('(.* - .*: )(.*)', '\\2', data)
  }
  linesData <- paste(linesData, dataProcessed)
  #process data to remove sender info and time info
}
close(conn)
###For stopwords
#install.packages("tm")
library(tm)
library(NLP)
###Cleaning the data
CleanData <- linesData
CleanData <- tolower(CleanData) #Turn the data into lower case
CleanData <- removeNumbers(CleanData)
mystopwords <- c("pm","am", "<", ">", stopwords("en"))
CleanData <- removeWords(CleanData, mystopwords) #removing stop words
##Stemmer
#install.packages("SnowballC")
library(SnowballC)
CleanData <- wordStem(CleanData, language = "en")
###get frequently used words
#install.packages(c("qdap", "qdapTools", "rJava"))
library(qdapTools)
library(rJava)
library(qdap)
TextFrequency <- freq_terms(CleanData, at.least = 1)
library(wordcloud)
wordcloud(TextFrequency$WORD, TextFrequency$FREQ, colors = TextFrequency$FREQ, max.words = 200)
###
#install.packages("syuzhet")
library(syuzhet)
## NRC lexicon: 8 emotions + positive/negative per word (columns 2:11 below).
Sentiments <- get_nrc_sentiment(TextFrequency$WORD)
Sentiments <- cbind("Words" = TextFrequency$WORD, Sentiments)
SentimentsScore <- data.frame("Score" = colSums(Sentiments[2:11]))
TotalSentiments <- cbind("Sentiments" = rownames(SentimentsScore), SentimentsScore)
rownames(TotalSentiments) <- NULL
library(ggplot2)
ggplot(data = TotalSentiments, aes(x = Sentiments, y = Score)) + geom_bar(stat = "identity", aes(fill = Sentiments))
|
dac796aab6b65c93634cf7b2e9c681d480d3b95e | 543449de92eb4aef5a2708b5426cae700ffd5f8a | /run_stan_models_wrapper_function.R | 65f6c860aea452b397a94ecf36d677049ede4d89 | [] | no_license | nacemikus/belief-volatility-da-trustgame | aa1f28c806e5e277258b680abe0f6dbec672b075 | 8208ec1ec9cca9f78688e31359479e475893c63e | refs/heads/master | 2023-04-15T03:03:31.684956 | 2022-12-02T09:33:46 | 2022-12-02T09:33:46 | 491,472,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,737 | r | run_stan_models_wrapper_function.R | # =============================================================================
#### Info ####
# =============================================================================
# hierarchical model
## Fit a hierarchical Stan model to the trust-game behavioural data and save
## the resulting stanfit object.
## modelfile:     path to the .stan model file
## savemodelname: path for the saved fit (saveRDS)
## nIter_set:     number of MCMC iterations; values < 3 trigger a quick
##                1-chain, 2-iteration "try" run for debugging
## nWarmup_set:   warmup iterations (0 = default of nIter_set/3)
## simulate:      1 = sample from the prior only (passed through to the model)
run_model_fit <- function(modelfile, savemodelname, nIter_set, nWarmup_set=0, simulate = 0) {
  if (nIter_set< 3) {
    sampling = "try"
  } else sampling = "sampling"
  if (nWarmup_set == 0) {nWarmup_set = ceiling(nIter_set/3)}
  # =============================================================================
  #### Construct Data ####
  # =============================================================================
  # clear workspace
  library(rstan)
  library(ggplot2)
  # library(R.matlab)
  library(tidyr)
  library(dplyr)
  ### load data
  # data_temp <- read.csv ('R_table_beh.csv', header = T, sep="," )
  data_temp <- readRDS("Behavioural_data.rds")
  if (simulate ==1) print("simulating from the prior")
  ### prepare data
  subjList <- unique(data_temp$ID)
  removeSubjects<- {}
  ### load data
  ankk <- data_temp$ankk[data_temp$Trial==1]
  numSub <- length(subjList)
  swm_error = data_temp$error_sum_all[data_temp$Trial==1]
  swm_error = ave(swm_error, FUN =scale)
  ## NOTE(review): the next expression is computed but its value is discarded
  ## inside a function (no autoprinting) — likely a leftover sanity check.
  mean(swm_error)
  ## Per-subject trial counts and removal flags.
  Tsubj <- as.vector(rep(0, numSub))
  Tsubj_remove <- as.vector(rep(0, numSub))
  for (ss in 1:numSub) {
    Tsubj[ss] <- max(data_temp$Trial[data_temp$ID == subjList[ss]]);
  }
  maxTrials <- max(Tsubj)
  ## Subject x trial matrices, padded with 1 beyond each subject's last trial.
  transfer <- array(1,c(numSub, maxTrials))
  backtransfer <- array(1,c(numSub, maxTrials))
  trustee <- array(1,c(numSub, maxTrials))
  for (i in 1:numSub) {
    tmp <- subset(data_temp, data_temp$ID==subjList[i])
    transfer[i,1:Tsubj[i]] = tmp$transfer
    backtransfer[i,1:Tsubj[i]] = tmp$backtransfer
    trustee[i,1:Tsubj[i]] = tmp$trustee
    if (subjList[i]%in% removeSubjects) Tsubj_remove[i] = 1;
  }
  sulpiride <- data_temp$drug[data_temp$Trial==1] # sul
  serum <- data_temp$serum[data_temp$Trial==1] # ser
  dataList <- list(N = numSub, T = maxTrials, Tsubj = Tsubj, transfer = transfer, backtransfer=backtransfer,
                   trustee = trustee, ankk =ankk, sulpiride= sulpiride, serum = serum, simulate = simulate,
                   Tsubj_remove = Tsubj_remove, swm_error = swm_error)
  if (length(removeSubjects)!=0) {
    print(paste("Removing subjects: "))
    print(removeSubjects)
  }
  # =============================================================================
  #### Running Stan ####
  # =============================================================================
  rstan_options(auto_write = TRUE)
  options(mc.cores = 4)
  if (sampling == "try") {
    cat("Trying.. \n")
    nIter <-2
    nChains <-1
    nWarmup <- 1
    nThin <- 1
  } else {
    nIter <-nIter_set # 2000
    nChains <- 4
    nWarmup <- nWarmup_set
    nThin <- 1
  }
  cat("Estimating", modelfile, "model... \n")
  startTime = Sys.time(); print(startTime)
  cat("Calling", nChains, "simulations in Stan... \n")
  fit_rl <- stan(modelfile,
                 data = dataList,
                 chains = nChains,
                 iter = nIter,
                 warmup = nWarmup,
                 thin = nThin,
                 init = "random",#"0", #inits_list,#,
                 seed = 1450154637,
                 control = list(
                   adapt_delta = 0.95, max_treedepth = 15
                 )
  )
  cat("Finishing", modelfile, "model simulation ... \n")
  endTime = Sys.time(); print(endTime)
  cat("It took",as.character.Date(endTime - startTime), "\n")
  cat("Saving in ", savemodelname, "... \n")
  saveRDS(fit_rl, file = savemodelname)
}
|
c44570ce7ff0ad8682ea8b1cb6a41a543c31eb9b | be47c4a60d441650573e845ed156a4f98a8755b8 | /ui.R | 3add6cc85230fdb8520b238d0d32ccf5dc71168f | [] | no_license | niconaut/NYC_Crime | cff62edab2d0b4a6e41fd78d55fb994e8d2b8091 | 4b129d50ff9c504cbcc1a771075bb41bd2d11bfd | refs/heads/master | 2021-08-09T01:16:41.453434 | 2020-04-30T20:15:19 | 2020-04-30T20:15:19 | 168,894,807 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,268 | r | ui.R | #Define UI for shiny app
## Shiny UI: sidebar with sliders for the date range, map center (lat/lon),
## zoom level and a crime-type filter; main panel shows the rendered map
## (output slot "map", produced by the matching server.R).
shinyUI(fluidPage(
  # Application title
  titlePanel("DS Project Nico, Alex"),
  titlePanel("Interactive Crime Statistics Visualization"),
  # Sidebar with an example slider input for the number of bins
  sidebarLayout(
    sidebarPanel(
      ## Date range slider covering the whole of 2015.
      sliderInput("time",
                  "Time Interval:",
                  min = as.Date("2015-01-01","%Y-%m-%d"),
                  max = as.Date("2015-12-31","%Y-%m-%d"),
                  value = c(as.Date("2015-01-01","%Y-%m-%d"), as.Date("2015-12-31","%Y-%m-%d"))),
      ## Map center, bounded to the NYC area.
      sliderInput("lat",
                  "Center of Focus (Latitude):",
                  min = 40.5,
                  max = 40.9,
                  value = 40.7),
      sliderInput("lon",
                  "Center of Focus (Longitude):",
                  min = -74.3,
                  max = -73.7,
                  value = -73.95),
      sliderInput("zoom",
                  "Zoom Level",
                  min = 8,
                  max = 12,
                  value = 10),
      ## Crime categories to display; all selected by default.
      checkboxGroupInput("cb","Types of Crime",c("FELONY", "MISDEMEANOR", "VIOLATION"),selected = c("FELONY","MISDEMEANOR","VIOLATION"))
    ),
    # Show a plot of the generated map
    mainPanel(
      plotOutput("map")
    )
  )
))
7f1ef982ad3ffc21e49b04145e66d60f48144df6 | b39388e64e16776fedc50e2455c9f42de86c5c07 | /prediction/linear_predictor_inventory.R | 8477c71d006c6e9c8647eba852fe6f0a5074c184 | [
"Apache-2.0"
] | permissive | wuyou33/FHC_Timeseries_Error | f419ab60f3af753a7a2a783f1c426edd1a6dc147 | 1931aa8b753c3ccb975b3c1dbe094b702b597772 | refs/heads/master | 2020-06-06T19:10:59.097920 | 2016-07-30T12:35:34 | 2016-07-30T12:35:34 | 192,831,034 | 1 | 0 | null | 2019-06-20T01:59:17 | 2019-06-20T01:59:17 | null | UTF-8 | R | false | false | 2,657 | r | linear_predictor_inventory.R | ################################################################################
# (c) Copyright 2015 Farzad Noorian.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# linear prediction functions for the inventory problem
source("linear_predictor.R")
## Rolling-origin AR forecast errors for the inventory problem: at each step i
## an AR model (via fit_beta_matrix/predict_beta_matrix from
## linear_predictor.R) is refit on the data observed so far and the error of
## each forecast horizon (including horizon 0, which is always 0) is recorded.
predict_inventory_error <- function(orders, train, test) {
  # computes error of predicting "train" against "test" using AR models for the inventory problem
  # train: observed values
  # test: target of prediction
  # orders: p for ar(p)
  # returns: error for each horizon, concatenated
  max.horizon = length(orders)
  pred = list()
  actual = list()
  # NOTE(review): 1:length(test) misbehaves for empty `test` — assumed non-empty.
  for (i in 1:length(test)) {
    ## Observations available at step i, centered before fitting.
    new_train = c(train, test[0:i])
    new_traing_mean = mean(new_train)
    new_train = new_train - new_traing_mean
    # fit the beta matrix
    beta = fit_beta_matrix(order = orders, ts = new_train)
    # get the horizon (shrinks near the end of the test series)
    horizon = min(length(test) - i , max.horizon)
    # forecast; add the mean back after predicting on centered data
    prediction = c()
    if (horizon > 0) {
      p = predict_beta_matrix(beta, new_train)
      prediction = p[1:horizon] + new_traing_mean
    }
    pred[[i]] = c(test[i], prediction)
    actual[[i]] = test[i + 0:(horizon)]
  }
  e = do.call(c, pred) - do.call(c, actual)
  return (e)
}
## Rolling-origin errors of the Naive (persistence) forecast for the inventory
## problem: at each step i the last observed value is repeated over the
## remaining horizon (capped at max.horizon), and the error for each horizon
## (including horizon 0, which is always 0) is recorded.
## train: observed values
## test: target of prediction
## max.horizon: max length of horizon
## returns: error for each horizon, concatenated
predict_inventory_naive_error <- function(train, test, max.horizon) {
  pred = list()
  actual = list()
  ## seq_along handles an empty `test` gracefully (1:length(test) would
  ## iterate over c(1, 0) and mis-index).
  for (i in seq_along(test)) {
    new_train = c(train, test[0:i])
    # horizon shrinks near the end of the test series
    horizon = min(length(test) - i , max.horizon)
    # forecast (i.e., repeat the last identified value)
    prediction = c()
    if (horizon > 0) {
      prediction = rep.int(tail(new_train, 1), horizon)
    }
    pred[[i]] = c(test[i], prediction)
    actual[[i]] = test[i + 0:(horizon)]
  }
  e = do.call(c, pred) - do.call(c, actual)
  return (e)
}
|
069673d15e9249d89aac1df83bc8ba94ef84e481 | ae410ff373dce6144eae2472a224abbc7703bfc3 | /plot3.R | 58f9b50815365dd44c22055bf981c2b5d89af5ca | [] | no_license | beatasone/ExData_Plotting1 | c496d924139a25b61eed2e16561ad661c46684bb | 7f84ba4031c27685ce405241adf3498d18bc7d31 | refs/heads/master | 2021-01-14T12:47:41.659255 | 2015-04-09T01:02:39 | 2015-04-09T01:02:39 | 33,640,193 | 0 | 0 | null | 2015-04-09T00:45:47 | 2015-04-09T00:45:47 | null | UTF-8 | R | false | false | 1,251 | r | plot3.R | #set classes for colunns for faster reading of large data set
## plot3.png: energy sub-metering (3 channels) over 2007-02-01/02 from the
## UCI household power consumption data set.
## Read a few rows first to infer column classes, which speeds up the full read.
tab5rows <- read.table("household_power_consumption.txt",sep = ";", header = TRUE, nrows = 5)
classes <- sapply(tab5rows, class)
#read in the table, use csv for speed
powertot <- read.csv("household_power_consumption.txt", sep = ";", header= TRUE, na.strings = "?", colClasses = classes)
#subest the data for two dates for course project
powersub<-subset(powertot, Date=="1/2/2007"|Date=="2/2/2007")
#modify date and time
powersub$dt <-paste(powersub$Date, powersub$Time, sep = " " )
powersub$x <- strptime(powersub$dt, format='%d/%m/%Y %H:%M:%S')
#not needed for plot but can use to check day of the week, %a - abbrev weekday
powersub$xaxis <- strftime(powersub$x, '%a %H:%M')
png(file = "plot3.png",width=480,height=480) ## Open png device; create plot in my working directory
with(powersub,
     plot(powersub$x, powersub$Sub_metering_1 , type="l", ylab="Energy sub metering", xlab="") )
lines(powersub$x, powersub$Sub_metering_2, col = "red")
lines(powersub$x, powersub$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd=c(2.5,2.5, 2.5),col=c("black","red", "blue"))
dev.off() ## Close the png file device
cdf5b281a69b9eaeb4fa89241cfd01abd2b937b4 | b9db037ee7bc2ebf9c228ad1f66fecabccfa70be | /man/Penalty-class.Rd | 4d5ca9c9f3f07facee10cdf2652b0c753e716b92 | [] | no_license | IsaakBM/prioritizr | 924a6d8dcc7c8ff68cd7f5a2077de2fa1f300fe7 | 1488f8062d03e8736de74c9e7803ade57d6fcc29 | refs/heads/master | 2020-12-10T06:23:19.437647 | 2019-12-22T00:04:20 | 2019-12-22T00:04:20 | 233,524,401 | 1 | 0 | null | 2020-01-13T06:13:19 | 2020-01-13T06:13:18 | null | UTF-8 | R | false | true | 632 | rd | Penalty-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Penalty-proto.R
\name{Penalty-class}
\alias{Penalty-class}
\alias{Penalty}
\title{Penalty prototype}
\description{
This prototype is used to represent penalties that are added to the
objective function when making a conservation problem.
\strong{This prototype represents a recipe; to actually
add penalties to a planning problem, see the help page on
\code{\link{penalties}}. Only experts should use this class directly.} This
prototype inherits from the \code{\link{ConservationModifier-class}}.
}
\seealso{
\code{\link{ConservationModifier-class}}.
}
|
4eea9d8c06bda732792bef5a5429c2bcb63cb46e | 29cce8539013835a5cadc57ac6309af7388d49cd | /sliceSampler.R | 23a738c42ee9171754dc99e4b7d2d6bdc89c9c95 | [] | no_license | gusl/SliceSamplingDemo | 171db639517f5e6266eff17d6d0ebc6053fb4043 | 4f35ac81b8d383b321221ad6c8ced42e23dda8b9 | refs/heads/master | 2021-01-15T14:46:37.536770 | 2014-05-12T15:08:19 | 2014-05-12T15:08:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,064 | r | sliceSampler.R | sliceSampler1D <- function(f, N=5000, rInitDistr=function() rnorm(1, mean=0, sd=10)){
# --- body of sliceSampler1D: univariate slice sampling ---
# Draw initial points until one lands where the target density f is
# positive, so the first slice is well defined.
while(TRUE){
currTheta <- rInitDistr()
if (f(currTheta)>0) break
}
s <- vector()
for (i in 1:N){
# Vertical step: draw the auxiliary "slice level" uniformly under f(theta).
height <- f(currTheta)
sliceLevel <- runif(1,min=0,max=height)
halfWidth <- 0.01
## Horizontal step: grow the bracket by doubling until both endpoints
## fall below the slice level, so the bracket covers the whole slice
## around the current point.
while((f(currTheta-halfWidth)>sliceLevel)||(f(currTheta+halfWidth)>sliceLevel))
halfWidth <- 2*halfWidth
while(TRUE){ ## rejection-sample until the proposal lies inside the slice
propTheta <- runif(1, min=currTheta-halfWidth, max=currTheta+halfWidth)
if(f(propTheta)>sliceLevel) break
}
currTheta <- propTheta
s[i] <- currTheta
}
s
}
sliceSampler <- function(logf, N=50, rInitDistr=function() rnorm(k, mean=thetaHat, sd=1)){
# Bivariate slice sampler on the log scale: draws N samples from the
# unnormalized log-density 'logf' by updating one coordinate at a time.
# NOTE(review): the default rInitDistr refers to 'k' (resolved lazily to
# the k <- 2 below) and to 'thetaHat', which must exist in the calling
# environment -- TODO confirm where thetaHat is defined.
# NOTE(review): the "hello" prints and inspect() calls look like debug
# tracing; inspect() is defined elsewhere in this project.
k <- 2 ## number of parameters (the sampler is hard-coded to 2 dimensions)
print("hello1")
# Draw initial points until the log-density is finite (inside the support).
while(TRUE){
currTheta <- rInitDistr()
if (logf(currTheta)>-Inf) break
}
print("hello2")
s <- matrix(nrow=N,ncol=2)
for (i in 1:N){
inspect(i)
for (j in 1:k){ ## update each dimension separately (coordinate-wise)
# Vertical step: on the log scale, subtracting an Exp(1) draw from
# logf(theta) corresponds to drawing the slice level uniformly
# under f(theta).
height <- logf(currTheta)
sliceLevel <- height - rexp(1)
# Bracket only along coordinate j (all other components stay 0).
halfWidth <- rep(0,k)
halfWidth[j] <- 0.01
## Double the bracket until both endpoints fall below the slice level.
while(TRUE){
if((logf(currTheta-halfWidth) < sliceLevel) && logf(currTheta+halfWidth) < sliceLevel) break
halfWidth <- 2*halfWidth
}
while(TRUE){ ## rejection-sample until the proposal lies inside the slice
propThetaJ <- runif(1, min=currTheta[j]-halfWidth[j], max=currTheta[j]+halfWidth[j])
propTheta <- currTheta; propTheta[j] <- propThetaJ
if(logf(propTheta)>sliceLevel) break
}
currTheta <- propTheta
}
inspect(currTheta)
s[i,] <- currTheta
}
s
}
## make a pedagogical animation
sliceSamplerDemo <- function(f, xlim, waitTime=1, N=5000, rInitDistr=function() rnorm(1, mean=3, sd=1), ...){
# Animated, step-by-step visualization of univariate slice sampling on
# density f over xlim.  Each conceptual step is drawn on the current
# graphics device with pauses (multiples of waitTime) in between.
# Returns the vector of sampled values, like sliceSampler1D.
# NOTE(review): makeGrid() and inspect() are helpers defined elsewhere
# in this project -- confirm they are in scope before sourcing.
gr <- makeGrid(xlim[1],xlim[2],1000)
plot(gr,sapply(gr,f), type="l", xlab="theta", ylab="unnormalized density", ...)
# Draw initial points until one lands where the density is positive.
while(TRUE){
currTheta <- rInitDistr()
if (f(currTheta)>0) break
}
s <- vector()
Sys.sleep(waitTime)
for (i in 1:N){
Sys.sleep(4*waitTime); print("Drawing current theta")
##abline(v=currTheta, col="black")
# Vertical segment from the axis up to f(currTheta).
height <- f(currTheta)
points(c(currTheta,currTheta),c(0,height), type="l", col="black")
# Vertical step: slice level drawn uniformly under the density.
sliceLevel <- runif(1,min=0,max=height)
Sys.sleep(4*waitTime); print("Drawing slice")
##abline(h=sliceLevel, col="black")
halfWidth <- 0.01
## Horizontal step: double the bracket until both endpoints fall below
## the slice level, so the bracket covers the slice around currTheta.
while((f(currTheta-halfWidth)>sliceLevel)||(f(currTheta+halfWidth)>sliceLevel))
halfWidth <- 2*halfWidth
Sys.sleep(2*waitTime); print("Drawing endpoints")
points(c(currTheta-halfWidth,currTheta+halfWidth), c(sliceLevel, sliceLevel), pch=c(40,41))
points(c(currTheta-halfWidth,currTheta+halfWidth),c(sliceLevel,sliceLevel), type="l", col="black")
Sys.sleep(4*waitTime); print("proposed Theta")
while(TRUE){ ## rejection-sample until the proposal lies inside the slice
propTheta <- runif(1, min=currTheta-halfWidth, max=currTheta+halfWidth)
inspect(propTheta)
inspect(c(propTheta,sliceLevel))
points(propTheta,sliceLevel, col="red", pch=4) ## red 'x' marks a proposal
Sys.sleep(2*waitTime); print("proposed Theta")
##points(c(propTheta,sliceLevel), col="white", pch=4) ## erase it
if(f(propTheta)>sliceLevel) break
}
Sys.sleep(2*waitTime); print("this Theta is accepted")
points(propTheta,sliceLevel, col="black", pch=4) ## black 'x' marks acceptance
Sys.sleep(4*waitTime); print("moving on...")
# "Erase" this iteration's slice line and vertical segment by
# overdrawing them in white before the next iteration.
abline(h=sliceLevel, col="white")
points(c(currTheta,currTheta),c(0,height), type="l", col="white")
currTheta <- propTheta
s[i] <- currTheta
}
s
}
## Example: two-component normal mixture as an (unnormalized) target density.
f <- function(x) 0.3*dnorm(x,0,1) + 0.7*dnorm(x,3,3)
# NOTE: this runs the animated demo immediately when the file is sourced.
sliceSamplerDemo(f,xlim=c(-10,10), waitTime=0.5)
|
444a502f1916122f015807e5936947a3bee1cd7f | 7e0e9e2f7f7dff7e8866369374515b8c4d68463f | /RProgramming/ProgrammingAssignment1/complete.R | d8c59f896c3c56f745f44eb43e5aa8b0c2379406 | [
"MIT"
] | permissive | ZedZedNova/datasciencecoursera | d60b979e975db3df4469d4122394bd1d795b0112 | f8185e1b2f5959eb53839a190700b3947ed6215b | refs/heads/master | 2021-01-23T11:20:00.187022 | 2016-03-06T20:16:05 | 2016-03-06T20:16:05 | 23,549,450 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,843 | r | complete.R | complete <- function(directory, id = 1:332) {
## complete(): count the complete (no-NA) observations per monitor file.
##
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
# (An earlier vectorised draft -- building all file paths with
# formatC/paste, reading them with lapply, rbind-ing the tables and
# filtering with complete.cases -- was left here commented out.  The
# per-file loop below replaces it and avoids holding every monitor's
# data in memory at once.)
# Create a vector, nobs, as long as id; NA marks "not yet counted".
nobs <- rep(NA, length(id))
# For each monitor ID: build the zero-padded file name (e.g. 7 ->
# "007.csv"), read it, and count the rows with no missing values.
for(site in id) {
csv_filename <- paste(formatC(site, width = 3, flag = "0"), "csv", sep = ".")
data_file <- paste(getwd(), directory, csv_filename, sep = "/")
pollutant_data_frame <- read.csv(data_file)
complete_pollutant_data <- pollutant_data_frame[complete.cases(pollutant_data_frame),]
# match(site, id) maps the monitor ID back to its position in 'id',
# so results line up even when 'id' is not 1:n.
nobs[[match(site, id)]] <- nrow(complete_pollutant_data)
}
# Combine into the id/nobs data frame described above (cbind keeps both
# columns numeric; as.data.frame converts the matrix).
as.data.frame(cbind(id, nobs))
} |
f3b267412aa9f9e946496420e3e17199e9264643 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /Temporal/man/checkStatus.Rd | 83ca49f99bbf12d00459b608be9cf82bb38ca110 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 428 | rd | checkStatus.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Inputs.R
\name{checkStatus}
\alias{checkStatus}
\title{Status Check}
\usage{
checkStatus(n, status)
}
\arguments{
\item{n}{Integer, sample size.}
\item{status}{0/1 status indicator.}
}
\value{
Logical indicator of whether the status indicator was properly
formatted.
}
\description{
Function to ensure the status indicator is properly formatted
}
|
387ea743b4c2a2fbd7a92c506c82e089511a7470 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.analytics/man/gluedatabrew_update_recipe.Rd | 7fe751a10ac8b1f0f07ca41eefaded1d30e6640c | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 827 | rd | gluedatabrew_update_recipe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gluedatabrew_operations.R
\name{gluedatabrew_update_recipe}
\alias{gluedatabrew_update_recipe}
\title{Modifies the definition of the LATEST_WORKING version of a DataBrew
recipe}
\usage{
gluedatabrew_update_recipe(Description = NULL, Name, Steps = NULL)
}
\arguments{
\item{Description}{A description of the recipe.}
\item{Name}{[required] The name of the recipe to be updated.}
\item{Steps}{One or more steps to be performed by the recipe. Each step consists of
an action, and the conditions under which the action should succeed.}
}
\description{
Modifies the definition of the \code{LATEST_WORKING} version of a DataBrew recipe.
See \url{https://www.paws-r-sdk.com/docs/gluedatabrew_update_recipe/} for full documentation.
}
\keyword{internal}
|
bbf8470dd5251cb3c9efce025a711673653a9850 | fb157ec338c0cf5425ae7ccfa16173f2dfa341ec | /R/gee_func.R | 8b8f3fd092f63f5a51e96b2c8ab09659704f6cbe | [
"Apache-2.0"
] | permissive | weinbergerlab/carriage-simulation | 626bfc92ac8872fced16724bb02d656269edbbef | 0190bfe0921e5e9759ad926839e77bd9f6226b75 | refs/heads/master | 2022-06-20T02:51:14.232016 | 2022-05-31T21:17:48 | 2022-05-31T21:17:48 | 152,311,839 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 560 | r | gee_func.R |
# Fit a binomial GEE of colonisation status on calendar quarter and SES
# group, then return the exponentiated contrasts (odds ratios) with
# their confidence limits.  Relies on geeglm() [geepack], esticon()
# [doBy] and quarter() [lubridate] being available when called.
gee_func <- function(ds, ses.key) {
  # Attach the SES grouping to each subject record.
  merged <- merge(ds, ses.key, by = 'id')
  # Convert the sample index into a calendar date and derive its quarter.
  merged$date <- as.Date('2019/9/30') + merged$sample.index
  merged$quarter <- as.factor(quarter(merged$date))
  # GEE with independence working correlation, clustered by subject id.
  fit <- geeglm(col ~ quarter + ses.grp.vector, data = merged,
                family = 'binomial', id = id, corstr = 'independence')
  # One linear contrast per coefficient (identity matrix).
  contr <- esticon(fit, diag(length(fit$coefficients)))
  out <- as.data.frame(round(exp(cbind(contr$Estimate, contr$Lower, contr$Upper)), 2))
  names(out) <- c('or', 'or.lcl', 'or.ucl')
  out$covar <- names(fit$coefficients)
  out
}
4e62baf9ad18d675db53971045fa3720bfb4b85c | c931ab8ff778a3925ba51fc68728c759fecb934c | /man/run_shinymixr.Rd | 4c51b5e811006565c200f4837056107178262c5d | [
"MIT"
] | permissive | KyleBarrett/shinyMixR | be71597f00d10935d6470ecb4db4cbcd454b7a59 | d1bd1f617d4eeb51f5ae3a849b674a239e227092 | refs/heads/master | 2020-03-22T00:12:52.743235 | 2018-06-05T08:25:57 | 2018-06-05T08:25:57 | 139,231,198 | 1 | 0 | null | 2018-06-30T08:07:46 | 2018-06-30T08:07:45 | null | UTF-8 | R | false | true | 397 | rd | run_shinymixr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_shinymixr.r
\name{run_shinymixr}
\alias{run_shinymixr}
\title{Runs the shiny app for nlmixr}
\usage{
run_shinymixr(...)
}
\arguments{
\item{...}{arguments passed to the shiny runApp function}
}
\description{
Runs the shiny app for nlmixr
}
\examples{
\dontrun{
run_shinymixr()
}
}
\author{
Richard Hooijmaijers
}
|
abaf960d11186ea626447d88b2e214091e707f93 | 29585dff702209dd446c0ab52ceea046c58e384e | /npregfast/R/plotdiff.R | 2a766d9d850cdf06f8d3b513e39b959aa8355f45 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,992 | r | plotdiff.R | #' Visualization of the differences between the estimated curves
#' for two factor's levels
#' @description Useful for drawing the differences between the estimation of
#' curves (initial estimate, first or second derivative) for two factor's levels.
#' Missing values in the factor's levels are not allowed.
#'@param model Parametric or nonparametric regression model
#' obtained by the \code{\link{frfast}} function.
#'@param level2 Second factor's level at which to perform the
#'differences between curves.
#'@param level1 First factor's level at which to perform the
#'differences between curves.
#' @param der Number or vector which determines any inference process.
#' By default \code{der} is \code{NULL}. If this term is \code{0}, the plot
#' shows the differences between estimated regression functions. If it is
#' \code{1} or \code{2}, it is designed for the first or second derivative,
#' respectively.
#'@param est.include Draws the estimates of the model.
#'By default it is \code{FALSE}.
#' @param xlab A title for the x axis.
#' @param ylab A title for the y axis.
#' @param ylim The \code{y} limits of the plot.
#' @param main An overall title for the plot.
#' @param col A specification for the default plotting color.
#' @param CIcol A specification for the default confidence intervals
#' plotting color.
#' @param CIlinecol A specification for the default confidence intervals
#' plotting color (for the edge).
#' @param abline Draw an horizontal line into the plot of the second derivative
#' of the model.
#' @param ablinecol The color to be used for \code{abline}.
#' @param lty The line type. Line types can either be specified as an integer
#' (0 = blank, 1 = solid (default), 2 = dashed, 3 = dotted, 4 = dotdash,
#' 5 = longdash, 6 = twodash). See details in \code{\link{par}}.
#' @param CIlty The line type for confidence intervals. Line types can either
#' be specified as an integer (0 = blank, 1 = solid (default), 2 = dashed,
#' 3 = dotted, 4 = dotdash, 5 = longdash, 6 = twodash).
#' @param lwd The line width, a positive number, defaulting to 1.
#' See details in \code{\link{par}}.
#' @param CIlwd The line width for confidence intervals, a positive number,
#' defaulting to 1.
#' @param alpha Alpha transparency for overlapping elements expressed
#' as a fraction between 0 (complete transparency) and 1 (complete opacity).
#' @param \ldots Other options.
#' @return Simply produces a plot.
#' @author Marta Sestelo, Nora M. Villanueva and Javier Roca-Pardinas.
#' @examples
#' library(npregfast)
#' data(barnacle)
#'
#' # Nonparametric regression with interactions
#' fit2 <- frfast(DW ~ RC : F, data = barnacle, nboot = 100)
#' plotdiff(fit2, level2 = "lens", level1 = "barca")
#' plotdiff(fit2, level2 = "lens", level1 = "barca", der = 1, col = "blue", CIcol = "grey")
#' plotdiff(fit2, "lens", "barca", der = c(0, 1), ylim = c(-0.05, 0.05))
#'
#'
#'
#' @importFrom graphics lines par plot
#' @import ggplot2
#' @export
plotdiff <- function(model, level2, level1, der = NULL, est.include = FALSE, 
                     xlab = model$name[2], ylab = model$name[1], ylim = NULL,
                     main = NULL, col = "black", CIcol = "black", 
                     CIlinecol = "transparent", abline = TRUE, ablinecol = "red", 
                     lty = 1, CIlty = 2, lwd = 1, CIlwd = 1.5, 
                     alpha = 0.2, ...) {
  # Draws the difference curves (level2 - level1) for the requested
  # derivatives (der = 0, 1 and/or 2), optionally together with the
  # estimated curves of both levels (est.include = TRUE).  One ggplot
  # panel is built per derivative (plus one per level when
  # est.include = TRUE) and the panels are laid out at the end with
  # gridExtra::grid.arrange.
  nf <- model$nf
  # Positions of the two requested levels within the model's factor labels.
  jnf <- c()
  jnf[1] <- which(model$label == level1) # index of level1
  jnf[2] <- which(model$label == level2) # index of level2
  ## Argument checks
  if (missing(model)) 
    stop("Argument \"model\" is missing, with no default. 
         Must be a frfast object.")
  if (missing(level1) & missing(level2)) 
    stop("Argument 'level1' and/or 'level2' are missing, with no default")
  if (level1 == level2) 
    stop("Argument 'level1' and 'level2' are not different")
  if(!isTRUE(level1 %in% model$label)) {
    stop("\"",paste(level1),"\" is not a factor's level.")
  }
  if(!isTRUE(level2 %in% model$label)) {
    stop("\"",paste(level2),"\" is not a factor's level.")
  }
  if (sum(der > 2) >= 1) 
    stop("",paste(der[which(der > 2)])," is not a r-th derivative implemented, only 
         permitted 0, 1 or 2.")
  if ((nf == 1)) 
    stop("Function \"plot.diff\" not suported. 
         There is not factor in the model.")
  p <-list()  # list of ggplot panels, filled below and arranged at the end
  if (est.include == FALSE) {
    # --- differences only: one panel per requested derivative ---
    if (is.null(der)) der <- c(0, 1, 2)
    der <- der + 1  # shift to the 1-based slice index of the derivative arrays
    cont = 0
    for (i in der) {
      cont = cont + 1
      if (i == 1) ylab2 <- ylab
      if (i == 2) ylab2 <- "First derivative"
      if (i == 3) ylab2 <- "Second derivative"
      # The difference array is only filled in one orientation: when the
      # (jnf[2], jnf[1]) slice is all zero, use the reversed slice with
      # the sign flipped (-1 *).
      if (sum(model$diff[, der = i, jnf[2], jnf[1]], na.rm = T) == 0) {
        if (is.null(ylim)) {
          # Default y-range: data range extended by 5% on each side.
          rgo <- max(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T) - 
            min(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T)
          ylim <- c(min(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T) - 
                      (rgo * 0.05), 
                    max(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T) + 
                      (rgo * 0.05))
        }
        # Title only on the first panel (user-supplied main, else "Differences").
        if (is.null(main)){
          if(i == der[1] ){
            title <- "Differences"
          }else{
            title <- NULL
          }
        }else{
          if(i == der[1]){
            title <- main
          }else{
            title <- NULL
          }
        }
        data_bin <- data.frame(x = model$x, 
                               p = -1 * model$diff[, der = i, jnf[1], jnf[2]],
                               pl = -1 * model$diffl[, der = i, jnf[1], jnf[2]],
                               pu = -1 * model$diffu[, der = i, jnf[1], jnf[2]])
        # Optional horizontal reference line at zero.
        if (abline == TRUE){
          abline_layer <- geom_hline(yintercept = 0, colour = ablinecol)
        }else{
          abline_layer <- NULL
        }
        p[[cont]] <- ggplot() +
          geom_ribbon(data = data_bin, aes_string(x = "x", 
                                                  ymin = "pl",
                                                  ymax = "pu"),
                      alpha = alpha, fill = CIcol, linetype = lty,
                      size = CIlwd, col = CIlinecol) +
          geom_line(data = data_bin, aes_string(x = "x", 
                                                y = "p"),
                    size = lwd, colour = col, linetype = lty, na.rm=TRUE) +
          abline_layer +
          coord_cartesian(ylim = ylim) +
          # ylim(ylim) +
          ylab(ylab2) +
          xlab(xlab) + 
          ggtitle(title)
      } else {
        if (is.null(ylim)) {
          # Default y-range: data range extended by 5% on each side.
          rgo <- max(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T) - 
            min(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T)
          ylim <- c(min(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T) - 
                      (rgo * 0.05), 
                    max(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T) + 
                      (rgo * 0.05))
        }
        if (is.null(main)){
          if(i == der[1] ){
            title <- "Differences"
          }else{
            title <- NULL
          }
        }else{
          if(i == der[1]){
            title <- main
          }else{
            title <- NULL
          }
        }
        data_bin <- data.frame(x = model$x, 
                               p = model$diff[, der = i, jnf[2], jnf[1]],
                               pl = model$diffl[, der = i, jnf[2], jnf[1]],
                               pu = model$diffu[, der = i, jnf[2], jnf[1]])
        if (abline == TRUE){
          abline_layer <- geom_hline(yintercept = 0, colour = ablinecol)
        }else{
          abline_layer <- NULL
        }
        p[[cont]] <- ggplot() +
          geom_ribbon(data = data_bin, aes_string(x = "x", 
                                                  ymin = "pl",
                                                  ymax = "pu"),
                      alpha = alpha, fill = CIcol, linetype = lty,
                      size = CIlwd, col = CIlinecol) +
          geom_line(data = data_bin, aes_string(x = "x", 
                                                y = "p"),
                    size = lwd, colour = col, linetype = lty, na.rm=TRUE) +
          abline_layer +
          coord_cartesian(ylim = ylim) +
          # ylim(ylim) +
          ylab(ylab2) +
          xlab(xlab) + 
          ggtitle(title)
      }
      ylim <- NULL # reset ylim so the next panel computes its own default range
    }
    # NOTE: This ugly hack is here because of a bug in gridExtra which calls
    # a ggplot2 function directly instead of namespacing it. The bug is fixed
    # in the gridExtra GitHub version, but not on CRAN. Hopefully gridExtra
    # will submit the fix to CRAN and I can remove this ugliness.
    # https://github.com/baptiste/gridextra/issues/5
    # Thanks to Dean Attali!!
    if (!"package:ggplot2" %in% search()) {
      suppressPackageStartupMessages(attachNamespace("ggplot2"))
      on.exit(detach("package:ggplot2"))
    }
    # Arrange all panels in one row.
    args.list <- c(p, 1, length(der))
    names(args.list) <- c(c(rep("", length(der)), "nrow", "ncol"))
    suppressWarnings(do.call(gridExtra::grid.arrange, args.list))
  } else { # est.include = TRUE
    # --- differences plus the estimated curve of each level, one column
    # per derivative and one row per panel type ---
    if (is.null(der)) der <- c(0, 1, 2)
    jder <- der + 1 # shift to the 1-based slice index of the derivative arrays
    cont = 0
    for (i in jder) {
      cont = cont + 1
      if (i == 1) 
        ylab2 <- ylab
      if (i == 2) 
        ylab2 <- "First derivative"
      if (i == 3) 
        ylab2 <- "Second derivative"
      # Same orientation check as above: flip the sign when the stored
      # (jnf[2], jnf[1]) slice is all zero.
      if (sum(model$diff[, der = i, jnf[2], jnf[1]], na.rm = T) == 0) {
        if (is.null(ylim)) {
          rgo <- max(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T) - 
            min(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T)
          ylim <- c(min(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T) - 
                      (rgo * 0.05), 
                    max(-1 * (model$diff[, der = i, jnf[1], jnf[2]]), na.rm = T) + 
                      (rgo * 0.05))
        }
        if (is.null(main)){
          if(i == jder[1] ){
            title <- "Differences"
          }else{
            title <- NULL
          }
        }else{
          if(i == jder[1]){
            title <- main
          }else{
            title <- NULL
          }
        }
        data_bin <- data.frame(x = model$x, 
                               p = -1 * model$diff[, der = i, jnf[1], jnf[2]],
                               pl = -1 * model$diffl[, der = i, jnf[1], jnf[2]],
                               pu = -1 * model$diffu[, der = i, jnf[1], jnf[2]])
        if (abline == TRUE){
          abline_layer <- geom_hline(yintercept = 0, colour = ablinecol)
        }else{
          abline_layer <- NULL
        }
        p[[cont]] <- ggplot() +
          geom_ribbon(data = data_bin, aes_string(x = "x", 
                                                  ymin = "pl",
                                                  ymax = "pu"),
                      alpha = alpha, fill = CIcol, linetype = lty,
                      size = CIlwd, col = CIlinecol) +
          geom_line(data = data_bin, aes_string(x = "x", 
                                                y = "p"),
                    size = lwd, colour = col, linetype = lty, na.rm=TRUE) +
          abline_layer +
          coord_cartesian(ylim = ylim) +
          # ylim(ylim) +
          ylab(ylab2) +
          xlab(xlab) + 
          ggtitle(title)
        # One additional panel per level with its estimated curve and CI.
        for (j in length(jnf):1) {
          cont = cont + 1
          if (jnf[j] == jnf[2]) {
            title <- paste("Level", model$label[jnf[2]])
          }
          if (jnf[j] == jnf[1]) {
            title <- paste("Level", model$label[jnf[1]])
          }
          rgo <- max(model$p[, der = i, jnf[j]], na.rm = T) - 
            min(model$p[, der = i, jnf[j]], na.rm = T)
          ylim <- c(min(model$p[, der = i, jnf[j]], na.rm = T) - (rgo * 0.05), 
                    max(model$p[, der = i, jnf[j]], na.rm = T) + (rgo * 0.05))
          data_bin <- data.frame(x = model$x, 
                                 pl = model$pl[, der = i, fac = jnf[j]],
                                 pu = model$pu[, der = i, fac = jnf[j]],
                                 p = model$p[, der = i, fac = jnf[j]])
          p[[cont]] <- ggplot() +
            geom_ribbon(data = data_bin, aes_string(x = "x", 
                                                    ymin = "pl",
                                                    ymax = "pu"),
                        alpha = alpha, fill = CIcol, linetype = lty,
                        size = CIlwd, col = CIlinecol) +
            geom_line(data = data_bin, aes_string(x = "x", 
                                                  y = "p"),
                      size = lwd, colour = col, linetype = lty, na.rm=TRUE) +
            coord_cartesian(ylim = 
                              ylim) +
            # ylim(ylim) +
            ylab(ylab2) +
            xlab(xlab) + 
            ggtitle(title)
        }
        ylim <- NULL # reset ylim so the next panel computes its own default range
      } else {
        if (is.null(main)){
          if(i == jder[1]){
            title <- "Differences"
          }else{
            title <- NULL
          }
        }else{
          if(i == jder[1]){
            title <- main
          }else{
            title <- NULL
          }
        }
        if (is.null(ylim)) {
          rgo <- max(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T) - 
            min(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T)
          ylim <- c(min(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T) - 
                      (rgo * 0.05), 
                    max(1 * (model$diff[, der = i, jnf[2], jnf[1]]), na.rm = T) + 
                      (rgo * 0.05))
        }
        data_bin <- data.frame(x = model$x, 
                               p = model$diff[, der = i, jnf[2], jnf[1]],
                               pl = model$diffl[, der = i, jnf[2], jnf[1]],
                               pu = model$diffu[, der = i, jnf[2], jnf[1]])
        if (abline == TRUE){
          abline_layer <- geom_hline(yintercept = 0, colour = ablinecol)
        }else{
          abline_layer <- NULL
        }
        p[[cont]] <- ggplot() +
          geom_ribbon(data = data_bin, aes_string(x = "x", 
                                                  ymin = "pl",
                                                  ymax = "pu"),
                      alpha = alpha, fill = CIcol, linetype = lty,
                      size = CIlwd, col = CIlinecol) +
          geom_line(data = data_bin, aes_string(x = "x", 
                                                y = "p"),
                    size = lwd, colour = col, linetype = lty, na.rm=TRUE) +
          abline_layer +
          coord_cartesian(ylim = ylim) +
          # ylim(ylim) +
          ylab(ylab2) +
          xlab(xlab) + 
          ggtitle(title)
        # One additional panel per level with its estimated curve and CI.
        for (j in length(jnf):1) {
          cont = cont + 1
          if (jnf[j] == jnf[2]) {
            title <- paste("Level", model$label[jnf[2]])
          }
          if (jnf[j] == jnf[1]) {
            title <- paste("Level", model$label[jnf[1]])
          }
          rgo <- max(model$p[, der = i, jnf[j]], na.rm = T) - 
            min(model$p[, der = i, jnf[j]], na.rm = T)
          ylim <- c(min(model$p[, der = i, jnf[j]], na.rm = T) - (rgo * 0.05), 
                    max(model$p[, der = i, jnf[j]], na.rm = T) + (rgo * 0.05))
          data_bin <- data.frame(x = model$x, 
                                 pl = model$pl[, der = i, fac = jnf[j]],
                                 pu = model$pu[, der = i, fac = jnf[j]],
                                 p = model$p[, der = i, fac = jnf[j]])
          p[[cont]] <- ggplot() +
            geom_ribbon(data = data_bin, aes_string(x = "x", 
                                                    ymin = "pl",
                                                    ymax = "pu"),
                        alpha = alpha, fill = CIcol, linetype = lty,
                        size = CIlwd, col = CIlinecol) +
            geom_line(data = data_bin, aes_string(x = "x", 
                                                  y = "p"),
                      size = lwd, colour = col, linetype = lty, na.rm=TRUE) +
            coord_cartesian(ylim = ylim) +
            # ylim(ylim) +
            ylab(ylab2) +
            xlab(xlab) + 
            ggtitle(title)
        }
        ylim <- NULL # reset ylim so the next panel computes its own default range
      }
    }
    # NOTE: This ugly hack is here because of a bug in gridExtra which calls
    # a ggplot2 function directly instead of namespacing it. The bug is fixed
    # in the gridExtra GitHub version, but not on CRAN. Hopefully gridExtra
    # will submit the fix to CRAN and I can remove this ugliness.
    # https://github.com/baptiste/gridextra/issues/5
    # Thanks to Dean Attali!!
    if (!"package:ggplot2" %in% search()) {
      suppressPackageStartupMessages(attachNamespace("ggplot2"))
      on.exit(detach("package:ggplot2"))
    }
    # Arrange the panels: nf + 1 rows, one column per derivative.
    args.list <- c(p, nf + 1, length(der))
    names(args.list) <- c(c(rep("", (nf + 1) * length(der)), "nrow", "ncol"))
    suppressWarnings(do.call(gridExtra::grid.arrange, args.list))
  }
}
|
8b41504f541c3493a3b998dd656673618d6ea1fc | d95dce973e531a6ed5a8f1db121641a2bd978ec3 | /R/readSitraObs.R | 4358de82505546246e891e4d886dbd297f562090 | [] | no_license | zhenglei-gao/pest.R | f6a7a2fe095170ae384e53d0890eb014b372dc6a | 260a9e35fd49f0fa483d6c7904f9d9a8aed4725d | refs/heads/master | 2020-12-03T05:26:13.556729 | 2014-04-15T09:37:45 | 2014-04-15T09:37:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,509 | r | readSitraObs.R | readSitraObs <-
readSitraObs <- function(file) {
  # Read a sitra observation (.obs) file and return one data frame with:
  #   Name : observation point name
  #   Date : observation time, in whole time units (days by default)
  #   Ele  : observed value (e.g. water level)
  #   Q    : standard deviation / weight of the observation
  #   Type : point type keyword (POTE, KNOT or LKNO)

  # Parse one whitespace-separated data table (Date, Ele, Q) from the
  # text of a point block, skipping `skip` header lines.
  readPointData <- function(x, skip, pname, ptype) {
    # `stringsAsFactors` spelled out: the original `stringsAsFactor = F`
    # only worked through partial argument matching.
    d <- read.table(text = x, skip = skip,
                    stringsAsFactors = FALSE,
                    col.names = c('Date', 'Ele', 'Q'))
    data.frame(Name = pname,
               Date = round(as.numeric(procTu(d$Date))),
               Ele = d$Ele,
               Q = d$Q,
               Type = ptype)
  }

  # KNOT/LKNO blocks list their node numbers before the data table: the
  # second header line ends with the node count, that many node ids
  # follow, and the data table starts after the line holding the last id.
  procPointWithNodes <- function(x, pname, ptype) {
    s <- scan(text = x, what = "character", nlines = 2, quiet = TRUE)
    nele <- as.numeric(s[length(s)])
    nds <- scan(text = x, skip = 2, nmax = nele, quiet = TRUE)
    # NOTE(review): the last node id is used as a regex pattern here; if
    # that id also occurs earlier in the block this finds the wrong
    # line -- confirm against real input files.
    n <- grep(nds[length(nds)], strsplit(x, "\n")[[1]])
    readPointData(x, skip = n, pname, ptype)
  }

  # How to convert the Date column into time units, keyed by the keyword
  # on line 6 of the file:
  #   BEZUGSDATUM <ref date> : dates are d.m.Y strings, converted to the
  #                            number of days since the reference date
  #   ZEITEINHEIT <unit>     : values are already numeric time units
  proc_tu_function <-
    list(
      'BEZUGSDATUM' = function(d, units = "day")
      {
        asDate <- function(st)
          as.POSIXct(strptime(st, format = "%d.%m.%Y"))
        difftime(asDate(d), asDate(time_unit_line[2]), units = units)
      }
      ,'ZEITEINHEIT' = function(d)
        d
    )

  obs.lines <- readLines(file)
  time_unit_line <- unlist(strsplit(obs.lines[6], " +"))
  # Unknown time keywords fail here with "subscript out of bounds".
  procTu <- proc_tu_function[[time_unit_line[1]]]

  # Drop the 7-line file header; point blocks are separated by blank lines.
  obs.string <- paste(obs.lines[-seq(1, 7)], collapse = "\n")
  obs.points <- strsplit(obs.string, "\n\\s*\n")[[1]]

  do.call(
    rbind,
    lapply(obs.points, function(x)
    {
      # First line of each block: "<TYPE> <name> ..."
      header <-
        scan(text = x, what = "character", nlines = 1, quiet = TRUE)
      ptype <- header[1]
      pname <- header[2]
      switch(
        ptype
        ,"POTE" = readPointData(x, skip = 2, pname, ptype)
        ,"KNOT" = procPointWithNodes(x, pname, ptype)
        ,"LKNO" = procPointWithNodes(x, pname, ptype)
        , stop('Keyword ', ptype, ' in .obs-file currently not supported')
      )
    }
    )
  )
}
5604ea241fe8130f62bb015b1d95111b843468fd | 468be7517b2c5953c57ca0f620f811efac6c9f84 | /man/dhhs_purple3.Rd | 9fa466f9b8b4d5f0e8a5834dc3c83cd3d2c288e5 | [] | no_license | wfmackey/dhhstheme | a29fa354554f76bc8b33ab942c8f5b9350a3e101 | 13883b2680a9deb72c918b4ec27aeb5585915dec | refs/heads/master | 2023-03-09T05:08:42.626075 | 2020-10-18T05:24:28 | 2020-10-18T05:24:28 | 303,308,423 | 5 | 2 | null | 2020-10-18T03:45:12 | 2020-10-12T07:13:48 | HTML | UTF-8 | R | false | true | 316 | rd | dhhs_purple3.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colours.R
\docType{data}
\name{dhhs_purple3}
\alias{dhhs_purple3}
\title{Hex code for the colour: DHHS Purple3}
\format{
An object of class \code{character} of length 1.
}
\usage{
dhhs_purple3
}
\description{
#DBBAE2
}
\keyword{datasets}
|
acf7a30f95e0755ba89db51256f24218502090db | c3826e89c7c78acdcc4596820d03fa96c8710b38 | /R/utils.R | da8072ff7c78d217cb850139446927a176116588 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | chen496/SomaDataIO | 7b393fad010774e17e086555a026c2a38de06415 | b8f00329aaa283f8243d1064a7bda19b873fdd67 | refs/heads/master | 2023-06-24T21:22:02.222540 | 2021-07-27T20:45:52 | 2021-07-27T20:45:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 363 | r | utils.R |
#' trim leading/trailing empty strings
#' @importFrom stringr str_trim str_c str_split
#' @noRd
trim_empty <- function(x, side) {
stringr::str_c(x, collapse = "\t") %>%
stringr::str_trim(side = side) %>%
stringr::str_split("\t") %>%
magrittr::extract2(1L)
}
# kinder version of identical
`%equals%` <- function(x, y) {
isTRUE(all.equal(x, y))
}
|
c6694797d044df387b7f6bf17976fc5db5b0c3fe | 262a347beae2643b62367b59bc45d2a96aa7dbfc | /man/url.get.Rd | e5d7a3cb43f91c0919ac998659ebf5e6192174ac | [] | no_license | BenioffOceanInitiative/whalesafe4r | 18ab7e7875808f2db91f42b62dea7e9eefc2f8f8 | ef4341c0622c9e32ccdd1d45ca3f5ca76b15effb | refs/heads/master | 2022-11-28T02:20:43.257741 | 2020-08-07T01:08:12 | 2020-08-07T01:08:12 | 162,746,601 | 0 | 1 | null | 2020-06-18T19:26:37 | 2018-12-21T18:39:01 | HTML | UTF-8 | R | false | true | 231 | rd | url.get.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{url.get}
\alias{url.get}
\title{Extracts the url forcefully}
\usage{
url.get(x)
}
\description{
Need this to ensure a few steps later on
}
|
30fd0094fdace3dafba96c77281b7e78f69ab26d | b3e37756f5993112fabcec65dcfbb97f2d592798 | /RepData_PeerAssessment2.R | 70a3239c4d1ba8f947375605f015892312bfe370 | [] | no_license | jmcampbell2/RepData_PeerAssessment2 | f0f357ea94f5069fd5cf5350b968da4bba48537f | be60566b4969a7104a9e70238f7d541547916033 | refs/heads/master | 2021-01-22T22:33:59.388261 | 2017-06-12T03:23:46 | 2017-06-12T03:23:46 | 92,780,020 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,751 | r | RepData_PeerAssessment2.R | #repdata_peerassessment2
#code for reproducible data project 2
#author: Jennifer Campbell
#data: May 29, 2017-June 4, 2017
#system: Windows 10
##read in data
if(!file.exists("storm.csv.bz2")){
url <- "https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2"
download.file(url, destfile='storm.csv.bz2', method="wininet", mode="wb")
}
df <- read.csv("storm.csv.bz2",
na.strings = "NA", header = TRUE)
dim(df)
names(df)
##subsetting
df$Year <- as.numeric(format(as.Date(df$BGN_DATE, format = "%m/%d/%Y %H:%M:%S"), "%Y"))
hist(df$Year, main="Total Storm Events by Year", xlab="Year")
stormdata <- df[df$Year %in% c(1990:2011), ]
variables <- c("EVTYPE", "FATALITIES", "INJURIES", "PROPDMG", "PROPDMGEXP",
"CROPDMG", "CROPDMGEXP")
stormdata <- stormdata[variables]
##cleaning
stormdata$EVTYPE <- toupper(stormdata$EVTYPE)
stormdata$EVTYPE <- sub("TSTM", "THUNDERSTORM", stormdata$EVTYPE)
stormdata$EVTYPE <- sub("S$", "", stormdata$EVTYPE)
stormdata$EVTYPE <- sub("WINDS", "WIND", stormdata$EVTYPE)
stormdata$EVTYPE <- sub("FLOODING", "FLOOD", stormdata$EVTYPE)
stormdata$EVTYPE <- sub("FLD", "FLOOD", stormdata$EVTYPE)
stormdata$EVTYPE <- sub("SML", "SMALL", stormdata$EVTYPE)
stormdata$EVTYPE <- sub("WIND/HAIL", "WIND HAIL", stormdata$EVTYPE)
stormdata$EVTYPE <- as.factor(stormdata$EVTYPE)
##
sumFatalities <- aggregate(stormdata$FATALITIES,
by = list(stormdata$EVTYPE), FUN = "sum")
sumFatalities <- arrange(sumFatalities, sumFatalities[, 2], decreasing = T)
sumInjuries <- aggregate(stormdata$INJURIES,
by = list(stormdata$EVTYPE), FUN = "sum")
sumInjuries <- arrange(sumInjuries, sumInjuries[, 2], decreasing = T)
topFatalities <- head(sumFatalities, n = 10)
topInjuries <- head(sumInjuries, n = 10)
names(topFatalities) <- c("EventType", "Fatalities")
names(topInjuries) <- c("EventType", "Injuries")
##
unique(stormdata$PROPDMGEXP)
unique(stormdata$CROPDMGEXP)
stormdata$PROPDMGEXP<- toupper(stormdata$PROPDMGEXP)
stormdata$CROPDMGEXP<- toupper(stormdata$CROPDMGEXP)
stormdata$PROPDMGEXP <- sub("H", 2, stormdata$PROPDMGEXP)
stormdata$PROPDMGEXP <- sub("K", 3, stormdata$PROPDMGEXP)
stormdata$PROPDMGEXP <- sub("M", 6, stormdata$PROPDMGEXP)
stormdata$PROPDMGEXP <- sub("B", 9, stormdata$PROPDMGEXP)
stormdata$PROPDMGEXP <- as.numeric(stormdata$PROPDMGEXP)
stormdata$PROPDMGEXP[is.na(stormdata$PROPDMGEXP)] = 0 ##Assume non-alphanumeric values of exp are 0, bad data Quality
stormdata$CROPDMGEXP <- sub("H", 2, stormdata$CROPDMGEXP)
stormdata$CROPDMGEXP <- sub("K", 3, stormdata$CROPDMGEXP)
stormdata$CROPDMGEXP <- sub("M", 6, stormdata$CROPDMGEXP)
stormdata$CROPDMGEXP <- sub("B", 9, stormdata$CROPDMGEXP)
stormdata$CROPDMGEXP <- as.numeric(stormdata$CROPDMGEXP)
stormdata$CROPDMGEXP[is.na(stormdata$CROPDMGEXP)] = 0 ##Assume non-alphanumeric values of exp are 0, bad data Quality
calcDamage <- function(value,exp){value * (10^exp)}
stormdata$PropertyDamage <- mapply(calcDamage, stormdata$PROPDMG, stormdata$PROPDMGEXP)
summary(stormdata$PropertyDamage)
stormdata$CropDamage <- mapply(calcDamage, stormdata$CROPDMG, stormdata$CROPDMGEXP)
summary(stormdata$CropDamage)
sumPropDamage <- aggregate(stormdata$PropertyDamage,
by = list(stormdata$EVTYPE), FUN = "sum")
sumPropDamage <- arrange(sumPropDamage, sumPropDamage[, 2], decreasing = T)
sumCropDamage <- aggregate(stormdata$CropDamage,
by = list(stormdata$EVTYPE), FUN = "sum")
sumCropDamage <- arrange(sumCropDamage, sumCropDamage[, 2], decreasing = T)
topPropDamage <- head(sumPropDamage, n = 10)
topCropDamage <- head(sumCropDamage, n = 10)
names(topPropDamage) <- c("EventType", "PropertyDamage")
names(topCropDamage) <- c("EventType", "CropDamage")
topPropDamage
topCropDamage
##Results
par(mfrow = c(1,2), mar = c(12, 4, 2, 2), cex = 0.8)
barplot(topFatalities$Fatalities, names = topFatalities$EventType, las = 3,
ylab = "Total Fatalities", main = "10 Most Fatal Weather Events")
barplot(topInjuries$Injuries, names = topInjuries$EventType,las = 3,
ylab = "Total Injuries", main = "10 Most Injurious Weather Events")
par(mfrow = c(1,2), mar = c(12, 4, 2, 3), cex = 0.8)
barplot((topPropDamage$PropertyDamage/1000000000), names = topPropDamage$EventType,
las = 3, ylab = "Total Property Damage (Billions of $)",
main = "10 Most Property Damaging Weather Events")
barplot((topCropDamage$CropDamage/1000000000), names = topCropDamage$EventType,
las = 3, ylab = "Total Crop Damage (Billions of $)",
main = "10 Most Crop Damaging Weather Events")
|
f69e526968cc64b39f84113e0f5f6d273feae455 | 337bb7b2e0ca7416a17c1e1b48bf212f47ce402d | /man/panel_to_network.Rd | ffe58a5973f8667f2456d02940c54afd1dddd4db | [] | no_license | vincentarelbundock/SpatialHelper | 7a2459e855e99c654151b3a4052f7cd0aa7b98e3 | e8bacc811fd556b6549b6348f590d851fbbf2fb7 | refs/heads/master | 2023-08-31T01:21:01.570969 | 2023-08-14T14:16:52 | 2023-08-14T14:16:52 | 113,897,289 | 4 | 2 | null | null | null | null | UTF-8 | R | false | true | 788 | rd | panel_to_network.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/panel_to_network.R
\name{panel_to_network}
\alias{panel_to_network}
\title{Converts data.frames to objects amenable to network analysis using `btergm`}
\usage{
panel_to_network(unit_time, dyad_time, cores = 1, verbose = TRUE)
}
\arguments{
\item{unit_time}{data.frame unit/time dataset with columns named `unit`,
`time`. Additional columns are vertex attributes}
\item{dyad_time}{data.frame dyadic dataset with columns named `unit1`,
`unit2`, `time`. Additional columns are edge attributes.}
\item{cores}{integer number of cores to use for computation with mclapply}
\item{verbose}{print progress report if TRUE}
}
\description{
Converts data.frames to objects amenable to network analysis using `btergm`
}
|
47ff489ce9cb5e47aeb1a257461af884e78c2c92 | 94416a0cf94fc5eca3b2ee06c1d96ce5224a41b5 | /ProvisionalWindsorPlotCode.R | 7d7f8cb518520b4538727df540bb972a2e3028a4 | [] | no_license | Rivers-Project-2018/Sophie-River-Thames | 972b6c83d6516fc98020af6a2d0179d4f7b8fe5b | abb6b23469b97ccbaa075ef2111261186b0f2c5c | refs/heads/master | 2020-04-25T21:35:38.165142 | 2019-05-13T15:29:14 | 2019-05-13T15:29:14 | 159,342,789 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,409 | r | ProvisionalWindsorPlotCode.R | library(readxl)
Windsor <- read_excel("Desktop/University_of_Leeds/R/Windsor 2700TH flow and stage data 2013-14.xlsx",
sheet = "Sheet3") # Importing data set
DW=Windsor$Day # Setting data names
FlW=Windsor$Flow
StW=Windsor$Stage
minDW=1 # Setting min and max for Day data
maxDW=33
minFlW=30 # Setting min and max for Flow data
maxFlW=360
minStW=min(StW) # Setting min and max for Stage data
maxStW=max(StW)
DW_sc=(DW-minDW)/(maxDW-minDW) # Scaling the data sets
FlW_sc=(FlW-minFlW)/(maxFlW-minFlW)
StW_sc=(StW-minStW)/(maxStW-minStW)
plot(0, 0, xlim=cbind(-1,1), ylim=cbind(-1,1), xlab="", ylab="", axes=FALSE, type="l", main="Quadrant plot for the Windsor 2014 flood")
lines(x=DW_sc, y=FlW_sc, type="l", lty=1, lwd=1)
lines(x=-StW_sc, y=FlW_sc, type="l", lty=1, lwd=1)
lines(x=-StW_sc, y=-DW_sc, type="l", lty=1, lwd=1)
segments(-0.4788645, 0.6787879, 1, 0.6787879, lty=3) # (254-minFlW)/(maxFlW-minFlW)
abline(a=0.9757576, b=0, v=0 , h=0.9757576, lty=3) # (352-minFlW)/(maxFlW-minFlW)
segments(-0.4788645, -1, -0.4788645, 0.6787879, lty=3) # (4.38729-minStW)/(maxStW-minStW); 4.38729 = sum(StW)/3074
abline(a=0, b=0, v=0.340909, h=0, lty=3)
abline(a=0, b=0, v=0.4318182, h=0, lty=3)
abline(a=0, b=0, v=0.5909091, h=0, lty=3)
axis(1, at=NULL, labels = FALSE, tick = TRUE, pos = 0,0, lwd.ticks=0, lwd=2)
axis(2, at=NULL, labels = FALSE, tick = TRUE, pos = 0,0, lwd.ticks=0, lwd=2) |
8f6ab068c4ae33288fb0cddf13feb17ac51374ad | 87fd5edb936be0d400576497761f5e92b3954ebb | /filter_days.R | 8b6b6f4cf91799dbdf24649599f5b3f4759432e0 | [] | no_license | stineb/eval_pmodel | ae15c9bde9044b8313733110119622101ee061f8 | 9d590a4857385aadc3f1b3124488351f826d316b | refs/heads/master | 2022-10-04T17:45:58.770536 | 2022-09-24T08:24:35 | 2022-09-24T08:24:35 | 158,261,963 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,534 | r | filter_days.R | ##------------------------------------------------------------
## Filter days
##------------------------------------------------------------
filter_days <- function( ddf, filter_days_source, path_gepisat ){
require(purrr)
require(dplyr)
## send only one site's data to filter_days_bysite
ddf <- purrr::map( as.list( unique(ddf$sitename)), ~filter( ddf, sitename==. ) ) %>%
purrr::map( ~filter_days_bysite( ., filter_days_source, path_gepisat ) ) %>%
bind_rows()
}
filter_days_bysite <- function( ddf, filter_days_source, path_gepisat ){
## replace all 'gpp_obs' from FLUXNET 2015 data with NA if GPP from DT decomposition is NA
if ("fluxnet2015_DT" %in% filter_days_source){
# print("Filtering days based on GPP_DT_VUT_REF")
ddf <- ddf %>% dplyr::mutate( gpp_obs = ifelse( is.na(GPP_DT_VUT_REF), NA, gpp_obs ) )
}
## replace all 'gpp_obs' from FLUXNET 2015 data with NA if GPP from NT decomposition is NA
if ("fluxnet2015_NT" %in% filter_days_source){
# print("Filtering days based on GPP_NT_VUT_REF")
ddf <- ddf %>% dplyr::mutate( gpp_obs = ifelse( is.na(GPP_NT_VUT_REF), NA, gpp_obs ) )
}
if ("fluxnet2015_Ty" %in% filter_days_source){
# print("Filtering days based on gpp_obs_gepisat")
## Filter out data points based on GePiSaT data
avl_gepisat <- TRUE
if (!("gpp_obs_gepisat" %in% names(ddf))){
## get GePiSaT data
## Make sure data is available for this site
error <- rsofun::check_download_gepisat( path_gepisat, unique(ddf$sitename) )
ddf_gepisat <- rsofun::get_obs_bysite_gpp_gepisat( unique(ddf$sitename), path_gepisat, "d" )
## add to other data frame and take take weighted average for updated 'gpp_obs'
if (!is.null(ddf_gepisat)){
ddf <- ddf_gepisat %>%
## Some GPP data looks weird when its error in resp. day is zero. Exclude this data.
dplyr::mutate( gpp_obs = ifelse( gpp_err_obs == 0.0, NA, gpp_obs ) ) %>%
dplyr::rename( gpp_obs_gepisat = gpp_obs ) %>%
dplyr::right_join( ddf, by = "date" )
} else {
## No GePiSaT data available for this site. Consider all GPP data missing (NA).
ddf <- ddf %>% dplyr::mutate( gpp_obs = NA )
avl_gepisat <- FALSE
}
}
## replace all 'gpp_obs' from FLUXNET 2015 data with NA if GePiSaT-GPP is NA
if (avl_gepisat) ddf <- ddf %>% dplyr::mutate( gpp_obs = ifelse( is.na(gpp_obs_gepisat), NA, gpp_obs ) )
}
return(ddf)
}
|
95cb46af6f342c0ed4269ccd86c57c930519fa01 | 98c294d607fb9929e03a832f0ec858a9b40f2cfd | /man/pkgattrs.Rd | 2f2c4857fb4cc955604a2f4df559c6e14104bee0 | [
"MIT"
] | permissive | rich-iannone/pkgattrs | f2f48150cc81c64eee5922bde4115a607f0d7a8f | 9258ad8cd457137b3397451682c7898281f28c08 | refs/heads/main | 2021-07-13T18:39:36.630288 | 2020-07-24T17:53:26 | 2020-07-24T17:53:26 | 124,416,970 | 21 | 2 | null | null | null | null | UTF-8 | R | false | true | 785 | rd | pkgattrs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgattrs.R
\name{pkgattrs}
\alias{pkgattrs}
\title{Get a table of info functions inside one or more packages}
\usage{
pkgattrs(..., .make_clean = TRUE, .get_cyclocomp = FALSE)
}
\arguments{
\item{...}{A series of objects pointing to package locations. These can be
strings with paths to local package directories, or, invocations of helper
functions such as \code{from_github()}.}
\item{.make_clean}{An option to clean the working directory of any temporary
package files downloaded from GitHub.}
\item{.get_cyclocomp}{An option to include a measure of each function's
cyclomatic complexity.}
}
\description{
Create a tibble of information related to each function available in one or
several packages.
}
|
a07b98dc488b08279467134f36814ae014e87beb | 028d02e227415930b1e42ac363a98e7ecba8a493 | /Letters_Workflow.R | 54a6a4ab9f7e811b956563b8e145433484aecf16 | [] | no_license | SaraJKerr/Letters_1916_Internship | 8dc3f7dba97013a4f916ab5eac4693e0ec12a52c | 90a6f1f70f13791154a76be7b6d4129a7999b672 | refs/heads/master | 2021-01-24T11:18:29.122999 | 2017-07-05T15:40:49 | 2017-07-05T15:40:49 | 70,236,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,237 | r | Letters_Workflow.R | ################################################################################
# File-Name: Letters_Workflow.R #
# Date: 27 March 2017 #
# Author: Sara J Kerr #
# ORCID:orcid.org/0000-0002-2322-1178 #
# Institution: Maynooth University #
# Project: Letters of 1916 - Prof Susan Schreibman #
# Purpose: Workflow script combining functions #
# Data Used: Letters in xml format, downloaded from LetEd #
# Packages Used: XML, devtools, wordVectors, tm, koRpus, ggplot2 #
# Input: #
# Output: #
# Last Updated: 29 March 2017 #
################################################################################
# This script allows access to a variety of functions to access, process and
# analyse the letters
# Set working directory
# Load packages
library(XML)
library(devtools)
install_github("bmschmidt/wordVectors", force = T) # Check Xcode license agreed
install_github("trestletech/plumber")
library(wordVectors)
library(tm)
library(ggplot2)
library(koRpus)
library(ggrepel)
library(stringi)
library(magrittr)
library(igraph)
library(RColorBrewer)
library(visNetwork)
library(tsne)
library(Rtsne)
library(httr)
library(base64enc)
library(RMySQL)
library(sqldf)
source("config.R")
################################################################################
# Step 0: Get the letters from the Explore DB #
################################################################################
source("Text_Get_From_DB.R")
data <- get_all_from_db()
################################################################################
# Step 1: Extract body text from letters #
################################################################################
#OLD CODE FOR GENERATING FILES FROM TEI XML
# Load the function to extract the body text and save each as .txt files
#source("Code/Letters_1916_Internship/Text_Extract.R") # From my Mac
# Identify folder where XML files are saved and which format they are in
#input_dir1 <- "RawData/Letters" # path to xml letters
#files1 <- dir(input_dir1, "\\.xml") # vector of file names
#input_xml <- file.path(input_dir1,files1) # this creates a path to each file
# Run the function
#x <- lapply(input_xml, text_extract) # x returns NULL but the files are written
#NEW CODE FOR GENERATING FILES FROM DATABASE
source("Text_Extract.R")
apply(data, 1, text_extract_fromdb)
################################################################################
# Step 2: Process the texts #
################################################################################
# Prior to running the script TreeTagger needs to be downloaded to your computer
# it can be downloaded from http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/
# Instructions for downloading and set up of TreeTagger are on the site.
# If error 'error TreeTagger/lib/english.par not found' appears check the
# TreeTagger 'lib' folder and rename the 'english-utf8.par' file 'english.par'.
# Load the function to process the files
source("Text_Process.R") # From my Mac
# Identify folder where .txt files are saved and which format they are in
#input_dir2 <- "Text_Files" # path to .txt letters' folder
y1 <- lapply(config_extract_folderpath, text_process) # combined .txt files and a DTM created
#input_dir3 <- "Processed_Files/Letters_cap.txt" # This output by the line above
y2 <- text_tag(paste0(config_process_folderpath, "/Letters_cap.txt")) #
################################################################################
# Step 3: Word2Vec #
################################################################################
source("Text_Word_Vec_Analysis.R") # From my Mac
# Train multiple models - see source file to amend parameters
#text <- "Processed_Files/Letters_corpus.txt"
text <- paste0(config_process_folderpath, "/Letters_corpus.txt")
w2v_train(text)
# Search for chosen word in corpus
#input_dir2 <- "Text_Files" # path to .txt letters' folder
files2 <- dir(config_extract_folderpath, "\\.txt") # vector of file names
# text_kwic(list of file names, input directory, word, context )
text_kwic(files2[1:100], config_extract_folderpath, "rising", 6)
input_dir4 <- paste0(config_results_folderpath, "/W2V")
files4 <- dir(input_dir4, "\\.bin")
input_bin <- file.path(input_dir4,files4)
# Load VSMs
vsm1 <- read.vectors(input_bin[1])
vsm4 <- read.vectors(input_bin[4])
# 10 nearest words
nearest_to(vsm1, vsm1[["rising"]])
nearest_to(vsm4, vsm4[["rising"]])
# Clustering on a small subsection
set.seed(42)
centers <- 5
x <- nearest_to(vsm1, vsm1[["rising"]], 50) # 50 nearest words to rising
y <- vsm1[[names(x), average = F]] # creates a VSM of nearest 50 words
clustering <- kmeans(y, centers = centers, iter.max = 40)
w2v_clus_vsm1 <- sapply(sample(1:centers, 5), function(n){
names(clustering$cluster[clustering$cluster ==n][1:10])
})
set.seed(42)
centers <- 5
x <- nearest_to(vsm4, vsm4[["rising"]], 50) # 50 nearest words to rising
y <- vsm4[[names(x), average = F]] # creates a VSM of nearest 50 words
clustering <- kmeans(y, centers = centers, iter.max = 40)
w2v_clus_vsm4 <- sapply(sample(1:centers, 5), function(n){
names(clustering$cluster[clustering$cluster ==n][1:10])
})
# Explore the vector space model - word list and plots for chosen term
# w2v_analysis2(VSM, word, seed, path for output, output file name, total words)
w2v_analysis2(vsm1, "theatre", 42, paste0(config_results_folderpath, "/VSM1_Trial/"), "theatre200", 200)
w2v_analysis2(vsm4, "prisoner", 42, paste0(config_results_folderpath, "/VSM4_Trial/"), "prisoner300", 300)
|
7784a59ae6adc1989c4d67e701016a0ba313769b | f0a4c280469b2074011c61ba2b32c8723317419a | /app.R | 4ac635a0a6dbdb3f301e99e08949ea5ded6d4a40 | [] | no_license | megwill09/inns | a6bb4a406208aac7314a9183b826cc413dece22e | 77e3270602d3be6f2239c1c39a9ddd6ddc9e8468 | refs/heads/master | 2023-05-25T13:03:58.222162 | 2021-04-20T09:54:44 | 2021-04-20T09:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,699 | r | app.R | # Source script for app
source('./app_fn.R')
# Source script for uploading to server
source('./jvb.R')
# Source the password for the server
source('./pwd.R')
# Address for devwarehouse
#https://devwarehouse.indicia.org.uk/index.php/survey/edit/500
# Set folder name for emails
folderName = "Inbox"
## create outlook object
OutApp <- COMCreate("Outlook.Application")
outlookNameSpace = OutApp$GetNameSpace("MAPI")
# Create list of emails
folder <- outlookNameSpace$Folders(userName)$Folders(folderName)
emails <- folder$Items
num_emails <- folder$Items()$Count()
# Emails may not be in date order, so assign a lookup df to date order, but
# only if computerspeed = 1 (fast) or 2 (middling)
# This is to prevent this running on a slow computer
computerspeed <- 2
if(computerspeed == 1){
cat('Reading in emails\n')
datesDF <- pblapply(1:num_emails, FUN = function(i){
dateEmail <- getDate(emails(i))
data.frame(dates = as.Date(dateEmail),
datetime = dateEmail,
subj = emails(i)[['Subject']],
Sender = getSender(emails(i)),
j = i,
stringsAsFactors = FALSE)
}) %>% bind_rows()
datesDF <- datesDF %>% arrange(desc(datetime))
} else if(computerspeed == 2){
cat('Reading in email dates\n')
datesDF <- pblapply(1:num_emails, FUN = function(i){
dateEmail <- getDate(emails(i))
data.frame(dates = as.Date(dateEmail),
datetime = dateEmail,
subj = '',
Sender = '',
j = i,
stringsAsFactors = FALSE)
}) %>% bind_rows()
datesDF <- datesDF %>% arrange(desc(datetime))
} else {
datesDF <- data.frame(dates = '',
subj = '',
Sender = '',
j = 1:num_emails)
}
datesDF$i <- 1:nrow(datesDF)
datesDF$Subject <- substr(datesDF$subj,1,100)
datesDF$Date <- as.character(datesDF$dates)
# Mutable per-email state kept outside Shiny's reactive system: the fields
# of the currently displayed email plus the values collected for the
# database upload.  Initialised from the newest email (datesDF$j[1]).
global <- list(
  sender = getSender(emails(datesDF$j[1])),
  sendername = emails(datesDF$j[1])[['SenderName']],
  subject = emails(datesDF$j[1])[['Subject']],
  msgbody = emails(datesDF$j[1])[['Body']],
  date = as.Date(getDate(emails(datesDF$j[1]))),
  datetime = getDate(emails(datesDF$j[1])),
  tel = '',
  location = '',
  comment = '',
  correspondance = '',
  body = '',
  geoparsed = data.frame(),
  num_attachments = emails(datesDF$j[1])[['attachments']]$Count(),
  attachment_location = 'tmp',
  expert = '',
  location_description = ''
)
# Define the UI
# Shiny UI: a sidebar of task tabs (Upload / Email / Jump / Tools) plus a
# main panel showing the current email, its attachments and navigation
# buttons wired to the observers in server().
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      tabsetPanel(
        # Record-entry form; fields are pre-filled from the current email
        tabPanel('Upload',
                 fluidRow(column(6,textInput(inputId = 'sender', label = 'Sender',
                                             value = global$sender)),
                          column(6,textInput(inputId = 'name', label = 'Name',
                                             value = global$sendername))),
                 textInput(inputId = 'subject', label = 'Subject',
                           value = global$subject),
                 fluidRow(column(5,
                                 textInput(inputId = 'date', label = 'Date',
                                           value = as.character(global$date))),
                          column(7,
                                 selectInput(inputId = 'species', label = 'Species',
                                             choices = c('Vespa velutina',''),
                                             selected = 'Vespa velutina'))),
                 fluidRow(column(5,
                                 textInput(inputId = 'location', label = 'Location',
                                           placeholder = 'gridref of observation')),
                          column(7,
                                 textInput(inputId = 'tel', label = 'Telephone Number',
                                           value = ''))),
                 textInput(inputId = 'location_description',
                           label = 'Location Description', value = ''),
                 textInput(inputId = 'comment', label = 'Comment', value = ''),
                 textAreaInput(inputId = 'correspondance', label = 'Correspondence',
                               height = '100px', value = global$msgbody),
                 selectInput(inputId = 'expert', label = 'Expert Knowledge?',
                             choices = c('',
                                         'General nature recording',
                                         'Entomology',
                                         'Apiculture'),
                             selected = ''),
                 checkboxInput(inputId = 'includeAtt', label = 'Include Attachment Images',
                               value = TRUE),
                 actionButton(inputId = 'upload_Indicia', label = 'Upload to Database'),
                 textOutput('serverResponse'),
        ),
        # Reply-to-sender form; emailOn acts as a safety switch
        tabPanel('Email',
                 textInput(inputId = 'recipient', label = 'Recipient',
                           value = global$sender),
                 textInput(inputId = 'subject_reply', label = 'Subject',
                           value = global$subject),
                 selectInput(inputId = 'email_text_selector', label = 'Email Response',
                             choices = c(names(responses)),
                             selected = 'Custom'),
                 textAreaInput(inputId = 'email_text',height = '100px',
                               label = 'Message Body', value = global$body),
                 textOutput('sendemail'),
                 actionButton(inputId = 'send_thanksbutno', label = 'Send reply'),
                 checkboxInput(inputId = 'emailOn', label = 'Turn on Email Function',
                               value = FALSE),
        ),
        # Jump to an email by date or by index; the date selector and the
        # summary table are only offered when the lookup table was built
        tabPanel('Jump',
                 if(computerspeed <= 2){
                   dateInput(inputId = 'dateselector', label = 'Select Email Date')
                 },
                 fluidRow(
                   column(6,
                          textInput(inputId = 'i', label = 'Select Index (i)',
                                    value = '1')),
                   column(6,
                          HTML("<br>"),
                          actionButton(inputId = 'jumpToIndex', label = 'Jump to Index'))
                 ),
                 HTML("<hr>"),
                 if(computerspeed == 1){
                   dataTableOutput(outputId = 'summaryDF')
                 }
        ),
        # Misc tools: grid-reference websites, actions-file reset, geoparser
        tabPanel('Tools',
                 HTML('<br>'),
                 fluidRow(
                   column(6,
                          actionButton(inputId='launchBrowser',label='GridRef Finder')),
                   column(6,
                          actionButton(inputId='launchBrowser2',label='GAGR'))),
                 HTML("<hr>"),
                 fluidRow(
                   column(6,
                          bsButton(inputId = 'clearActions', label = 'Clear Actions')),
                   column(6,
                          checkboxInput(inputId = 'turnonclearActions', label = 'Turn on Clear'))),
                 textOutput('clearMessage'),
                 HTML("<hr>"),
                 fluidRow(
                   column(6,
                          actionButton(inputId = 'geoparse', label = 'Attempt to Geoparse')),
                   column(6,
                          actionButton(inputId = 'cleargeoparse', 'Clear Table'))),
                 dataTableOutput(outputId = 'geotable')
        ))),
    # Email viewer: navigation buttons (10 back / 1 back / attachment
    # back-forward / 1 forward / 10 forward), then attachment preview and body
    mainPanel(fluidRow(column(2,bsButton(inputId = 'aftten', label = '',style = 'info',
                                         icon = icon('arrow-circle-left', 'fa-2x')) %>%
                                myPopify(txt = 'Go back 10 emails')),
                       column(2,bsButton(inputId = 'aft', label = '',style = 'primary',
                                         icon = icon('arrow-left', 'fa-2x')) %>%
                                myPopify(txt = 'Go back 1 email')),
                       column(2,bsButton(inputId = 'aft_img', label = '',style = 'success',
                                         icon = icon('chevron-left', 'fa-2x')) %>%
                                myPopify(txt = 'Go back 1 attachment')),
                       column(2,bsButton(inputId = 'fore_img', label = '',style = 'success',
                                         icon = icon('chevron-right', 'fa-2x')) %>%
                                myPopify(txt = 'Go forward 1 attachment')),
                       column(2,bsButton(inputId = 'fore',label = '',style = 'primary',
                                         icon = icon('arrow-right', 'fa-2x')) %>%
                                myPopify(txt = 'Go forward 1 email')),
                       column(2,bsButton(inputId = 'foreten', label = '',style = 'info',
                                         icon = icon('arrow-circle-right', 'fa-2x')) %>%
                                myPopify(txt = 'Go forward 10 emails'))
    ),
    textOutput('attachment_info'),
    actionButton("att_open", "Open File"),
    imageOutput('myImage', height = '100%'),
    htmlOutput('msgbody'),
    htmlOutput("inc")
    )
  )
)
# Create the server
server <- function(input, output, session){
values <-
reactiveValues(i = 1,
sender = getSender(datesDF$j[1]),
sendername = emails(datesDF$j[1])[['SenderName']],
subject = emails(datesDF$j[1])[['Subject']],
msgbody = emails(datesDF$j[1])[['Body']],
date = as.Date(getDate(emails(datesDF$j[1]))),
datetime = getDate(emails(datesDF$j[1])),
attachments = ifelse(emails(datesDF$j[1])[['attachments']]$Count()>0,
emails(datesDF$j[1])[['attachments']]$Item(1)[['DisplayName']],
''),
num_attachments = emails(datesDF$j[1])[['attachments']]$Count(),
attachment_location = 'tmp',
num_emails = num_emails,
img_num = 1,
includeAtt = TRUE)
# Jump to selected date
observeEvent(input$dateselector, {
if(computerspeed <= 2){
if(any(datesDF$dates==input$dateselector)){
lastmatch <- which(datesDF$dates==input$dateselector) %>% tail(1)
values$i <- datesDF$i[lastmatch]
} else {
# We don't have an email which matches that date, so find the nearest,
# looking forward in time first
diffs <- datesDF$dates - input$dateselector
if(any(diffs > 0)){
lastmatch <- which(diffs==min(diffs[diffs > 0])) %>% tail(1)
values$i <- datesDF$i[lastmatch]
} else {
# A time further in the future than any emails has been picked.
# Go to the top
values$i <- 1
}
}
# Call the wrapper function to jump to the email and get outputs
retList <- jumpTo(emails, values, global, datesDF, output, session)
output <- retList$output
values <- retList$values
global <- retList$global
}
})
# Jump to selected index value
observeEvent(input$jumpToIndex, {
if(any(as.character(datesDF$i)==input$i) & !is.na(as.numeric(input$i))){
values$i <- as.numeric(input$i)
} else {
values$i <- 1
}
# Call the wrapper function to jump to the email and get outputs
retList <- jumpTo(emails, values, global, datesDF, output, session)
output <- retList$output
values <- retList$values
global <- retList$global
})
# Going forward in time, subtract one from the email counter (i),
# or loop to the end if we hit the beginning
observeEvent(input$fore, {
if(values$i!=1){
values$i <- values$i - 1
} else {
values$i <- values$num_emails
}
# Call the wrapper function to jump to the email and get outputs
retList <- jumpTo(emails, values, global, datesDF, output, session)
output <- retList$output
values <- retList$values
global <- retList$global
})
# Going forward in time, subtract ten from the email counter (i),
observeEvent(input$foreten, {
values$i <- values$i - 10
if(values$i<0){
values$i <- 1
}
# Call the wrapper function to jump to the email and get outputs
retList <- jumpTo(emails, values, global, datesDF, output, session)
output <- retList$output
values <- retList$values
global <- retList$global
})
# Going backward in time, add one to the email counter (i),
# or loop back to the beginning if we hit the end
observeEvent(input$aft, {
if(values$i<values$num_emails){
values$i <- values$i + 1
} else {
values$i <- 1
}
# Call the wrapper function to jump to an email and get outputs
retList <- jumpTo(emails, values, global, datesDF, output, session)
output <- retList$output
values <- retList$values
global <- retList$global
})
# Going backward in time, add ten to the email counter (i),
observeEvent(input$aftten, {
values$i <- values$i + 10
if(values$i>values$num_emails){
values$i <- values$num_emails
}
# Call the wrapper function to jump to an email and get outputs
retList <- jumpTo(emails, values, global, datesDF, output, session)
output <- retList$output
values <- retList$values
global <- retList$global
})
# Go backward one image in the email
observeEvent(input$aft_img, {
if(values$img_num != 1){
values$img_num <- values$img_num - 1
} else {
values$img_num <- values$num_attachments
}
# No need to get any email info, just grab the relevant attachment
if(values$num_attachments > 1)
{
return_list <- format_attachments(emails, values, output, datesDF)
output <- return_list$output
values <- return_list$values
global$attachment_location <- values$attachment_location
#cat(global$attachment_location,'\n')
}
})
# Go forward one image in the email
observeEvent(input$fore_img, {
if(values$img_num < values$num_attachments){
values$img_num <- values$img_num + 1
} else {
values$img_num <- 1
}
# No need to get any email info, just grab the relevant attachment
if(values$num_attachments>1)
{
return_list <- format_attachments(emails, values, output, datesDF)
output <- return_list$output
values <- return_list$values
global$attachment_location <- values$attachment_location
#cat(global$attachment_location,'\n')
}
})
output$msgbody <- renderUI({
HTML(paste(str_replace_all(values$msgbody,'\\n','<br>')))
})
if(computerspeed == 1){
output$summaryDF <- renderDataTable({
(datesDF %>% select(i, Subject, datetime, Sender))
})
}
observeEvent(input$att_open, {
if(values$num_attachments>0){
shell(paste0(tmpfiles, '/', values$attachment_location))
}
})
# Attempt to geoparse
observeEvent(input$geoparse, {
output$geotable <- renderDataTable({
# Check if there's a postcode in the text
pcdf <- getpostcode(values$msgbody)
# Geoparse text with a progress bar
withProgress(message = 'Geoparsing...', value = 0, {
# Take text and split by words, removing stopwords
textl <- gsub('[[:punct:] ]+',' ',values$msgbody) %>% tolower()
textl <- strsplit(values$msgbody,split = ' ') %>% unlist()
textl <- textl[!duplicated(textl)]
textl <- textl[!(textl %in% stopwrds)]
# Try to find words in geonames
results <- lapply(1:length(textl), FUN = function(l){
incProgress(1/length(textl))
geoparse(textl, l)
}) %>% bind_rows()
names(results) <- c('lat','lng','name')
})
global$geoparsed <- bind_rows(pcdf, results)
global$geoparsed
})
})
observeEvent(input$cleargeoparse, {
global$geoparsed <- data.frame()
output$geotable <- renderDataTable({
global$geoparsed
})
})
observeEvent(input$email_text_selector, {
global$body <- responses[[input$email_text_selector]]
updateTextAreaInput(session, inputId = 'email_text',
label = 'Message Body', value = global$body)
})
# Clear Actions rds
observeEvent(input$clearActions, {
if(input$turnonclearActions){
overwrite_actions()
output$clearMessage <- renderText({
paste0('Actions File Cleared')
})
} else {
output$clearMessage <- renderText({
paste0('Actions File NOT Cleared - please check the \'Turn on Clear\' button')
})
}
})
# Send an email if this button is pressed (only when the email function
# has been enabled via the input$emailOn checkbox). On success, the reply
# is recorded against the current email in the actions file.
observeEvent(input$send_thanksbutno, {
  if(input$emailOn){
    # Basic sanity check on the recipient address.
    # FIX: the original class "[[:alnum:].-_]" contained the range ".-_"
    # (0x2E-0x5F), which also accepted characters such as "/", ":" and "@";
    # "._-" (hyphen last) lists the three literal characters instead.
    if(!grepl(pattern = "^[[:alnum:]._-]+@[[:alnum:].-]+$",
              x = input$recipient)){
      output$sendemail <- renderText({
        paste0('Please enter a valid email address and try again')
      })
    } else {
      # send_email() is an app helper; it returns the updated reactive
      # state, which we keep.
      values <-
        send_email(OutApp = OutApp,
                   values = values,
                   reply = input$email_text,
                   recipient = input$recipient,
                   msgBody = input$email_text,
                   subject = input$subject_reply,
                   from = from)
      output$sendemail <- renderText({
        paste0('Email sent')
      })
      # Log the 'reply' action for the email currently being viewed.
      updateactions(currentemail = emails(datesDF$j[values$i]),
                    action = 'reply')
    }
  } else {
    output$sendemail <- renderText({
      paste0('Email not sent - please check the \'Turn on Email Function\' button')
    })
  }
})
# Turn on attachment flag if ticked
# Mirrors the checkbox into the reactiveValues store so the upload
# handler below knows whether to include attachment images.
observeEvent(input$includeAtt,{
values$includeAtt <- input$includeAtt
})
# Embed an external grid-reference lookup site in the `inc` UI slot.
observeEvent(input$launchBrowser,{
output$inc <- renderUI({
getPage('https://gridreferencefinder.com/')
})
})
# Alternative grid-reference tool, rendered into the same UI slot.
observeEvent(input$launchBrowser2,{
output$inc <- renderUI({
getPage('https://www.bnhs.co.uk/2019/technology/grabagridref/gagrol.php#map')
})
})
# Upload the record to Indicia
# Collects the form fields into the `global` store, optionally uploads any
# attachment images first, builds the JSON submission, posts it to the data
# warehouse, and reports the resulting sample/occurrence IDs back to the UI.
# NOTE(review): getallimages/getnonce/postimage/createjson/postsubmission/
# updateactions are app helpers defined elsewhere; behaviour documented here
# is inferred only from how they are called.
observeEvent(input$upload_Indicia, {
# An empty telephone field becomes NULL so it is omitted from the JSON.
if(input$tel == ''){
global$tel <- NULL
} else {
global$tel <- input$tel
}
# Snapshot the remaining form inputs.
global$location <- input$location
global$correspondance <- input$correspondance
global$comment <- input$comment
global$expert <- input$expert
global$location_description <- input$location_description
global$sender <- input$sender
global$date <- input$date
global$sendername <- input$name
imageStr <- NULL
if(values$includeAtt){
# Attachment images are being included in the upload.
# Find out what they are and store temporary copies
imagelist <- getallimages(emails, values, datesDF)
if(!is.null(imagelist)){
cat('\nUploading images to data warehouse\n')
# One nonce + POST per image; collect the server image identifiers.
imageStr <- pblapply(imagelist, FUN = function(img){
getnonce(password = password, URLbase = URLbase) %>%
postimage(imgpath = img, URLbase = URLbase)
}) %>% unlist()
}
}
cat('\nUploading record to data warehouse\n')
# Assemble the submission payload (images referenced by imageStr, if any).
submission <- createjson(imgString = imageStr,
email = global$sender,
recordername = global$sendername,
tel = global$tel,
date = global$date,
location = global$location,
comment = global$comment,
correspondance = global$correspondance,
experience = global$expert,
location_description = global$location_description)
# createjson() signals a bad grid reference by returning this sentinel
# string instead of a payload.
if(submission=='Location improperly formatted'){
output$serverResponse <- renderText({
paste0(submission)
})
} else {
# Authenticate with a fresh nonce and post the record.
serverPost <- getnonce(password = password, URLbase = URLbase) %>%
postsubmission(URLbase = URLbase,
submission = submission)
serverOut <- serverPost %>% fromJSON()
# Report the IDs the warehouse assigned to the new sample/occurrence.
serverResp <- paste0('SUCCESS! ',
'Sample ID: ',serverOut$outer_id,
', Occurrence ID: ',
serverOut$struct$children %>%
filter(model == 'occurrence') %>% pull(id))
cat(serverResp,'\n')
output$serverResponse <- renderText({
paste0(serverResp)
})
# Log the upload (with its server-side IDs) against the current email.
updateactions(currentemail = emails(datesDF$j[values$i]),
action = 'upload',
sampleID = serverOut$outer_id,
occurrenceID = serverOut$struct$children %>%
filter(model == 'occurrence') %>% pull(id))
}
})
}
# Launch the app: wire the UI defined above to the server function.
shinyApp(ui = ui, server = server)
|
ef2771104e95b58eef17e675d9c149f39263002a | fa7f978201a7eed7001626f714cd9e89e8040c59 | /CFW_GWAS/code_dir/GWAS/effect_sizes/plot_af.R | 98975564ab5e22f291467c4ff6e9e10cd3960344 | [] | no_license | limix/SGE | 42a023286c863b7281033058edc93499439f9a53 | 50e690a163f24a185fbcef50f569912eededd3c6 | refs/heads/master | 2020-03-11T11:38:43.160238 | 2018-04-17T23:35:49 | 2018-04-17T23:35:49 | 129,974,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,037 | r | plot_af.R | load('output_dir/effect_sizes_additive_model/betas_conditioning/all_betas.RData')
#betas are additive betas^2
# Script: builds colour/size scales for QTL effect plots, derives minor
# allele frequencies from a dosage matrix, and writes a two-panel PDF of
# social genotype variance and social allelic effect densities.
# NOTE(review): indiv_DGE_QTLs_betas / indiv_SGE_QTLs_betas come from the
# all_betas.RData loaded at the top of the file (outside this excerpt).
#colour for var_expl
# Build an N-step yellow->red palette; variance-explained values are binned
# into it, with values above 3 clamped to a darker red.
N=4
hot=heat.colors(N)
hot=colorRampPalette(c('red','yellow'))(N)
hot=hot[seq(length(hot),1,by=(-1))]
plotted = indiv_DGE_QTLs_betas[,'var_expl']
# NOTE(review): this range() is immediately overwritten by the fixed
# c(0, 3) below -- dead assignment, presumably left from exploration.
r=range(plotted)
r = c(0, 3)
# Tiny offsets keep boundary values inside the outer cut() breaks.
cutted=cut(plotted,breaks=seq(r[1]-0.000000001,r[2]+0.000000001,length.out=N),labels=F)
cols_DGE=hot[cutted]
cols_DGE[plotted>3] = 'red4'
# Same palette and binning, now for the SGE QTLs.
hot=heat.colors(N)
hot=colorRampPalette(c('red','yellow'))(N)
hot=hot[seq(length(hot),1,by=(-1))]
plotted = indiv_SGE_QTLs_betas[,'var_expl']
r=range(plotted)
r = c(0, 3)
cutted=cut(plotted,breaks=seq(r[1]-0.000000001,r[2]+0.000000001,length.out=N),labels=F)
cols_SGE_add=hot[cutted]
#cex for sample size
# Map sample sizes in [800, 1700] onto point sizes 0.6-1.4.
# NOTE(review): cols_* and cexes_* are not used in the plotting code at the
# bottom of this excerpt; they are presumably consumed elsewhere -- confirm.
N=6
r = c(800, 1700)
plotted = indiv_DGE_QTLs_betas[,'sample_size']
cutted=cut(plotted,breaks=seq(r[1],r[2],length.out=N),labels=F)
cexes_DGE=seq(0.6,1.4,length = N)[cutted]
plotted = indiv_SGE_QTLs_betas[,'sample_size']
cutted=cut(plotted,breaks=seq(r[1],r[2],length.out=N),labels=F)
cexes_SGE=seq(0.6,1.4,length = N)[cutted]
# Mean dosage per SNP (columns of `dosages`), folded to minor allele
# frequency below.
load('data_dir/dosages/pruned_dosages/unstd_my_final_dosages.RData')
MAF = apply(dosages,FUN = mean, MAR = 2)
range(MAF)
#0 to 1
w = which(MAF>0.5)
MAF[w] = 1 - MAF[w]
#0 to 0.5
dens_MAF_all = density(MAF, from = 0, to = 0.5)
#MAD is always number of minor alleles (additive SGE model)!
#max(indiv_SGE_QTLs_betas[,'MAD'])
#[1] 1.50781 when it could vary btw 0 and 2
#max(indiv_DGE_QTLs_betas[,'MAD'])
#[1] 0.9971054 when it could vary btw 0 and 1
#for Suppl Figure 6 figure, want to plot genotype variance for DGE and SGE under two models
#the genotype variance is 2p(1-p) for DGE, 2Np(1-p) for SGE under the additive model, and 2Np(1-p)/N2 for SGE under the proportional model
# Match SGE QTL SNP names (stripping the '_social' suffix) to the MAF vector.
motch_SGE = match(sub('_social','',indiv_SGE_QTLs_betas[,2]),names(MAF))
MAF_SGE_QTLs = MAF[motch_SGE]
# 2*N*p*(1-p) with N = 2 cage mates (additive model) vs p*(1-p)
# (proportional model), per the formula comment above.
SGE_QTLs_vars_add = 2 * 2 * MAF_SGE_QTLs * (1-MAF_SGE_QTLs)
SGE_QTLs_vars_prop = MAF_SGE_QTLs * (1-MAF_SGE_QTLs)
dens_SGE_QTLs_vars_add = density(SGE_QTLs_vars_add, from = 0, to = 1)
dens_SGE_QTLs_vars_prop = density(SGE_QTLs_vars_prop, from = 0, to = 0.25)
#allelic effect (betas squared) for additive SGE model
r = range(indiv_SGE_QTLs_betas[,'add_allelic_effect'])
dens_SGE_QTLs_all_eff = density(indiv_SGE_QTLs_betas[,'add_allelic_effect'], from = r[1], to = r[2])
# Shared y-limit so both variance densities fit on one panel.
r_SGE_QTLs_vars = range(c(dens_SGE_QTLs_vars_add$y,dens_SGE_QTLs_vars_prop$y))
# Two-panel figure: variance densities (both models) and allelic effects.
pdf('plots_dir/effect_sizes/paper_marginals.pdf', width = 13)
par(mfrow = c(1,2))
plot(dens_SGE_QTLs_vars_add$x, dens_SGE_QTLs_vars_add$y, type = 'l', xlab = 'Social genotype variance', ylab = 'Density', las = 1, col = 'red', xlim = c(0,1), ylim = r_SGE_QTLs_vars)
lines(dens_SGE_QTLs_vars_prop$x, dens_SGE_QTLs_vars_prop$y, col = 'orange')
legend(x = 'topright', legend = c('Additive model','Proportional model'), fill = c('red','orange'), bty = 'n', border = 'white')
plot(dens_SGE_QTLs_all_eff$x, dens_SGE_QTLs_all_eff$y, type = 'l', xlab = 'Social allelic effect', ylab = 'Density', las = 1, col = 'red')
dev.off()
|
eed3715bcc5a3113992498102f6cf16c3e9e185a | 22e28998d73d1898500b71886e1963e11fdcc654 | /RCode/archetypalAnalysis.R | afe4d2d80b84c87b3898f4a184e292b478f0fc91 | [] | no_license | sefield/CodeEx | 9b15281731f2f6d79d5075299ef7715fef05954a | 2fb95baa1c3e6ad185037fc899b2e404511fbd59 | refs/heads/master | 2021-01-17T11:27:35.938700 | 2017-03-06T06:03:43 | 2017-03-06T06:03:43 | 84,036,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,876 | r | archetypalAnalysis.R | #Data loading and setup.
# Script: archetypal analysis of Stack Overflow metric data -- loads two
# CSVs, standardises them, fits robust archetypes (k = 3) on the unscaled
# data and classic archetypes on the scaled data, then plots diagnostics.
rawData <- read.csv('/Users/sradevski/Desktop/JSCode/StackOverflowCsvData.csv')
rawLibraryData <- read.csv('/Users/sradevski/Desktop/JSCode/librariesCsv.csv')
data <- rawData
libraryData <- rawLibraryData
rownames(data) = data$id #set what the rownames are (so they can be put back later)
rownames(libraryData) = libraryData$id
data$id = NULL #delete the id column
libraryData$id = NULL
# Keep unscaled copies: the robust fit below runs on unscaled data.
unscaledData <- data
unscaledLibraryData <- libraryData
data <- scale(data) #standardize/normalize variables
libraryData <- scale(libraryData)
#Archetypal analysis
library('modeltools')
library('archetypes')
library('nnls')
library("RColorBrewer")
col_pal <- brewer.pal(3, "Set1")
col_black <- rgb(0, 0, 0, 0.2)
### Data set with outliers:
# set.seed(1234)
# outliers <- t(sapply(runif(5, min = 1.5, max = 2),
# function(x)
# x * apply(unscaledData, 2, max) + apply(unscaledData, 2, IQR)))
# 
# datao1 <- scale(rbind(unscaledData, outliers))
# pairs(datao1)
#Sort and print to see outliers
library(dplyr)
head(arrange(rawData, loc))
### Robust archetypal algorithm:
# Seeded for reproducibility; nrep restarts per k to avoid bad local optima.
set.seed(36)
#Create Normal Archetypes
ras <- stepArchetypes(unscaledData, k = 1:5, verbose = FALSE, nrep = 4, method = robustArchetypes)
screeplot(ras, cex = 1.5, cex.axis = 1.5, cex.lab = 1.5)
set.seed(10536)
# Final robust fit with 3 archetypes, plus diagnostic plots below.
ra.oz1 <- robustArchetypes(unscaledData, 3)
parameters(ra.oz1)
library(plotrix)
#barplot(ra.oz1, unscaledData, percentiles = TRUE, which = "beside", which.beside="variables") #Change to FALSE to show from -1 to 1
#barplot(ra.oz1, unscaledData, percentiles = TRUE, col = col_pal[1:3]) #Change to FALSE to show from -1 to 1
pcplot(ra.oz1, unscaledData, atypes.col = col_pal[1:3], atypes.lwd = 5, cex.axis = 0.8)
legend(x = 1, y = 1, legend = sprintf("A%s", 1:3), col = col_pal[1:3], lwd = 5, bg = "white", cex = 0.7)
plot(rss(ra.oz1, type = 'single'), xlab = '', ylab = 'RSS')
plot(weights(ra.oz1, type = 'reweights'), xlab = '', ylab = 'Weight')
simplexplot(ra.oz1,labels_cex = 1,points_pch = 19 , show_labels = TRUE, show_direction = TRUE) #Plot the archetype result.
#Create Normal Archetypes
# NOTE(review): `as` masks base::as() from here on -- consider renaming.
as <- stepArchetypes(data, k = 1:5, verbose = FALSE, nrep = 4)
screeplot(as)
a3 <- bestModel(as[[3]])
#Get coefficients
# Per-observation mixture coefficients over the 3 archetypes.
cof <- coefficients(a3)
#cof
archResult <- data.frame(data, cof)
#write.table(archResult, file = '/Users/sradevski/Desktop/ExampleMetric/Result/ResultFromR.csv', row.names=FALSE,col.names=TRUE,sep=",")
nparameters(a3)
rss(as[2]) # The residual sum-of-squares (RSS) indicates how well the original data can be reproduced as mixtures of the archetypes.
t(atypes(a3))
parameters(a3)
t(parameters(a3))
simplexplot(a3,labels_cex = 1,points_pch = 19 , show_labels = TRUE, show_direction = TRUE) #Plot the archetype result.
png('Plot.png',width=1024,height=3072)
# NOTE(review): `d2` is not defined anywhere in this script -- this line
# will error; the earlier barplot calls used unscaledData. Confirm intent.
barplot(a3, d2, percentiles = FALSE,which='below')
|
5c5ed071d6d1309392142dbcc45303a3570feff3 | 3e1f696029222b31796a5adef0afc272f6cc4c11 | /IAU_DeltaOnly_20190820.R | f97cf3240f61d94f143f1b33fc46d565b50f7e4c | [] | no_license | dehrlich014/DoriansStockDataCode | 82c890070bf66075b331cb80c056e24122a80ec5 | b64f6ee2d369d7c269efd266701065d15b3c5347 | refs/heads/master | 2023-02-03T13:52:58.288342 | 2020-12-22T20:08:55 | 2020-12-22T20:08:55 | 275,254,152 | 0 | 0 | null | 2020-07-16T22:55:35 | 2020-06-26T21:51:11 | R | UTF-8 | R | false | false | 1,964 | r | IAU_DeltaOnly_20190820.R | ####This document reads in the performance data of the ishares Gold ETF from a chosen time interval
####and computes two statistics, the change in value from a previous close to a current close, or the AbsChange,
####and the percentage change between a previous close and a current close, or Delta.
####Of equal significance, this document is read into by another document, 'NSRGYMerged8_NormClose_REVISED_20200627.R'.
#The dataset comes straight from Yahoo Finance.
IAU_Historical <- read.csv("IAU_20070411_20190820.csv")
library(dplyr)
library(ggplot2)
library(stringr)
library(tidyr)
###################################
###################################
# Number of trading days = number of rows (length of the first column).
IAU_N <- length(IAU_Historical[[1]])
IAU_Historical <- rename(IAU_Historical,"AdjClose" = "Adj.Close")
####Reformatting the date into something that R can make sense of.
#IAU_Historical$Date <- (as.Date(IAU_Historical$Date, format = "%m/%d/%y"))
IAU_Historical$Date <- (as.Date(IAU_Historical$Date))
####We just don't need these variables at this time.
IAU_Historical$Open <- NULL
IAU_Historical$High <- NULL
IAU_Historical$Low <- NULL
####We will come back to them, for sure.
IAU_Historical$Close <- NULL
#We don't need the close because we are using the AdjClose... always.
# Pre-fill the two derived columns; row 1 keeps 0 (no previous close).
IAU_Historical$AbsChange <- 0
IAU_Historical$Delta <- 0
####I'll put the new NormClose variable here, and then come back to it
####after we do all the usual work with the AdjClose.
####This is really what we care most about, getting the deltas from the AdjClose's.
####Once we do this, we can use *these* deltas to back into generating the NormClose.
# Day-over-day absolute and fractional change of the adjusted close.
# NOTE(review): assumes at least 2 rows (2:IAU_N); this loop could be
# vectorised with diff(IAU_Historical$AdjClose) for the same result.
for(i in 2:IAU_N){
IAU_Historical$AbsChange[i] <- (IAU_Historical$AdjClose[i] - IAU_Historical$AdjClose[i-1])
IAU_Historical$Delta[i] <- (IAU_Historical$AbsChange[i]/IAU_Historical$AdjClose[i-1])
}
# for(i in 2:IAU_N){
#   IAU_Historical$NormClose[i] <- IAU_Historical$NormClose[i-1]*(1 + IAU_Historical$Delta[i])
# }
# plot(IAU_Historical$NormClose, type = "l")
|
789f8c475d19be8434f238fa61507de3233c063e | e7601987c26482a7ff596ed0008c903df8e29213 | /myutils.R | 65e124497f79c58023c88419b7dde358a5ed5443 | [] | no_license | mtafiti/rstuff-for-ser | 3dd2b030577de08f36082ede5e68bee6218e5499 | 28c94ae38f5d9ca325ff735a7c1a8d40b09b6746 | refs/heads/master | 2021-01-10T06:50:23.206510 | 2016-04-05T12:44:38 | 2016-04-05T12:44:38 | 53,937,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 188 | r | myutils.R |
# List user-installed (non-base/recommended) packages.
#
# Selects the Package, Version and Priority columns of installed.packages(),
# keeps only rows whose Priority is NA (i.e. packages the user installed
# themselves rather than "base"/"recommended" ones), and prints their name
# and version without row numbers.
#
# Returns (invisibly) the data frame with columns "Package" and "Version",
# so the result can also be captured programmatically.
showMyPackages <- function(){
  # Columns 1, 3, 4 of installed.packages() are Package, Version, Priority.
  ip <- as.data.frame(installed.packages()[, c(1, 3:4)])
  rownames(ip) <- NULL
  ip <- ip[is.na(ip$Priority), 1:2, drop = FALSE]
  print(ip, row.names = FALSE)
  invisible(ip)
}
bff238f473786af99af55e872d611654c17217b8 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609960521-test.R | 78c12ddac43a371a25cd56e476bf419706e9e214 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 555 | r | 1609960521-test.R | testlist <- list(x = integer(0), y = c(1869359146L, 1298231382L, 1768257321L, 676545880L, 1344299887L, 1853060137L, 1869573160L, 711158895L, 1944387584L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
b5b62f958d5b4f501812dcdcaefa8453a7113bbb | 18e1744249ac1d85a1d884368d28515a077b487f | /R_Analysis/01_Demo/Module_15_notes.R | c19e1a28c93d337602086fa89af8cc3c8faf6aab | [] | no_license | Baylex/MechaCar_Statistical_Analysis | ac126e82c7b5c147ac9e9b3abc433cbbb0487e9d | 563842c5ef98bd3e2e3eef2cc9afe18a122d805a | refs/heads/main | 2023-08-10T18:54:36.082224 | 2021-09-25T03:58:21 | 2021-09-25T03:58:21 | 334,603,716 | 0 | 13 | null | null | null | null | UTF-8 | R | false | false | 12,958 | r | Module_15_notes.R | #15.1.5 install packages
install.packages("tidyverse")
install.packages("jsonlite")
#15.2.1 put stuff in the console
#15.2.2 c() fucntion
c()
#15.2.3 pull in files
?read.csv()
demo_table <- read.csv(file='demo.csv',check.names=F,stringsAsFactors = F)
library(jsonlite)
?fromJSON()
demo_table2 <- fromJSON(txt='demo.json')
#15.2.4 how to select data
x <- c(3, 3, 2, 2, 5, 5, 8, 8, 9)
x[3]
demo_table[3,"Year"]
demo_table[3,3]
demo_table$"Vehicle_Class"
demo_table$"Vehicle_Class"[2]
filter_table <- demo_table2[demo_table2$price > 10000,]
?subset()
filter_table2 <- subset(demo_table2, price > 10000 & drive == "4wd" & "clean" %in% title_status) #filter by price and drivetrain
filter_table3 <- demo_table2[("clean" %in% demo_table2$title_status) & (demo_table2$price > 10000) & (demo_table2$drive == "4wd"),]
?sample()
sample(c("cow", "deer", "pig", "chicken", "duck", "sheep", "dog"), 4)
num_rows <- 1:nrow(demo_table)
sample_rows <- sample(num_rows, 3)
demo_table[sample(1:nrow(demo_table), 3),]
demo_table[sample(1:nrow(demo_table), 3),]
#15.2.5 Transform, Group, and Reshape Data Using the Tidyverse Package
library(tidyverse)
?mutate()
demo_table <- demo_table %>% mutate(Mileage_per_Year=Total_Miles/(2020-Year),IsActive=TRUE) #add columns to original data frame
summarize_demo <- demo_table2 %>% group_by(condition) %>% summarize(Mean_Mileage=mean(odometer), .groups = 'keep') #create summary table
summarize_demo <- demo_table2 %>% group_by(condition) %>% summarize(Mean_Mileage=mean(odometer),Maximum_Price=max(price),Num_Vehicles=n(), .groups = 'keep') #create summary table with multiple columns
?gather()
demo_table3 <- read.csv('demo2.csv',check.names = F,stringsAsFactors = F)
long_table <- gather(demo_table3,key="Metric",value="Score",buying_price:popularity)
long_table <- demo_table3 %>% gather(key="Metric",value="Score",buying_price:popularity)
?spread()
wide_table <- long_table %>% spread(key="Metric",value="Score")
# table <- table[,order(colnames(table))] ###sorting
#15.3.1 Introduction to ggplot2
?ggplot()
#15.3.2 Build a Bar Plot in ggplot2
head(mpg)
plt <- ggplot(mpg,aes(x=class)) #import dataset into ggplot2
plt + geom_bar() #plot a bar plot
?geom_bar()
mpg_summary <- mpg %>% group_by(manufacturer) %>% summarize(Vehicle_Count=n(), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary,aes(x=manufacturer,y=Vehicle_Count)) #import dataset into ggplot2
plt + geom_col() #plot a bar plot
#15.3.3 Format Bar chart with functions
plt + geom_col() + xlab("Manufacturing Company") + ylab("Number of Vehicles in Dataset") #plot bar plot with labels
plt + geom_col() + xlab("Manufacturing Company") + ylab("Number of Vehicles in Dataset") + #plot a boxplot with labels
theme(axis.text.x=element_text(angle=45,hjust=1)) #rotate the x-axis label 45 degrees
#15.3.4 Build a Line Plot in ggplot2
mpg_summary <- subset(mpg,manufacturer=="toyota") %>% group_by(cyl) %>% summarize(Mean_Hwy=mean(hwy), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary,aes(x=cyl,y=Mean_Hwy)) #import dataset into ggplot2
plt <- ggplot(mpg,aes(x=displ,y=cty,color=class,shape=drv)) #import dataset into ggplot2
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class",shape="Type of Drive") #add scatter plot with multiple aesthetics
plt + geom_line() + scale_x_discrete(limits=c(4,6,8)) + scale_y_continuous(breaks = c(15:30)) #add line plot with labels
plt <- ggplot(mpg,aes(x=displ,y=cty)) #import dataset into ggplot2
plt + geom_point() + xlab("Engine Size (L)") + ylab("City Fuel-Efficiency (MPG)") #add scatter plot with labels
plt <- ggplot(mpg,aes(x=displ,y=cty,color=class)) #import dataset into ggplot2
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class") #add scatter plot with labels
# Skill Drill 15.3.4 size equals the y axis aka cty
plt <- ggplot(mpg,aes(x=displ,y=cty,color=class,size=cty))
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class")
# 15.3.5 Create Advanced Boxplots in ggplot2
plt <- ggplot(mpg,aes(y=hwy)) #import dataset into ggplot2
plt + geom_boxplot() #add boxplot
plt <- ggplot(mpg,aes(x=manufacturer,y=hwy)) #import dataset into ggplot2
plt + geom_boxplot() + theme(axis.text.x=element_text(angle=45,hjust=1)) #add boxplot and rotate x-axis labels 45 degrees
#Skill Drill 15.3.5 Customize the boxplot to be more aesthetic by adding some color and using dotted instead of solid lines.
# Codes for linetypes: http://www.sthda.com/english/wiki/ggplot2-line-types-how-to-change-line-types-of-a-graph-in-r-software
plt <- ggplot(mpg,aes(x=manufacturer,y=hwy,color=manufacturer))
plt + geom_boxplot(outlier.colour="blue",outlier.shape=8,linetype=3) + theme(axis.text.x=element_text(angle=45,hjust=1))
#15.3.6 Create Heatmap Plots
mpg_summary <- mpg %>% group_by(class,year) %>% summarize(Mean_Hwy=mean(hwy), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary, aes(x=class,y=factor(year),fill=Mean_Hwy))
plt + geom_tile() + labs(x="Vehicle Class",y="Vehicle Year",fill="Mean Highway (MPG)") #create heatmap with labels
mpg_summary <- mpg %>% group_by(model,year) %>% summarize(Mean_Hwy=mean(hwy), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary, aes(x=model,y=factor(year),fill=Mean_Hwy)) #import dataset into ggplot2
plt + geom_tile() + labs(x="Model",y="Vehicle Year",fill="Mean Highway (MPG)") + theme(axis.text.x = element_text(angle=90,hjust=1,vjust=.5)) #rotate x-axis labels 90 degrees
#15.3.7 Add Layers to Plots
plt <- ggplot(mpg,aes(x=manufacturer,y=hwy)) #import dataset into ggplot2
plt + geom_boxplot() + #add boxplot
theme(axis.text.x=element_text(angle=45,hjust=1)) + #rotate x-axis labels 45 degrees
geom_point() #overlay scatter plot on top
mpg_summary <- mpg %>% group_by(class) %>% summarize(Mean_Engine=mean(displ), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary,aes(x=class,y=Mean_Engine)) #import dataset into ggplot2
plt + geom_point(size=4) + labs(x="Vehicle Class",y="Mean Engine Size") #add scatter plot
mpg_summary <- mpg %>% group_by(class) %>% summarize(Mean_Engine=mean(displ),SD_Engine=sd(displ), .groups = 'keep')
plt <- ggplot(mpg_summary,aes(x=class,y=Mean_Engine)) #import dataset into ggplot2
plt + geom_point(size=4) + labs(x="Vehicle Class",y="Mean Engine Size") + #add scatter plot with labels
geom_errorbar(aes(ymin=Mean_Engine-SD_Engine,ymax=Mean_Engine+SD_Engine)) #overlay with error bars
mpg_long <- mpg %>% gather(key="MPG_Type",value="Rating",c(cty,hwy)) #convert to long format
head(mpg_long)
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + theme(axis.text.x=element_text(angle=45,hjust=1)) #add boxplot with labels rotated 45 degrees
?facet_wrap()
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(MPG_Type)) + #create multiple boxplots, one for each MPG type
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
# Skill Drill 15.3.7 use diff variables to do facet wrap
plt <- ggplot(mpg_long,aes(x=manufacturer,y=year,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(MPG_Type)) + #create multiple boxplots, one for each MPG type
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
# Skill Drill 15.3.7 use diff variables to do facet wrap
plt <- ggplot(mpg_long,aes(x=class,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(MPG_Type)) + #create multiple boxplots, one for each MPG type
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
# Skill Drill 15.3.7 use diff variables to do facet wrap
plt <- ggplot(mpg_long,aes(x=year,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(class)) + #create multiple boxplots, one for each MPG type
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
#15.4.1 Identifying Statistical Test Types
# refer to cheat sheet
#15.4.2 Identify Different Data Types
# use head() with [] and $ operator to look up column data
#15.4.3 Dive Into Distributions
# norm dist 68-95-99.7 and the central limit thm - larger pops = more normed
#15.4.4 Test for Normality
ggplot(mtcars,aes(x=wt)) + geom_density() #visualize distribution using density plot
?shapiro.test()
shapiro.test(mtcars$wt)
# 15.4.5 Understand Skew
# Left or Right - make notes in presentations
#15.5.1 Practice Hypothesis Testing
# null and alt hyp testing plus 1-tail and 2-tail testing
#15.5.2 Assess Error in Hypothesis Testing
# Error type and 1. set [Ho, Ha, lev sig],pick test, calc p, compare p to lev sig, reject/fail to reject
#15.6.1 Sample Versus Population Dataset
?sample_n()
population_table <- read.csv('used_car_data.csv',check.names = F,stringsAsFactors = F) #import used car dataset
plt <- ggplot(population_table,aes(x=log10(Miles_Driven))) #import dataset into ggplot2
plt + geom_density() #visualize distribution using density plot
sample_table <- population_table %>% sample_n(50) #randomly sample 50 data points
plt <- ggplot(sample_table,aes(x=log10(Miles_Driven))) #import dataset into ggplot2
plt + geom_density() #visualize distribution using density plot
#15.6.2 Use the One-Sample t-Test
?t.test()
t.test(log10(sample_table$Miles_Driven),mu=mean(log10(population_table$Miles_Driven))) #compare sample versus population means
#15.6.3 Use the Two-Sample t-Test
sample_table <- population_table %>% sample_n(50) #generate 50 randomly sampled data points
sample_table2 <- population_table %>% sample_n(50) #generate another 50 randomly sampled data points
t.test(log10(sample_table$Miles_Driven),log10(sample_table2$Miles_Driven)) #compare means of two samples
#15.6.4 Use the Two-Sample t-Test to Compare Samples
mpg_data <- read.csv('mpg_modified.csv') #import dataset
mpg_1999 <- mpg_data %>% filter(year==1999) #select only data points where the year is 1999
mpg_2008 <- mpg_data %>% filter(year==2008) #select only data points where the year is 2008
t.test(mpg_1999$hwy,mpg_2008$hwy,paired = T) #compare the mean difference between two samples
#15.6.5 Use the ANOVA Test
?aov()
mtcars_filt <- mtcars[,c("hp","cyl")] #filter columns from mtcars dataset
mtcars_filt$cyl <- factor(mtcars_filt$cyl) #convert numeric column to factor
aov(hp ~ cyl,data=mtcars_filt) #compare means across multiple levels
summary(aov(hp ~ cyl,data=mtcars_filt))
#15.7.1 The Correlation Conundrum
?cor()
head(mtcars)
plt <- ggplot(mtcars,aes(x=hp,y=qsec)) #import dataset into ggplot2
plt + geom_point() #create scatter plot
cor(mtcars$hp,mtcars$qsec) #calculate correlation coefficient
used_cars <- read.csv('used_car_data.csv',stringsAsFactors = F) #read in dataset
head(used_cars)
plt <- ggplot(used_cars,aes(x=Miles_Driven,y=Selling_Price)) #import dataset into ggplot2
plt + geom_point() #create a scatter plot
cor(used_cars$Miles_Driven,used_cars$Selling_Price) #calculate correlation coefficient
used_matrix <- as.matrix(used_cars[,c("Selling_Price","Present_Price","Miles_Driven")]) #convert data frame into numeric matrix
cor(used_matrix)
#15.7.2 Return to Linear Regression
?lm()
lm(qsec ~ hp,mtcars) #create linear model
summary(lm(qsec~hp,mtcars)) #summarize linear model
model <- lm(qsec ~ hp,mtcars) #create linear model
yvals <- model$coefficients['hp']*mtcars$hp +
model$coefficients['(Intercept)'] #determine y-axis values from linear model
plt <- ggplot(mtcars,aes(x=hp,y=qsec)) #import dataset into ggplot2
plt + geom_point() + geom_line(aes(y=yvals), color = "red") #plot scatter and linear model
#15.7.3 Perform Multiple Linear Regression
lm(qsec ~ mpg + disp + drat + wt + hp,data=mtcars) #generate multiple linear regression model
summary(lm(qsec ~ mpg + disp + drat + wt + hp,data=mtcars)) #generate summary statistics
#15.8.1 Category Complexities
?chisq.test()
table(mpg$class,mpg$year) #generate contingency table
tbl <- table(mpg$class,mpg$year) #generate contingency table
chisq.test(tbl) #compare categorical distributions
#15.9.1 Practice A/B Testing
# A/B testing is a randomized controlled experiment that uses a control (unchanged)
# and experimental (changed) group to test potential changes using a success metric.
# A/B testing is used to test whether or not the distribution of the success metric
# increases in the experiment group instead of the control group; we would not want
# to make changes to the product that would cause a decrease in the success metric.
#15.10.1 Whose Analysis Is It Anyway?
# Continuous vs discrete vs dichotomous
# independent vs dependent
# categorical data set |
057b6d364b5db197b1bff818879facd96a498838 | 1855cfeeb88680ebcad978f73ecf11aac5e37bf3 | /vmstools/man/VMShf.rd | dbc498f3c43dd10603c73351b95627a9f9db8b83 | [] | no_license | nielshintzen/vmstools | ddb996dd4d5b3b2504216911284d3dd872f81baa | 8bb3666d3778eac5b8be1c9454573a85157f11c3 | refs/heads/master | 2023-06-22T08:48:54.584274 | 2023-06-09T09:10:05 | 2023-06-09T09:10:05 | 37,326,848 | 16 | 13 | null | 2023-06-01T20:23:18 | 2015-06-12T14:35:02 | R | UTF-8 | R | false | true | 966 | rd | VMShf.rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vmstools-package.R
\docType{data}
\name{VMShf}
\alias{VMShf}
\title{Small high frequency test VMS dataset CANNOT BE DISTRIBUTED WITHOUT PRIOR
PERMISSION -> SEE Author}
\format{
A data frame with 702 observations on the following 6 variables.
\describe{ \item{list("ship")}{ID of ship} \item{list("declat")}{Decimal
notation of longitude position} \item{list("declon")}{Decimal notation of
latitude postion} \item{list("speed")}{Speed at ping}
\item{list("heading")}{Heading at ping} \item{list("date")}{Date and time of
ping} }
}
\source{
niels.hintzen@wur.nl
}
\description{
A short VMS dataset with high frequency interval rate (every 6 minutes) used
to test spatial and VMS functions. Dataframe contains vessel ID, decimal
longitude and decimal latitude position, speed and heading at ping and date
of ping.
}
\examples{
data(VMShf)
str(VMShf)
}
\references{
niels.hintzen@wur.nl
}
|
2fa9f17088357d97b8608c6b8d787bbd0d992855 | e15406b6e03914009289b663ffa67c8e635a6170 | /tests/testthat/test-bed.R | 4d255adeb35b7805960a7522a81e34ae952037a6 | [
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-copyleft",
"GPL-3.0-only"
] | permissive | theandyb/smaug | 8bce51094aa8bbb02244dff3f496907eecd9ec5e | 47959cfdb4fce389a5893080177cb84cd75c56bd | refs/heads/master | 2022-11-16T10:04:36.601358 | 2022-11-07T20:50:22 | 2022-11-07T20:50:22 | 143,783,962 | 0 | 0 | MIT | 2018-08-06T21:13:54 | 2018-08-06T21:13:54 | null | UTF-8 | R | false | false | 687 | r | test-bed.R | context("test-bed")
test_that("bed_granges works", {
testDat <- "chr1 0 249250621
chr2 0 243199373
chr3 0 198022430
chr4 0 191154276
chr5 0 180915260
chr6 0 171115067
chr7 0 159138663
chr8 0 146364022
chr9 0 141213431
chr10 0 135534747
chr11 0 135006516
chr12 0 133851895
chr13 0 115169878
chr14 0 107349540
chr15 0 102531392
chr16 0 90354753
chr17 0 81195210
chr18 0 78077248
chr19 0 59128983
chr20 0 63025520
chr21 0 48129895
chr22 0 51304566"
con <- textConnection(testDat)
test <- bed_to_granges(con)
close(con)
expect_equal(length(test), 22)
expect_is(test, "GRanges")
})
|
cd4af58a01cc1f2362c58b7c6fc883120fe973c1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/poisson.glm.mix/examples/init2.k.Rd.R | d06c89943dd853085bbfb740d06d6e4a74a6ee9e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,338 | r | init2.k.Rd.R | library(poisson.glm.mix)
### Name: init2.k
### Title: Initialization 2 for the beta_k parameterization (m=3).
### Aliases: init2.k
### ** Examples
# this is to be used as an example with the simulated data
data("simulated_data_15_components_bjk")
x <- sim.data[,1]
x <- array(x,dim=c(length(x),1))
y <- sim.data[,-1]
# At first a 2 component mixture is fitted using parameterization $m=1$.
run.previous<-bkmodel(reference=x, response=y, L=c(3,2,1), m=100, K=2,
nr=-10*log(10), maxnr=5, m2=3, t2=3, prev.z,
prev.clust, start.type=1, prev.alpha, prev.beta)
## Then the estimated clusters and parameters are used to initialize a
## 3 component mixture using Initialization 2. The number of different
## runs is set to tsplit=3 with each one of them using msplit = 5
## em iterations.
q <- 3
tau <- 1
nc <- 3
z <- run.previous$z
ml <- length(run.previous$psim)/(nc - 1)
alpha <- array(run.previous$alpha[ml, , ], dim = c(q, nc - 1))
beta <- array(run.previous$beta[ml, , ], dim = c(nc - 1, tau))
clust <- run.previous$clust
run<-init2.k(reference=x, response=y, L=c(3,2,1), K=nc, t2=3, m2=5, previousz=z,
previousclust=clust, previous.alpha=alpha, previous.beta=beta,mnr = 5)
summary(run)
# note: useR should specify larger values for m2, t2 for a complete analysis.
|
dfd322606a97b3c4aff935f8214b03da109e3f07 | 96d59e64e49050c1050212d3ddb312be95698387 | /updating_code/inclass_code/MLR1.R | 4512fa489116346ef6170938fc180e6afa91df44 | [] | no_license | lassiterdc/sys-4021-6021.github.io | d90e533b8c7b888417f8844303500ba3c049bf5f | 4755c848b9325af6280f0807489185fe3b0a555a | refs/heads/main | 2023-07-25T01:35:21.018597 | 2021-09-05T01:55:44 | 2021-09-05T01:55:44 | 403,077,696 | 0 | 0 | null | 2021-09-04T14:29:55 | 2021-09-04T14:29:54 | null | UTF-8 | R | false | false | 7,009 | r | MLR1.R | #
#
# Multiple Linear Regression 1
#
#******************************************************
##load data
# NOTE(review): `sourcedir` and `traindir` are never defined in this script;
# they must exist in the workspace before it runs (course setup) -- confirm.
# setwd() inside a script is also fragile for anyone sourcing it elsewhere.
setwd(sourcedir)
source("AccidentInput.R")
#load libraries
library(ggplot2)
library(GGally)
library(devtools) # for ggbiplot
library(ggbiplot)
# file.inputl/combine.data come from AccidentInput.R sourced above: read the
# yearly accident files and stack them into one data frame.
acts <- file.inputl(traindir)
totacts <- combine.data(acts)
##Build a data frame with only extreme accidents for ACCDMG
dmgbox <-boxplot(totacts$ACCDMG)
ggplot(as.data.frame(totacts$ACCDMG), aes(x=totacts$ACCDMG)) +
  geom_boxplot(col= "steelblue") + theme(plot.title = element_text(hjust = 0.5)) + coord_flip()
# Keep only accidents above the boxplot upper whisker (stats[5]).
xdmg <- totacts[totacts$ACCDMG > dmgbox$stats[5],]
## Remove duplicates from xdmg and call new data frame xdmgnd
xdmgnd <- xdmg[!(duplicated(xdmg[, c("INCDTNO", "YEAR", "MONTH", "DAY", "TIMEHR", "TIMEMIN")])),]
# Setup categorical variables
# Collapse the detailed CAUSE codes to their leading letter (M/T/S/H/E).
xdmgnd$Cause <- rep(NA, nrow(xdmgnd))
xdmgnd$Cause[which(substr(xdmgnd$CAUSE, 1, 1) == "M")] <- "M"
xdmgnd$Cause[which(substr(xdmgnd$CAUSE, 1, 1) == "T")] <- "T"
xdmgnd$Cause[which(substr(xdmgnd$CAUSE, 1, 1) == "S")] <- "S"
xdmgnd$Cause[which(substr(xdmgnd$CAUSE, 1, 1) == "H")] <- "H"
xdmgnd$Cause[which(substr(xdmgnd$CAUSE, 1, 1) == "E")] <- "E"
# This new variable, Cause, has to be a factor
xdmgnd$Cause <- factor(xdmgnd$Cause)
xdmgnd$Type <- factor(xdmgnd$TYPE, labels = c("Derailment", "HeadOn", "Rearend", "Side", "Raking", "BrokenTrain", "Hwy-Rail", "GradeX", "Obstruction", "Explosive", "Fire","Other","SeeNarrative"))
#***********************************************************
# Possible predictors of damage
#***********************************************************
# SPM
#Scatter plot matricies for quantitative predictors and single metric.
source("SPM_Panel.R")
uva.pairs(xdmgnd[,c("ACCDMG", "TRNSPD", "CARS", "TIMEHR", "TEMP")])
ggpairs(xdmgnd[,c("ACCDMG", "TRNSPD", "CARS", "TIMEHR", "TEMP")])
# PCA
# Principal components with the correlation matrix for extreme data with 1 metric and quantitative predictors.
source("PCAplots.R")
pred.pca <- princomp(xdmgnd[,c("ACCDMG", "TRNSPD", "CARS", "TIMEHR", "TEMP")], cor = T )
biplot(pred.pca)
## Which predictors are most correlated with accident damage?
###############################
# Categorical plots
# heatmap
# NOTE(review): sourcing a script over plain http from a third-party site is
# a supply-chain risk and breaks offline -- consider vendoring it locally.
source("http://www.phaget4.org/R/myImagePlot.R")
myImagePlot(table(xdmgnd$Cause, xdmgnd$Type), title = "No. of Accidents by Cause and Type of Accident")
## Which accident causes and types have the highest numbers of extreme accidents?
# Type & TRNSPD
library(lattice)
xyplot(log(ACCDMG)~TRNSPD | Type, data = xdmgnd, type = c("p", "r"))
qplot(TRNSPD, log(ACCDMG), data = xdmgnd) + geom_point() +
  geom_smooth(method = "lm", se = FALSE) + facet_wrap(~ Type, scales = "free")
# Cause & TRNSPD
xyplot(log(ACCDMG)~TRNSPD | Cause, data = xdmgnd, type = c("p", "r"))
qplot(TRNSPD, log(ACCDMG), data = xdmgnd) + geom_point() +
  geom_smooth(method = "lm", se = FALSE) + facet_wrap(~ Cause, scales = "free")
##What is notable about the relationship between train speed and accident
##damages for different accident causes and types?
#More complex xyplots
# Cause X Type and TRNSPD
xyplot(log(ACCDMG)~TRNSPD | Cause * Type, data = xdmgnd, type = c("p", "r"))
qplot(log(ACCDMG), TRNSPD, data = xdmgnd) + geom_point() +
  geom_smooth(method = "lm", se = FALSE) + facet_wrap(~ Cause * Type, scales = "free")
# Create the Derail variable &
# then look at interactions with Cause
xdmgnd$Derail <- (xdmgnd$Type == "Derailment")
# plot xy with interactions of Derail and Cause
xyplot(log(ACCDMG)~TRNSPD | Cause * Derail, data = xdmgnd, type = c("p", "r"))
qplot(log(ACCDMG), TRNSPD, data = xdmgnd) + geom_point() +
  geom_smooth(method = "lm", se = FALSE) + facet_wrap(~ Cause * Derail, scales = "free")
# Create a Freight variable
xdmgnd$Freight <- (xdmgnd$TYPEQ == 1)
# Interaction plots
# Plot interaction between Derailment and Cause
interaction.plot(xdmgnd$Derail, xdmgnd$Cause,log(xdmgnd$ACCDMG))
ggplot() +
  aes(x = xdmgnd$Derail, y = log(xdmgnd$ACCDMG), group = xdmgnd$Cause, color = xdmgnd$Cause) +
  stat_summary(fun = mean, geom = "point") +
  stat_summary(fun = mean, geom = "line")
# Interaction plots with quantitative variables
# Discretize the quantitative predictors into low/high bins for the
# interaction plots below.
Speed <- cut(xdmgnd$TRNSPD, c(min(xdmgnd$TRNSPD),15,max(xdmgnd$TRNSPD)), include.lowest = T, labels = c("low speed", "high speed"))
Cars <- cut(xdmgnd$CARS, c(min(xdmgnd$CARS),1,max(xdmgnd$CARS)), include.lowest = T, labels = c("low hzd", "high hzd"))
Tons <- cut(xdmgnd$TONS, c(min(xdmgnd$TONS),median(xdmgnd$TONS),max(xdmgnd$TONS)), include.lowest = T, labels = c("low tons", "high tons"))
# Plot interaction between Speed and Cars
interaction.plot(Speed, Cars, log(xdmgnd$ACCDMG))
# First option with seeing points
qplot(x = TRNSPD, y = log(ACCDMG), data = xdmgnd, colour = Cars) +
  geom_point(colour = "gray")+
  geom_smooth(method = "lm")
# Second option without points
# NOTE(review): stat_summary(fun.y = ...) is the pre-ggplot2-3.3 spelling,
# deprecated in favour of `fun =` (the Derail*Cause plot above already uses
# the new form); it still runs with a warning on current ggplot2.
ggplot() +
  aes(x = Speed, y = log(xdmgnd$ACCDMG), group = Cars, color = Cars) +
  stat_summary(fun.y = mean, geom = "point") +
  stat_summary(fun.y = mean, geom = "line")
# Plot interaction between Freight and Speed
interaction.plot(xdmgnd$Freight, Speed, log(xdmgnd$ACCDMG))
ggplot() +
  aes(x = xdmgnd$Freight, y = log(xdmgnd$ACCDMG), group = Speed, color = Speed) +
  stat_summary(fun.y = mean, geom = "point") +
  stat_summary(fun.y = mean, geom = "line")
# Plot interaction between Derailments and Speed
interaction.plot(xdmgnd$Derail, Speed, log(xdmgnd$ACCDMG))
ggplot() +
  aes(x = xdmgnd$Derail, y = log(xdmgnd$ACCDMG), group = Speed, color = Speed) +
  stat_summary(fun.y = mean, geom = "point") +
  stat_summary(fun.y = mean, geom = "line")
# Plot interaction between Tons and Speed
interaction.plot(Speed, Tons, log(xdmgnd$ACCDMG))
ggplot() +
  aes(x = Speed, y = log(xdmgnd$ACCDMG), group = Tons, color = Tons) +
  stat_summary(fun.y = mean, geom = "point") +
  stat_summary(fun.y = mean, geom = "line")
## How might these results inform your hypotheses?
## Use the multivariate visualizations as evidence to form at least 1 hypothesis.
####################################
# Linear Models
####################################
# Build linear regression models with different combinations of quantitative predictors to provide evidence for your hypothesis
# Single predictor
xdmgnd.lm1<-lm(ACCDMG~TEMP,data=xdmgnd)
summary(xdmgnd.lm1)
names(xdmgnd.lm1)
coef(xdmgnd.lm1)
# Residual sum of squares.
# NOTE(review): `$res` relies on partial matching of `$residuals`; prefer
# sum(residuals(xdmgnd.lm1)^2) for clarity.
sum(xdmgnd.lm1$res^2)
# Two predictors
xdmgnd.lm2<-lm(ACCDMG~TEMP+TRNSPD,data=xdmgnd)
summary(xdmgnd.lm2)
names(xdmgnd.lm2)
coef(xdmgnd.lm2)
#Three predictors
xdmgnd.lm3<-lm(ACCDMG~TEMP+TRNSPD+CARS,data=xdmgnd)
summary(xdmgnd.lm3)
coef(xdmgnd.lm3)
# Interpret your model coefficients. Do they make sense?
# Interpret your developed models using the model utility test and t-test.
# Write out the null and alternative hypothesis for each of the tests.
# Do you reject or fail to reject H0 for each test?
####################################
# Now repeat for TOTKLD + TOTINJ
####################################
|
1b8bd566616cc30d7641bf6e194828bcb9eca4b8 | e17c6aec7115cb53939b784a87f5909be5fff032 | /lmer testing.R | 01f986d32af1386b353f74952c0988dd031ffa59 | [] | no_license | fawnshao/rexamples | 801ca734159a46ac67ed03b578001529563d3142 | 8e61d423237da5cb409f032dd896903fe8ac68c4 | refs/heads/master | 2021-01-18T05:46:15.505501 | 2013-06-11T01:26:21 | 2013-06-11T01:26:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 698 | r | lmer testing.R | library(lme4)
# Mixed-effects model demos: variance components estimated with three
# different packages (lme4, statmod/MASS, nlme) on built-in datasets.
# lme4: random intercept and slope for Days, grouped by Subject
# (the (Days|Subject) term), on the sleepstudy data.
(model1 <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy))
vc <- VarCorr( model1 )
varcomps <- c(unlist( lapply(vc, diag) ), # random intercept variances
              attr(vc,"sc")^2) # residual variance
# statmod: variance components for the petrol data (random batch effect No).
library(statmod)
library(MASS)
data(petrol)
out <- mixedModel2(Y~SG+VP+V10+EP, random=No, data=petrol)
cbind(varcomp=out$varcomp,se=out$se.varcomp)
# nlme: random-intercept model on mean-centred predictors
# (scale(..., scale = FALSE) only centres, it does not rescale).
library(nlme)
Petrol <- petrol
Petrol[, 2:5] <- scale(as.matrix(Petrol[, 2:5]), scale = FALSE)
pet3.lme <- lme(Y ~ SG + VP + V10 + EP,
                random = ~ 1 | No, data = Petrol)
# Refit by ML so the nested fixed-effects models can be compared with anova().
pet3.lme <- update(pet3.lme, method = "ML")
pet4.lme <- update(pet3.lme, fixed = Y ~ V10 + EP)
anova(pet4.lme, pet3.lme)
26885a38e8d17c3b24a70663ffd1e9ee37d943b4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/LncFinder/examples/svm_cv.Rd.R | f272d6a828108f1560371fd4af4cb1a0e3d8a86e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 425 | r | svm_cv.Rd.R | library(LncFinder)
### Name: svm_cv
### Title: _k_-fold Cross Validation for SVM
### Aliases: svm_cv
### ** Examples
# The example below is a \dontrun block as extracted from the package docs
# (lines prefixed ##D are not executed); strip the ##D prefixes to run it.
## Not run: 
##D data(demo_dataset)
##D my_dataset <- demo_dataset
##D 
##D cv_res <- svm_cv(my_dataset, folds.num = 4, seed = 1,
##D                  parallel.core = 2, cost = 3, kernel = "radial", gamma = 0.5)
##D 
##D ### Users can set return.model = TRUE to return the best model.
## End(Not run)
|
9920be98adaa1ecca077872cd0fa97faef7fc981 | 98e6299843c912e30cd82c48b8c7624e76bd0dee | /man/classify_occ.Rd | 4436d94670ccb6741ad20ea4756facb387c1e103 | [] | no_license | GabrielNakamura/naturaList-1 | 3365c819b7d353efa3a8a664e47f701f3d777b95 | 068372422c475bada0056b3482fd824eca09d2b2 | refs/heads/master | 2020-12-23T19:30:33.179233 | 2020-01-30T14:42:35 | 2020-01-30T14:42:35 | 237,250,055 | 1 | 0 | null | 2020-01-30T16:04:25 | 2020-01-30T16:04:24 | null | UTF-8 | R | false | true | 2,547 | rd | classify_occ.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classify_occ.R
\name{classify_occ}
\alias{classify_occ}
\title{Classify occurrence records in levels of confidence in species determination}
\usage{
classify_occ(occ, spec = NULL, na.rm.coords = TRUE,
crit.levels = c("det_by_spec", "taxonomist", "image", "sci_colection",
"field_obs", "no_criteria_met"),
institution.source = "institutionCode",
collection.code = "collectionCode", catalog.number = "catalogNumber",
year.event = "year", date.identified = "dateIdentified",
scientific.name = "species", determined.by = "identifiedBy",
longitude = "decimalLongitude", latitude = "decimalLatitude",
basis.of.rec = "basisOfRecord", media.type = "mediaType",
occ.id = "occurrenceID")
}
\arguments{
\item{occ}{dataframe with occurrence records information.}
\item{spec}{dataframe with specialists' names}
\item{na.rm.coords}{logical. If TRUE, remove occurrences with NA in latitude or longitude}
\item{crit.levels}{character. Vector with levels of confidence in decreasing order.
The criteria allowed are \code{det_by_spec}, \code{taxonomist},
\code{image}, \code{sci_colection}, \code{field_obs}, \code{no_criteria_met}. See details.}
\item{institution.source}{column of \code{occ} with the name of the institution that provided the data}
\item{collection.code}{column of \code{occ} with the codes for institution names}
\item{catalog.number}{column of \code{occ} with catalog number}
\item{year.event}{column with of \code{occ} the year of the collection event}
\item{date.identified}{column of \code{occ} with the date of species determination}
\item{scientific.name}{column of \code{occ} with the species names}
\item{determined.by}{column of \code{occ} with the name of who determined the species}
\item{longitude}{column with of \code{occ} longitude in decimal degrees}
\item{latitude}{column with of \code{occ} latitude in decimal degrees}
\item{basis.of.rec}{column of \code{occ} with the recording types, as in GBIF. See details.}
\item{media.type}{column of \code{occ} with the media type of recording. See details.}
\item{occ.id}{column of \code{occ} with link or code for the occurence record.}
}
\description{
Classify occurrence records in levels of confidence in species determination based on two data frames
}
\details{
basis.of.rec is a character vector with one of the following types:.... as in GBIF data 'basisOfRecord'
media.type uses the same pattern as GBIF data column mediaType.....
}
\author{
Arthur V. Rodrigues
}
|
e3d45b8234aeb80eba80b3bf159e0ec6629ab96c | ce24f86456f017bc1c0df7eb7bfb017dbce7a59e | /Channel Attribution Modeling/Channel_attribution.R | 6776dab4599fab7453df7c40524d27ed6284c321 | [] | no_license | nandeda89/Machine_Learning_Concepts | 47f064bc181811619e17cd4463c88edf33e6fc67 | ec979b698517b16d864a12ed0053c293eb3f6e72 | refs/heads/master | 2020-03-21T12:50:08.685806 | 2019-01-23T02:54:37 | 2019-01-23T02:54:37 | 138,574,271 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,925 | r | Channel_attribution.R | #Install the libraries
# NOTE(review): unconditional install.packages() on every run is slow and
# requires network access; consider guarding with requireNamespace() checks.
install.packages("ChannelAttribution")
install.packages("ggplot2")
install.packages("reshape")
install.packages("dplyr")
install.packages("plyr")
install.packages("reshape2")
install.packages("markovchain")
install.packages("plotly")
#Load the libraries
library("ChannelAttribution")
library("ggplot2")
library("reshape")
library("dplyr")
library("plyr")
library("reshape2")
library("markovchain")
library("plotly")
#Read the data into R
# NOTE(review): "\\Channel_attribution.csv" resolves to the drive root on
# Windows -- confirm a relative path was not intended.
channel = read.csv("\\Channel_attribution.csv", header = T)
head(channel)
# Flag conversions: in this dataset the value 21 marks the conversion event
# (paths are truncated at " > 21" below for the same reason).
for(row in 1:nrow(channel))
{
  if(21 %in% channel[row,]){channel$convert[row] = 1}
}
# Concatenate every column of a row into one "c1 > c2 > ..." path string.
column = colnames(channel)
channel$path = do.call(paste, c(channel[column], sep = " > "))
head(channel$path)
# Drop everything from the conversion marker (21) onwards.
for(row in 1:nrow(channel))
{
  channel$path[row] = strsplit(channel$path[row], " > 21")[[1]][1]
}
# NOTE(review): hard-coded positions 23 (path) and 22 (convert) break if the
# input schema changes; prefer selecting columns by name.
channel_fin = channel[,c(23,22)]
# One row per unique path with its total conversion count.
channel_fin = ddply(channel_fin,~path,summarise, conversion= sum(convert))
head(channel_fin)
Data = channel_fin
head(Data)
# Heuristic attribution (first/last/linear touch) vs a first-order Markov model.
H <- heuristic_models(Data, 'path', 'conversion', var_value='conversion')
H
M <- markov_model(Data, 'path', 'conversion', var_value='conversion', order = 1) 
M
# Merges the two data frames on the "channel_name" column.
R <- merge(H, M, by='channel_name') 
# Select only relevant columns
R1 <- R[, (colnames(R) %in% c('channel_name', 'first_touch_conversions', 'last_touch_conversions', 'linear_touch_conversions', 'total_conversion'))]
# Transforms the dataset into a data frame that ggplot2 can use to plot the outcomes
R1 <- melt(R1, id='channel_name')
# Plot the total conversions
ggplot(R1, aes(channel_name, value, fill = variable)) +
  geom_bar(stat='identity', position='dodge') +
  ggtitle('TOTAL CONVERSIONS') + 
  theme(axis.title.x = element_text(vjust = -2)) +
  theme(axis.title.y = element_text(vjust = +2)) +
  theme(title = element_text(size = 16)) +
  theme(plot.title=element_text(size = 20)) +
  ylab("")
|
fdc24911de7b15a2cd4e104e2fcb94694405942e | 9a9224f60b124a5c117a3f51bfabf45c9d0a198e | /man/tidymodels_update.Rd | 27934540b82faac08acdfb16af73277e738bc843 | [
"MIT"
] | permissive | tidymodels/tidymodels | 536c37a24fbd9a983b9685108fe64cdd7718a72f | 4f77b2561e3911831d863d5a3e577bbcfdb07149 | refs/heads/main | 2023-08-31T11:41:41.634575 | 2023-08-24T21:17:54 | 2023-08-24T21:17:54 | 139,892,826 | 712 | 74 | NOASSERTION | 2023-08-24T21:17:29 | 2018-07-05T19:38:29 | R | UTF-8 | R | false | true | 877 | rd | tidymodels_update.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update.R
\name{tidymodels_update}
\alias{tidymodels_update}
\title{Update tidymodels packages}
\usage{
tidymodels_update(pkg = "tidymodels", recursive = FALSE, ...)
}
\arguments{
\item{pkg}{A character string for the model being updated.}
\item{recursive}{If \code{TRUE}, will also check all dependencies of
tidymodels packages.}
\item{...}{Extra arguments to pass to \code{\link[utils:install.packages]{utils::install.packages()}}}
}
\value{
Nothing is returned but a message is printed to the
console about which packages (if any) should be installed along
with code to do so.
}
\description{
This will check to see if all tidymodels packages (and optionally, their
dependencies) are up-to-date, and will install after an interactive
confirmation.
}
\examples{
\dontrun{
tidymodels_update()
}
}
|
0db77f9de9065edff7e23a29fcd4801b497f411f | 477fae4aa62646c585ad1c8247120560a87500cc | /analysis/spline_additon_analysis.R | 18ca7786cc26c6d2f9918c6577785c0f349e13da | [] | no_license | jmargrove/DensityDependentProcessesInTropicalForests | 382335623e7bfdfa3a9aec2267de0b485f2ea6ac | f00ec5368f51529529f89f2c6dd303f1f7dce884 | refs/heads/master | 2022-01-14T19:45:55.496034 | 2019-04-05T14:44:30 | 2019-04-05T14:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,582 | r | spline_additon_analysis.R | ################################################################################
#' @author James Margrove
#' @title Fitting the spline models
# Clear workspace
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- acceptable in a
# standalone analysis script, hostile when sourced interactively.
rm(list=ls())
# Import packages
# NOTE(review): only splines is loaded here, but glm.nb (MASS), ggplot2 and
# scales::pretty_breaks are used below -- presumably attached by an earlier
# script in the pipeline; confirm.
require(splines)
# Import data
data <- read.table("./data/data.txt", header = TRUE)
str(data)
# Calculate the abundance
# Per-species counts: adults with DBH > 50 (n.indv) and all records (n.dt);
# species with no adults above the cutoff get an abundance of 0.
n.indv <- with(subset(data, DBH > 50), tapply(sp,sp,length))
n.dt <- with(data, tapply(sp,sp,length))
n.indv[which(is.na(n.indv))] <- 0
data$ABN <- rep(n.indv, n.dt)
data <- data[!is.na(data$seedlings),] # All data with seedlings
data <- data[which(!is.na(data$GI)),] # There are 4 tree species where we did not know where there NFC was no NFC in the plot - these were dropped
data[which(is.na(data$sBA)),"sBA"] <- 0 # There is one species that has no individuals above the 50 cm limit Vmic
data <- subset(data, DBH >= 30)
# Log the flowering intensities
data$LGI <- log(data$GI_II)
data$LHF <- log(data$HF_II)
# Fit the spline model: negative-binomial GLM with a natural spline (df = 2)
# of log flowering intensity interacting with abundance.
model3 <- glm.nb(formula = seedlings ~ ABN * ns(LGI, df = 2) + 1, data = data)
summary(model3)
# Load the previously saved best overall model (provides `model2`).
load("./models/bestOverallModel.R")
summary(model2)
model4 <- update(model2, . ~ . - I(LGI^2):LHF - log(DBH) - ABN:LHF - LHF)
# AIC difference between the reduced polynomial model and the spline model.
diff(AIC(model4, model3)[,2])
summary(model4)
# Predicting the models results
# Prediction grid: abundance quantiles x the median LHF ([3] = 0.5 quantile)
# x a 50-point sweep over the observed LGI range.
pred1 <- expand.grid(ABN = round(quantile(data$ABN,
                                          c(0.1,0.25,0.5,0.75,0.9)),1),
                     DBH = mean(data$DBH, na.rm = TRUE),
                     LHF = round(quantile(data$LHF,
                                          c(0.1,0.25,0.5,0.75,0.9)),1)[3],
                     LGI = seq(min(data$LGI),max(data$LGI),length=50))
pred1$seedlings <- predict(model3, pred1)
dfr <- model2$df.residual
# Interval half-width on the link scale: se * qt(0.95, dfr) (90% two-sided).
pred1$se <- predict(model3, pred1, se.fit = T)$se.fit * qt(0.95, dfr)
pred1$Abundance <- factor(pred1$ABN)
head(pred1)
#dev.off() # problems with plotting - use dev.off: close a ploting device
cols <- c('#a1d99b','#74c476','#41ab5d','#238b45','#005a32')
# Plotting the results
# Predictions are back-transformed with exp() from the log link scale.
p2 <- ggplot(pred1, aes(x=LGI, exp(seedlings), fill = Abundance)) + geom_line() +
  geom_line() + geom_ribbon(aes(ymin=exp(seedlings-se),ymax=exp(seedlings+se)), alpha = 0.5) +
  geom_line() + theme_bw() + xlab("Conspecific flowering intensity") +
  ylab(bquote('Seedlings 16'~ m^-2)) + scale_alpha(guide = 'none') +
  scale_fill_manual(values = cols) +
  theme(legend.position = "top") +
  scale_x_continuous(breaks=pretty_breaks(6))
p2
ggsave(p2, file = "./graphs/splines_LCFI.png",
       width = 4, height = 4)
|
36badc07ed99fb5599aa5ef832a0491d9cb44504 | 29585dff702209dd446c0ab52ceea046c58e384e | /localdepth/R/localdepth2Dsimplicial.R | 46669cad36dc99eb22b7d543fbe13c052f7a4b03 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,110 | r | localdepth2Dsimplicial.R | #############################################################
#
# localdepth2Dsimplicial function
# Author: Claudio Agostinelli and Mario Romanazzi
# E-mail: claudio@unive.it
# Date: June, 15, 2008
# Version: 0.5
#
# Copyright (C) 2008 Claudio Agostinelli and Mario Romanazzi
#
#############################################################
localdepth2Dsimplicial <- function(x, y, tau, use) {
  # Simplicial depth and local simplicial depth for bivariate data.
  #
  # x:   nx reference points in the plane (coerced to a matrix, one row each)
  # y:   ny evaluation points (coerced to a matrix)
  # tau: locality threshold, passed straight through to the compiled routine
  # use: 'diameter' selects the diameter-based C kernel; any other value
  #      falls through to the area-based kernel (the commented-out R
  #      prototype below handled 'areas' and 'spherical' explicitly)
  #
  # Returns a list with `localdepth` and `depth` (one raw count per row of
  # y) and `num` = choose(nx, 3), the number of triangles considered.
  x <- as.matrix(x)
  y <- as.matrix(y)
  nx <- nrow(x)
  ny <- nrow(y)
  result <- list()
## helper functions to compute the length of a vector and the area of a triangle
##  norm <- function(x) sqrt(t(x)%*%x)
##  norma <- function(x) sqrt(diag(x%*%t(x)))
##  area <- function(x) sqrt(sum(x)*(sum(x)/2-x[1])*(sum(x)/2-x[2])*(sum(x)/2-x[3])/2)
## number of triples (triangles) that can be formed from the nx sample points
  nt <- choose(nx, 3)
  depth <- rep(0,ny)
  localdepth <- rep(0,ny)
  if (use=='diameter') {
    # Counting is done in compiled C code shipped with the localdepth
    # package. NOTE(review): DUP = FALSE is defunct in modern R (ignored
    # since R 3.1) -- confirm against the targeted R version.
    res <- .C("ld2Ddiamsimp",
      x1 = as.double(x[,1]),
      x2 = as.double(x[,2]),
      y1 = as.double(y[,1]),
      y2 = as.double(y[,2]),
      tau =as.double(tau),
      nx = as.integer(nx),
      ny = as.integer(ny),
      depth = as.double(depth),
      localdepth = as.double(localdepth),
      DUP = FALSE, NAOK = FALSE, PACKAGE = "localdepth")
    result$localdepth <- res$localdepth
    result$depth <- res$depth
    result$num <- nt
    return(result)
  } else {
    res <- .C("ld2Dareasimp",
      x1 = as.double(x[,1]),
      x2 = as.double(x[,2]),
      y1 = as.double(y[,1]),
      y2 = as.double(y[,2]),
      tau =as.double(tau),
      nx = as.integer(nx),
      ny = as.integer(ny),
      depth = as.double(depth),
      localdepth = as.double(localdepth),
      DUP = FALSE, NAOK = FALSE, PACKAGE = "localdepth")
# Legacy pure-R prototype of the area-based computation, kept for reference:
#    aree <- posizione <- rep(0, nt)
#    for (i1 in 1:(nx-2)) {
#      for (i2 in (i1+1):(nx-1)) {
#        for (i3 in (i2+1):nx) {
#          ind1 <- ind1+1
#          x1 <- x[i1,]
#          x2 <- x[i2,]
#          x3 <- x[i3,]
#          n1 <- norm(x1-x2)
#          n2 <- norm(x1-x3)
#          n3 <- norm(x2-x3)
#          aree[ind1] <- area(c(n1,n2,n3))
#        }
#      }
#    }
#    x <- t(x)
#    for (ind2 in 1:ny) {
#      y1 <- y[ind2,]
#      u <- (x-y1)/norma(x-y1)
#      alpha <- atan2(u[2,],u[1,])%%(2*pi)
#      ind1 <- 0
#      for (i1 in 1:(nx-2)) {
#        for (i2 in (i1+1):(nx-1)) {
#          for (i3 in (i2+1):nx) {
#            ind1 <- ind1 + 1
#            a <- sort(alpha[c(i1,i2,i3)])
#            ## removed the a[1]==0 || condition
#            if ((a[3]-a[1])%%(2*pi) >= pi & (a[1]-a[2])%%(2*pi) >= pi & (a[2]-a[3])%%(2*pi) >= pi) {
#              depth[ind2] <- depth[ind2]+1
#              if (use=='areas' && aree[ind1] <= tau)
#                localdepth[ind2] <- localdepth[ind2]+1
#              if (use=='spherical' && max(norma(x[,c(i1,i2,i3)]-y1))<= tau)
#                localdepth[ind2] <- localdepth[ind2]+1
#            }
#          }
#        }
#      }
#    }
    result$localdepth <- res$localdepth
    result$depth <- res$depth
# result$areas <- aree
    result$num <- nt
    return(result)
  }
}
|
e2dbeac7402845b145d6397ba98d0a5bd8b844ee | 7e5e5139f817c4f4729c019b9270eb95978feb39 | /Cleaning data with R/cleaning Data in R/Chapter 3-Preparing data for analysis/3.R | 145ebf7aa103df2bd0156ac0f45309317bffd5be | [] | no_license | Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track- | a45594a8a9078076fe90076f675ec509ae694761 | a50740cb3545c3d03f19fc79930cb895b33af7c4 | refs/heads/main | 2023-05-08T19:45:46.830676 | 2021-05-31T03:30:08 | 2021-05-31T03:30:08 | 366,929,815 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,932 | r | 3.R | # Working with dates
# Exercise transcript (DataCamp "Cleaning Data in R", ch. 3): standardize
# dates/times with lubridate's y/m/d/h/m/s parser family.
# Dates can be a challenge to work with in any programming language, but thanks to the lubridate package, working with dates in R isn't so bad. Since this course is about cleaning data, we only cover the most basic functions from lubridate to help us standardize the format of dates and times in our data.
#
# As you saw in the video, these functions combine the letters y, m, d, h, m, s, which stand for year, month, day, hour, minute, and second, respectively. The order of the letters in the function should match the order of the date/time you are attempting to read in, although not all combinations are valid. Notice that the functions are "smart" in that they are capable of parsing multiple formats.
#
# Instructions
# We have loaded a dataset called students2 into your workspace. students2 is similar to students, except now instead of an age for each student, we have a (hypothetical) date of birth in the dob column. There's another new column called nurse_visit, which gives a timestamp for each student's most recent visit to the school nurse.
#
# Preview students2 with str(). Notice that dob and nurse_visit are both stored as character.
# Load the lubridate package.
# Print "17 Sep 2015" as a date.
# Print "July 15, 2012 12:56" as a date and time (note there are hours and minutes, but no seconds!).
# Coerce dob to a date (with no time).
# Coerce nurse_visit to a date and time.
# Use str() to see the changes to students2.
# Preview students2 with str()
str(students2)
# Load the lubridate package
library(lubridate)
# Parse as date (day-month-year order, hence dmy)
dmy("17 Sep 2015")
# Parse as date and time (with no seconds!) -- month-day-year hour:minute
mdy_hm("July 15, 2012 12:56")
# Coerce dob to a date (with no time)
students2$dob <- ymd(students2$dob)
# Coerce nurse_visit to a date and time
students2$nurse_visit <- ymd_hms(students2$nurse_visit)
# Look at students2 once more with str()
str(students2) |
0f61a11174106cd672cc88ffc3dfe7a3d62ce96e | b4b5a6c61e6500656f942cbc6bf1192d51524ec7 | /simulator_functions.R | 657eb5a1762ee578113465074bf3697e747c6d2c | [] | no_license | E-Durland/Genotype_simulator | c5912bbc3e7749cc2889a87b95886a2da19ef20a | 56070ce4356755ee544189a5940b02b49b86e2e2 | refs/heads/main | 2023-07-16T14:22:52.375490 | 2021-09-02T07:41:56 | 2021-09-02T07:41:56 | 314,291,370 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,346 | r | simulator_functions.R | #first, the function to make random parents from an MAF pool
#' Draw a random parental genotype pool for one bi-allelic locus.
#'
#' Builds an allele pool of 2 * (male_n + female_n) alleles containing
#' ceiling(MAF/100 * pool size) copies of the minor allele "B", then deals
#' two alleles (sampled without replacement across the whole pool) to each
#' parent, so the realized allele counts are exact.
#'
#' Side effect (unchanged interface): assigns the character vectors `males`
#' and `females` -- genotypes encoded "X_Y" -- via <<-, which downstream
#' script code reads as globals.
parent_maker <- function(male_n, female_n, MAF) {
  n_par <- male_n + female_n
  t_alleles <- n_par * 2
  # Number of minor-allele copies implied by the requested MAF (percent).
  p_MAF <- ceiling(MAF / 100 * t_alleles)
  p_genpool <- c(rep("A", t_alleles - p_MAF), rep("B", p_MAF))
  # seq_len() is safe when t_alleles == 0, unlike 1:t_alleles.
  gen_indx <- seq_len(t_alleles)
  # Preallocate instead of growing with append() inside the loop.
  male <- character(male_n)
  female <- character(female_n)
  for (p in seq_len(n_par)) {
    # Deal two alleles to this parent and remove them from the pool
    # (matched by identity, not by position).
    picked <- sample(gen_indx, 2, replace = FALSE)
    gen_indx <- gen_indx[!(gen_indx %in% picked)]
    genotype <- paste(p_genpool[picked[1]], p_genpool[picked[2]], sep = "_")
    if (p <= male_n) {
      male[p] <- genotype
    } else {
      female[p - male_n] <- genotype
    }
  }
  males <<- male
  females <<- female
}
# Example: a parental genotype pool with 2 males, 10 females and a MAF = 30%
# for this locus (parent_maker assigns the `males`/`females` globals):
parent_maker(2,10,30)
# Now a function to make crosses from this parental pool:
#' Summarize all pairwise crosses between two parental genotype pools.
#'
#' Every dad x mom pairing produces the four Punnett-square offspring
#' genotypes (each sire allele paired with each dam allele). Every pairing
#' also contributes the four parental alleles to a pooled allele tally, so a
#' parent is counted once per mate -- deliberately preserved from the
#' original implementation.
#'
#' @param dads,moms character vectors of genotypes encoded "X_Y"
#'   (alleles "A"/"B" joined by "_").
#' @return named numeric vector:
#'   r_MAF        realized minor-allele ("B") frequency in percent (2 dp)
#'   p_AA/p_AB/p_BB  offspring genotype proportions ("A_B" and "B_A" pooled).
#'   Empty input yields NaN entries (0/0), as before.
cross_sim <- function(dads, moms) {
  n_pairs <- length(dads) * length(moms)
  # Preallocate: 4 offspring genotypes and 4 tallied alleles per pairing.
  # (The original grew both vectors with append(), which is O(n^2).)
  crosses <- character(4 * n_pairs)
  genFreq <- character(4 * n_pairs)
  k <- 0
  for (d in seq_along(dads)) {
    sire <- unlist(strsplit(dads[d], split = "_"))
    for (m in seq_along(moms)) {
      dam <- unlist(strsplit(moms[m], split = "_"))
      # The four offspring genotypes of this pairing:
      crosses[k + 1] <- paste(sire[1], dam[1], sep = "_")
      crosses[k + 2] <- paste(sire[1], dam[2], sep = "_")
      crosses[k + 3] <- paste(sire[2], dam[1], sep = "_")
      crosses[k + 4] <- paste(sire[2], dam[2], sep = "_")
      # The four parental alleles of this pairing:
      genFreq[k + 1] <- sire[1]
      genFreq[k + 2] <- sire[2]
      genFreq[k + 3] <- dam[1]
      genFreq[k + 4] <- dam[2]
      k <- k + 4
    }
  }
  # Summary statistics of the pool.
  r_MAF <- sum(genFreq == "B") / length(genFreq)
  p_AA <- sum(crosses == "A_A") / length(crosses)
  p_AB <- sum(crosses == "A_B" | crosses == "B_A") / length(crosses)
  p_BB <- sum(crosses == "B_B") / length(crosses)
  c(r_MAF = round(r_MAF * 100, 2), p_AA = p_AA, p_AB = p_AB, p_BB = p_BB)
}
# To run a single cross on the current parental pool:
sims<-cross_sim(males,females)
# To run multiple (30) simulations: re-draw the parents each time and stack
# the summary rows with rbind (only the first iteration initializes `sims`).
for(i in 1:30){
  parent_maker(5,20,30)
  if(i==1){
    sims<-cross_sim(males,females)
  }else{
    sims<-rbind(sims,cross_sim(males,females))
  }
}
# For multiple simulations (30 each) across a range of MAF values (1-50).
# Bug fix: the original guarded initialization with `if(a == 1)`, so every
# one of the 30 runs at MAF = 1 overwrote `sims` and 29 of them were
# silently discarded; only the very first run overall may initialize.
for (a in 1:50) {
  MAF <- a
  for (i in 1:30) {
    parent_maker(5, 20, a)
    if (a == 1 && i == 1) {
      sims <- cross_sim(males, females)
    } else {
      sims <- rbind(sims, cross_sim(males, females))
    }
  }
}
# To plot your simulations:
library(tidyr)
library(ggplot2)
# Reshape the wide simulation summaries (one column per genotype proportion)
# to long format for ggplot.
# NOTE(review): `sims` is built by rbind()-ing named vectors (a matrix), while
# pivot_longer() documents a data-frame input; also `%>%` relies on tidyr
# re-exporting the magrittr pipe -- confirm this runs as-is.
m_sims<-pivot_longer(sims,
                     cols = p_AA:p_BB,
                     names_to ="gtype",
                     values_to = "p")%>%
  as.data.frame()
# Genotype proportions (%) vs realized minor-allele frequency, with a
# smoother per genotype class.
ggplot(m_sims,aes(r_MAF,p*100))+
  geom_point(aes(color=gtype),size=0.5)+
  stat_smooth(aes(color=gtype))+
  scale_x_continuous("Minor allele frequency (%)",
                     limits = c(0,50),
                     breaks=seq(0,50,by=10))+
  scale_y_continuous("Genotypes in pool (%)")+
  scale_color_manual("Genotype", breaks=c("p_AA","p_AB","p_BB"),
                     labels=c("AA","AB","BB"),values=c("red3","green3","dodgerblue"))+
  theme_minimal()
|
ecf0ec2a01921953b8ee11553bfb759ae92929c3 | 57c871c47dcfbf20f313d282f58ae2589558dbc6 | /cachematrix.R | c6d94dcd344a0fbf6df2edaf87e06a18e286b2ad | [] | no_license | mike0919/ProgrammingAssignment2 | bd8ff07de40c6aab9ba3f3bbad90eaaf659de0a3 | 4ecfcc6d470cd97295151cdc6933486d0bae97d8 | refs/heads/master | 2020-12-25T23:47:01.756800 | 2015-04-25T14:33:44 | 2015-04-25T14:33:44 | 34,382,781 | 0 | 0 | null | 2015-04-22T09:57:22 | 2015-04-22T09:57:22 | null | UTF-8 | R | false | false | 2,413 | r | cachematrix.R | ##*******************************************************************************
## This is the R programming Assignment #2.
## There are 2 functions:
## 1. makeCacheMatrix : to return a special matrix that has 4 functions in a list
## 2. cacheSolve: to return the inverse of the special matrix from either cache
## result or calculate it in real time.
##*******************************************************************************
## This function returns 4 functions in a list: set, get, setinverse, getinverse.
## e.g. x <- matrix(1:4, nrow = 2, ncol = 2),
## f <- makeCacheMatrix(x)
## f will have $set, $get, $setinverse, and $getinverse total 4 functions.
makeCacheMatrix <- function(x = matrix()) {
  ## A "cache-aware matrix": wraps `x` together with a memo slot for its
  ## inverse. Returns a list of four accessor closures sharing this call's
  ## environment:
  ##   $set(y)         replace the matrix and drop any stale cached inverse
  ##   $get()          return the current matrix
  ##   $setinverse(i)  store a computed inverse in the cache
  ##   $getinverse()   return the cached inverse, or NULL if not yet computed
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function returns a inverse matrix of the special matrix.
## If the special matrix already has a calculated inverse matrix, return it directly.
## Otherwise, calculate the inverse matrix in real time and return it while
## saving it into the special matrix's cache.
cacheSolve <- function(x, ...) {
    ## Return the inverse of the special "matrix" `x` built by
    ## makeCacheMatrix. A previously cached inverse is reused (with a
    ## "getting cached data" message); otherwise the inverse is computed
    ## with solve(), stored back into the cache, and returned.
    ## Extra arguments in `...` are forwarded to solve().
    cached <- x$getinverse()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    ## Cache miss: compute, memoize, and return the fresh inverse.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    fresh
}
96c3c4320ab400397fc274c9345eacc5c7efe4da | 7ff0097bb8a28165ab2696c73dbcc8f34817064b | /plot6.R | 00e1e4adf7ed709a96cfda8fe044dbc39ce8cfad | [] | no_license | AlchemyGeek/ExData_Plotting2 | 1e80e7bdb73e1da5270d0e9b50a91b12abd87aa6 | cc83693c03658bc39a9980191f0ce2fef86251de | refs/heads/master | 2020-04-15T23:36:55.496009 | 2016-09-14T21:35:55 | 2016-09-14T21:35:55 | 68,239,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,979 | r | plot6.R | ##
## Coursera: Exploratory Data Analysis, Project 2
## Data
## - PM2.5 Emissions Data (summarySCC_PM25.rds) and
## - Source Classification Code Table (Source_Classification_Code.rds)
##
## Compare emissions from motor vehicle sources in Baltimore City
## from motor vehicle sources in Los Angeles County, CA. Which city
## has seen greater changes over time in motor vehicle emissions?
##
library(ggplot2)
library(dplyr)
## Verify that both input files are present before doing any work.
if (!all(file.exists("summarySCC_PM25.rds",
                     "Source_Classification_Code.rds"))) {
  stop("Cannot find data files", call. = FALSE)
}
## Read data
sourceData <- readRDS("Source_Classification_Code.rds")
emissionData <- readRDS("summarySCC_PM25.rds")
## Keep only motor-vehicle (highway) emission source codes.
sourceData <- filter(sourceData, grepl("Highway Veh", Short.Name))
## Helper: total motor-vehicle PM2.5 emissions per year for one county.
yearly_vehicle_emissions <- function(data, fips_code, city_label) {
  data %>%
    filter(fips == fips_code, SCC %in% sourceData$SCC) %>%
    group_by(year) %>%
    summarize(yearEm = sum(Emissions)) %>%
    mutate(City = city_label)
}
baltimoreData <- yearly_vehicle_emissions(emissionData, "24510", "Baltimore")
laData <- yearly_vehicle_emissions(emissionData, "06037", "Los Angeles")
## Normalize each city against its own 1999 value so relative change
## over time is comparable between the two counties.
laData$yearEm <- laData$yearEm / laData$yearEm[1]
baltimoreData$yearEm <- baltimoreData$yearEm / baltimoreData$yearEm[1]
## Combine tables. NOTE(review): the original script divided the combined
## column by its first element again here; after per-city normalization
## that element is always 1, so the step was a no-op and has been removed.
emissionData <- rbind(baltimoreData, laData)
## Build and save the comparison plot.
p <- ggplot(emissionData, aes(x = year, y = yearEm, shape = City, color = City)) +
  geom_point(size = 3) +
  geom_smooth(method = "gam", se = FALSE) +
  xlab("Year") + ylab("% PM2.5 Change Compared to 1999") +
  ggtitle("Motor Vehicle Emission Changes (1999-2008)")
ggsave("plot6.png", plot = p, device = "png")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.