content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deploy_app.R
\name{deploy_app}
\alias{deploy_app}
\title{Deploy Shiny application}
\usage{
deploy_app(
env = "production",
deployed_dir_name,
instance_name,
project_id,
project_zone,
gcloud = NULL,
app_dir = "shiny_app"
)
}
\arguments{
\item{env}{Character string name of configuration environment in `config.yml` to
use for deployed app. e.g. "production" or "default".}
\item{deployed_dir_name}{Name of the directory in the VM instance for this app}
\item{instance_name}{The name of the VM instance}
\item{project_id}{The name of the GCP project for this VM instance}
\item{project_zone}{The zone of the GCP project for this VM instance}
\item{gcloud}{Absolute path for gcloud CLI (Ex: `/usr/local/bin/google-cloud-sdk/bin`)}
\item{app_dir}{The local directory containing the Shiny app. Defaults to "shiny_app".}
}
\description{
Deploy a Shiny application to a GCP VM instance (Must have gcloud on local machine).
Currently, this function MUST be run with the application in a `shiny_app` directory, and
that directory should be in the Current Working Directory.
}
\examples{
\dontrun{
deploy_app(
env = 'production',
deployed_dir_name = 'example_app',
instance_name = 'instance-1',
project_id = 'gcp-project-id',
project_zone = 'us-east1-d',
gcloud = '/usr/local/bin/google-cloud-sdk/bin'
)
}
}
|
/man/deploy_app.Rd
|
permissive
|
Tychobra/tychobratools
|
R
| false
| true
| 1,426
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deploy_app.R
\name{deploy_app}
\alias{deploy_app}
\title{Deploy Shiny application}
\usage{
deploy_app(
env = "production",
deployed_dir_name,
instance_name,
project_id,
project_zone,
gcloud = NULL,
app_dir = "shiny_app"
)
}
\arguments{
\item{env}{Character string name of configuration environment in `config.yml` to
use for deployed app. e.g. "production" or "default".}
\item{deployed_dir_name}{Name of the directory in the VM instance for this app}
\item{instance_name}{The name of the VM instance}
\item{project_id}{The name of the GCP project for this VM instance}
\item{project_zone}{The zone of the GCP project for this VM instance}
\item{gcloud}{Absolute path for gcloud CLI (Ex: `/usr/local/bin/google-cloud-sdk/bin`)}
\item{app_dir}{The local directory containing the Shiny app. Defaults to "shiny_app".}
}
\description{
Deploy a Shiny application to a GCP VM instance (Must have gcloud on local machine).
Currently, this function MUST be run with the application in a `shiny_app` directory, and
that directory should be in the Current Working Directory.
}
\examples{
\dontrun{
deploy_app(
env = 'production',
deployed_dir_name = 'example_app',
instance_name = 'instance-1',
project_id = 'gcp-project-id',
project_zone = 'us-east1-d',
gcloud = '/usr/local/bin/google-cloud-sdk/bin'
)
}
}
|
# Salary and Income
df$AGE<-2010-df$BIRTHYR # other age varaible is cut in intervals for some reason
df<- df %>%
mutate(CARNEGIE = recode(CARNEGIE, `RU/VH: Research Universities (very high research activity)`="R1",
`RU/H: Research Universities (high research activity)`="R2",
`DRU: Doctoral/Research Universities`="R3/Doctoral",
`Assoc/Pub-R-L: Associates--Public Rural-serving Large` = "Associates",
`Assoc/Pub-R-M: Associates--Public Rural-serving Medium`="Associates",
`Assoc/Pub-S-MC: ASSOCIATE--Public Suburban-serving Multicampus`="Associates",
`Assoc/Pub-S-SC: ASSOCIATE--Public Suburban-serving Single Campus`="Associates",
`Assoc/Pub-U-MC: ASSOCIATE--Public Urban-serving Multicampus`="Associates",
`Assoc/Pub-U-SC: ASSOCIATE--Public Urban-serving Single Campus`="Associates",
`Assoc/Pub2in4: ASSOCIATE--Public 2-year colleges under 4-year universities`="Associates",
`Assoc/Pub4: ASSOCIATE--Public 4-year Primarily ASSOCIATE`="Associates",
`Bac/A&S: Baccalaureate Colleges--Arts & Sciences`="Bachelors/Masters",
`Bac/Assoc: Baccalaureate/ASSOCIATE Colleges`="Bachelors/Masters",
`Bac/Diverse: Baccalaureate Colleges--Diverse Fields`="Bachelors/Masters",
`Masters L: Masters Colleges and Universities (larger programs)`="Bachelors/Masters",
`Masters M: Masters Colleges and Universities (medium programs)`="Bachelors/Masters",
`Masters S: Masters Colleges and Universities (smaller programs)`="Bachelors/Masters",
`Spec/Arts: Special Focus Institutions--Schools of art, music, and design`="Other",
`Spec/Bus: Special Focus Institutions--Schools of business and management`="Other",
`Spec/Faith: Special Focus Institutions--Theological seminaries, Bible colleges, and other faith-related institutions`="Other",
`Spec/Health: Special Focus Institutions--Other health professions schools`="Other",
`Spec/Other: Special Focus Institutions--Other special-focus institutions`="Other", `-3` = NA_character_))
df$CARNEGIE<-factor(df$CARNEGIE,levels = c("R1","R2","R3/Doctoral","Bachelors/Masters","Associates","Other"))
df<- df %>%
mutate(OBEREG=recode(OBEREG, `Far West - AK CA HI NV OR WA`="West",
`Great Lakes - IL IN MI OH WI`="Midwest",
`Mid East - DE DC MD NJ NY PA`="East",
`New England - CT ME MA NH RI VT`="East",
`Other`="Other",
`Plains - IA KS MN MO NE ND SD`="Midwest",
`Rocky Mountains - CO ID MT UT WY`="West",
`Southeast - AL AR FL GA KY LA MS NC SC TN VA WV`="South",
`Southwest - AZ NM OK TX`="West"))
df$OBEREG<-relevel(df$OBEREG,"East")
df$MARITAL2<-df$MARITAL; levels(df$MARITAL2)<-c("Not","Married","Not","Not","Married","Not") # cohab=married
df$RACEGROUP2<-df$RACEGROUP; levels(df$RACEGROUP2)<-c(rep("Minority",6),"White") # cohab=married
df$NCHILD3<-as.numeric(df$NCHILD1)-1+as.numeric(df$NCHILD2)-1 # 4+ changes to four in this op
# Professional Development factor
profdf<-df %>% select(starts_with("PROFDEV")) %>% as.data.frame
PROFDEVvars<-df %>% select(starts_with("PROFDEV")) %>% names()
for(i in PROFDEVvars){
levels(profdf[,i])=c(rep("0",3),"1")
profdf[,i]<-as.numeric(profdf[,i])-1
}
pca<-princomp(~ ., data = profdf, na.action=na.exclude)
df$PROFDEVFAC<--(pca$scores[,1]) # I think this is the scores (for comp 1). sign is negative so inverse it.
levels(df$GENACT01) <- c("Non-Union", "Union") #Act: Are you a member of a faculty union?
levels(df$GENACT02) <- c("Non-Citizen", "Citizen") #Act: Are you a member of a faculty union?
df<- df %>%
mutate(DEGWORK2=recode(DEGWORK, `Bachelors (B.A., B.S., etc.)`="Yes",
`Ed.D.`="Yes",
`LL.B., J.D.`="Yes",
`M.D., D.D.S. (or equivalent)`="Yes",
`Masters (M.A., M.S., M.F.A., M.B.A., etc.)`="Yes",
`None`="No",
`Other degree`="Yes",
`Other first professional degree beyond B.A. (e.g., D.D., D.V.M.)`="Yes",
`Ph.D.`="Yes"))
df$DEGWORK2<-relevel(df$DEGWORK2,"No")
df<- df %>%
mutate(DEGEARN2=recode(DEGEARN, `Bachelors (B.A., B.S., etc.)`="BA or Less",
`Ed.D.`="Prof Degree",
`LL.B., J.D.`="Prof Degree",
`M.D., D.D.S. (or equivalent)`="Prof Degree",
`Masters (M.A., M.S., M.F.A., M.B.A., etc.)`="Prof Degree",
`None`="BA or Less",
`Other degree`="BA or Less", # Assuming this means less, like an assoc. degree
`Other first professional degree beyond B.A. (e.g., D.D., D.V.M.)`="Prof Degree",
`Ph.D.`="Ph.D."))
df$SELECTIVITY2=cut(df$SELECTIVITY, breaks=quantile(df$SELECTIVITY, probs = c(0,.9,1),na.rm=T)) # defined as median SAT math and verbal (or ACT composite) of 1st time freshmen
levels(df$SELECTIVITY2)<-c("Not","Selective")
df<- df %>%
mutate(INSTDESCR03=recode(INSTDESCR03, `Not descriptive`="Not very",
`Somewhat descriptive`="Not very",
`Very descriptive`="Very"))
df<- df %>%
mutate(INSTDESCR08=recode(INSTDESCR08, `Not descriptive`="Not very",
`Somewhat descriptive`="Not very",
`Very descriptive`="Very"))
df<- df %>%
mutate(INSTOPN10=recode(INSTOPN10, `Agree somewhat`="Agree",
`Agree strongly`="Agree",
`Disagree somewhat`="Disagree",
`Disagree strongly`="Disagree"))
df$INSTOPN10<-relevel(df$INSTOPN10,"Disagree")
df<- df %>%
mutate(INSTOPN11=recode(INSTOPN11, `Agree somewhat`="Agree",
`Agree strongly`="Agree",
`Disagree somewhat`="Disagree",
`Disagree strongly`="Disagree"))
df$INSTOPN11<-relevel(df$INSTOPN11,"Disagree")
df$HEALTHBENEFITS=df$SATIS02; levels(df$HEALTHBENEFITS)=c("Health Ins", "No Health Ins",rep("Health Ins",3)) #Not Applicable means "No insureance"
df$HEALTHBENEFITS<-relevel(df$HEALTHBENEFITS,"No Health Ins")
df$RETIREBENEFITS=df$SATIS03; levels(df$RETIREBENEFITS)=c("Retirement","No Retirement",rep("Retirement",3))
df$RETIREBENEFITS<-relevel(df$RETIREBENEFITS,"No Retirement")
df$PRINACT2<-df$PRINACT; levels(df$PRINACT2)[1]<-"Admin/Other"
levels(df$PRINACT2)[2]<-"Admin/Other"
levels(df$PRINACT2)[3]<-"Admin/Other"
df$PRINACT2<-factor(df$PRINACT2,levels = c("Teaching","Research","Admin/Other"))
df$GAPPANTT<- factor(rep(NA, nrow(df)), levels=c("FT NTT", "Aspiring Academic","Career-Ender","Expert","Freelancer"))
df$GAPPANTT[df$FULLSTAT %in% "Yes" ] <- "FT NTT"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "Yes" ] <- "Aspiring Academic"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "No" & df$PTCAREER %in% "Yes"] <- "Expert"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "No" & df$PTCAREER %in% "No" & df$GENACT03 %in% "Yes"] <- "Career-Ender"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "No" & df$PTCAREER %in% "No" & df$GENACT03 %in% "No"] <- "Freelancer"
df$ADJUNCT1<- factor(rep(NA, nrow(df)), levels=c("Professional Adjuncts","Itinerant Academic","Single Inst Adjunct","Full-time"))
df$ADJUNCT1[df$FULLSTAT %in% "Yes"] <- "Full-time"
df$ADJUNCT1[df$PTCAREER %in% "Yes" ] <- "Professional Adjuncts"
df$ADJUNCT1[df$PTCAREER %in% "No" & df$PTTEACH>0 ] <- "Itinerant Academic"
df$ADJUNCT1[df$PTCAREER %in% "No" & df$PTTEACH==0 ] <- "Single Inst Adjunct"
|
/munge/03_Recode_HERI.R
|
no_license
|
chadgevans/Faculty_Classification
|
R
| false
| false
| 9,146
|
r
|
# Recode HERI faculty survey variables: age, institution type/region,
# demographics, professional development, benefits, and part-time faculty
# typologies. Assumes `df` (HERI survey data with labelled factors) and
# dplyr are loaded by an earlier munge step -- TODO confirm load order.
# NOTE(review): positional `levels()<-` relabelling below depends on the
# exact level order of each source factor; verify against the codebook.
df$AGE<-2010-df$BIRTHYR # age at the 2010 survey; the other age variable is cut into intervals
# Collapse detailed Carnegie classifications into six broad groups;
# the -3 missing-data code becomes NA.
df<- df %>%
mutate(CARNEGIE = recode(CARNEGIE, `RU/VH: Research Universities (very high research activity)`="R1",
`RU/H: Research Universities (high research activity)`="R2",
`DRU: Doctoral/Research Universities`="R3/Doctoral",
`Assoc/Pub-R-L: Associates--Public Rural-serving Large` = "Associates",
`Assoc/Pub-R-M: Associates--Public Rural-serving Medium`="Associates",
`Assoc/Pub-S-MC: ASSOCIATE--Public Suburban-serving Multicampus`="Associates",
`Assoc/Pub-S-SC: ASSOCIATE--Public Suburban-serving Single Campus`="Associates",
`Assoc/Pub-U-MC: ASSOCIATE--Public Urban-serving Multicampus`="Associates",
`Assoc/Pub-U-SC: ASSOCIATE--Public Urban-serving Single Campus`="Associates",
`Assoc/Pub2in4: ASSOCIATE--Public 2-year colleges under 4-year universities`="Associates",
`Assoc/Pub4: ASSOCIATE--Public 4-year Primarily ASSOCIATE`="Associates",
`Bac/A&S: Baccalaureate Colleges--Arts & Sciences`="Bachelors/Masters",
`Bac/Assoc: Baccalaureate/ASSOCIATE Colleges`="Bachelors/Masters",
`Bac/Diverse: Baccalaureate Colleges--Diverse Fields`="Bachelors/Masters",
`Masters L: Masters Colleges and Universities (larger programs)`="Bachelors/Masters",
`Masters M: Masters Colleges and Universities (medium programs)`="Bachelors/Masters",
`Masters S: Masters Colleges and Universities (smaller programs)`="Bachelors/Masters",
`Spec/Arts: Special Focus Institutions--Schools of art, music, and design`="Other",
`Spec/Bus: Special Focus Institutions--Schools of business and management`="Other",
`Spec/Faith: Special Focus Institutions--Theological seminaries, Bible colleges, and other faith-related institutions`="Other",
`Spec/Health: Special Focus Institutions--Other health professions schools`="Other",
`Spec/Other: Special Focus Institutions--Other special-focus institutions`="Other", `-3` = NA_character_))
# Order the collapsed groups from most to least research-intensive.
df$CARNEGIE<-factor(df$CARNEGIE,levels = c("R1","R2","R3/Doctoral","Bachelors/Masters","Associates","Other"))
# Collapse OBEREG census regions into four macro-regions; East is the reference level.
df<- df %>%
mutate(OBEREG=recode(OBEREG, `Far West - AK CA HI NV OR WA`="West",
`Great Lakes - IL IN MI OH WI`="Midwest",
`Mid East - DE DC MD NJ NY PA`="East",
`New England - CT ME MA NH RI VT`="East",
`Other`="Other",
`Plains - IA KS MN MO NE ND SD`="Midwest",
`Rocky Mountains - CO ID MT UT WY`="West",
`Southeast - AL AR FL GA KY LA MS NC SC TN VA WV`="South",
`Southwest - AZ NM OK TX`="West"))
df$OBEREG<-relevel(df$OBEREG,"East")
df$MARITAL2<-df$MARITAL; levels(df$MARITAL2)<-c("Not","Married","Not","Not","Married","Not") # binary marital status; cohabiting counts as married
df$RACEGROUP2<-df$RACEGROUP; levels(df$RACEGROUP2)<-c(rep("Minority",6),"White") # collapse the six minority categories vs. White (original comment was a copy-paste of the marital line)
df$NCHILD3<-as.numeric(df$NCHILD1)-1+as.numeric(df$NCHILD2)-1 # total children; the "4 or more" level is treated as 4 by this conversion
# Professional Development factor: binarize each PROFDEV* item (top level -> 1,
# the rest -> 0), then use the first principal component as a single score.
profdf<-df %>% select(starts_with("PROFDEV")) %>% as.data.frame
PROFDEVvars<-df %>% select(starts_with("PROFDEV")) %>% names()
for(i in PROFDEVvars){
levels(profdf[,i])=c(rep("0",3),"1")
profdf[,i]<-as.numeric(profdf[,i])-1
}
# na.exclude keeps score rows aligned with df rows (NAs padded back in).
pca<-princomp(~ ., data = profdf, na.action=na.exclude)
df$PROFDEVFAC<--(pca$scores[,1]) # first-component scores; sign flipped so higher = more development -- TODO confirm loading direction
levels(df$GENACT01) <- c("Non-Union", "Union") #Act: Are you a member of a faculty union?
levels(df$GENACT02) <- c("Non-Citizen", "Citizen") #Act: U.S. citizenship (original comment was a copy-paste of the union item)
# DEGWORK2: working toward any degree (Yes) vs. none (No); No is the reference.
df<- df %>%
mutate(DEGWORK2=recode(DEGWORK, `Bachelors (B.A., B.S., etc.)`="Yes",
`Ed.D.`="Yes",
`LL.B., J.D.`="Yes",
`M.D., D.D.S. (or equivalent)`="Yes",
`Masters (M.A., M.S., M.F.A., M.B.A., etc.)`="Yes",
`None`="No",
`Other degree`="Yes",
`Other first professional degree beyond B.A. (e.g., D.D., D.V.M.)`="Yes",
`Ph.D.`="Yes"))
df$DEGWORK2<-relevel(df$DEGWORK2,"No")
# DEGEARN2: highest degree earned collapsed to BA or Less / Prof Degree / Ph.D.
df<- df %>%
mutate(DEGEARN2=recode(DEGEARN, `Bachelors (B.A., B.S., etc.)`="BA or Less",
`Ed.D.`="Prof Degree",
`LL.B., J.D.`="Prof Degree",
`M.D., D.D.S. (or equivalent)`="Prof Degree",
`Masters (M.A., M.S., M.F.A., M.B.A., etc.)`="Prof Degree",
`None`="BA or Less",
`Other degree`="BA or Less", # Assuming this means less, like an assoc. degree
`Other first professional degree beyond B.A. (e.g., D.D., D.V.M.)`="Prof Degree",
`Ph.D.`="Ph.D."))
# Top decile of selectivity (median freshman SAT/ACT) vs. everyone else.
df$SELECTIVITY2=cut(df$SELECTIVITY, breaks=quantile(df$SELECTIVITY, probs = c(0,.9,1),na.rm=T)) # defined as median SAT math and verbal (or ACT composite) of 1st time freshmen
levels(df$SELECTIVITY2)<-c("Not","Selective")
# Institutional-description and opinion items: collapse to binary agree/describe.
df<- df %>%
mutate(INSTDESCR03=recode(INSTDESCR03, `Not descriptive`="Not very",
`Somewhat descriptive`="Not very",
`Very descriptive`="Very"))
df<- df %>%
mutate(INSTDESCR08=recode(INSTDESCR08, `Not descriptive`="Not very",
`Somewhat descriptive`="Not very",
`Very descriptive`="Very"))
df<- df %>%
mutate(INSTOPN10=recode(INSTOPN10, `Agree somewhat`="Agree",
`Agree strongly`="Agree",
`Disagree somewhat`="Disagree",
`Disagree strongly`="Disagree"))
df$INSTOPN10<-relevel(df$INSTOPN10,"Disagree")
df<- df %>%
mutate(INSTOPN11=recode(INSTOPN11, `Agree somewhat`="Agree",
`Agree strongly`="Agree",
`Disagree somewhat`="Disagree",
`Disagree strongly`="Disagree"))
df$INSTOPN11<-relevel(df$INSTOPN11,"Disagree")
# Benefit indicators derived from satisfaction items: "Not Applicable"
# (second level) is read as not having the benefit.
df$HEALTHBENEFITS=df$SATIS02; levels(df$HEALTHBENEFITS)=c("Health Ins", "No Health Ins",rep("Health Ins",3)) #Not Applicable means "No insurance"
df$HEALTHBENEFITS<-relevel(df$HEALTHBENEFITS,"No Health Ins")
df$RETIREBENEFITS=df$SATIS03; levels(df$RETIREBENEFITS)=c("Retirement","No Retirement",rep("Retirement",3))
df$RETIREBENEFITS<-relevel(df$RETIREBENEFITS,"No Retirement")
# Principal activity: merge the first three levels into Admin/Other, then
# order as Teaching / Research / Admin/Other.
df$PRINACT2<-df$PRINACT; levels(df$PRINACT2)[1]<-"Admin/Other"
levels(df$PRINACT2)[2]<-"Admin/Other"
levels(df$PRINACT2)[3]<-"Admin/Other"
df$PRINACT2<-factor(df$PRINACT2,levels = c("Teaching","Research","Admin/Other"))
# GAPPANTT: Gappa-style non-tenure-track typology. `%in%` keeps NA rows
# unmatched (FALSE), so unclassified respondents stay NA. The five
# conditions below are mutually exclusive, so assignment order is safe here.
df$GAPPANTT<- factor(rep(NA, nrow(df)), levels=c("FT NTT", "Aspiring Academic","Career-Ender","Expert","Freelancer"))
df$GAPPANTT[df$FULLSTAT %in% "Yes" ] <- "FT NTT"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "Yes" ] <- "Aspiring Academic"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "No" & df$PTCAREER %in% "Yes"] <- "Expert"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "No" & df$PTCAREER %in% "No" & df$GENACT03 %in% "Yes"] <- "Career-Ender"
df$GAPPANTT[df$FULLSTAT %in% "No" & df$PTCHOICE %in% "No" & df$PTCAREER %in% "No" & df$GENACT03 %in% "No"] <- "Freelancer"
# ADJUNCT1: alternative adjunct typology. NOTE(review): these assignments are
# NOT mutually exclusive -- later lines overwrite earlier ones, so a
# respondent with FULLSTAT=="Yes" AND PTCAREER=="Yes" ends up
# "Professional Adjuncts", not "Full-time". Confirm this is intended.
df$ADJUNCT1<- factor(rep(NA, nrow(df)), levels=c("Professional Adjuncts","Itinerant Academic","Single Inst Adjunct","Full-time"))
df$ADJUNCT1[df$FULLSTAT %in% "Yes"] <- "Full-time"
df$ADJUNCT1[df$PTCAREER %in% "Yes" ] <- "Professional Adjuncts"
df$ADJUNCT1[df$PTCAREER %in% "No" & df$PTTEACH>0 ] <- "Itinerant Academic"
df$ADJUNCT1[df$PTCAREER %in% "No" & df$PTTEACH==0 ] <- "Single Inst Adjunct"
|
# Plot per-exon coverage log2 ratios (diagnosis/remission and relapse/remission)
# for one patient across all chromosomes, writing one PDF page per chromosome.
# Coverage is normalized so the chr19 median (assumed diploid) matches remission.
options(warn=1)
library(optparse)
option_list <- list(
make_option("--patient", type="character", help="patient ID"),
make_option("--diagnosis", type="character", help="diagnosis coverage data file"),
make_option("--relapse", type="character", help="relapse coverage data file"),
make_option("--remission", type="character", help="remission coverage data file")
)
opt <- parse_args(OptionParser(option_list=option_list))
# optparse leaves unset options as NULL, not NA: is.na(NULL) returns logical(0)
# and `if (logical(0))` errors out. Test with is.null() for every option.
if (is.null(opt$patient)) stop("patient not specified")
if (is.null(opt$diagnosis)) stop("diagnosis sample not specified")
if (is.null(opt$relapse)) stop("relapse sample not specified")
if (is.null(opt$remission)) stop("remission sample not specified")
# Headerless BED-style coverage tables (chr, start, end, name, length, strand,
# total coverage, average coverage). Note the original spelled
# `stringsAsFactor` (no trailing "s"); arguments passed through `...` are not
# partially matched, so the option was silently ignored.
dia <- read.delim(opt$diagnosis, check.names=FALSE, stringsAsFactors=FALSE, header=FALSE)
rel <- read.delim(opt$relapse, check.names=FALSE, stringsAsFactors=FALSE, header=FALSE)
rem <- read.delim(opt$remission, check.names=FALSE, stringsAsFactors=FALSE, header=FALSE)
#dia.mito <- read.delim("/mnt/projects/hdall/results/cnv/592_dia.coverage.mito.tsv", check.names=F, stringsAsFactor=F, header=F)
#rel.mito <- read.delim("/mnt/projects/hdall/results/cnv/592_rel.coverage.mito.tsv", check.names=F, stringsAsFactor=F, header=F)
#rem.mito <- read.delim("/mnt/projects/hdall/results/cnv/592_rem.coverage.mito.tsv", check.names=F, stringsAsFactor=F, header=F)
# normalization factors (superseded by chr19-median normalization below)
#nf.dia = sum(dia.mito$V3)/sum(rem.mito$V3)
#nf.rel = sum(rel.mito$V3)/sum(rem.mito$V3)
names(dia) <- c("chr", "start", "end", "name", "length", "strand", "total.dia", "avg.dia")
names(rem) <- c("chr", "start", "end", "name", "length", "strand", "total.rem", "avg.rem")
names(rel) <- c("chr", "start", "end", "name", "length", "strand", "total.rel", "avg.rel")
# Join the three samples on exon coordinates so each row has all coverages.
m <- merge(rem, dia, by=c("chr", "start", "end", "name", "length", "strand"))
m <- merge(m, rel, by=c("chr", "start", "end", "name", "length", "strand"))
# Keep exons with adequate coverage (>= 20x) in at least one sample, then
# rescale tumor coverage so chr19 medians match the remission sample.
mn <- m[m$avg.dia >= 20 | m$avg.rel >= 20 | m$avg.rem >= 20,]
chr19.rem.median <- median(mn[mn$chr=="chr19", "avg.rem"])
mn$avg.dia <- mn$avg.dia / median(mn[mn$chr=="chr19", "avg.dia"]) * chr19.rem.median
mn$avg.rel <- mn$avg.rel / median(mn[mn$chr=="chr19", "avg.rel"]) * chr19.rem.median
#mn$avg.dia <- mn$avg.dia / nf.dia
#mn$avg.rel <- mn$avg.rel / nf.rel
chromosomes <- paste0("chr", c(1:22, "X", "Y"))
# ".pdf.part" suffix marks the file as incomplete until the pipeline renames it.
pdf(paste0("/mnt/projects/hdall/results/cnv/cov-plot.", opt$patient, ".pdf.part"))
for (chrom in chromosomes)
{
# One page per chromosome: diagnosis ratio on top, relapse ratio below.
par(mfrow=c(2,1))
plot(mn[mn$chr==chrom,"start"],log2(mn[mn$chr==chrom,"avg.dia"]/mn[mn$chr==chrom,"avg.rem"]), ylim=c(-2, 2), col=rgb(0,0,0,0.05), main=paste(opt$patient, chrom, "dia"), ylab="log fold cov", xlab="", cex=0.2)
plot(mn[mn$chr==chrom,"start"],log2(mn[mn$chr==chrom,"avg.rel"]/mn[mn$chr==chrom,"avg.rem"]), ylim=c(-2, 2), col=rgb(0,0,0,0.05), main=paste(opt$patient, chrom, "rel"), ylab="log fold cov", xlab="", cex=0.2)
}
dev.off()
|
/cnv/cov-plot-exome.R
|
no_license
|
Gig77/ccri-hdall
|
R
| false
| false
| 2,979
|
r
|
options(warn=1)
library(optparse)
option_list <- list(
make_option("--patient", type="character", help="patient ID"),
make_option("--diagnosis", type="character", help="diagnosis coverage data file"),
make_option("--relapse", type="character", help="relapse coverage data file"),
make_option("--remission", type="character", help="remission coverage data file")
)
opt <- parse_args(OptionParser(option_list=option_list))
if (is.na(opt$patient)) stop("patient not specified")
if (is.null(opt$diagnosis)) stop("diagnosis sample not specified")
if (is.na(opt$relapse)) stop("relapse sample not specified")
if (is.na(opt$remission)) stop("remission sample not specified")
dia <- read.delim(opt$diagnosis, check.names=F, stringsAsFactor=F, header=F)
rel <- read.delim(opt$relapse, check.names=F, stringsAsFactor=F, header=F)
rem <- read.delim(opt$remission, check.names=F, stringsAsFactor=F, header=F)
#dia.mito <- read.delim("/mnt/projects/hdall/results/cnv/592_dia.coverage.mito.tsv", check.names=F, stringsAsFactor=F, header=F)
#rel.mito <- read.delim("/mnt/projects/hdall/results/cnv/592_rel.coverage.mito.tsv", check.names=F, stringsAsFactor=F, header=F)
#rem.mito <- read.delim("/mnt/projects/hdall/results/cnv/592_rem.coverage.mito.tsv", check.names=F, stringsAsFactor=F, header=F)
# normalization factors
#nf.dia = sum(dia.mito$V3)/sum(rem.mito$V3)
#nf.rel = sum(rel.mito$V3)/sum(rem.mito$V3)
names(dia) <- c("chr", "start", "end", "name", "length", "strand", "total.dia", "avg.dia")
names(rem) <- c("chr", "start", "end", "name", "length", "strand", "total.rem", "avg.rem")
names(rel) <- c("chr", "start", "end", "name", "length", "strand", "total.rel", "avg.rel")
# Combine the three per-sample coverage tables into one data frame,
# matching intervals on their coordinate and annotation columns.
m <- merge(rem, dia, by=c("chr", "start", "end", "name", "length", "strand"))
m <- merge(m, rel, by=c("chr", "start", "end", "name", "length", "strand"))
# normalize coverage to total coverage
mn <- m
# keep only intervals with at least 20x average coverage in some sample
mn <- mn[mn$avg.dia >= 20 | mn$avg.rel >= 20 | mn$avg.rem >= 20,]
# Rescale diagnosis and relapse coverage so that their chr19 median matches
# the remission chr19 median (chr19 presumably serves as a copy-number
# neutral reference chromosome here -- TODO confirm).
mn$avg.dia <- mn$avg.dia / median(mn[mn$chr=="chr19", "avg.dia"]) * median(mn[mn$chr=="chr19", "avg.rem"])
mn$avg.rel <- mn$avg.rel / median(mn[mn$chr=="chr19", "avg.rel"]) * median(mn[mn$chr=="chr19", "avg.rem"])
# alternative normalization by mitochondrial coverage (disabled)
#mn$avg.dia <- mn$avg.dia / nf.dia
#mn$avg.rel <- mn$avg.rel / nf.rel
# Chromosomes to plot, in karyotype order.
chr <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY")
# Write all per-chromosome plots into one PDF; the ".part" suffix
# presumably marks the file as incomplete until a later pipeline step
# renames it -- TODO confirm against the caller.
pdf(paste("/mnt/projects/hdall/results/cnv/cov-plot.", opt$patient, ".pdf.part", sep=""))
# For each chromosome plot, per position, the log2 coverage ratio of
# diagnosis vs. remission (top panel) and relapse vs. remission (bottom
# panel). Note: the loop variable is named `c`; R still resolves c(...)
# calls to the base function because a function binding is searched for
# in call position.
for (c in chr)
{
par(mfrow=c(2,1))
plot(mn[mn$chr==c,"start"],log2(mn[mn$chr==c,"avg.dia"]/mn[mn$chr==c,"avg.rem"]), ylim=c(-2, 2), col=rgb(0,0,0,0.05), main=paste(opt$patient, c, "dia"), ylab="log fold cov", xlab="", cex=0.2)
plot(mn[mn$chr==c,"start"],log2(mn[mn$chr==c,"avg.rel"]/mn[mn$chr==c,"avg.rem"]), ylim=c(-2, 2), col=rgb(0,0,0,0.05), main=paste(opt$patient, c, "rel"), ylab="log fold cov", xlab="", cex=0.2)
}
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iSEE-main.R
\name{iSEE}
\alias{iSEE}
\title{iSEE: interactive SummarizedExperiment/SingleCellExperiment Explorer}
\usage{
iSEE(se, redDimArgs = NULL, colDataArgs = NULL, featAssayArgs = NULL,
rowStatArgs = NULL, rowDataArgs = NULL, sampAssayArgs = NULL,
colStatArgs = NULL, customDataArgs = NULL, customStatArgs = NULL,
heatMapArgs = NULL, redDimMax = 5, colDataMax = 5,
featAssayMax = 5, rowStatMax = 5, rowDataMax = 5,
sampAssayMax = 5, colStatMax = 5, customDataMax = 5,
customStatMax = 5, heatMapMax = 5, initialPanels = NULL,
annotFun = NULL, customDataFun = NULL, customStatFun = NULL,
customSendAll = FALSE, colormap = ExperimentColorMap(),
tour = NULL, appTitle = NULL, runLocal = TRUE, voice = FALSE,
bugs = FALSE)
}
\arguments{
\item{se}{An object that is coercible to \linkS4class{SingleCellExperiment}.
If missing, an app is launched with a file upload control allowing users to upload an RDS file that contains such an object.
See Details for information to set the maximal size limit for file uploads.}
\item{redDimArgs}{A DataFrame similar to that produced by \code{\link{redDimPlotDefaults}}, specifying initial parameters for the reduced dimension plots.}
\item{colDataArgs}{A DataFrame similar to that produced by \code{\link{colDataPlotDefaults}}, specifying initial parameters for the column data plots.}
\item{featAssayArgs}{A DataFrame similar to that produced by \code{\link{featAssayPlotDefaults}}, specifying initial parameters for the feature assay plots.}
\item{rowStatArgs}{A DataFrame similar to that produced by \code{\link{rowStatTableDefaults}}, specifying initial parameters for the row statistics tables.}
\item{rowDataArgs}{A DataFrame similar to that produced by \code{\link{rowDataPlotDefaults}}, specifying initial parameters for the row data plots.}
\item{sampAssayArgs}{A DataFrame similar to that produced by \code{\link{sampAssayPlotDefaults}}, specifying initial parameters for the sample assay plots.}
\item{colStatArgs}{A DataFrame similar to that produced by \code{\link{colStatTableDefaults}}, specifying initial parameters for the column statistics tables.}
\item{customDataArgs}{A DataFrame similar to that produced by \code{\link{customDataPlotDefaults}}, specifying initial parameters for the custom data plots.}
\item{customStatArgs}{A DataFrame similar to that produced by \code{\link{customStatTableDefaults}}, specifying initial parameters for the custom statistics tables.}
\item{heatMapArgs}{A DataFrame similar to that produced by \code{\link{heatMapPlotDefaults}}, specifying initial parameters for the heatmaps.}
\item{redDimMax}{An integer scalar specifying the maximum number of reduced dimension plots in the interface.}
\item{colDataMax}{An integer scalar specifying the maximum number of column data plots in the interface.}
\item{featAssayMax}{An integer scalar specifying the maximum number of feature assay plots in the interface.}
\item{rowStatMax}{An integer scalar specifying the maximum number of row statistics tables in the interface.}
\item{rowDataMax}{An integer scalar specifying the maximum number of row data plots in the interface.}
\item{sampAssayMax}{An integer scalar specifying the maximum number of sample assay plots in the interface.}
\item{colStatMax}{An integer scalar specifying the maximum number of column statistics tables in the interface.}
\item{customDataMax}{An integer scalar specifying the maximum number of custom data plots in the interface.}
\item{customStatMax}{An integer scalar specifying the maximum number of custom statistics tables in the interface.}
\item{heatMapMax}{An integer scalar specifying the maximum number of heatmaps in the interface.}
\item{initialPanels}{A DataFrame specifying which panels should be created at initialization.
This should contain a \code{Name} character field and may have optional \code{Width} and \code{Height} integer fields, see Details.}
\item{annotFun}{A function, similar to those returned by \code{\link{annotateEntrez}} or \code{\link{annotateEnsembl}}.
The function should accept two parameters, \code{se} and \code{row_index}, and return a HTML element with annotation for the selected row.}
\item{customDataFun}{A named list of functions for reporting coordinates to use in a custom data plot.}
\item{customStatFun}{A named list of functions for reporting coordinates to use in a custom statistics table.}
\item{customSendAll}{A logical scalar indicating whether all (active and saved) selections should be passed from transmitting panels to custom plots or tables.
The default (\code{FALSE}) only passes the row names of the points in the active selection.}
\item{colormap}{An \linkS4class{ExperimentColorMap} object that defines custom colormaps to apply to individual \code{assays}, \code{colData} and \code{rowData} covariates.}
\item{tour}{A data.frame with the content of the interactive tour to be displayed after starting up the app.}
\item{appTitle}{A string indicating the title to be displayed in the app.
If not provided, the app displays the version info of \code{\link{iSEE}}.}
\item{runLocal}{A logical indicating whether the app is to be run locally or remotely on a server, which determines how documentation will be accessed.}
\item{voice}{A logical indicating whether the voice recognition should be enabled.}
\item{bugs}{Set to \code{TRUE} to enable the bugs Easter egg.
Alternatively, a named numeric vector controlling the respective number of each bug type (e.g., \code{c(bugs=3L, spiders=1L)}).
Credits to https://github.com/Auz/Bug for the JavaScript code.}
}
\value{
A Shiny app object is returned, for interactive data exploration of the \linkS4class{SummarizedExperiment} or \linkS4class{SingleCellExperiment} object.
}
\description{
Interactive and reproducible visualization of data contained in a
SummarizedExperiment/SingleCellExperiment, using a Shiny interface.
}
\details{
Users can pass default parameters via DataFrame objects in \code{redDimArgs} and \code{featAssayArgs}.
Each object can contain some or all of the expected fields (see \code{\link{redDimPlotDefaults}}).
Any missing fields will be filled in with the defaults.
The number of maximum plots for each type of plot is set to the larger of \code{*Max} and \code{nrow(*Args)}.
Users can specify any number of maximum plots, though increasing the number will increase the time required to render the interface.
The \code{initialPanels} argument specifies the panels to be created upon initializing the interface.
This should be a DataFrame containing a \code{Name} field specifying the identity of the panel, e.g., \code{"Reduced dimension plot 1"}, \code{"Row statistics table 2"}.
Please refer to \code{\link{availablePanelTypes}} for the full list of panels available.
The trailing number should not be greater than the number of maximum plots of that type.
Users can also define the \code{Width} field, specifying the width of each panel from 2 to 12 (values will be coerced inside this range);
and the \code{Height} field, specifying the height of each panel from 400 to 1000 pixels.
By default, one panel of each type (where possible) will be generated, with height of 500 and width of 4.
The \code{tour} argument needs to be provided in a form compatible with the format expected by the \code{rintrojs} package.
There should be two columns, \code{element} and \code{intro}, with the former describing the element to highlight and the latter providing some descriptive text.
See \url{https://github.com/carlganz/rintrojs#usage} for more information.
By default, categorical data types such as factor and character are limited to 24 levels, beyond which they are coerced to numeric variables for faster plotting.
This limit may be set to a different value as a global option, e.g. \code{options(iSEE.maxlevels=30)}.
By default, the maximum request size for file uploads defaults to 5MB (https://shiny.rstudio.com/reference/shiny/0.14/shiny-options.html).
To raise the limit (e.g., 50MB), run \code{options(shiny.maxRequestSize=50*1024^2)}.
}
\examples{
library(scRNAseq)
# Example data ----
sce <- ReprocessedAllenData(assays="tophat_counts")
class(sce)
library(scater)
sce <- logNormCounts(sce, exprs_values="tophat_counts")
sce <- runPCA(sce, ncomponents=4)
sce <- runTSNE(sce)
rowData(sce)$ave_count <- rowMeans(assay(sce, "tophat_counts"))
rowData(sce)$n_cells <- rowSums(assay(sce, "tophat_counts") > 0)
sce
# launch the app itself ----
app <- iSEE(sce)
if (interactive()) {
shiny::runApp(app, port=1234)
}
}
|
/man/iSEE.Rd
|
permissive
|
shenyang1981/iSEE
|
R
| false
| true
| 8,607
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iSEE-main.R
\name{iSEE}
\alias{iSEE}
\title{iSEE: interactive SummarizedExperiment/SingleCellExperiment Explorer}
\usage{
iSEE(se, redDimArgs = NULL, colDataArgs = NULL, featAssayArgs = NULL,
rowStatArgs = NULL, rowDataArgs = NULL, sampAssayArgs = NULL,
colStatArgs = NULL, customDataArgs = NULL, customStatArgs = NULL,
heatMapArgs = NULL, redDimMax = 5, colDataMax = 5,
featAssayMax = 5, rowStatMax = 5, rowDataMax = 5,
sampAssayMax = 5, colStatMax = 5, customDataMax = 5,
customStatMax = 5, heatMapMax = 5, initialPanels = NULL,
annotFun = NULL, customDataFun = NULL, customStatFun = NULL,
customSendAll = FALSE, colormap = ExperimentColorMap(),
tour = NULL, appTitle = NULL, runLocal = TRUE, voice = FALSE,
bugs = FALSE)
}
\arguments{
\item{se}{An object that is coercible to \linkS4class{SingleCellExperiment}.
If missing, an app is launched with a file upload control allowing users to upload an RDS file that contains such an object.
See Details for information to set the maximal size limit for file uploads.}
\item{redDimArgs}{A DataFrame similar to that produced by \code{\link{redDimPlotDefaults}}, specifying initial parameters for the reduced dimension plots.}
\item{colDataArgs}{A DataFrame similar to that produced by \code{\link{colDataPlotDefaults}}, specifying initial parameters for the column data plots.}
\item{featAssayArgs}{A DataFrame similar to that produced by \code{\link{featAssayPlotDefaults}}, specifying initial parameters for the feature assay plots.}
\item{rowStatArgs}{A DataFrame similar to that produced by \code{\link{rowStatTableDefaults}}, specifying initial parameters for the row statistics tables.}
\item{rowDataArgs}{A DataFrame similar to that produced by \code{\link{rowDataPlotDefaults}}, specifying initial parameters for the row data plots.}
\item{sampAssayArgs}{A DataFrame similar to that produced by \code{\link{sampAssayPlotDefaults}}, specifying initial parameters for the sample assay plots.}
\item{colStatArgs}{A DataFrame similar to that produced by \code{\link{colStatTableDefaults}}, specifying initial parameters for the column statistics tables.}
\item{customDataArgs}{A DataFrame similar to that produced by \code{\link{customDataPlotDefaults}}, specifying initial parameters for the custom data plots.}
\item{customStatArgs}{A DataFrame similar to that produced by \code{\link{customStatTableDefaults}}, specifying initial parameters for the custom statistics tables.}
\item{heatMapArgs}{A DataFrame similar to that produced by \code{\link{heatMapPlotDefaults}}, specifying initial parameters for the heatmaps.}
\item{redDimMax}{An integer scalar specifying the maximum number of reduced dimension plots in the interface.}
\item{colDataMax}{An integer scalar specifying the maximum number of column data plots in the interface.}
\item{featAssayMax}{An integer scalar specifying the maximum number of feature assay plots in the interface.}
\item{rowStatMax}{An integer scalar specifying the maximum number of row statistics tables in the interface.}
\item{rowDataMax}{An integer scalar specifying the maximum number of row data plots in the interface.}
\item{sampAssayMax}{An integer scalar specifying the maximum number of sample assay plots in the interface.}
\item{colStatMax}{An integer scalar specifying the maximum number of column statistics tables in the interface.}
\item{customDataMax}{An integer scalar specifying the maximum number of custom data plots in the interface.}
\item{customStatMax}{An integer scalar specifying the maximum number of custom statistics tables in the interface.}
\item{heatMapMax}{An integer scalar specifying the maximum number of heatmaps in the interface.}
\item{initialPanels}{A DataFrame specifying which panels should be created at initialization.
This should contain a \code{Name} character field and may have optional \code{Width} and \code{Height} integer fields, see Details.}
\item{annotFun}{A function, similar to those returned by \code{\link{annotateEntrez}} or \code{\link{annotateEnsembl}}.
The function should accept two parameters, \code{se} and \code{row_index}, and return a HTML element with annotation for the selected row.}
\item{customDataFun}{A named list of functions for reporting coordinates to use in a custom data plot.}
\item{customStatFun}{A named list of functions for reporting coordinates to use in a custom statistics table.}
\item{customSendAll}{A logical scalar indicating whether all (active and saved) selections should be passed from transmitting panels to custom plots or tables.
The default (\code{FALSE}) only passes the row names of the points in the active selection.}
\item{colormap}{An \linkS4class{ExperimentColorMap} object that defines custom colormaps to apply to individual \code{assays}, \code{colData} and \code{rowData} covariates.}
\item{tour}{A data.frame with the content of the interactive tour to be displayed after starting up the app.}
\item{appTitle}{A string indicating the title to be displayed in the app.
If not provided, the app displays the version info of \code{\link{iSEE}}.}
\item{runLocal}{A logical indicating whether the app is to be run locally or remotely on a server, which determines how documentation will be accessed.}
\item{voice}{A logical indicating whether the voice recognition should be enabled.}
\item{bugs}{Set to \code{TRUE} to enable the bugs Easter egg.
Alternatively, a named numeric vector controlling the respective number of each bug type (e.g., \code{c(bugs=3L, spiders=1L)}).
Credits to https://github.com/Auz/Bug for the JavaScript code.}
}
\value{
A Shiny app object is returned, for interactive data exploration of the \linkS4class{SummarizedExperiment} or \linkS4class{SingleCellExperiment} object.
}
\description{
Interactive and reproducible visualization of data contained in a
SummarizedExperiment/SingleCellExperiment, using a Shiny interface.
}
\details{
Users can pass default parameters via DataFrame objects in \code{redDimArgs} and \code{featAssayArgs}.
Each object can contain some or all of the expected fields (see \code{\link{redDimPlotDefaults}}).
Any missing fields will be filled in with the defaults.
The number of maximum plots for each type of plot is set to the larger of \code{*Max} and \code{nrow(*Args)}.
Users can specify any number of maximum plots, though increasing the number will increase the time required to render the interface.
The \code{initialPanels} argument specifies the panels to be created upon initializing the interface.
This should be a DataFrame containing a \code{Name} field specifying the identity of the panel, e.g., \code{"Reduced dimension plot 1"}, \code{"Row statistics table 2"}.
Please refer to \code{\link{availablePanelTypes}} for the full list of panels available.
The trailing number should not be greater than the number of maximum plots of that type.
Users can also define the \code{Width} field, specifying the width of each panel from 2 to 12 (values will be coerced inside this range);
and the \code{Height} field, specifying the height of each panel from 400 to 1000 pixels.
By default, one panel of each type (where possible) will be generated, with height of 500 and width of 4.
The \code{tour} argument needs to be provided in a form compatible with the format expected by the \code{rintrojs} package.
There should be two columns, \code{element} and \code{intro}, with the former describing the element to highlight and the latter providing some descriptive text.
See \url{https://github.com/carlganz/rintrojs#usage} for more information.
By default, categorical data types such as factor and character are limited to 24 levels, beyond which they are coerced to numeric variables for faster plotting.
This limit may be set to a different value as a global option, e.g. \code{options(iSEE.maxlevels=30)}.
By default, the maximum request size for file uploads defaults to 5MB (https://shiny.rstudio.com/reference/shiny/0.14/shiny-options.html).
To raise the limit (e.g., 50MB), run \code{options(shiny.maxRequestSize=50*1024^2)}.
}
\examples{
library(scRNAseq)
# Example data ----
sce <- ReprocessedAllenData(assays="tophat_counts")
class(sce)
library(scater)
sce <- logNormCounts(sce, exprs_values="tophat_counts")
sce <- runPCA(sce, ncomponents=4)
sce <- runTSNE(sce)
rowData(sce)$ave_count <- rowMeans(assay(sce, "tophat_counts"))
rowData(sce)$n_cells <- rowSums(assay(sce, "tophat_counts") > 0)
sce
# launch the app itself ----
app <- iSEE(sce)
if (interactive()) {
shiny::runApp(app, port=1234)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract-tools.R
\name{extract_organism}
\alias{extract_organism}
\title{Extract organism}
\usage{
extract_organism(record)
}
\arguments{
\item{record}{GenBank record in text format, character}
}
\value{
character
}
\description{
Return organism name from GenBank record
}
\details{
If the element is not found, '' is returned.
}
\seealso{
Other private:
\code{\link{add_rcrd_log}()},
\code{\link{cat_line}()},
\code{\link{char}()},
\code{\link{check_connection}()},
\code{\link{cleanup}()},
\code{\link{connected}()},
\code{\link{connection_get}()},
\code{\link{db_download_intern}()},
\code{\link{db_sqlngths_get}()},
\code{\link{db_sqlngths_log}()},
\code{\link{dir_size}()},
\code{\link{dwnld_path_get}()},
\code{\link{dwnld_rcrd_log}()},
\code{\link{entrez_fasta_get}()},
\code{\link{entrez_gb_get}()},
\code{\link{extract_accession}()},
\code{\link{extract_by_patterns}()},
\code{\link{extract_clean_sequence}()},
\code{\link{extract_definition}()},
\code{\link{extract_features}()},
\code{\link{extract_inforecpart}()},
\code{\link{extract_keywords}()},
\code{\link{extract_locus}()},
\code{\link{extract_seqrecpart}()},
\code{\link{extract_sequence}()},
\code{\link{extract_version}()},
\code{\link{file_download}()},
\code{\link{filename_log}()},
\code{\link{flatfile_read}()},
\code{\link{gb_build}()},
\code{\link{gb_df_create}()},
\code{\link{gb_df_generate}()},
\code{\link{gb_sql_add}()},
\code{\link{gb_sql_query}()},
\code{\link{gbrelease_check}()},
\code{\link{gbrelease_get}()},
\code{\link{gbrelease_log}()},
\code{\link{has_data}()},
\code{\link{identify_downloadable_files}()},
\code{\link{last_add_get}()},
\code{\link{last_dwnld_get}()},
\code{\link{last_entry_get}()},
\code{\link{latest_genbank_release_notes}()},
\code{\link{latest_genbank_release}()},
\code{\link{message_missing}()},
\code{\link{mock_def}()},
\code{\link{mock_gb_df_generate}()},
\code{\link{mock_org}()},
\code{\link{mock_rec}()},
\code{\link{mock_seq}()},
\code{\link{predict_datasizes}()},
\code{\link{print.status}()},
\code{\link{readme_log}()},
\code{\link{restez_connect}()},
\code{\link{restez_disconnect}()},
\code{\link{restez_path_check}()},
\code{\link{restez_rl}()},
\code{\link{search_gz}()},
\code{\link{seshinfo_log}()},
\code{\link{setup}()},
\code{\link{slctn_get}()},
\code{\link{slctn_log}()},
\code{\link{sql_path_get}()},
\code{\link{status_class}()},
\code{\link{stat}()},
\code{\link{testdatadir_get}()}
}
\concept{private}
|
/man/extract_organism.Rd
|
permissive
|
ropensci/restez
|
R
| false
| true
| 2,514
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract-tools.R
\name{extract_organism}
\alias{extract_organism}
\title{Extract organism}
\usage{
extract_organism(record)
}
\arguments{
\item{record}{GenBank record in text format, character}
}
\value{
character
}
\description{
Return organism name from GenBank record
}
\details{
If the element is not found, '' is returned.
}
\seealso{
Other private:
\code{\link{add_rcrd_log}()},
\code{\link{cat_line}()},
\code{\link{char}()},
\code{\link{check_connection}()},
\code{\link{cleanup}()},
\code{\link{connected}()},
\code{\link{connection_get}()},
\code{\link{db_download_intern}()},
\code{\link{db_sqlngths_get}()},
\code{\link{db_sqlngths_log}()},
\code{\link{dir_size}()},
\code{\link{dwnld_path_get}()},
\code{\link{dwnld_rcrd_log}()},
\code{\link{entrez_fasta_get}()},
\code{\link{entrez_gb_get}()},
\code{\link{extract_accession}()},
\code{\link{extract_by_patterns}()},
\code{\link{extract_clean_sequence}()},
\code{\link{extract_definition}()},
\code{\link{extract_features}()},
\code{\link{extract_inforecpart}()},
\code{\link{extract_keywords}()},
\code{\link{extract_locus}()},
\code{\link{extract_seqrecpart}()},
\code{\link{extract_sequence}()},
\code{\link{extract_version}()},
\code{\link{file_download}()},
\code{\link{filename_log}()},
\code{\link{flatfile_read}()},
\code{\link{gb_build}()},
\code{\link{gb_df_create}()},
\code{\link{gb_df_generate}()},
\code{\link{gb_sql_add}()},
\code{\link{gb_sql_query}()},
\code{\link{gbrelease_check}()},
\code{\link{gbrelease_get}()},
\code{\link{gbrelease_log}()},
\code{\link{has_data}()},
\code{\link{identify_downloadable_files}()},
\code{\link{last_add_get}()},
\code{\link{last_dwnld_get}()},
\code{\link{last_entry_get}()},
\code{\link{latest_genbank_release_notes}()},
\code{\link{latest_genbank_release}()},
\code{\link{message_missing}()},
\code{\link{mock_def}()},
\code{\link{mock_gb_df_generate}()},
\code{\link{mock_org}()},
\code{\link{mock_rec}()},
\code{\link{mock_seq}()},
\code{\link{predict_datasizes}()},
\code{\link{print.status}()},
\code{\link{readme_log}()},
\code{\link{restez_connect}()},
\code{\link{restez_disconnect}()},
\code{\link{restez_path_check}()},
\code{\link{restez_rl}()},
\code{\link{search_gz}()},
\code{\link{seshinfo_log}()},
\code{\link{setup}()},
\code{\link{slctn_get}()},
\code{\link{slctn_log}()},
\code{\link{sql_path_get}()},
\code{\link{status_class}()},
\code{\link{stat}()},
\code{\link{testdatadir_get}()}
}
\concept{private}
|
################
# mvtnorm provides rmvnorm()/dmvnorm() used by every sampler below.
# library() errors immediately if the package is missing, whereas
# require() only warns and returns FALSE, deferring the failure.
library("mvtnorm")
# Generic for the binomial Metropolis-Hastings sampler; dispatches on `y`.
binommh <- function(y,...) UseMethod("binommh")

# Metropolis-Hastings sampler for a Bayesian binomial (logit link)
# regression model, using the IWLS-based Gaussian proposal of
# Gamerman (1997): each proposal is built from one iteratively weighted
# least squares step around the current state, and the reverse move is
# evaluated around the proposed state.
#
# Arguments:
#   y    - vector of binomial successes.
#   m    - vector of binomial totals (trials per observation).
#   b, B - prior mean vector and covariance matrix of the coefficients.
#   X    - design matrix.
#   N    - number of MCMC iterations.
#   flag - if TRUE, print the iteration number and accept/N each iteration.
# Returns a list with `chain` (N x p coefficient samples), `Deviance`
# (trace of -2*log-likelihood) and `Accepted_samples` (acceptance count).
binommh.default <- function(y, m, X, b = rep(0, dim(X)[2]),
                            B = diag(rep(10000, dim(X)[2])), N = 3000,
                            flag = FALSE, ...) {
  ######## utility functions
  # IWLS working response
  yhat <- function(y, X, bta, W_n = 1, mu_n) { drop(X %*% bta + (y - mu_n) / W_n) }
  # weighted least squares estimate
  btahat <- function(yhat_n, X, W_n = 1) { drop(solve(t(X) %*% (W_n * X)) %*% t(X) %*% (W_n * yhat_n)) }
  # binomial log-likelihood under the logit link
  log_lik <- function(y, m, X, bta) {
    eta_y <- X %*% bta
    sum(dbinom(y, m, drop(exp(eta_y) / (1 + exp(eta_y))), log = TRUE))
  }
  ###################
  chain <- matrix(rep(NA, dim(X)[2] * N), nrow = N); colnames(chain) <- colnames(X)
  Dev <- rep(NA, N)
  accept <- 0
  ### frequently used quantities
  Binv <- solve(B)
  #### initial values: one WLS step on the empirical logits
  ytemp <- y
  ytemp[ytemp == 0] <- .5
  W_0 <- ytemp * (m - ytemp) / m
  den <- m - ytemp
  den[den == 0] <- 0.5
  yhat_0 <- log(ytemp / (den))
  chain[1, ] <- btahat(yhat_0, X, W_0)
  Dev[1] <- -2 * log_lik(y, m, X, chain[1, ])
  ### proposal kernel
  for (i in 2:N) {
    # forward proposal: one IWLS step from the current state
    mu_n <- drop(m * exp(X %*% chain[i - 1, ]) / (1 + exp(X %*% chain[i - 1, ])))
    W_n <- mu_n * (m - mu_n) / m
    yhat_n <- yhat(y, X, chain[i - 1, ], W_n, mu_n)
    B. <- solve(Binv + t(X) %*% (W_n * X))
    b. <- drop(B. %*% (Binv %*% b + t(X) %*% (W_n * yhat_n)))
    betaprop <- drop(rmvnorm(1, b., B.))
    # reverse proposal: one IWLS step from the proposed state
    mu_prop <- drop(m * exp(X %*% betaprop) / (1 + exp(X %*% betaprop)))
    W_prop <- mu_prop * (m - mu_prop) / m
    yhat_prop <- yhat(y, X, betaprop, W_prop, mu_prop)
    B.prop <- solve(Binv + t(X) %*% (W_prop * X))
    # BUG FIX: the reverse-proposal mean must be built from the
    # reverse-proposal covariance B.prop (the original used B.), matching
    # poissmh.default in this file.
    b.prop <- drop(B.prop %*% (Binv %*% b + t(X) %*% (W_prop * yhat_prop)))
    # acceptance ratio: prior ratio * likelihood ratio * proposal ratio
    r <- exp(dmvnorm(t(betaprop), b, B, log = TRUE) - dmvnorm(chain[i - 1, ], b, B, log = TRUE) +
               sum(dbinom(y, m, mu_prop / m, log = TRUE) - dbinom(y, m, mu_n / m, log = TRUE)) +
               dmvnorm(chain[i - 1, ], b.prop, B.prop, log = TRUE) - (dmvnorm(t(betaprop), b., B., log = TRUE)))
    if (runif(1) < r) {
      accept <- accept + 1
      chain[i, ] <- betaprop
    } else {
      chain[i, ] <- chain[i - 1, ]
    }
    # NOTE: accept/N counts acceptances over ALL planned iterations, so it
    # only equals the acceptance rate at the final iteration.
    if (flag) { cat(i, accept / N, "\n") }
    Dev[i] <- -2 * log_lik(y, m, X, chain[i, ])
  }
  return(list(chain = chain, Deviance = Dev, Accepted_samples = accept))
}
# Formula interface for binommh. `weights` supplies the vector of binomial
# totals (the `m` argument of binommh.default).
binommh.formula <- function(formula, data=list(), weights,...)
{
  mf <- model.frame(formula=formula, data=data)
  X <- model.matrix(attr(mf, "terms"), data=mf)
  Y <- model.response(mf)
  # Return the fit directly: ending the function with an assignment made
  # the return value invisible at the top level.
  binommh.default(Y, weights, X, ...)
}
# Generic for the Poisson Metropolis-Hastings sampler; dispatches on `y`.
poissmh<- function(y,...) UseMethod("poissmh")
# Metropolis-Hastings sampler for a Bayesian Poisson regression model with
# log link, using an IWLS-based Gaussian proposal (Gamerman 1997 style):
# the proposal is one weighted least squares step from the current state,
# and the reverse move is evaluated around the proposed state.
#
# Arguments:
#   y    - vector of counts.
#   X    - design matrix.
#   b, B - prior mean vector and covariance matrix of the coefficients.
#   N    - number of MCMC iterations.
#   flag - if TRUE, print the iteration number and accept/N each iteration.
# Returns a list with `chain` (N x p coefficient samples), `Deviance`
# (trace of -2*log-likelihood) and `Accepted_samples` (acceptance count).
poissmh.default<-function(y,X,b=rep(0,dim(X)[2]),B=diag(rep(10000,dim(X)[2])),N=3000,flag=F,...){
######## utility functions
# IWLS working response
yhat<-function(y,X,bta,W_n=1,mu_n){drop(X%*%bta+(y-mu_n)/W_n)}
# weighted least squares estimate
btahat<-function(yhat_n,X,W_n=1){drop(solve(t(X)%*%(W_n*X))%*%t(X)%*%(W_n*yhat_n))}
# Poisson log-likelihood under the log link
log_lik<-function(y,X,bta){
eta_y<-X%*%bta
sum(dpois(y,drop(exp(eta_y)),log=T))
}
####################
chain<-matrix(rep(NA,dim(X)[2]*N),nrow=N);colnames(chain)<-colnames(X)
Dev<-rep(NA,N)
accept <- 0
### frequently used quantities
Binv<-solve(B)
#### initial values: one WLS step on log(y); zeros nudged to avoid log(0)
ytemp<-y
ytemp[ytemp==0]<-0.0000001
W_0<-ytemp
yhat_0<-log(ytemp)
chain[1,]<-btahat(yhat_0,X,W_0)
Dev[1]<- -2*log_lik(y,X,chain[1,])
### proposal kernel
for(i in 2:N){
# forward proposal: IWLS step from the current state
# (for Poisson with log link the IWLS weights equal the mean)
mu_n<-drop(exp(X%*%chain[i-1,]))
W_n<-mu_n
yhat_n<-yhat(y,X,chain[i-1,],W_n,mu_n)
B.<-solve(Binv+t(X)%*%(W_n*X))
b.<-drop(B.%*%(Binv%*%b+t(X)%*%(W_n*yhat_n)))
betaprop<-t(rmvnorm(1,b.,B.))
# reverse proposal: IWLS step from the proposed state
mu_prop<-drop(exp(X%*%betaprop))
W_prop<-mu_prop
yhat_prop<-yhat(y,X,betaprop,W_prop,mu_prop)
B.prop<-solve(Binv+t(X)%*%(W_prop*X))
b.prop<-drop(B.prop%*%(Binv%*%b+t(X)%*%(W_prop*yhat_prop)))
# acceptance ratio: prior ratio * likelihood ratio * proposal ratio
r<-exp(dmvnorm(t(betaprop),b,B,log=T)-dmvnorm(chain[i-1,],b,B,log=T)+
  sum(dpois(y,mu_prop,log=T)-dpois(y,mu_n,log=T))+
  dmvnorm(chain[i-1,],b.prop,B.prop,log=T)-dmvnorm(t(betaprop),b.,B.,log=T))
if(runif(1)<r){
accept<-accept+1
chain[i,]<-betaprop
} else{
chain[i,]<-chain[i-1,]
}
# NOTE: accept/N only equals the acceptance rate at the final iteration.
if(flag){cat(i,accept/N,"\n")}
Dev[i]<- -2*log_lik(y,X,chain[i,])
}
return(list(chain=chain,Deviance=Dev,Accepted_samples=accept))
}
# Formula interface for poissmh: builds the design matrix and response
# from `formula`/`data` and delegates to poissmh.default.
poissmh.formula <- function(formula, data=list(),...)
{
  mf <- model.frame(formula=formula, data=data)
  X <- model.matrix(attr(mf, "terms"), data=mf)
  Y <- model.response(mf)
  # Return the fit directly: ending the function with an assignment made
  # the return value invisible at the top level.
  poissmh.default(Y, X, ...)
}
# Generic for the exponential Metropolis-Hastings sampler; dispatches on `y`.
expmh<- function(y,...) UseMethod("expmh")
# Metropolis-Hastings sampler for a Bayesian exponential regression model
# with log link on the mean, using an IWLS-style Gaussian proposal.
#
# Arguments:
#   y    - vector of positive responses.
#   X    - design matrix.
#   b, B - prior mean vector and covariance matrix of the coefficients.
#   N    - number of MCMC iterations.
#   flag - if TRUE, print the iteration number and accept/N each iteration.
# Returns a list with `chain` (N x p coefficient samples), `Deviance`
# (trace of -2*log-likelihood) and `Accepted_samples` (acceptance count).
expmh.default<-function(y,X,b=rep(0,dim(X)[2]),B=diag(rep(100,dim(X)[2])),N=3000,flag=F,...){
######## utility functions
# IWLS working response
yhat<-function(y,X,bta,W_n=1,mu_n){drop(X%*%bta+(y-mu_n)/W_n)}
# (weighted) least squares estimate
btahat<-function(yhat_n,X,W_n=1){drop(solve(t(X)%*%(W_n*X))%*%t(X)%*%(W_n*yhat_n))}
# exponential log-likelihood; dexp() is parameterised by rate = 1/mean
log_lik<-function(y,X,bta){
eta_y<-X%*%bta
sum(dexp(y,1/drop(exp(eta_y)),log=T))
}
####################
chain<-matrix(rep(NA,dim(X)[2]*N),nrow=N);colnames(chain)<-colnames(X)
Dev<-rep(NA,N)
accept <- 0
### frequently used quantities; unit working weights make the proposal
### covariance B. constant, so it is computed once outside the loop
Binv<-solve(B)
B.<-solve(Binv+t(X)%*%X)
#### initial values: one LS step on log(y); zeros nudged to avoid log(0)
ytemp<-y
ytemp[ytemp==0]<-0.0000001
yhat_0<-log(ytemp)
chain[1,]<-btahat(yhat_0,X)
Dev[1]<- -2*log_lik(y,X,chain[1,])
### proposal kernel
for(i in 2:N){
# forward proposal mean from the current state
mu_n<-drop(exp(X%*%chain[i-1,]))
W_n<-mu_n
yhat_n<-yhat(y,X,chain[i-1,],W_n,mu_n)
b.<-B.%*%(Binv%*%b+t(X)%*%yhat_n)
betaprop<-t(rmvnorm(1,b.,B.))
# reverse proposal mean from the proposed state (same covariance B.)
mu_prop<-drop(exp(X%*%betaprop))
W_prop<-mu_prop
yhat_prop<-yhat(y,X,betaprop,W_prop,mu_prop)
b.prop<-B.%*%(Binv%*%b+t(X)%*%yhat_prop)
# acceptance ratio: prior ratio * likelihood ratio * proposal ratio
r<-exp(dmvnorm(t(betaprop),b,B,log=T)-dmvnorm(chain[i-1,],b,B,log=T)+
  sum(dexp(y,1/mu_prop,log =T)-dexp(y,1/mu_n,log =T))+
  dmvnorm(chain[i-1,],b.prop,B.,log = T)-(dmvnorm(t(betaprop),b.,B.,log = T)))
if(runif(1)<r){
accept<-accept+1
chain[i,]<-betaprop
} else{
chain[i,]<-chain[i-1,]
}
Dev[i]<- -2*log_lik(y,X,chain[i,])
# NOTE: accept/N only equals the acceptance rate at the final iteration.
if(flag){cat(i,accept/N,"\n")}
}
return(list(chain=chain,Deviance=Dev,Accepted_samples=accept))
}
# Formula interface for expmh: builds the design matrix and response
# from `formula`/`data` and delegates to expmh.default.
expmh.formula <- function(formula, data=list(),...)
{
  mf <- model.frame(formula=formula, data=data)
  X <- model.matrix(attr(mf, "terms"), data=mf)
  Y <- model.response(mf)
  # Return the fit directly: ending the function with an assignment made
  # the return value invisible at the top level.
  expmh.default(Y, X, ...)
}
|
/bglm/R/bglm_v3.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,106
|
r
|
################
# mvtnorm provides rmvnorm()/dmvnorm() used by every sampler below.
# library() errors immediately if the package is missing, whereas
# require() only warns and returns FALSE, deferring the failure.
library("mvtnorm")
# Generic for the binomial Metropolis-Hastings sampler; dispatches on `y`.
binommh <- function(y,...) UseMethod("binommh")

# Metropolis-Hastings sampler for a Bayesian binomial (logit link)
# regression model, using the IWLS-based Gaussian proposal of
# Gamerman (1997): each proposal is built from one iteratively weighted
# least squares step around the current state, and the reverse move is
# evaluated around the proposed state.
#
# Arguments:
#   y    - vector of binomial successes.
#   m    - vector of binomial totals (trials per observation).
#   b, B - prior mean vector and covariance matrix of the coefficients.
#   X    - design matrix.
#   N    - number of MCMC iterations.
#   flag - if TRUE, print the iteration number and accept/N each iteration.
# Returns a list with `chain` (N x p coefficient samples), `Deviance`
# (trace of -2*log-likelihood) and `Accepted_samples` (acceptance count).
binommh.default <- function(y, m, X, b = rep(0, dim(X)[2]),
                            B = diag(rep(10000, dim(X)[2])), N = 3000,
                            flag = FALSE, ...) {
  ######## utility functions
  # IWLS working response
  yhat <- function(y, X, bta, W_n = 1, mu_n) { drop(X %*% bta + (y - mu_n) / W_n) }
  # weighted least squares estimate
  btahat <- function(yhat_n, X, W_n = 1) { drop(solve(t(X) %*% (W_n * X)) %*% t(X) %*% (W_n * yhat_n)) }
  # binomial log-likelihood under the logit link
  log_lik <- function(y, m, X, bta) {
    eta_y <- X %*% bta
    sum(dbinom(y, m, drop(exp(eta_y) / (1 + exp(eta_y))), log = TRUE))
  }
  ###################
  chain <- matrix(rep(NA, dim(X)[2] * N), nrow = N); colnames(chain) <- colnames(X)
  Dev <- rep(NA, N)
  accept <- 0
  ### frequently used quantities
  Binv <- solve(B)
  #### initial values: one WLS step on the empirical logits
  ytemp <- y
  ytemp[ytemp == 0] <- .5
  W_0 <- ytemp * (m - ytemp) / m
  den <- m - ytemp
  den[den == 0] <- 0.5
  yhat_0 <- log(ytemp / (den))
  chain[1, ] <- btahat(yhat_0, X, W_0)
  Dev[1] <- -2 * log_lik(y, m, X, chain[1, ])
  ### proposal kernel
  for (i in 2:N) {
    # forward proposal: one IWLS step from the current state
    mu_n <- drop(m * exp(X %*% chain[i - 1, ]) / (1 + exp(X %*% chain[i - 1, ])))
    W_n <- mu_n * (m - mu_n) / m
    yhat_n <- yhat(y, X, chain[i - 1, ], W_n, mu_n)
    B. <- solve(Binv + t(X) %*% (W_n * X))
    b. <- drop(B. %*% (Binv %*% b + t(X) %*% (W_n * yhat_n)))
    betaprop <- drop(rmvnorm(1, b., B.))
    # reverse proposal: one IWLS step from the proposed state
    mu_prop <- drop(m * exp(X %*% betaprop) / (1 + exp(X %*% betaprop)))
    W_prop <- mu_prop * (m - mu_prop) / m
    yhat_prop <- yhat(y, X, betaprop, W_prop, mu_prop)
    B.prop <- solve(Binv + t(X) %*% (W_prop * X))
    # BUG FIX: the reverse-proposal mean must be built from the
    # reverse-proposal covariance B.prop (the original used B.), matching
    # poissmh.default in this file.
    b.prop <- drop(B.prop %*% (Binv %*% b + t(X) %*% (W_prop * yhat_prop)))
    # acceptance ratio: prior ratio * likelihood ratio * proposal ratio
    r <- exp(dmvnorm(t(betaprop), b, B, log = TRUE) - dmvnorm(chain[i - 1, ], b, B, log = TRUE) +
               sum(dbinom(y, m, mu_prop / m, log = TRUE) - dbinom(y, m, mu_n / m, log = TRUE)) +
               dmvnorm(chain[i - 1, ], b.prop, B.prop, log = TRUE) - (dmvnorm(t(betaprop), b., B., log = TRUE)))
    if (runif(1) < r) {
      accept <- accept + 1
      chain[i, ] <- betaprop
    } else {
      chain[i, ] <- chain[i - 1, ]
    }
    # NOTE: accept/N counts acceptances over ALL planned iterations, so it
    # only equals the acceptance rate at the final iteration.
    if (flag) { cat(i, accept / N, "\n") }
    Dev[i] <- -2 * log_lik(y, m, X, chain[i, ])
  }
  return(list(chain = chain, Deviance = Dev, Accepted_samples = accept))
}
# Formula interface for binommh. `weights` supplies the vector of binomial
# totals (the `m` argument of binommh.default).
binommh.formula <- function(formula, data=list(), weights,...)
{
  mf <- model.frame(formula=formula, data=data)
  X <- model.matrix(attr(mf, "terms"), data=mf)
  Y <- model.response(mf)
  # Return the fit directly: ending the function with an assignment made
  # the return value invisible at the top level.
  binommh.default(Y, weights, X, ...)
}
# Generic for the Poisson Metropolis-Hastings sampler; dispatches on `y`.
poissmh<- function(y,...) UseMethod("poissmh")
# Metropolis-Hastings sampler for a Bayesian Poisson regression model with
# log link, using an IWLS-based Gaussian proposal (Gamerman 1997 style):
# the proposal is one weighted least squares step from the current state,
# and the reverse move is evaluated around the proposed state.
#
# Arguments:
#   y    - vector of counts.
#   X    - design matrix.
#   b, B - prior mean vector and covariance matrix of the coefficients.
#   N    - number of MCMC iterations.
#   flag - if TRUE, print the iteration number and accept/N each iteration.
# Returns a list with `chain` (N x p coefficient samples), `Deviance`
# (trace of -2*log-likelihood) and `Accepted_samples` (acceptance count).
poissmh.default<-function(y,X,b=rep(0,dim(X)[2]),B=diag(rep(10000,dim(X)[2])),N=3000,flag=F,...){
######## utility functions
# IWLS working response
yhat<-function(y,X,bta,W_n=1,mu_n){drop(X%*%bta+(y-mu_n)/W_n)}
# weighted least squares estimate
btahat<-function(yhat_n,X,W_n=1){drop(solve(t(X)%*%(W_n*X))%*%t(X)%*%(W_n*yhat_n))}
# Poisson log-likelihood under the log link
log_lik<-function(y,X,bta){
eta_y<-X%*%bta
sum(dpois(y,drop(exp(eta_y)),log=T))
}
####################
chain<-matrix(rep(NA,dim(X)[2]*N),nrow=N);colnames(chain)<-colnames(X)
Dev<-rep(NA,N)
accept <- 0
### frequently used quantities
Binv<-solve(B)
#### initial values: one WLS step on log(y); zeros nudged to avoid log(0)
ytemp<-y
ytemp[ytemp==0]<-0.0000001
W_0<-ytemp
yhat_0<-log(ytemp)
chain[1,]<-btahat(yhat_0,X,W_0)
Dev[1]<- -2*log_lik(y,X,chain[1,])
### proposal kernel
for(i in 2:N){
# forward proposal: IWLS step from the current state
# (for Poisson with log link the IWLS weights equal the mean)
mu_n<-drop(exp(X%*%chain[i-1,]))
W_n<-mu_n
yhat_n<-yhat(y,X,chain[i-1,],W_n,mu_n)
B.<-solve(Binv+t(X)%*%(W_n*X))
b.<-drop(B.%*%(Binv%*%b+t(X)%*%(W_n*yhat_n)))
betaprop<-t(rmvnorm(1,b.,B.))
# reverse proposal: IWLS step from the proposed state
mu_prop<-drop(exp(X%*%betaprop))
W_prop<-mu_prop
yhat_prop<-yhat(y,X,betaprop,W_prop,mu_prop)
B.prop<-solve(Binv+t(X)%*%(W_prop*X))
b.prop<-drop(B.prop%*%(Binv%*%b+t(X)%*%(W_prop*yhat_prop)))
# acceptance ratio: prior ratio * likelihood ratio * proposal ratio
r<-exp(dmvnorm(t(betaprop),b,B,log=T)-dmvnorm(chain[i-1,],b,B,log=T)+
  sum(dpois(y,mu_prop,log=T)-dpois(y,mu_n,log=T))+
  dmvnorm(chain[i-1,],b.prop,B.prop,log=T)-dmvnorm(t(betaprop),b.,B.,log=T))
if(runif(1)<r){
accept<-accept+1
chain[i,]<-betaprop
} else{
chain[i,]<-chain[i-1,]
}
# NOTE: accept/N only equals the acceptance rate at the final iteration.
if(flag){cat(i,accept/N,"\n")}
Dev[i]<- -2*log_lik(y,X,chain[i,])
}
return(list(chain=chain,Deviance=Dev,Accepted_samples=accept))
}
# Formula interface for poissmh: builds the design matrix and response
# from `formula`/`data` and delegates to poissmh.default.
poissmh.formula <- function(formula, data=list(),...)
{
  mf <- model.frame(formula=formula, data=data)
  X <- model.matrix(attr(mf, "terms"), data=mf)
  Y <- model.response(mf)
  # Return the fit directly: ending the function with an assignment made
  # the return value invisible at the top level.
  poissmh.default(Y, X, ...)
}
# Generic for the exponential Metropolis-Hastings sampler; dispatches on `y`.
expmh<- function(y,...) UseMethod("expmh")
# Metropolis-Hastings sampler for a Bayesian exponential regression model
# with log link on the mean, using an IWLS-style Gaussian proposal.
#
# Arguments:
#   y    - vector of positive responses.
#   X    - design matrix.
#   b, B - prior mean vector and covariance matrix of the coefficients.
#   N    - number of MCMC iterations.
#   flag - if TRUE, print the iteration number and accept/N each iteration.
# Returns a list with `chain` (N x p coefficient samples), `Deviance`
# (trace of -2*log-likelihood) and `Accepted_samples` (acceptance count).
expmh.default<-function(y,X,b=rep(0,dim(X)[2]),B=diag(rep(100,dim(X)[2])),N=3000,flag=F,...){
######## utility functions
# IWLS working response
yhat<-function(y,X,bta,W_n=1,mu_n){drop(X%*%bta+(y-mu_n)/W_n)}
# (weighted) least squares estimate
btahat<-function(yhat_n,X,W_n=1){drop(solve(t(X)%*%(W_n*X))%*%t(X)%*%(W_n*yhat_n))}
# exponential log-likelihood; dexp() is parameterised by rate = 1/mean
log_lik<-function(y,X,bta){
eta_y<-X%*%bta
sum(dexp(y,1/drop(exp(eta_y)),log=T))
}
####################
chain<-matrix(rep(NA,dim(X)[2]*N),nrow=N);colnames(chain)<-colnames(X)
Dev<-rep(NA,N)
accept <- 0
### frequently used quantities; unit working weights make the proposal
### covariance B. constant, so it is computed once outside the loop
Binv<-solve(B)
B.<-solve(Binv+t(X)%*%X)
#### initial values: one LS step on log(y); zeros nudged to avoid log(0)
ytemp<-y
ytemp[ytemp==0]<-0.0000001
yhat_0<-log(ytemp)
chain[1,]<-btahat(yhat_0,X)
Dev[1]<- -2*log_lik(y,X,chain[1,])
### proposal kernel
for(i in 2:N){
# forward proposal mean from the current state
mu_n<-drop(exp(X%*%chain[i-1,]))
W_n<-mu_n
yhat_n<-yhat(y,X,chain[i-1,],W_n,mu_n)
b.<-B.%*%(Binv%*%b+t(X)%*%yhat_n)
betaprop<-t(rmvnorm(1,b.,B.))
# reverse proposal mean from the proposed state (same covariance B.)
mu_prop<-drop(exp(X%*%betaprop))
W_prop<-mu_prop
yhat_prop<-yhat(y,X,betaprop,W_prop,mu_prop)
b.prop<-B.%*%(Binv%*%b+t(X)%*%yhat_prop)
# acceptance ratio: prior ratio * likelihood ratio * proposal ratio
r<-exp(dmvnorm(t(betaprop),b,B,log=T)-dmvnorm(chain[i-1,],b,B,log=T)+
  sum(dexp(y,1/mu_prop,log =T)-dexp(y,1/mu_n,log =T))+
  dmvnorm(chain[i-1,],b.prop,B.,log = T)-(dmvnorm(t(betaprop),b.,B.,log = T)))
if(runif(1)<r){
accept<-accept+1
chain[i,]<-betaprop
} else{
chain[i,]<-chain[i-1,]
}
Dev[i]<- -2*log_lik(y,X,chain[i,])
# NOTE: accept/N only equals the acceptance rate at the final iteration.
if(flag){cat(i,accept/N,"\n")}
}
return(list(chain=chain,Deviance=Dev,Accepted_samples=accept))
}
# Formula interface for expmh: builds the design matrix and response
# from `formula`/`data` and delegates to expmh.default.
expmh.formula <- function(formula, data=list(),...)
{
  mf <- model.frame(formula=formula, data=data)
  X <- model.matrix(attr(mf, "terms"), data=mf)
  Y <- model.response(mf)
  # Return the fit directly: ending the function with an assignment made
  # the return value invisible at the top level.
  expmh.default(Y, X, ...)
}
|
# Interactive exploration of the iris data with ggplot2 and plotly.
library(ggplot2)
library(plotly)

# Use `<-` for assignment and a descriptive name: the original assigned to
# `data`, which masks the base::data() function.
iris_data <- iris
View(iris_data)

# Static ggplot2 scatter plot of petal dimensions, coloured by species.
petal_plot <- ggplot(iris_data, aes(Petal.Length, Petal.Width)) +
  geom_point(aes(col = Species))
ggplotly(petal_plot)  # convert the ggplot to an interactive plotly widget

# Native plotly scatter plot of sepal dimensions.
sepal_plot <- plot_ly(iris_data, x = ~Sepal.Length, y = ~Sepal.Width, color = ~Species)
sepal_plot
# See the plotly website for more chart types and options.
|
/GGPLOTLY.R
|
no_license
|
sarthakverma11/R-shiny
|
R
| false
| false
| 270
|
r
|
data=iris
View(iris)
library(ggplot2)
a=ggplot(data,aes(Petal.Length,Petal.Width)) + geom_point(aes(col=Species))
library(plotly)
ggplotly(a) #To make it more interactive
b=plot_ly(iris,x = ~Sepal.Length,y = ~Sepal.Width,color = ~Species)
b
#Plotly website
|
# Split one number's decimal-string representation into its individual
# digits; NA input yields a zero-length integer vector.
expand_helper <- function(x) {
  if (is.na(x)) {
    return(integer(0L))
  }
  idx <- seq(nchar(x))
  as.integer(substring(x, idx, idx))
}
# Flatten a vector/list of numbers into a single integer vector of all
# their digits, preserving order (NULL for empty input).
expand_digits <- function(v) {
  do.call(c, base::lapply(v, expand_helper))
}
# Apply the ISIN (Luhn-style) weights: alternating 2, 1 anchored so the
# *last* digit gets weight 2, working leftwards.
mul_isin_f <- function(v) {
  weights <- rev(rep_len(c(2L, 1L), length(v)))
  v * weights
}
# Compute the ISIN check digit (as a one-character string) for each
# 11-character ISIN body in `s`.
isin_compute_checksum <- function(s) {
  # Sum of weighted digits for one code-point vector.
  per_body_sum <- function(u8) {
    codes <- figi_utf8_to_code(u8)
    digits <- expand_digits(codes)
    weighted <- mul_isin_f(digits)
    sum(figi_sum_digits(weighted))
  }
  sums <- base::vapply(figi_char_to_utf8(s), per_body_sum,
                       FUN.VALUE = NA_integer_)
  # Check digit is the additive inverse of the digit sum, mod 10.
  as.character((-sums) %% 10L)
}
# TRUE where the 12th character matches the checksum of characters 1-11.
isin_has_correct_checksum <- function(s) {
  expected <- isin_compute_checksum(substr(s, 1L, 11L))
  actual <- substr(s, 12L, 12L)
  expected == actual
}
#' Check validity of ISIN
#'
#' Vectorised validity check for ISINs (International Securities
#' Identification Numbers): an element is valid when it is non-missing,
#' exactly 12 characters long, matches the ISIN character pattern (two
#' letters, nine alphanumerics, one digit) and carries a correct check
#' digit.
#'
#' @param s a character vector for whose elements validity of ISIN
#' (International Securities Identification Number) is checked.
#'
#' @return A logical vector, one element per input.
#'
#' @examples
#' isin_check("BBG000BLNQ16")
#' isin_check("NRG92C84SB39")
#' isin_check(c("BBG000BLNQ16", "NRG92C84SB39"))
#'
#' @export
isin_check <- function(s) {
  well_formed <- !is.na(s) &
    nchar(s) == 12L &
    grepl("^[A-Z]{2}[A-Z0-9]{9}[0-9]$", s)
  # Elementwise & (not &&): the checksum is evaluated for every element,
  # exactly as in the original expression.
  well_formed & isin_has_correct_checksum(s)
}
|
/R/isin.R
|
permissive
|
philaris/figir
|
R
| false
| false
| 1,518
|
r
|
expand_helper <- function(x) {
if (is.na(x)) {
integer(0L)
} else {
as.integer(substring(x, seq(nchar(x)), seq(nchar(x))))
}
}
expand_digits <- function(v) {
base::Reduce(c, base::lapply(v, expand_helper))
}
mul_isin_f <- function(v) {
v * rev(rep(c(2L, 1L), length.out = length(v)))
}
isin_compute_checksum <- function(s) {
u8_l <- figi_char_to_utf8(s)
figi_code_l <- base::lapply(u8_l, FUN = figi_utf8_to_code)
digi_code_l <- base::lapply(figi_code_l, FUN = expand_digits)
mul12_l <- base::lapply(digi_code_l, FUN = mul_isin_f)
figi_sum2_l <- base::lapply(mul12_l, FUN = figi_sum_digits)
sum_digits_v <- base::vapply(figi_sum2_l, FUN = sum, FUN.VALUE = NA_integer_)
check_digit_v <- -sum_digits_v %% 10L
as.character(check_digit_v)
}
isin_has_correct_checksum <- function(s) {
isin_compute_checksum(substr(s, 1L, 11L)) == substr(s, 12L, 12L)
}
#' Check validity of ISIN
#'
#' Given a character vector, check the validity of ISIN
#' (International Securities Identification Number)
#' for each of its elements.
#'
#' @param s a character vector for whose elements validity of ISIN
#' (International Securities Identification Number) is checked.
#'
#' @return A logical vector.
#'
#' @examples
#' isin_check("BBG000BLNQ16")
#' isin_check("NRG92C84SB39")
#' isin_check(c("BBG000BLNQ16", "NRG92C84SB39"))
#'
#' @export
isin_check <- function(s) {
!is.na(s) &
base::nchar(s) == 12L &
grepl("^[A-Z]{2}[A-Z0-9]{9}[0-9]$",
s) &
isin_has_correct_checksum(s)
}
|
\name{r_from_tcmd}
\alias{r_from_tcmd}
\title{Internal anRpackage objects}
\description{Internal anRpackage objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
/man/r_from_tcmd.Rd
|
no_license
|
cran/RcmdrPlugin.MA
|
R
| false
| false
| 191
|
rd
|
\name{r_from_tcmd}
\alias{r_from_tcmd}
\title{Internal anRpackage objects}
\description{Internal anRpackage objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
#' Upper Level Set Spatial Scan Test
#'
#' \code{uls.test} performs the Upper Level Set (ULS)
#' spatial scan test of Patil and Taillie (2004). The test
#' is performed using the spatial scan test based on a fixed
#' number of cases. The windows are based on the Upper
#' Level Sets proposed by Patil and Taillie (2004). The
#' clusters returned are non-overlapping, ordered from most
#' significant to least significant. The first cluster is
#' the most likely to be a cluster. If no significant
#' clusters are found, then the most likely cluster is
#' returned (along with a warning).
#'
#' The ULS method has a special (and time consuming)
#' construction when the observed rates aren't unique. This
#' is unlikely to arise for real data, except with observed
#' rates of 0, which are of little interest. The method can
#' take substantially longer if this is considered.
#'
#' @param w A binary spatial adjacency matrix for the
#' regions.
#' @param check.unique A logical value indicating whether a
#' check for unique values should be determined. The
#' default is \code{FALSE}. This is unlikely to make a
#' practical difference for most real data sets.
#' @inheritParams scan.test
#'
#' @return Returns a list of length two of class scan. The
#' first element (clusters) is a list containing the
#' significant, non-overlapping clusters, and has the
#' following components: \item{locids}{The location ids of
#' regions in a significant cluster.} \item{pop}{The total
#' population in the cluster window.} \item{cases}{The
#' observed number of cases in the cluster window.}
#' \item{expected}{The expected number of cases in the
#' cluster window.} \item{smr}{Standardized mortality
#' ratio (observed/expected) in the cluster window.}
#' \item{rr}{Relative risk in the cluster window.}
#' \item{loglikrat}{The loglikelihood ratio for the
#' cluster window (i.e., the log of the test statistic).}
#' \item{pvalue}{The pvalue of the test statistic
#' associated with the cluster window.} The second element
#' of the list is the centroid coordinates. This is
#' needed for plotting purposes.
#' @author Joshua French
#' @seealso \code{\link{print.smerc_cluster}},
#' \code{\link{summary.smerc_cluster}},
#' \code{\link{plot.smerc_cluster}},
#' \code{\link{scan.stat}}, \code{\link{scan.test}}
#' @export
#' @references Patil, G.P. & Taillie, C. Upper level set
#' scan statistic for detecting arbitrarily shaped
#' hotspots. Environmental and Ecological Statistics
#' (2004) 11(2):183-197.
#' <doi:10.1023/B:EEST.0000027208.48919.7e>
#' @examples
#' data(nydf)
#' data(nyw)
#' coords <- with(nydf, cbind(longitude, latitude))
#' out <- uls.test(
#'   coords = coords, cases = floor(nydf$cases),
#'   pop = nydf$pop, w = nyw,
#'   alpha = 0.05, longlat = TRUE,
#'   nsim = 9, ubpop = 0.5
#' )
#' # better plotting
#' if (require("sf", quietly = TRUE)) {
#'   data(nysf)
#'   plot(st_geometry(nysf), col = color.clusters(out))
#' }
uls.test <- function(coords, cases, pop, w,
                     ex = sum(cases) / sum(pop) * pop,
                     nsim = 499, alpha = 0.1,
                     ubpop = 0.5, longlat = FALSE,
                     cl = NULL, type = "poisson",
                     check.unique = FALSE) {
  # NOTE(review): check.unique is accepted and documented but is never
  # referenced in this function body — confirm whether it should be
  # forwarded to uls.zones().
  # sanity checking of all inputs (delegated to package-internal helper)
  arg_check_scan_test(coords, cases, pop, ex, nsim, alpha,
    nsim + 1, ubpop, longlat, TRUE,
    k = 1, w = w, type = type
  )
  coords <- as.matrix(coords)
  # candidate zones from the upper level sets of the observed rates
  zones <- uls.zones(cases, pop, w, ubpop)
  # compute needed information
  ty <- sum(cases)                 # total cases
  yin <- zones.sum(zones, cases)   # cases inside each zone
  # compute test statistics for observed data
  if (type == "poisson") {
    ein <- zones.sum(zones, ex)
    tobs <- stat.poisson(yin, ty - yin, ein, ty - ein)
  } else if (type == "binomial") {
    tpop <- sum(pop)
    popin <- zones.sum(zones, pop)
    tobs <- stat.binom(yin, ty - yin, ty, popin, tpop - popin, tpop)
  }
  # compute test statistics for simulated data (Monte Carlo p-values);
  # with nsim == 0, every zone gets p-value 1
  if (nsim > 0) {
    message("computing statistics for simulated data:")
    tsim <- uls.sim(
      nsim = nsim, ty = ty, ex = ex, w = w,
      pop = pop, ubpop = ubpop, cl = cl
    )
    pvalue <- mc.pvalue(tobs, tsim)
  } else {
    pvalue <- rep(1, length(tobs))
  }
  # significant, ordered, non-overlapping clusters and
  # information
  pruned <- sig_noc(
    tobs = tobs, zones = zones,
    pvalue = pvalue, alpha = alpha,
    order_by = "tobs"
  )
  # assemble the standard smerc cluster object returned to the user
  smerc_cluster(
    tobs = pruned$tobs, zones = pruned$zones,
    pvalue = pruned$pvalue, coords = coords,
    cases = cases, pop = pop, ex = ex,
    longlat = longlat, method = "upper level set",
    rel_param = list(
      type = type,
      simdist = "multinomial",
      nsim = nsim,
      ubpop = ubpop
    ),
    alpha = alpha,
    w = w, d = NULL
  )
}
|
/R/uls.test.R
|
no_license
|
cran/smerc
|
R
| false
| false
| 4,919
|
r
|
#' Upper Level Set Spatial Scan Test
#'
#' \code{uls.test} performs the Upper Level Set (ULS)
#' spatial scan test of Patil and Taillie (2004). The test
#' is performed using the spatial scan test based on a fixed
#' number of cases. The windows are based on the Upper
#' Level Sets proposed by Patil and Taillie (2004). The
#' clusters returned are non-overlapping, ordered from most
#' significant to least significant. The first cluster is
#' the most likely to be a cluster. If no significant
#' clusters are found, then the most likely cluster is
#' returned (along with a warning).
#'
#' The ULS method has a special (and time consuming)
#' construction when the observed rates aren't unique. This
#' is unlikely to arise for real data, except with observed
#' rates of 0, which are of little interest. The method can
#' take substantially if this is considered.
#'
#' @param w A binary spatial adjacency matrix for the
#' regions.
#' @param check.unique A logical value indicating whether a
#' check for unique values should be determined. The
#' default is \code{FALSE}. This is unlikely to make a
#' practical different for most real data sets.
#' @inheritParams scan.test
#'
#' @return Returns a list of length two of class scan. The
#' first element (clusters) is a list containing the
#' significant, non-ovlappering clusters, and has the the
#' following components: \item{locids}{The location ids of
#' regions in a significant cluster.} \item{pop}{The total
#' population in the cluser window.} \item{cases}{The
#' observed number of cases in the cluster window.}
#' \item{expected}{The expected number of cases in the
#' cluster window.} \item{smr}{Standarized mortaility
#' ratio (observed/expected) in the cluster window.}
#' \item{rr}{Relative risk in the cluster window.}
#' \item{loglikrat}{The loglikelihood ratio for the
#' cluster window (i.e., the log of the test statistic).}
#' \item{pvalue}{The pvalue of the test statistic
#' associated with the cluster window.} The second element
#' of the list is the centroid coordinates. This is
#' needed for plotting purposes.
#' @author Joshua French
#' @seealso \code{\link{print.smerc_cluster}},
#' \code{\link{summary.smerc_cluster}},
#' \code{\link{plot.smerc_cluster}},
#' \code{\link{scan.stat}}, \code{\link{scan.test}}
#' @export
#' @references Patil, G.P. & Taillie, C. Upper level set
#' scan statistic for detecting arbitrarily shaped
#' hotspots. Environmental and Ecological Statistics
#' (2004) 11(2):183-197.
#' <doi:10.1023/B:EEST.0000027208.48919.7e>
#' @examples
#' data(nydf)
#' data(nyw)
#' coords <- with(nydf, cbind(longitude, latitude))
#' out <- uls.test(
#' coords = coords, cases = floor(nydf$cases),
#' pop = nydf$pop, w = nyw,
#' alpha = 0.05, longlat = TRUE,
#' nsim = 9, ubpop = 0.5
#' )
#' # better plotting
#' if (require("sf", quietly = TRUE)) {
#' data(nysf)
#' plot(st_geometry(nysf), col = color.clusters(out))
#' }
uls.test <- function(coords, cases, pop, w,
ex = sum(cases) / sum(pop) * pop,
nsim = 499, alpha = 0.1,
ubpop = 0.5, longlat = FALSE,
cl = NULL, type = "poisson",
check.unique = FALSE) {
# sanity checking
arg_check_scan_test(coords, cases, pop, ex, nsim, alpha,
nsim + 1, ubpop, longlat, TRUE,
k = 1, w = w, type = type
)
coords <- as.matrix(coords)
zones <- uls.zones(cases, pop, w, ubpop)
# compute needed information
ty <- sum(cases)
yin <- zones.sum(zones, cases)
# compute test statistics for observed data
if (type == "poisson") {
ein <- zones.sum(zones, ex)
tobs <- stat.poisson(yin, ty - yin, ein, ty - ein)
} else if (type == "binomial") {
tpop <- sum(pop)
popin <- zones.sum(zones, pop)
tobs <- stat.binom(yin, ty - yin, ty, popin, tpop - popin, tpop)
}
# compute test statistics for simulated data
if (nsim > 0) {
message("computing statistics for simulated data:")
tsim <- uls.sim(
nsim = nsim, ty = ty, ex = ex, w = w,
pop = pop, ubpop = ubpop, cl = cl
)
pvalue <- mc.pvalue(tobs, tsim)
} else {
pvalue <- rep(1, length(tobs))
}
# significant, ordered, non-overlapping clusters and
# information
pruned <- sig_noc(
tobs = tobs, zones = zones,
pvalue = pvalue, alpha = alpha,
order_by = "tobs"
)
smerc_cluster(
tobs = pruned$tobs, zones = pruned$zones,
pvalue = pruned$pvalue, coords = coords,
cases = cases, pop = pop, ex = ex,
longlat = longlat, method = "upper level set",
rel_param = list(
type = type,
simdist = "multinomial",
nsim = nsim,
ubpop = ubpop
),
alpha = alpha,
w = w, d = NULL
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coverage.R
\name{addin_extract_covr}
\alias{addin_extract_covr}
\title{Add-in for Extract & Coverage}
\usage{
addin_extract_covr()
}
\description{
Add-in for Extract & Coverage
}
|
/man/addin_extract_covr.Rd
|
no_license
|
yanggenome/testextra
|
R
| false
| true
| 257
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coverage.R
\name{addin_extract_covr}
\alias{addin_extract_covr}
\title{Add-in for Extract & Coverage}
\usage{
addin_extract_covr()
}
\description{
Add-in for Extract & Coverage
}
|
#' Separate a Column Pasted by paste2
#'
#' Separates a \code{\link[qdap]{paste2}} column into separate columns.
#'
#' @param column The pasted vector.
#' @param col.sep The column separator used in \code{paste2}.
#' @param name.sep Name separator used in the column (generally for internal use
#' with \code{\link[qdap]{colsplit2df}}).
#' @return Returns a dataframe of split columns.
#' @seealso \code{\link[qdap]{colsplit2df}},
#' \code{\link[qdap]{paste2}}
#' @keywords column-split
#' @export
#' @examples
#' \dontrun{
#' foo1 <- paste2(CO2[, 1:3])
#' head(foo1, 12)
#' bar1 <- colSplit(foo1)
#' head(bar1, 10)
#'
#' foo2 <- paste2(mtcars[, 1:3], sep="|")
#' head(foo2, 12)
#' bar2 <- colSplit(foo2, col.sep = "|")
#' head(bar2, 10)
#' }
colSplit <-
function(column, col.sep = ".", name.sep = "&"){
    # Coerce to a one-column data frame so both vectors and data frames work.
    column <- as.data.frame(column)
    # Split each pasted value on the (literal, non-regex) column separator.
    svar <- strsplit(as.character(column[, 1]), col.sep, fixed = TRUE)
    svar <- data.frame(do.call('rbind', svar), stringsAsFactors = FALSE)
    # && (not &): scalar condition that short-circuits, so strsplit() on the
    # column name is skipped entirely when name.sep is NULL.
    if (!is.null(name.sep) && length(unlist(strsplit(names(column),
        name.sep, fixed = TRUE))) > 1){
        cn <- strsplit(names(column)[1], name.sep, fixed = TRUE)[[1]]
        if (length(cn) == ncol(svar)) {
            names(svar) <- cn
        } else {
            # Fall back to syntactic default names when the split name does
            # not line up with the number of generated columns.
            colnames(svar) <- make.names(seq_len(ncol(svar)), unique = TRUE)
        }
    }
    svar
}
|
/R/colSplit.R
|
no_license
|
cran/qdap
|
R
| false
| false
| 1,366
|
r
|
#' Separate a Column Pasted by paste2
#'
#' Separates a \code{\link[qdap]{paste2}} column into separate columns.
#'
#' @param column The pasted vector.
#' @param col.sep The column separator used in \code{paste2}.
#' @param name.sep Name separator used in the column (generally for internal use
#' with \code{\link[qdap]{colsplit2df}}).
#' @return Returns a dataframe of split columns.
#' @seealso \code{\link[qdap]{colsplit2df}},
#' \code{\link[qdap]{paste2}}
#' @keywords column-split
#' @export
#' @examples
#' \dontrun{
#' foo1 <- paste2(CO2[, 1:3])
#' head(foo1, 12)
#' bar1 <- colSplit(foo1)
#' head(bar1, 10)
#'
#' foo2 <- paste2(mtcars[, 1:3], sep="|")
#' head(foo2, 12)
#' bar2 <- colSplit(foo2, col.sep = "|")
#' head(bar2, 10)
#' }
colSplit <-
function(column, col.sep = ".", name.sep = "&"){
column <- as.data.frame(column)
svar <- strsplit(as.character(column[, 1]), col.sep, fixed = TRUE)
svar <- data.frame(do.call('rbind', svar), stringsAsFactors = FALSE)
if (!is.null(name.sep) & length(unlist(strsplit(names(column),
name.sep, fixed = TRUE))) > 1){
cn <- strsplit(names(column)[1], name.sep, fixed = TRUE)[[1]]
if (length(cn) == ncol(svar)) {
names(svar) <- cn
} else {
colnames(svar) <- make.names(1:ncol(svar), unique = TRUE)
}
}
svar
}
|
\name{LCHab2Lab}
\alias{LCHab2Lab}
\title{Convert LCHab coordinates to CIE Lab}
\description{\code{LCHab2Lab} Converts LCHab coordinates to CIE Lab.
}
\usage{LCHab2Lab(LCHabmatrix) }
\arguments{
\item{LCHabmatrix}{ LCHab coordinates}
}
\value{
CIE Lab coordinates
}
\source{
Logicol S.r.l., 2014
EasyRGB color search engine
\url{http://www.easyrgb.com/}
}
\references{
Logicol S.r.l., 2014
EasyRGB color search engine
\url{http://www.easyrgb.com/}
}
\author{Jose Gama}
\examples{
LCHab2Lab(c(0.310897, 0.306510, 74.613450))
}
\keyword{datasets}
|
/man/LCHab2Lab.Rd
|
no_license
|
playwar/colorscience
|
R
| false
| false
| 550
|
rd
|
\name{LCHab2Lab}
\alias{LCHab2Lab}
\title{Convert LCHab coordinates to CIE Lab}
\description{\code{LCHab2Lab} Converts LCHab coordinates to CIE Lab.
}
\usage{LCHab2Lab(LCHabmatrix) }
\arguments{
\item{LCHabmatrix}{ LCHab coordinates}
}
\value{
CIE Lab coordinates
}
\source{
Logicol S.r.l., 2014
EasyRGB color search engine
\url{http://www.easyrgb.com/}
}
\references{
Logicol S.r.l., 2014
EasyRGB color search engine
\url{http://www.easyrgb.com/}
}
\author{Jose Gama}
\examples{
LCHab2Lab(c(0.310897, 0.306510, 74.613450))
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/softlayer-ranges.R
\name{softlayer_ranges}
\alias{softlayer_ranges}
\title{Softlayer ranges}
\usage{
softlayer_ranges(method = c("asn", "list"))
}
\arguments{
\item{method}{if \code{list}, this method will use the HTML published ranges; if
\code{asn}, this method will build the CIDR list from Softlayer published
ASNs. The default method is "\code{asn}".}
}
\value{
a \code{tibble}, the most interesting column of which is \code{ip_range}
}
\description{
Retrieves the official list of Softlayer cloud network ranges.
}
\details{
Softlayer provides \href{https://github.com/IBM-Bluemix-Docs/hardware-firewall-dedicated/blob/master/ips.md}{a list}
of public netblock ranges. Softlayer also has a large number of large ASNs but
Softlayer tech support claims AS36351 is their "public cloud" ASN.
Methods are provided that enable using either of these sources to generate the CIDR list.
It is unlikely that this list will change in your analysis session, so it is
recommended that you cache the results. Future versions will automatically cache
the results both in-session and on-disk for a period of time.
}
\examples{
ranges <- softlayer_ranges()
normalize_ipv4(ranges)
}
|
/man/softlayer_ranges.Rd
|
no_license
|
isabella232/cloudcidrs
|
R
| false
| true
| 1,252
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/softlayer-ranges.R
\name{softlayer_ranges}
\alias{softlayer_ranges}
\title{Softlayer ranges}
\usage{
softlayer_ranges(method = c("asn", "list"))
}
\arguments{
\item{method}{if \code{list}, this method will use the HTML published ranges; if
\code{asn}, this method will build the CIDR list from Softlayer published
ASNs. The default method is "\code{asn}".}
}
\value{
a \code{tibble}, the most interesting colun of which is \code{ip_range}
}
\description{
Retrieves the official list of Softlayer cloud network ranges.
}
\details{
Softlayer provides \href{https://github.com/IBM-Bluemix-Docs/hardware-firewall-dedicated/blob/master/ips.md}{a list}
of public netblock ranges. Softlayer also has a large number of large ASNs but
Softlayer tech support claimes AS36351 is their "public cloud" ASN.
Methods are provided that enable using either of these sources to generate the CIDR list.
It is unlikely that this list will change in your analysis session, so it is
recommended that you cache the results. Future versions will automatically cache
the results both in-session and on-disk for a period of time.
}
\examples{
ranges <- softlayer_ranges()
normalize_ipv4(ranges)
}
|
##
# Author: Autogenerated on 2013-11-27 18:13:59
# gitHash: c4ad841105ba82f4a3979e4cf1ae7e20a5905e59
# SEED: 4663640625336856642
##
source('./findNSourceUtils.R')
Log.info("======================== Begin Test ===========================")
complexFilterTest_failtoconverge_1000x501_148 <- function(conn) {
Log.info("A munge-task R unit test on data <failtoconverge_1000x501> testing the functional unit <['', '==']> ")
Log.info("Uploading failtoconverge_1000x501")
hex <- h2o.uploadFile(conn, locate("../../smalldata/logreg/failtoconverge_1000x501.csv.gz"), "rfailtoconverge_1000x501.hex")
Log.info("Performing compound task ( ( hex[,c(156)] == 0.910207334282 )) on dataset <failtoconverge_1000x501>")
filterHex <- hex[( ( hex[,c(156)] == 0.910207334282 )) ,]
Log.info("Performing compound task ( ( hex[,c(192)] == 0.90791247409 )) on dataset failtoconverge_1000x501, and also subsetting columns.")
filterHex <- hex[( ( hex[,c(192)] == 0.90791247409 )) , c(156,195,311,196,454,192,80,101,429,183,402,342,343,9,420,423,202,351,206,467,304,421,383,73,72,265,227,226,167,222,79,76,33,473,355,412,295,262)]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[( ( hex[,c(192)] == 0.90791247409 )) , c(344,346,347,340,341,348,349,298,299,296,297,294,292,293,290,291,199,198,194,197,191,190,193,270,271,272,273,274,275,276,277,278,279,449,448,443,442,441,440,447,446,445,444,108,109,102,103,100,106,107,104,105,39,38,32,31,30,37,36,35,34,438,439,436,437,434,435,432,433,430,431,339,338,335,334,337,336,331,330,333,332,345,6,99,98,91,90,93,92,95,94,97,96,238,239,234,235,236,237,230,231,232,233,1,146,147,144,145,142,143,140,141,148,149,133,132,131,130,137,136,135,134,494,495,139,138,490,491,492,493,24,25,26,27,20,21,22,23,28,29,407,406,405,404,403,401,400,409,408,379,378,371,370,373,372,375,374,377,376,393,392,88,89,397,396,395,394,82,83,399,81,86,87,84,85,7,245,244,247,246,241,240,243,242,249,248,458,459,450,451,452,453,455,456,457,179,178,177,176,175,174,173,172,171,170,253,182,180,181,186,187,184,185,188,189,11,10,13,12,15,14,17,16,19,18,62,322,323,320,321,326,327,324,325,328,329,201,200,203,205,204,207,209,208,77,75,74,71,70,78,2,8,68,120,121,122,123,124,125,126,127,128,129,414,415,416,417,410,411,413,498,418,419,499,319,318,313,312,310,317,316,315,314,496,497,3,368,369,366,367,364,365,362,363,360,361,380,381,382,384,385,386,387,388,389,60,61,258,259,64,65,66,67,252,69,250,251,256,257,254,255,502,500,501,469,468,465,464,466,461,460,463,462,168,169,164,165,166,160,161,162,163,357,356,354,353,352,350,359,358,216,217,214,215,212,213,210,211,218,219,289,288,4,281,280,283,282,285,284,287,286,263,261,260,267,266,264,269,268,59,58,55,54,57,56,51,50,53,52,63,115,114,117,116,111,110,113,112,119,118,428,422,425,424,427,426,308,309,300,301,302,303,305,306,307,229,228,225,224,223,221,220,391,390,151,150,153,152,155,154,157,159,158,398,48,49,46,47,44,45,42,43,40,41,5,489,488,487,486,485,484,483,482,481,480,472,470,471,476,477,474,475,478,479)]
}
conn = new("H2OClient", ip=myIP, port=myPort)
tryCatch(test_that("compoundFilterTest_ on data failtoconverge_1000x501", complexFilterTest_failtoconverge_1000x501_148(conn)), warning = function(w) WARN(w), error = function(e) FAIL(e))
PASS()
|
/R/tests/testdir_autoGen/runit_complexFilterTest_failtoconverge_1000x501_148.R
|
permissive
|
hardikk/h2o
|
R
| false
| false
| 3,538
|
r
|
##
# Author: Autogenerated on 2013-11-27 18:13:59
# gitHash: c4ad841105ba82f4a3979e4cf1ae7e20a5905e59
# SEED: 4663640625336856642
##
source('./findNSourceUtils.R')
Log.info("======================== Begin Test ===========================")
complexFilterTest_failtoconverge_1000x501_148 <- function(conn) {
Log.info("A munge-task R unit test on data <failtoconverge_1000x501> testing the functional unit <['', '==']> ")
Log.info("Uploading failtoconverge_1000x501")
hex <- h2o.uploadFile(conn, locate("../../smalldata/logreg/failtoconverge_1000x501.csv.gz"), "rfailtoconverge_1000x501.hex")
Log.info("Performing compound task ( ( hex[,c(156)] == 0.910207334282 )) on dataset <failtoconverge_1000x501>")
filterHex <- hex[( ( hex[,c(156)] == 0.910207334282 )) ,]
Log.info("Performing compound task ( ( hex[,c(192)] == 0.90791247409 )) on dataset failtoconverge_1000x501, and also subsetting columns.")
filterHex <- hex[( ( hex[,c(192)] == 0.90791247409 )) , c(156,195,311,196,454,192,80,101,429,183,402,342,343,9,420,423,202,351,206,467,304,421,383,73,72,265,227,226,167,222,79,76,33,473,355,412,295,262)]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[( ( hex[,c(192)] == 0.90791247409 )) , c(344,346,347,340,341,348,349,298,299,296,297,294,292,293,290,291,199,198,194,197,191,190,193,270,271,272,273,274,275,276,277,278,279,449,448,443,442,441,440,447,446,445,444,108,109,102,103,100,106,107,104,105,39,38,32,31,30,37,36,35,34,438,439,436,437,434,435,432,433,430,431,339,338,335,334,337,336,331,330,333,332,345,6,99,98,91,90,93,92,95,94,97,96,238,239,234,235,236,237,230,231,232,233,1,146,147,144,145,142,143,140,141,148,149,133,132,131,130,137,136,135,134,494,495,139,138,490,491,492,493,24,25,26,27,20,21,22,23,28,29,407,406,405,404,403,401,400,409,408,379,378,371,370,373,372,375,374,377,376,393,392,88,89,397,396,395,394,82,83,399,81,86,87,84,85,7,245,244,247,246,241,240,243,242,249,248,458,459,450,451,452,453,455,456,457,179,178,177,176,175,174,173,172,171,170,253,182,180,181,186,187,184,185,188,189,11,10,13,12,15,14,17,16,19,18,62,322,323,320,321,326,327,324,325,328,329,201,200,203,205,204,207,209,208,77,75,74,71,70,78,2,8,68,120,121,122,123,124,125,126,127,128,129,414,415,416,417,410,411,413,498,418,419,499,319,318,313,312,310,317,316,315,314,496,497,3,368,369,366,367,364,365,362,363,360,361,380,381,382,384,385,386,387,388,389,60,61,258,259,64,65,66,67,252,69,250,251,256,257,254,255,502,500,501,469,468,465,464,466,461,460,463,462,168,169,164,165,166,160,161,162,163,357,356,354,353,352,350,359,358,216,217,214,215,212,213,210,211,218,219,289,288,4,281,280,283,282,285,284,287,286,263,261,260,267,266,264,269,268,59,58,55,54,57,56,51,50,53,52,63,115,114,117,116,111,110,113,112,119,118,428,422,425,424,427,426,308,309,300,301,302,303,305,306,307,229,228,225,224,223,221,220,391,390,151,150,153,152,155,154,157,159,158,398,48,49,46,47,44,45,42,43,40,41,5,489,488,487,486,485,484,483,482,481,480,472,470,471,476,477,474,475,478,479)]
}
conn = new("H2OClient", ip=myIP, port=myPort)
tryCatch(test_that("compoundFilterTest_ on data failtoconverge_1000x501", complexFilterTest_failtoconverge_1000x501_148(conn)), warning = function(w) WARN(w), error = function(e) FAIL(e))
PASS()
|
# Example from ?getTimer: create a timer, time a one-second event, and
# retrieve the recorded timings as a data frame.
library(timeR)
### Name: getTimer
### Title: Get the data frame in timer object
### Aliases: getTimer
### ** Examples
timer1 <- createTimer()      # new timer object
timer1$start("event1")       # begin timing "event1"
Sys.sleep(1)                 # simulated work
timer1$stop("event1")        # end timing
getTimer(timer1)             # data frame of recorded events
|
/data/genthat_extracted_code/timeR/examples/getTimer.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 224
|
r
|
library(timeR)
### Name: getTimer
### Title: Get the data frame in timer object
### Aliases: getTimer
### ** Examples
timer1 <- createTimer()
timer1$start("event1")
Sys.sleep(1)
timer1$stop("event1")
getTimer(timer1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nabat_gql_queries.R
\name{get_presigned_data}
\alias{get_presigned_data}
\title{Get presigned data}
\usage{
get_presigned_data(
project_id,
token,
branch = "beta",
url = NULL,
aws_gql = NULL,
aws_alb = NULL,
docker = FALSE
)
}
\arguments{
\item{token}{List token created from get_nabat_gql_token() or
get_refresh_token()}
\item{branch}{(optional) String that defaults to 'prod' but can also be
'dev'|'beta'|'local'}
\item{url}{(optional) String url to use for GQL}
\item{aws_gql}{(optional) String url to use in aws}
\item{aws_alb}{(optional) String url to use in aws}
\item{docker}{(optional) Boolean if being run in docker container or not}
\item{file_path}{String full path to CSV file for preview}
\item{survey_type}{(optional) String 'bulk_sae' | 'bulk_mae' | 'bulk_cc'}
}
\description{
Returns a uuid and presigned url to upload a csv into the AWS bucket
}
|
/man/get_presigned_data.Rd
|
permissive
|
tinazilla/nabatr-1
|
R
| false
| true
| 962
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nabat_gql_queries.R
\name{get_presigned_data}
\alias{get_presigned_data}
\title{Get presigned data}
\usage{
get_presigned_data(
project_id,
token,
branch = "beta",
url = NULL,
aws_gql = NULL,
aws_alb = NULL,
docker = FALSE
)
}
\arguments{
\item{token}{List token created from get_nabat_gql_token() or
get_refresh_token()}
\item{branch}{(optional) String that defaults to 'prod' but can also be
'dev'|'beta'|'local'}
\item{url}{(optional) String url to use for GQL}
\item{aws_gql}{(optional) String url to use in aws}
\item{aws_alb}{(optional) String url to use in aws}
\item{docker}{(optional) Boolean if being run in docker container or not}
\item{file_path}{String full path to CSV file for preview}
\item{survey_type}{(optional) String 'bulk_sae' | 'bulk_mae' | 'bulk_cc'}
}
\description{
Returns a uuid and presigned url to upload a csv into the AWS bucket
}
|
#' match two taxonomic lists
#'
#' match two taxonomic lists using canonical names
#'
#' @param master master taxonomic list
#' @param checklist match taxonomic list
#' @param masterfld field name for canonical name in master list
#' @param checklistfld field name for canonical name in match list
#' @family list functions
#' @return a list with data frames containing matched records,
#' records only in master and checklist and statistics about the
#' records including Jaccard index
#' @examples
#' \dontrun{
#'master <- data.frame("canonical" = c("Abrothrix longipilis",
#'                                     "Acodon hirtus",
#'                                     "Akodon longipilis apta",
#'                                     "Akodon longipilis castaneus",
#'                                     "Chroeomys jelskii",
#'                                     "Acodon jelskii pyrrhotis"),
#'                     stringsAsFactors = F)
#'checklist <- data.frame("canonical" = c("Abrothrix longipilis",
#'                                        "Akodon longipilis apta",
#'                                        "Akodon longipilis castaneus",
#'                                        "Abrothrix jelskii",
#'                                        "Acodon jelskii pyrrhotis"),
#'                        stringsAsFactors = F)
#' match_lists(master,checklist,"canonical","canonical")
#' }
#' @export
match_lists <- function(master,checklist,masterfld,checklistfld){
  retval <- NULL
  # Standardize the canonical-name columns to fixed internal names
  # ("masterfld"/"checklistfld") so the set operations below can refer to
  # them directly. Empty field names are treated as invalid input.
  if(masterfld==""){
    return(NULL)
  } else {
    master <- rename_column(master,masterfld,"masterfld")
    master$masterfld <- as.character(master$masterfld)
  }
  if(checklistfld==""){
    return(NULL)
  } else {
    checklist <- rename_column(checklist,checklistfld,"checklistfld")
    checklist$checklistfld <- as.character(checklist$checklistfld)
  }
  # Partition records: in both lists, only in master, only in checklist.
  # %!in% is presumably a package-internal negation of %in% (defined
  # elsewhere in the package).
  retval$matchlist <- master[which(master$masterfld %in% checklist$checklistfld),]
  retval$onlymaster <- master[which(master$masterfld %!in% checklist$checklistfld),]
  retval$onlychecklist <- checklist[which(checklist$checklistfld %!in% master$masterfld),]
  # Restore the caller's original column names before returning.
  retval$matchlist <- rename_column(retval$matchlist,"masterfld",masterfld)
  retval$onlymaster <- rename_column(retval$onlymaster,"masterfld",masterfld)
  retval$onlychecklist <- rename_column(retval$onlychecklist,"checklistfld",checklistfld)
  # Summary statistics: record counts per partition plus the Jaccard
  # index (|intersection| / |union|).
  retval$stat$masterrec <- dim(master)[1]
  retval$stat$checkrec <- dim(checklist)[1]
  retval$stat$match <- dim(retval$matchlist)[1]
  retval$stat$onlymaster <- dim(retval$onlymaster)[1]
  retval$stat$onlychecklist <- dim(retval$onlychecklist)[1]
  # NOTE(review): the element name "jacard" (sic) is part of the returned
  # interface; renaming it to "jaccard" would break downstream callers.
  retval$stat$jacard <- dim(retval$matchlist)[1] / ( dim(retval$matchlist)[1] +
                                                       dim(retval$onlymaster)[1] +
                                                       dim(retval$onlychecklist)[1] )
  return(retval)
}
|
/R/match_lists.R
|
no_license
|
SrishtiJ1305/taxotools
|
R
| false
| false
| 2,849
|
r
|
#' match two taxonomic lists
#'
#' match two taxonomic lists using canonical names
#'
#' @param master master taxonomic list
#' @param checklist match taxonomic list
#' @param masterfld field name for canonical name in master list
#' @param checklistfld field name for canonical name in match list
#' @family list functions
#' @return a list with data frames containing matched records,
#' records only in master and cheklist and statistics about the
#' records including Jaccard index
#' @examples
#' \dontrun{
#'master <- data.frame("canonical" = c("Abrothrix longipilis",
#' "Acodon hirtus",
#' "Akodon longipilis apta",
#' "Akodon longipilis castaneus",
#' "Chroeomys jelskii",
#' "Acodon jelskii pyrrhotis"),
#' stringsAsFactors = F)
#'checklist <- data.frame("canonical" = c("Abrothrix longipilis",
#' "Akodon longipilis apta",
#' "Akodon longipilis castaneus",
#' "Abrothrix jelskii",
#' "Acodon jelskii pyrrhotis"),
#' stringsAsFactors = F)
#' match_lists(master,checklist,"canonical","canonical")
#' }
#' @export
match_lists <- function(master,checklist,masterfld,checklistfld){
retval <- NULL
if(masterfld==""){
return(NULL)
} else {
master <- rename_column(master,masterfld,"masterfld")
master$masterfld <- as.character(master$masterfld)
}
if(checklistfld==""){
return(NULL)
} else {
checklist <- rename_column(checklist,checklistfld,"checklistfld")
checklist$checklistfld <- as.character(checklist$checklistfld)
}
retval$matchlist <- master[which(master$masterfld %in% checklist$checklistfld),]
retval$onlymaster <- master[which(master$masterfld %!in% checklist$checklistfld),]
retval$onlychecklist <- checklist[which(checklist$checklistfld %!in% master$masterfld),]
retval$matchlist <- rename_column(retval$matchlist,"masterfld",masterfld)
retval$onlymaster <- rename_column(retval$onlymaster,"masterfld",masterfld)
retval$onlychecklist <- rename_column(retval$onlychecklist,"checklistfld",checklistfld)
retval$stat$masterrec <- dim(master)[1]
retval$stat$checkrec <- dim(checklist)[1]
retval$stat$match <- dim(retval$matchlist)[1]
retval$stat$onlymaster <- dim(retval$onlymaster)[1]
retval$stat$onlychecklist <- dim(retval$onlychecklist)[1]
retval$stat$jacard <- dim(retval$matchlist)[1] / ( dim(retval$matchlist)[1] +
dim(retval$onlymaster)[1] +
dim(retval$onlychecklist)[1] )
return(retval)
}
|
####################################################################################
###
### This script provides an overview of working with spatial data in R
### for the GLSC-LSC all hands meeting in Cleveland, OH
###
### Created 2020-02-18 by Daniel B. Fitzgerald
####################################################################################
# set directory
#setwd("~/Courses and Training/LSC All Hands")
# install packages if necessary
#install.packages(c("sf", "raster", "tmap", "spData"))
# attach packages
library("sf") # functions for vector data
library("raster") # functions for raster data
library("tmap") # provides mapping functionality
library("spData") # provides spatial datasets for illustration purposes
# load package data
data("coffee_data")
data("rivers")
data("us_states")
# generate file path for reading in shapefile
path.to.file <- system.file("shapes/world.gpkg", package="spData")
# load shapefile
world <- st_read(path.to.file)
# typing the variable name at the console will provide a brief overview of the object
world
# explore the class of world (both a data.frame and an sf object)
class(world)
# all the normal functionality for data frames is retained
head(world)
str(world)
colnames(world)
subset(world, name_long == "United States")
# notice that the geometry column is "sticky"
world[ , 1:4]
world[ , 1:2]
# this geometry column is what allows us to retain the spatial representation as we subset the data frame
plot(world[ , 2])
plot(world[5 , 2])
# get a feel for how sf handles complicated geometries using the US as example
str(world$geom[5])
head(st_coordinates(world[5, "geom"]), n=15)
# identify coordinate reference system
st_crs(world)
# reproject layer to new coordinate reference system (Web Mercator, EPSG:3857)
world.proj <- st_transform(world, crs = 3857)
# plot comparison graph
par(mfrow = c(1,2))
plot(st_geometry(world), main = "WGS84 (EPSG: 4326)")
# fixed: title previously said "EPSG: 3957" but the transform above uses 3857
plot(st_geometry(world.proj), main = "Mercator (EPSG: 3857)")
# reset graphics parameters
par(mfrow = c(1,1))
# join non spatial data frame (coffee) with spatial layer (world) note: warning OK for our purposes
world.coffee <- dplyr::left_join(world, coffee_data, by = "name_long")
# plot 2016 coffee production using base plotting functions
plot(world.coffee["coffee_production_2016"])
# Begin exploring tmap plotting
coffee.map <- tm_shape(world.coffee) +
  tm_polygons(col = "coffee_production_2016", title = "2016 Coffee Production", palette = "YlOrRd") +
  tm_layout(legend.bg.color = "white", legend.frame = TRUE)
print(coffee.map)
# change tmap mode to interactive
tmap_mode("view")
print(coffee.map)
# return to plot mode
tmap_mode("plot")
### Bring in another vector layer to show spatial subsets, joins, and zonal stats with rasters
# reproject US States Data
us <- st_transform(us_states, crs=4326)
# the rivers layer has a larger spatial extent than we need
tm_shape(rivers) +tm_lines(col="blue") +
  tm_shape(us) + tm_polygons(col="grey", alpha = 0.3)
# We can clip (spatial subsetting) one spatial feature by another using the normal [ , ] notation
us.rivers <- rivers[us, ]
# plot clipped layer
tm_shape(us) + tm_polygons(col="grey") +
  tm_shape(us.rivers) +tm_lines(col="blue")
# generate random points of "species" occurrence
points <- st_sample(us, size = 1000)
# view generated points
tm_shape(us) + tm_polygons(col="grey") +
  tm_shape(us.rivers) + tm_lines(col="blue") +
  tm_shape(points) + tm_symbols(col="black")
# calculate number of occurrences by state using spatial intersection
us$npoints <- lengths(st_intersects(us, points))
# plot map showing number of records by state
tm_shape(us) + tm_polygons(col="npoints", palette = "Reds", title = "Number of Records")
# add points overlay and demonstrate transparency
tm_shape(us) + tm_polygons(col="npoints", palette = "Reds", title = "Number of Records") +
  tm_shape(points) + tm_symbols(col = "grey", alpha = 0.4)
# generate a random raster layer of "land cover"
lu <- raster(xmn = st_bbox(us)[1], xmx=st_bbox(us)[3], ymn = st_bbox(us)[2], ymx=st_bbox(us)[4], resolution = 0.2)
lu <- setValues(lu, sample(1:20, ncell(lu), replace = TRUE))
plot(lu)
plot(st_geometry(us), lwd=2, add=TRUE)
# rasterize a polygon layer to speed processing (e.g., if we have numerous small catchments or watersheds)
us$GEOID <- as.numeric(us$GEOID)
us.rast <- rasterize(us, lu, field = "GEOID")
plot(us.rast)
# Cross-tabulate number of LU pixels
tab <- crosstab(us.rast, lu)
head(tab)
# convert to percentage of each LU type
lu.prct <- t(apply(tab, 1, FUN=function(x) x/sum(x)))
lu.prct <- round(lu.prct, 4)
head(lu.prct)
# reorder polygons by GEOID to match raster ordering
rastord <- order(us$GEOID)
# merge landscape composition results with US polygon layer for LU 1:4
us[rastord, c("lu1", "lu2", "lu3", "lu4")] <- lu.prct[ , 1:4]
# plot percentage of Land Use 1 by state
# fixed: legend title previously read "Perctange of LU1"
tm_shape(us) + tm_polygons(col = "lu1", palette = "Reds", title = "Percentage of LU1")
# save results to shapefile in working directory
st_write(us, "Example_shapefile.shp")
### END OF SPATIAL DEMO
|
/spatial_data_tutorial.R
|
no_license
|
JVAdams/vizzy
|
R
| false
| false
| 5,135
|
r
|
####################################################################################
###
### This script provides an overview of working with spatial data in R
### for the GLSC-LSC all hands meeting in Cleveland, OH
###
### Created 2020-02-18 by Daniel B. Fitzgerald
####################################################################################
# set directory
#setwd("~/Courses and Training/LSC All Hands")
# install packages if necessary
#install.packages(c("sf", "raster", "tmap", "spData"))
# attach packages
library("sf") # functions for vector data
library("raster") # functions for raster data
library("tmap") # provides mapping functionality
library("spData") # provides spatial datasets for illustration purposes
# load package data
data("coffee_data")
data("rivers")
data("us_states")
# generate file path for reading in shapefile
path.to.file <- system.file("shapes/world.gpkg", package="spData")
# load shapefile
world <- st_read(path.to.file)
# typing the variable name at the console will provide a brief overview of the object
world
# explore the class of world (both a data.frame and an sf object)
class(world)
# all the normal functionality for data frames is retained
head(world)
str(world)
colnames(world)
subset(world, name_long == "United States")
# notice that the geometry column is "sticky"
world[ , 1:4]
world[ , 1:2]
# this geometry column is what allows us to retain the spatial representation as we subset the data frame
plot(world[ , 2])
plot(world[5 , 2])
# get a feel for how sf handles complicated geometries using the US as example
str(world$geom[5])
head(st_coordinates(world[5, "geom"]), n=15)
# identify coordinate reference system
st_crs(world)
# reproject layer to new coordinate reference system (Web Mercator, EPSG:3857)
world.proj <- st_transform(world, crs = 3857)
# plot comparison graph
par(mfrow = c(1,2))
plot(st_geometry(world), main = "WGS84 (EPSG: 4326)")
# fixed: title previously said "EPSG: 3957" but the transform above uses 3857
plot(st_geometry(world.proj), main = "Mercator (EPSG: 3857)")
# reset graphics parameters
par(mfrow = c(1,1))
# join non spatial data frame (coffee) with spatial layer (world) note: warning OK for our purposes
world.coffee <- dplyr::left_join(world, coffee_data, by = "name_long")
# plot 2016 coffee production using base plotting functions
plot(world.coffee["coffee_production_2016"])
# Begin exploring tmap plotting
coffee.map <- tm_shape(world.coffee) +
  tm_polygons(col = "coffee_production_2016", title = "2016 Coffee Production", palette = "YlOrRd") +
  tm_layout(legend.bg.color = "white", legend.frame = TRUE)
print(coffee.map)
# change tmap mode to interactive
tmap_mode("view")
print(coffee.map)
# return to plot mode
tmap_mode("plot")
### Bring in another vector layer to show spatial subsets, joins, and zonal stats with rasters
# reproject US States Data
us <- st_transform(us_states, crs=4326)
# the rivers layer has a larger spatial extent than we need
tm_shape(rivers) +tm_lines(col="blue") +
  tm_shape(us) + tm_polygons(col="grey", alpha = 0.3)
# We can clip (spatial subsetting) one spatial feature by another using the normal [ , ] notation
us.rivers <- rivers[us, ]
# plot clipped layer
tm_shape(us) + tm_polygons(col="grey") +
  tm_shape(us.rivers) +tm_lines(col="blue")
# generate random points of "species" occurrence
points <- st_sample(us, size = 1000)
# view generated points
tm_shape(us) + tm_polygons(col="grey") +
  tm_shape(us.rivers) + tm_lines(col="blue") +
  tm_shape(points) + tm_symbols(col="black")
# calculate number of occurrences by state using spatial intersection
us$npoints <- lengths(st_intersects(us, points))
# plot map showing number of records by state
tm_shape(us) + tm_polygons(col="npoints", palette = "Reds", title = "Number of Records")
# add points overlay and demonstrate transparency
tm_shape(us) + tm_polygons(col="npoints", palette = "Reds", title = "Number of Records") +
  tm_shape(points) + tm_symbols(col = "grey", alpha = 0.4)
# generate a random raster layer of "land cover"
lu <- raster(xmn = st_bbox(us)[1], xmx=st_bbox(us)[3], ymn = st_bbox(us)[2], ymx=st_bbox(us)[4], resolution = 0.2)
lu <- setValues(lu, sample(1:20, ncell(lu), replace = TRUE))
plot(lu)
plot(st_geometry(us), lwd=2, add=TRUE)
# rasterize a polygon layer to speed processing (e.g., if we have numerous small catchments or watersheds)
us$GEOID <- as.numeric(us$GEOID)
us.rast <- rasterize(us, lu, field = "GEOID")
plot(us.rast)
# Cross-tabulate number of LU pixels
tab <- crosstab(us.rast, lu)
head(tab)
# convert to percentage of each LU type
lu.prct <- t(apply(tab, 1, FUN=function(x) x/sum(x)))
lu.prct <- round(lu.prct, 4)
head(lu.prct)
# reorder polygons by GEOID to match raster ordering
rastord <- order(us$GEOID)
# merge landscape composition results with US polygon layer for LU 1:4
us[rastord, c("lu1", "lu2", "lu3", "lu4")] <- lu.prct[ , 1:4]
# plot percentage of Land Use 1 by state
# fixed: legend title previously read "Perctange of LU1"
tm_shape(us) + tm_polygons(col = "lu1", palette = "Reds", title = "Percentage of LU1")
# save results to shapefile in working directory
st_write(us, "Example_shapefile.shp")
### END OF SPATIAL DEMO
|
library(data.table)
library(dplyr)
library(ggplot2)
library(magrittr)
library(maps)
library(zoo)
library(stats)
library(reshape2)
library(geosphere)
library(ROCR)
#library(grid)
#library(factoextra)
library(party)
library(partykit)
setwd(".")
# NOTE(review): merged_agg_test is not created in this script; it is assumed to
# already be in the workspace from an upstream script -- confirm before running.
# Factor levels are taken from the test set so train/test encodings agree.
traps <- unique(merged_agg_test$TrapFactor)
species <- unique(merged_agg_test$SpeciesFactor)
#Random Forests
random_fst_data <- fread("merged_data.csv", data.table = TRUE, stringsAsFactors=TRUE)
random_fst_data$TrapFactor <- factor(random_fst_data$TrapFactor, levels=traps)
random_fst_data$SpeciesFactor <- factor(random_fst_data$SpeciesFactor, levels=species)
random_fst_data$ReadingDate <- factor(random_fst_data$ReadingDate)
set.seed(13147)
# Fit the conditional inference tree. Fixed: the model is now assigned to
# data.rf, the name every later call uses (the original assigned to `data`,
# leaving data.rf undefined and shadowing utils::data).
data.rf <- ctree(V2 ~ TrapFactor+SpeciesFactor+weekNumber+Tavg+Tmax+Tmin+PrecipTotal+ThreedaymedianMinTmp+ThreedaymedianTmp+AvgSpeed+StnPressure , data = random_fst_data, control = ctree_control( mtry=4))
# Out-of-bag predictions for the first two rows.
km_curve <- treeresponse(data.rf, newdata=random_fst_data[1:2, ], OOB = TRUE)
# NOTE(review): varimp() is documented for cforest ensembles; confirm it
# accepts a single ctree fit.
plot(varimp(data.rf))
plot(data.rf)
print(data.rf)
|
/input/cTreeModel.R
|
no_license
|
sfines/west_nile
|
R
| false
| false
| 1,014
|
r
|
library(data.table)
library(dplyr)
library(ggplot2)
library(magrittr)
library(maps)
library(zoo)
library(stats)
library(reshape2)
library(geosphere)
library(ROCR)
#library(grid)
#library(factoextra)
library(party)
library(partykit)
setwd(".")
# NOTE(review): merged_agg_test is not created in this script; it is assumed to
# already be in the workspace from an upstream script -- confirm before running.
# Factor levels are taken from the test set so train/test encodings agree.
traps <- unique(merged_agg_test$TrapFactor)
species <- unique(merged_agg_test$SpeciesFactor)
#Random Forests
random_fst_data <- fread("merged_data.csv", data.table = TRUE, stringsAsFactors=TRUE)
random_fst_data$TrapFactor <- factor(random_fst_data$TrapFactor, levels=traps)
random_fst_data$SpeciesFactor <- factor(random_fst_data$SpeciesFactor, levels=species)
random_fst_data$ReadingDate <- factor(random_fst_data$ReadingDate)
set.seed(13147)
# Fit the conditional inference tree. Fixed: the model is now assigned to
# data.rf, the name every later call uses (the original assigned to `data`,
# leaving data.rf undefined and shadowing utils::data).
data.rf <- ctree(V2 ~ TrapFactor+SpeciesFactor+weekNumber+Tavg+Tmax+Tmin+PrecipTotal+ThreedaymedianMinTmp+ThreedaymedianTmp+AvgSpeed+StnPressure , data = random_fst_data, control = ctree_control( mtry=4))
# Out-of-bag predictions for the first two rows.
km_curve <- treeresponse(data.rf, newdata=random_fst_data[1:2, ], OOB = TRUE)
# NOTE(review): varimp() is documented for cforest ensembles; confirm it
# accepts a single ctree fit.
plot(varimp(data.rf))
plot(data.rf)
print(data.rf)
|
library(randomForest)
library(randomForestCI)
library(Hmisc)
rm(list = ls())
# Simulation dimensions: training size and number of predictors.
n_train <- 1000
n_pred <- 20
# Training data: Gaussian predictors with a coin-flip label (no signal).
x_train <- matrix(rnorm(n_train * n_pred), n_train, n_pred)
y_train <- factor(rbinom(n_train, 1, 0.5))
# Independent test inputs drawn from the same predictor distribution.
n_test <- 1000
x_test <- matrix(rnorm(n_test * n_pred), n_test, n_pred)
# Fit a forest keeping in-bag counts, as required by the infinitesimal
# jackknife variance estimator.
forest <- randomForest(x_train, y_train, keep.inbag = TRUE, ntree = 1000, replace = FALSE, sampsize = n_train/2)
jack <- randomForestInfJack(forest, x_test, calibrate = TRUE)
# Point estimates and calibrated standard errors for each test case.
pred_mean <- jack$y.hat
pred_se <- sqrt(jack$var.hat)
# 95% normal-approximation interval around each prediction.
ci_upper <- pred_mean + 1.96 * pred_se
ci_lower <- pred_mean - 1.96 * pred_se
errbar(1:n_test, pred_mean, ci_upper, ci_lower)
# Fraction of intervals containing the true class probability (0.5).
contains_truth <- ci_upper >= 0.5 & ci_lower <= 0.5
mean(contains_truth)
|
/causalforest_paper_simu/non_honest_test.R
|
no_license
|
alejandroschuler/causal_effect_estimation
|
R
| false
| false
| 546
|
r
|
library(randomForest)
library(randomForestCI)
library(Hmisc)
rm(list = ls())
# Simulation dimensions: training size and number of predictors.
n_train <- 1000
n_pred <- 20
# Training data: Gaussian predictors with a coin-flip label (no signal).
x_train <- matrix(rnorm(n_train * n_pred), n_train, n_pred)
y_train <- factor(rbinom(n_train, 1, 0.5))
# Independent test inputs drawn from the same predictor distribution.
n_test <- 1000
x_test <- matrix(rnorm(n_test * n_pred), n_test, n_pred)
# Fit a forest keeping in-bag counts, as required by the infinitesimal
# jackknife variance estimator.
forest <- randomForest(x_train, y_train, keep.inbag = TRUE, ntree = 1000, replace = FALSE, sampsize = n_train/2)
jack <- randomForestInfJack(forest, x_test, calibrate = TRUE)
# Point estimates and calibrated standard errors for each test case.
pred_mean <- jack$y.hat
pred_se <- sqrt(jack$var.hat)
# 95% normal-approximation interval around each prediction.
ci_upper <- pred_mean + 1.96 * pred_se
ci_lower <- pred_mean - 1.96 * pred_se
errbar(1:n_test, pred_mean, ci_upper, ci_lower)
# Fraction of intervals containing the true class probability (0.5).
contains_truth <- ci_upper >= 0.5 & ci_lower <= 0.5
mean(contains_truth)
|
#####################################################################
### Code to run models used in: ###
### Foster, Hill & Lyons, 2017, JRSS (Series C, Applied Stats) ###
### Written by N. Hill Nov 2015. ###
### Works with RCPmod version 2.142 ###
#####################################################################
####################
## Data prep
####################
# load libraries and source file and datafile
library(RCPmod)
library(raster)
library(rasterVis)
library(tidyr)
# contains additional functions for data transformations, plotting etc.
source("RCP_Helper_Functions.R")
#read in ID, biological and environmental data file.
#Note: this file only contains species and environmental variables used in RCP models.
#Original data and metadata can be found here:
#Hill, N. and Lamb, T. (2015) HIMI Demersal Fish Update and Environmental Covariates.
#Australian Antarctic Data Centre
#doi:10.4225/15/5671FDEC717B4
fish<-read.csv("SubAntFish_bioenv.csv")
# species count columns occupy positions 9:23 of the input file
species <-names(fish)[9:23]
#generate datafile with orthogonal polynomial terms (degree-2 for each covariate)
rcp_env_vars<-c("Long_MP", "log_depth", "caisom_floor_temperature")
rcp_poly<-poly_data(poly_vars=rcp_env_vars, degree=c(2,2,2),
                    id_vars="HaulIndex",sample_vars="Season",
                    species_vars=species, data=fish)
rcp_data<-rcp_poly$rcp_data
#Load rasters and create dataframe of prediction space
pred_masked<-brick("pred_masked")
#convert rasters to dataframe and log transform depth (bathymetry is negative, so negate before log)
pred_space_rcp<-as.data.frame(rasterToPoints(
  subset(pred_masked, c("Long_MP", "bathymetry", "caisom_floor_temperature"))))
pred_space_rcp<-na.omit(pred_space_rcp)
pred_space_rcp$log_depth<-log(pred_space_rcp$bathymetry* -1)
# Transform using stored polys, predict and plot results
rcp_poly_pred<-poly_pred_space(pred_space_rcp, rcp_poly$poly_output,
                               sampling_vals="Autumn/Winter",
                               sampling_name="Season", sampling_factor_levels = c("Autumn/Winter","Spring","summer"))
#create RCP formula: all species as a multivariate response vs. the polynomial covariate columns 18:23
form<- as.formula(paste("cbind(",paste(species, collapse=", "),")~",paste(names(rcp_data)[18:23], collapse="+")))
########################
## Run RCPs
########################
# With Season/Year as sampling effect----
#Note: No seed was set so if running code from here will get slightly different results.
#This bit will take a while on a single core machine
# to get the same results and save some time, load the "RCPsamp_fin.RDS" and skip to line 102
nstarts<-1000
max.nRCP<-6
nRCPs_samp <- list()
# Fit models with 1..max.nRCP groups from many random starts to avoid local optima.
for( ii in 1:max.nRCP)
  nRCPs_samp[[ii]] <- regimix.multifit(form.RCP=form, form.spp= ~ Season, data=rcp_data, nRCP=ii,
                                       inits="random2", nstart=nstarts, dist="NegBin", mc.cores=1)
#get BICs
RCPsamp_BICs <- sapply( nRCPs_samp, function(x) sapply( x, function(y) y$BIC))
#Are any RCPs consisting of a small number of sites? (A posteriori) If so remove.
RCPsamp_minPosteriorSites <- cbind( 181, sapply( nRCPs_samp[-1], function(y) sapply( y, function(x) min( colSums( x$postProbs)))))
RCPsamp_ObviouslyBad <- RCPsamp_minPosteriorSites < 2
RCPsamp_BICs[RCPsamp_ObviouslyBad] <- NA
#plot minimum BIC for each nRCP
RCPsamp_minBICs <- apply( RCPsamp_BICs, 2, min, na.rm=TRUE)
plot( 1:max.nRCP, RCPsamp_minBICs, type='b', ylab="BIC", xlab="nRCP", pch=20)
points( rep( 1:max.nRCP, each=nrow( RCPsamp_BICs)), RCPsamp_BICs, pch=20)
#choose 3 RCPs and run best model from above (to get additional tidbits in model output for later steps)
RCPsamp_goodun <- which.min( RCPsamp_BICs[,3])
control <- list( optimise=FALSE, quiet=FALSE)
RCPsamp_fin<-regimix(form.RCP=form, form.spp=~Season,
                     nRCP=3, data=rcp_data, dist="NegBin", inits = unlist( nRCPs_samp[[3]][[RCPsamp_goodun]]$coef), control=control)
rm(RCPsamp_BICs,RCPsamp_minPosteriorSites, RCPsamp_ObviouslyBad, RCPsamp_minBICs, RCPsamp_goodun, control)
#plot model diagnostics
# residual plots
plot.regimix(RCPsamp_fin, type="RQR", fitted.scale="log") #looks OK
#Cooks Distance Plots
#takes a while
# Fixed: original read "times=, RCPsamp_fin$n" -- the stray comma made `times` a
# missing argument and passed n positionally to the wrong formal.
tmp <- stability.regimix(RCPsamp_fin, oosSizeRange=c(1,2,3,4,5,6,7,8,9,10,20,30,40,50), mc.cores=1, times=RCPsamp_fin$n, doPlot=FALSE)
plot( tmp, minWidth=2, ncuts=111)
# examine dispersion parameter for negative Binomial
hist(RCPsamp_fin$coefs$disp, xlab="Dispersion Parameter",
     main="Negative Binomial Model", col="grey", cex.main=0.8, cex=0.8, cex.lab=0.8 )
#generate bootstrap estimates of parameters
#again may take a while and give slightly different results. To avoid this, load "RCPsamp_boots.RDS"
rcpsamp_boots<-regiboot(RCPsamp_fin, type="BayesBoot", nboot=1000, mc.cores=1)
#### Average, SD and CI of species abundances in each RCP
#for some reason will(?) produce warnings.
RCP_abund_samp<-Sp_abund_all(rcpsamp_boots)
#Get autumn values and format as "mean (sd)" strings for a 15-species x 3-RCP summary table
aut_samp<-as.data.frame(matrix(data=paste0(sprintf("%.2f", round(RCP_abund_samp$autumn$mean,2)), " (", sprintf("%.2f",round(RCP_abund_samp$autumn$sd,2)), ")"),
                               ncol=3, nrow=15, byrow=TRUE))
names(aut_samp)<-paste0("RCP", 1:3)
# replace the "." in R-safe species names with spaces for display
rownames(aut_samp)<- gsub("."," ", species, fixed=TRUE)
## plot of sampling factor effects
sampling_dotplot2(RCPsamp_fin,rcpsamp_boots,legend_fact=c("Spring", "Summer"), col=c("black", "red"), lty=c(1,2))
#Spatial Predictions
RCPsamp_SpPreds<-predict.regimix(object=RCPsamp_fin, object2=rcpsamp_boots, newdata=rcp_poly_pred)
predict_maps2_SDF2(RCPsamp_SpPreds, pred_space=pred_space_rcp, pred_crop=pred_masked, nRCP=3)
###Run models without sampling effect------
#Note: No seed was set so if running code from here will get slightly different results.
#This bit will take a while on a single core machine
# to get the same results and save some time, load the "RCPNosamp_fin.RDS" and skip to line 160
nstarts<-1000
max.nRCP<-6
nRCPs_NoSamp <- list()
for( ii in 1:max.nRCP)
  nRCPs_NoSamp[[ii]] <- regimix.multifit(form.RCP=form, data=rcp_data, nRCP=ii,
                                         inits="random2", nstart=nstarts, dist="NegBin", mc.cores=1)
#get BICs
RCPNoSamp_BICs <- sapply( nRCPs_NoSamp, function(x) sapply( x, function(y) y$BIC))
#Are any RCPs consisting of a small number of sites? (A posteriori) If so remove.
RCPNoSamp_minPosteriorSites <- cbind( 181, sapply( nRCPs_NoSamp[-1], function(y) sapply( y, function(x) min( colSums( x$postProbs)))))
RCPNoSamp_ObviouslyBad <- RCPNoSamp_minPosteriorSites < 2
RCPNoSamp_BICs[RCPNoSamp_ObviouslyBad] <- NA
#plot minimum BIC for each nRCP
RCPNoSamp_minBICs <- apply( RCPNoSamp_BICs, 2, min, na.rm=TRUE)
plot( 1:max.nRCP, RCPNoSamp_minBICs, type='b', ylab="BIC", xlab="nRCP", pch=20)
points( rep( 1:max.nRCP, each=nrow( RCPNoSamp_BICs)), RCPNoSamp_BICs, pch=20)
#choose 3 RCPs ---
RCPNoSamp_goodun <- which.min( RCPNoSamp_BICs[,3])
control <- list( optimise=FALSE, quiet=FALSE)
RCPNoSamp_fin<-regimix(form.RCP=form,
                       nRCP=3, data=rcp_data, dist="NegBin", inits = unlist( nRCPs_NoSamp[[3]][[RCPNoSamp_goodun]]$coef), control=control)
# Fixed: original rm() listed "RCPNosamp_minBICs" (lowercase s), which does not
# exist; the call would warn and leave RCPNoSamp_minBICs behind.
rm(RCPNoSamp_BICs,RCPNoSamp_minPosteriorSites, RCPNoSamp_ObviouslyBad, RCPNoSamp_minBICs, RCPNoSamp_goodun)
#plot model diagnostics
#residual plot
plot.regimix(RCPNoSamp_fin, type="RQR", fitted.scale="log")
#Cooks Distance Plots
#will take a while to run
# Fixed: resample count now comes from the no-sampling-effect fit itself
# (RCPNoSamp_fin$n) rather than RCPsamp_fin$n; both fits use rcp_data, so the
# value is identical, but this removes the dependence on the earlier model object.
tmp <- stability.regimix(RCPNoSamp_fin, oosSizeRange=c(1,2,3,4,5,6,7,8,9,10,20,30,40,50), mc.cores=1, times=RCPNoSamp_fin$n)
plot( tmp, minWidth=2, ncuts=111)
#generate bootstrap estimates of parameters
#again may take a while and give slightly different results. To avoid this, load "RCPNoSamp_boots.RDS"
rcpNoSamp_boots<-regiboot(RCPNoSamp_fin, type="BayesBoot", nboot=1000, mc.cores=1)
#Spatial Predictions
RCPNoSamp_SpPreds<-predict.regimix(object=RCPNoSamp_fin, object2=rcpNoSamp_boots, newdata=rcp_poly_pred)
predict_maps2_SDF2(RCPNoSamp_SpPreds, pred_space=pred_space_rcp, pred_crop=pred_masked, nRCP=3)
|
/SubAntFish.R
|
no_license
|
mitchest/rcp-survey-artifacts
|
R
| false
| false
| 7,957
|
r
|
#####################################################################
### Code to run models used in: ###
### Foster, Hill & Lyons, 2017, JRSS (Series C, Applied Stats) ###
### Written by N. Hill Nov 2015. ###
### Works with RCPmod version 2.142 ###
#####################################################################
####################
## Data prep
####################
# load libraries and source file and datafile
library(RCPmod)
library(raster)
library(rasterVis)
library(tidyr)
# contains additional functions for data transformations, plotting etc.
source("RCP_Helper_Functions.R")
#read in ID, biological and environmental data file.
#Note: this file only contains species and environmental variables used in RCP models.
#Original data and metadata can be found here:
#Hill, N. and Lamb, T. (2015) HIMI Demersal Fish Update and Environmental Covariates.
#Australian Antarctic Data Centre
#doi:10.4225/15/5671FDEC717B4
fish<-read.csv("SubAntFish_bioenv.csv")
# species count columns occupy positions 9:23 of the input file
species <-names(fish)[9:23]
#generate datafile with orthogonal polynomial terms (degree-2 for each covariate)
rcp_env_vars<-c("Long_MP", "log_depth", "caisom_floor_temperature")
rcp_poly<-poly_data(poly_vars=rcp_env_vars, degree=c(2,2,2),
                    id_vars="HaulIndex",sample_vars="Season",
                    species_vars=species, data=fish)
rcp_data<-rcp_poly$rcp_data
#Load rasters and create dataframe of prediction space
pred_masked<-brick("pred_masked")
#convert rasters to dataframe and log transform depth (bathymetry is negative, so negate before log)
pred_space_rcp<-as.data.frame(rasterToPoints(
  subset(pred_masked, c("Long_MP", "bathymetry", "caisom_floor_temperature"))))
pred_space_rcp<-na.omit(pred_space_rcp)
pred_space_rcp$log_depth<-log(pred_space_rcp$bathymetry* -1)
# Transform using stored polys, predict and plot results
rcp_poly_pred<-poly_pred_space(pred_space_rcp, rcp_poly$poly_output,
                               sampling_vals="Autumn/Winter",
                               sampling_name="Season", sampling_factor_levels = c("Autumn/Winter","Spring","summer"))
#create RCP formula: all species as a multivariate response vs. the polynomial covariate columns 18:23
form<- as.formula(paste("cbind(",paste(species, collapse=", "),")~",paste(names(rcp_data)[18:23], collapse="+")))
########################
## Run RCPs
########################
# With Season/Year as sampling effect----
#Note: No seed was set so if running code from here will get slightly different results.
#This bit will take a while on a single core machine
# to get the same results and save some time, load the "RCPsamp_fin.RDS" and skip to line 102
nstarts<-1000
max.nRCP<-6
nRCPs_samp <- list()
# Fit models with 1..max.nRCP groups from many random starts to avoid local optima.
for( ii in 1:max.nRCP)
  nRCPs_samp[[ii]] <- regimix.multifit(form.RCP=form, form.spp= ~ Season, data=rcp_data, nRCP=ii,
                                       inits="random2", nstart=nstarts, dist="NegBin", mc.cores=1)
#get BICs
RCPsamp_BICs <- sapply( nRCPs_samp, function(x) sapply( x, function(y) y$BIC))
#Are any RCPs consisting of a small number of sites? (A posteriori) If so remove.
RCPsamp_minPosteriorSites <- cbind( 181, sapply( nRCPs_samp[-1], function(y) sapply( y, function(x) min( colSums( x$postProbs)))))
RCPsamp_ObviouslyBad <- RCPsamp_minPosteriorSites < 2
RCPsamp_BICs[RCPsamp_ObviouslyBad] <- NA
#plot minimum BIC for each nRCP
RCPsamp_minBICs <- apply( RCPsamp_BICs, 2, min, na.rm=TRUE)
plot( 1:max.nRCP, RCPsamp_minBICs, type='b', ylab="BIC", xlab="nRCP", pch=20)
points( rep( 1:max.nRCP, each=nrow( RCPsamp_BICs)), RCPsamp_BICs, pch=20)
#choose 3 RCPs and run best model from above (to get additional tidbits in model output for later steps)
RCPsamp_goodun <- which.min( RCPsamp_BICs[,3])
control <- list( optimise=FALSE, quiet=FALSE)
RCPsamp_fin<-regimix(form.RCP=form, form.spp=~Season,
                     nRCP=3, data=rcp_data, dist="NegBin", inits = unlist( nRCPs_samp[[3]][[RCPsamp_goodun]]$coef), control=control)
rm(RCPsamp_BICs,RCPsamp_minPosteriorSites, RCPsamp_ObviouslyBad, RCPsamp_minBICs, RCPsamp_goodun, control)
#plot model diagnostics
# residual plots
plot.regimix(RCPsamp_fin, type="RQR", fitted.scale="log") #looks OK
#Cooks Distance Plots
#takes a while
# Fixed: original read "times=, RCPsamp_fin$n" -- the stray comma made `times` a
# missing argument and passed n positionally to the wrong formal.
tmp <- stability.regimix(RCPsamp_fin, oosSizeRange=c(1,2,3,4,5,6,7,8,9,10,20,30,40,50), mc.cores=1, times=RCPsamp_fin$n, doPlot=FALSE)
plot( tmp, minWidth=2, ncuts=111)
# examine dispersion parameter for negative Binomial
hist(RCPsamp_fin$coefs$disp, xlab="Dispersion Parameter",
     main="Negative Binomial Model", col="grey", cex.main=0.8, cex=0.8, cex.lab=0.8 )
#generate bootstrap estimates of parameters
#again may take a while and give slightly different results. To avoid this, load "RCPsamp_boots.RDS"
rcpsamp_boots<-regiboot(RCPsamp_fin, type="BayesBoot", nboot=1000, mc.cores=1)
#### Average, SD and CI of species abundances in each RCP
#for some reason will(?) produce warnings.
RCP_abund_samp<-Sp_abund_all(rcpsamp_boots)
#Get autumn values and format as "mean (sd)" strings for a 15-species x 3-RCP summary table
aut_samp<-as.data.frame(matrix(data=paste0(sprintf("%.2f", round(RCP_abund_samp$autumn$mean,2)), " (", sprintf("%.2f",round(RCP_abund_samp$autumn$sd,2)), ")"),
                               ncol=3, nrow=15, byrow=TRUE))
names(aut_samp)<-paste0("RCP", 1:3)
# replace the "." in R-safe species names with spaces for display
rownames(aut_samp)<- gsub("."," ", species, fixed=TRUE)
## plot of sampling factor effects
sampling_dotplot2(RCPsamp_fin,rcpsamp_boots,legend_fact=c("Spring", "Summer"), col=c("black", "red"), lty=c(1,2))
#Spatial Predictions
RCPsamp_SpPreds<-predict.regimix(object=RCPsamp_fin, object2=rcpsamp_boots, newdata=rcp_poly_pred)
predict_maps2_SDF2(RCPsamp_SpPreds, pred_space=pred_space_rcp, pred_crop=pred_masked, nRCP=3)
###Run models without sampling effect------
#Note: No seed was set so if running code from here will get slightly different results.
#This bit will take a while on a single core machine
# to get the same results and save some time, load the "RCPNosamp_fin.RDS" and skip to line 160
nstarts<-1000
max.nRCP<-6
nRCPs_NoSamp <- list()
for( ii in 1:max.nRCP)
  nRCPs_NoSamp[[ii]] <- regimix.multifit(form.RCP=form, data=rcp_data, nRCP=ii,
                                         inits="random2", nstart=nstarts, dist="NegBin", mc.cores=1)
#get BICs
RCPNoSamp_BICs <- sapply( nRCPs_NoSamp, function(x) sapply( x, function(y) y$BIC))
#Are any RCPs consisting of a small number of sites? (A posteriori) If so remove.
RCPNoSamp_minPosteriorSites <- cbind( 181, sapply( nRCPs_NoSamp[-1], function(y) sapply( y, function(x) min( colSums( x$postProbs)))))
RCPNoSamp_ObviouslyBad <- RCPNoSamp_minPosteriorSites < 2
RCPNoSamp_BICs[RCPNoSamp_ObviouslyBad] <- NA
#plot minimum BIC for each nRCP
RCPNoSamp_minBICs <- apply( RCPNoSamp_BICs, 2, min, na.rm=TRUE)
plot( 1:max.nRCP, RCPNoSamp_minBICs, type='b', ylab="BIC", xlab="nRCP", pch=20)
points( rep( 1:max.nRCP, each=nrow( RCPNoSamp_BICs)), RCPNoSamp_BICs, pch=20)
#choose 3 RCPs ---
RCPNoSamp_goodun <- which.min( RCPNoSamp_BICs[,3])
control <- list( optimise=FALSE, quiet=FALSE)
RCPNoSamp_fin<-regimix(form.RCP=form,
                       nRCP=3, data=rcp_data, dist="NegBin", inits = unlist( nRCPs_NoSamp[[3]][[RCPNoSamp_goodun]]$coef), control=control)
# Fixed: original rm() listed "RCPNosamp_minBICs" (lowercase s), which does not
# exist; the call would warn and leave RCPNoSamp_minBICs behind.
rm(RCPNoSamp_BICs,RCPNoSamp_minPosteriorSites, RCPNoSamp_ObviouslyBad, RCPNoSamp_minBICs, RCPNoSamp_goodun)
#plot model diagnostics
#residual plot
plot.regimix(RCPNoSamp_fin, type="RQR", fitted.scale="log")
#Cooks Distance Plots
#will take a while to run
# Fixed: resample count now comes from the no-sampling-effect fit itself
# (RCPNoSamp_fin$n) rather than RCPsamp_fin$n; both fits use rcp_data, so the
# value is identical, but this removes the dependence on the earlier model object.
tmp <- stability.regimix(RCPNoSamp_fin, oosSizeRange=c(1,2,3,4,5,6,7,8,9,10,20,30,40,50), mc.cores=1, times=RCPNoSamp_fin$n)
plot( tmp, minWidth=2, ncuts=111)
#generate bootstrap estimates of parameters
#again may take a while and give slightly different results. To avoid this, load "RCPNoSamp_boots.RDS"
rcpNoSamp_boots<-regiboot(RCPNoSamp_fin, type="BayesBoot", nboot=1000, mc.cores=1)
#Spatial Predictions
RCPNoSamp_SpPreds<-predict.regimix(object=RCPNoSamp_fin, object2=rcpNoSamp_boots, newdata=rcp_poly_pred)
predict_maps2_SDF2(RCPNoSamp_SpPreds, pred_space=pred_space_rcp, pred_crop=pred_masked, nRCP=3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistics.plot.R
\name{add.boot.conf}
\alias{add.boot.conf}
\title{Add Bootstrapped Confidence Intervals}
\usage{
add.boot.conf(
model,
x = NULL,
col = "#55555540",
conf = c(0.025, 0.975),
border = FALSE,
trendline = FALSE,
n = 1000,
...
)
}
\arguments{
\item{model}{A bootstrap object (i.e. a dataframe) containing bootstrapped estimates of m and b.}
\item{col}{The desired color of the confidence band. We recommend colors with transparency for plotting.}
\item{conf}{The quantile ranges for the plotting (default is 95\% two-tail).}
\item{border}{Do you want a border on the shaded confidence interval?}
\item{trendline}{Should the maximum likelihood values be plotted as a trendline?}
\item{x}{The x values for which predictions are required.}
}
\description{
Add confidence bands to a figure based on results of a bootstrap.
}
\author{
Thomas Bryce Kelly
}
\keyword{Statistics}
\keyword{Uncertainty}
|
/man/add.boot.conf.Rd
|
no_license
|
tbrycekelly/TheSource
|
R
| false
| true
| 1,006
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistics.plot.R
\name{add.boot.conf}
\alias{add.boot.conf}
\title{Add Bootstrapped Confidence Intervals}
\usage{
add.boot.conf(
model,
x = NULL,
col = "#55555540",
conf = c(0.025, 0.975),
border = FALSE,
trendline = FALSE,
n = 1000,
...
)
}
\arguments{
\item{model}{A bootstrap object (i.e. a dataframe) containing bootstrapped estimates of m and b.}
\item{col}{The desired color of the confidence band. We recommend colors with transparency for plotting.}
\item{conf}{The quantile ranges for the plotting (default is 95\% two-tail).}
\item{border}{Do you want a border on the shaded confidence interval?}
\item{trendline}{Should the maximum likelihood values be plotted as a trendline?}
\item{x}{The x values for which predictions are required.}
}
\description{
Add confidence bands to a figure based on results of a bootstrap.
}
\author{
Thomas Bryce Kelly
}
\keyword{Statistics}
\keyword{Uncertainty}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blbglm-logistic.R
\name{glm_each_boot}
\alias{glm_each_boot}
\title{Regression estimates for blbglm data set
Compute the regression estimates for blbglm data set}
\usage{
glm_each_boot(formula, data, n)
}
\arguments{
\item{formula}{a formula}
\item{data}{a data frame}
\item{n}{an integer}
}
\description{
Regression estimates for blbglm data set
Compute the regression estimates for blbglm data set
}
|
/man/glm_each_boot.Rd
|
permissive
|
gilidror/blblm
|
R
| false
| true
| 482
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blbglm-logistic.R
\name{glm_each_boot}
\alias{glm_each_boot}
\title{Regression estimates for blbglm data set
Compute the regression estimates for blbglm data set}
\usage{
glm_each_boot(formula, data, n)
}
\arguments{
\item{formula}{a formula}
\item{data}{a data frame}
\item{n}{an integer}
}
\description{
Regression estimates for blbglm data set
Compute the regression estimates for blbglm data set
}
|
library(evian)
### Name: evian_linear
### Title: Evidential analysis for quantitative outcome data using linear
###   regression models
### Aliases: evian_linear
### ** Examples
# Load the packaged example phenotype/genotype data and its SNP map.
data(eviandata_linear)
data(evianmap_linear)
## Don't show:
# Low-resolution run (m=100, fixed beta grid) kept fast for automated example checks.
rst1=evian_linear(data=eviandata_linear, bim=evianmap_linear, xcols=10:ncol(eviandata_linear),
ycol=6, covariateCol=c(5,7:9), robust=FALSE, model="additive", m=100, lolim=-0.4,
hilim=0.4, kcutoff = c(32,100), multiThread=1)
## End(Don't show)
## No test:
# Full-resolution run (m=1000) over the same outcome, covariates and SNP columns.
rst1=evian_linear(data=eviandata_linear, bim=evianmap_linear, xcols=10:ncol(eviandata_linear),
ycol=6, covariateCol=c(5,7:9), robust=FALSE, model="additive",
m=1000, kcutoff = c(32,100), multiThread=1)
#Alternatively you can use the formula argument to run the same model as above
rst2=evian_linear(data=eviandata_linear, bim=evianmap_linear,
formula='Y_norma~Fev+SEX+Age_group+BMI_group+rs141+rs912+rs573+rs414+rs635+
rs356+rs877+rs168+rs449+rs580', robust=FALSE,
model="additive", m=1000, kcutoff = c(32,100), multiThread=1)
## End(No test)
|
/data/genthat_extracted_code/evian/examples/evian_linear.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,048
|
r
|
library(evian)
### Name: evian_linear
### Title: Evidential analysis for quantitative outcome data using linear
### regression models
### Aliases: evian_linear
### ** Examples
data(eviandata_linear)
data(evianmap_linear)
## Don't show:
rst1=evian_linear(data=eviandata_linear, bim=evianmap_linear, xcols=10:ncol(eviandata_linear),
ycol=6, covariateCol=c(5,7:9), robust=FALSE, model="additive", m=100, lolim=-0.4,
hilim=0.4, kcutoff = c(32,100), multiThread=1)
## End(Don't show)
## No test:
rst1=evian_linear(data=eviandata_linear, bim=evianmap_linear, xcols=10:ncol(eviandata_linear),
ycol=6, covariateCol=c(5,7:9), robust=FALSE, model="additive",
m=1000, kcutoff = c(32,100), multiThread=1)
#Alternatively you can use the formula argument to run the same model as above
rst2=evian_linear(data=eviandata_linear, bim=evianmap_linear,
formula='Y_norma~Fev+SEX+Age_group+BMI_group+rs141+rs912+rs573+rs414+rs635+
rs356+rs877+rs168+rs449+rs580', robust=FALSE,
model="additive", m=1000, kcutoff = c(32,100), multiThread=1)
## End(No test)
|
#covid.r
#-------
# Set things up
covid19.dir <- "~/Desktop/covid19/"
# Required packages
library(tidyverse)
library(lubridate)
# Download (optionally) and (re)load the NYT covid CSVs into global tables.
#
# Args:
#   download: if TRUE, fetch fresh state/county files from GitHub first.
# Side effects: assigns `covid19.state` and `covid19.county` in the global
# environment, with the `date` column parsed via lubridate::ymd().
get.data <- function(download=FALSE){
  if(download){
    download.file("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv",
                  paste0(covid19.dir,"covid19.csv"), method="curl")
    download.file("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv",
                  paste0(covid19.dir,"covid19-counties.csv"), method="curl")
    #rt.live <- download.file("https://d14wlfuexuxgcm.cloudfront.net/covid/rt.csv","rt.csv",method="curl")
  }
  # Read one cached CSV and parse its date column.
  load.csv <- function(fname) {
    d <- read.csv(paste0(covid19.dir, fname))
    d$date <- ymd(d$date)
    d
  }
  covid19.state <<- load.csv("covid19.csv")
  covid19.county <<- load.csv("covid19-counties.csv")
}
# Title-case each element of a character vector: upper-case the first letter
# of every space-separated word. With strict = TRUE the remainder of each
# word is forced to lower case. Names on the input are preserved.
capwords <- function(s, strict = FALSE) {
  capitalize <- function(words) {
    first <- toupper(substring(words, 1, 1))
    rest <- substring(words, 2)
    if (strict) rest <- tolower(rest)
    paste(first, rest, sep = "", collapse = " ")
  }
  sapply(strsplit(s, split = " "), capitalize, USE.NAMES = !is.null(names(s)))
}
# Convert a two-letter US state/territory postal code to its full name.
#
# Args:
#   code: one two-letter code in upper case, e.g. "WI". (Scalar contract --
#         a vector of codes would index incorrectly via which().)
# Returns:
#   The matching full name, or character(0) when the code is unknown
#   (original behavior, preserved for callers that test emptiness).
#
# Fixes vs. original: the old body ended with `state.name <- ...`, which
# both shadowed base R's `state.name` constant and returned the value only
# implicitly (invisibly, as the value of an assignment). The lookup value
# is now returned explicitly.
conv.state.code <- function(code){
  state.codes <- c("AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL",
                   "GA","GU","HI","ID","IL","IN","IA","KS","KY","LA",
                   "ME","MD","MA","MI","MN","MS","MO","MT","NE","NV",
                   "NH","NJ","NM","NY","NC","ND","MP","OH","OK","OR",
                   "PA","PR","RI","SC","SD","TN","TX","UT","VT","VI",
                   "VA","WA","WV","WI","WY")
  state.names <- c("Alabama", "Alaska", "Arizona", "Arkansas", "California",
                   "Colorado", "Connecticut", "Delaware",
                   "District of Columbia", "Florida", "Georgia", "Guam",
                   "Hawaii", "Idaho", "Illinois", "Indiana", "Iowa",
                   "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
                   "Massachusetts", "Michigan", "Minnesota", "Mississippi",
                   "Missouri", "Montana", "Nebraska", "Nevada",
                   "New Hampshire", "New Jersey", "New Mexico", "New York",
                   "North Carolina", "North Dakota",
                   "Northern Mariana Islands", "Ohio", "Oklahoma", "Oregon",
                   "Pennsylvania", "Puerto Rico", "Rhode Island",
                   "South Carolina", "South Dakota", "Tennessee", "Texas",
                   "Utah", "Vermont", "Virgin Islands", "Virginia",
                   "Washington", "West Virginia", "Wisconsin", "Wyoming")
  # which() (not match()) keeps the original character(0)-for-unknown result.
  state.names[which(state.codes == code)]
}
# Derive daily-change and trailing-average columns from cumulative counts.
#
# Args:
#   cdata: data frame with cumulative `cases` and `deaths` columns, one row
#          per day in date order (a subset of the NYT state/county tables).
# Returns:
#   `cdata` with six added columns, in the original order: cases.new /
#   deaths.new (daily deltas, 0 on the first row) and 7-day / 3-day
#   trailing means of those deltas (NA until a full window of history).
#
# Fixes vs. original: the loop `for (i in 2:dim(calcdata)[1])` ran
# BACKWARDS (2, 1) for inputs with fewer than 2 rows, writing past the end
# of the frame; deltas are now vectorized with diff() and short inputs are
# handled safely.
calc.data <- function(cdata){
  # Trailing mean over `width` days; NA until a full window is available.
  # na.rm = TRUE matches the original window computation when deltas are NA.
  roll.mean <- function(x, width) {
    out <- rep(NA_real_, length(x))
    if (length(x) >= width) {
      for (i in width:length(x)) {
        out[i] <- mean(x[(i - width + 1):i], na.rm = TRUE)
      }
    }
    out
  }
  # Daily deltas; the first day has no predecessor, so it is reported as 0
  # (original behavior). Empty input stays empty.
  delta <- function(x) if (length(x) > 0) c(0, diff(x)) else numeric(0)
  cdata$cases.new <- delta(cdata$cases)
  cdata$deaths.new <- delta(cdata$deaths)
  cdata$cases.7day.mean <- roll.mean(cdata$cases.new, 7)
  cdata$deaths.7day.mean <- roll.mean(cdata$deaths.new, 7)
  cdata$cases.3day.mean <- roll.mean(cdata$cases.new, 3)
  cdata$deaths.3day.mean <- roll.mean(cdata$deaths.new, 3)
  cdata
}
# Plot daily new cases for one location: bars of cases.new plus 3- and
# 7-day trailing-average lines.
#
# Args:
#   plotdata: data frame of cumulative `cases`/`deaths` with a `date`
#             column (a subset of covid19.state or covid19.county).
#   title:    location name used as the plot-title prefix.
# Returns: a ggplot object (requires ggplot2 via the tidyverse).
plot.data <- function(plotdata,title="Plot"){
  plotdata.calc <- calc.data(plotdata)
  # Make the plot
  ggplot(plotdata.calc,aes(date,cases.new,group=1)) +
    geom_bar(stat="identity",fill="steelblue2") +
    ylab("New Daily Cases") +
    xlab("Date") +
    ggtitle(paste(title, "-- Data as of",format(Sys.Date(), format="%B %d, %Y"))) +
    # stat="identity" draws the precomputed averages directly as lines.
    geom_smooth(aes(date,cases.7day.mean,color="7-day average"),stat="identity",na.rm=TRUE) +
    geom_smooth(aes(date,cases.3day.mean,color="3-day average"),stat="identity",na.rm=TRUE) +
    scale_x_date(date_labels = "%b %d") +
    scale_color_manual(name="",values=c("mediumpurple3","red")) +
    theme(legend.pos="bottom",
          legend.key = element_rect(fill = "white"))
}
# Refresh the cached NYT data if the local file is older than today,
# otherwise just (re)load the existing cache via get.data().
data.check <- function(){
  today <- Sys.Date()
  # file.mtime() yields a POSIXct; keep only the yyyy-mm-dd prefix.
  cached.date <- substr(as.character(file.mtime(paste0(covid19.dir,"covid19.csv"))), 1, 10)
  if (cached.date < today) {
    print("Getting new data")
    get.data(download=TRUE)
  } else {
    get.data()
  }
}
# Plot daily new cases for one US state. Accepts a full state name or a
# 2-letter postal code, in any letter case; refreshes data first.
plot.state <- function(state.name){
  state <- if (nchar(str_trim(state.name)) == 2) {
    conv.state.code(toupper(state.name))
  } else {
    capwords(state.name)
  }
  data.check()
  plot.data(covid19.state[covid19.state$state == state, ], state)
}
# Plot daily new cases for one US county. County is a full name; state may
# be a full name or a 2-letter postal code, in any letter case.
plot.county <- function(county.name,state.name){
  county <- capwords(county.name)
  state <- if (nchar(str_trim(state.name)) == 2) {
    conv.state.code(toupper(state.name))
  } else {
    capwords(state.name)
  }
  data.check()
  rows <- covid19.county$state == state & covid19.county$county == county
  plot.data(covid19.county[rows, ], paste0(county, " County, ", state))
}
### USAGE Examples
#plot.state("Wisconsin")
#plot.state("WI")
#plot.county("Dane","Wisconsin")
#plot.county("dane","wi)
|
/covid.r
|
no_license
|
sdiegel/covid19
|
R
| false
| false
| 7,365
|
r
|
#covid.r
#-------
# Set things up
covid19.dir <- "~/Desktop/covid19/"
# Required packages
library(tidyverse)
library(lubridate)
get.data <- function(download=FALSE){
# Download the current data
if(download){
nytimes.state.cv <- download.file("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv",paste0(covid19.dir,"covid19.csv"),method="curl")
nytimes.county.cv <- download.file("https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv",paste0(covid19.dir,"covid19-counties.csv"),method="curl")
#rt.live <- download.file("https://d14wlfuexuxgcm.cloudfront.net/covid/rt.csv","rt.csv",method="curl")
}
# Set up the data
covid19.s <- read.csv(paste0(covid19.dir,"covid19.csv"))
covid19.s$date <- ymd(covid19.s$date)
covid19.state <<- covid19.s
covid19.c <- read.csv(paste0(covid19.dir,"covid19-counties.csv"))
covid19.c$date <- ymd(covid19.c$date)
covid19.county <<- covid19.c
}
capwords <- function(s, strict = FALSE) {
cap <- function(s) paste(toupper(substring(s, 1, 1)),
{s <- substring(s, 2); if(strict) tolower(s) else s},
sep = "", collapse = " " )
sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
}
conv.state.code <- function(code){
state.codes <- c("AL","AK","AZ","AR","CA","CO","CT","DE","DC","FL",
"GA","GU","HI","ID","IL","IN","IA","KS","KY","LA",
"ME","MD","MA","MI","MN","MS","MO","MT","NE","NV",
"NH","NJ","NM","NY","NC","ND","MP","OH","OK","OR",
"PA","PR","RI","SC","SD","TN","TX","UT","VT","VI",
"VA","WA","WV","WI","WY")
state.names <- c("Alabama",
"Alaska",
"Arizona",
"Arkansas",
"California",
"Colorado",
"Connecticut",
"Delaware",
"District of Columbia",
"Florida",
"Georgia",
"Guam",
"Hawaii",
"Idaho",
"Illinois",
"Indiana",
"Iowa",
"Kansas",
"Kentucky",
"Louisiana",
"Maine",
"Maryland",
"Massachusetts",
"Michigan",
"Minnesota",
"Mississippi",
"Missouri",
"Montana",
"Nebraska",
"Nevada",
"New Hampshire",
"New Jersey",
"New Mexico",
"New York",
"North Carolina",
"North Dakota",
"Northern Mariana Islands",
"Ohio",
"Oklahoma",
"Oregon",
"Pennsylvania",
"Puerto Rico",
"Rhode Island",
"South Carolina",
"South Dakota",
"Tennessee",
"Texas",
"Utah",
"Vermont",
"Virgin Islands",
"Virginia",
"Washington",
"West Virginia",
"Wisconsin",
"Wyoming")
state.name <- state.names[which(state.codes==code)]
}
calc.data <- function(cdata){
n.rows <- dim(cdata)[1]
cases.new <- deaths.new <- rep(0,n.rows)
cases.7day.mean <- deaths.7day.mean <- rep(NA,n.rows)
cases.3day.mean <- deaths.3day.mean <- rep(NA,n.rows)
calcdata <- cbind(cdata,cases.new,deaths.new,
cases.7day.mean,deaths.7day.mean,
cases.3day.mean,deaths.3day.mean)
for(i in 2:dim(calcdata)[1]){
calcdata$cases.new[i] <- calcdata$cases[i] - calcdata$cases[i-1]
calcdata$deaths.new[i] <- calcdata$deaths[i] - calcdata$deaths[i-1]
if (i > 2){
calcdata$cases.3day.mean[i] <- mean(c(calcdata$cases.new[i-2],
calcdata$cases.new[i-1],
calcdata$cases.new[i]),na.rm=TRUE)
calcdata$deaths.3day.mean[i] <- mean(c(calcdata$deaths.new[i-2],
calcdata$deaths.new[i-1],
calcdata$deaths.new[i]),na.rm=TRUE)
if (i >6){
calcdata$cases.7day.mean[i] <- mean(c(calcdata$cases.new[i-6],
calcdata$cases.new[i-5],
calcdata$cases.new[i-4],
calcdata$cases.new[i-3],
calcdata$cases.new[i-2],
calcdata$cases.new[i-1],
calcdata$cases.new[i]),na.rm=TRUE)
calcdata$deaths.7day.mean[i] <- mean(c(calcdata$deaths.new[i-6],
calcdata$deaths.new[i-5],
calcdata$deaths.new[i-4],
calcdata$deaths.new[i-3],
calcdata$deaths.new[i-2],
calcdata$deaths.new[i-1],
calcdata$deaths.new[i]),na.rm=TRUE)
}
}
}
calcdata
}
plot.data <- function(plotdata,title="Plot"){
plotdata.calc <- calc.data(plotdata)
# Make the plot
ggplot(plotdata.calc,aes(date,cases.new,group=1)) +
geom_bar(stat="identity",fill="steelblue2") +
ylab("New Daily Cases") +
xlab("Date") +
ggtitle(paste(title, "-- Data as of",format(Sys.Date(), format="%B %d, %Y"))) +
geom_smooth(aes(date,cases.7day.mean,color="7-day average"),stat="identity",na.rm=TRUE) +
geom_smooth(aes(date,cases.3day.mean,color="3-day average"),stat="identity",na.rm=TRUE) +
scale_x_date(date_labels = "%b %d") +
scale_color_manual(name="",values=c("mediumpurple3","red")) +
theme(legend.pos="bottom",
legend.key = element_rect(fill = "white"))
}
# Check if new data needs to be downloaded (is it older than today?)
data.check <- function(){
cur.date <- Sys.Date()
file.date <- substr(as.character(file.mtime(paste0(covid19.dir,"covid19.csv"))),1,10)
if (file.date < cur.date){
print("Getting new data")
get.data(download=TRUE)
} else get.data()
}
plot.state <- function(state.name){
State.name <- if (nchar(str_trim(state.name))==2) conv.state.code(toupper(state.name)) else capwords(state.name)
data.check()
plot.data(covid19.state[covid19.state$state==State.name,],State.name)
}
plot.county <- function(county.name,state.name){
County.name <- capwords(county.name)
State.name <- if (nchar(str_trim(state.name))==2) conv.state.code(toupper(state.name)) else capwords(state.name)
data.check()
plot.data(covid19.county[covid19.county$state==State.name & covid19.county$county==County.name,],paste0(County.name," County, ",State.name))
}
### USAGE Examples
#plot.state("Wisconsin")
#plot.state("WI")
#plot.county("Dane","Wisconsin")
#plot.county("dane","wi)
|
# Custom user Rprofile
# options(show.signif.stars=FALSE) # Don't show those silly significanct stars
## Credentials for connections. Not the most secure option but easiest.
## Simple way to storage username and password for different connections. Use Sys.getenv(USERNAME) function to get USERNAME.
# Sys.setenv(USERNAME = "tunnus")
# Sys.setenv(PASSWORD = "salasana")
## Don't ask me for my CRAN mirror every time
options("repos" = c(CRAN = "https://cran.uni-muenster.de/"))
## Create a new invisible environment for all the functions to go in so it doesn't clutter your workspace.
.env <- new.env()
## Rprofile version
.env$rprofile.version <- "1.07.5"
## Update RProfile
## Overwrite ~/.Rprofile with the latest copy from GitHub, then restart the
## session so the new profile takes effect.
## NOTE(review): assumes an RStudio session (rstudioapi) -- confirm before
## calling from plain R.
.env$rprofile.update <- function(){
  download.file(url = "https://raw.githubusercontent.com/janikmiet/rprofile/main/.Rprofile.R", destfile = "~/.Rprofile")
  rstudioapi::restartSession()
}
## Install R packages from a list
## Read package names (one per whitespace-separated token) from a URL or a
## local file, then install whichever of them are not already installed.
.env$install.packages.list <- function(list = "https://research.janimiettinen.fi/data/r_packages.txt"){
  src <- if (substr(list, 1, 4) == "http") url(list) else list
  PACKAGES <- scan(src, what = "character")
  message("Trying to install packages: ")
  message(paste0(PACKAGES, collapse = ", "))
  # match() against installed packages; NA marks the missing ones.
  missing.idx <- which(is.na(match(PACKAGES, .packages(all = TRUE))))
  if (length(missing.idx) > 0) install.packages(PACKAGES[missing.idx])
}
## Returns a logical vector TRUE for elements of X not in Y
.env$"%nin%" <- function(x, y) !(x %in% y)
## Returns names(df) in single column, numbered matrix format.
.env$n <- function(df) matrix(names(df))
## Single character shortcuts for summary() and head().
.env$s <- base::summary
.env$h <- utils::head
## ht==headtail, i.e., show the first and last 10 items of an object
.env$ht <- function(d) rbind(head(d,10),tail(d,10))
## Show the first 5 rows and first 5 columns of a data frame or matrix.
## FIX: the original tested `if (class(d) %in% c(...))`; since R 4.0 a
## matrix has class c("matrix","array"), so the condition had length 2 --
## an error in if() as of R 4.2. inherits() is TRUE when d matches ANY of
## the listed classes and always returns a single logical.
.env$hh <- function(d) if(inherits(d, c("matrix","data.frame","tbl_df"))) d[1:5,1:5]
## Print a vector as a comma-separated list (quoted and unquoted variants).
.env$print_cs1 <- function(d) cat(paste(shQuote(d, type="cmd"), collapse=", "))
.env$print_cs2 <- function(d) cat(paste(d, collapse=","))
## Strip row names from a data frame (stolen from plyr)
## Calls the `rownames<-` replacement function directly, which is exactly
## equivalent to `rownames(x) <- NULL; x`.
.env$unrowname <- function(x) {
  `rownames<-`(x, NULL)
}
# for qplots
## Thin wrappers around ggplot2::qplot() for quick exploratory plots.
## NOTE(review): qplot() is soft-deprecated in newer ggplot2 -- still works,
## but may emit a deprecation warning; confirm against the installed version.
.env$ggdens <- function(var){
  ## Density plot of a numeric vector.
  ggplot2::qplot(x = var, geom="density")
}
.env$ggscat <- function(varx,vary){
  ## Scatter plot of two vectors.
  ggplot2::qplot(x = varx, y=vary, geom="point")
}
.env$gghist <- function(var){
  ## Histogram of a vector.
  ggplot2::qplot(x = var, geom="histogram")
}
.env$ggbox <- function(var){
  ## Boxplot of a vector.
  ggplot2::qplot(x = var, geom="boxplot")
}
## List objects and classes (from @_inundata, mod by ateucher)
## Returns a two-column data frame ("class", "object") describing every
## object currently in the user's global environment.
.env$lsa <- function() {
  obj_type <- function(x) class(get(x, envir = .GlobalEnv)) # define environment
  foo = data.frame(sapply(ls(envir = .GlobalEnv), obj_type))
  foo$object_name = rownames(foo)
  names(foo)[1] = "class"
  names(foo)[2] = "object"
  ## unrowname() is defined earlier in this profile; it drops row labels.
  return(unrowname(foo))
}
## List all functions in a package (also from @_inundata)
## Usage: lsp(dplyr) -- the unquoted package name is captured via
## substitute(); the package must already be attached (on the search path)
## for the "package:<name>" position to exist.
.env$lsp <-function(package, all.names = FALSE, pattern) {
  package <- deparse(substitute(package))
  ls(
    pos = paste("package", package, sep = ":"),
    all.names = all.names,
    pattern = pattern
  )
}
## table showing Na by default
## A drop-in variant of base::table() whose useNA default is "always", so
## NA counts appear unless explicitly excluded. The body is adapted from
## base::table() itself; only the defaults differ, so the internals below
## are deliberately left byte-identical to that implementation.
.env$table_na <- function (..., exclude = NULL, useNA = "always", dnn = list.names(...), deparse.level = 1){
  ## Derive display names for the dimnames from the call (as base::table).
  list.names <- function(...) {
    l <- as.list(substitute(list(...)))[-1L]
    nm <- names(l)
    fixup <- if (is.null(nm))
      seq_along(l)
    else nm == ""
    dep <- vapply(l[fixup], function(x) switch(deparse.level +
      1, "", if (is.symbol(x)) as.character(x) else "",
      deparse(x, nlines = 1)[1L]), "")
    if (is.null(nm))
      dep
    else {
      nm[fixup] <- dep
      nm
    }
  }
  if (!missing(exclude) && is.null(exclude))
    useNA <- "always"
  useNA <- match.arg(useNA)
  args <- list(...)
  if (!length(args))
    stop("nothing to tabulate")
  ## A single list argument is unwrapped into the individual factors.
  if (length(args) == 1L && is.list(args[[1L]])) {
    args <- args[[1L]]
    if (length(dnn) != length(args))
      dnn <- if (!is.null(argn <- names(args)))
        argn
      else paste(dnn[1L], seq_along(args), sep = ".")
  }
  bin <- 0L
  lens <- NULL
  dims <- integer()
  pd <- 1L
  dn <- NULL
  ## Encode each argument as a factor (adding an NA level per useNA) and
  ## fold it into a single linear cell index `bin`.
  for (a in args) {
    if (is.null(lens))
      lens <- length(a)
    else if (length(a) != lens)
      stop("all arguments must have the same length")
    cat <- if (is.factor(a)) {
      if (any(is.na(levels(a))))
        a
      else {
        if (is.null(exclude) && useNA != "no")
          addNA(a, ifany = (useNA == "ifany"))
        else {
          if (useNA != "no")
            a <- addNA(a, ifany = (useNA == "ifany"))
          ll <- levels(a)
          a <- factor(a, levels = ll[!(ll %in% exclude)],
            exclude = if (useNA == "no")
              NA)
        }
      }
    }
    else {
      a <- factor(a, exclude = exclude)
      if (useNA != "no")
        addNA(a, ifany = (useNA == "ifany"))
      else a
    }
    nl <- length(ll <- levels(cat))
    dims <- c(dims, nl)
    if (prod(dims) > .Machine$integer.max)
      stop("attempt to make a table with >= 2^31 elements")
    dn <- c(dn, list(ll))
    bin <- bin + pd * (as.integer(cat) - 1L)
    pd <- pd * nl
  }
  names(dn) <- dnn
  bin <- bin[!is.na(bin)]
  if (length(bin))
    bin <- bin + 1L
  ## Count occurrences per linear cell and reshape into a table array.
  y <- array(tabulate(bin, pd), dims, dimnames = dn)
  class(y) <- "table"
  y
}
## Open Finder to the current directory on mac
# .env$macopen <- function(...) if(Sys.info()[1]=="Darwin") system("open .")
## Open the current working directory in the OS file browser
## (nemo on Linux, Explorer on Windows, Finder on macOS).
.env$o <- function(...){
  sysname <- Sys.info()[1]
  if(sysname=="Linux") system("nemo .")
  if(sysname=="Windows") shell.exec(file = ".")
  if(sysname=="Darwin") system("open .")
}
## Read data on clipboard.
## macOS has no "clipboard" connection, so pbpaste is piped instead;
## extra arguments are forwarded to read.table().
.env$read_cb <- function(...) {
  if (Sys.info()[1]=="Darwin") {
    read.table(pipe("pbpaste"), ...)
  } else {
    read.table(file="clipboard", ...)
  }
}
## List all rprofile functions
## Prints a short help index of the shortcuts defined in this profile.
## The text is kept in a vector and emitted line by line, byte-identical
## to the original sequence of cat() calls.
.env$rprofile.functions <- function(){
  help.lines <- c(
    paste0("User .Rprofile \n"),
    "s() - shortcut for summary\n",
    "h() - shortcut for head\n",
    "o() - shortcut for file explorer\n",
    "table_na() - table showing Na by default\n",
    "unrowname() - remove data frame row names\n",
    "read_cb() - read from clipboard\n",
    "lsa() - list objects and classes\n",
    "lsp() - list all functions in a package\n",
    "ggdens() - quick density plot\n",
    "ggscat() - quick scatter plot\n",
    "gghist() - quick histogram plot\n",
    "ggbox() - quick boxplot\n"
  )
  for (line in help.lines) cat(line, sep = "")
}
## Attach all the variables above
attach(.env)
## .First() run at the start of every R session.
## Prints a randomly chosen greeting plus the user name, the profile
## version (rprofile.version, defined above) and a timestamp. Purely
## cosmetic; no side effects beyond cat().
.First <- function() {
  hello_message <- c(
    "Hello, sunshine",
    "Howdy, partner",
    "Hey, howdy, hi",
    "What’s kickin’, little chicken",
    "Peek-a-boo",
    "Howdy-doody",
    "Hey there, freshman",
    "Hi, mister",
    "I come in peace",
    "Put that cookie down",
    "Ahoy, matey",
    "Hiya",
    "Ello, gov'nor",
    "Top of the mornin’ to ya",
    "What’s crackin’",
    "GOOOOOD MORNING, VIETNAM",
    "‘Sup, homeslice",
    "This call may be recorded for training purposes",
    "Howdy, howdy ,howdy",
    "I'm Batman",
    "At least, we meet for the first time for the last time",
    "Hello, who's there, I'm talking",
    "You know who this is",
    "Ghostbusters, whatya want",
    "Yo",
    "Whaddup",
    "Greetings and salutations",
    "Doctor"
  )
  ## Pick one greeting at random and format the banner line.
  stringi <- paste0("\n", hello_message[sample(1:length(hello_message), 1)], " ",Sys.info()["user"][[1]],"! Loaded .Rprofile (v.",rprofile.version,") at ", strftime(Sys.time(),"%Y-%m-%d %H:%M:%S"), "\n")
  cat(stringi)
}
## .Last() run at the end of the session
## Prints a randomly chosen farewell plus a timestamp. Purely cosmetic.
## (Some farewell strings deliberately contain embedded newlines.)
.Last <- function() {
  goodbye_message <- c("See you later, alligator!", "After a while, crocodile.", "Stay out of trouble.", "I’m out of here.", "Okay...bye, fry guy!", "If I don’t see you around, I'll see you square.", "Stay classy.", "Fare thee well.", "Catch you on the rebound.", "Gotta go, buffalo.", "Peace out!", "Gotta hit the road.", "Long live and prosper!", "Well, I'm off!", "Smoke me a kipper, I'll be back for breakfast.", "Bye bye, butterfly.", "Gotta get going.", "To the winch, wench!", "It has been emotional, bye.", "Out to the door, dinosaur.", "Catch you on the flip side.", "Gotta bolt!", "See you soon, racoon.", "You're still here? It's over. Go home. Go!", "Don't get run over!", "Give a hug, ladybug.", "I gotta bounce.", "Toodle-pip!", "Calc you later!", "See you on the other side.", "Take care, butterfly.", "Have fun storming the castle!", "Adieu, cockatoo!", "Blow a kiss, goldfish.", "It's been a pleasure and a privilege to meet you.", "Don't get attacked by a bear, it's night-time!", "Be good and don't get caught.", "Thank you for your cooperation. Farewell.", "I look forward to our next meeting.", "I'm gonna make a like a bakery truck and haul buns.", "Chop chop, lollipop!", "Gotta roll!", "Can’t star, blue jay.", "Oh, and in case I don't see you—good afternoon, good evening, and good night!", "Influence everyone in a good way!", "Don't forget to come back!", "Once more unto the breach, dear friends!", "See ya, wouldn't wanna be ya.", "Peace out, girl scout!", "Adios, hippos.", "Time to scoot, little newt.", "Smell ya later!", "I gotta jet.", "Happy trails!", "Cheerio!", "Bye for now.", "Tootle-loo, kangaroo.", "Don't get lost on your way to class!", "Love, peace, and chicken grease.", "I'm off like a dirty shirt.", "See you when I see you.", "In a while, crocodile.", "Catch ya later, future dudes!", "Cya. (Clearly, this is just short for ‘see you,’ which makes no sense because you utter ‘cya’ and not write it. 
Oh, whatever!)", "As you wish, jellyfish!", "Later, skater!", "May the force be with you... always.", "Shine on, you crazy diamonds.", "Parting is such sweet sorrow, that I shall say good night till it be tomorrow.", "Don't let the door hit ya where the good lord split ya.", "Better shake, rattlesnake!", "Later, potato!", "Don't forget to be awesome.", "Later, nerds!", "Stay cool, my dude.", "Don't get cut by a blade of grass!", "Be sweet, parakeet.", "Be careful! Don't get mauled by a squirrel!", "See you later, aggregator!", "Don't trip on a raindrop!", "See you soon, baboon!", "Bye! I tolerate you!", "Gotta go, the power of the shower compels me.", "Make new friends on the sidewalk!", "I’m late for my bus, gigantopithecus!", "Move out, brussels sprout!", "Make sure the doormat says goodbye!", "I’ll show you to the door!", "Ciao ciao, brown cow!", "Screw you guys, I'm going home!", "I shall return.", "Catch you round like a rissole!", "Take it easy, greasy. You've got a long way to slide.", "Toodaloo, caribou!", "I'm outtie.", "Adios, amigos.", "That's all folks.", "Take care, polar bear!", "Peace out, rainbow trout!", "I'm outta here like spit through a trumpet.", "Au revoir!", "See you in the future.", "Begone!", "Until next time.", "So long, suckers!", "Hasta lasagna, don't get any on ya.", "Sayonara, muchachos!", "Next time, bring more cookies.", "Party easy, drive safe, and return with a smile on your face.", "After two, kangaroo!", "After three, chimpanzee!", "After four, dinosaur.", "Come back when you can't stay so long.", "Don’t forget to send a letter.", "Goodbye forever.", "See you in another life, brotha!", "We may not talk for a long time, but I hope we don't lose touch.", "Never look back!", "See you on the internet!", "Forever and forever farewell. If we do meet again, we'll smile indeed. 
If not, 'tis true parting was well made.", "You will do well.", "See you at the restaurant at the edge of the universe!", "I'd say goodbye, but you're not worth it.")
  ## Pick one farewell at random and print it with the session end time.
  cat(paste0("\n ", goodbye_message[sample(1:length(goodbye_message), 1)]," You finished at ", strftime(Sys.time(),"%Y-%m-%d %H:%M:%S"), "\n"))
}
|
/.Rprofile.R
|
no_license
|
janikmiet/rprofile
|
R
| false
| false
| 12,021
|
r
|
# Custom user Rprofile
# options(show.signif.stars=FALSE) # Don't show those silly significanct stars
## Credentials for connections. Not the most secure option but easiest.
## Simple way to storage username and password for different connections. Use Sys.getenv(USERNAME) function to get USERNAME.
# Sys.setenv(USERNAME = "tunnus")
# Sys.setenv(PASSWORD = "salasana")
## Don't ask me for my CRAN mirror every time
options("repos" = c(CRAN = "https://cran.uni-muenster.de/"))
## Create a new invisible environment for all the functions to go in so it doesn't clutter your workspace.
.env <- new.env()
## Rprofile version
.env$rprofile.version <- "1.07.5"
## Update RProfile
.env$rprofile.update <- function(){
download.file(url = "https://raw.githubusercontent.com/janikmiet/rprofile/main/.Rprofile.R", destfile = "~/.Rprofile")
rstudioapi::restartSession()
}
## Install R packages from a list
.env$install.packages.list <- function(list = "https://research.janimiettinen.fi/data/r_packages.txt"){
if(substr(list,1,4) == "http"){
PACKAGES <- scan(url(list), what="character")
}else{
PACKAGES <- scan(list, what="character")
}
message("Trying to install packages: ")
message(paste0(PACKAGES, collapse = ", "))
inst <- match(PACKAGES, .packages(all=TRUE))
need <- which(is.na(inst))
if (length(need) > 0) install.packages(PACKAGES[need])
}
## Returns a logical vector TRUE for elements of X not in Y
.env$"%nin%" <- function(x, y) !(x %in% y)
## Returns names(df) in single column, numbered matrix format.
.env$n <- function(df) matrix(names(df))
## Single character shortcuts for summary() and head().
.env$s <- base::summary
.env$h <- utils::head
## ht==headtail, i.e., show the first and last 10 items of an object
.env$ht <- function(d) rbind(head(d,10),tail(d,10))
## Show the first 5 rows and first 5 columns of a data frame or matrix
.env$hh <- function(d) if(class(d) %in% c("matrix","data.frame","tbl_df")) d[1:5,1:5]
## Show the first 5 rows and first 5 columns of a data frame or matrix
.env$print_cs1 <- function(d) cat(paste(shQuote(d, type="cmd"), collapse=", "))
.env$print_cs2 <- function(d) cat(paste(d, collapse=","))
## Strip row names from a data frame (stolen from plyr)
.env$unrowname <- function(x) {
rownames(x) <- NULL
x
}
# for qplots
.env$ggdens <- function(var){
ggplot2::qplot(x = var, geom="density")
}
.env$ggscat <- function(varx,vary){
ggplot2::qplot(x = varx, y=vary, geom="point")
}
.env$gghist <- function(var){
ggplot2::qplot(x = var, geom="histogram")
}
.env$ggbox <- function(var){
ggplot2::qplot(x = var, geom="boxplot")
}
## List objects and classes (from @_inundata, mod by ateucher)
.env$lsa <- function() {
obj_type <- function(x) class(get(x, envir = .GlobalEnv)) # define environment
foo = data.frame(sapply(ls(envir = .GlobalEnv), obj_type))
foo$object_name = rownames(foo)
names(foo)[1] = "class"
names(foo)[2] = "object"
return(unrowname(foo))
}
## List all functions in a package (also from @_inundata)
.env$lsp <-function(package, all.names = FALSE, pattern) {
package <- deparse(substitute(package))
ls(
pos = paste("package", package, sep = ":"),
all.names = all.names,
pattern = pattern
)
}
## table showing Na by default
.env$table_na <- function (..., exclude = NULL, useNA = "always", dnn = list.names(...), deparse.level = 1){
list.names <- function(...) {
l <- as.list(substitute(list(...)))[-1L]
nm <- names(l)
fixup <- if (is.null(nm))
seq_along(l)
else nm == ""
dep <- vapply(l[fixup], function(x) switch(deparse.level +
1, "", if (is.symbol(x)) as.character(x) else "",
deparse(x, nlines = 1)[1L]), "")
if (is.null(nm))
dep
else {
nm[fixup] <- dep
nm
}
}
if (!missing(exclude) && is.null(exclude))
useNA <- "always"
useNA <- match.arg(useNA)
args <- list(...)
if (!length(args))
stop("nothing to tabulate")
if (length(args) == 1L && is.list(args[[1L]])) {
args <- args[[1L]]
if (length(dnn) != length(args))
dnn <- if (!is.null(argn <- names(args)))
argn
else paste(dnn[1L], seq_along(args), sep = ".")
}
bin <- 0L
lens <- NULL
dims <- integer()
pd <- 1L
dn <- NULL
for (a in args) {
if (is.null(lens))
lens <- length(a)
else if (length(a) != lens)
stop("all arguments must have the same length")
cat <- if (is.factor(a)) {
if (any(is.na(levels(a))))
a
else {
if (is.null(exclude) && useNA != "no")
addNA(a, ifany = (useNA == "ifany"))
else {
if (useNA != "no")
a <- addNA(a, ifany = (useNA == "ifany"))
ll <- levels(a)
a <- factor(a, levels = ll[!(ll %in% exclude)],
exclude = if (useNA == "no")
NA)
}
}
}
else {
a <- factor(a, exclude = exclude)
if (useNA != "no")
addNA(a, ifany = (useNA == "ifany"))
else a
}
nl <- length(ll <- levels(cat))
dims <- c(dims, nl)
if (prod(dims) > .Machine$integer.max)
stop("attempt to make a table with >= 2^31 elements")
dn <- c(dn, list(ll))
bin <- bin + pd * (as.integer(cat) - 1L)
pd <- pd * nl
}
names(dn) <- dnn
bin <- bin[!is.na(bin)]
if (length(bin))
bin <- bin + 1L
y <- array(tabulate(bin, pd), dims, dimnames = dn)
class(y) <- "table"
y
}
## Open Finder to the current directory on mac
# .env$macopen <- function(...) if(Sys.info()[1]=="Darwin") system("open .")
.env$o <- function(...){
if(Sys.info()[1]=="Linux") system("nemo .")
if(Sys.info()[1]=="Windows") shell.exec(file = ".")
if(Sys.info()[1]=="Darwin") system("open .")
}
## Read data on clipboard.
.env$read_cb <- function(...) {
ismac <- Sys.info()[1]=="Darwin"
if (!ismac) read.table(file="clipboard", ...)
else read.table(pipe("pbpaste"), ...)
}
## List all rprofile functions
.env$rprofile.functions <- function(){
cat(paste0("User .Rprofile \n") ,sep="")
cat("s() - shortcut for summary\n",sep="")
cat("h() - shortcut for head\n",sep="")
cat("o() - shortcut for file explorer\n",sep="")
cat("table_na() - table showing Na by default\n", sep="")
cat("unrowname() - remove data frame row names\n",sep="")
cat("read_cb() - read from clipboard\n",sep="")
cat("lsa() - list objects and classes\n",sep="")
cat("lsp() - list all functions in a package\n",sep="")
cat("ggdens() - quick density plot\n",sep="")
cat("ggscat() - quick scatter plot\n",sep="")
cat("gghist() - quick histogram plot\n",sep="")
cat("ggbox() - quick boxplot\n",sep="")
}
## Attach all the variables above
attach(.env)
## .First() run at the start of every R session.
.First <- function() {
hello_message <- c(
"Hello, sunshine",
"Howdy, partner",
"Hey, howdy, hi",
"What’s kickin’, little chicken",
"Peek-a-boo",
"Howdy-doody",
"Hey there, freshman",
"Hi, mister",
"I come in peace",
"Put that cookie down",
"Ahoy, matey",
"Hiya",
"Ello, gov'nor",
"Top of the mornin’ to ya",
"What’s crackin’",
"GOOOOOD MORNING, VIETNAM",
"‘Sup, homeslice",
"This call may be recorded for training purposes",
"Howdy, howdy ,howdy",
"I'm Batman",
"At least, we meet for the first time for the last time",
"Hello, who's there, I'm talking",
"You know who this is",
"Ghostbusters, whatya want",
"Yo",
"Whaddup",
"Greetings and salutations",
"Doctor"
)
stringi <- paste0("\n", hello_message[sample(1:length(hello_message), 1)], " ",Sys.info()["user"][[1]],"! Loaded .Rprofile (v.",rprofile.version,") at ", strftime(Sys.time(),"%Y-%m-%d %H:%M:%S"), "\n")
cat(stringi)
}
## .Last() run at the end of the session
.Last <- function() {
goodbye_message <- c("See you later, alligator!", "After a while, crocodile.", "Stay out of trouble.", "I’m out of here.", "Okay...bye, fry guy!", "If I don’t see you around, I'll see you square.", "Stay classy.", "Fare thee well.", "Catch you on the rebound.", "Gotta go, buffalo.", "Peace out!", "Gotta hit the road.", "Long live and prosper!", "Well, I'm off!", "Smoke me a kipper, I'll be back for breakfast.", "Bye bye, butterfly.", "Gotta get going.", "To the winch, wench!", "It has been emotional, bye.", "Out to the door, dinosaur.", "Catch you on the flip side.", "Gotta bolt!", "See you soon, racoon.", "You're still here? It's over. Go home. Go!", "Don't get run over!", "Give a hug, ladybug.", "I gotta bounce.", "Toodle-pip!", "Calc you later!", "See you on the other side.", "Take care, butterfly.", "Have fun storming the castle!", "Adieu, cockatoo!", "Blow a kiss, goldfish.", "It's been a pleasure and a privilege to meet you.", "Don't get attacked by a bear, it's night-time!", "Be good and don't get caught.", "Thank you for your cooperation. Farewell.", "I look forward to our next meeting.", "I'm gonna make a like a bakery truck and haul buns.", "Chop chop, lollipop!", "Gotta roll!", "Can’t star, blue jay.", "Oh, and in case I don't see you—good afternoon, good evening, and good night!", "Influence everyone in a good way!", "Don't forget to come back!", "Once more unto the breach, dear friends!", "See ya, wouldn't wanna be ya.", "Peace out, girl scout!", "Adios, hippos.", "Time to scoot, little newt.", "Smell ya later!", "I gotta jet.", "Happy trails!", "Cheerio!", "Bye for now.", "Tootle-loo, kangaroo.", "Don't get lost on your way to class!", "Love, peace, and chicken grease.", "I'm off like a dirty shirt.", "See you when I see you.", "In a while, crocodile.", "Catch ya later, future dudes!", "Cya. (Clearly, this is just short for ‘see you,’ which makes no sense because you utter ‘cya’ and not write it. 
Oh, whatever!)", "As you wish, jellyfish!", "Later, skater!", "May the force be with you... always.", "Shine on, you crazy diamonds.", "Parting is such sweet sorrow, that I shall say good night till it be tomorrow.", "Don't let the door hit ya where the good lord split ya.", "Better shake, rattlesnake!", "Later, potato!", "Don't forget to be awesome.", "Later, nerds!", "Stay cool, my dude.", "Don't get cut by a blade of grass!", "Be sweet, parakeet.", "Be careful! Don't get mauled by a squirrel!", "See you later, aggregator!", "Don't trip on a raindrop!", "See you soon, baboon!", "Bye! I tolerate you!", "Gotta go, the power of the shower compels me.", "Make new friends on the sidewalk!", "I’m late for my bus, gigantopithecus!", "Move out, brussels sprout!", "Make sure the doormat says goodbye!", "I’ll show you to the door!", "Ciao ciao, brown cow!", "Screw you guys, I'm going home!", "I shall return.", "Catch you round like a rissole!", "Take it easy, greasy. You've got a long way to slide.", "Toodaloo, caribou!", "I'm outtie.", "Adios, amigos.", "That's all folks.", "Take care, polar bear!", "Peace out, rainbow trout!", "I'm outta here like spit through a trumpet.", "Au revoir!", "See you in the future.", "Begone!", "Until next time.", "So long, suckers!", "Hasta lasagna, don't get any on ya.", "Sayonara, muchachos!", "Next time, bring more cookies.", "Party easy, drive safe, and return with a smile on your face.", "After two, kangaroo!", "After three, chimpanzee!", "After four, dinosaur.", "Come back when you can't stay so long.", "Don’t forget to send a letter.", "Goodbye forever.", "See you in another life, brotha!", "We may not talk for a long time, but I hope we don't lose touch.", "Never look back!", "See you on the internet!", "Forever and forever farewell. If we do meet again, we'll smile indeed. 
If not, 'tis true parting was well made.", "You will do well.", "See you at the restaurant at the edge of the universe!", "I'd say goodbye, but you're not worth it.")
cat(paste0("\n ", goodbye_message[sample(1:length(goodbye_message), 1)]," You finished at ", strftime(Sys.time(),"%Y-%m-%d %H:%M:%S"), "\n"))
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this functionmakeCacheMatrix <- function(x = matrix()) {}
## Write a short comment describing this function cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'}
#Solution
# The makeCacheMatrix and Cachesolve functions are closure funtions that cache and compute the inverse of a matrix.
# MakeCacheMatrix creates a special "Matrix" object.
# makeCacheMatrix and cacheSolve work together to cache the (potentially
# expensive) inverse of a matrix so it is computed at most once.
#
# makeCacheMatrix creates a special "matrix" object: a list of four closures
# sharing the matrix `x` and its cached inverse.
#   set(y)      : replace the matrix and invalidate the cache
#   get()       : return the current matrix
#   setinv(inv) : store a computed inverse in the cache
#   getinv()    : return the cached inverse (NULL if not yet computed)
# BUG FIX: the original was missing this function's closing brace, so
# cacheSolve ended up nested inside it and the file failed to parse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL                     # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y                           # replace the matrix ...
    inverse <<- NULL                  # ... and invalidate the stale cache
  }
  get <- function() x
  setinv <- function(inv) inverse <<- inv
  getinv <- function() inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

# cacheSolve returns the inverse of the special "matrix" made by
# makeCacheMatrix, computing it with solve() only on the first call and
# serving the cached copy afterwards.  Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  # If a cached value exists, return it without recomputing.
  if (!is.null(inverse)) {
    message("Getting cached data...")
    return(inverse)
  }
  # Otherwise compute the inverse, store it in the cache, and return it.
  data <- x$get()
  inverse <- solve(data, ...)         # BUG FIX: was misspelled `invserse`,
                                      # so NULL was cached and returned
  x$setinv(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
Comu18/ProgrammingAssignment2
|
R
| false
| false
| 1,393
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this functionmakeCacheMatrix <- function(x = matrix()) {}
## Write a short comment describing this function cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'}
#Solution
# The makeCacheMatrix and Cachesolve functions are closure funtions that cache and compute the inverse of a matrix.
# MakeCacheMatrix creates a special "Matrix" object.
# makeCacheMatrix and cacheSolve work together to cache the (potentially
# expensive) inverse of a matrix so it is computed at most once.
#
# makeCacheMatrix creates a special "matrix" object: a list of four closures
# sharing the matrix `x` and its cached inverse.
#   set(y)      : replace the matrix and invalidate the cache
#   get()       : return the current matrix
#   setinv(inv) : store a computed inverse in the cache
#   getinv()    : return the cached inverse (NULL if not yet computed)
# BUG FIX: the original was missing this function's closing brace, so
# cacheSolve ended up nested inside it and the file failed to parse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL                     # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y                           # replace the matrix ...
    inverse <<- NULL                  # ... and invalidate the stale cache
  }
  get <- function() x
  setinv <- function(inv) inverse <<- inv
  getinv <- function() inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

# cacheSolve returns the inverse of the special "matrix" made by
# makeCacheMatrix, computing it with solve() only on the first call and
# serving the cached copy afterwards.  Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  # If a cached value exists, return it without recomputing.
  if (!is.null(inverse)) {
    message("Getting cached data...")
    return(inverse)
  }
  # Otherwise compute the inverse, store it in the cache, and return it.
  data <- x$get()
  inverse <- solve(data, ...)         # BUG FIX: was misspelled `invserse`,
                                      # so NULL was cached and returned
  x$setinv(inverse)
  inverse
}
|
#create html heat maps with leafletR
# Batch-generates one standalone HTML choropleth map of Manhattan real-estate
# median prices for every (building type, year) combination.
# NOTE(review): requires the CRAN packages RColorBrewer and leafletR plus the
# data files below; the hard-coded Windows path makes this machine-specific.
setwd("D:/CUNY Files/IS 608/All Final Project Files/create maps")
#the following libraries need to be added to shiny app for this source file to run
library(RColorBrewer)
library(leafletR)
#geojson file directory
leafdat<-"Manhattan2.geojson"
#import subdatM2 data file
data <- read.csv("subdatM2.csv")
#colnames(data)
#only looking at price for the heatmap
#create a zipcode data file with price fields only
z_data <-read.csv("z_stats_all.csv")
# Assumes every other column (3, 5, 7, ...) is a price field -- TODO confirm
# against the z_stats_all.csv layout.
col<-ncol(z_data)/2
rangep <- c(1:col)*2+1
rangep <- c(1,rangep)
cuts_data <- z_data[,rangep]
#has only price fields, created in Excel from z_data_all file which produced
#the subdatM2 file
#compute cuts for all years of data - *2013 data removed
m<-ncol(cuts_data)
# Quintile breaks computed ONCE over all years so the colour scale is
# comparable across every generated map.
cuts<-round(quantile(cuts_data[,2:m], probs = seq(0, 1, 0.20), na.rm = T), 0)
cuts[1]<-0 # ----- for this example make first cut zero
#testing
#BType <- "All"
#Year <- 2014
#set color palate
pal <- brewer.pal(5, "Reds")
#create a function to generate maps
# Build one HTML map and copy it into html_maps/.
#   Year  : integer year used to pick the price column (e.g. 2014)
#   BType : building type, one of "All", "Condo", "Coop", "Condop"
# Relies on the globals leafdat, cuts and pal defined above; called only for
# its side effects (leafletR writes the HTML, then it is copied/renamed).
leafmap_gen <- function(Year,BType){
#convert All to blank for column name
if(BType=='All'){
b <- "." } else {
b <- paste0(".",BType,".")
}
# Column name pattern "P[.BType.]Year" -- presumably matches the geojson
# property names; verify against Manhattan2.geojson.
P.BT.Year <- paste0("P",b,Year)
# ----- Create the cuts
#cuts<-round(quantile(data[,P.BT.Year], probs = seq(0, 1, 0.20), na.rm = T), 0)
#cuts[1]<-0 # ----- for this example make first cut zero
#cuts were calculated on all years of data
popup<-c("POSTAL",P.BT.Year)
#changing these popup labels seem to require going back and changing the
#original colum names and then recreate the z_data files and then the geojson
#pal <- brewer.pal(5, "Reds")
sty<-styleGrad(prop=P.BT.Year, breaks=cuts, right=FALSE, style.par="col",
style.val=pal, leg=paste(Year," Median Price ($ in 000s)"), lwd=1)
# ----- Create the map and load into browser
map<-leaflet(data=leafdat,style=sty,
title="Manhattan Real Estate Prices", base.map="osm",
incl.data=TRUE, popup=popup)
#copy file to html_maps directory
# leafletR always writes to the same output file; copy it to a unique name.
oldfname<-"Manhattan_Real_Estate_Prices/Manhattan_Real_Estate_Prices.html"
newfname<-paste0("html_maps","/",paste0("MedPrice",BType,Year),".html")
file.copy(oldfname, newfname,overwrite = T)
# ----- to look at the map you can use this code
#browseURL(map)
}
#use a for loop to generate the column name and loop each building type and year
#to create a heat map for each one.
#years
years <- c(2003:2014)
nyears<-length(years)
#building type
btype<-c("All","Condo","Coop","Condop")
nbtype<-length(btype)
#test function
#Year=2004
#BType="All"
#leafmap_gen(Year,BType)
# Generate all 12 years x 4 building types = 48 maps.
for (i in 1:nyears) {
for (j in 1:nbtype){
Year <- years[i]
BType <- btype[j]
#print(paste0(Year,BType))
leafmap_gen(Year,BType)
}
}
|
/create maps/leafmapR_create_all.R
|
no_license
|
phoenixpei/Manhattan-Real-Estate-Project
|
R
| false
| false
| 2,804
|
r
|
#create html heat maps with leafletR
# Batch-generates one standalone HTML choropleth map of Manhattan real-estate
# median prices for every (building type, year) combination.
# NOTE(review): requires the CRAN packages RColorBrewer and leafletR plus the
# data files below; the hard-coded Windows path makes this machine-specific.
setwd("D:/CUNY Files/IS 608/All Final Project Files/create maps")
#the following libraries need to be added to shiny app for this source file to run
library(RColorBrewer)
library(leafletR)
#geojson file directory
leafdat<-"Manhattan2.geojson"
#import subdatM2 data file
data <- read.csv("subdatM2.csv")
#colnames(data)
#only looking at price for the heatmap
#create a zipcode data file with price fields only
z_data <-read.csv("z_stats_all.csv")
# Assumes every other column (3, 5, 7, ...) is a price field -- TODO confirm
# against the z_stats_all.csv layout.
col<-ncol(z_data)/2
rangep <- c(1:col)*2+1
rangep <- c(1,rangep)
cuts_data <- z_data[,rangep]
#has only price fields, created in Excel from z_data_all file which produced
#the subdatM2 file
#compute cuts for all years of data - *2013 data removed
m<-ncol(cuts_data)
# Quintile breaks computed ONCE over all years so the colour scale is
# comparable across every generated map.
cuts<-round(quantile(cuts_data[,2:m], probs = seq(0, 1, 0.20), na.rm = T), 0)
cuts[1]<-0 # ----- for this example make first cut zero
#testing
#BType <- "All"
#Year <- 2014
#set color palate
pal <- brewer.pal(5, "Reds")
#create a function to generate maps
# Build one HTML map and copy it into html_maps/.
#   Year  : integer year used to pick the price column (e.g. 2014)
#   BType : building type, one of "All", "Condo", "Coop", "Condop"
# Relies on the globals leafdat, cuts and pal defined above; called only for
# its side effects (leafletR writes the HTML, then it is copied/renamed).
leafmap_gen <- function(Year,BType){
#convert All to blank for column name
if(BType=='All'){
b <- "." } else {
b <- paste0(".",BType,".")
}
# Column name pattern "P[.BType.]Year" -- presumably matches the geojson
# property names; verify against Manhattan2.geojson.
P.BT.Year <- paste0("P",b,Year)
# ----- Create the cuts
#cuts<-round(quantile(data[,P.BT.Year], probs = seq(0, 1, 0.20), na.rm = T), 0)
#cuts[1]<-0 # ----- for this example make first cut zero
#cuts were calculated on all years of data
popup<-c("POSTAL",P.BT.Year)
#changing these popup labels seem to require going back and changing the
#original colum names and then recreate the z_data files and then the geojson
#pal <- brewer.pal(5, "Reds")
sty<-styleGrad(prop=P.BT.Year, breaks=cuts, right=FALSE, style.par="col",
style.val=pal, leg=paste(Year," Median Price ($ in 000s)"), lwd=1)
# ----- Create the map and load into browser
map<-leaflet(data=leafdat,style=sty,
title="Manhattan Real Estate Prices", base.map="osm",
incl.data=TRUE, popup=popup)
#copy file to html_maps directory
# leafletR always writes to the same output file; copy it to a unique name.
oldfname<-"Manhattan_Real_Estate_Prices/Manhattan_Real_Estate_Prices.html"
newfname<-paste0("html_maps","/",paste0("MedPrice",BType,Year),".html")
file.copy(oldfname, newfname,overwrite = T)
# ----- to look at the map you can use this code
#browseURL(map)
}
#use a for loop to generate the column name and loop each building type and year
#to create a heat map for each one.
#years
years <- c(2003:2014)
nyears<-length(years)
#building type
btype<-c("All","Condo","Coop","Condop")
nbtype<-length(btype)
#test function
#Year=2004
#BType="All"
#leafmap_gen(Year,BType)
# Generate all 12 years x 4 building types = 48 maps.
for (i in 1:nyears) {
for (j in 1:nbtype){
Year <- years[i]
BType <- btype[j]
#print(paste0(Year,BType))
leafmap_gen(Year,BType)
}
}
|
source ("./mlknn.R");
# Train a two-level stacked ML-kNN model.
#
# Level 0: an ML-kNN model fit on the raw features X_train.
# Level 1: a second ML-kNN model fit on the level-0 label-probability
#          predictions, intended to capture label correlations.
#
#   X_train, Y_train : training features / multi-label targets
#   l0_k, l1_k       : neighbourhood sizes for the level-0 / level-1 models
#   s0, s1           : smoothing parameters for the two levels
#   l0_dist, l1_dist : distance metrics for the two levels
#   use_bn, type_of_l1, bn_method : metadata stored for predict(); the plain
#          stacked model trained here corresponds to use_bn = FALSE (the
#          BN-based per-label models are not trained by this function)
#
# Returns an object of class "mlknn_stack_t".
mlknn_stack_train <- function (X_train, Y_train, l0_k, l1_k, s0 = 1, s1 = 1,
                               l0_dist = "euclidean", l1_dist = "euclidean",
                               use_bn = FALSE, type_of_l1 = NA, bn_method = NA)
{
    retval <- list ();
    # BUG FIX: s0 was accepted but ignored (s = 1 was hard-coded here).
    mlknn_m <- mlknn_train (X = X_train, Y = Y_train, k = l0_k, s = s0, dist = l0_dist);
    # Level-0 out-of-the-box predictions on the training data become the
    # level-1 feature matrix.
    pred_mlknn_train <- predict (mlknn_m, X_train, type = "prob");
    l1_target <- Y_train;
    retval$l0_pred_type <- "prob";
    # Placeholders kept so predict.mlknn_stack_t finds the fields it reads
    # even for the non-BN model.
    this_l1_m_list <- NA;
    bn_dep <- NA;
    l1_lab_deps <- list ();
    this_l1_m <- mlknn_train (X = pred_mlknn_train, Y = l1_target, k = l1_k, s = s1, dist = l1_dist);
    retval$l0_m <- mlknn_m;
    retval$l1_m <- this_l1_m;
    retval$l0_k <- l0_k;
    retval$l1_k <- l1_k;
    retval$s0 <- s0;
    retval$s1 <- s1;
    # BUG FIX: these three were read from undefined globals (type_of_l1,
    # use_bn, bn_method did not exist), crashing every call; they are now
    # defaulted function arguments.
    retval$type_of_l1 <- type_of_l1;
    retval$use_bn <- use_bn;
    retval$bn_method <- bn_method;
    retval$bn_dep <- bn_dep;
    retval$l1_lab_deps <- l1_lab_deps;
    retval$this_l1_m_list <- this_l1_m_list;
    class (retval) <- "mlknn_stack_t";
    return (retval);
}
# Predict with a trained two-level stacked ML-kNN model.
#
#   model  : object of class "mlknn_stack_t" from mlknn_stack_train()
#   X_test : test feature matrix
#   type   : prediction type forwarded to the level-1 model ("raw", "prob", ...)
#
# The level-0 predictions on X_test are fed to the level-1 model.  When
# model$use_bn is TRUE, a separate level-1 model is applied per label instead.
predict.mlknn_stack_t <- function (model, X_test, type = "raw")
{
    l1_data <- predict (model$l0_m, X_test, model$l0_pred_type);
    pred_final <- NA;
    if (model$use_bn == FALSE)
    {
        pred_final <- predict (model$l1_m, l1_data, type = type);
    }
    else
    {
        # One level-1 model per label; per-label predictions are collected
        # column-wise into a data frame.
        pred_final <- as.data.frame (matrix (NA, nrow = nrow (X_test), ncol = length (model$this_l1_m_list)));
        colnames (pred_final) <- names (model$this_l1_m_list);
        for (this_label in names (model$this_l1_m_list))
        {
            cat ("Processing Level 1, Label \"", this_label, "\"\n");
            this_dep <- model$l1_lab_deps[[this_label]];
            # If the dependency set is empty, make the label depend on itself.
            # BUG FIX: the test was inverted -- it overwrote NON-empty
            # dependency sets and left empty ones empty.
            if (length (this_dep) == 0)
            {
                this_dep <- this_label;
            }
            # pred_final[,this_label] <- predict (model$this_l1_m_list[[this_label]], l1_data[,this_dep,drop=F], type = "class")[,];
            # which.max over the class-probability columns, shifted so labels
            # are coded 0/1.
            pred_final[,this_label] <- apply (predict (model$this_l1_m_list[[this_label]], l1_data, type = "prob"), 1, which.max) - 1;
        }
    }
    return (pred_final);
}
|
/src/mlknn_stack.R
|
no_license
|
BinDuan/stack_mlknn
|
R
| false
| false
| 2,173
|
r
|
source ("./mlknn.R");
# Train a two-level stacked ML-kNN model.
#
# Level 0: an ML-kNN model fit on the raw features X_train.
# Level 1: a second ML-kNN model fit on the level-0 label-probability
#          predictions, intended to capture label correlations.
#
#   X_train, Y_train : training features / multi-label targets
#   l0_k, l1_k       : neighbourhood sizes for the level-0 / level-1 models
#   s0, s1           : smoothing parameters for the two levels
#   l0_dist, l1_dist : distance metrics for the two levels
#   use_bn, type_of_l1, bn_method : metadata stored for predict(); the plain
#          stacked model trained here corresponds to use_bn = FALSE (the
#          BN-based per-label models are not trained by this function)
#
# Returns an object of class "mlknn_stack_t".
mlknn_stack_train <- function (X_train, Y_train, l0_k, l1_k, s0 = 1, s1 = 1,
                               l0_dist = "euclidean", l1_dist = "euclidean",
                               use_bn = FALSE, type_of_l1 = NA, bn_method = NA)
{
    retval <- list ();
    # BUG FIX: s0 was accepted but ignored (s = 1 was hard-coded here).
    mlknn_m <- mlknn_train (X = X_train, Y = Y_train, k = l0_k, s = s0, dist = l0_dist);
    # Level-0 out-of-the-box predictions on the training data become the
    # level-1 feature matrix.
    pred_mlknn_train <- predict (mlknn_m, X_train, type = "prob");
    l1_target <- Y_train;
    retval$l0_pred_type <- "prob";
    # Placeholders kept so predict.mlknn_stack_t finds the fields it reads
    # even for the non-BN model.
    this_l1_m_list <- NA;
    bn_dep <- NA;
    l1_lab_deps <- list ();
    this_l1_m <- mlknn_train (X = pred_mlknn_train, Y = l1_target, k = l1_k, s = s1, dist = l1_dist);
    retval$l0_m <- mlknn_m;
    retval$l1_m <- this_l1_m;
    retval$l0_k <- l0_k;
    retval$l1_k <- l1_k;
    retval$s0 <- s0;
    retval$s1 <- s1;
    # BUG FIX: these three were read from undefined globals (type_of_l1,
    # use_bn, bn_method did not exist), crashing every call; they are now
    # defaulted function arguments.
    retval$type_of_l1 <- type_of_l1;
    retval$use_bn <- use_bn;
    retval$bn_method <- bn_method;
    retval$bn_dep <- bn_dep;
    retval$l1_lab_deps <- l1_lab_deps;
    retval$this_l1_m_list <- this_l1_m_list;
    class (retval) <- "mlknn_stack_t";
    return (retval);
}
# Predict with a trained two-level stacked ML-kNN model.
#
#   model  : object of class "mlknn_stack_t" from mlknn_stack_train()
#   X_test : test feature matrix
#   type   : prediction type forwarded to the level-1 model ("raw", "prob", ...)
#
# The level-0 predictions on X_test are fed to the level-1 model.  When
# model$use_bn is TRUE, a separate level-1 model is applied per label instead.
predict.mlknn_stack_t <- function (model, X_test, type = "raw")
{
    l1_data <- predict (model$l0_m, X_test, model$l0_pred_type);
    pred_final <- NA;
    if (model$use_bn == FALSE)
    {
        pred_final <- predict (model$l1_m, l1_data, type = type);
    }
    else
    {
        # One level-1 model per label; per-label predictions are collected
        # column-wise into a data frame.
        pred_final <- as.data.frame (matrix (NA, nrow = nrow (X_test), ncol = length (model$this_l1_m_list)));
        colnames (pred_final) <- names (model$this_l1_m_list);
        for (this_label in names (model$this_l1_m_list))
        {
            cat ("Processing Level 1, Label \"", this_label, "\"\n");
            this_dep <- model$l1_lab_deps[[this_label]];
            # If the dependency set is empty, make the label depend on itself.
            # BUG FIX: the test was inverted -- it overwrote NON-empty
            # dependency sets and left empty ones empty.
            if (length (this_dep) == 0)
            {
                this_dep <- this_label;
            }
            # pred_final[,this_label] <- predict (model$this_l1_m_list[[this_label]], l1_data[,this_dep,drop=F], type = "class")[,];
            # which.max over the class-probability columns, shifted so labels
            # are coded 0/1.
            pred_final[,this_label] <- apply (predict (model$this_l1_m_list[[this_label]], l1_data, type = "prob"), 1, which.max) - 1;
        }
    }
    return (pred_final);
}
|
library(shiny)
library(shinydashboard)
library(DT)
library(shinythemes)
library(shinyWidgets)
require(ggplot2)
require(shiny)
library(plotly)
library(ggvis)
# UI: a shinydashboard page -- yellow skin, a header message dropdown, a
# searchable sidebar with four menu entries, and a tabbed body.  Only the
# "Software" tab has real content (data table, summary, structure); the
# other three tabs are placeholders.
shinyUI(
dashboardPage(title = "Demo",skin="yellow",
dashboardHeader(title="My First Dashboard!",dropdownMenu(
type="message",messageItem(from="Yokesh",message = "This is my 1st dashboard that i have created!")
)),
dashboardSidebar(
sidebarMenu(
sidebarSearchForm("searchText","buttonSearch","Search"),
# menuItem("Histogram plot",tabName = "dashboard",icon = icon("dashboard")),
menuItem("Exploratory data analysis!",tabName = "Software",icon = icon("th")),
menuItem("Additional analysis!",tabName = "Voice"),
menuItem("Detailed analysis",tabName="ab",badgeLabel = "New",badgeColor = "green"),
menuItem("Raw data",tabName = "ac")
)),
dashboardBody(
tabItems(
# "Software" tab: three sub-tabs bound to the server outputs 'faith',
# 'filteredTableSelected', 'summ' and 'struc'.
tabItem(tabName = "Software",
tabsetPanel(type="tabs",
tabPanel("Data",
fluidPage(
fluidRow(tags$h1("Original Data table & Filtered Data table")),
fluidRow(column(10,DT::dataTableOutput('faith'))),
fluidRow(column(8,verbatimTextOutput('filteredTableSelected')))
)),
tabPanel("Summary",verbatimTextOutput("summ")),
tabPanel("Structure",verbatimTextOutput("struc")))),
# Placeholder tabs.
tabItem(tabName = "Voice",h1("Page under construction!")),
tabItem(tabName = "ab",h1("New analysis coming soon!")),
tabItem(tabName = "ac",h1("Page under construction!"))
)
)
)
)
# ==== server.R ===============================================================
library(shiny)
library(shinydashboard)
library(DT)
library(ggplot2)
library(stringr)
library(plotly)
library(ggplot2)
library(shinyBS)
library(reshape2)
library(tidyverse)
library(plyr)
# Server: serves mtcars (with row names promoted to a 'car.names' column and
# selected columns converted to factors) as a DT table, echoes the rows the
# user selects, and renders summary()/str() of the data.
shinyServer(function(input,output,session){
myDF=mtcars
# View(myDF)
myDF=cbind(car.names = rownames(myDF), myDF) ## Assigning the row names to a new column.
# View(myDF)
rownames(myDF)=NULL ## And then removing the row names away from my data frame
# Specifying the column names and then converting to categorical in myDF dataframe table
cols=c("cyl","vs","am","gear","carb")
myDF[cols]=lapply(myDF[cols], factor)
# str(myDF)
# subseting the numerical attributes alone from the myDF and storing it in a new variable named sub_myDF
# Keep only the columns whose class matches `colclasses`.
subset_colclasses <- function(myDF, colclasses="numeric") {
myDF[,sapply(myDF, function(vec, test) class(vec) %in% test, test=colclasses)]
}
sub_myDF=subset_colclasses(myDF)
## creating a reactive function
mydata <- reactive({myDF})
## passing the reactive function created above over down here.
output$faith=DT::renderDataTable({
DT::datatable(mydata(), options = list(scrollX = TRUE, sScrollY = '75vh', scrollCollapse = TRUE), extensions = list("Scroller"))
})
## creating a reactive function for storing the values that we are selecting and then subseting on the original data.
filteredTable_selected <- reactive({
ids <- input$faith_rows_selected
mydata()[ids,]
})
## Defining the filtered data table along in a verbatim text input
output$filteredTableSelected <- renderPrint({
filteredTable_selected()
})
output$summ=renderPrint({
summary(myDF)
})
output$struc=renderPrint({
str(myDF)
})
# NOTE(review): the closing "})" of shinyServer(function(...){ never appears
# here -- the file looks truncated; add "})" before running this app.
|
/Exercise/app1.R
|
no_license
|
avijandiran/My-shiny-dashboards
|
R
| false
| false
| 4,153
|
r
|
library(shiny)
library(shinydashboard)
library(DT)
library(shinythemes)
library(shinyWidgets)
require(ggplot2)
require(shiny)
library(plotly)
library(ggvis)
# UI: a shinydashboard page -- yellow skin, a header message dropdown, a
# searchable sidebar with four menu entries, and a tabbed body.  Only the
# "Software" tab has real content (data table, summary, structure); the
# other three tabs are placeholders.
shinyUI(
dashboardPage(title = "Demo",skin="yellow",
dashboardHeader(title="My First Dashboard!",dropdownMenu(
type="message",messageItem(from="Yokesh",message = "This is my 1st dashboard that i have created!")
)),
dashboardSidebar(
sidebarMenu(
sidebarSearchForm("searchText","buttonSearch","Search"),
# menuItem("Histogram plot",tabName = "dashboard",icon = icon("dashboard")),
menuItem("Exploratory data analysis!",tabName = "Software",icon = icon("th")),
menuItem("Additional analysis!",tabName = "Voice"),
menuItem("Detailed analysis",tabName="ab",badgeLabel = "New",badgeColor = "green"),
menuItem("Raw data",tabName = "ac")
)),
dashboardBody(
tabItems(
# "Software" tab: three sub-tabs bound to the server outputs 'faith',
# 'filteredTableSelected', 'summ' and 'struc'.
tabItem(tabName = "Software",
tabsetPanel(type="tabs",
tabPanel("Data",
fluidPage(
fluidRow(tags$h1("Original Data table & Filtered Data table")),
fluidRow(column(10,DT::dataTableOutput('faith'))),
fluidRow(column(8,verbatimTextOutput('filteredTableSelected')))
)),
tabPanel("Summary",verbatimTextOutput("summ")),
tabPanel("Structure",verbatimTextOutput("struc")))),
# Placeholder tabs.
tabItem(tabName = "Voice",h1("Page under construction!")),
tabItem(tabName = "ab",h1("New analysis coming soon!")),
tabItem(tabName = "ac",h1("Page under construction!"))
)
)
)
)
# ==== server.R ===============================================================
library(shiny)
library(shinydashboard)
library(DT)
library(ggplot2)
library(stringr)
library(plotly)
library(ggplot2)
library(shinyBS)
library(reshape2)
library(tidyverse)
library(plyr)
# Server: serves mtcars (with row names promoted to a 'car.names' column and
# selected columns converted to factors) as a DT table, echoes the rows the
# user selects, and renders summary()/str() of the data.
shinyServer(function(input,output,session){
myDF=mtcars
# View(myDF)
myDF=cbind(car.names = rownames(myDF), myDF) ## Assigning the row names to a new column.
# View(myDF)
rownames(myDF)=NULL ## And then removing the row names away from my data frame
# Specifying the column names and then converting to categorical in myDF dataframe table
cols=c("cyl","vs","am","gear","carb")
myDF[cols]=lapply(myDF[cols], factor)
# str(myDF)
# subseting the numerical attributes alone from the myDF and storing it in a new variable named sub_myDF
# Keep only the columns whose class matches `colclasses`.
subset_colclasses <- function(myDF, colclasses="numeric") {
myDF[,sapply(myDF, function(vec, test) class(vec) %in% test, test=colclasses)]
}
sub_myDF=subset_colclasses(myDF)
## creating a reactive function
mydata <- reactive({myDF})
## passing the reactive function created above over down here.
output$faith=DT::renderDataTable({
DT::datatable(mydata(), options = list(scrollX = TRUE, sScrollY = '75vh', scrollCollapse = TRUE), extensions = list("Scroller"))
})
## creating a reactive function for storing the values that we are selecting and then subseting on the original data.
filteredTable_selected <- reactive({
ids <- input$faith_rows_selected
mydata()[ids,]
})
## Defining the filtered data table along in a verbatim text input
output$filteredTableSelected <- renderPrint({
filteredTable_selected()
})
output$summ=renderPrint({
summary(myDF)
})
output$struc=renderPrint({
str(myDF)
})
# NOTE(review): the closing "})" of shinyServer(function(...){ never appears
# here -- the file looks truncated; add "})" before running this app.
|
###############################################################################
# Title: PSYC 7765 - Lesson 13
# Author: Jeffrey R. Spies (jspies@virginia.edu)
# History: date()
# Fri Nov 18 13:02:38 2011 - Created the file - JRS
###############################################################################
rm(list=ls()) # NOTE(review): wipes the global environment; tolerable in a lesson script, avoid elsewhere
setwd("~/Projects/funstats2011/Lesson12") # NOTE(review): user-specific path
require(plyr) # NOTE(review): library() is preferred -- require() only warns on failure
require(ggplot2)
###############################################################################
# Binary variables: transitions
###############################################################################
isHappy <- c(TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE)

# Cross-tabulate consecutive states: rows = state at time t, cols = t + 1.
transTable <- function(v){
  # BUG FIX (precedence): `1:length(v)-1` means `(1:length(v)) - 1`, i.e.
  # 0:(n-1); it only worked because a 0 index is silently dropped.
  # v[-length(v)] ("all but last") paired with v[-1] ("all but first") is
  # the intended construction.
  table(v[-length(v)], v[-1])
}
transTable(isHappy)
prop.table(transTable(isHappy), 1)

# Flatten a transition table into a data frame of counts and row-wise
# transition proportions, plus readable from/to state columns.
#   fromNames : optional labels for the states, in factor-level order
#               (e.g. c('sad', 'happy') for a FALSE/TRUE vector).
transDf <- function(from, fromNames=NULL){
  tab <- transTable(from)
  out <- data.frame(counts=as.vector(tab), props=as.vector(prop.table(tab,1)))
  # BUG FIX: fromNames was accepted but ignored, and the from/to columns
  # promised by the example below were never produced.  as.vector() walks
  # the table column-major, hence times/each below.
  states <- if (is.null(fromNames)) rownames(tab) else fromNames
  out$from <- rep(states, times = ncol(tab))
  out$to <- rep(states, each = nrow(tab))
  out
}
levels(as.factor(isHappy))
transDf(isHappy, c('sad', 'happy'))
# count from to
# 4      F    F
# 1      F    T
transDf(isHappy)
aborc <- c('a', 'a', 'a', 'b', 'c')
transDf(aborc, c('a', 'b', 'c'))
###############################################################################
# Merging datasets
###############################################################################
# Toy longitudinal data: 5 subjects x 3 time points.
df <- data.frame(
Subject=rep(1:5, each=3),
Gender=rep(sample(c('M', 'F'), 5, replace=T), each=3),
Time=1:3,
Depression=rnorm(15),
Anxiety=rnorm(15)
)
df.age <- data.frame(id=1:20, Age=runif(20, 20, 30))
# Join on Subject/id; with merge()'s defaults only matching ids are kept.
merge(df, df.age, by.x="Subject", by.y="id")
###############################################################################
# Wide to Tall
###############################################################################
df <- data.frame(
Subject=rep(1:5, each=3),
Gender=rep(sample(c('M', 'F'), 5, replace=T), each=3),
Time=1:3,
Depression=rnorm(15),
Anxiety=rnorm(15),
stringsAsFactors = F
)
# Graph Time by Anxiety
df
# Long -> wide: one row per (Subject, Gender), one column per measure x Time.
wide.df <- reshape(df,
idvar=c("Subject", "Gender"),
timevar="Time",
direction="wide"
)
# reshape() stores how it widened the data in the "reshapeWide" attribute,
# so this round-trips back to long form automatically ...
reshape(wide.df, direction="long")
attributes(wide.df)
# ... but once that attribute is stripped, the bare long-direction call
# no longer has the recipe,
attr(wide.df, "reshapeWide") <- NULL
attributes(wide.df)
reshape(wide.df, direction="long")
# and idvar/varying must be supplied explicitly instead.
tall.df <- reshape(
wide.df,
idvar=c("Subject"),
varying=3:8,
direction="long"
)
###############################################################################
# Another Example
###############################################################################
# Ratings of three characters (prefixes c/p/t) on two traits (smart, friendly).
dat <- data.frame(ID=1:5, gender=sample(c('Male', 'Female'), 5, replace=T),
csmart=rnorm(5), cfriendly=rnorm(5),
psmart=rnorm(5), pfriendly=rnorm(5),
tsmart=rnorm(5), tfriendly=rnorm(5)
)
# Wide -> long with a list-valued `varying`: each list element collects the
# columns belonging to one output measure (all "smart", all "friendly").
dat.tall <- reshape(dat,
idvar="ID",
#timevar="character",
#times=c("cheer", "prof", "teacher"),
varying=list(
grep("smart", names(dat), value=T),
grep("friendly", names(dat), value=T)
),
#v.names = c("smart","friendly"),
direction="long"
)
# A safer way (I think)
# Rename columns to "measure.time" form so reshape() can parse them itself.
dat2 <- dat
names(dat2) <- c('ID', 'gender',
'smart.cheer', 'friendly.cheer',
'smart.prof', 'friendly.prof',
'smart.teacher', 'friendly.teacher'
)
dat2.tall <- reshape(dat2,
idvar="ID",
timevar="character",
varying=3:8,
direction="long"
)
# Clean up
# Drop the auto-generated "ID.time" row names.
row.names(dat.tall) <- NULL
row.names(dat2.tall) <- NULL
###############################################################################
# Using regular expressions to rename dataframes
###############################################################################
names(dat)
# Rename rater-prefixed columns: the backreference \\1 keeps the captured
# trait name and appends the rater as a ".suffix" instead of a prefix.
gsub('^c(smart|friendly)$', '\\1.cheer', names(dat))
gsub('^p(smart|friendly)$', '\\1.prof', names(dat))
gsub('^t(smart|friendly)$', '\\1.teacher', names(dat))
###############################################################################
# Conditional basics: if
###############################################################################
# `if` takes a single scalar condition; braces make the branches explicit.
if(TRUE){
  cat('hi', fill=T)
}
if(FALSE){
  cat('hi', fill=T)
}
if(FALSE){
  answer <- 'hello'
}else{
  answer <- 'goodbye'
}
answer
score <- 9
sex <- 'M'
if(score > 5 & sex == 'F'){
  result <- 1
}else{
  if(score > 5 & sex == 'M'){
    result <- 2
  }else{
    result <- NA  # BUG FIX: was misspelled `reult`, silently creating a new
                  # variable and leaving `result` unset on this branch
  }
}
# Equivalent nested form: test the score once, then branch on sex.
if(score > 5){
  if(sex == 'F'){
    result <- 1
  }else{
    result <- 2
  }
}else{
  result <- NA
}
# If you're looking to simply fill a variable, you might prefer:
(answer <- ifelse(TRUE, 'hello', 'goodbye'))
(answer <- ifelse(FALSE, 'hello', 'goodbye'))
# Vectorised conditional vs. the direct logical comparison -- both columns
# are identical here.
df <- data.frame(id=1:10, sex=sample(c('M','F'), 10, replace=T))
df$isMale <- ifelse(df$sex=='M', T, F)
df$isMale2 <- df$sex == 'M'
|
/Lesson13/Lesson13.R
|
no_license
|
JeffSpies/funstats2011
|
R
| false
| false
| 4,835
|
r
|
###############################################################################
# Title: PSYC 7765 - Lesson 13
# Author: Jeffrey R. Spies (jspies@virginia.edu)
# History: date()
# Fri Nov 18 13:02:38 2011 - Created the file - JRS
###############################################################################
rm(list=ls()) # NOTE(review): wipes the global environment; tolerable in a lesson script, avoid elsewhere
setwd("~/Projects/funstats2011/Lesson12") # NOTE(review): user-specific path
require(plyr) # NOTE(review): library() is preferred -- require() only warns on failure
require(ggplot2)
###############################################################################
# Binary variables: transitions
###############################################################################
isHappy <- c(TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE)

# Cross-tabulate consecutive states: rows = state at time t, cols = t + 1.
transTable <- function(v){
  # BUG FIX (precedence): `1:length(v)-1` means `(1:length(v)) - 1`, i.e.
  # 0:(n-1); it only worked because a 0 index is silently dropped.
  # v[-length(v)] ("all but last") paired with v[-1] ("all but first") is
  # the intended construction.
  table(v[-length(v)], v[-1])
}
transTable(isHappy)
prop.table(transTable(isHappy), 1)

# Flatten a transition table into a data frame of counts and row-wise
# transition proportions, plus readable from/to state columns.
#   fromNames : optional labels for the states, in factor-level order
#               (e.g. c('sad', 'happy') for a FALSE/TRUE vector).
transDf <- function(from, fromNames=NULL){
  tab <- transTable(from)
  out <- data.frame(counts=as.vector(tab), props=as.vector(prop.table(tab,1)))
  # BUG FIX: fromNames was accepted but ignored, and the from/to columns
  # promised by the example below were never produced.  as.vector() walks
  # the table column-major, hence times/each below.
  states <- if (is.null(fromNames)) rownames(tab) else fromNames
  out$from <- rep(states, times = ncol(tab))
  out$to <- rep(states, each = nrow(tab))
  out
}
levels(as.factor(isHappy))
transDf(isHappy, c('sad', 'happy'))
# count from to
# 4      F    F
# 1      F    T
transDf(isHappy)
aborc <- c('a', 'a', 'a', 'b', 'c')
transDf(aborc, c('a', 'b', 'c'))
###############################################################################
# Merging datasets
###############################################################################
# Toy longitudinal data: 5 subjects x 3 time points.
df <- data.frame(
Subject=rep(1:5, each=3),
Gender=rep(sample(c('M', 'F'), 5, replace=T), each=3),
Time=1:3,
Depression=rnorm(15),
Anxiety=rnorm(15)
)
df.age <- data.frame(id=1:20, Age=runif(20, 20, 30))
# Join on Subject/id; with merge()'s defaults only matching ids are kept.
merge(df, df.age, by.x="Subject", by.y="id")
###############################################################################
# Wide to Tall
###############################################################################
df <- data.frame(
Subject=rep(1:5, each=3),
Gender=rep(sample(c('M', 'F'), 5, replace=T), each=3),
Time=1:3,
Depression=rnorm(15),
Anxiety=rnorm(15),
stringsAsFactors = F
)
# Graph Time by Anxiety
df
# Long -> wide: one row per (Subject, Gender), one column per measure x Time.
wide.df <- reshape(df,
idvar=c("Subject", "Gender"),
timevar="Time",
direction="wide"
)
# reshape() stores how it widened the data in the "reshapeWide" attribute,
# so this round-trips back to long form automatically ...
reshape(wide.df, direction="long")
attributes(wide.df)
# ... but once that attribute is stripped, the bare long-direction call
# no longer has the recipe,
attr(wide.df, "reshapeWide") <- NULL
attributes(wide.df)
reshape(wide.df, direction="long")
# and idvar/varying must be supplied explicitly instead.
tall.df <- reshape(
wide.df,
idvar=c("Subject"),
varying=3:8,
direction="long"
)
###############################################################################
# Another Example
###############################################################################
# Ratings of three characters (prefixes c/p/t) on two traits (smart, friendly).
dat <- data.frame(ID=1:5, gender=sample(c('Male', 'Female'), 5, replace=T),
csmart=rnorm(5), cfriendly=rnorm(5),
psmart=rnorm(5), pfriendly=rnorm(5),
tsmart=rnorm(5), tfriendly=rnorm(5)
)
# Wide -> long with a list-valued `varying`: each list element collects the
# columns belonging to one output measure (all "smart", all "friendly").
dat.tall <- reshape(dat,
idvar="ID",
#timevar="character",
#times=c("cheer", "prof", "teacher"),
varying=list(
grep("smart", names(dat), value=T),
grep("friendly", names(dat), value=T)
),
#v.names = c("smart","friendly"),
direction="long"
)
# A safer way (I think)
# Rename columns to "measure.time" form so reshape() can parse them itself.
dat2 <- dat
names(dat2) <- c('ID', 'gender',
'smart.cheer', 'friendly.cheer',
'smart.prof', 'friendly.prof',
'smart.teacher', 'friendly.teacher'
)
dat2.tall <- reshape(dat2,
idvar="ID",
timevar="character",
varying=3:8,
direction="long"
)
# Clean up
# Drop the auto-generated "ID.time" row names.
row.names(dat.tall) <- NULL
row.names(dat2.tall) <- NULL
###############################################################################
# Using regular expressions to rename dataframes
###############################################################################
names(dat)
# Rename rater-prefixed columns: the backreference \\1 keeps the captured
# trait name and appends the rater as a ".suffix" instead of a prefix.
gsub('^c(smart|friendly)$', '\\1.cheer', names(dat))
gsub('^p(smart|friendly)$', '\\1.prof', names(dat))
gsub('^t(smart|friendly)$', '\\1.teacher', names(dat))
###############################################################################
# Conditional basics: if
###############################################################################
# `if` takes a single scalar condition; braces make the branches explicit.
if(TRUE){
  cat('hi', fill=T)
}
if(FALSE){
  cat('hi', fill=T)
}
if(FALSE){
  answer <- 'hello'
}else{
  answer <- 'goodbye'
}
answer
score <- 9
sex <- 'M'
if(score > 5 & sex == 'F'){
  result <- 1
}else{
  if(score > 5 & sex == 'M'){
    result <- 2
  }else{
    result <- NA  # BUG FIX: was misspelled `reult`, silently creating a new
                  # variable and leaving `result` unset on this branch
  }
}
# Equivalent nested form: test the score once, then branch on sex.
if(score > 5){
  if(sex == 'F'){
    result <- 1
  }else{
    result <- 2
  }
}else{
  result <- NA
}
# If you're looking to simply fill a variable, you might prefer:
(answer <- ifelse(TRUE, 'hello', 'goodbye'))
(answer <- ifelse(FALSE, 'hello', 'goodbye'))
# Vectorised conditional vs. the direct logical comparison -- both columns
# are identical here.
df <- data.frame(id=1:10, sex=sample(c('M','F'), 10, replace=T))
df$isMale <- ifelse(df$sex=='M', T, F)
df$isMale2 <- df$sex == 'M'
|
# Multispecies, spatially implicit IPM
# This version makes it possible to assume "no overlap"
# for intraspecific competition only or intra- and interspecific competition
#This version allows you to "grow" each species in isolation or in mixture to obtain
#the effect of interspecific competition on growth -- basically the difference in
#FROM PETER: Basically, we simulate a species' intrinsic growth rate (geometric mean across the year specific rates)
#and then simulate the species' invasion growth rate (growth rate in the presence of competitors at their
#equilibrium abundances). The bigger the difference, the more sensitive a species is to competition.
#If there is no difference, then interspecific competition has zero effect. You can also get facilitation
#(invasion growth rate > intrinsic growth rate).
#We do this in a fluctuating environment, so these intrinsic growth rates are different than in the constant environment
#Sensitivity to competition: S = 1 - (r[invasion]-r[alone])
# NOTE(review): assumes the working directory starts one level below the
# project root; a relative setwd() like this is fragile outside interactive use.
setwd(dir = "../")
# TRUE  -> simulate invasion growth rates (growth when rare, with resident
#          competitors at their equilibrium abundances)
# FALSE -> simulate intrinsic growth rates (each species grown alone)
invade <- TRUE
# IMPROVED: scalar branching uses if/else; the original abused ifelse() with
# assignment side effects hidden inside its yes/no arguments.
outfile <- if (invade) {
  "./simulations/invasionGrowthRates_YrFluct_PairWise.csv"
} else {
  "./simulations/intrinsicGrowthRates_YrFluct.csv"
}
# ATT 9/26/14
# Global simulation constants for the multispecies IPM.
A=10000 #Area of 100cm x 100cm quadrat
tlimit=1000 ## number of years to simulate
burn.in=100 # years to cut before calculations
sppList=c("ARTR","HECO","POSE","PSSP")
bigM=c(75,75,50,50) #Set matrix dimension for each species
maxSize=c(3000,202,260,225) # in cm^2: PSSP=225 HECO=202 POSE=260 ARTR=3000 # minSize=0.2 cm^2
Nyrs=22 # number of observation years of vital-rate estimates -- TODO confirm against the fitted models
doGroup=NA # NA for spatial avg., values 1-6 for a specific group
constant=F # constant environment
NoOverlap.Inter=T # no overlap of heterospecifics
compScale=F # not well implemented, but for rescaling competition coefficients
# Holds per-species results: one row per focal species, one column per
# heterospecific competitor (filled inside the loops below).
maxR <- matrix(NA, nrow = length(sppList), ncol=length(sppList)-1)
for(jjjj in 1:length(sppList)){
#####################################
fixSpp=sppList[jjjj] ##need to change if for other species
fixCov=0.0001 # in % cover
otherSpp=sppList[-jjjj]
#####################################
for(kkkk in 1:length(otherSpp)){
sppListSub <- c(fixSpp, otherSpp[kkkk])
#============================================================
# (I) LOAD VITAL RATE PARAMETERS & FUNCTIONS
#============================================================
Nspp=length(sppList)
NsppSub=length(sppListSub)
# set up survival parameters and function
source("./survival/import2ipm_noOverlap.r")
# set up growth parameters and function
source("./growth/import2ipm_noOverlap.r")
# set up recruitment parameters and function
source("./recruitment/import2ipm.r")
# get stable size distribution for fixed species
infile=paste(fixSpp,"_stable_size.csv",sep="")
sizeD=read.csv(infile)
# model spatial group variation (or not)
if(!is.na(doGroup)){
Spars$intcpt=Spars$intcpt+Spars$intcpt.gr[doGroup,]
Gpars$intcpt=Gpars$intcpt+Gpars$intcpt.gr[doGroup,]
Rpars$intcpt.yr=Rpars$intcpt.yr+matrix(Rpars$intcpt.gr[doGroup,],Nyrs,Nspp,byrow=T)
}
# PERTURB PARAMETERS -------------------------------------
if(constant==T){
#turn off random year effects
Rpars$intcpt.yr=matrix(Rpars$intcpt.mu,Nyrs,Nspp,byrow=T)
Gpars$intcpt.yr[]=0;Gpars$slope.yr[]=0
Spars$intcpt.yr[]=0;Spars$slope.yr[]=0
}
if(compScale==T){
tmp <- matrix(rep(-0.1, length(Gpars$nb)), nrow = 4)
for(i in 1:Nspp){
tmp[i,i] <- Gpars$nb[i,i]
}
Gpars$nb <- tmp
}
#============================================================================================#
# (II) Simulation length, Matrix size and initial vectors
#============================================================================================#
v=v.r=b.r=expv=Cr=WmatG=WmatS=list(4)
h=r.L=r.U=Ctot=numeric(4)
for(i in 1:Nspp){
# minimum (0.9*minimum size from data) and maximum sizes (1.1*maximum size from data)
L=log(0.2)
U=log(maxSize[i])*1.1
# boundary points b and mesh points y. Note: b chops up the size interval (L-U) into bigM-equal-sized portions.
b = L+c(0:bigM[i])*(U-L)/bigM[i]
# v calculates the middle of each n-equal-sized portion.
v[[i]] = 0.5*(b[1:bigM[i]]+b[2:(bigM[i]+1)])
# step size for midpoint rule. (see equations 4 and 5 in Ellner and Rees (2006) Am Nat.)
h[i] = v[[i]][2]-v[[i]][1]
# variables for Wr approximation
b.r[[i]]=sqrt(exp(b)/pi)
v.r[[i]]=sqrt(exp(v[[i]])/pi)
expv[[i]]=exp(v[[i]])
r.L[i] = sqrt(exp(L)/pi)
r.U[i] = sqrt(exp(U)/pi)
WmatG[[i]]=matrix(NA,length(v.r[[i]]),Nspp) # storage of size-specific W values for each focal species
WmatS[[i]]=matrix(NA,length(v.r[[i]]),Nspp)
} # next species
tmp=range(v.r)
size.range=seq(tmp[1],tmp[2],length=50) # range across all possible sizes
#============================================================================================#
# (III) Utility functions
#============================================================================================#
# load the necessary libraries
library(boot)
library(mvtnorm)
library(msm)
library(statmod)
## combined kernel
# Combined IPM kernel value at (new size v, old size u) for species doSpp:
# recruitment f(v,u) plus survival S(u) times growth G(v,u).
# NOTE(review): f(), S() and G() are defined in the sourced vital-rate scripts
# (import2ipm*.r); muWG/muWS are the size-specific crowding terms — confirm
# their argument conventions against those files.
make.K.values=function(v,u,muWG,muWS, #state variables
Rpars,rpa,Gpars,Spars,doYear,doSpp){ #growth arguments
f(v,u,Rpars,rpa,doSpp)+S(u,muWS,Spars,doYear,doSpp)*G(v,u,muWG,Gpars,doYear,doSpp)
}
# Function to make iteration matrix based only on mean crowding
# Build the discretized IPM transition matrix for species doSpp.
# Expands the per-mesh-point crowding matrices so they line up with outer(),
# evaluates the combined kernel on the v x v mesh, and applies the
# midpoint-rule weight h[doSpp] (h is read from the enclosing environment).
make.K.matrix=function(v,muWG,muWS,Rpars,rpa,Gpars,Spars,doYear,doSpp) {
muWG=expandW(v,v,muWG)  # replicate growth-crowding values for every (v,u) pair
muWS=expandW(v,v,muWS)  # replicate survival-crowding values likewise
K.matrix=outer(v,v,make.K.values,muWG,muWS,Rpars,rpa,Gpars,Spars,doYear,doSpp)
return(h[doSpp]*K.matrix)
}
# Function to format the W matrix for the outer product
# Expand a length(u) x Nspp crowding matrix W so it matches the layout that
# outer(v, v, ...) uses: every row of W is repeated length(v) times, giving a
# (length(u)*length(v)) x Nspp matrix. Stops if W's row count does not match u.
expandW <- function(v, u, W) {
  if (dim(W)[1] != length(u)) stop("Check size of W")
  n.spp <- dim(W)[2]
  # Repeating each element of the column-major vector length(v) times is
  # equivalent to the original vector->matrix->transpose round trip.
  expanded <- rep(as.vector(W), each = length(v))
  matrix(expanded, nrow = length(u) * length(v), ncol = n.spp)
}
# Function to calculate size-dependent crowding, assuming no overlap
# Size-dependent crowding (growth kernel) felt by a genet of radius r of
# species i from species j, assuming genets cannot overlap: numeric integral
# of neighbour cover Cr[[j]] weighted by the exponential distance kernel up to
# r + r.U[j], plus the closed-form tail beyond that distance.
# NOTE(review): reads alphaG, Cr, Ctot and r.U from the enclosing environment;
# those are rebuilt every timestep in the simulation loop.
wrijG=function(r,i,j){
return(2*pi*integrate(function(z) z*exp(-alphaG[i,j]*(z^2))*Cr[[j]](z-r),r,r+r.U[j])$value+
pi*Ctot[j]*exp(-alphaG[i,j]*((r+r.U[j])^2))/alphaG[i,j]);
}
# Vectorized over r so the whole size mesh can be evaluated in one call.
WrijG=Vectorize(wrijG,vectorize.args="r")
# Same as wrijG but with the survival-kernel decay parameters alphaS.
# NOTE(review): reads alphaS, Cr, Ctot and r.U from the enclosing environment.
wrijS=function(r,i,j){
return(2*pi*integrate(function(z) z*exp(-alphaS[i,j]*(z^2))*Cr[[j]](z-r),r,r+r.U[j])$value+
pi*Ctot[j]*exp(-alphaS[i,j]*((r+r.U[j])^2))/alphaS[i,j]);
}
# Vectorized over r so the whole size mesh can be evaluated in one call.
WrijS=Vectorize(wrijS,vectorize.args="r")
# Function to sum total cover of each species
# Total cover of each species: midpoint-rule sum of the density vector
# nt[[i]] weighted by genet area exp(v[[i]]), scaled by mesh step h[i] and
# divided by plot area A. Returns a numeric vector, one entry per species.
#
# Improvement: iterate over seq_along(v) instead of the global Nspp, so the
# function depends only on its own arguments (identical result here, since
# Nspp == length(v)), and works for any number of species.
sumCover=function(v,nt,h,A){
out=lapply(seq_along(v),function(i,v,nt,h,A) h[i]*sum(nt[[i]]*exp(v[[i]]))/A,v=v,nt=nt,h=h,A=A)
return(unlist(out))
}
# Function to sum total density of each species
# Total density of each species: midpoint-rule sum h[i] * sum(nt[[i]]).
# Returns a numeric vector, one entry per species.
#
# Improvement: iterate over seq_along(nt) instead of the global Nspp, so the
# function depends only on its own arguments (identical result here, since
# Nspp == length(nt)), and works for any number of species.
sumN=function(nt,h){
out=lapply(seq_along(nt),function(i,nt,h) h[i]*sum(nt[[i]]),nt=nt,h=h)
return(unlist(out))
}
# Function to calculate size variance of each species
# Size variance of each species around its mean size Xbar, weighted by the
# density vector nt and normalised by total density N.
#
# Improvement: iterate over seq_along(v) instead of the global Nspp, so the
# function depends only on its own arguments (identical result, since
# Nspp == length(v)).
# NOTE(review): the deviation term is exp(v[[i]] - Xbar[i])^2, not
# (exp(v[[i]]) - Xbar[i])^2; since v is log size and Xbar is arithmetic-scale
# mean size, this looks suspect — confirm the intended formula before relying
# on this quantity (behavior deliberately preserved here).
varN=function(v,nt,h,Xbar,N){
out=lapply(seq_along(v),function(i,v,nt,h,Xbar,N) h[i]*sum((exp(v[[i]]-Xbar[i])^2)*nt[[i]])/N[i],v=v,nt=nt,h=h,Xbar=Xbar,N=N)
return(unlist(out))
}
# Function to do an image plot of a matrix in the usual orientation, A(1,1) at top left
# Image plot of matrix A in conventional orientation (A[1,1] at top left).
# x, y give the mesh coordinates; x1/y1 pad the plot limits by half a cell on
# each side, and reversing ylim flips the vertical axis so row 1 is on top.
# Side effect only (draws on the current graphics device); no return value.
matrix.image=function(x,y,A,col=topo.colors(100),...) {
nx=length(x); ny=length(y);
x1=c(1.5*x[1]-0.5*x[2],1.5*x[nx]-0.5*x[nx-1]);
y1=c(1.5*y[1]-0.5*y[2],1.5*y[ny]-0.5*y[ny-1]);
image(list(x=x,y=y,z=t(A)),xlim=x1,ylim=rev(y1),col=col,bty="u",...);
}
#============================================================================================#
# (IV) Calculate the equilibrium areas.
#============================================================================================#
fixI=which(sppList==fixSpp)
ifelse(invade==TRUE, otherN <- 0.1, otherN <- 0)
noResI=which(sppList!=otherSpp[kkkk] & sppList!=fixSpp)
## initial population density vector
nt=v
for(i in 1:Nspp) nt[[i]][]=otherN
# set fix spp to stable size distribution
nt.fix=sizeD$freq
# initialize at fix cover value
tmp=fixCov*100/(h[fixI]*sum(nt.fix*exp(v[[fixI]])))
nt.fix=nt.fix*tmp
nt[[fixI]]=nt.fix
nt[[noResI[1]]]=nt[[noResI[1]]]*0
nt[[noResI[2]]]=nt[[noResI[2]]]*0
new.nt=nt
# set up matrix to record cover
covSave=matrix(NA,0,(2+2*Nspp))
colnames(covSave)=c("time","yrParams",paste(sppList,".t0",sep=""),paste(sppList,".t1",sep=""))
covSave=rbind(covSave,c(1,NA,sumCover(v,nt,h,A),rep(NA,Nspp)) )
# initial densities
Nsave=matrix(NA,tlimit,Nspp)
Nsave[1,]=sumN(nt,h)
yrSave=rep(NA,tlimit)
for (i in 2:(tlimit)){
#draw from observed year effects
allYrs=c(1:Nyrs)
doYear=sample(allYrs,1)
yrSave[i]=doYear
#get recruits per area
# cover=covSave[i-1,]; N=Nsave[i-1,]
# rpa=get.rpa(Rpars,cover,doYear)
cover=covSave[i-1,3:6]; N=Nsave[i-1,]
rpa=get.rpa(Rpars,cover,doYear)
#calculate size-specific crowding
alphaG=Gpars$alpha
alphaS=Spars$alpha
if(NoOverlap.Inter==F){#T: heterospecific genets cannot overlap; F: overlap allowed
for(ii in 1:Nspp){
# first do all overlap W's
Xbar=cover*A/N # multiply by A to get cover back in cm^2
varX=varN(v,nt,h,Xbar,N)
muWG = pi*Xbar*N/(A*alphaG[ii,])
muWS = pi*Xbar*N/(A*alphaS[ii,])
muWG[is.na(muWG)]=0
muWS[is.na(muWS)]=0
WmatG[[ii]]=matrix(muWG,nrow=length(v[[ii]]),ncol=Nspp,byrow=T)
WmatS[[ii]]=matrix(muWS,nrow=length(v[[ii]]),ncol=Nspp,byrow=T)
# now do conspecific no overlap W
Ctot[ii]=h[ii]*sum(expv[[ii]]*nt[[ii]])
Cr[[ii]]=splinefun(b.r[[ii]],h[ii]*c(0,cumsum(expv[[ii]]*nt[[ii]])),method="natural")
WmatG[[ii]][,ii]=WrijG(v.r[[ii]],ii,ii)/A
WmatS[[ii]][,ii]=WrijS(v.r[[ii]],ii,ii)/A
}
}else{
for(ii in 1:Nspp){
Ctot[ii]=h[ii]*sum(expv[[ii]]*nt[[ii]])
Cr[[ii]]=splinefun(b.r[[ii]],h[ii]*c(0,cumsum(expv[[ii]]*nt[[ii]])),method="natural")
}
for(jj in 1:Nspp){
WfunG=splinefun(size.range,WrijG(size.range,jj,jj))
WfunS=splinefun(size.range,WrijS(size.range,jj,jj))
for(ii in 1:Nspp) {
WmatG[[ii]][,jj]=WfunG(v.r[[ii]])/A
WmatS[[ii]][,jj]=WfunS(v.r[[ii]])/A
}
}
} # end NoOverlap if
for(doSpp in 1:Nspp){
if(cover[doSpp]>0){
# make kernels and project
K.matrix=make.K.matrix(v[[doSpp]],WmatG[[doSpp]],WmatS[[doSpp]],Rpars,rpa,Gpars,Spars,doYear,doSpp)
new.nt[[doSpp]]=K.matrix%*%nt[[doSpp]]
# sizeSave[[doSpp]][,i]=new.nt[[doSpp]]/sum(new.nt[[doSpp]])
}
} # next species
tmp=c(i,doYear,sumCover(v,nt,h,A),sumCover(v,new.nt,h,A))
covSave=rbind(covSave,tmp) # store the cover as cm^2/cm^2
Nsave[i,]=sumN(nt,h)
nt=new.nt
# return focal spp to fix cover value
tmp=fixCov*100/(h[fixI]*sum(nt[[fixI]]*exp(v[[fixI]])))
nt[[fixI]]=nt[[fixI]]*tmp
# return all non-pairwise spp to zero cover value.
# BUG FIX: noResI holds TWO species indices (the four-species list minus the
# focal and the one competitor). `nt[[noResI]]` with a length-2 index performs
# recursive subsetting (nt[[noResI[1]]][[noResI[2]]]) — it zeroed a single
# mesh point of one species instead of both species' density vectors, unlike
# the correct per-element initialisation earlier in the script.
nt[[noResI[1]]]=nt[[noResI[1]]]*0
nt[[noResI[2]]]=nt[[noResI[2]]]*0
# return all other species to zero cover value
# if(invade==FALSE){
# tmp2 <- which(c(1:4) != fixI)
# nt[[tmp2[1]]] <- nt[[tmp2[1]]]*0
# nt[[tmp2[2]]] <- nt[[tmp2[2]]]*0
# nt[[tmp2[3]]] <- nt[[tmp2[3]]]*0
# }
print(paste(jjjj,kkkk,i));flush.console()
if(sum(is.na(nt))>0) browser()
} # next time step
#
## Figures ==============================================================
par(mfrow=c(1,2),tcl=-0.2,mgp=c(2,0.5,0))
myCol=c("forestgreen","darkred","blue","pink")
#cover
## here 3:6 may need change up to the species number
boxplot(as.data.frame(100*covSave[(burn.in+1):tlimit,3:6]),ylab="Cover (%)",names=sppList,col=myCol)
abline(h=0)
#density
boxplot(as.data.frame(Nsave[(burn.in+1):tlimit,]),ylab="Density",names=sppList,col=myCol)
abline(h=0)
#low density growth rate of focal species
tmp1 <- which(colnames(covSave)==paste(fixSpp, ".t0", sep=""))
tmp2 <- which(colnames(covSave)==paste(fixSpp, ".t1", sep=""))
pgrMean <- mean(log(covSave[burn.in:tlimit,tmp2]/covSave[burn.in:tlimit,tmp1]), na.rm=TRUE)
maxR[jjjj,kkkk] <- pgrMean
}#end otherspp loop
}#end fix spp look
out <- matrix(NA, 4, 4)
for(i in 1:nrow(maxR)){
out[i,i] <- 0
out[i,-i] <- maxR[i,]
}
output <- as.data.frame(out)
rownames(output) <- sppList
colnames(output) <- sppList
write.table(output, outfile, row.names=TRUE, sep=",")
|
/PopModels/idahoIPM/simulations/ipm_noOverlap_competitionEffects_PairWise.R
|
permissive
|
atredennick/Diversity_Stability
|
R
| false
| false
| 13,453
|
r
|
# Multispecies, spatially implicit IPM
# This version makes it possible to assume "no overlap"
# for intraspecific competition only or intra- and interspecific competition
#This version allows you to "grow" each species in isolation or in mixture to obtain
#the effect of interspecific competition on growth -- basically the difference in
#FROM PETER: Basically, we simulate a species' intrinsic growth rate (geometric mean across the year specific rates)
#and then simulate the species' invasion growth rate (growth rate in the presence of competitors at their
#equilibrium abundances). The bigger the difference, the more sensitive a species is to competition.
#If there is no difference, then interspecific competition has zero effect. You can also get facilitation
#(invasion growth rate > intrinsic growth rate).
#We do this in a fluctuating environment, so these intrinsic growth rates are different than in the constant environment
#Sensitivity to competition: S = 1 - (r[invasion]-r[alone])
setwd(dir = "../")
invade=TRUE #FALSE for intrinsic growth rate; TRUE for invasion growth rate
ifelse(invade==TRUE,
outfile <- "./simulations/invasionGrowthRates_YrFluct_PairWise.csv",
outfile <- "./simulations/intrinsicGrowthRates_YrFluct.csv")
# ATT 9/26/14
A=10000 #Area of 100cm x 100cm quadrat
tlimit=1000 ## number of years to simulate
burn.in=100 # years to cut before calculations
sppList=c("ARTR","HECO","POSE","PSSP")
bigM=c(75,75,50,50) #Set matrix dimension for each species
maxSize=c(3000,202,260,225) # in cm^2: PSSP=225 HECO=202 POSE=260 ARTR=3000 # minSize=0.2 cm^2
Nyrs=22
doGroup=NA # NA for spatial avg., values 1-6 for a specific group
constant=F # constant environment
NoOverlap.Inter=T # no overlap of heterospecifics
compScale=F # not well implemented, but for rescaling competition coefficients
maxR <- matrix(NA, nrow = length(sppList), ncol=length(sppList)-1)
for(jjjj in 1:length(sppList)){
#####################################
fixSpp=sppList[jjjj] ##need to change if for other species
fixCov=0.0001 # in % cover
otherSpp=sppList[-jjjj]
#####################################
for(kkkk in 1:length(otherSpp)){
sppListSub <- c(fixSpp, otherSpp[kkkk])
#============================================================
# (I) LOAD VITAL RATE PARAMETERS & FUNCTIONS
#============================================================
Nspp=length(sppList)
NsppSub=length(sppListSub)
# set up survival parameters and function
source("./survival/import2ipm_noOverlap.r")
# set up growth parameters and function
source("./growth/import2ipm_noOverlap.r")
# set up recruitment parameters and function
source("./recruitment/import2ipm.r")
# get stable size distribution for fixed species
infile=paste(fixSpp,"_stable_size.csv",sep="")
sizeD=read.csv(infile)
# model spatial group variation (or not)
if(!is.na(doGroup)){
Spars$intcpt=Spars$intcpt+Spars$intcpt.gr[doGroup,]
Gpars$intcpt=Gpars$intcpt+Gpars$intcpt.gr[doGroup,]
Rpars$intcpt.yr=Rpars$intcpt.yr+matrix(Rpars$intcpt.gr[doGroup,],Nyrs,Nspp,byrow=T)
}
# PERTURB PARAMETERS -------------------------------------
if(constant==T){
#turn off random year effects
Rpars$intcpt.yr=matrix(Rpars$intcpt.mu,Nyrs,Nspp,byrow=T)
Gpars$intcpt.yr[]=0;Gpars$slope.yr[]=0
Spars$intcpt.yr[]=0;Spars$slope.yr[]=0
}
if(compScale==T){
tmp <- matrix(rep(-0.1, length(Gpars$nb)), nrow = 4)
for(i in 1:Nspp){
tmp[i,i] <- Gpars$nb[i,i]
}
Gpars$nb <- tmp
}
#============================================================================================#
# (II) Simulation length, Matrix size and initial vectors
#============================================================================================#
v=v.r=b.r=expv=Cr=WmatG=WmatS=list(4)
h=r.L=r.U=Ctot=numeric(4)
for(i in 1:Nspp){
# minimum (0.9*minimum size from data) and maximum sizes (1.1*maximum size from data)
L=log(0.2)
U=log(maxSize[i])*1.1
# boundary points b and mesh points y. Note: b chops up the size interval (L-U) into bigM-equal-sized portions.
b = L+c(0:bigM[i])*(U-L)/bigM[i]
# v calculates the middle of each n-equal-sized portion.
v[[i]] = 0.5*(b[1:bigM[i]]+b[2:(bigM[i]+1)])
# step size for midpoint rule. (see equations 4 and 5 in Ellner and Rees (2006) Am Nat.)
h[i] = v[[i]][2]-v[[i]][1]
# variables for Wr approximation
b.r[[i]]=sqrt(exp(b)/pi)
v.r[[i]]=sqrt(exp(v[[i]])/pi)
expv[[i]]=exp(v[[i]])
r.L[i] = sqrt(exp(L)/pi)
r.U[i] = sqrt(exp(U)/pi)
WmatG[[i]]=matrix(NA,length(v.r[[i]]),Nspp) # storage of size-specific W values for each focal species
WmatS[[i]]=matrix(NA,length(v.r[[i]]),Nspp)
} # next species
tmp=range(v.r)
size.range=seq(tmp[1],tmp[2],length=50) # range across all possible sizes
#============================================================================================#
# (III) Utility functions
#============================================================================================#
# load the necessary libraries
library(boot)
library(mvtnorm)
library(msm)
library(statmod)
## combined kernel
make.K.values=function(v,u,muWG,muWS, #state variables
Rpars,rpa,Gpars,Spars,doYear,doSpp){ #growth arguments
f(v,u,Rpars,rpa,doSpp)+S(u,muWS,Spars,doYear,doSpp)*G(v,u,muWG,Gpars,doYear,doSpp)
}
# Function to make iteration matrix based only on mean crowding
make.K.matrix=function(v,muWG,muWS,Rpars,rpa,Gpars,Spars,doYear,doSpp) {
muWG=expandW(v,v,muWG)
muWS=expandW(v,v,muWS)
K.matrix=outer(v,v,make.K.values,muWG,muWS,Rpars,rpa,Gpars,Spars,doYear,doSpp)
return(h[doSpp]*K.matrix)
}
# Function to format the W matrix for the outer product
# Expand a length(u) x Nspp crowding matrix W so it matches the layout that
# outer(v, v, ...) uses: every row of W is repeated length(v) times, giving a
# (length(u)*length(v)) x Nspp matrix. Stops if W's row count does not match u.
expandW <- function(v, u, W) {
  if (dim(W)[1] != length(u)) stop("Check size of W")
  n.spp <- dim(W)[2]
  # Repeating each element of the column-major vector length(v) times is
  # equivalent to the original vector->matrix->transpose round trip.
  expanded <- rep(as.vector(W), each = length(v))
  matrix(expanded, nrow = length(u) * length(v), ncol = n.spp)
}
# Function to calculate size-dependent crowding, assuming no overlap
wrijG=function(r,i,j){
return(2*pi*integrate(function(z) z*exp(-alphaG[i,j]*(z^2))*Cr[[j]](z-r),r,r+r.U[j])$value+
pi*Ctot[j]*exp(-alphaG[i,j]*((r+r.U[j])^2))/alphaG[i,j]);
}
WrijG=Vectorize(wrijG,vectorize.args="r")
wrijS=function(r,i,j){
return(2*pi*integrate(function(z) z*exp(-alphaS[i,j]*(z^2))*Cr[[j]](z-r),r,r+r.U[j])$value+
pi*Ctot[j]*exp(-alphaS[i,j]*((r+r.U[j])^2))/alphaS[i,j]);
}
WrijS=Vectorize(wrijS,vectorize.args="r")
# Function to sum total cover of each species
sumCover=function(v,nt,h,A){
out=lapply(1:Nspp,function(i,v,nt,h,A) h[i]*sum(nt[[i]]*exp(v[[i]]))/A,v=v,nt=nt,h=h,A=A)
return(unlist(out))
}
# Function to sum total density of each species
sumN=function(nt,h){
out=lapply(1:Nspp,function(i,nt,h) h[i]*sum(nt[[i]]),nt=nt,h=h)
return(unlist(out))
}
# Function to calculate size variance of each species
varN=function(v,nt,h,Xbar,N){
out=lapply(1:Nspp,function(i,v,nt,h,Xbar,N) h[i]*sum((exp(v[[i]]-Xbar[i])^2)*nt[[i]])/N[i],v=v,nt=nt,h=h,Xbar=Xbar,N=N)
return(unlist(out))
}
# Function to do an image plot of a matrix in the usual orientation, A(1,1) at top left
matrix.image=function(x,y,A,col=topo.colors(100),...) {
nx=length(x); ny=length(y);
x1=c(1.5*x[1]-0.5*x[2],1.5*x[nx]-0.5*x[nx-1]);
y1=c(1.5*y[1]-0.5*y[2],1.5*y[ny]-0.5*y[ny-1]);
image(list(x=x,y=y,z=t(A)),xlim=x1,ylim=rev(y1),col=col,bty="u",...);
}
#============================================================================================#
# (IV) Calculate the equilibrium areas.
#============================================================================================#
fixI=which(sppList==fixSpp)
ifelse(invade==TRUE, otherN <- 0.1, otherN <- 0)
noResI=which(sppList!=otherSpp[kkkk] & sppList!=fixSpp)
## initial population density vector
nt=v
for(i in 1:Nspp) nt[[i]][]=otherN
# set fix spp to stable size distribution
nt.fix=sizeD$freq
# initialize at fix cover value
tmp=fixCov*100/(h[fixI]*sum(nt.fix*exp(v[[fixI]])))
nt.fix=nt.fix*tmp
nt[[fixI]]=nt.fix
nt[[noResI[1]]]=nt[[noResI[1]]]*0
nt[[noResI[2]]]=nt[[noResI[2]]]*0
new.nt=nt
# set up matrix to record cover
covSave=matrix(NA,0,(2+2*Nspp))
colnames(covSave)=c("time","yrParams",paste(sppList,".t0",sep=""),paste(sppList,".t1",sep=""))
covSave=rbind(covSave,c(1,NA,sumCover(v,nt,h,A),rep(NA,Nspp)) )
# initial densities
Nsave=matrix(NA,tlimit,Nspp)
Nsave[1,]=sumN(nt,h)
yrSave=rep(NA,tlimit)
for (i in 2:(tlimit)){
#draw from observed year effects
allYrs=c(1:Nyrs)
doYear=sample(allYrs,1)
yrSave[i]=doYear
#get recruits per area
# cover=covSave[i-1,]; N=Nsave[i-1,]
# rpa=get.rpa(Rpars,cover,doYear)
cover=covSave[i-1,3:6]; N=Nsave[i-1,]
rpa=get.rpa(Rpars,cover,doYear)
#calculate size-specific crowding
alphaG=Gpars$alpha
alphaS=Spars$alpha
if(NoOverlap.Inter==F){#T: heterospecific genets cannot overlap; F: overlap allowed
for(ii in 1:Nspp){
# first do all overlap W's
Xbar=cover*A/N # multiply by A to get cover back in cm^2
varX=varN(v,nt,h,Xbar,N)
muWG = pi*Xbar*N/(A*alphaG[ii,])
muWS = pi*Xbar*N/(A*alphaS[ii,])
muWG[is.na(muWG)]=0
muWS[is.na(muWS)]=0
WmatG[[ii]]=matrix(muWG,nrow=length(v[[ii]]),ncol=Nspp,byrow=T)
WmatS[[ii]]=matrix(muWS,nrow=length(v[[ii]]),ncol=Nspp,byrow=T)
# now do conspecific no overlap W
Ctot[ii]=h[ii]*sum(expv[[ii]]*nt[[ii]])
Cr[[ii]]=splinefun(b.r[[ii]],h[ii]*c(0,cumsum(expv[[ii]]*nt[[ii]])),method="natural")
WmatG[[ii]][,ii]=WrijG(v.r[[ii]],ii,ii)/A
WmatS[[ii]][,ii]=WrijS(v.r[[ii]],ii,ii)/A
}
}else{
for(ii in 1:Nspp){
Ctot[ii]=h[ii]*sum(expv[[ii]]*nt[[ii]])
Cr[[ii]]=splinefun(b.r[[ii]],h[ii]*c(0,cumsum(expv[[ii]]*nt[[ii]])),method="natural")
}
for(jj in 1:Nspp){
WfunG=splinefun(size.range,WrijG(size.range,jj,jj))
WfunS=splinefun(size.range,WrijS(size.range,jj,jj))
for(ii in 1:Nspp) {
WmatG[[ii]][,jj]=WfunG(v.r[[ii]])/A
WmatS[[ii]][,jj]=WfunS(v.r[[ii]])/A
}
}
} # end NoOverlap if
for(doSpp in 1:Nspp){
if(cover[doSpp]>0){
# make kernels and project
K.matrix=make.K.matrix(v[[doSpp]],WmatG[[doSpp]],WmatS[[doSpp]],Rpars,rpa,Gpars,Spars,doYear,doSpp)
new.nt[[doSpp]]=K.matrix%*%nt[[doSpp]]
# sizeSave[[doSpp]][,i]=new.nt[[doSpp]]/sum(new.nt[[doSpp]])
}
} # next species
tmp=c(i,doYear,sumCover(v,nt,h,A),sumCover(v,new.nt,h,A))
covSave=rbind(covSave,tmp) # store the cover as cm^2/cm^2
Nsave[i,]=sumN(nt,h)
nt=new.nt
# return focal spp to fix cover value
tmp=fixCov*100/(h[fixI]*sum(nt[[fixI]]*exp(v[[fixI]])))
nt[[fixI]]=nt[[fixI]]*tmp
# return all non-pairwise spp to zero cover value.
# BUG FIX: noResI holds TWO species indices (the four-species list minus the
# focal and the one competitor). `nt[[noResI]]` with a length-2 index performs
# recursive subsetting (nt[[noResI[1]]][[noResI[2]]]) — it zeroed a single
# mesh point of one species instead of both species' density vectors, unlike
# the correct per-element initialisation earlier in the script.
nt[[noResI[1]]]=nt[[noResI[1]]]*0
nt[[noResI[2]]]=nt[[noResI[2]]]*0
# return all other species to zero cover value
# if(invade==FALSE){
# tmp2 <- which(c(1:4) != fixI)
# nt[[tmp2[1]]] <- nt[[tmp2[1]]]*0
# nt[[tmp2[2]]] <- nt[[tmp2[2]]]*0
# nt[[tmp2[3]]] <- nt[[tmp2[3]]]*0
# }
print(paste(jjjj,kkkk,i));flush.console()
if(sum(is.na(nt))>0) browser()
} # next time step
#
## Figures ==============================================================
par(mfrow=c(1,2),tcl=-0.2,mgp=c(2,0.5,0))
myCol=c("forestgreen","darkred","blue","pink")
#cover
## here 3:6 may need change up to the species number
boxplot(as.data.frame(100*covSave[(burn.in+1):tlimit,3:6]),ylab="Cover (%)",names=sppList,col=myCol)
abline(h=0)
#density
boxplot(as.data.frame(Nsave[(burn.in+1):tlimit,]),ylab="Density",names=sppList,col=myCol)
abline(h=0)
#low density growth rate of focal species
tmp1 <- which(colnames(covSave)==paste(fixSpp, ".t0", sep=""))
tmp2 <- which(colnames(covSave)==paste(fixSpp, ".t1", sep=""))
pgrMean <- mean(log(covSave[burn.in:tlimit,tmp2]/covSave[burn.in:tlimit,tmp1]), na.rm=TRUE)
maxR[jjjj,kkkk] <- pgrMean
}#end otherspp loop
}#end fix spp look
out <- matrix(NA, 4, 4)
for(i in 1:nrow(maxR)){
out[i,i] <- 0
out[i,-i] <- maxR[i,]
}
output <- as.data.frame(out)
rownames(output) <- sppList
colnames(output) <- sppList
write.table(output, outfile, row.names=TRUE, sep=",")
|
# Fuzzer-generated regression input for netrankr:::checkPairs (run under
# valgrind): x mixes huge doubles, NaN and zeros; y is empty. do.call feeds
# the list as named arguments; str() prints whatever the C++ routine returns.
testlist <- list(x = c(1.29849269214681e+219, 1.3017676113098e+219, 1.80107573886382e-226, NaN, NaN, 6.91613364787677e-223, 1.29849269167491e+219, 1.80122446398248e-226, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result)
|
/netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612746655-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 326
|
r
|
testlist <- list(x = c(1.29849269214681e+219, 1.3017676113098e+219, 1.80107573886382e-226, NaN, NaN, 6.91613364787677e-223, 1.29849269167491e+219, 1.80122446398248e-226, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Package.1_functions.R
\name{find.limits}
\alias{find.limits}
\title{Find scales for plots}
\usage{
find.limits(x,y)
}
\arguments{
\item{x}{Values on the x axis}
\item{y}{Values on the y axis}
}
\value{
4 numerics with max and min value for x and y
}
\description{
Return the limits of two vectors centered on 0 and with a 10\% margin.
}
\examples{
find.limits()
}
\keyword{ggplot}
\keyword{scales,}
|
/man/find.limits.Rd
|
no_license
|
sdechaumet/SDjoygret
|
R
| false
| true
| 478
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Package.1_functions.R
\name{find.limits}
\alias{find.limits}
\title{Find scales for plots}
\usage{
find.limits(x,y)
}
\arguments{
\item{x}{Values on the x axis}
\item{y}{Values on the y axis}
}
\value{
4 numerics with max and min value for x and y
}
\description{
Return the limits of two vectors centered on 0 and with a 10\% margin.
}
\examples{
find.limits()
}
\keyword{ggplot}
\keyword{scales,}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{macd_df}
\alias{macd_df}
\title{Table with one column indicator dataset}
\format{
A dataframe with one column
\describe{
\item{CADCHF}{Indicator values of the asset}
}
}
\usage{
macd_df
}
\description{
Table with one column indicator dataset
}
\keyword{datasets}
|
/man/macd_df.Rd
|
permissive
|
vzhomeexperiments/lazytrade
|
R
| false
| true
| 373
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{macd_df}
\alias{macd_df}
\title{Table with one column indicator dataset}
\format{
A dataframe with one column
\describe{
\item{CADCHF}{Indicator values of the asset}
}
}
\usage{
macd_df
}
\description{
Table with one column indicator dataset
}
\keyword{datasets}
|
## server.R
## Author: David Shumway
library(shiny)
library(DT)
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
shinyServer(function(input, output, session) { #, session
# about
#~ output$textAbout <- renderText({
#~ 'Initial template used: (https://shiny.rstudio.com/gallery/superzip-example.html).<br>Author: David Shumway<br>Original data: (https://www.epa.gov/egrid/download-data).'
#~ })
# ill.
#~ West_Bounding_Coordinate: -91.4244
#~ East_Bounding_Coordinate: -87.3840
#~ North_Bounding_Coordinate: 42.4951
#~ South_Bounding_Coordinate: 36.9540
#~ output$map <- renderLeaflet({})
# part 2, comparing two states
#map = 'map1', data = data3)
# 1 inputs
observeEvent(input$m1Range, {
filter(session, input, 1)
}, ignoreInit=T)
observeEvent(input$m1BaseMap, {
filter(session, input, 1)
}, ignoreInit=T)
observeEvent(input$m1Country, {
redrawMap1()
filter(session, input, 1)
ll = firstLL(input$m1Country)
leafletProxy('map1') %>%
flyTo(ll[[2]], ll[[1]], zoom = 3)
}, ignoreInit=T)
observeEvent(input$m1Link, {
if (!rv$linked) {
rv$linked = TRUE
} else {
rv$linked = FALSE
}
filter(session, input, 1)
}, ignoreInit=T)
observeEvent(input$m1Source, {
# individual cb
filter(session, input, 1)
}, ignoreInit=T);
observeEvent(input$m1All, {
updateCheckboxGroupInput(session, 'm1Source', 'Source:',
choices = energyList, selected = unlist(energyList)
)
filter(session, input, 1)
}, ignoreInit=T)
observeEvent(input$m1Ren, {
updateCheckboxGroupInput(session, 'm1Source', 'Source:',
choices = energyList, selected = unlist(energyListRen)
)
filter(session, input, 1)
}, ignoreInit=T)
observeEvent(input$m1NonRen, {
updateCheckboxGroupInput(session, 'm1Source', 'Source:',
choices = energyList, selected = unlist(energyListNonRen)
)
filter(session, input, 1)
}, ignoreInit=T)
observeEvent(input$m1Reset, {
updateCheckboxGroupInput(session, 'm1Source', 'Source:',
choices = energyList, selected = unlist(energyList)
)
updateSelectInput(session, 'm1Country', 'Continent:',
selected = 'Americas')
updateSelectInput(session, 'm1BaseMap', 'Map Type:',
selected = 'OpenStreetMap')
updateSliderInput(session, 'm1Range', 'GW Range:',
min = 0, max = 23, step = 1, value = c(0, 23))
redrawMap1()
}, ignoreInit=T)
# 2 input
observeEvent(input$m2Range, {
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2BaseMap, {
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2Country, {
redrawMap2()
filter(session, input, 2)
ll = firstLL(input$m2Country)
leafletProxy('map2') %>%
flyTo(ll[[2]], ll[[1]], zoom = 3)
}, ignoreInit=T)
observeEvent(input$m2Link, {
if (!rv$linked) {
rv$linked = TRUE
} else {
rv$linked = FALSE
}
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2Source, {
# individual cb
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2All, {
updateCheckboxGroupInput(session, 'm2Source', 'Source:',
choices = energyList, selected = unlist(energyList)
)
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2Ren, {
updateCheckboxGroupInput(session, 'm2Source', 'Source:',
choices = energyList, selected = unlist(energyListRen)
)
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2NonRen, {
updateCheckboxGroupInput(session, 'm2Source', 'Source:',
choices = energyList, selected = unlist(energyListNonRen)
)
filter(session, input, 2)
}, ignoreInit=T)
observeEvent(input$m2Reset, {
updateCheckboxGroupInput(session, 'm2Source', 'Source:',
choices = energyList, selected = unlist(energyList)
)
updateSelectInput(session, 'm2Country', 'Continent:',
selected = 'Asia')
updateSelectInput(session, 'm2BaseMap', 'Map Type:',
selected = 'OpenStreetMap')
updateSliderInput(session, 'm2Range', 'GW Range:',
min = 0, max = 23, step = 1, value = c(0, 23))
redrawMap2()
}, ignoreInit=T)
# p2, maps 1/2
# maps require redraw altering dataset year
# Apparently there's a magical "map" parameter that's getting passed
# here. Without it, leaflet throws a fit.
# Add a custom size legend to a leaflet map: circle swatches whose pixel
# diameters (sizes) correspond to the capacity bands in `labels`.
# `map` is supplied implicitly when this is used inside a %>% pipeline.
# Returns the map (via leaflet::addLegend) so chaining continues to work.
addLegendCustom <- function(map, opacity = 0.5) {
labels = c('<500MW', '<2000MW', '<4000MW', '<6000MW', '>6000MW')# "0.5 mil, 6 mil, 12 mil"
sizes = c(4, 6, 12, 18, 22)
# https://stackoverflow.com/questions/37446283/creating-legend-with-circles-leaflet-r
colors <- c('blue')
# Inline CSS per swatch: addLegend splices these strings into its HTML, so
# each legend circle is rendered at its own width/height.
colorAdditions <- paste0(colors, '; width:', sizes, 'px; height:',
sizes, 'px')
labelAdditions <- paste0('<div style="display: inline-block;height: ',
sizes, 'px;margin-top: 4px;line-height: ', sizes, 'px;">', labels,
'</div>')
addLegend(map, colors = colorAdditions, labels = labelAdditions,
opacity = opacity) #map,
}
output$map1 <- renderLeaflet({
leaflet(subset(data, country_long == 'Americas')) %>%
addProviderTiles(input$m1BaseMap) %>%
addCircleMarkers(
radius = ~frad(capacity_mw),
fillColor = ~fcol(primary_fuel),
color = 'black',
stroke = TRUE, fillOpacity = 0.5, weight = 1,
popup = ~as.character(Popup),
) %>%
addLegend(position = 'topright', colors = xc, labels = xl, opacity = 1) %>%
addLegendCustom(opacity = 0.6)
})
output$map2 <- renderLeaflet({
leaflet(subset(data, country_long == 'Asia')) %>%
addProviderTiles(input$m2BaseMap) %>%
addCircleMarkers(
radius = ~frad(capacity_mw),
fillColor = ~fcol(primary_fuel),
color = 'black',
stroke = TRUE, fillOpacity = 0.5, weight = 1,
popup = ~as.character(Popup),
) %>%
addLegend(colors = xc, labels = xl, opacity = 1) %>%
addLegendCustom(opacity = 0.6)
})
redrawMap1 <- reactive({
output$map1 <- renderLeaflet({
leaflet(rv$d1) %>%
addProviderTiles(input$m1BaseMap) %>%
addCircleMarkers(
radius = ~frad(capacity_mw),
fillColor = ~fcol(primary_fuel),
color = 'black',
stroke = TRUE, fillOpacity = 0.5, weight = 1,
popup = ~as.character(Popup),
) %>%
addLegend(colors = xc, labels = xl, opacity = 1) %>%
addLegendCustom(opacity = 0.6)
})
})
redrawMap2 <- reactive({
output$map2 <- renderLeaflet({
leaflet(rv$d2) %>%
addProviderTiles(input$m2BaseMap) %>%
addCircleMarkers(
radius = ~frad(capacity_mw),
fillColor = ~fcol(primary_fuel),
color = 'black',
stroke = TRUE, fillOpacity = 0.5, weight = 1,
popup = ~as.character(Popup),
) %>%
addLegend(colors = xc, labels = xl, opacity = 1) %>%
addLegendCustom(opacity = 0.6)
})
})
observeEvent(input$goto, {
if (input$goto$side == 1) {
leafletProxy('map1') %>%
flyTo(lat=input$goto$lat, lng=input$goto$lon, zoom = 6)
} else {
leafletProxy('map2') %>%
flyTo(lat=input$goto$lat, lng=input$goto$lon, zoom = 6)
}
}, ignoreInit=T)
output$datatbl <- DT::renderDataTable({
df <- data %>%
mutate(Action = paste(
'<a class="go-map" href="" data-lat="', latitude,
'" data-lon="', longitude,
'" data-side="1"><i class="fa fa-crosshairs"></i></a>',
'<a class="go-map" href="" data-lat="', latitude,
'" data-lon="', longitude,
'" data-side="2"><i class="fa fa-crosshairs"></i></a>',
sep = '')
)
action <- DT::dataTableAjax(session, df, outputId = 'datatbl')
DT::datatable(df, options = list(ajax = list(url = action)), escape = FALSE)
})
})
|
/projectx/server.R
|
no_license
|
davidshumway/cs424
|
R
| false
| false
| 7,878
|
r
|
## server.R
## Author: David Shumway
library(shiny)
library(DT)
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
shinyServer(function(input, output, session) { #, session
# about
#~ output$textAbout <- renderText({
#~ 'Initial template used: (https://shiny.rstudio.com/gallery/superzip-example.html).<br>Author: David Shumway<br>Original data: (https://www.epa.gov/egrid/download-data).'
#~ })
# ill.
#~ West_Bounding_Coordinate: -91.4244
#~ East_Bounding_Coordinate: -87.3840
#~ North_Bounding_Coordinate: 42.4951
#~ South_Bounding_Coordinate: 36.9540
#~ output$map <- renderLeaflet({})
# part 2, comparing two states
#map = 'map1', data = data3)
# 1 inputs
# --- Map 1 input handlers ----
# Every control re-filters map 1 via the app-level filter() helper
# (note: this shadows dplyr::filter; it takes (session, input, side)).
# Fixes vs. original: TRUE instead of T, <- instead of =, stray ';'
# removed, and the linked flag flip written as a direct toggle.

# GW-range slider and base-map selector: just re-filter.
observeEvent(input$m1Range, {
  filter(session, input, 1)
}, ignoreInit = TRUE)
observeEvent(input$m1BaseMap, {
  filter(session, input, 1)
}, ignoreInit = TRUE)

# Continent selector: redraw, re-filter, then fly to the first lat/lon
# returned by the app-level firstLL() helper for that continent.
observeEvent(input$m1Country, {
  redrawMap1()
  filter(session, input, 1)
  ll <- firstLL(input$m1Country)
  leafletProxy('map1') %>%
    flyTo(ll[[2]], ll[[1]], zoom = 3)
}, ignoreInit = TRUE)

# Link toggle: flip the shared flag, then re-filter.
observeEvent(input$m1Link, {
  rv$linked <- !rv$linked
  filter(session, input, 1)
}, ignoreInit = TRUE)

# Individual source checkbox changes.
observeEvent(input$m1Source, {
  filter(session, input, 1)
}, ignoreInit = TRUE)

# "All" / "Renewable" / "Non-renewable" shortcuts: reset the checkbox
# group to the matching energy list, then re-filter.
observeEvent(input$m1All, {
  updateCheckboxGroupInput(session, 'm1Source', 'Source:',
    choices = energyList, selected = unlist(energyList)
  )
  filter(session, input, 1)
}, ignoreInit = TRUE)
observeEvent(input$m1Ren, {
  updateCheckboxGroupInput(session, 'm1Source', 'Source:',
    choices = energyList, selected = unlist(energyListRen)
  )
  filter(session, input, 1)
}, ignoreInit = TRUE)
observeEvent(input$m1NonRen, {
  updateCheckboxGroupInput(session, 'm1Source', 'Source:',
    choices = energyList, selected = unlist(energyListNonRen)
  )
  filter(session, input, 1)
}, ignoreInit = TRUE)

# Reset: restore all map-1 controls to defaults and redraw.
observeEvent(input$m1Reset, {
  updateCheckboxGroupInput(session, 'm1Source', 'Source:',
    choices = energyList, selected = unlist(energyList)
  )
  updateSelectInput(session, 'm1Country', 'Continent:',
    selected = 'Americas')
  updateSelectInput(session, 'm1BaseMap', 'Map Type:',
    selected = 'OpenStreetMap')
  updateSliderInput(session, 'm1Range', 'GW Range:',
    min = 0, max = 23, step = 1, value = c(0, 23))
  redrawMap1()
}, ignoreInit = TRUE)
# 2 input
# --- Map 2 input handlers ----
# Mirrors the map-1 handlers with side = 2; see notes there.
# Fixes vs. original: TRUE instead of T, <- instead of =, and the linked
# flag flip written as a direct toggle.

observeEvent(input$m2Range, {
  filter(session, input, 2)
}, ignoreInit = TRUE)
observeEvent(input$m2BaseMap, {
  filter(session, input, 2)
}, ignoreInit = TRUE)

# Continent selector: redraw, re-filter, then fly to the continent's
# first lat/lon (firstLL() is an app-level helper).
observeEvent(input$m2Country, {
  redrawMap2()
  filter(session, input, 2)
  ll <- firstLL(input$m2Country)
  leafletProxy('map2') %>%
    flyTo(ll[[2]], ll[[1]], zoom = 3)
}, ignoreInit = TRUE)

# Link toggle: flip the shared flag, then re-filter.
observeEvent(input$m2Link, {
  rv$linked <- !rv$linked
  filter(session, input, 2)
}, ignoreInit = TRUE)

# Individual source checkbox changes.
observeEvent(input$m2Source, {
  filter(session, input, 2)
}, ignoreInit = TRUE)

# "All" / "Renewable" / "Non-renewable" shortcuts.
observeEvent(input$m2All, {
  updateCheckboxGroupInput(session, 'm2Source', 'Source:',
    choices = energyList, selected = unlist(energyList)
  )
  filter(session, input, 2)
}, ignoreInit = TRUE)
observeEvent(input$m2Ren, {
  updateCheckboxGroupInput(session, 'm2Source', 'Source:',
    choices = energyList, selected = unlist(energyListRen)
  )
  filter(session, input, 2)
}, ignoreInit = TRUE)
observeEvent(input$m2NonRen, {
  updateCheckboxGroupInput(session, 'm2Source', 'Source:',
    choices = energyList, selected = unlist(energyListNonRen)
  )
  filter(session, input, 2)
}, ignoreInit = TRUE)

# Reset: restore all map-2 controls to defaults (continent Asia) and redraw.
observeEvent(input$m2Reset, {
  updateCheckboxGroupInput(session, 'm2Source', 'Source:',
    choices = energyList, selected = unlist(energyList)
  )
  updateSelectInput(session, 'm2Country', 'Continent:',
    selected = 'Asia')
  updateSelectInput(session, 'm2BaseMap', 'Map Type:',
    selected = 'OpenStreetMap')
  updateSliderInput(session, 'm2Range', 'GW Range:',
    min = 0, max = 23, step = 1, value = c(0, 23))
  redrawMap2()
}, ignoreInit = TRUE)
# p2, maps 1/2
# maps require redraw altering dataset year
# Apparently there's a magical "map" parameter that's getting passed
# here. Without it, leaflet throws a fit.
#' Add a circle-size legend to a leaflet map.
#'
#' Builds HTML swatches (a colored disc of each size plus a vertically
#' centered label) and installs them via leaflet::addLegend(). Technique:
#' https://stackoverflow.com/questions/37446283
#'
#' @param map Leaflet map object (supplied by the pipe).
#' @param opacity Legend opacity.
#' @param labels Label for each size bucket.
#' @param sizes Marker diameter (px) for each bucket; recycled against labels.
#' @param colors CSS color(s) for the swatches; recycled against sizes.
#' @return The map with the legend added (pipeable).
addLegendCustom <- function(map, opacity = 0.5,
                            labels = c('<500MW', '<2000MW', '<4000MW',
                                       '<6000MW', '>6000MW'),
                            sizes = c(4, 6, 12, 18, 22),
                            colors = 'blue') {
  # addLegend() interprets each "color" as CSS, so append width/height to
  # get a square swatch of the requested size.
  colorAdditions <- paste0(colors, '; width:', sizes, 'px; height:',
    sizes, 'px')
  labelAdditions <- paste0('<div style="display: inline-block;height: ',
    sizes, 'px;margin-top: 4px;line-height: ', sizes, 'px;">', labels,
    '</div>')
  addLegend(map, colors = colorAdditions, labels = labelAdditions,
    opacity = opacity)
}
# Initial render of map 1: plants in the Americas (the default continent).
output$map1 <- renderLeaflet({
  leaflet(subset(data, country_long == 'Americas')) %>%
    addProviderTiles(input$m1BaseMap) %>%
    addCircleMarkers(
      radius = ~frad(capacity_mw),      # size ~ capacity (app helper)
      fillColor = ~fcol(primary_fuel),  # color ~ fuel type (app helper)
      color = 'black',
      stroke = TRUE, fillOpacity = 0.5, weight = 1,
      popup = ~as.character(Popup),
    ) %>%
    # xc/xl: app-level fuel-type color and label vectors.
    addLegend(position = 'topright', colors = xc, labels = xl, opacity = 1) %>%
    addLegendCustom(opacity = 0.6)
})
# Initial render of map 2: plants in Asia (the default continent).
# NOTE(review): unlike map1, no explicit legend position is set here —
# confirm whether 'topright' was intended for symmetry.
output$map2 <- renderLeaflet({
  leaflet(subset(data, country_long == 'Asia')) %>%
    addProviderTiles(input$m2BaseMap) %>%
    addCircleMarkers(
      radius = ~frad(capacity_mw),      # size ~ capacity (app helper)
      fillColor = ~fcol(primary_fuel),  # color ~ fuel type (app helper)
      color = 'black',
      stroke = TRUE, fillOpacity = 0.5, weight = 1,
      popup = ~as.character(Popup),
    ) %>%
    addLegend(colors = xc, labels = xl, opacity = 1) %>%
    addLegendCustom(opacity = 0.6)
})
# Rebuild map 1 from the currently filtered dataset (rv$d1).
# NOTE(review): re-assigning output$map1 inside a reactive() works but is
# unconventional Shiny; the usual pattern is leafletProxy() updates.
redrawMap1 <- reactive({
  output$map1 <- renderLeaflet({
    leaflet(rv$d1) %>%
      addProviderTiles(input$m1BaseMap) %>%
      addCircleMarkers(
        radius = ~frad(capacity_mw),      # size ~ capacity (app helper)
        fillColor = ~fcol(primary_fuel),  # color ~ fuel type (app helper)
        color = 'black',
        stroke = TRUE, fillOpacity = 0.5, weight = 1,
        popup = ~as.character(Popup),
      ) %>%
      addLegend(colors = xc, labels = xl, opacity = 1) %>%
      addLegendCustom(opacity = 0.6)
  })
})
# Rebuild map 2 from the currently filtered dataset (rv$d2).
# NOTE(review): re-assigning output$map2 inside a reactive() works but is
# unconventional Shiny; the usual pattern is leafletProxy() updates.
redrawMap2 <- reactive({
  output$map2 <- renderLeaflet({
    leaflet(rv$d2) %>%
      addProviderTiles(input$m2BaseMap) %>%
      addCircleMarkers(
        radius = ~frad(capacity_mw),      # size ~ capacity (app helper)
        fillColor = ~fcol(primary_fuel),  # color ~ fuel type (app helper)
        color = 'black',
        stroke = TRUE, fillOpacity = 0.5, weight = 1,
        popup = ~as.character(Popup),
      ) %>%
      addLegend(colors = xc, labels = xl, opacity = 1) %>%
      addLegendCustom(opacity = 0.6)
  })
})
# Fly the selected map to the coordinates carried by a table crosshair click.
observeEvent(input$goto, {
  target_map <- if (input$goto$side == 1) 'map1' else 'map2'
  leafletProxy(target_map) %>%
    flyTo(lat = input$goto$lat, lng = input$goto$lon, zoom = 6)
}, ignoreInit = TRUE)
# Data table of all plants. Each row gets two crosshair anchors (one per
# map side) carrying data-lat/data-lon/data-side attributes — presumably
# wired to input$goto by client-side JS; confirm in the UI code.
output$datatbl <- DT::renderDataTable({
  df <- data %>%
    mutate(Action = paste(
      '<a class="go-map" href="" data-lat="', latitude,
      '" data-lon="', longitude,
      '" data-side="1"><i class="fa fa-crosshairs"></i></a>',
      '<a class="go-map" href="" data-lat="', latitude,
      '" data-lon="', longitude,
      '" data-side="2"><i class="fa fa-crosshairs"></i></a>',
      sep = '')
    )
  # Server-side ajax source; escape = FALSE so the Action HTML renders
  # as real links rather than text.
  action <- DT::dataTableAjax(session, df, outputId = 'datatbl')
  DT::datatable(df, options = list(ajax = list(url = action)), escape = FALSE)
})
})
|
# Build the SQLite database from the DDL statements in database.sql.
library(DBI)
library(stringr)
# Read the whole file, then split the combined text into individual
# statements on ";" (str_split returns a one-element list).
sql <- readLines("database.sql")
stmt <- str_c(sql, collapse = "\n") |>
  str_trim() |>
  str_split(";")
conn <- dbConnect(RSQLite::SQLite(), "meudb/meu.db")
# NOTE(review): if any dbExecute() fails, dbDisconnect() below is never
# reached — consider tryCatch(finally = dbDisconnect(conn)).
lapply(stmt[[1]], function(q) {
  if (q != "")
    dbExecute(conn, q)
})
# Show the resulting schema, then release the connection.
dbGetQuery(conn, "select * from sqlite_master")
dbDisconnect(conn)
|
/create_database.R
|
no_license
|
wilsonfreitas/construindo-um-banco-de-dados-financeiro
|
R
| false
| false
| 335
|
r
|
# Build the SQLite database from the DDL statements in database.sql.
# Fixes vs. original: the connection is now closed even if a statement
# fails, and whitespace-only fragments left over from the ";" split are
# skipped instead of being passed to dbExecute().
library(DBI)
library(stringr)

# Read the whole file, then split the combined text into individual
# statements on ";" (str_split returns a one-element list).
sql <- readLines("database.sql")
stmt <- str_c(sql, collapse = "\n") |>
  str_trim() |>
  str_split(";")

conn <- dbConnect(RSQLite::SQLite(), "meudb/meu.db")
tryCatch({
  for (q in str_trim(stmt[[1]])) {
    if (q != "") {
      dbExecute(conn, q)
    }
  }
  # Show the resulting schema (print explicitly: auto-printing does not
  # happen inside tryCatch).
  print(dbGetQuery(conn, "select * from sqlite_master"))
}, finally = dbDisconnect(conn))
|
#Load xlsx
library(xlsx)
# Load the readxl package
library(readxl)
#Getting the right
getwd()
setwd('C:/Users/lahona/Downloads')
#To observe frequencies - funModeling is a great visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
# Read the sheets, one by one
LS <- read_excel('Lawsuits.xlsx', sheet = 'Lawsuits')
LS
#No missing value found in the entire dataset
sum(is.na(LS))
library(moments)
library(ggplot2)
library(dplyr)
glimpse(LS)
dim(LS)
str(LS)
#Gender
Sex<-LS$Gender
Sex
Sex<- table(Sex)
Sex
barplot(Sex,main='Gender Distribution',xlab='Gender',ylab='Frequency',col=c('beige','bisque4'))
#Pie chart
pielabels <- sprintf("%s = %3.1f%s", Sex,
100*Sex/sum(Sex), "%")
pie(Sex, labels=pielabels,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Gender distribution")
str(LS)
#Specialty frequency barplot
Sp<-ggplot(data = LS) +geom_bar(mapping = aes(x = Specialty))
Sp + theme(axis.text.x = element_text(angle = 45, hjust = 1))
LS %>%
count(Specialty)
#71 are married
LS %>%
count(`Marital Status`)
#Marital Status
Ms<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Marital Status` )) +scale_colour_brewer(palette = "Set2")
MS<-Ms + theme(axis.text.x = element_text(angle = 45, hjust = 5))
Ms<-Ms+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))
Ms
#Private Attory frequency
PA<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Private Attorney` ))
PA + theme(axis.text.x = element_text(angle = 45, hjust = 1))
PrivateAt<-LS$`Private Attorney`
PrivateAt
PrivateAt<- table(PrivateAt)
PrivateAt
#barplot(PrivateAt,main='Private Attorney Distribution',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#Insurance
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Count of different insurance 30% is unkown
LS%>%
count(Insurance)
#pielabels <- sprintf("%s = %3.1f%s", Ins,
# 100*Ins/sum(Ins), "%")
#pie(Ins, labels=pielabels,
# clockwise=TRUE,
# radius=1,
# border="red",
# cex=0.8,
# main="Insurance distribution")
library(dplyr)
LS%>%
filter(Severity>=6)
#Group by gender
Gender<-LS%>%
group_by(Gender)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Gender
# Normalize the Insurance column: the raw data mixes 'Unknown' and
# 'unknown', so collapse both spellings into 'unknown'.
# Bug fix vs. original: it appended a *misspelled* 'unkown' level that
# never matched the 'unknown' replacement value. With read_excel() the
# column is character, but guard the factor case too.
if (is.factor(LS$Insurance) && !('unknown' %in% levels(LS$Insurance))) {
  levels(LS$Insurance) <- c(levels(LS$Insurance), 'unknown')
}
LS$Insurance[LS$Insurance == 'Unknown'] <- 'unknown'
LS
#Insurance boxplot after correcting unkown
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#B0AFED"), legend.background = element_rect(fill = "#C8EDAF"))
#Graph of Insurance
Insu <- LS%>%
group_by(Insurance) %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Insu
#Need to make a boxplot of the Insurance
Private <- LS%>%
group_by('Private Attorney') %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment)
)
Private
#Checking for private attorney
#Private is way more in terms of mean and median for payment
PRi <-LS%>%
group_by(`Private Attorney`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
PRi
#Group by Specialty
SPEC <-LS%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
#group by severity
SEVEE <-LS%>%
group_by(Severity)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment),
)
SEVEE
kurtosis(LS$Payment)
skewness(LS$Payment)
###########have to see#########
freq(LS$Severity)
PAA<-table(LS$`Private Attorney`)
pielab <- sprintf("%s = %3.1f%s", PAA,
100*PAA/sum(PAA), "%")
#Pie chart of Private Attorney
pie(PAA, labels=pielab,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Private Attorney",
col=c("Green","Pink")
)
legend(1.3, .1, c("Non-Private","Private"), cex = 0.9, fill = PAA)
MARII <-LS%>%
group_by(`Marital Status`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
MARII
LS%>%
count(`Private Attorney`)
#Checking which are the highest payment
HigestTOLowest<- LS%>%
arrange(desc(Payment))
HigestTOLowest
#Gender i have to create a box plot
ggplot(LS, aes(Gender, Payment)) +
geom_point() +
geom_smooth()
# box plot of the gender to see outliers and others
g<- ggplot(LS, aes(Gender, Payment)) +
geom_boxplot() +
geom_smooth()
g
#boxplot for insurance
ggplotInsurance <- ggplot(LS, aes(Insurance, Payment)) +
geom_boxplot() +
geom_smooth() + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#EDAFEC"), legend.background = element_rect(fill = "#EDAFEC"))
ggplotInsurance
#Bad graph because of many categories .redraw it
# Group by age and Payment
by_Age<- LS %>%
group_by(Age,Specialty)
by_Age
# Normalize the Specialty column: recode the 'ObGyn' typo to 'OBGYN'.
# Bug fix vs. original: it appended an 'ObGyN' level, which is neither
# the value being replaced nor the replacement, so factor recoding could
# never have worked. Guard the factor case; character columns need no level.
if (is.factor(LS$Specialty) && !('OBGYN' %in% levels(LS$Specialty))) {
  levels(LS$Specialty) <- c(levels(LS$Specialty), 'OBGYN')
}
LS$Specialty[LS$Specialty == 'ObGyn'] <- 'OBGYN'
LS
# Vector of Specialty to examine
specialty1 <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
# Filter
filteredSpecialty <- by_Age %>%
filter(Specialty %in% specialty1)
# Line plot
ggplot(filteredSpecialty, aes(Age, Payment, color = Specialty)) +
geom_line()
#Gender
by_G<- LS %>%
group_by(Age,Gender)
# Vector of four countries to examine
countries <- c('Male','Female')
# Filter
filtered_4_countries <- by_G %>%
filter(Gender %in% countries)
# Line plot
ggplot(filtered_4_countries, aes(Age, Payment, color = Gender)) +
geom_line()
#F
by_Se<- LS %>%
group_by(Age,Gender)
countries <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
f <- by_Se %>%
filter(Specialty %in% countries)
ggplot(f, aes(Age,Payment)) +
geom_line() +
facet_wrap(~ Specialty,scales = "free_y")
library(moments)
skewness(LS$Payment)
kurtosis(LS$Payment)
jarque.test(LS$Payment)
#count private attorney
LS %>%
count(`Private Attorney`)
#observe the Payment but we can't treat them as outliers
ggplot(data = LS) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
#The 3 numbers in the last are outliers if we do z score and >3
LS %>%
count(cut_width(Payment, 500))
# not getting executed but will try to
#AGE<-function(age){
# if (age<=18){
# return (small)
#}else if(age>18 & age <=35){
# retun (young)
#}else if(age>35 & age <=60){
# return (old)
#}else {
# return(seniorCitzen)
#}
#}
#fun<-lapply(LS$Age,AGE)
library(psych)
skew(LS$Payment)
quantile(LS$Payment)
#quantile(LS$Age)
#Total payment in the dataset
Total=sum(LS$Payment)
Total
#Famiy Practice
FamilyP<-LS %>%
filter(Specialty=='Family Practice')
FamilyP
sum(FamilyP$Payment)
MedFP<- median(FamilyP$Payment)
MedFP
MeanFP<-mean(FamilyP$Payment)
MeanFP
#print(paste('Average Family Practice:',MedFP/17))
#General Surgery
GeneralS<-LS %>%
filter(Specialty=='General Surgery')
GeneralS
MedGS<- median(GeneralS$Payment)
MedGS
MeanGS<-mean(GeneralS$Payment)
MeanGS
sum(GeneralS$Payment)
#print(paste('Average General Surgery:',MedGS/14))
#Anesthesiology
Anes<-LS %>%
filter(Specialty=='Anesthesiology')
Anes
MedAN<- median(Anes$Payment)
MedAN
MeanAN<-mean(Anes$Payment)
MeanAN
sum(Anes$Payment)
#Orthopedic Surgery
ORT<-LS %>%
filter(Specialty=='Orthopedic Surgery')
ORT
MedOR<- median(ORT$Payment)
MedOR
MeanOR<-mean(ORT$Payment)
MeanOR
sum(ORT$Payment)
#OBGYN
OBGYN<-LS %>%
filter(Specialty=='OBGYN')
OBGYN
MedOBGYN<- median(OBGYN$Payment)
MedOBGYN
MeanOBGYN<-mean(OBGYN$Payment)
MeanOBGYN
sum(OBGYN$Payment)
#Top 5 speciality consisits of 67.23 % Payment but they are more in number
ProportionofTop5<-(sum(FamilyP$Payment)+sum(GeneralS$Payment)+sum(OBGYN$Payment)+sum(ORT$Payment)+sum(OBGYN$Payment))/Total
ProportionofTop5
#Function to check median,mean,total_sum
#' Summary statistics for the Payment column of a subset.
#'
#' @param a A data frame (or list) with a numeric `Payment` column.
#' @return A list with elements `Med`, `Mean`, `SUM` — median, mean and
#'   total of `Payment`. Naming the elements is backward compatible with
#'   the original positional access ([[1]], [[2]], [[3]]).
SpecialtyFunction <- function(a) {
  list(
    Med = median(a$Payment),
    Mean = mean(a$Payment),
    SUM = sum(a$Payment)
  )
}
#Checking statistics for Internal Medicine
IM<-LS%>%
filter(Specialty=='Internal Medicine')
IM
I<-SpecialtyFunction(IM)
I
#checking statistics for Neurology/Neurosurgery
Neuro <- LS%>%
filter(Specialty=='Neurology/Neurosurgery')
Neuro
Neurolo<-SpecialtyFunction(Neuro)
Neurolo
#checking statistics for Emergency Medicine
Emergency <- LS%>%
filter(Specialty=='Emergency Medicine')
Emergency
EMERGEN<- SpecialtyFunction(Emergency)
EMERGEN
#combination of Private attorney and Private insurance
CombinationAllPrivate<-LS%>%
group_by(Insurance,Specialty,`Private Attorney`) %>%
filter(`Private Attorney`==1,Insurance=='Private')
glimpse(CombinationAllPrivate)
#Median and mean is higher than normal
SpecialtyFunction(CombinationAllPrivate)
#Comparing Private to all the values including Private
#hist for All
hist(LS$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of all values vs Payment for Private ")
#hist for combinationAllPrivate
hist(CombinationAllPrivate$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for All,we may take median
abline(v = median(LS$Payment),
col = "#00009950", lwd = 2)
#vertical line for CombinationAll for mean ,we may take median
abline(v = median(CombinationAllPrivate$Payment),
col = "#99000050", lwd = 2)
#have to remove the y axis as density
#Private attorney
PrivateAttorney1<-LS%>%
filter(`Private Attorney`==1)
PrivateAttorney1
NonPrivateAttorney<-LS%>%
filter(`Private Attorney`==0)
NonPrivateAttorney
#Make PrivateAttorney vs Non-Private
dat <- PrivateAttorney1$Payment
extra_dat <- NonPrivateAttorney$Payment
#Plot
plot(density(dat),col="blue")
lines(density(extra_dat),col="red")
#Histogram of Private vs Non-Private
hist(PrivateAttorney1$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of Private Attorney vs Payment for Non-Private Attorney")
#hist for combinationAllPrivate
hist(NonPrivateAttorney$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for All,we may take median
abline(v = median(PrivateAttorney1$Payment),
col = "#00009950", lwd = 2)
#vertical line for CombinationAll for mean ,we may take median
abline(v = median(NonPrivateAttorney$Payment),
col = "#99000050", lwd = 2)
#To observe frequencies - funModeling is a great visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
freq(LS)
#basic eda done in one function
# Run a battery of quick EDA summaries over a data frame in one call.
# Relies on tidyverse/funModeling/Hmisc being attached by the caller.
# NOTE(review): only describe(data)'s value is returned; the other calls
# are kept for their printing/plotting side effects — any value they
# return (e.g. profiling_num's table) is silently discarded. Wrap in
# print() if those tables are wanted.
basic_eda <- function(data)
{
  glimpse(data)        # column-by-column structure preview
  df_status(data)      # per-column status metrics (funModeling)
  freq(data)           # frequency charts for categoricals (funModeling)
  profiling_num(data)  # numeric profiling (funModeling)
  plot_num(data)       # numeric-column plots (funModeling)
  describe(data)       # Hmisc summary — the function's return value
}
basic_eda(LS)
#checking metrices
df_status(LS)
#Not usefull now but is used for numerical variable
#data_prof=profiling_num(LS)
#data_prof
#contingency table we can create to see for any two variable
#describe(LS)
#Checking for specialty that is Surgery
Surgery <- LS%>%
filter(Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
Surgery
hist(Surgery$Payment)
Surgerysats<-SpecialtyFunction(Surgery)
Surgerysats
#checking for specialty that is medicine
Medicine <- LS%>%
filter(Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
hist(Medicine$Payment)
Medicine
Medicinestats <- SpecialtyFunction(Medicine)
Medicinestats
#Histogram of Surgery vs Medicine
hist(Surgery$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Surgery vs Medicine for Payment")
#hist for Medicine
hist(Medicine$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for surgery,we may take median
abline(v = median(Surgery$Payment),
col = "#00009950", lwd = 2)
#vertical line for Medicine for median ,we may take median
abline(v = median(Medicine$Payment),
col = "#99000050", lwd = 2)
# Histogram Grey Color
hist(Medicine, col=rgb(0.1,0.1,0.1,0.5),xlim=c(0,10), ylim=c(0,200), main="Overlapping Histogram")
hist(Surgery, col=rgb(0.8,0.8,0.8,0.5), add=T)
# Histogram Colored (blue and red)
hist(Medicine, col=rgb(1,0,0,0.5),xlim=c(0,10), ylim=c(0,200), main="Overlapping Histogram", xlab="Variable")
hist(Surgery, col=rgb(0,0,1,0.5), add=T)
#Have to make a two histogram together
#Surgery and Private
SurgeryPrivate <- LS%>%
filter(`Private Attorney`==1,Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
SurgeryPrivate
SpecialtyFunction(SurgeryPrivate)
#Medicine and Private not needed
MedicinePrivate <- LS%>%
filter(Insurance=='Private',`Private Attorney`==1,Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
MedicinePrivate
SpecialtyFunction(MedicinePrivate)
#whole prive Speciality
SpecialityPrivateALL<- LS%>%
filter(`Private Attorney`==1,Insurance=='Private')
SpecialityPrivateALL
SpecialtyFunction(SpecialityPrivateALL)
table(LS$Specialty,LS$Insurance)
#We should merge the two unkown columns into one column unknown
#Severity
SEVE <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6)
SEVE
#Proportion of top 4 severity consist of 63.9 % of the payment..High severity means high payment
sum(SEVE$Payment)/sum(LS$Payment)
SEVELESS <-LS%>%
filter(Severity==1 | Severity==2 | Severity==3 | Severity==4 | Severity==5)
SEVELESS
#Majority of high severity is done by private attorney
SEVE1 <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6,`Private Attorney`==1)
SEVE1
HighSeverityPrivategraph<-table(SEVE$`Private Attorney`)
barplot(HighSeverityPrivategraph,main='Private Attorney Distribution in High Severity',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#SS<- data.frame("HighSeverityAll":HighSeverityAll,"HighSeverityPrivate":HighSeverityPrivate)
#SS
#############Boxplot of less severity and high severity with respect to payment
boxplot(SEVE$Payment,SEVELESS$Payment , xlab="High Severity vs Low Severity",
main="boxplot of High Severity VS Less Severity ",ylab='Payment'
)
plot(density(SEVELESS$Payment))
plot(density(SEVE$Payment))
#See the graph to see relation between two variabe (work to be done)
library("DataExplorer")
plot_correlation(LS)
#
library(vcd)
#mosaic(LS, shade=TRUE, legend=TRUE)
#ssoc(LS, shade=TRUE)
#We are grouping ages
AGE1<-LS%>%
filter(Age<18)
AGE1
AGE2 <- LS%>%
filter(Age>=18 & Age<40)
AGE2
AGE3<-LS%>%
filter(Age>=40 & Age<60)
AGE3
AGE4 <- LS%>%
filter(Age>=60)
AGE4
#Here average we are taking median is more in 60 and above
SpecialtyFunction(AGE1)
SpecialtyFunction(AGE2)
SpecialtyFunction(AGE3)
SpecialtyFunction(AGE4)
#More severity is more in 60 and above followed by 35+
table(AGE1$Severity)
table(AGE2$Severity)
table(AGE3$Severity)
table(AGE4$Severity)
#AGE and specialty
table(AGE1$Specialty)
table(AGE2$Specialty)
table(AGE3$Specialty)
table(AGE4$Specialty)
freq(AGE1$Specialty)
freq(AGE2$Specialty)
freq(AGE3$Specialty)
freq(AGE4$Specialty)
ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
AAAG1<-ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG1<-AAAG1 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG1<-AAAG1+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 13), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG1
AAAG2<-ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG2<-AAAG2 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG2<-AAAG2+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG2
AAAG3<-ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG3<-AAAG3 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG3<-AAAG3+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.2, colour = "grey10"),
plot.background = element_rect(fill = "#edafaf"), legend.background = element_rect(fill = "#edafaf"))
AAAG3
AAAG4<-ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG4<-AAAG4 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG4<-AAAG4+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 16), axis.title = element_text(size = 14), axis.line = element_line(size = 0.6, colour = "grey10"),
plot.background = element_rect(fill = "#eaafed"), legend.background = element_rect(fill = "#eaafed"))
AAAG4
AAGE1 <-AGE1%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE1
AAGE2 <-AGE2%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE2
AAGE3 <-AGE3%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE3
AAGE4 <-AGE4%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=kurtosis(Payment)
)
AAGE4
AAGE11 <-AGE1%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE11
AAGE12 <-AGE2%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE12
AAGE13 <-AGE3%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE13
AAGE14 <-AGE4%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE14
#The insurance which is Private and Gender is Female
PF <- LS%>%
filter(Insurance=='Private',Gender=='Female')
PF
dim(PF)
#47.8 percent is private and female
print(34/71)
# Mix both unkown together
UNKOINSU<-LS%>%
filter(Insurance=='Unknown'| Insurance=='unknown')
UNKOINSU
count(UNKOINSU)
#36 values are unkown for Insurance ,out of 118
36/118
#30% are unkown
#ggplot(LS,aes(Gender,Payment))+theme(plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
ggplot(LS,aes(Gender,Payment))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
#We have to see mainly which are the columns we have to focus
#hist(LS$Payment,
#col = "#00009950", freq = FALSE, xlab = "Payment",
#main = "Payment vs count")
#abline(v = mean(LS$Payment),
#col = "#00009950", lwd = 2)
#just
#male
Mapay <- LS%>%
filter(Gender=='Male')
#female
Fepay <-LS%>%
filter(Gender=='Female')
#hist for male
hist(Mapay$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment vs count")
#hist for female
hist(Fepay$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
#vertical line for male,we may take median
abline(v = mean(Mapay$Payment),
col = "#00009950", lwd = 2)
#vertical line for female for mean ,we may take median
abline(v = mean(Fepay$Payment),
col = "#99000050", lwd = 2)
##Severity and Gender
LS$Severity<-as.factor(LS$Severity)
LS$Gender<-as.factor(LS$Gender)
LS$Severity
spineplot(LS$Severity,LS$Gender,
xlab = "severity",ylab = "Gender",col=c("blue","green"))
#Insurance and Gender
#There are many unknowns in Male gender In total 30 % are unknown
LS$Insurance<-as.factor(LS$Insurance)
LS$Insurance
spineplot(LS$Gender,LS$Insurance,ylab = "Insurance",
xlab = "Gender",
col =c("red","green","yellow","purple","orange","blue"))
|
/groupassignment (2).R
|
permissive
|
anuraglahon16/Case-Study-of-Insurance-Lawsuit
|
R
| false
| false
| 26,339
|
r
|
#Load xlsx
library(xlsx)
# Load the readxl package
library(readxl)
#Getting the right
getwd()
setwd('C:/Users/lahona/Downloads')
#To observe frequencies - funModeling is a great visualisation tool
library(funModeling)
library(tidyverse)
library(Hmisc)
# Read the sheets, one by one
LS <- read_excel('Lawsuits.xlsx', sheet = 'Lawsuits')
LS
#No missing value found in the entire dataset
sum(is.na(LS))
library(moments)
library(ggplot2)
library(dplyr)
glimpse(LS)
dim(LS)
str(LS)
#Gender
Sex<-LS$Gender
Sex
Sex<- table(Sex)
Sex
barplot(Sex,main='Gender Distribution',xlab='Gender',ylab='Frequency',col=c('beige','bisque4'))
#Pie chart
pielabels <- sprintf("%s = %3.1f%s", Sex,
100*Sex/sum(Sex), "%")
pie(Sex, labels=pielabels,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Gender distribution")
str(LS)
# --- Specialty frequency barplot ------------------------------------------
Sp<-ggplot(data = LS) +geom_bar(mapping = aes(x = Specialty))
Sp + theme(axis.text.x = element_text(angle = 45, hjust = 1))
LS %>%
count(Specialty)
# 71 records are married.
LS %>%
count(`Marital Status`)
# --- Marital Status barplot ------------------------------------------------
Ms<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Marital Status` )) +scale_colour_brewer(palette = "Set2")
# NOTE(review): this line assigns to MS (capital) which is never used again;
# the themed plot below is built on Ms instead.
MS<-Ms + theme(axis.text.x = element_text(angle = 45, hjust = 5))
Ms<-Ms+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))
Ms
# --- Private Attorney frequency (0 = not private, 1 = private) ------------
PA<-ggplot(data = LS) +geom_bar(mapping = aes(x =`Private Attorney` ))
PA + theme(axis.text.x = element_text(angle = 45, hjust = 1))
PrivateAt<-LS$`Private Attorney`
PrivateAt
PrivateAt<- table(PrivateAt)
PrivateAt
#barplot(PrivateAt,main='Private Attorney Distribution',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
# --- Insurance frequency ---------------------------------------------------
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Count of different insurance types; roughly 30% is unknown.
LS%>%
count(Insurance)
#pielabels <- sprintf("%s = %3.1f%s", Ins,
# 100*Ins/sum(Ins), "%")
#pie(Ins, labels=pielabels,
# clockwise=TRUE,
# radius=1,
# border="red",
# cex=0.8,
# main="Insurance distribution")
library(dplyr)
# High-severity cases only (severity 6 or above).
LS%>%
filter(Severity>=6)
# Payment summary statistics grouped by gender.
Gender<-LS%>%
group_by(Gender)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Gender
# Merge the two spellings of "Unknown" caused by a capitalisation typo.
# NOTE(review): levels() adds the misspelled level 'unkown' (not 'unknown'),
# and has no effect if Insurance is a character column (read_excel does not
# create factors) — the replacement on the next line is what actually works.
levels(LS$Insurance) <- c(levels(LS$Insurance),'unkown')
LS$Insurance[LS$Insurance=='Unknown'] <- 'unknown'
LS
# Insurance barplot after merging the unknown categories.
Ins<-ggplot(data = LS) +geom_bar(mapping = aes(x = Insurance))
Ins + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#B0AFED"), legend.background = element_rect(fill = "#C8EDAF"))
# Payment summary statistics grouped by insurance type.
Insu <- LS%>%
group_by(Insurance) %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment))
Insu
# Need to make a boxplot of the Insurance.
# NOTE(review): group_by('Private Attorney') groups by a quoted STRING
# constant, producing a single group — almost certainly backticks were
# intended. The corrected version (PRi, below) uses backticks.
Private <- LS%>%
group_by('Private Attorney') %>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment)
)
Private
# Payment statistics split by private vs non-private attorney.
# Private is considerably higher in both mean and median payment.
PRi <-LS%>%
group_by(`Private Attorney`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
PRi
# Payment statistics grouped by medical specialty.
SPEC <-LS%>%
group_by(Specialty)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
# Payment statistics grouped by severity level, including higher moments.
SEVEE <-LS%>%
group_by(Severity)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment),
)
SEVEE
# Overall shape of the payment distribution (moments package).
kurtosis(LS$Payment)
skewness(LS$Payment)
########### have to see #########
freq(LS$Severity)
# Pie chart of Private Attorney usage (0 = non-private, 1 = private),
# with each slice labelled "<count> = <percent>%".
PAA<-table(LS$`Private Attorney`)
pielab <- sprintf("%s = %3.1f%s", PAA,
100*PAA/sum(PAA), "%")
# Slice colours, shared between the pie and its legend.
pie_cols <- c("Green","Pink")
pie(PAA, labels=pielab,
clockwise=TRUE,
radius=1,
border="red",
cex=0.8,
main="Private Attorney",
col=pie_cols
)
# BUG FIX: the legend's `fill` was previously given PAA (the count table),
# which is not a colour vector; it must reuse the slice colours so the
# legend swatches match the pie.
legend(1.3, .1, c("Non-Private","Private"), cex = 0.9, fill = pie_cols)
# Payment summary statistics grouped by marital status.
MARII <-LS%>%
group_by(`Marital Status`)%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment)
)
MARII
LS%>%
count(`Private Attorney`)
# Rank all lawsuits from highest to lowest payment.
HigestTOLowest<- LS%>%
arrange(desc(Payment))
HigestTOLowest
# Scatter of payment by gender (gender is categorical, so points stack).
ggplot(LS, aes(Gender, Payment)) +
geom_point() +
geom_smooth()
# Box plot of payment by gender to see outliers.
g<- ggplot(LS, aes(Gender, Payment)) +
geom_boxplot() +
geom_smooth()
g
# Box plot of payment by insurance type.
ggplotInsurance <- ggplot(LS, aes(Insurance, Payment)) +
geom_boxplot() +
geom_smooth() + theme(axis.text.x = element_text(angle = 45, hjust = 1))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 11),
legend.text = element_text(size = 12), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#EDAFEC"), legend.background = element_rect(fill = "#EDAFEC"))
ggplotInsurance
# Cluttered graph because of the many categories; redraw it later.
# Group by age and specialty for the payment-vs-age line plots below.
by_Age<- LS %>%
group_by(Age,Specialty)
by_Age
# Correct the typo 'ObGyn' to 'OBGYN'.
# NOTE(review): the added level is spelled 'ObGyN' (unused) and, as above,
# levels() is a no-op on character columns; the replacement line does the work.
levels(LS$Specialty) <- c(levels(LS$Specialty),'ObGyN')
LS$Specialty[LS$Specialty=='ObGyn'] <- 'OBGYN'
LS
# Vector of Specialty values to examine.
specialty1 <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
# Filter
filteredSpecialty <- by_Age %>%
filter(Specialty %in% specialty1)
# Line plot of payment vs age, coloured by specialty.
ggplot(filteredSpecialty, aes(Age, Payment, color = Specialty)) +
geom_line()
# Payment vs age split by gender.
by_G<- LS %>%
group_by(Age,Gender)
# Vector of the two genders to examine (variable name is a leftover from a
# tutorial about countries).
countries <- c('Male','Female')
# Filter
filtered_4_countries <- by_G %>%
filter(Gender %in% countries)
# Line plot
ggplot(filtered_4_countries, aes(Age, Payment, color = Gender)) +
geom_line()
# Faceted payment-vs-age plot, one panel per specialty.
by_Se<- LS %>%
group_by(Age,Gender)
countries <- c('Pediatrics', 'Plastic Surgeon', 'Internal Medicine',
'Urological Surgery', 'General Surgery', 'OBGYN',
'Orthopedic Surgery', 'Ophthamology', 'Emergency Medicine',
'ObGyn', 'Anesthesiology', 'Neurology/Neurosurgery',
'Family Practice', 'Dermatology', 'Physical Medicine',
'Cardiology', 'Resident', 'Pathology', 'Radiology',
'Thoracic Surgery', 'Occupational Medicine')
f <- by_Se %>%
filter(Specialty %in% countries)
ggplot(f, aes(Age,Payment)) +
geom_line() +
facet_wrap(~ Specialty,scales = "free_y")
library(moments)
# Distribution shape and Jarque-Bera normality test on Payment.
skewness(LS$Payment)
kurtosis(LS$Payment)
jarque.test(LS$Payment)
# Count private vs non-private attorney.
LS %>%
count(`Private Attorney`)
# Observe the Payment distribution; extreme values cannot simply be
# treated as outliers here.
ggplot(data = LS) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
# The 3 values in the last bins would be outliers under a z-score > 3 rule.
LS %>%
count(cut_width(Payment, 500))
# Dead code: an age-banding function that was never fixed.
# NOTE(review): it fails because the return values (small/young/old/
# seniorCitzen) are unquoted symbols and 'retun' is a typo.
#AGE<-function(age){
# if (age<=18){
# return (small)
#}else if(age>18 & age <=35){
# retun (young)
#}else if(age>35 & age <=60){
# return (old)
#}else {
# return(seniorCitzen)
#}
#}
#fun<-lapply(LS$Age,AGE)
library(psych)
skew(LS$Payment)
quantile(LS$Payment)
#quantile(LS$Age)
# Total payment in the dataset.
# NOTE(review): uses `=` for assignment, unlike the `<-` used elsewhere.
Total=sum(LS$Payment)
Total
# --- Per-specialty payment statistics (top specialties) --------------------
# Family Practice
FamilyP<-LS %>%
filter(Specialty=='Family Practice')
FamilyP
sum(FamilyP$Payment)
MedFP<- median(FamilyP$Payment)
MedFP
MeanFP<-mean(FamilyP$Payment)
MeanFP
#print(paste('Average Family Practice:',MedFP/17))
# General Surgery
GeneralS<-LS %>%
filter(Specialty=='General Surgery')
GeneralS
MedGS<- median(GeneralS$Payment)
MedGS
MeanGS<-mean(GeneralS$Payment)
MeanGS
sum(GeneralS$Payment)
#print(paste('Average General Surgery:',MedGS/14))
# Anesthesiology
Anes<-LS %>%
filter(Specialty=='Anesthesiology')
Anes
MedAN<- median(Anes$Payment)
MedAN
MeanAN<-mean(Anes$Payment)
MeanAN
sum(Anes$Payment)
# Orthopedic Surgery
ORT<-LS %>%
filter(Specialty=='Orthopedic Surgery')
ORT
MedOR<- median(ORT$Payment)
MedOR
MeanOR<-mean(ORT$Payment)
MeanOR
sum(ORT$Payment)
# OBGYN
OBGYN<-LS %>%
filter(Specialty=='OBGYN')
OBGYN
MedOBGYN<- median(OBGYN$Payment)
MedOBGYN
MeanOBGYN<-mean(OBGYN$Payment)
MeanOBGYN
sum(OBGYN$Payment)
# Share of the total payment covered by the five largest specialties
# (Family Practice, General Surgery, Anesthesiology, Orthopedic Surgery,
# OBGYN) — around 67% of all payments, though they are also more numerous.
# BUG FIX: OBGYN was summed twice and Anesthesiology (Anes, computed above)
# was omitted from the sum.
ProportionofTop5<-(sum(FamilyP$Payment)+sum(GeneralS$Payment)+sum(Anes$Payment)+sum(ORT$Payment)+sum(OBGYN$Payment))/Total
ProportionofTop5
# Summary statistics (median, mean, total) of a payment column.
#
# Args:
#   a:      a data frame containing the column named by `column`.
#   column: name of the numeric column to summarise; defaults to "Payment"
#           so existing one-argument calls behave exactly as before
#           (generalisation: any numeric column can now be summarised).
#
# Returns: a named list with elements Med (median), Mean and SUM.
#          Names are new but positional access (res[[1]], ...) still matches
#          the original Med/Mean/SUM order, so callers are unaffected.
SpecialtyFunction <- function(a, column = "Payment"){
  pay <- a[[column]]
  list(Med = median(pay), Mean = mean(pay), SUM = sum(pay))
}
# Checking statistics for Internal Medicine.
IM<-LS%>%
filter(Specialty=='Internal Medicine')
IM
# NOTE(review): `I` masks base::I() (used e.g. in formulas) for the rest
# of the session.
I<-SpecialtyFunction(IM)
I
# Checking statistics for Neurology/Neurosurgery.
Neuro <- LS%>%
filter(Specialty=='Neurology/Neurosurgery')
Neuro
Neurolo<-SpecialtyFunction(Neuro)
Neurolo
# Checking statistics for Emergency Medicine.
Emergency <- LS%>%
filter(Specialty=='Emergency Medicine')
Emergency
EMERGEN<- SpecialtyFunction(Emergency)
EMERGEN
# Combination of private attorney AND private insurance.
CombinationAllPrivate<-LS%>%
group_by(Insurance,Specialty,`Private Attorney`) %>%
filter(`Private Attorney`==1,Insurance=='Private')
glimpse(CombinationAllPrivate)
# Median and mean are higher than for the full dataset.
SpecialtyFunction(CombinationAllPrivate)
# Comparing the private subset to all values (which include private).
# Density histogram for all payments.
hist(LS$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of all values vs Payment for Private ")
# Overlaid density histogram for the private-only subset.
hist(CombinationAllPrivate$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
# Vertical reference line at the overall median payment.
abline(v = median(LS$Payment),
col = "#00009950", lwd = 2)
# Vertical reference line at the private-subset median payment.
abline(v = median(CombinationAllPrivate$Payment),
col = "#99000050", lwd = 2)
# TODO: remove the density scale from the y axis for presentation.
# --- Private vs non-private attorney ---------------------------------------
PrivateAttorney1<-LS%>%
filter(`Private Attorney`==1)
PrivateAttorney1
NonPrivateAttorney<-LS%>%
filter(`Private Attorney`==0)
NonPrivateAttorney
# Density curves: private (blue) vs non-private (red) payments.
dat <- PrivateAttorney1$Payment
extra_dat <- NonPrivateAttorney$Payment
# Plot
plot(density(dat),col="blue")
lines(density(extra_dat),col="red")
# Histogram of private vs non-private payments.
hist(PrivateAttorney1$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment of Private Attorney vs Payment for Non-Private Attorney")
# Overlaid histogram for non-private.
hist(NonPrivateAttorney$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
# Median line for private.
abline(v = median(PrivateAttorney1$Payment),
col = "#00009950", lwd = 2)
# Median line for non-private.
abline(v = median(NonPrivateAttorney$Payment),
col = "#99000050", lwd = 2)
# funModeling/Hmisc frequency overview of the whole dataset.
library(funModeling)
library(tidyverse)
library(Hmisc)
freq(LS)
# Run a one-shot exploratory-data-analysis report on `data`:
# structure (dplyr::glimpse), NA/zero/type metrics (funModeling::df_status),
# frequency tables (freq), numeric profiling (profiling_num), numeric-column
# plots (plot_num) and an Hmisc::describe summary, which is returned so the
# caller's top-level invocation still auto-prints it as before.
basic_eda <- function(data)
{
glimpse(data)
df_status(data)
freq(data)
# BUG FIX: inside a function R suppresses auto-printing, so
# profiling_num()'s table (which only *returns* a value, unlike the
# printing/plotting helpers above) was silently discarded.
print(profiling_num(data))
plot_num(data)
describe(data)
}
basic_eda(LS)
# Checking dataset metrics (NA counts, zeros, types).
df_status(LS)
# Not useful now; profiling_num is for numerical variables.
#data_prof=profiling_num(LS)
#data_prof
# A contingency table could be built for any two variables.
#describe(LS)
# --- Surgical specialties pooled together ----------------------------------
Surgery <- LS%>%
filter(Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
Surgery
hist(Surgery$Payment)
Surgerysats<-SpecialtyFunction(Surgery)
Surgerysats
# --- Medical (non-surgical) specialties pooled together --------------------
Medicine <- LS%>%
filter(Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
hist(Medicine$Payment)
Medicine
Medicinestats <- SpecialtyFunction(Medicine)
Medicinestats
# Overlapping density histograms: surgery vs medicine payments.
hist(Surgery$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Surgery vs Medicine for Payment")
# Overlaid histogram for medicine.
hist(Medicine$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
# Median line for surgery.
abline(v = median(Surgery$Payment),
col = "#00009950", lwd = 2)
# Median line for medicine.
abline(v = median(Medicine$Payment),
col = "#99000050", lwd = 2)
# Overlapping histograms of Medicine vs Surgery payments.
# BUG FIX: hist() was called on the whole data frames, which errors
# ("'x' must be numeric"); the Payment column is what should be drawn.
# The hard-coded xlim=c(0,10)/ylim=c(0,200) also could not contain the
# payment values, so the x-axis limits are now derived from the data and
# the y-axis is left to hist()'s default.
pay_range <- range(c(Medicine$Payment, Surgery$Payment))
# Histogram, grey colour scheme (semi-transparent so overlap is visible).
hist(Medicine$Payment, col=rgb(0.1,0.1,0.1,0.5), xlim=pay_range, main="Overlapping Histogram", xlab="Payment")
hist(Surgery$Payment, col=rgb(0.8,0.8,0.8,0.5), add=TRUE)
# Histogram, coloured (red = medicine, blue = surgery).
hist(Medicine$Payment, col=rgb(1,0,0,0.5), xlim=pay_range, main="Overlapping Histogram", xlab="Variable")
hist(Surgery$Payment, col=rgb(0,0,1,0.5), add=TRUE)
# TODO: draw the two histograms together.
# --- Surgery cases handled by a private attorney ---------------------------
SurgeryPrivate <- LS%>%
filter(`Private Attorney`==1,Specialty=='General Surgery' | Specialty =='Orthopedic Surgery' | Specialty =='Neurology/Neurosurgery' |Specialty=='Urological Surgery' | Specialty=='Plastic Surgeon' | Specialty=='Thoracic Surgery')
SurgeryPrivate
SpecialtyFunction(SurgeryPrivate)
# Medicine cases with private insurance AND a private attorney (not needed).
MedicinePrivate <- LS%>%
filter(Insurance=='Private',`Private Attorney`==1,Specialty=='Internal Medicine' | Specialty =='Emergency Medicine' | Specialty=='Physical Medicine' | Specialty=='Occupational Medicine')
MedicinePrivate
SpecialtyFunction(MedicinePrivate)
# All specialties with both private attorney and private insurance.
SpecialityPrivateALL<- LS%>%
filter(`Private Attorney`==1,Insurance=='Private')
SpecialityPrivateALL
SpecialtyFunction(SpecialityPrivateALL)
table(LS$Specialty,LS$Insurance)
# The two unknown columns should be merged into one 'unknown' column.
# --- Severity analysis -----------------------------------------------------
SEVE <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6)
SEVE
# The top-4 severity levels account for ~63.9% of payments:
# high severity means high payment.
sum(SEVE$Payment)/sum(LS$Payment)
SEVELESS <-LS%>%
filter(Severity==1 | Severity==2 | Severity==3 | Severity==4 | Severity==5)
SEVELESS
# The majority of high-severity cases involve a private attorney.
SEVE1 <- LS %>%
filter(Severity==9 | Severity==8 | Severity==7 | Severity==6,`Private Attorney`==1)
SEVE1
HighSeverityPrivategraph<-table(SEVE$`Private Attorney`)
barplot(HighSeverityPrivategraph,main='Private Attorney Distribution in High Severity',xlab='Private Attorney',ylab='Frequency',col=c('beige','bisque4'))
#SS<- data.frame("HighSeverityAll":HighSeverityAll,"HighSeverityPrivate":HighSeverityPrivate)
#SS
# Boxplot of high vs low severity with respect to payment.
boxplot(SEVE$Payment,SEVELESS$Payment , xlab="High Severity vs Low Severity",
main="boxplot of High Severity VS Less Severity ",ylab='Payment'
)
plot(density(SEVELESS$Payment))
plot(density(SEVE$Payment))
# Correlation overview between variables (work to be done).
library("DataExplorer")
plot_correlation(LS)
#
library(vcd)
#mosaic(LS, shade=TRUE, legend=TRUE)
#ssoc(LS, shade=TRUE)
# --- Age-band analysis: <18, 18-39, 40-59, 60+ -----------------------------
AGE1<-LS%>%
filter(Age<18)
AGE1
AGE2 <- LS%>%
filter(Age>=18 & Age<40)
AGE2
AGE3<-LS%>%
filter(Age>=40 & Age<60)
AGE3
AGE4 <- LS%>%
filter(Age>=60)
AGE4
# Median payment is highest in the 60+ band.
SpecialtyFunction(AGE1)
SpecialtyFunction(AGE2)
SpecialtyFunction(AGE3)
SpecialtyFunction(AGE4)
# Severity is higher in the 60+ band, followed by 35+.
table(AGE1$Severity)
table(AGE2$Severity)
table(AGE3$Severity)
table(AGE4$Severity)
# Specialty breakdown by age band.
table(AGE1$Specialty)
table(AGE2$Specialty)
table(AGE3$Specialty)
table(AGE4$Specialty)
freq(AGE1$Specialty)
freq(AGE2$Specialty)
freq(AGE3$Specialty)
freq(AGE4$Specialty)
# Payment histograms per age band (plain versions).
ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500)
# Themed per-band histograms (colour-coded backgrounds per age band).
AAAG1<-ggplot(data = AGE1) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG1<-AAAG1 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG1<-AAAG1+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 13), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG1
AAAG2<-ggplot(data = AGE2) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG2<-AAAG2 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG2<-AAAG2+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.4, colour = "grey10"),
plot.background = element_rect(fill = "#afbaed"), legend.background = element_rect(fill = "#afbaed"))
AAAG2
AAAG3<-ggplot(data = AGE3) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG3<-AAAG3 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG3<-AAAG3+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 10), axis.title = element_text(size = 10), axis.line = element_line(size = 0.2, colour = "grey10"),
plot.background = element_rect(fill = "#edafaf"), legend.background = element_rect(fill = "#edafaf"))
AAAG3
AAAG4<-ggplot(data = AGE4) +
geom_histogram(mapping = aes(x = Payment), binwidth = 500) +scale_colour_brewer(palette = "Set2")
AAAG4<-AAAG4 + theme(axis.text.x = element_text(angle = 45, hjust = 5))
AAAG4<-AAAG4+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 10),
legend.text = element_text(size = 16), axis.title = element_text(size = 14), axis.line = element_line(size = 0.6, colour = "grey10"),
plot.background = element_rect(fill = "#eaafed"), legend.background = element_rect(fill = "#eaafed"))
AAAG4
# Per-specialty payment statistics within each age band (AGE1..AGE4).
# BUG FIX: in all four original summarise() calls the SKEW column was
# computed with kurtosis(); it now correctly uses skewness().
# The four identical pipelines are also deduplicated into one helper.
#
# Args:
#   df: one age-band data frame with Specialty and Payment columns.
# Returns: a tibble of count, sum, median, mean, sd, variance, kurtosis
#          and skewness of Payment per Specialty.
age_specialty_stats <- function(df) {
  df %>%
    group_by(Specialty) %>%
    summarise(total = n(),
              pay = sum(Payment),
              Med = median(Payment),
              Mean = mean(Payment),
              SD = sd(Payment),
              VAR = var(Payment),
              KUR = kurtosis(Payment),
              SKEW = skewness(Payment))
}
AAGE1 <- age_specialty_stats(AGE1)
AAGE1
AAGE2 <- age_specialty_stats(AGE2)
AAGE2
AAGE3 <- age_specialty_stats(AGE3)
AAGE3
AAGE4 <- age_specialty_stats(AGE4)
AAGE4
# Overall (ungrouped) payment statistics per age band, including skewness.
AAGE11 <-AGE1%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE11
AAGE12 <-AGE2%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE12
AAGE13 <-AGE3%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE13
AAGE14 <-AGE4%>%
summarise(total=n(),
pay=sum(Payment),
Med=median(Payment),
Mean=mean(Payment),
SD=sd(Payment),
VAR=var(Payment),
KUR=kurtosis(Payment),
SKEW=skewness(Payment)
)
AAGE14
# Records with private insurance AND female gender.
PF <- LS%>%
filter(Insurance=='Private',Gender=='Female')
PF
dim(PF)
# 47.8 percent of private-insurance records are female (34 of 71).
print(34/71)
# Both spellings of unknown insurance combined.
UNKOINSU<-LS%>%
filter(Insurance=='Unknown'| Insurance=='unknown')
UNKOINSU
count(UNKOINSU)
# 36 of 118 insurance values are unknown.
36/118
# ~30% are unknown.
#ggplot(LS,aes(Gender,Payment))+theme(plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
ggplot(LS,aes(Gender,Payment))+theme(legend.position="bottom", legend.direction="horizontal", legend.title = element_blank(), axis.text = element_text(size = 14),
legend.text = element_text(size = 13), axis.title = element_text(size = 14), axis.line = element_line(size = 0.4, colour = "grey10"),plot.background = element_rect(fill = "#C8EDAF"), legend.background = element_rect(fill = "#C8EDAF"))+geom_point()
# TODO: decide which columns to focus on.
#hist(LS$Payment,
#col = "#00009950", freq = FALSE, xlab = "Payment",
#main = "Payment vs count")
#abline(v = mean(LS$Payment),
#col = "#00009950", lwd = 2)
# --- Male vs female payment distributions ----------------------------------
# Male subset.
Mapay <- LS%>%
filter(Gender=='Male')
# Female subset.
Fepay <-LS%>%
filter(Gender=='Female')
# Density histogram for male payments.
hist(Mapay$Payment,
col = "#00009950", freq = FALSE, xlab = "Payment",
main = "Payment vs count")
# Overlaid histogram for female payments.
hist(Fepay$Payment, add = TRUE,
col = "#99000050", freq = FALSE)
# Mean line for male (median could be used instead).
abline(v = mean(Mapay$Payment),
col = "#00009950", lwd = 2)
# Mean line for female.
abline(v = mean(Fepay$Payment),
col = "#99000050", lwd = 2)
# --- Spineplots: Severity x Gender, Gender x Insurance ---------------------
LS$Severity<-as.factor(LS$Severity)
LS$Gender<-as.factor(LS$Gender)
LS$Severity
spineplot(LS$Severity,LS$Gender,
xlab = "severity",ylab = "Gender",col=c("blue","green"))
# Insurance vs gender: many unknowns in the male gender; ~30% unknown overall.
LS$Insurance<-as.factor(LS$Insurance)
LS$Insurance
spineplot(LS$Gender,LS$Insurance,ylab = "Insurance",
xlab = "Gender",
col =c("red","green","yellow","purple","orange","blue"))
|
############################################################################################################################################################################
# Processing raw MIMS data to calculate concentrations & ratios
# Original code from Hilary Madinger and Bob Hall
# 16 July 2016
# Updated by Luke Loken
# Aug 2018
# Execute the functions in MIMS_gas_functions.R before this code.
# MIMS_gas_functions.R calculates gas concentrations at saturation, while correcting for water density, temperature, and barometric presssure, which are used in the functions below.
# Using gas saturations, two water bath calibrations (at different temperatures), and the data copied and pasted from the MIMS, this code converts mass signals into concentrations/ratios.
############################################################################################################################################################################
###########################################
# DATA IMPORT AND MANAGEMENT
###########################################
# I have had problems with .csv files removing decimal places for values from the MIMS. One way to fix this is to use the readxl package to read in an excel file instead of a .csv file.
library(readxl)
# library(RcppRoll)
# Load gas functions (saturation calculations used by MIMScalc below).
source('R/mims_gas_functions.R')
# Load the processing function MIMScalc().
source('R/MIMScalc.R')
# NOTE(review): machine-specific path; adjust when run elsewhere.
data_dir<-'C:/Dropbox/USBR Delta Project'
# Call and name the MIMS data.
# MIMSdata_Run1<-read_excel(paste0(data_dir, "/Data/MIMS/MIMS_Run1_2018_08_18.xlsx"))
# MIMSdata_Run2<-read_excel(paste0(data_dir, "/Data/MIMS/MIMS_Run2_2018_08_18.xlsx"))
MIMSdata_Run1<-read_excel(paste0(data_dir, "/Data/MIMS/Loken_MIMS_SSC_2019-11-21.xlsx"))
# MIMSdata_Run1<-read_excel(paste0(data_dir, "/Data/MIMS/Loken_MIMS_SSC_2019-11-20_v2.xlsx"))
# Rename the mass-signal columns to syntactic R names (e.g. '28' -> 'X28').
names(MIMSdata_Run1)[match(c('18', '28', '32', '40', 'N2/Ar', 'O2/Ar'), names(MIMSdata_Run1))]<-c('X18', 'X28', 'X32', 'X40', 'N2.Ar', 'O2.Ar')
# names(MIMSdata_Run2)[match(c('18', '28', '32', '40', 'N2/Ar', 'O2/Ar'), names(MIMSdata_Run2))]<-c('X18', 'X28', 'X32', 'X40', 'N2.Ar', 'O2.Ar')
# Mass 34 (18O-O2) is only present in some runs; rename it if it exists.
if ('34' %in% names(MIMSdata_Run1)){
names(MIMSdata_Run1)[match(c('34', 'O2-18/Ar'), names(MIMSdata_Run1))]<-c('X34', 'O2-18.Ar')
}
# if ('34' %in% names(MIMSdata_Run2)){
# names(MIMSdata_Run2)[match(c('34', 'O2-18/Ar'), names(MIMSdata_Run2))]<-c('X34', 'O2-18.Ar')
# }
# Fill missing barometric pressure with the sea-level default (760 mmHg).
MIMSdata_Run1$Pressure[which(is.na(MIMSdata_Run1$Pressure))] <- 760
# The needed columns for this code include:
# X28, X32, X40, N2.Ar, O2.Ar = columns from the MIMS output. These can come from MIMS_datasheet_mean_example.R too.
# Temp = waterbath or water sample temperature in Celcius
# Pressure = air pressure when sample was collected or when running samples in mmHg (barometric pressure conversions also in MIMS_gas_functions.R)
# Sampletype = datasheet column distinguishing data from calibrating the MIMS and samples (Calibrate/Samp)
# Calibnum = datasheet column where each calibration period has a seperate, sequential number (1, 2, 3, ect. )
# Calibtype = datasheet column distinguishing colder and warmer calibration temperatures (Low/High)
# Useful metadata may include sample ID, treatment type, sample location, time and date of sample collection, ect.
# There is also a comments column for including any irregularities or observations while running the MIMS
###########################################
# Process data
###########################################
MIMS_outdf_Run1 <-MIMScalc(MIMSdata_Run1) #Name the file made in the MIMScalc function anything you would like.
# MIMS_outdf_Run2 <-MIMScalc(MIMSdata_Run2)
# MIMS_outdf_Full<-full_join(MIMS_outdf_Run1, MIMS_outdf_Run2)
MIMS_outdf_Full<-MIMS_outdf_Run1
# write.csv(MIMS_outdf_Full, paste0(data_dir, "/Data/MIMS/Outputs/MIMScalculations_2019-11-20.csv")) #Save the file to your computer or dropbox.
## The new datafile is only data from samples
## You can delete the new first column without a header (it's just a count of the samples).
## The resulting narcalc and O2arcalc columns are the ratios you then use for calculations. Nar and O2Ar are unprocessed numbers.
## The '_calc' are the now calibrated concentration and ratio values.
## The ratios are more accurate than concentrations calculated from the MIMS samples because the machine is better at measureing ratios of masses than individual masses.
# ##################################
# Plotting
# Should probably put this in a different script
# ##################################
# Normalise the 'Prosp' site label to 'Pro'.
MIMS_outdf_Full$SampleID <- gsub('Prosp', 'Pro', MIMS_outdf_Full$SampleID )
# NOTE(review): sitesIDs does not appear to be used below.
sitesIDs<-factor(MIMS_outdf_Full$SampleID)
# png(paste0(data_dir, '/Figures/MIMS/2019_ArbTest_MIMS_Boxplots.png'), width=5, height=8, units='in', res=200)
# --- Figure 1: O2, O2:Ar, N2, N2:Ar by date (4 stacked panels) -------------
png(paste0(data_dir, '/Figures/MIMS/2018_SSC_EC6_Deep.png'), width=5, height=8, units='in', res=200)
par(mar=c(2,3.75,0.5,0.5))
par(oma=c(1,0,0,0))
par(mgp=c(3,0.5,0))
par(mfrow=c(4,1))
par(cex.axis=.8)
colors<-c('#1f78b4', '#b2df8a')
boxplot(MIMS_outdf_Full$O2calc~ MIMS_outdf_Full$Date, col=colors[1])
# boxplot(MIMS_outdf_Full$O2satv ~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
# Dotted reference line at mean saturation value.
abline(h=mean(MIMS_outdf_Full$O2satv), lty=3)
mtext(expression(paste(O[2], ' (mg L'^'-1', ')')), 2, 1.75)
# legend('topleft', 'Saturation', lty=3, bty='n')
boxplot(MIMS_outdf_Full$O2arcalc~ MIMS_outdf_Full$Date, col=colors[1])
# boxplot(MIMS_outdf_Full$O2arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2arsatv), lty=3)
mtext(expression(paste(O[2], ':Ar', ' (molar ratio)')), 2, 1.75)
boxplot(MIMS_outdf_Full$ncalc~ MIMS_outdf_Full$Date, col=colors[2])
# boxplot(MIMS_outdf_Full$nsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$nsatv), lty=3)
mtext(expression(paste(N[2], ' (mg L'^'-1', ')')), 2, 1.75)
boxplot(MIMS_outdf_Full$narcalc~ MIMS_outdf_Full$Date , col=colors[2])
# boxplot(MIMS_outdf_Full$narsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$narsatv), lty=3)
mtext(expression(paste(N[2], ':Ar', ' (molar ratio)')), 2, 1.75)
mtext('Station ID', 1, 0, outer=T)
dev.off()
# --- Figure 2 (screen only): O2 and N2 concentrations by sample ID ---------
# png(paste0(data_dir, '/Figures/MIMS/2018August_MIMS_Boxplots_Conc.png'), width=6, height=4, units='in', res=200)
par(mar=c(2,3.75,0.5,0.5))
par(oma=c(1,0,0,0))
par(mgp=c(3,0.5,0))
par(mfrow=c(2,1))
par(cex.axis=.7)
colors<-c('#1f78b4', '#b2df8a')
boxplot(MIMS_outdf_Full$O2calc~ MIMS_outdf_Full$SampleID, col=colors[1])
# boxplot(MIMS_outdf_Full$O2satv ~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2satv ), lty=3)
mtext(expression(paste(O[2], ' (mg L'^'-1', ')')), 2, 1.75)
# legend('topleft', 'Saturation', lty=3, bty='n')
boxplot(MIMS_outdf_Full$ncalc~ MIMS_outdf_Full$SampleID, col=colors[2])
# boxplot(MIMS_outdf_Full$nsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$nsatv), lty=3)
mtext(expression(paste(N[2], ' (mg L'^'-1', ')')), 2, 1.75)
mtext('Station ID', 1, 0, outer=T)
dev.off()
# --- Figure 3: 18O panels, only when mass 34 was measured ------------------
if ('X34' %in% names(MIMS_outdf_Full)){
png(paste0(data_dir, '/Figures/MIMS/2018August_MIMS_Boxplots_O18_withRatios.png'), width=5, height=10, units='in', res=200)
par(mar=c(2,3.75,0.5,0.5))
par(oma=c(1,0,0,0))
par(mgp=c(3,0.5,0))
par(mfrow=c(5,1))
par(cex.axis=.7)
colors<-c('#1f78b4', '#b2df8a', '#a6cee3', '#fb9a99')
boxplot(MIMS_outdf_Full$O2calc~ MIMS_outdf_Full$SampleID, col=colors[1])
# boxplot(MIMS_outdf_Full$O2satv ~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2satv ), lty=3)
mtext(expression(paste(O[2], ' (mg L'^'-1', ')')), 2, 1.75)
boxplot(MIMS_outdf_Full$O2arcalc~ MIMS_outdf_Full$SampleID, col=colors[1])
# boxplot(MIMS_outdf_Full$O2arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2arsatv), lty=3)
mtext(expression(paste(O[2], ':Ar', ' (molar ratio)')), 2, 1.75)
boxplot(MIMS_outdf_Full$O18calc~ MIMS_outdf_Full$SampleID, col=colors[3])
# boxplot(MIMS_outdf_Full$O18satv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O18satv, na.rm=T), lty=3)
mtext(expression(paste(''^'18','O-', O[2], ' (mg L'^'-1', ')')), 2, 1.75)
boxplot(MIMS_outdf_Full$O18arcalc~ MIMS_outdf_Full$SampleID, col=colors[3])
# boxplot(MIMS_outdf_Full$O18arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O18arsatv, na.rm=T), lty=3)
mtext(expression(paste(''^'18','O-',O[2], ':Ar', ' (molar ratio)')), 2, 1.75)
boxplot(MIMS_outdf_Full$O18arcalc/MIMS_outdf_Full$O2arcalc*1000~ MIMS_outdf_Full$SampleID, col=colors[4])
# boxplot(MIMS_outdf_Full$O18arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
# abline(h=mean(MIMS_outdf_Full$O18arsatv, na.rm=T), lty=3)
mtext(expression(paste(''^'18', 'O:', ''^'16','O-',O[2], ' (per mil)')), 2, 1.75)
dev.off()
# Delta-18O in per mil; 0.002005 is presumably the 18O/16O reference
# ratio (VSMOW) — confirm against the project's standards documentation.
boxplot((MIMS_outdf_Full$O18calc/MIMS_outdf_Full$O2calc/0.002005-1)*1000~ MIMS_outdf_Full$SampleID, col=colors[4])
# boxplot(MIMS_outdf_Full$O18arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
# abline(h=mean(MIMS_outdf_Full$O18arsatv, na.rm=T), lty=3)
mtext(expression(paste(delta,''^'18', 'O (per mil)')), 2, 1.75)
}
|
/R/mims_workflow_Nov21_2019.R
|
no_license
|
lukeloken/USBRDelta
|
R
| false
| false
| 9,440
|
r
|
############################################################################################################################################################################
# Processing raw MIMS data to calculate concentrations & ratios
# Original code from Hilary Madinger and Bob Hall
# 16 July 2016
# Updated by Luke Loken
# Aug 2018
# Execute the functions in MIMS_gas_functions.R before this code.
# MIMS_gas_functions.R calculates gas concentrations at saturation, while correcting for water density, temperature, and barometric pressure, which are used in the functions below.
# Using gas saturations, two water bath calibrations (at different temperatures), and the data copied and pasted from the MIMS, this code converts mass signals into concentrations/ratios.
############################################################################################################################################################################
###########################################
# DATA IMPORT AND MANAGEMENT
###########################################
# I have had problems with .csv files removing decimal places for values from the MIMS. One way to fix this is to use the readxl package to read in an excel file instead of a .csv file.
library(readxl)
# library(RcppRoll)
#load gas functions
source('R/mims_gas_functions.R')
#load processing function
source('R/MIMScalc.R')
# NOTE(review): hardcoded absolute path -- update for your machine before running.
data_dir<-'C:/Dropbox/USBR Delta Project'
# Call and name the MIMS data.
# MIMSdata_Run1<-read_excel(paste0(data_dir, "/Data/MIMS/MIMS_Run1_2018_08_18.xlsx"))
# MIMSdata_Run2<-read_excel(paste0(data_dir, "/Data/MIMS/MIMS_Run2_2018_08_18.xlsx"))
MIMSdata_Run1<-read_excel(paste0(data_dir, "/Data/MIMS/Loken_MIMS_SSC_2019-11-21.xlsx"))
# MIMSdata_Run1<-read_excel(paste0(data_dir, "/Data/MIMS/Loken_MIMS_SSC_2019-11-20_v2.xlsx"))
#Rename columns
# ('X' prefix turns the numeric mass-channel headers into syntactic R names;
#  ratio headers like 'N2/Ar' become 'N2.Ar')
names(MIMSdata_Run1)[match(c('18', '28', '32', '40', 'N2/Ar', 'O2/Ar'), names(MIMSdata_Run1))]<-c('X18', 'X28', 'X32', 'X40', 'N2.Ar', 'O2.Ar')
# names(MIMSdata_Run2)[match(c('18', '28', '32', '40', 'N2/Ar', 'O2/Ar'), names(MIMSdata_Run2))]<-c('X18', 'X28', 'X32', 'X40', 'N2.Ar', 'O2.Ar')
# Mass-34 (18O-O2) channel is optional -- only rename it when present.
if ('34' %in% names(MIMSdata_Run1)){
names(MIMSdata_Run1)[match(c('34', 'O2-18/Ar'), names(MIMSdata_Run1))]<-c('X34', 'O2-18.Ar')
}
# if ('34' %in% names(MIMSdata_Run2)){
#   names(MIMSdata_Run2)[match(c('34', 'O2-18/Ar'), names(MIMSdata_Run2))]<-c('X34', 'O2-18.Ar')
# }
#Default pressure for sea level (760 mmHg) where no pressure was recorded
MIMSdata_Run1$Pressure[which(is.na(MIMSdata_Run1$Pressure))] <- 760
# The needed columns for this code include:
# X28, X32, X40, N2.Ar, O2.Ar = columns from the MIMS output. These can come from MIMS_datasheet_mean_example.R too.
# Temp = waterbath or water sample temperature in Celsius
# Pressure = air pressure when sample was collected or when running samples in mmHg (barometric pressure conversions also in MIMS_gas_functions.R)
# Sampletype = datasheet column distinguishing data from calibrating the MIMS and samples (Calibrate/Samp)
# Calibnum = datasheet column where each calibration period has a separate, sequential number (1, 2, 3, etc.)
# Calibtype = datasheet column distinguishing colder and warmer calibration temperatures (Low/High)
# Useful metadata may include sample ID, treatment type, sample location, time and date of sample collection, etc.
# There is also a comments column for including any irregularities or observations while running the MIMS
###########################################
# Process data
###########################################
# MIMScalc() (sourced above) converts raw mass signals into calibrated
# concentrations and ratios using the waterbath calibration rows.
MIMS_outdf_Run1 <-MIMScalc(MIMSdata_Run1) #Name the file made in the MIMScalc function anything you would like.
# MIMS_outdf_Run2 <-MIMScalc(MIMSdata_Run2)
# MIMS_outdf_Full<-full_join(MIMS_outdf_Run1, MIMS_outdf_Run2)
# Only one run this time, so the "full" table is just run 1.
MIMS_outdf_Full<-MIMS_outdf_Run1
# write.csv(MIMS_outdf_Full, paste0(data_dir, "/Data/MIMS/Outputs/MIMScalculations_2019-11-20.csv")) #Save the file to your computer or dropbox.
## The new datafile is only data from samples
## You can delete the new first column without a header (it's just a count of the samples).
## The resulting narcalc and O2arcalc columns are the ratios you then use for calculations. Nar and O2Ar are unprocessed numbers.
## The '_calc' are the now calibrated concentration and ratio values.
## The ratios are more accurate than concentrations calculated from the MIMS samples because the machine is better at measuring ratios of masses than individual masses.
# ##################################
# Plotting
# Should probably put this in a different script
# ##################################
# Shorten 'Prosp' station labels to 'Pro' for axis readability.
MIMS_outdf_Full$SampleID <- gsub('Prosp', 'Pro', MIMS_outdf_Full$SampleID )
sitesIDs<-factor(MIMS_outdf_Full$SampleID)
# Figure 1: four-panel boxplots of O2, O2:Ar, N2, and N2:Ar grouped by run date.
# png(paste0(data_dir, '/Figures/MIMS/2019_ArbTest_MIMS_Boxplots.png'), width=5, height=8, units='in', res=200)
png(paste0(data_dir, '/Figures/MIMS/2018_SSC_EC6_Deep.png'), width=5, height=8, units='in', res=200)
par(mar=c(2,3.75,0.5,0.5))
par(oma=c(1,0,0,0))
par(mgp=c(3,0.5,0))
par(mfrow=c(4,1))
par(cex.axis=.8)
colors<-c('#1f78b4', '#b2df8a')
boxplot(MIMS_outdf_Full$O2calc~ MIMS_outdf_Full$Date, col=colors[1])
# boxplot(MIMS_outdf_Full$O2satv ~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
# Dotted reference line = mean saturation value.
# NOTE(review): no na.rm here; any NA in O2satv makes the line disappear.
abline(h=mean(MIMS_outdf_Full$O2satv), lty=3)
mtext(expression(paste(O[2], ' (mg L'^'-1', ')')), 2, 1.75)
# legend('topleft', 'Saturation', lty=3, bty='n')
boxplot(MIMS_outdf_Full$O2arcalc~ MIMS_outdf_Full$Date, col=colors[1])
# boxplot(MIMS_outdf_Full$O2arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2arsatv), lty=3)
mtext(expression(paste(O[2], ':Ar', ' (molar ratio)')), 2, 1.75)
boxplot(MIMS_outdf_Full$ncalc~ MIMS_outdf_Full$Date, col=colors[2])
# boxplot(MIMS_outdf_Full$nsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$nsatv), lty=3)
mtext(expression(paste(N[2], ' (mg L'^'-1', ')')), 2, 1.75)
boxplot(MIMS_outdf_Full$narcalc~ MIMS_outdf_Full$Date , col=colors[2])
# boxplot(MIMS_outdf_Full$narsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$narsatv), lty=3)
mtext(expression(paste(N[2], ':Ar', ' (molar ratio)')), 2, 1.75)
mtext('Station ID', 1, 0, outer=T)
dev.off()
# Figure 2: two-panel O2 and N2 concentration boxplots grouped by station.
# NOTE(review): the png() call for this figure is commented out, so the panels
# draw on the currently active device and the dev.off() below closes it.
# png(paste0(data_dir, '/Figures/MIMS/2018August_MIMS_Boxplots_Conc.png'), width=6, height=4, units='in', res=200)
par(mar=c(2,3.75,0.5,0.5))
par(oma=c(1,0,0,0))
par(mgp=c(3,0.5,0))
par(mfrow=c(2,1))
par(cex.axis=.7)
colors<-c('#1f78b4', '#b2df8a')
boxplot(MIMS_outdf_Full$O2calc~ MIMS_outdf_Full$SampleID, col=colors[1])
# boxplot(MIMS_outdf_Full$O2satv ~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2satv ), lty=3)
mtext(expression(paste(O[2], ' (mg L'^'-1', ')')), 2, 1.75)
# legend('topleft', 'Saturation', lty=3, bty='n')
boxplot(MIMS_outdf_Full$ncalc~ MIMS_outdf_Full$SampleID, col=colors[2])
# boxplot(MIMS_outdf_Full$nsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$nsatv), lty=3)
mtext(expression(paste(N[2], ' (mg L'^'-1', ')')), 2, 1.75)
mtext('Station ID', 1, 0, outer=T)
dev.off()
# Figure 3 (optional): five-panel 18O plots, only when the mass-34 channel
# (renamed to X34 above) was recorded in this run.
if ('X34' %in% names(MIMS_outdf_Full)){
png(paste0(data_dir, '/Figures/MIMS/2018August_MIMS_Boxplots_O18_withRatios.png'), width=5, height=10, units='in', res=200)
par(mar=c(2,3.75,0.5,0.5))
par(oma=c(1,0,0,0))
par(mgp=c(3,0.5,0))
par(mfrow=c(5,1))
par(cex.axis=.7)
colors<-c('#1f78b4', '#b2df8a', '#a6cee3', '#fb9a99')
boxplot(MIMS_outdf_Full$O2calc~ MIMS_outdf_Full$SampleID, col=colors[1])
# boxplot(MIMS_outdf_Full$O2satv ~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2satv ), lty=3)
mtext(expression(paste(O[2], ' (mg L'^'-1', ')')), 2, 1.75)
boxplot(MIMS_outdf_Full$O2arcalc~ MIMS_outdf_Full$SampleID, col=colors[1])
# boxplot(MIMS_outdf_Full$O2arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O2arsatv), lty=3)
mtext(expression(paste(O[2], ':Ar', ' (molar ratio)')), 2, 1.75)
boxplot(MIMS_outdf_Full$O18calc~ MIMS_outdf_Full$SampleID, col=colors[3])
# boxplot(MIMS_outdf_Full$O18satv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O18satv, na.rm=T), lty=3)
mtext(expression(paste(''^'18','O-', O[2], ' (mg L'^'-1', ')')), 2, 1.75)
boxplot(MIMS_outdf_Full$O18arcalc~ MIMS_outdf_Full$SampleID, col=colors[3])
# boxplot(MIMS_outdf_Full$O18arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
abline(h=mean(MIMS_outdf_Full$O18arsatv, na.rm=T), lty=3)
mtext(expression(paste(''^'18','O-',O[2], ':Ar', ' (molar ratio)')), 2, 1.75)
boxplot(MIMS_outdf_Full$O18arcalc/MIMS_outdf_Full$O2arcalc*1000~ MIMS_outdf_Full$SampleID, col=colors[4])
# boxplot(MIMS_outdf_Full$O18arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
# abline(h=mean(MIMS_outdf_Full$O18arsatv, na.rm=T), lty=3)
mtext(expression(paste(''^'18', 'O:', ''^'16','O-',O[2], ' (per mil)')), 2, 1.75)
dev.off()
# NOTE(review): this delta-18O panel is drawn AFTER dev.off(), so it goes to
# whatever device is currently active, not to the PNG created above.
# (0.002005 is presumably the 18O/16O reference ratio -- confirm standard used.)
boxplot((MIMS_outdf_Full$O18calc/MIMS_outdf_Full$O2calc/0.002005-1)*1000~ MIMS_outdf_Full$SampleID, col=colors[4])
# boxplot(MIMS_outdf_Full$O18arsatv~ MIMS_outdf_Full$SampleID, add=T, border='blue', boxwex=0.3, lwd=3)
# abline(h=mean(MIMS_outdf_Full$O18arsatv, na.rm=T), lty=3)
mtext(expression(paste(delta,''^'18', 'O (per mil)')), 2, 1.75)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_SnpMatrix_from_vcf.R
\name{get_SnpMatrix_from_vcf}
\alias{get_SnpMatrix_from_vcf}
\title{Download 1000 Genomes vcf chunks and convert them to SnpMatrix objects}
\usage{
get_SnpMatrix_from_vcf(where, which = "CEU", vcf_file, panel_file)
}
\arguments{
\item{where}{GRanges object specifying chromosome, start and end position of the target region.}
\item{which}{Target population(s). Something like "CEU" or c("CEU", "CHB").}
\item{vcf_file}{local file or url to vcf file.}
\item{panel_file}{local file or url to panel file.}
}
\value{
Object of class SnpMatrix.
}
\description{
Download 1000 Genomes vcf chunks and convert them to SnpMatrix objects
}
\examples{
\dontrun{
region <- GenomicRanges::GRanges("1", IRanges::IRanges(1e6, 2e6))
snps <- get_SnpMatrix_from_vcf(where = region, which = "CEU",
  vcf_file = "ALL.chr1.phase3_v5.genotypes.vcf.gz",
  panel_file = "integrated_call_samples_v3.20130502.ALL.panel")
}
}
|
/man/get_SnpMatrix_from_vcf.Rd
|
no_license
|
joe-nas/haploplotR
|
R
| false
| true
| 766
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_SnpMatrix_from_vcf.R
\name{get_SnpMatrix_from_vcf}
\alias{get_SnpMatrix_from_vcf}
\title{Download 1000 Genomes vcf chunks and convert them to SnpMatrix objects}
\usage{
get_SnpMatrix_from_vcf(where, which = "CEU", vcf_file, panel_file)
}
\arguments{
\item{where}{GRanges object specifying chromosome, start and end position of the target region.}
\item{which}{Target population(s). Something like "CEU" or c("CEU", "CHB").}
\item{vcf_file}{local file or url to vcf file.}
\item{panel_file}{local file or url to panel file.}
}
\value{
Object of class SnpMatrix.
}
\description{
Download 1000 Genomes vcf chunks and convert them to SnpMatrix objects
}
\examples{
\dontrun{
region <- GenomicRanges::GRanges("1", IRanges::IRanges(1e6, 2e6))
snps <- get_SnpMatrix_from_vcf(where = region, which = "CEU",
  vcf_file = "ALL.chr1.phase3_v5.genotypes.vcf.gz",
  panel_file = "integrated_call_samples_v3.20130502.ALL.panel")
}
}
|
#' @include FLMatrix.R
NULL
#' An S4 class to represent LU Decomposition
#' @slot x object of class FLVector (column-major packed L and U values)
#' @slot perm object of class FLVector (row permutation applied by the decomposition)
#' @slot Dim numeric vector giving the dimension of the decomposed matrix
#' @slot lower object of class FLMatrix
#' @slot upper object of class FLMatrix
#' @slot data_perm object of class FLMatrix (the permutation matrix P)
#' @slot Dimnames list of dimnames carried over from the original matrix
#' @export
setClass(
"FLLU",
slots=list(
x="FLVector",
perm="FLVector",
Dim="vector",
lower="FLMatrix",
upper="FLMatrix",
data_perm="FLMatrix",
Dimnames="list"
)
)
#' LU Decomposition.
#'
#' The LU decomposition involves factorizing a matrix as the product of a lower
#' triangular matrix L and an upper triangular matrix U. Permutation matrix is also provided in the output.
#' If permutation matrix is not used in the decomposition, the output of permutation matrix is an identity matrix.
#'
#' \code{lu} replicates the equivalent lu() generic function.\cr
#' \code{expand} decomposes the compact form to a list of matrix factors.\cr
#' The expand method returns L,U and P factors as a list of FLMatrices.\cr
#'
#' The decomposition is of the form A = P L U where typically all matrices are of size (n x n),
#' and the matrix P is a permutation matrix, L is lower triangular and U is upper triangular.
#' @method lu FLMatrix
#' @param object is of class FLMatrix
#' @param ... any additional arguments
#' @section Constraints:
#' Input can only be with maximum dimension limitations
#' of (1000 x 1000).
#' @return
#' \item{x}{the FLVector form of "L" (unit lower triangular) and "U" (upper triangular) factors of the original matrix}
#' \item{perm}{FLVector that describes the permutation applied to the rows of the original matrix}
#' \item{Dim}{FLVector that gives the dimension of the original matrix}
#' \item{lower}{FLMatrix representing the lower triangular matrix}
#' \item{upper}{FLMatrix representing the upper triangular matrix}
#' \item{data_perm}{FLMatrix representing the permutation matrix}
#' @examples
#' flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL")
#' FLLUobject <- lu(flmatrix)
#' listresult <- expand(FLLUobject)
#' listresult$L
#' listresult$U
#' listresult$P
#' @export
setGeneric("lu", function(object,...) {
standardGeneric("lu")
})
# In-memory matrix classes are delegated straight to Matrix::lu().
setMethod("lu", signature(object = "matrix"),
function(object,...)
Matrix::lu(object,...))
setMethod("lu", signature(object = "dgeMatrix"),
function(object,...)
Matrix::lu(object,...))
setMethod("lu", signature(object = "dgCMatrix"),
function(object,...)
Matrix::lu(object,...))
# FLMatrix objects are decomposed in-database (see lu.FLMatrix below).
setMethod("lu", signature(object = "FLMatrix"),
function(object,...)
lu.FLMatrix(object,...))
# #' @export
# lu<-function(object, ...){
# UseMethod("lu",object)
# }
#' @export
lu.default <- Matrix::lu
# Performs the LU decomposition of an FLMatrix entirely in-database using the
# FLLUDecompUdt table UDT, then assembles the pieces into an FLLU object whose
# shape mimics Matrix's 'denseLU' (x, perm, Dim) plus explicit L/U/P matrices.
#' @export
lu.FLMatrix<-function(object,...)
{
connection<-getFLConnection()
## flag3Check(connection)
## flag1Check(connection)
# Reserve a fresh in-database matrix id for the packed L+U result.
MID1 <- getMaxMatrixId(connection)
# sqlstr <- paste0(
# viewSelectMatrix(object, "a","z"),
# outputSelectMatrix("FLLUDecompUdt",viewName="z",localName="a",
# outColNames=list("OutputMatrixID","OutputRowNum",
# "OutputColNum","OutputValL","OutputValU","OutputPermut"),
# whereClause="")
# )
# Build and size-check the SQL that applies the LU UDT to the matrix, then
# materialize the UDT output (row, col, L value, U value, permutation flag).
sqlstr <- constructMatrixUDTSQL(pObject=object,
pFuncName="FLLUDecompUdt",
pdims=getDimsSlot(object),
pdimnames=dimnames(object),
pReturnQuery=TRUE
)
sqlstr <- gsub("'%insertIDhere%'",MID1,sqlstr)
sqlstr <- ensureQuerySize(pResult=sqlstr,
pInput=list(object),
pOperator="lu")
tempResultTable <- createTable(pTableName=gen_unique_table_name("LU"),
pSelect=sqlstr)
# calculating LU matrix
# Pack L and U into one matrix: strictly-below-diagonal cells come from
# OutputValL, diagonal and above come from OutputValU (compact LU form).
sqlstrLU <-paste0(" SELECT ",MID1," AS MATRIX_ID, \n ",
"OutputRowNum AS rowIdColumn, \n ",
"OutputColNum AS colIdColumn, \n ",
"CAST(OutputValL AS FLOAT) AS valueColumn \n ",
" FROM ",tempResultTable,
" WHERE OutputRowNum > OutputColNum \n ",
" AND OutputValL IS NOT NULL \n ",
" UNION ALL \n ",
" SELECT ",MID1," AS MATRIX_ID, \n ",
" OutputRowNum AS rowIdColumn, \n ",
" OutputColNum AS colIdColumn, \n ",
" CAST(OutputValU AS FLOAT) AS valueColumn \n ",
" FROM ",tempResultTable,
" WHERE OutputRowNum <= OutputColNum \n ",
" AND OutputValU IS NOT NULL ")
##@Phani: insert into select with union all not working in hadoop
vLUTable <- createTable(pTableName=gen_unique_table_name("LU"),
pSelect=sqlstrLU)
flm <- FLMatrix(
connection = getFLConnection(object),
table_name = vLUTable,
matrix_id_value = MID1,
matrix_id_colname = "MATRIX_ID",
row_id_colname = "rowIdColumn",
col_id_colname = "colIdColumn",
cell_val_colname = "valueColumn",
dims=dim(object),
dimnames=dimnames(object),
type=typeof(object)
)
# tblfunqueryobj <- new("FLTableFunctionQuery",
# connectionName = attr(connection,"name"),
# variables=list(
# rowIdColumn="rowIdColumn",
# colIdColumn="colIdColumn",
# valueColumn="valueColumn"),
# whereconditions="",
# order = "",
# SQLquery=sqlstrLU)
# flm <- newFLMatrix(
# select= tblfunqueryobj,
# dims=dim(object),
# Dimnames=dimnames(object))
# LUMatrix <- store(object=flm)
LUMatrix <- flm
# calculating Permutation FLMatrix
data_perm <- FLMatrix(connection = connection,
table_name = tempResultTable,
matrix_id_value = "",
matrix_id_colname = "",
row_id_colname = "OutputRowNum",
col_id_colname = "OutputColNum",
cell_val_colname = "OutputPermut",
whereconditions=paste0("mtrx.OutputPermut IS NOT NULL "))
# calculating l FLmatrix
l<-FLMatrix(connection = connection,
table_name = tempResultTable,
matrix_id_value = "",
matrix_id_colname = "",
row_id_colname = "OutputRowNum",
col_id_colname = "OutputColNum",
cell_val_colname = "OutputValL",
whereconditions=paste0("mtrx.OutputValL IS NOT NULL "))
# calculating U FLmatrix
u<-FLMatrix(connection = connection,
table_name = tempResultTable,
matrix_id_value = "",
matrix_id_colname = "",
row_id_colname = "OutputRowNum",
col_id_colname = "OutputColNum",
cell_val_colname = "OutputValU",
whereconditions=paste0("mtrx.OutputValU IS NOT NULL "))
# calculating perm FLVector
# Rows where OutputPermut = 1 encode the row permutation chosen by the UDT.
table <- FLTable(tempResultTable,
"OutputColNum",
whereconditions=paste0(tempResultTable,".OutputPermut = 1 ")
)
perm <- table[,"OutputRowNum"]
# calculating x FLVector
# Flatten the packed LU matrix column-major into a vector, matching the
# layout of the 'x' slot of Matrix's denseLU objects.
VID2 <- getMaxVectorId(connection)
sqlstrX <-paste0("SELECT ",VID2," AS vectorIdColumn",
",ROW_NUMBER() OVER(ORDER BY ",
getVariables(LUMatrix)$colId,",",
getVariables(LUMatrix)$rowId,") AS vectorIndexColumn
, ",getVariables(LUMatrix)$value," AS vectorValueColumn
FROM ",tableAndAlias(LUMatrix),
constructWhere(constraintsSQL(LUMatrix)))
tblfunqueryobj <- new("FLTableFunctionQuery",
connectionName = attr(connection,"name"),
variables = list(
obs_id_colname = "vectorIndexColumn",
cell_val_colname = "vectorValueColumn"),
whereconditions="",
order = "",
SQLquery=sqlstrX)
flv <- newFLVector(
select = tblfunqueryobj,
Dimnames = list(1:length(LUMatrix),
"vectorValueColumn"),
isDeep = FALSE)
# x <- store(object=flv)
x <- flv
# calculating Dim vector
Dim<- dim(data_perm)
a<-new("FLLU",
x=x,
perm=perm,
Dim=Dim,
lower=l,
upper=u,
data_perm = data_perm,
Dimnames=dimnames(object)
)
class(a)<-"FLLU"
#sqlSendUpdate(connection,paste0(" DROP TABLE ",getRemoteTableName(getOption("ResultDatabaseFL"),tempResultTable)))
return(a)
}
# Print method mirroring the compact display used by Matrix for 'denseLU'
# objects: report the lengths of the x, perm and Dim slots, then print each.
#' @export
print.FLLU <- function(object) {
  n_x <- length(object@x)
  n_perm <- length(object@perm)
  n_dim <- length(object@Dim)
  cat("'Matrix Factorization' of Formal class 'denseLU' [package Matrix] with 3 slots\n") #"Matrix"
  cat("..@x : num[1:", n_x, "]")
  print(object@x)
  cat("..@perm : int[1:", n_perm, "]")
  print(object@perm)
  cat("..@Dim : int[1:", n_dim, "]")
  print(object@Dim)
}
# Reuse the S3 print method as the S4 show method for FLLU.
#' @export
setMethod("show", "FLLU", print.FLLU)
# S3 generic for expanding a compact factorization into its factor matrices.
#' @export
expand<-function(object, ...){
UseMethod("expand",object)
}
# Non-FL objects (e.g. Matrix's denseLU) fall back to Matrix::expand.
#' @export
expand.default <- Matrix::expand
#' @export
expand.FLLU <- function(object, ...) {
  # Expose the three factors of A = P L U as a named list of FLMatrix objects.
  list(L = object@lower,
       U = object@upper,
       P = object@data_perm)
}
#' @export
`$.FLLU` <- function(object, property) {
  # Dispatch on the requested factor name; any other name yields the same
  # informational string as before.
  switch(property,
         L = object@lower,
         U = object@upper,
         P = object@data_perm,
         "That's not a valid property")
}
|
/R/FLLUDecomp.R
|
no_license
|
richa3522/AdapteR
|
R
| false
| false
| 9,568
|
r
|
#' @include FLMatrix.R
NULL
#' An S4 class to represent LU Decomposition
#' @slot x object of class FLVector (column-major packed L and U values)
#' @slot perm object of class FLVector (row permutation applied by the decomposition)
#' @slot Dim numeric vector giving the dimension of the decomposed matrix
#' @slot lower object of class FLMatrix
#' @slot upper object of class FLMatrix
#' @slot data_perm object of class FLMatrix (the permutation matrix P)
#' @slot Dimnames list of dimnames carried over from the original matrix
#' @export
setClass(
"FLLU",
slots=list(
x="FLVector",
perm="FLVector",
Dim="vector",
lower="FLMatrix",
upper="FLMatrix",
data_perm="FLMatrix",
Dimnames="list"
)
)
#' LU Decomposition.
#'
#' The LU decomposition involves factorizing a matrix as the product of a lower
#' triangular matrix L and an upper triangular matrix U. Permutation matrix is also provided in the output.
#' If permutation matrix is not used in the decomposition, the output of permutation matrix is an identity matrix.
#'
#' \code{lu} replicates the equivalent lu() generic function.\cr
#' \code{expand} decomposes the compact form to a list of matrix factors.\cr
#' The expand method returns L,U and P factors as a list of FLMatrices.\cr
#'
#' The decomposition is of the form A = P L U where typically all matrices are of size (n x n),
#' and the matrix P is a permutation matrix, L is lower triangular and U is upper triangular.
#' @method lu FLMatrix
#' @param object is of class FLMatrix
#' @param ... any additional arguments
#' @section Constraints:
#' Input can only be with maximum dimension limitations
#' of (1000 x 1000).
#' @return
#' \item{x}{the FLVector form of "L" (unit lower triangular) and "U" (upper triangular) factors of the original matrix}
#' \item{perm}{FLVector that describes the permutation applied to the rows of the original matrix}
#' \item{Dim}{FLVector that gives the dimension of the original matrix}
#' \item{lower}{FLMatrix representing the lower triangular matrix}
#' \item{upper}{FLMatrix representing the upper triangular matrix}
#' \item{data_perm}{FLMatrix representing the permutation matrix}
#' @examples
#' flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL")
#' FLLUobject <- lu(flmatrix)
#' listresult <- expand(FLLUobject)
#' listresult$L
#' listresult$U
#' listresult$P
#' @export
setGeneric("lu", function(object,...) {
standardGeneric("lu")
})
# In-memory matrix classes are delegated straight to Matrix::lu().
setMethod("lu", signature(object = "matrix"),
function(object,...)
Matrix::lu(object,...))
setMethod("lu", signature(object = "dgeMatrix"),
function(object,...)
Matrix::lu(object,...))
setMethod("lu", signature(object = "dgCMatrix"),
function(object,...)
Matrix::lu(object,...))
# FLMatrix objects are decomposed in-database (see lu.FLMatrix below).
setMethod("lu", signature(object = "FLMatrix"),
function(object,...)
lu.FLMatrix(object,...))
# #' @export
# lu<-function(object, ...){
# UseMethod("lu",object)
# }
#' @export
lu.default <- Matrix::lu
# Performs the LU decomposition of an FLMatrix entirely in-database using the
# FLLUDecompUdt table UDT, then assembles the pieces into an FLLU object whose
# shape mimics Matrix's 'denseLU' (x, perm, Dim) plus explicit L/U/P matrices.
#' @export
lu.FLMatrix<-function(object,...)
{
connection<-getFLConnection()
## flag3Check(connection)
## flag1Check(connection)
# Reserve a fresh in-database matrix id for the packed L+U result.
MID1 <- getMaxMatrixId(connection)
# sqlstr <- paste0(
# viewSelectMatrix(object, "a","z"),
# outputSelectMatrix("FLLUDecompUdt",viewName="z",localName="a",
# outColNames=list("OutputMatrixID","OutputRowNum",
# "OutputColNum","OutputValL","OutputValU","OutputPermut"),
# whereClause="")
# )
# Build and size-check the SQL that applies the LU UDT to the matrix, then
# materialize the UDT output (row, col, L value, U value, permutation flag).
sqlstr <- constructMatrixUDTSQL(pObject=object,
pFuncName="FLLUDecompUdt",
pdims=getDimsSlot(object),
pdimnames=dimnames(object),
pReturnQuery=TRUE
)
sqlstr <- gsub("'%insertIDhere%'",MID1,sqlstr)
sqlstr <- ensureQuerySize(pResult=sqlstr,
pInput=list(object),
pOperator="lu")
tempResultTable <- createTable(pTableName=gen_unique_table_name("LU"),
pSelect=sqlstr)
# calculating LU matrix
# Pack L and U into one matrix: strictly-below-diagonal cells come from
# OutputValL, diagonal and above come from OutputValU (compact LU form).
sqlstrLU <-paste0(" SELECT ",MID1," AS MATRIX_ID, \n ",
"OutputRowNum AS rowIdColumn, \n ",
"OutputColNum AS colIdColumn, \n ",
"CAST(OutputValL AS FLOAT) AS valueColumn \n ",
" FROM ",tempResultTable,
" WHERE OutputRowNum > OutputColNum \n ",
" AND OutputValL IS NOT NULL \n ",
" UNION ALL \n ",
" SELECT ",MID1," AS MATRIX_ID, \n ",
" OutputRowNum AS rowIdColumn, \n ",
" OutputColNum AS colIdColumn, \n ",
" CAST(OutputValU AS FLOAT) AS valueColumn \n ",
" FROM ",tempResultTable,
" WHERE OutputRowNum <= OutputColNum \n ",
" AND OutputValU IS NOT NULL ")
##@Phani: insert into select with union all not working in hadoop
vLUTable <- createTable(pTableName=gen_unique_table_name("LU"),
pSelect=sqlstrLU)
flm <- FLMatrix(
connection = getFLConnection(object),
table_name = vLUTable,
matrix_id_value = MID1,
matrix_id_colname = "MATRIX_ID",
row_id_colname = "rowIdColumn",
col_id_colname = "colIdColumn",
cell_val_colname = "valueColumn",
dims=dim(object),
dimnames=dimnames(object),
type=typeof(object)
)
# tblfunqueryobj <- new("FLTableFunctionQuery",
# connectionName = attr(connection,"name"),
# variables=list(
# rowIdColumn="rowIdColumn",
# colIdColumn="colIdColumn",
# valueColumn="valueColumn"),
# whereconditions="",
# order = "",
# SQLquery=sqlstrLU)
# flm <- newFLMatrix(
# select= tblfunqueryobj,
# dims=dim(object),
# Dimnames=dimnames(object))
# LUMatrix <- store(object=flm)
LUMatrix <- flm
# calculating Permutation FLMatrix
data_perm <- FLMatrix(connection = connection,
table_name = tempResultTable,
matrix_id_value = "",
matrix_id_colname = "",
row_id_colname = "OutputRowNum",
col_id_colname = "OutputColNum",
cell_val_colname = "OutputPermut",
whereconditions=paste0("mtrx.OutputPermut IS NOT NULL "))
# calculating l FLmatrix
l<-FLMatrix(connection = connection,
table_name = tempResultTable,
matrix_id_value = "",
matrix_id_colname = "",
row_id_colname = "OutputRowNum",
col_id_colname = "OutputColNum",
cell_val_colname = "OutputValL",
whereconditions=paste0("mtrx.OutputValL IS NOT NULL "))
# calculating U FLmatrix
u<-FLMatrix(connection = connection,
table_name = tempResultTable,
matrix_id_value = "",
matrix_id_colname = "",
row_id_colname = "OutputRowNum",
col_id_colname = "OutputColNum",
cell_val_colname = "OutputValU",
whereconditions=paste0("mtrx.OutputValU IS NOT NULL "))
# calculating perm FLVector
# Rows where OutputPermut = 1 encode the row permutation chosen by the UDT.
table <- FLTable(tempResultTable,
"OutputColNum",
whereconditions=paste0(tempResultTable,".OutputPermut = 1 ")
)
perm <- table[,"OutputRowNum"]
# calculating x FLVector
# Flatten the packed LU matrix column-major into a vector, matching the
# layout of the 'x' slot of Matrix's denseLU objects.
VID2 <- getMaxVectorId(connection)
sqlstrX <-paste0("SELECT ",VID2," AS vectorIdColumn",
",ROW_NUMBER() OVER(ORDER BY ",
getVariables(LUMatrix)$colId,",",
getVariables(LUMatrix)$rowId,") AS vectorIndexColumn
, ",getVariables(LUMatrix)$value," AS vectorValueColumn
FROM ",tableAndAlias(LUMatrix),
constructWhere(constraintsSQL(LUMatrix)))
tblfunqueryobj <- new("FLTableFunctionQuery",
connectionName = attr(connection,"name"),
variables = list(
obs_id_colname = "vectorIndexColumn",
cell_val_colname = "vectorValueColumn"),
whereconditions="",
order = "",
SQLquery=sqlstrX)
flv <- newFLVector(
select = tblfunqueryobj,
Dimnames = list(1:length(LUMatrix),
"vectorValueColumn"),
isDeep = FALSE)
# x <- store(object=flv)
x <- flv
# calculating Dim vector
Dim<- dim(data_perm)
a<-new("FLLU",
x=x,
perm=perm,
Dim=Dim,
lower=l,
upper=u,
data_perm = data_perm,
Dimnames=dimnames(object)
)
class(a)<-"FLLU"
#sqlSendUpdate(connection,paste0(" DROP TABLE ",getRemoteTableName(getOption("ResultDatabaseFL"),tempResultTable)))
return(a)
}
# Print method mirroring the compact display used by Matrix for 'denseLU'
# objects: report the lengths of the x, perm and Dim slots, then print each.
#' @export
print.FLLU <- function(object) {
  n_x <- length(object@x)
  n_perm <- length(object@perm)
  n_dim <- length(object@Dim)
  cat("'Matrix Factorization' of Formal class 'denseLU' [package Matrix] with 3 slots\n") #"Matrix"
  cat("..@x : num[1:", n_x, "]")
  print(object@x)
  cat("..@perm : int[1:", n_perm, "]")
  print(object@perm)
  cat("..@Dim : int[1:", n_dim, "]")
  print(object@Dim)
}
# Reuse the S3 print method as the S4 show method for FLLU.
#' @export
setMethod("show", "FLLU", print.FLLU)
# S3 generic for expanding a compact factorization into its factor matrices.
#' @export
expand<-function(object, ...){
UseMethod("expand",object)
}
# Non-FL objects (e.g. Matrix's denseLU) fall back to Matrix::expand.
#' @export
expand.default <- Matrix::expand
#' @export
expand.FLLU <- function(object, ...) {
  # Expose the three factors of A = P L U as a named list of FLMatrix objects.
  list(L = object@lower,
       U = object@upper,
       P = object@data_perm)
}
#' @export
`$.FLLU` <- function(object, property) {
  # Dispatch on the requested factor name; any other name yields the same
  # informational string as before.
  switch(property,
         L = object@lower,
         U = object@upper,
         P = object@data_perm,
         "That's not a valid property")
}
|
library(Julia)
### Name: JuliaIterate
### Title: JuliaIterate
### Aliases: JuliaIterate
### ** Examples
# Starting point in the complex plane.
z<-0+0i
C <- 1-1.6180339887;# Golden Ratio
# Iterate from z with constant C -- presumably the z -> z^2 + C Julia-set
# iteration; confirm against ?JuliaIterate.
it<- JuliaIterate(z,C)
|
/data/genthat_extracted_code/Julia/examples/JuliaIterate.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 184
|
r
|
library(Julia)
### Name: JuliaIterate
### Title: JuliaIterate
### Aliases: JuliaIterate
### ** Examples
# Starting point in the complex plane.
z<-0+0i
C <- 1-1.6180339887;# Golden Ratio
# Iterate from z with constant C -- presumably the z -> z^2 + C Julia-set
# iteration; confirm against ?JuliaIterate.
it<- JuliaIterate(z,C)
|
# Figure 1: total road accidents per district (absolute counts).
AccidentsByDistricts <- aggregate(infodata["סהכ תאונות דרכים"], by=list(infodata$מחוז), FUN=sum)
# Summing accidents by district.
colnames(AccidentsByDistricts) <- c("מחוז", "סהכ תאונות דרכים")
# Sorting districts by total accident count (descending).
AccidentsByDistricts <- AccidentsByDistricts[order(AccidentsByDistricts["סהכ תאונות דרכים"], decreasing = TRUE),]
# Pin factor levels so ggplot keeps the sorted bar order.
AccidentsByDistricts$מחוז <- factor(AccidentsByDistricts$מחוז, levels = AccidentsByDistricts$מחוז)
#Plot
ggplot(AccidentsByDistricts, aes(y = unlist(AccidentsByDistricts["סהכ תאונות דרכים"]), x = unlist(AccidentsByDistricts$מחוז))) +
  geom_bar(position = 'dodge', stat="identity", width=0.8, fill="Red") +
  geom_text(aes(label=paste0(unlist(AccidentsByDistricts["סהכ תאונות דרכים"]))), position=position_dodge(width=0.1), vjust=-0.25) +
  labs(title = "תאונות דרכים לפי מחוז" , x = "" , y = "מספר תאונות הדרכים") +
  theme(axis.text.x = element_text(size=11, angle=0, vjust=1), plot.title = element_text(hjust = 0.5))
# Figure 2: accidents normalized by district population.
NABD <- aggregate(infodata["סהכ תאונות דרכים"]/infodata["מס' תושבים"], by=list(infodata$מחוז), FUN=sum)
# Summing normalized accident rates by district.
colnames(NABD) <- c("מחוז", "סך תאונות דרכים מנורמל למס' תושבים")
# Sorting by normalized rate (descending).
NABD <- NABD[order(NABD["סך תאונות דרכים מנורמל למס' תושבים"], decreasing = TRUE),]
NABD$מחוז <- factor(NABD$מחוז, levels = NABD$מחוז)
#Plot
# FIX: fill was "301934" (missing leading '#'), which is not a valid R colour
# and makes ggplot2 error at render time; corrected to the hex colour "#301934".
# NOTE(review): labels append "%" without multiplying by 100 -- assumes the
# ratio is already expressed per 100 residents; confirm against infodata units.
ggplot(NABD, aes(y = unlist(NABD["סך תאונות דרכים מנורמל למס' תושבים"]), x = unlist(NABD$מחוז))) +
  geom_bar(position = 'dodge', stat="identity", width=0.8, fill="#301934") +
  geom_text(aes(label=paste0(round(unlist(NABD["סך תאונות דרכים מנורמל למס' תושבים"]),2),"%")), position=position_dodge(width=0.1), vjust=-0.25) +
  labs(title = "שיעור תאונות הדרכים ביחס לאוכלוסיה לפי מחוז" , x = "" , y = "תאונות הדרכים %") +
  theme(axis.text.x = element_text(size=11, angle=0, vjust=1), plot.title = element_text(hjust = 0.5)) +
  ylim(0, 2.5)
|
/AccidentByDistrict.R
|
no_license
|
daniel1kakon/Accidents
|
R
| false
| false
| 2,225
|
r
|
# Figure 1: total road accidents per district (absolute counts).
AccidentsByDistricts <- aggregate(infodata["סהכ תאונות דרכים"], by=list(infodata$מחוז), FUN=sum)
# Summing accidents by district.
colnames(AccidentsByDistricts) <- c("מחוז", "סהכ תאונות דרכים")
# Sorting districts by total accident count (descending).
AccidentsByDistricts <- AccidentsByDistricts[order(AccidentsByDistricts["סהכ תאונות דרכים"], decreasing = TRUE),]
# Pin factor levels so ggplot keeps the sorted bar order.
AccidentsByDistricts$מחוז <- factor(AccidentsByDistricts$מחוז, levels = AccidentsByDistricts$מחוז)
#Plot
ggplot(AccidentsByDistricts, aes(y = unlist(AccidentsByDistricts["סהכ תאונות דרכים"]), x = unlist(AccidentsByDistricts$מחוז))) +
  geom_bar(position = 'dodge', stat="identity", width=0.8, fill="Red") +
  geom_text(aes(label=paste0(unlist(AccidentsByDistricts["סהכ תאונות דרכים"]))), position=position_dodge(width=0.1), vjust=-0.25) +
  labs(title = "תאונות דרכים לפי מחוז" , x = "" , y = "מספר תאונות הדרכים") +
  theme(axis.text.x = element_text(size=11, angle=0, vjust=1), plot.title = element_text(hjust = 0.5))
# Figure 2: accidents normalized by district population.
NABD <- aggregate(infodata["סהכ תאונות דרכים"]/infodata["מס' תושבים"], by=list(infodata$מחוז), FUN=sum)
# Summing normalized accident rates by district.
colnames(NABD) <- c("מחוז", "סך תאונות דרכים מנורמל למס' תושבים")
# Sorting by normalized rate (descending).
NABD <- NABD[order(NABD["סך תאונות דרכים מנורמל למס' תושבים"], decreasing = TRUE),]
NABD$מחוז <- factor(NABD$מחוז, levels = NABD$מחוז)
#Plot
# FIX: fill was "301934" (missing leading '#'), which is not a valid R colour
# and makes ggplot2 error at render time; corrected to the hex colour "#301934".
# NOTE(review): labels append "%" without multiplying by 100 -- assumes the
# ratio is already expressed per 100 residents; confirm against infodata units.
ggplot(NABD, aes(y = unlist(NABD["סך תאונות דרכים מנורמל למס' תושבים"]), x = unlist(NABD$מחוז))) +
  geom_bar(position = 'dodge', stat="identity", width=0.8, fill="#301934") +
  geom_text(aes(label=paste0(round(unlist(NABD["סך תאונות דרכים מנורמל למס' תושבים"]),2),"%")), position=position_dodge(width=0.1), vjust=-0.25) +
  labs(title = "שיעור תאונות הדרכים ביחס לאוכלוסיה לפי מחוז" , x = "" , y = "תאונות הדרכים %") +
  theme(axis.text.x = element_text(size=11, angle=0, vjust=1), plot.title = element_text(hjust = 0.5)) +
  ylim(0, 2.5)
|
#' Do the columns contain R Date objects?
#'
#' Set a verification step where a table column is expected to consist entirely
#' of R \code{Date} objects.
#' @inheritParams col_vals_gt
#' @param column the name of a single table column, multiple columns in the same
#' table, or, a helper function such as \code{\link{all_cols}()}.
#' @return an agent object.
#' @examples
#' # Create a simple data frame
#' # with a column containing data
#' # classed as `Date`
#' df <-
#'   data.frame(
#'     a = as.Date("2017-08-15"))
#'
#' # Validate that column `a`
#' # in the data frame is classed
#' # as `Date`
#' agent <-
#'   create_agent() %>%
#'   focus_on(tbl_name = "df") %>%
#'   col_is_date(
#'     column = a) %>%
#'   interrogate()
#'
#' # Determine if these column
#' # validations have all passed
#' # by using `all_passed()`
#' all_passed(agent)
#' @importFrom dplyr bind_rows tibble
#' @importFrom rlang enquo expr_text
#' @importFrom stringr str_replace_all
#' @export
col_is_date <- function(...,
                        column,
                        brief = NULL,
                        warn_count = NULL,
                        notify_count = NULL,
                        warn_fraction = NULL,
                        notify_fraction = NULL,
                        tbl_name = NULL,
                        db_type = NULL,
                        creds_file = NULL,
                        initial_sql = NULL,
                        file_path = NULL,
                        col_types = NULL) {
  # Collect the object provided (either a table object or an agent)
  object <- list(...)
  # Capture the quoted column expression and normalize it to a plain string
  column <-
    rlang::enquo(column) %>%
    rlang::expr_text() %>%
    stringr::str_replace_all("~", "") %>%
    stringr::str_replace_all("\"", "'")
  # When a table (not an agent) is supplied, evaluate this single validation
  # immediately and return the result.
  # FIX: the original call passed `value = value`, but no `value` exists in
  # this function's scope (column-type checks take no comparison value),
  # which raised "object 'value' not found" on this code path.
  if (inherits(object[[1]] , c("data.frame", "tbl_df", "tbl_dbi"))) {
    return(
      object[[1]] %>%
        evaluate_single(
          type = "col_is_date",
          column = column,
          warn_count = warn_count,
          notify_count = notify_count,
          warn_fraction = warn_fraction,
          notify_fraction = notify_fraction)
    )
  }
  agent <- object[[1]]
  # Column-type validations take no preconditions
  preconditions <- NULL
  # Generate an automatic brief when none was supplied
  if (is.null(brief)) {
    brief <-
      create_autobrief(
        agent = agent,
        assertion_type = "col_is_date",
        column = column)
  }
  # If `all_cols()` is provided for `column`, select all
  # table columns for this verification
  if (column[1] == "all_cols()") {
    column <- get_all_cols(agent = agent)
  }
  # Add one or more validation steps
  agent <-
    create_validation_step(
      agent = agent,
      assertion_type = "col_is_date",
      column = column,
      preconditions = preconditions,
      brief = brief,
      warn_count = warn_count,
      notify_count = notify_count,
      warn_fraction = warn_fraction,
      notify_fraction = notify_fraction,
      tbl_name = ifelse(is.null(tbl_name), as.character(NA), tbl_name),
      db_type = ifelse(is.null(db_type), as.character(NA), db_type),
      creds_file = ifelse(is.null(creds_file), as.character(NA), creds_file),
      init_sql = ifelse(is.null(initial_sql), as.character(NA), initial_sql),
      file_path = ifelse(is.null(file_path), as.character(NA), file_path),
      col_types = ifelse(is.null(col_types), as.character(NA), col_types))
  # `brief` is normally set above; this guard is retained defensively in case
  # `create_autobrief()` ever returns NULL
  if (is.null(brief)) {
    brief <- as.character(NA)
  }
  # Place the validation step in the logical plan
  agent$logical_plan <-
    dplyr::bind_rows(
      agent$logical_plan,
      dplyr::tibble(
        component_name = "col_is_date",
        parameters = as.character(NA),
        brief = brief))
  agent
}
|
/R/col_is_date.R
|
permissive
|
elong0527/pointblank
|
R
| false
| false
| 3,699
|
r
|
#' Do the columns contain R Date objects?
#'
#' Set a verification step where a table column is expected to consist entirely
#' of R \code{Date} objects.
#' @inheritParams col_vals_gt
#' @param column the name of a single table column, multiple columns in the same
#' table, or, a helper function such as \code{\link{all_cols}()}.
#' @return an agent object.
#' @examples
#' # Create a simple data frame
#' # with a column containing data
#' # classed as `Date`
#' df <-
#'   data.frame(
#'     a = as.Date("2017-08-15"))
#'
#' # Validate that column `a`
#' # in the data frame is classed
#' # as `Date`
#' agent <-
#'   create_agent() %>%
#'   focus_on(tbl_name = "df") %>%
#'   col_is_date(
#'     column = a) %>%
#'   interrogate()
#'
#' # Determine if these column
#' # validations have all passed
#' # by using `all_passed()`
#' all_passed(agent)
#' @importFrom dplyr bind_rows tibble
#' @importFrom rlang enquo expr_text
#' @importFrom stringr str_replace_all
#' @export
col_is_date <- function(...,
                        column,
                        brief = NULL,
                        warn_count = NULL,
                        notify_count = NULL,
                        warn_fraction = NULL,
                        notify_fraction = NULL,
                        tbl_name = NULL,
                        db_type = NULL,
                        creds_file = NULL,
                        initial_sql = NULL,
                        file_path = NULL,
                        col_types = NULL) {
  # Collect the object provided (either a table or an agent)
  object <- list(...)
  # Capture the column name as a plain string, stripping formula/quote artifacts
  column <-
    rlang::enquo(column) %>%
    rlang::expr_text() %>%
    stringr::str_replace_all("~", "") %>%
    stringr::str_replace_all("\"", "'")
  # When a table is passed directly, evaluate the validation immediately
  if (inherits(object[[1]], c("data.frame", "tbl_df", "tbl_dbi"))) {
    return(
      object[[1]] %>%
        evaluate_single(
          type = "col_is_date",
          column = column,
          # BUG FIX: removed `value = value` -- no `value` object exists in
          # this function's scope (a column-class check takes no comparison
          # value), so this branch previously errored unconditionally.
          warn_count = warn_count,
          notify_count = notify_count,
          warn_fraction = warn_fraction,
          notify_fraction = notify_fraction)
    )
  }
  # Otherwise the first object is an agent; add a validation step to it
  agent <- object[[1]]
  preconditions <- NULL
  # Generate an automatic brief when none was supplied
  if (is.null(brief)) {
    brief <-
      create_autobrief(
        agent = agent,
        assertion_type = "col_is_date",
        column = column)
  }
  # If `all_cols()` is provided for `column`, select all
  # table columns for this verification
  if (column[1] == "all_cols()") {
    column <- get_all_cols(agent = agent)
  }
  # Add one or more validation steps
  agent <-
    create_validation_step(
      agent = agent,
      assertion_type = "col_is_date",
      column = column,
      preconditions = preconditions,
      brief = brief,
      warn_count = warn_count,
      notify_count = notify_count,
      warn_fraction = warn_fraction,
      notify_fraction = notify_fraction,
      tbl_name = ifelse(is.null(tbl_name), as.character(NA), tbl_name),
      db_type = ifelse(is.null(db_type), as.character(NA), db_type),
      creds_file = ifelse(is.null(creds_file), as.character(NA), creds_file),
      init_sql = ifelse(is.null(initial_sql), as.character(NA), initial_sql),
      file_path = ifelse(is.null(file_path), as.character(NA), file_path),
      col_types = ifelse(is.null(col_types), as.character(NA), col_types))
  # If the autobrief could not be generated, record NA
  if (is.null(brief)) {
    brief <- as.character(NA)
  }
  # Place the validation step in the logical plan
  agent$logical_plan <-
    dplyr::bind_rows(
      agent$logical_plan,
      dplyr::tibble(
        component_name = "col_is_date",
        parameters = as.character(NA),
        brief = brief))
  agent
}
|
library(ggplot2)
library(mapproj)
## MRP (multilevel regression and poststratification) state-level estimates.
mrp.prediction <- readRDS("Data/mrp_pred.rds")
## US state polygons (lower 48) from the maps package.
mrp.prediction.map <- map_data("state")
## Attach each polygon vertex's opinion estimate by matching map region names
## against the lower-cased state names in the predictions.
## NOTE(review): the matched column is called `state.abb` but is compared with
## full region names -- confirm it actually holds full state names.
mrp.prediction.map$opinion <-
mrp.prediction$opinion[match(mrp.prediction.map$region, tolower(mrp.prediction$state.abb))]
## Choropleth of the share favoring deporting Dreamers (Mercator projection),
## with a horizontal colour bar beneath the map.
ggplot(mrp.prediction.map, aes(long, lat)) +
geom_polygon(aes(group = group, fill = opinion)) +
scale_fill_distiller(palette = "Spectral", labels = scales::percent) +
coord_map('mercator') +
labs(fill = "Percent favoring Deporting Dreamers") +
theme_void() +
theme(legend.position="bottom", legend.box="horizontal",
legend.key.width=grid::unit(.1,'npc')) +
guides(fill = guide_colourbar(title.position="top",
title.hjust = 0.5))
|
/5_map_plot.R
|
no_license
|
JohnPSpaw/IAT_MrP_Model
|
R
| false
| false
| 731
|
r
|
library(ggplot2)
library(mapproj)
## MRP (multilevel regression and poststratification) state-level estimates.
mrp.prediction <- readRDS("Data/mrp_pred.rds")
## US state polygons (lower 48) from the maps package.
mrp.prediction.map <- map_data("state")
## Attach each polygon vertex's opinion estimate by matching map region names
## against the lower-cased state names in the predictions.
## NOTE(review): the matched column is called `state.abb` but is compared with
## full region names -- confirm it actually holds full state names.
mrp.prediction.map$opinion <-
mrp.prediction$opinion[match(mrp.prediction.map$region, tolower(mrp.prediction$state.abb))]
## Choropleth of the share favoring deporting Dreamers (Mercator projection),
## with a horizontal colour bar beneath the map.
ggplot(mrp.prediction.map, aes(long, lat)) +
geom_polygon(aes(group = group, fill = opinion)) +
scale_fill_distiller(palette = "Spectral", labels = scales::percent) +
coord_map('mercator') +
labs(fill = "Percent favoring Deporting Dreamers") +
theme_void() +
theme(legend.position="bottom", legend.box="horizontal",
legend.key.width=grid::unit(.1,'npc')) +
guides(fill = guide_colourbar(title.position="top",
title.hjust = 0.5))
|
##set working directory
##change for each person!
setwd("C:/Users/Ronli/Documents/GitHub/scales_framework/src/dti")
setwd("/Users/dti/Dropbox/Research/simdata3/simdata3")
##read in simulation results
load("scale_cols.Rdata") #obj called out_cols
load("scale_rows.Rdata") #obj called out_rows
##reorder by simulation index
##NOTE(review): mixedsort() comes from the gtools package; make sure
##library(gtools) has been run before sourcing this script.
out_cols_ord <- out_cols[mixedsort(names(out_cols))]
out_rows_ord <- out_rows[mixedsort(names(out_rows))]
##pull out proportion of violations (unweighted and weighted means per model)
violations.cols <- data.frame(model = ordered(rep(c("UNC","MON","IIO","DM","LCR","RSH"), each = 50), levels = c("UNC","MON","IIO","DM","LCR","RSH")),
                              unwei = sapply(out_cols_ord, function(x) x@means$unweighted),
                              weigh = sapply(out_cols_ord, function(x) x@means$weighted))
violations.rows <- data.frame(model = ordered(rep(c("UNC","MON","IIO","DM","LCR","RSH"), each = 50), levels = c("UNC","MON","IIO","DM","LCR","RSH")),
                              unwei = sapply(out_rows_ord, function(x) x@means$unweighted),
                              weigh = sapply(out_rows_ord, function(x) x@means$weighted))
library(psych)
describeBy(violations.cols, violations.cols$model)
pdf.options(family = 'Palatino')
##plot results
boxplot(weigh ~ model, violations.cols, at =rev(1:nlevels(violations.cols$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_columns_weighted.pdf')
boxplot(weigh ~ model, violations.rows, at =rev(1:nlevels(violations.rows$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_rows_weighted.pdf')
##BUG FIX: the two "unweighted" figures below previously plotted `weigh`
##again; they now plot the unweighted means (`unwei`), matching the output
##file names and the commented-out originals.
boxplot(unwei ~ model, violations.cols, at =rev(1:nlevels(violations.cols$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_columns_unweighted.pdf')
boxplot(unwei ~ model, violations.rows, at =rev(1:nlevels(violations.rows$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_rows_unweighted.pdf')
####################################################################################################################
##random code for figuring stuff out
load("./simdata/T66.Rdata") #obj called model.LOResp
table(colSums(model.LOResp$obsData))
out_rows[[66]]@n / out_cols[[66]]@N
|
/src/dti/results.R
|
no_license
|
ben-domingue/scales_framework
|
R
| false
| false
| 2,787
|
r
|
##set working directory
##change for each person!
setwd("C:/Users/Ronli/Documents/GitHub/scales_framework/src/dti")
setwd("/Users/dti/Dropbox/Research/simdata3/simdata3")
##read in simulation results
load("scale_cols.Rdata") #obj called out_cols
load("scale_rows.Rdata") #obj called out_rows
##reorder by simulation index
##NOTE(review): mixedsort() comes from the gtools package; make sure
##library(gtools) has been run before sourcing this script.
out_cols_ord <- out_cols[mixedsort(names(out_cols))]
out_rows_ord <- out_rows[mixedsort(names(out_rows))]
##pull out proportion of violations (unweighted and weighted means per model)
violations.cols <- data.frame(model = ordered(rep(c("UNC","MON","IIO","DM","LCR","RSH"), each = 50), levels = c("UNC","MON","IIO","DM","LCR","RSH")),
                              unwei = sapply(out_cols_ord, function(x) x@means$unweighted),
                              weigh = sapply(out_cols_ord, function(x) x@means$weighted))
violations.rows <- data.frame(model = ordered(rep(c("UNC","MON","IIO","DM","LCR","RSH"), each = 50), levels = c("UNC","MON","IIO","DM","LCR","RSH")),
                              unwei = sapply(out_rows_ord, function(x) x@means$unweighted),
                              weigh = sapply(out_rows_ord, function(x) x@means$weighted))
library(psych)
describeBy(violations.cols, violations.cols$model)
pdf.options(family = 'Palatino')
##plot results
boxplot(weigh ~ model, violations.cols, at =rev(1:nlevels(violations.cols$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_columns_weighted.pdf')
boxplot(weigh ~ model, violations.rows, at =rev(1:nlevels(violations.rows$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_rows_weighted.pdf')
##BUG FIX: the two "unweighted" figures below previously plotted `weigh`
##again; they now plot the unweighted means (`unwei`), matching the output
##file names and the commented-out originals.
boxplot(unwei ~ model, violations.cols, at =rev(1:nlevels(violations.cols$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_columns_unweighted.pdf')
boxplot(unwei ~ model, violations.rows, at =rev(1:nlevels(violations.rows$model)), ylim = c(0,0.4), horizontal = TRUE, boxwex = .5, las = 1, xlab = "Percentage of Violations", col = "grey70")
dev.print(device = pdf, file = 'violations_rows_unweighted.pdf')
####################################################################################################################
##random code for figuring stuff out
load("./simdata/T66.Rdata") #obj called model.LOResp
table(colSums(model.LOResp$obsData))
out_rows[[66]]@n / out_cols[[66]]@N
|
##The goal of your project is to predict the manner in which
## they did the exercise. This is the "classe" variable in
##the training set.
##You may use any of the other variables
##to predict with. You should create a report describing
##how you built your model, how you used cross validation,
##what you think the expected out of sample error is,
##and why you made the choices you did.
##You will also use your prediction model to
##predict 20 different test cases.
## Load the raw training and test data.
training <- read.csv("pml-training.csv")
testing <- read.csv("pml-testing.csv")
## Quick interactive inspection of the data.
head(training)
names(training)
library(ggplot2); library(caret);library(mlbench);set.seed(333);
## Hold out 25% of the labelled data for validation.
inTrain <- createDataPartition(y=training$classe, p=0.75,list=FALSE)
tr <- training[inTrain,]
cv <- training[-inTrain,]
## Repeated 10-fold cross-validation (3 repeats); class probabilities are
## requested for the ROC metric.
## NOTE(review): metric="ROC" normally requires summaryFunction=twoClassSummary
## and a two-class outcome; `classe` has five classes -- confirm caret's
## fallback behaviour here.
ctrl <- trainControl(method="repeatedcv",repeats=3,classProbs=TRUE)
modelFit <- train(classe~., data=tr,method="pls",tuneLength=15, trControl=ctrl,metric="ROC",preProcess=c("center","scale"))
## Evaluate hold-out performance.
confusionMatrix(cv$classe, predict(modelFit, cv))
|
/Learning.R
|
no_license
|
eeliuchang/PracticalMLProject
|
R
| false
| false
| 1,024
|
r
|
##The goal of your project is to predict the manner in which
## they did the exercise. This is the "classe" variable in
##the training set.
##You may use any of the other variables
##to predict with. You should create a report describing
##how you built your model, how you used cross validation,
##what you think the expected out of sample error is,
##and why you made the choices you did.
##You will also use your prediction model to
##predict 20 different test cases.
## Load the raw training and test data.
training <- read.csv("pml-training.csv")
testing <- read.csv("pml-testing.csv")
## Quick interactive inspection of the data.
head(training)
names(training)
library(ggplot2); library(caret);library(mlbench);set.seed(333);
## Hold out 25% of the labelled data for validation.
inTrain <- createDataPartition(y=training$classe, p=0.75,list=FALSE)
tr <- training[inTrain,]
cv <- training[-inTrain,]
## Repeated 10-fold cross-validation (3 repeats); class probabilities are
## requested for the ROC metric.
## NOTE(review): metric="ROC" normally requires summaryFunction=twoClassSummary
## and a two-class outcome; `classe` has five classes -- confirm caret's
## fallback behaviour here.
ctrl <- trainControl(method="repeatedcv",repeats=3,classProbs=TRUE)
modelFit <- train(classe~., data=tr,method="pls",tuneLength=15, trControl=ctrl,metric="ROC",preProcess=c("center","scale"))
## Evaluate hold-out performance.
confusionMatrix(cv$classe, predict(modelFit, cv))
|
## Create a class "myDate" to coerce character string in the format DD/MM/YYYY into Date class upon import
setClass("myDate")
setAs("character","myDate", function(from) as.Date(from, format = "%d/%m/%Y"))
## Create a class "myTime" to coerce character string in the format HH:MM:SS into POSIXct class upon import
setClass("myTime")
setAs("character", "myTime", function(from) strptime(from, "%H:%M:%S"))
## Read all data
largeData <- read.table("./household_power_consumption.txt", ## data file path in working directory
sep = ";", ## data separated by semi-colon
header = TRUE, ## variable names in first row
colClasses = c("Date" = "myDate", "Time" = "myTime"), ## set classes of Date and Time data upon import
stringsAsFactors = FALSE, ## character strings should not default to factors
na.strings = "?") ## missing values are denoted by a question mark
## Coerce date and time character strings into a single variable
library(lubridate)
largeData$Time <- with(largeData, dmy(Date) + hms(Time)) ## Time variable now includes date information
largeData <- largeData[,2:9] ## remove Date variable (now superfluous)
## Subset data from the dates 2007-02-01 and 2007-02-02
subsetbyDate <- with(largeData, subset(largeData, date(Time) == "2007-02-01" | date(Time) == "2007-02-02"))
## Open PNG device
png(filename = "plot4.png", width = 480, height = 480, units = "px")
## Define a matrix to enable multiple plots
par(mfrow = c(2,2)) ## this will fill the plot space by ROW
## Create first plot (top left corner, same as plot2.png)
plot(subsetbyDate$Time, subsetbyDate$Global_active_power, ## specify x- and y-variables
type = "l", ## line graph with no point markers
xlab = "", ## set no x-axis label
ylab = "Global Active Power (kilowatts)") ## set y-axis label
## Create second plot (top right corner)
plot(subsetbyDate$Time, subsetbyDate$Voltage, ## specify x- and y-variables
type = "l", ## line graph with no point markers
xlab = "datetime", ## set x-axis label to "datetime"
ylab = "Voltage") ## set y-axis label
## Create third plot (bottom left corner, same as plot3.png)
plot(subsetbyDate$Time, subsetbyDate$Sub_metering_1, ## select x- and y-variables
type = "n", ## plot an empty graph without data
xlab = "", ## set no x-axis label
ylab = "Energy sub metering") ## set y-axis label
lines(subsetbyDate$Time, subsetbyDate$Sub_metering_1,
col = "black", type = "l") ## add black line for sub_metering_1 data
lines(subsetbyDate$Time, subsetbyDate$Sub_metering_2,
col = "red", type = "l") ## add red line for sub_metering_2 data
lines(subsetbyDate$Time, subsetbyDate$Sub_metering_3,
col = "blue", type = "l") ## add blue line for sub_metering_3 data
legend("topright", ## add legend in top right corner
lty = 1, ## line as legend item, thickness of 1
bty = "n", ## no border around legend
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), ## specify legend text labels
col = c("black", "red", "blue")) ## specify color of legend lines
## Create fourth plot (bottom right corner)
plot(subsetbyDate$Time, subsetbyDate$Global_reactive_power, ## specify x- and y-variables
type = "l", ## line graph with no point markers
xlab = "datetime", ## set x-axis label to "datetime"
ylab = "Global_reactive_power") ## set y-axis label
# Close PNG device
dev.off()
|
/plot4.R
|
no_license
|
bepartridge/ExData_Plotting1
|
R
| false
| false
| 4,982
|
r
|
## Create a class "myDate" to coerce character string in the format DD/MM/YYYY into Date class upon import
setClass("myDate")
setAs("character","myDate", function(from) as.Date(from, format = "%d/%m/%Y"))
## Create a class "myTime" to coerce character string in the format HH:MM:SS into POSIXct class upon import
setClass("myTime")
setAs("character", "myTime", function(from) strptime(from, "%H:%M:%S"))
## Read all data
largeData <- read.table("./household_power_consumption.txt", ## data file path in working directory
sep = ";", ## data separated by semi-colon
header = TRUE, ## variable names in first row
colClasses = c("Date" = "myDate", "Time" = "myTime"), ## set classes of Date and Time data upon import
stringsAsFactors = FALSE, ## character strings should not default to factors
na.strings = "?") ## missing values are denoted by a question mark
## Coerce date and time character strings into a single variable
library(lubridate)
largeData$Time <- with(largeData, dmy(Date) + hms(Time)) ## Time variable now includes date information
largeData <- largeData[,2:9] ## remove Date variable (now superfluous)
## Subset data from the dates 2007-02-01 and 2007-02-02
subsetbyDate <- with(largeData, subset(largeData, date(Time) == "2007-02-01" | date(Time) == "2007-02-02"))
## Open PNG device
png(filename = "plot4.png", width = 480, height = 480, units = "px")
## Define a matrix to enable multiple plots
par(mfrow = c(2,2)) ## this will fill the plot space by ROW
## Create first plot (top left corner, same as plot2.png)
plot(subsetbyDate$Time, subsetbyDate$Global_active_power, ## specify x- and y-variables
type = "l", ## line graph with no point markers
xlab = "", ## set no x-axis label
ylab = "Global Active Power (kilowatts)") ## set y-axis label
## Create second plot (top right corner)
plot(subsetbyDate$Time, subsetbyDate$Voltage, ## specify x- and y-variables
type = "l", ## line graph with no point markers
xlab = "datetime", ## set x-axis label to "datetime"
ylab = "Voltage") ## set y-axis label
## Create third plot (bottom left corner, same as plot3.png)
plot(subsetbyDate$Time, subsetbyDate$Sub_metering_1, ## select x- and y-variables
type = "n", ## plot an empty graph without data
xlab = "", ## set no x-axis label
ylab = "Energy sub metering") ## set y-axis label
lines(subsetbyDate$Time, subsetbyDate$Sub_metering_1,
col = "black", type = "l") ## add black line for sub_metering_1 data
lines(subsetbyDate$Time, subsetbyDate$Sub_metering_2,
col = "red", type = "l") ## add red line for sub_metering_2 data
lines(subsetbyDate$Time, subsetbyDate$Sub_metering_3,
col = "blue", type = "l") ## add blue line for sub_metering_3 data
legend("topright", ## add legend in top right corner
lty = 1, ## line as legend item, thickness of 1
bty = "n", ## no border around legend
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), ## specify legend text labels
col = c("black", "red", "blue")) ## specify color of legend lines
## Create fourth plot (bottom right corner)
plot(subsetbyDate$Time, subsetbyDate$Global_reactive_power, ## specify x- and y-variables
type = "l", ## line graph with no point markers
xlab = "datetime", ## set x-axis label to "datetime"
ylab = "Global_reactive_power") ## set y-axis label
# Close PNG device
dev.off()
|
# Author: Robert J. Hijmans
# Date : March 2009
# Version 0.9
# Licence GPL v3
# Register the `freq` generic unless another package already defined it.
if (!isGeneric("freq")) {
setGeneric("freq", function(x, ...)
standardGeneric("freq"))
}
# freq() for a single raster layer: tabulate cell values (rounded to
# `digits`).  If `value` is supplied, only count the cells equal to it.
# Returns a two-column matrix: value, count (NA counts included per `useNA`).
setMethod('freq', signature(x='RasterLayer'),
function(x, digits=0, value=NULL, useNA="ifany", progress='', ...) {
# Counting a single value is delegated to the internal .count() helper.
if (!is.null(value)) {
return( .count(x, value, digits=digits, progress=progress, ...) )
}
if (canProcessInMemory(x, 3)) {
# Small enough to tabulate all cell values at once.
d <- round(getValues(x), digits=digits)
res <- table( d, useNA=useNA )
res <- cbind(as.numeric(names(res)), as.vector(res))
} else {
# Too large for memory: tabulate block by block, then aggregate.
tr <- blockSize(x, n=2)
pb <- pbCreate(tr$n, progress=progress, label='freq')
z <- vector(length=0)
for (i in 1:tr$n) {
d <- round(getValuesBlock(x, row=tr$row[i], nrows=tr$nrows[i]), digits=digits)
res <- table(d, useNA=useNA )
res <- cbind(as.numeric(unlist(as.vector(dimnames(res)))), as.vector(res))
z <- rbind(z, res)
pbStep(pb, i)
}
# Sum the per-block counts for each distinct value.
res <- tapply(z[,2], as.character(z[,1]), sum)
res <- cbind(as.numeric(names(res)), as.vector(res))
# NA rows are dropped by tapply's grouping; re-append their total count.
z <- z[is.na(z[,1]), ,drop=FALSE]
if (isTRUE(nrow(z) > 0)) {
z <- sum(z[,2])
res <- rbind(res, c(NA, z))
}
pbClose(pb)
}
colnames(res) <- c('value', 'count')
return(res)
}
)
# freq() for multi-layer objects: apply freq() to each layer, returning a
# named list of per-layer value/count tables; with merge=TRUE the tables
# are outer-joined on the value column (one count column per layer).
setMethod('freq', signature(x='RasterStackBrick'),
  function(x, digits=0, value=NULL, useNA="ifany", merge=FALSE, progress='', ...) {
    # Counting a single value is delegated to the internal .count() helper.
    if (!is.null(value)) {
      return(.count(x, value, digits=digits, progress=progress, ...))
    }
    nl <- nlayers(x)
    res <- list()
    pb <- pbCreate(nl, progress=progress, label='freq')
    for (i in 1:nl) {
      # BUG FIX: forward `digits` to the per-layer method; it was
      # previously dropped, so a non-default `digits` was ignored.
      res[[i]] <- freq(raster(x, i), digits=digits, useNA=useNA, progress='', ...)
      pbStep(pb, i)
    }
    pbClose(pb)
    names(res) <- ln <- names(x)
    if (merge) {
      # Start from the first layer's table and outer-join the rest on value.
      r <- res[[1]]
      colnames(r)[2] <- ln[1]
      if (nl > 1) {
        for (i in 2:nl) {
          x <- res[[i]]
          colnames(x)[2] <- ln[i]
          r <- merge(r, x, by=1, all=TRUE)
        }
      }
      return(r)
    }
    return(res)
  }
)
# Internal helper for freq(..., value=): count the cells equal to `value`
# (after rounding to `digits`), or the NA cells when `value` is NA.
# Multi-layer objects yield one count per layer; single layers a scalar.
.count <- function(x, value, digits=0, progress='', ...) {
  # Only a single target value is supported
  value <- value[1]
  if (nlayers(x) > 1) {
    if (canProcessInMemory(x, 2)) {
      if (is.na(value)) {
        v <- colSums(is.na(getValues(x)))
      } else {
        v <- round(getValues(x), digits=digits) == value
        v <- colSums(v, na.rm=TRUE)
      }
    } else {
      # Process block by block, accumulating per-layer counts in `v`
      tr <- blockSize(x, n=2)
      pb <- pbCreate(tr$n, progress=progress)
      v <- 0
      for (i in 1:tr$n) {
        vv <- getValues(x, row=tr$row[i], nrows=tr$nrows[i])
        if (is.na(value)) {
          v <- v + colSums(is.na(vv))
        } else {
          # BUG FIX: round and compare the block values `vv`; the original
          # rounded the running total `v` instead, producing wrong counts
          # for out-of-memory multi-layer rasters.
          vv <- round(vv, digits=digits) == value
          v <- v + colSums(vv, na.rm=TRUE)
        }
        pbStep(pb, i)
      }
      pbClose(pb)
    }
    return(v)
  } else {
    if (canProcessInMemory(x, 2)) {
      if (is.na(value)) {
        x <- sum(is.na(getValues(x)))
      } else {
        v <- na.omit(round(getValues(x), digits=digits))
        x <- sum(v == value)
      }
      return(x)
    } else {
      # Single layer, processed block by block with scalar accumulator `r`
      tr <- blockSize(x, n=2)
      pb <- pbCreate(tr$n, progress=progress)
      r <- 0
      for (i in 1:tr$n) {
        v <- getValues(x, row=tr$row[i], nrows=tr$nrows[i])
        if (is.na(value)) {
          r <- r + sum(is.na(v))
        } else {
          v <- na.omit(round(v, digits=digits))
          r <- r + sum(v == value)
        }
        pbStep(pb, i)
      }
      pbClose(pb)
      return(r)
    }
  }
}
|
/R/freq.R
|
no_license
|
kenahoo/raster
|
R
| false
| false
| 3,427
|
r
|
# Author: Robert J. Hijmans
# Date : March 2009
# Version 0.9
# Licence GPL v3
# Register the `freq` generic unless another package already defined it.
if (!isGeneric("freq")) {
setGeneric("freq", function(x, ...)
standardGeneric("freq"))
}
# freq() for a single raster layer: tabulate cell values (rounded to
# `digits`).  If `value` is supplied, only count the cells equal to it.
# Returns a two-column matrix: value, count (NA counts included per `useNA`).
setMethod('freq', signature(x='RasterLayer'),
function(x, digits=0, value=NULL, useNA="ifany", progress='', ...) {
# Counting a single value is delegated to the internal .count() helper.
if (!is.null(value)) {
return( .count(x, value, digits=digits, progress=progress, ...) )
}
if (canProcessInMemory(x, 3)) {
# Small enough to tabulate all cell values at once.
d <- round(getValues(x), digits=digits)
res <- table( d, useNA=useNA )
res <- cbind(as.numeric(names(res)), as.vector(res))
} else {
# Too large for memory: tabulate block by block, then aggregate.
tr <- blockSize(x, n=2)
pb <- pbCreate(tr$n, progress=progress, label='freq')
z <- vector(length=0)
for (i in 1:tr$n) {
d <- round(getValuesBlock(x, row=tr$row[i], nrows=tr$nrows[i]), digits=digits)
res <- table(d, useNA=useNA )
res <- cbind(as.numeric(unlist(as.vector(dimnames(res)))), as.vector(res))
z <- rbind(z, res)
pbStep(pb, i)
}
# Sum the per-block counts for each distinct value.
res <- tapply(z[,2], as.character(z[,1]), sum)
res <- cbind(as.numeric(names(res)), as.vector(res))
# NA rows are dropped by tapply's grouping; re-append their total count.
z <- z[is.na(z[,1]), ,drop=FALSE]
if (isTRUE(nrow(z) > 0)) {
z <- sum(z[,2])
res <- rbind(res, c(NA, z))
}
pbClose(pb)
}
colnames(res) <- c('value', 'count')
return(res)
}
)
# freq() for multi-layer objects: apply freq() to each layer, returning a
# named list of per-layer value/count tables; with merge=TRUE the tables
# are outer-joined on the value column (one count column per layer).
setMethod('freq', signature(x='RasterStackBrick'),
  function(x, digits=0, value=NULL, useNA="ifany", merge=FALSE, progress='', ...) {
    # Counting a single value is delegated to the internal .count() helper.
    if (!is.null(value)) {
      return(.count(x, value, digits=digits, progress=progress, ...))
    }
    nl <- nlayers(x)
    res <- list()
    pb <- pbCreate(nl, progress=progress, label='freq')
    for (i in 1:nl) {
      # BUG FIX: forward `digits` to the per-layer method; it was
      # previously dropped, so a non-default `digits` was ignored.
      res[[i]] <- freq(raster(x, i), digits=digits, useNA=useNA, progress='', ...)
      pbStep(pb, i)
    }
    pbClose(pb)
    names(res) <- ln <- names(x)
    if (merge) {
      # Start from the first layer's table and outer-join the rest on value.
      r <- res[[1]]
      colnames(r)[2] <- ln[1]
      if (nl > 1) {
        for (i in 2:nl) {
          x <- res[[i]]
          colnames(x)[2] <- ln[i]
          r <- merge(r, x, by=1, all=TRUE)
        }
      }
      return(r)
    }
    return(res)
  }
)
# Internal helper for freq(..., value=): count the cells equal to `value`
# (after rounding to `digits`), or the NA cells when `value` is NA.
# Multi-layer objects yield one count per layer; single layers a scalar.
.count <- function(x, value, digits=0, progress='', ...) {
  # Only a single target value is supported
  value <- value[1]
  if (nlayers(x) > 1) {
    if (canProcessInMemory(x, 2)) {
      if (is.na(value)) {
        v <- colSums(is.na(getValues(x)))
      } else {
        v <- round(getValues(x), digits=digits) == value
        v <- colSums(v, na.rm=TRUE)
      }
    } else {
      # Process block by block, accumulating per-layer counts in `v`
      tr <- blockSize(x, n=2)
      pb <- pbCreate(tr$n, progress=progress)
      v <- 0
      for (i in 1:tr$n) {
        vv <- getValues(x, row=tr$row[i], nrows=tr$nrows[i])
        if (is.na(value)) {
          v <- v + colSums(is.na(vv))
        } else {
          # BUG FIX: round and compare the block values `vv`; the original
          # rounded the running total `v` instead, producing wrong counts
          # for out-of-memory multi-layer rasters.
          vv <- round(vv, digits=digits) == value
          v <- v + colSums(vv, na.rm=TRUE)
        }
        pbStep(pb, i)
      }
      pbClose(pb)
    }
    return(v)
  } else {
    if (canProcessInMemory(x, 2)) {
      if (is.na(value)) {
        x <- sum(is.na(getValues(x)))
      } else {
        v <- na.omit(round(getValues(x), digits=digits))
        x <- sum(v == value)
      }
      return(x)
    } else {
      # Single layer, processed block by block with scalar accumulator `r`
      tr <- blockSize(x, n=2)
      pb <- pbCreate(tr$n, progress=progress)
      r <- 0
      for (i in 1:tr$n) {
        v <- getValues(x, row=tr$row[i], nrows=tr$nrows[i])
        if (is.na(value)) {
          r <- r + sum(is.na(v))
        } else {
          v <- na.omit(round(v, digits=digits))
          r <- r + sum(v == value)
        }
        pbStep(pb, i)
      }
      pbClose(pb)
      return(r)
    }
  }
}
|
library(ggplot2)
# Ames housing data, comma-separated.
# NOTE(review): no header=TRUE is given, so the first row is read as data and
# columns get default V1..Vn names -- confirm the file has no header row.
df <- read.table('ames.csv', sep = ',')
# Random 90% of the row indices, intended as a training split.
ran <- sample(1 : nrow(df), .9 * nrow(df))
# Min-max normalization: rescale x linearly to [0, 1].
# BUG FIX: the original body was `return (x - min(x)) / (max(x) - min(x))`,
# which R parses as `return(x - min(x))` -- the function exited before the
# division, so the denominator was never applied.
nor <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
# Normalize a subset of columns.
# NOTE(review): range(56) is c(56, 56) and range(58, 81) is c(58, 81), so this
# selects column 56 twice plus columns 58 and 81 only -- 1:56 and 58:81 were
# probably intended; confirm.
norm <- as.data.frame(lapply(df[, c(c(range(56)), c(range(58, 81)))], nor))
set.seed(123)
# NOTE(review): `n` is never defined in this script, so this line errors --
# presumably nrow(df) was intended.
dat.d <- sample(1:n)
|
/in_class_knn.R
|
no_license
|
jschmidt345/Group_BLSS_SSC442
|
R
| false
| false
| 278
|
r
|
library(ggplot2)
# Ames housing data, comma-separated.
# NOTE(review): no header=TRUE is given, so the first row is read as data and
# columns get default V1..Vn names -- confirm the file has no header row.
df <- read.table('ames.csv', sep = ',')
# Random 90% of the row indices, intended as a training split.
ran <- sample(1 : nrow(df), .9 * nrow(df))
# Min-max normalization: rescale x linearly to [0, 1].
# BUG FIX: the original body was `return (x - min(x)) / (max(x) - min(x))`,
# which R parses as `return(x - min(x))` -- the function exited before the
# division, so the denominator was never applied.
nor <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
# Normalize a subset of columns.
# NOTE(review): range(56) is c(56, 56) and range(58, 81) is c(58, 81), so this
# selects column 56 twice plus columns 58 and 81 only -- 1:56 and 58:81 were
# probably intended; confirm.
norm <- as.data.frame(lapply(df[, c(c(range(56)), c(range(58, 81)))], nor))
set.seed(123)
# NOTE(review): `n` is never defined in this script, so this line errors --
# presumably nrow(df) was intended.
dat.d <- sample(1:n)
|
## Convert a dense matrix to a sparse Matrix, dropping entries whose
## absolute value is below 1e-16.
## NOTE: requires sparseMatrix() from the Matrix package.
.makeSparse <- function(X) {
  X <- as.matrix(X)
  ## triplet representation: (row index, column index, value)
  w <- cbind(c(row(X)), c(col(X)), c(X))
  w <- w[abs(w[, 3]) > 1e-16, , drop = FALSE]
  ## Return the sparse matrix directly.  The original ended in an
  ## assignment (`Y <- sparseMatrix(...)`), which returned the value
  ## invisibly and left a dead binding `Y`.
  sparseMatrix(w[, 1], w[, 2], x = w[, 3], dims = dim(X))
}
## Map the (i, j) entry of an N x N matrix A to its position in the
## row-major vector R = c(A[1,1], ..., A[1,N], A[2,1], ..., A[N,N]),
## i.e. A[i, j] == R[.ij2r(i, j, N)].
.ij2r <- function(i, j, N) {
  (i - 1) * N + j
}
.indexSymmat2vec <- function(i, j, N) {
  ## Index of S[i, j] within the row-major vector of upper-triangular
  ## elements of a symmetric N x N matrix S:
  ##   r = c(S[1,1], S[1,2], ..., S[1,N], S[2,2], ..., S[N,N])
  ## Symmetry means (i, j) and (j, i) map to the same position, so the
  ## smaller of the two indices plays the role of the row.
  k <- if (i <= j) (i - 1) * (N - i / 2) + j else (j - 1) * (N - j / 2) + i
}
.indexVec2Symmat <- function(k, N) {
  ## Inverse of .indexSymmat2vec: map position k in the row-major vector
  ## of upper-triangular elements of an N x N symmetric matrix back to an
  ## index pair c(i, j) with i <= j.
  ## Example (N = 3): k=1 -> (1,1), k=2 -> (1,2), k=3 -> (1,3), k=4 -> (2,2).
  row.ends <- cumsum(N:1)                          # last k belonging to row i
  row.starts <- c(0, row.ends[-length(row.ends)])  # last k of row i - 1
  i <- which(row.starts < k & k <= row.ends)
  j <- k - N * i + N - i * (3 - i) / 2 + i
  return( c(i, j) )
}
.index2UpperTriEntry <- .indexVec2Symmat
.divZero <- function(x, y, tol = 1e-14) {
  ## Guarded ratio: x / y, except that the result is defined to be 1 when
  ## both |x| and |y| fall below `tol` (0/0-like cases).
  if (abs(x) < tol & abs(y) < tol) {
    out <- 1
  } else {
    out <- x / y
  }
  out
}
.is.lmm <- function(object) {
  ## TRUE iff `object` is a linear mixed model (as judged by lme4::isLMM);
  ## plain matrices / Matrix objects are never mixed models.
  ## BUG FIX: the original tested `class(object) %in% c("matrix","Matrix")`;
  ## since R 4.0 class(<matrix>) is c("matrix", "array"), so the condition
  ## has length 2 and errors inside if() on modern R.  inherits() handles
  ## multi-class objects correctly.
  if (inherits(object, c("matrix", "Matrix"))) {
    FALSE
  } else {
    isLMM(object)
  }
}
## .is.lmm <- function(object) {
## ##checks whether object is
## ## - mer object AND
## ## - linear mixed model
## if (class(object) %in% "mer") {
## if (length(object@muEta)==0 )
## TRUE
## else
## ## FALSE
## ## } else {
## ## FALSE
## ## }
## ## }
|
/R/KR-utils.R
|
no_license
|
PhilipPallmann/pbkrtest
|
R
| false
| false
| 1,632
|
r
|
## Convert a dense matrix to a sparse Matrix, dropping entries whose
## absolute value is below 1e-16.
## NOTE: requires sparseMatrix() from the Matrix package.
.makeSparse <- function(X) {
  X <- as.matrix(X)
  ## triplet representation: (row index, column index, value)
  w <- cbind(c(row(X)), c(col(X)), c(X))
  w <- w[abs(w[, 3]) > 1e-16, , drop = FALSE]
  ## Return the sparse matrix directly.  The original ended in an
  ## assignment (`Y <- sparseMatrix(...)`), which returned the value
  ## invisibly and left a dead binding `Y`.
  sparseMatrix(w[, 1], w[, 2], x = w[, 3], dims = dim(X))
}
## Map the (i, j) entry of an N x N matrix A to its position in the
## row-major vector R = c(A[1,1], ..., A[1,N], A[2,1], ..., A[N,N]),
## i.e. A[i, j] == R[.ij2r(i, j, N)].
.ij2r <- function(i, j, N) {
  (i - 1) * N + j
}
.indexSymmat2vec <- function(i, j, N) {
  ## Index of S[i, j] within the row-major vector of upper-triangular
  ## elements of a symmetric N x N matrix S:
  ##   r = c(S[1,1], S[1,2], ..., S[1,N], S[2,2], ..., S[N,N])
  ## Symmetry means (i, j) and (j, i) map to the same position, so the
  ## smaller of the two indices plays the role of the row.
  k <- if (i <= j) (i - 1) * (N - i / 2) + j else (j - 1) * (N - j / 2) + i
}
.indexVec2Symmat <- function(k, N) {
  ## Inverse of .indexSymmat2vec: map position k in the row-major vector
  ## of upper-triangular elements of an N x N symmetric matrix back to an
  ## index pair c(i, j) with i <= j.
  ## Example (N = 3): k=1 -> (1,1), k=2 -> (1,2), k=3 -> (1,3), k=4 -> (2,2).
  row.ends <- cumsum(N:1)                          # last k belonging to row i
  row.starts <- c(0, row.ends[-length(row.ends)])  # last k of row i - 1
  i <- which(row.starts < k & k <= row.ends)
  j <- k - N * i + N - i * (3 - i) / 2 + i
  return( c(i, j) )
}
.index2UpperTriEntry <- .indexVec2Symmat
.divZero <- function(x, y, tol = 1e-14) {
  ## Guarded ratio: x / y, except that the result is defined to be 1 when
  ## both |x| and |y| fall below `tol` (0/0-like cases).
  if (abs(x) < tol & abs(y) < tol) {
    out <- 1
  } else {
    out <- x / y
  }
  out
}
.is.lmm <- function(object) {
  ## TRUE iff `object` is a linear mixed model (as judged by lme4::isLMM);
  ## plain matrices / Matrix objects are never mixed models.
  ## BUG FIX: the original tested `class(object) %in% c("matrix","Matrix")`;
  ## since R 4.0 class(<matrix>) is c("matrix", "array"), so the condition
  ## has length 2 and errors inside if() on modern R.  inherits() handles
  ## multi-class objects correctly.
  if (inherits(object, c("matrix", "Matrix"))) {
    FALSE
  } else {
    isLMM(object)
  }
}
## .is.lmm <- function(object) {
## ##checks whether object is
## ## - mer object AND
## ## - linear mixed model
## if (class(object) %in% "mer") {
## if (length(object@muEta)==0 )
## TRUE
## else
## ## FALSE
## ## } else {
## ## FALSE
## ## }
## ## }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_list_schema_versions}
\alias{glue_list_schema_versions}
\title{Returns a list of schema versions that you have created, with minimal
information}
\usage{
glue_list_schema_versions(SchemaId, MaxResults, NextToken)
}
\arguments{
\item{SchemaId}{[required] This is a wrapper structure to contain schema identity fields. The
structure contains:
\itemize{
\item SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema.
Either \code{SchemaArn} or \code{SchemaName} and \code{RegistryName} has to be
provided.
\item SchemaId$SchemaName: The name of the schema. Either \code{SchemaArn} or
\code{SchemaName} and \code{RegistryName} has to be provided.
}}
\item{MaxResults}{Maximum number of results required per page. If the value is not
supplied, this will be defaulted to 25 per page.}
\item{NextToken}{A continuation token, if this is a continuation call.}
}
\description{
Returns a list of schema versions that you have created, with minimal
information. Schema versions in Deleted status will not be included in
the results. Empty results will be returned if there are no schema
versions available.
}
\section{Request syntax}{
\preformatted{svc$list_schema_versions(
SchemaId = list(
SchemaArn = "string",
SchemaName = "string",
RegistryName = "string"
),
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
/paws/man/glue_list_schema_versions.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 1,455
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_list_schema_versions}
\alias{glue_list_schema_versions}
\title{Returns a list of schema versions that you have created, with minimal
information}
\usage{
glue_list_schema_versions(SchemaId, MaxResults, NextToken)
}
\arguments{
\item{SchemaId}{[required] This is a wrapper structure to contain schema identity fields. The
structure contains:
\itemize{
\item SchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema.
Either \code{SchemaArn} or \code{SchemaName} and \code{RegistryName} has to be
provided.
\item SchemaId$SchemaName: The name of the schema. Either \code{SchemaArn} or
\code{SchemaName} and \code{RegistryName} has to be provided.
}}
\item{MaxResults}{Maximum number of results required per page. If the value is not
supplied, this will be defaulted to 25 per page.}
\item{NextToken}{A continuation token, if this is a continuation call.}
}
\description{
Returns a list of schema versions that you have created, with minimal
information. Schema versions in Deleted status will not be included in
the results. Empty results will be returned if there are no schema
versions available.
}
\section{Request syntax}{
\preformatted{svc$list_schema_versions(
SchemaId = list(
SchemaArn = "string",
SchemaName = "string",
RegistryName = "string"
),
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
## Assignment 2 - cache and return of matrix input and its inverse
## creates a special "matrix" object that can cache its inverse
## Creates a special "matrix" object that caches its inverse.
## Returns a list of four accessor functions:
##   set(y)      -- replace the stored matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- store a precomputed inverse in the cache
##   getinv()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # new matrix, so drop the stale cached inverse
  }
  get <- function() x
  # Bug fix: the original parameter was named `solve` and the body called
  # `solve(x)`, which silently fell through to base::solve and recomputed
  # the inverse of the stored matrix on every call, ignoring the supplied
  # value. Store the supplied inverse directly instead.
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## computes the inverse of the 'makeCacheMatrix' matrix and retrieves
## the inverse from the cache if it has already been calculated
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix(). On a cache hit the stored inverse is returned
## (after announcing the hit); otherwise the inverse is computed with
## solve(), cached via x$setinv(), and returned. Additional arguments
## in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinv(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
KLGordon/ProgrammingAssignment2
|
R
| false
| false
| 895
|
r
|
## Assignment 2 - cache and return of matrix input and its inverse
## creates a special "matrix" object that can cache its inverse
## Creates a special "matrix" object that caches its inverse.
## Returns a list of four accessor functions:
##   set(y)      -- replace the stored matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- store a precomputed inverse in the cache
##   getinv()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # new matrix, so drop the stale cached inverse
  }
  get <- function() x
  # Bug fix: the original parameter was named `solve` and the body called
  # `solve(x)`, which silently fell through to base::solve and recomputed
  # the inverse of the stored matrix on every call, ignoring the supplied
  # value. Store the supplied inverse directly instead.
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## computes the inverse of the 'makeCacheMatrix' matrix and retrieves
## the inverse from the cache if it has already been calculated
## Return the inverse of the special "matrix" object `x` created by
## makeCacheMatrix(). On a cache hit the stored inverse is returned
## (after announcing the hit); otherwise the inverse is computed with
## solve(), cached via x$setinv(), and returned. Additional arguments
## in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinv(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FSSgam.R
\docType{package}
\name{FSSgam-package}
\alias{FSSgam-package}
\alias{FSSgam}
\title{\tabular{ll}{
Package: \tab FSSgam\cr
Type: \tab Package\cr
Title: \tab Full subsets multiple regression in R with gam(m4)\cr
Version: \tab 1.11\cr
Date: \tab 2018-09-14\cr
Author: \tab Rebecca Fisher\cr
Maintainer: \tab Rebecca Fisher\cr
License: \tab Apache 2\cr
LazyLoad: \tab yes\cr
Depends: \tab doSNOW, MuMIn, gamm4, mgcv, nnet\cr
}}
\details{
Full subsets information theoretic approaches are becoming an increasingly popular tool for exploring predictive power and variable importance where a wide range of candidate predictors are being considered.
This package provides simple function(s) that can be used to construct, fit and compare a complete model set of possible ecological or environmental predictors, given a response variable of interest. The function(s) are based on Generalized Additive Models (GAM) and builds on the MuMIn package.
Advantages include the capacity to fit more predictors than there are replicates, automatic removal of models with correlated predictors, and model sets that include interactions between factors and smooth predictors, as well as smooth interactions with other smooths (via te).
The function(s) takes a range of arguments that allow control over the model set being constructed, including specifying cyclic and linear continuous predictors, specification of the smoothing algorithm used and the maximum complexity allowed for smooth terms.
The full subsets analysis can be carried out via one of two alternative methods allowed in the package.
The first is through a direct call to full.subsets.gam (this is the original function).
This function both constructs and fits the complete model set, based on the user supplied input. This function requires that all model fits are saved, and is therefore
not suitable for extremely large model sets, as these will cause issues with memory. This method may be superseded in future versions of FSSgam, so for any new project please use the second method.
The second method is via a call to generate.model.set followed by as second call to fit.model set. This pair of functions splits the process of generating the model set from actually fitting and extracting the relevant model data.
This method is useful for large model sets, because it allows the model set to be interrogated before fitting and also optionally allows model fit data to not be saved, thus alleviating memory issues.
The use of the function(s) is demonstrated via case studies that highlight how appropriate model sets can be easily constructed, and the broader utility of the approach for exploratory ecology.
Please see the case study files on github for usage examples at \url{https://github.com/beckyfisher/FSSgam}
}
\examples{
install.packages("FSSgam",dependencies=TRUE)
library(FSSgam)
}
\references{
Fisher R, Wilson SK, Sin TM, Lee AC, Langlois TJ (2018) A simple function for full-subsets multiple regression in ecology with R. Ecology and Evolution
\url{https://onlinelibrary.wiley.com/doi/abs/10.1002/ece3.4134}
}
\author{
Rebecca Fisher (Australian Institute of Marine Science)
Maintainer: Rebecca Fisher \email{r.fisher@aims.gov.au}
}
|
/man/FSSgam-package.Rd
|
permissive
|
beckyfisher/FSSgam_package
|
R
| false
| true
| 3,295
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FSSgam.R
\docType{package}
\name{FSSgam-package}
\alias{FSSgam-package}
\alias{FSSgam}
\title{\tabular{ll}{
Package: \tab FSSgam\cr
Type: \tab Package\cr
Title: \tab Full subsets multiple regression in R with gam(m4)\cr
Version: \tab 1.11\cr
Date: \tab 2018-09-14\cr
Author: \tab Rebecca Fisher\cr
Maintainer: \tab Rebecca Fisher\cr
License: \tab Apache 2\cr
LazyLoad: \tab yes\cr
Depends: \tab doSNOW, MuMIn, gamm4, mgcv, nnet\cr
}}
\details{
Full subsets information theoretic approaches are becoming an increasingly popular tool for exploring predictive power and variable importance where a wide range of candidate predictors are being considered.
This package provides simple function(s) that can be used to construct, fit and compare a complete model set of possible ecological or environmental predictors, given a response variable of interest. The function(s) are based on Generalized Additive Models (GAM) and builds on the MuMIn package.
Advantages include the capacity to fit more predictors than there are replicates, automatic removal of models with correlated predictors, and model sets that include interactions between factors and smooth predictors, as well as smooth interactions with other smooths (via te).
The function(s) takes a range of arguments that allow control over the model set being constructed, including specifying cyclic and linear continuous predictors, specification of the smoothing algorithm used and the maximum complexity allowed for smooth terms.
The full subsets analysis can be carried out via one of two alternative methods allowed in the package.
The first is through a direct call to full.subsets.gam (this is the original function).
This function both constructs and fits the complete model set, based on the user supplied input. This function requires that all model fits are saved, and is therefore
not suitable for extremely large model sets, as these will cause issues with memory. This method may be superseded in future versions of FSSgam, so for any new project please use the second method.
The second method is via a call to generate.model.set followed by as second call to fit.model set. This pair of functions splits the process of generating the model set from actually fitting and extracting the relevant model data.
This method is useful for large model sets, because it allows the model set to be interrogated before fitting and also optionally allows model fit data to not be saved, thus alleviating memory issues.
The use of the function(s) is demonstrated via case studies that highlight how appropriate model sets can be easily constructed, and the broader utility of the approach for exploratory ecology.
Please see the case study files on github for usage examples at \url{https://github.com/beckyfisher/FSSgam}
}
\examples{
install.packages("FSSgam",dependencies=TRUE)
library(FSSgam)
}
\references{
Fisher R, Wilson SK, Sin TM, Lee AC, Langlois TJ (2018) A simple function for full-subsets multiple regression in ecology with R. Ecology and Evolution
\url{https://onlinelibrary.wiley.com/doi/abs/10.1002/ece3.4134}
}
\author{
Rebecca Fisher (Australian Institute of Marine Science)
Maintainer: Rebecca Fisher \email{r.fisher@aims.gov.au}
}
|
---
title: "Statistical_analysis_prestudy"
author: "Marie Tzschaschel"
date: "09.07.2020"
output: pdf_document
---
```{r setup, include=FALSE}
install.packages("lme4")
library(lme4)
```
The goal of this analysis is to examine whether the similarity values of the similar dataset
(tuples containing two nouns that have the same animacy, i.e. animate and animate or
inanimate and inanimate) are significantly higher than those of the
contrast dataset (tuples containing two nouns with different animacy, animate and inanimate).
In the first block the data is loaded into the R environment.
The data is stored in a csv file, the variables are separated by ',' and the first row is
the name of the columns (therefore header=TRUE).
The head() function shows the first rows of the dataset, such that we can see its structure.
```{r}
#load data
simval_data<-read.csv(file = "df_simval.csv", header = TRUE, sep = ',')
head(simval_data)
```
In the next block we extract the similarity values of the similar and the contrast data.
These values are stored in the third column of the simval_data. With the square brackets after the variable
we can access the different rows and columns. The first entry within the square brackets is
always the row, the second the column. If we let the row entry empty (as in our case), we
take the whole row. Since we need to access the third column we write three after the comma.
We do the same for the contrast data, that is stored in the fifth column.
After having extracted the two groups out of the simval_data we hist the two distributions
of the similarity values.
```{r}
#Extract the similarity values and check how they are distributed by plotting the histogram
simval_similar<-simval_data[,3]
simval_contrast<-simval_data[,5]
hist(simval_similar)
hist(simval_contrast)
```
```{r}
# Check the mean and sd for both groups
mean_similar<-mean(simval_similar)
mean_similar
mean_contrast<-mean(simval_contrast)
mean_contrast
sd_similar<-sd(simval_similar)
sd_similar
sd_contrast<-sd(simval_contrast)
sd_contrast
# the two distributions have approximately the same sd
delta<-mean_similar-mean_contrast
delta #delta is even higher for this dataset (last one was 0.06)
#power analysis: 86%
#one.sample since our data is dependent row by row
power.t.test(delta = delta, n=74, sd=sd_similar, type="one.sample", alternative="two.sided")
```
In the next block, do a one sample t-test (paired), create a data frame
and fit a linear model with varying intercept for items. The similar condition is coded as 1
the contrast condition as -1.
Lets start with the t-tests
```{r}
#One sample (paired) t-test. The assumption is that each row is dependent.
diff<-simval_similar-simval_contrast
t.test(diff)
#this is the same t-test
t.test(simval_similar,simval_contrast, paired = TRUE)
```
We fit a linear mixed effects model with varying intercepts for items.
We will then check the residual assumption of that model.
For that purpose we create a dataframe containing
- the item ID
- the condition (sum contrast coding)
- a vector with all similarity values (first similarity values corresponding to the similar
tuples, then those corresponding to the contrast tuples)
For the sum contrast coding, we need a vector containing the condition (-1 and 1)
as many times as we have observations in each group (73 for each group).
Since we have the two groups together in one vector, we need to create an item ID that counts
from one to the length of the similar dataset and starts at one again for the contrast
dataset.
```{r}
#dataframe for linear mixed models
#Item ID
len_sim<-length(simval_similar)
len_con<-length(simval_contrast)
#the similar and the contrast group are equal in their length
Item_ID<-rep(1:len_sim,2)
#Condition (sum contrast coding). Similar group +1, contrast group -1.
condition<-c(rep(1,len_sim),rep(-1,len_con))
similarity_values<-c(simval_similar,simval_contrast)
dataframe<-data.frame(ID=Item_ID,condition=condition,similarity_values=similarity_values)
head(dataframe)
```
```{r}
#Varying intercepts for items
m0<-lmer(similarity_values~condition + (1|Item_ID),dataframe,REML=FALSE)
summary(m0)
#significant
```
Check for the residuals of the model.
```{r}
acf(residuals(m0))
```
|
/Prestudy_dataanalysis.r
|
no_license
|
MarieTzschaschel/Pre-study
|
R
| false
| false
| 4,439
|
r
|
---
title: "Statistical_analysis_prestudy"
author: "Marie Tzschaschel"
date: "09.07.2020"
output: pdf_document
---
```{r setup, include=FALSE}
install.packages("lme4")
library(lme4)
```
The goal of this analysis is to examine whether the similarity values of the similar dataset
(tuples containing two nouns that have the same animacy, i.e. animate and animate or
inanimate and inanimate) are significantly higher than those of the
contrast dataset (tuples containing two nouns with different animacy, animate and inanimate).
In the first block the data is loaded into the R environment.
The data is stored in a csv file, the variables are separated by ',' and the first row is
the name of the columns (therefore header=TRUE).
The head() function shows the first rows of the dataset, such that we can see its structure.
```{r}
#load data
simval_data<-read.csv(file = "df_simval.csv", header = TRUE, sep = ',')
head(simval_data)
```
In the next block we extract the similarity values of the similar and the contrast data.
These values are stored in the third column of the simval_data. With the square brackets after the variable
we can access the different rows and columns. The first entry within the square brackets is
always the row, the second the column. If we let the row entry empty (as in our case), we
take the whole row. Since we need to access the third column we write three after the comma.
We do the same for the contrast data, that is stored in the fifth column.
After having extracted the two groups out of the simval_data we hist the two distributions
of the similarity values.
```{r}
#Extract the similarity values and check how they are distributed by plotting the histogram
simval_similar<-simval_data[,3]
simval_contrast<-simval_data[,5]
hist(simval_similar)
hist(simval_contrast)
```
```{r}
# Check the mean and sd for both groups
mean_similar<-mean(simval_similar)
mean_similar
mean_contrast<-mean(simval_contrast)
mean_contrast
sd_similar<-sd(simval_similar)
sd_similar
sd_contrast<-sd(simval_contrast)
sd_contrast
# the two distributions have approximately the same sd
delta<-mean_similar-mean_contrast
delta #delta is even higher for this dataset (last one was 0.06)
#power analysis: 86%
#one.sample since our data is dependant row by row
power.t.test(delta = delta, n=74, sd=sd_similar, type="one.sample", alternative="two.sided")
```
In the next block, do a one sample t-test (paired), create a data frame
and fit a linear model with varying intercept for items. The similar condition is coded as 1
the contrast condition as -1.
Lets start with the t-tests
```{r}
#One sample (paired) t-test. The assumption is that each row is dependent.
diff<-simval_similar-simval_contrast
t.test(diff)
#this is the same t-test
t.test(simval_similar,simval_contrast, paired = TRUE)
```
We fit a linear mixed effects model with varying intercepts for items.
We will then check the residual assumption of that model.
For that purpuse we create a dataframe containing
- the item ID
- the condition (sum contrast coding)
- a vector with all similarity values (first similarity values corresponding to the similar
tuples, then those corresponding to the contrast tuples)
For the sum contrast conding, we need a vector containing the condition (-1 and 1)
as many times as we have observations in each group (73 for each group).
Since we have the two groups together in one vector, we need to create an item ID that counts
from one to the length of the similar dataset and starts at one again for the contrast
dataset.
```{r}
#dataframe for linear mixed models
#Item ID
len_sim<-length(simval_similar)
len_con<-length(simval_contrast)
#the similar and the contrast group are equal in their length
Item_ID<-rep(1:len_sim,2)
#Condition (sum contrast coding). Similar group +1, contrast group -1.
condition<-c(rep(1,len_sim),rep(-1,len_con))
similarity_values<-c(simval_similar,simval_contrast)
dataframe<-data.frame(ID=Item_ID,condition=condition,similarity_values=similarity_values)
head(dataframe)
```
```{r}
#Varying intercepts for items
m0<-lmer(similarity_values~condition + (1|Item_ID),dataframe,REML=FALSE)
summary(m0)
#significant
```
Check for the residuals of the model.
```{r}
acf(residuals(m0))
```
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{plotHeatmapSegment}
\alias{plotHeatmapSegment}
\title{Plot probability table as a heatmap.}
\usage{
plotHeatmapSegment(dataFrame, plot.log = FALSE, file = NULL,
aggProbs = FALSE, CNV = 3)
}
\arguments{
\item{dataFrame}{A \code{\link{data.frame}} object that contains genotype probabilities.}
\item{plot.log}{A logical indicating whether or not to plot in logarithmic scale.}
\item{file}{A file to export the plot to.}
\item{aggProbs}{A logical indicating whether or not to plot aggregate probability values.}
\item{CNV}{A copy number value until which the probability values are plotted.}
}
\description{
Plot probability table as a heatmap.
}
\author{
David Porubsky
}
|
/man/plotHeatmapSegment.Rd
|
no_license
|
maryamghr/MaRyam-1
|
R
| false
| true
| 774
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{plotHeatmapSegment}
\alias{plotHeatmapSegment}
\title{Plot probability table as a heatmap.}
\usage{
plotHeatmapSegment(dataFrame, plot.log = FALSE, file = NULL,
aggProbs = FALSE, CNV = 3)
}
\arguments{
\item{dataFrame}{A \code{\link{data.frame}} object that contains genotype probabilities.}
\item{plot.log}{A logical indicating whether or not to plot in logarithmic scale.}
\item{file}{A file to export the plot to.}
\item{aggProbs}{A logical indicating whether or not to plot aggregate probability values.}
\item{CNV}{A copy number value until which the probability values are plotted.}
}
\description{
Plot probability table as a heatmap.
}
\author{
David Porubsky
}
|
# MODIS Data
#
# Download script: fetches MODIS land-surface-temperature (MOD11A2) and
# land-cover (MCD12Q1) subsets around a single point and converts the
# downloaded subsets to grid files. Requires network access to the MODIS
# web service; statements are order-dependent (download before conversion).
library(MODISTools)
library(sp)
# Single site of interest: "UB Spine" (lat, long in decimal degrees).
loc=rbind.data.frame(
list("UB Spine",43.000753, -78.788195))
colnames(loc)=c("loc","lat","long")
# Promote `loc` to an sp spatial object; coordinate order is (long, lat).
coordinates(loc)=cbind(loc$long,loc$lat)
## Available dates
# Query the MODIS service for all MOD11A2 composite dates at this point.
modis_dates=GetDates(Product = "MOD11A2", Lat = loc$lat[1], Long = loc$long[1])
# NOTE(review): devtools::use_data() has been superseded by
# usethis::use_data() in current devtools -- confirm before updating.
devtools::use_data(modis_dates)
# Output directories for the raw subsets (created if absent).
lstdir=file.path("inst/extdata/modis/lst")
if(!file.exists(lstdir)) dir.create(lstdir, recursive=T)
lcdir=file.path("inst/extdata/modis/lc")
if(!file.exists(lcdir)) dir.create(lcdir)
### Get Land Surface Temperature Data
# 10x10 km subset of daytime LST plus its quality-control band.
MODISSubsets(LoadDat = loc,
Products = c("MOD11A2"),
Bands = c( "LST_Day_1km", "QC_Day"),
Size = c(10,10),
SaveDir=lstdir,
StartDate=T)
### Get LULC
# Annual land-cover classification band (Land_Cover_Type_1).
MODISSubsets(LoadDat = loc,
Products = c("MCD12Q1"),
Bands = c( "Land_Cover_Type_1"),
Size = c(10,10),
SaveDir=lcdir,
StartDate=T)
## Convert LST Data
# Re-grid the downloaded subsets into per-band grid files under "modgrid",
# mapping each product's no-data sentinel values.
MODISGrid(Dir = lstdir,
DirName = "modgrid",
SubDir = TRUE,
NoDataValues=
list("MOD11A2" = c("LST_Day_1km" = 0,
"QC_Day" = -1)))
## Convert LandCover Data
MODISGrid(Dir = lcdir,
DirName = "modgrid",
SubDir = TRUE,
NoDataValues=
list("MCD12Q1" = c("Land_Cover_Type_1" = 255)))
|
/raw-data/modis.R
|
no_license
|
adammwilson/DataScienceData
|
R
| false
| false
| 1,370
|
r
|
# MODIS Data
#
# Download script: fetches MODIS land-surface-temperature (MOD11A2) and
# land-cover (MCD12Q1) subsets around a single point and converts the
# downloaded subsets to grid files. Requires network access to the MODIS
# web service; statements are order-dependent (download before conversion).
library(MODISTools)
library(sp)
# Single site of interest: "UB Spine" (lat, long in decimal degrees).
loc=rbind.data.frame(
list("UB Spine",43.000753, -78.788195))
colnames(loc)=c("loc","lat","long")
# Promote `loc` to an sp spatial object; coordinate order is (long, lat).
coordinates(loc)=cbind(loc$long,loc$lat)
## Available dates
# Query the MODIS service for all MOD11A2 composite dates at this point.
modis_dates=GetDates(Product = "MOD11A2", Lat = loc$lat[1], Long = loc$long[1])
# NOTE(review): devtools::use_data() has been superseded by
# usethis::use_data() in current devtools -- confirm before updating.
devtools::use_data(modis_dates)
# Output directories for the raw subsets (created if absent).
lstdir=file.path("inst/extdata/modis/lst")
if(!file.exists(lstdir)) dir.create(lstdir, recursive=T)
lcdir=file.path("inst/extdata/modis/lc")
if(!file.exists(lcdir)) dir.create(lcdir)
### Get Land Surface Temperature Data
# 10x10 km subset of daytime LST plus its quality-control band.
MODISSubsets(LoadDat = loc,
Products = c("MOD11A2"),
Bands = c( "LST_Day_1km", "QC_Day"),
Size = c(10,10),
SaveDir=lstdir,
StartDate=T)
### Get LULC
# Annual land-cover classification band (Land_Cover_Type_1).
MODISSubsets(LoadDat = loc,
Products = c("MCD12Q1"),
Bands = c( "Land_Cover_Type_1"),
Size = c(10,10),
SaveDir=lcdir,
StartDate=T)
## Convert LST Data
# Re-grid the downloaded subsets into per-band grid files under "modgrid",
# mapping each product's no-data sentinel values.
MODISGrid(Dir = lstdir,
DirName = "modgrid",
SubDir = TRUE,
NoDataValues=
list("MOD11A2" = c("LST_Day_1km" = 0,
"QC_Day" = -1)))
## Convert LandCover Data
MODISGrid(Dir = lcdir,
DirName = "modgrid",
SubDir = TRUE,
NoDataValues=
list("MCD12Q1" = c("Land_Cover_Type_1" = 255)))
|
#Phaseolus case study
#Julian Ramirez-Villegas
#CIAT
#March 2012
# NOTE(review): the header says "Phaseolus" but every path below points at a
# Lathyrus analysis -- presumably adapted from a Phaseolus script; confirm.
#
# Driver script for a crop-wild-relative gap analysis. Each step sources a
# helper script from `src.dir` and then calls the function it defines.
# The guard below stops accidental whole-file execution; run blocks by hand.
stop("Warning: do not run the whole thing")
#basic stuff
src.dir <- "D:/_tools/gap-analysis-cwr/trunk/gap-analysis/lathyrus"
#crop details
crop_dir <- "D:/CIAT_work/Gap_analysis/ICARDA-collab/lathyrus"; setwd(crop_dir)
#here first run the occurrence splitter (H/G) and the script to count and plot records
#extract climate data
# Step 1: attach bioclim values (bio_*) to each occurrence record,
# producing samples-with-data (SWD) files for MaxEnt.
source(paste(src.dir,"/001.extractClimates.R",sep=""))
occ_dir <- paste(crop_dir,"/occurrences",sep="")
cli_dir <- "D:/CIAT_work/climate_change/wcl_2_5min/bio"
swd_dir <- paste(crop_dir,"/swd",sep="")
if (!file.exists(swd_dir)) {dir.create(swd_dir)}
x <- extractClimates(input_dir=occ_dir,sample_file="lathyrus.csv",env_dir=cli_dir,
env_prefix="bio_",env_ext="",lonfield="lon",
latfield="lat",taxfield="Taxon",output_dir=swd_dir)
#splitting the occurrence files
# Step 2: write one occurrence file per taxon for MaxEnt modelling.
source(paste(src.dir,"/003.createOccurrenceFiles.R",sep=""))
oDir <- paste(crop_dir,"/maxent_modelling/occurrence_files",sep="")
if (!file.exists(oDir)) {dir.create(oDir)}
x <- createOccFiles(occ=paste(crop_dir,"/swd/occurrences_swd_ok.csv",sep=""),taxfield="Taxon",outDir=oDir)
#making the pseudo-absences
# Step 3: select a background (pseudo-absence) file for each taxon's
# occurrence file, using a fixed selection mask.
source(paste(src.dir,"/002.selectBackgroundArea.R",sep=""))
fList <- list.files("./maxent_modelling/occurrence_files",pattern=".csv")
bkDir <- paste(crop_dir,"/maxent_modelling/background",sep="")
if (!file.exists(bkDir)) {dir.create(bkDir)}
for (f in fList) {
cat("Processing",paste(f),"\n")
iFile <- paste("./maxent_modelling/occurrence_files/",f,sep="")
oFile <- paste("./maxent_modelling/background/",f,sep="")
x <- selectBack(occFile=iFile, outBackName=oFile,
msk="D:/CIAT_work/GBIF_project/backgroundFiles/backselection.asc",
backFilesDir="D:/CIAT_work/GBIF_project/backgroundFiles")
}
#perform the maxent modelling in parallel
# Step 4: run the MaxEnt models over 3 CPUs ("NT" = Windows).
source(paste(src.dir,"/005.modelingApproach.R",sep=""))
GapProcess(inputDir=paste(crop_dir,"/maxent_modelling",sep=""), OSys="NT", ncpu=3)
#summarise the metrics
# Step 5: collect model accuracy metrics and thresholds.
source(paste(src.dir,"/006.summarizeMetricsThresholds.R",sep=""))
x <- summarizeMetrics(idir=paste(crop_dir,"/maxent_modelling",sep=""))
#calculate area with SD<0.15 (aSD15)
source(paste(src.dir,"/007.calcASD15.R",sep=""))
x <- summarizeASD15(idir=paste(crop_dir,"/maxent_modelling",sep=""))
#calculate size of distributional range
source(paste(src.dir,"/008.sizeDR.R",sep=""))
x <- summarizeDR(crop_dir)
#select which taxa are of use for species richness
#get the following modelling metrics:
# a. 25-fold average test AUC (ATAUC)
# b. 25-fold stdev of test AUC (STAUC)
# c. proportion of potential distribution with SD>15 (ASD15)
#isValid==1 if ATAUC>0.7, STAUC<0.15, ASD15<10%
acc <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/accuracy.csv",sep=""))
asd <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/ASD15.csv",sep=""))
# Build one row per taxon flagging whether its model meets all three
# quality thresholds. Missing metrics are treated as worst-case for the
# validity test, but the raw (possibly NA) values are kept in the output row.
for (spp in acc$SPID) {
cat("Processing taxon",paste(spp),"\n")
#getting the quality metrics
atauc <- acc$TestAUC[which(acc$SPID==spp)]
stauc <- acc$TestAUCSD[which(acc$SPID==spp)]
asd15 <- asd$rateThresholded[which(asd$taxon==paste(spp))]
#putting everything onto a row for appending
row_res <- data.frame(Taxon=paste(spp),ATAUC=atauc,STAUC=stauc,ASD15=asd15,ValidModel=NA)
#checking if any is na and correcting consequently
# Worst-case substitutions so an NA metric can never pass the test below.
if (is.na(atauc)) {atauc <- 0}
if (is.na(stauc)) {stauc <- 1}
if (is.na(asd15)) {asd15 <- 100}
#reporting model quality
if (atauc>=0.7 & stauc<=0.15 & asd15<=10) {
row_res$ValidModel <- 1
} else {
row_res$ValidModel <- 0
}
#appending everything
# NOTE(review): growing res_all with rbind() in a loop is O(n^2); fine for
# small taxon lists, but preallocate or use a list + do.call(rbind, ...)
# if the taxon count grows.
if (spp == acc$SPID[1]) {
res_all <- row_res
} else {
res_all <- rbind(res_all,row_res)
}
}
write.csv(res_all,paste(crop_dir,"/maxent_modelling/summary-files/taxaForRichness.csv",sep=""),quote=F,row.names=F)
#calculate species richness
source(paste(src.dir,"/010.speciesRichness.R",sep=""))
x <- speciesRichness(bdir=crop_dir)
#create the priorities table
#1. SRS=GS/(GS+HS)*10
# Skeleton of the per-taxon priorities table; every metric column starts
# as NA and is filled in by the scoring loop that follows.
table_base <- read.csv(paste(crop_dir,"/sample_counts/sample_count_table.csv",sep=""))
table_base <- data.frame(Taxon=table_base$TAXON)
table_base$HS <- NA; table_base$HS_RP <- NA
table_base$GS <- NA; table_base$GS_RP <- NA
table_base$TOTAL <- NA; table_base$TOTAL_RP <- NA
table_base$ATAUC <- NA; table_base$STAUC <- NA; table_base$ASD15 <- NA; table_base$IS_VALID <- NA
table_base$SRS <- NA; table_base$CA50_G <- NA; table_base$PD_COV <- NA
table_base$GRS <- NA; table_base$NC_G_PC1 <- NA; table_base$NC_PD_PC1 <- NA
table_base$NC_G_PC2 <- NA; table_base$NC_PD_PC2 <- NA; table_base$ERS <- NA
table_base$ERTS <- NA; table_base$FPS <- NA; table_base$FPCAT <- NA
#reading specific tables
# Inputs produced by the earlier pipeline steps: sample counts, model
# quality flags, range sizes and environmental distances.
samples <- read.csv(paste(crop_dir,"/sample_counts/sample_count_table.csv",sep=""))
model_met <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/taxaForRichness.csv",sep=""))
rsize <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/areas.csv",sep=""))
edist <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/edist.csv",sep=""))
#read principal components weights and scale them to match 1
#!!!!!!
# Hard-coded PC weights (sum to 1) used to combine the two ERS components.
w_pc1 <- 0.7
w_pc2 <- 0.3
# Per-taxon scoring: compute the sampling (SRS), geographic (GRS) and
# ecological (ERS) representativeness scores (each capped at 10), average
# them into a final priority score (FPS) and assign a priority category.
for (spp in table_base$Taxon) {
cat("Processing species",paste(spp),"\n")
#sampling and SRS
# HS/GS = herbarium/genebank sample counts; *_RP = counts with coordinates.
hs <- samples$HNUM[which(samples$TAXON==paste(spp))]
hs_rp <- samples$HNUM_RP[which(samples$TAXON==paste(spp))]
gs <- samples$GNUM[which(samples$TAXON==paste(spp))]
gs_rp <- samples$GNUM_RP[which(samples$TAXON==paste(spp))]
total <- samples$TOTAL[which(samples$TAXON==paste(spp))]
total_rp <- samples$TOTAL_RP[which(samples$TAXON==paste(spp))]
srs <- gs/total*10
table_base$HS[which(table_base$Taxon==paste(spp))] <- hs
table_base$HS_RP[which(table_base$Taxon==paste(spp))] <- hs_rp
table_base$GS[which(table_base$Taxon==paste(spp))] <- gs
table_base$GS_RP[which(table_base$Taxon==paste(spp))] <- gs_rp
table_base$TOTAL[which(table_base$Taxon==paste(spp))] <- total
table_base$TOTAL_RP[which(table_base$Taxon==paste(spp))] <- total_rp
table_base$SRS[which(table_base$Taxon==paste(spp))] <- srs
#modelling metrics
atauc <- model_met$ATAUC[which(model_met$Taxon==paste(spp))]
stauc <- model_met$STAUC[which(model_met$Taxon==paste(spp))]
asd15 <- model_met$ASD15[which(model_met$Taxon==paste(spp))]
isval <- model_met$ValidModel[which(model_met$Taxon==paste(spp))]
table_base$ATAUC[which(table_base$Taxon==paste(spp))] <- atauc
table_base$STAUC[which(table_base$Taxon==paste(spp))] <- stauc
table_base$ASD15[which(table_base$Taxon==paste(spp))] <- asd15
table_base$IS_VALID[which(table_base$Taxon==paste(spp))] <- isval
#grs
# GRS = genebank CA50 area over potential-distribution area. For taxa
# without a valid model the convex hull (CHSize) stands in for the
# modelled distribution (DRSize).
g_ca50 <- rsize$GBSize[which(rsize$taxon==paste(spp))]
if (isval==1) {
drsize <- rsize$DRSize[which(rsize$taxon==paste(spp))]
} else {
drsize <- rsize$CHSize[which(rsize$taxon==paste(spp))]
}
grs <- g_ca50/drsize*10
if (!is.na(grs)) {
if (grs>10) {grs <- 10}
}
table_base$CA50_G[which(table_base$Taxon==paste(spp))] <- g_ca50
table_base$PD_COV[which(table_base$Taxon==paste(spp))] <- drsize
table_base$GRS[which(table_base$Taxon==paste(spp))] <- grs
#ers
# ERS: environmental coverage along two principal components, each capped
# at 10 and combined with the w_pc1/w_pc2 weights defined above.
ecg_ca50_pc1 <- edist$GBDist.PC1[which(edist$taxon==paste(spp))]
ecg_ca50_pc2 <- edist$GBDist.PC2[which(edist$taxon==paste(spp))]
dr_pc1 <- edist$DRDist.PC1[which(edist$taxon==paste(spp))]
dr_pc2 <- edist$DRDist.PC2[which(edist$taxon==paste(spp))]
ers_pc1 <- ecg_ca50_pc1/dr_pc1*10
if (!is.na(ers_pc1)) {
if (ers_pc1 > 10) {ers_pc1 <- 10}
}
ers_pc2 <- ecg_ca50_pc2/dr_pc2*10
if (!is.na(ers_pc2)) {
if (ers_pc2 > 10) {ers_pc2 <- 10}
}
ers <- ers_pc1*w_pc1 + ers_pc2*w_pc2
if (!is.na(ers))
if (ers > 10) {ers <- 10}
table_base$NC_G_PC1[which(table_base$Taxon==paste(spp))] <- ecg_ca50_pc1
table_base$NC_PD_PC1[which(table_base$Taxon==paste(spp))] <- dr_pc1
table_base$NC_G_PC2[which(table_base$Taxon==paste(spp))] <- ecg_ca50_pc2
table_base$NC_PD_PC2[which(table_base$Taxon==paste(spp))] <- dr_pc2
table_base$ERS[which(table_base$Taxon==paste(spp))] <- ers
#Final priority score
# Taxa with no (or almost no) genebank samples are forced to FPS = 0
# (i.e. highest collection priority); otherwise FPS is the mean of the
# three sub-scores, ignoring any that are NA.
if (gs==0) {
fps <- 0
} else if (hs==0 & gs<10) {
fps <- 0
} else {
fps <- mean(c(srs,grs,ers),na.rm=T)
}
table_base$FPS[which(table_base$Taxon==paste(spp))] <- fps
# Priority bins: HPS (high), MPS (medium), LPS (low), NFCR (no further
# collection required). Lower FPS means higher collection priority.
if (fps>=0 & fps<=3) {
fpcat <- "HPS"
} else if (fps>3 & fps<=5) {
fpcat <- "MPS"
} else if (fps>5 & fps<=7.5) {
fpcat <- "LPS"
} else {
fpcat <- "NFCR"
}
table_base$FPCAT[which(table_base$Taxon==paste(spp))] <- fpcat
}
if (!file.exists(paste(crop_dir,"/priorities",sep=""))) {
dir.create(paste(crop_dir,"/priorities",sep=""))
}
write.csv(table_base,paste(crop_dir,"/priorities/priorities.csv",sep=""),row.names=F,quote=F)
#sub-select hps
# Keep only the high-priority taxa for downstream gap mapping.
table_hps <- table_base[which(table_base$FPCAT=="HPS"),]
write.csv(table_hps,paste(crop_dir,"/priorities/hps.csv",sep=""),row.names=F,quote=F)
#calculate distance to populations
source(paste(src.dir,"/011.distanceToPopulations.R",sep=""))
summarizeDistances(crop_dir)
#calculate final gap richness
source(paste(src.dir,"/012.gapRichness.R",sep=""))
x <- gapRichness(bdir=crop_dir)
#plot the CA50 vs Potential coverage thing
# Scatter of genebank CA50 area against potential distribution coverage
# (both in thousands of sq-km), with a 1:1 line, a linear fit, and a few
# hand-placed species labels. Output goes to a 300-dpi TIFF.
prior <- read.csv(paste(crop_dir,"/priorities/priorities.csv",sep=""))
fit <- lm(prior$CA50_G~prior$PD_COV)
# Common axis limits spanning both variables (scaled to thousands).
lims <- c(min(prior$PD_COV,prior$CA50_G),max(prior$CA50_G,prior$PD_COV))/1000
#do the plot
tiff(paste(crop_dir,"/figures/geographic_coverage.tif",sep=""),
res=300,pointsize=12,width=1500,height=1000,units="px",compression="lzw")
par(mar=c(5,5,1,1),cex=0.8)
# NOTE(review): the y-axis label below is missing its closing ")" after
# "1000" -- cosmetic only, but worth fixing in the label string.
plot(prior$PD_COV/1000,prior$CA50_G/1000,pch=20,cex=0.75,xlim=lims,ylim=c(0,1000),
xlab="Potential geographic coverage (sq-km * 1000)",
ylab="Genebank accessions CA50 (sq-km * 1000")
abline(0,1,lwd=0.75,lty=2)
lines(prior$PD_COV/1000,fit$fitted.values/1000)
grid(lwd=0.75)
# Manual label placement; x-offsets (in thousands of sq-km) nudge each
# species name clear of its point.
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_sativus")]/1000+2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_sativus")]/1000,
"L. sativus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_cicera")]/1000+1500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_cicera")]/1000,
"L. cicera",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_aphaca")]/1000+2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_aphaca")]/1000,
"L. aphaca",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_pratensis")]/1000-2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_pratensis")]/1000,
"L. pratensis",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_ochrus")]/1000+1500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_ochrus")]/1000,
"L. ochrus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_clymenum")]/1000+2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_clymenum")]/1000,
"L. clymenum",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_inconspicuus")]/1000+2500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_inconspicuus")]/1000,
"L. inconspicuus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_annuus")]/1000+1500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_annuus")]/1000,
"L. annuus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_pseudocicera")]/1000+2400,
prior$CA50_G[which(prior$Taxon=="Lathyrus_pseudocicera")]/1000-10,
"L. pseudocicera",cex=0.5)
dev.off()
#plot the gap richness maps, uncertainty and related stuff
# zipRead loads a gzip-compressed ESRI ASCII grid as a raster layer
source(paste(src.dir,"/000.zipRead.R",sep=""))
gap_rich <- zipRead(paste(crop_dir,"/gap_richness/",sep=""),"gap-richness.asc.gz")
gap_dpmax <- zipRead(paste(crop_dir,"/gap_richness/",sep=""),"gap-richness-dpmax.asc.gz")
gap_sdmax <- zipRead(paste(crop_dir,"/gap_richness/",sep=""),"gap-richness-sdmax.asc.gz")
# country outlines used as an overlay on every map
library(maptools); data(wrld_simpl)
# figure aspect ratio from the raster extent (factor 1.2 leaves legend room)
z <- extent(gap_rich)
aspect <- (z@ymax-z@ymin)*1.2/(z@xmax-z@xmin)
# one colour class per observed gap-richness value; grey for the lowest class
grich_brks <- unique(gap_rich[!is.na(gap_rich[])])
grich_cols <- c("grey 80",colorRampPalette(c("yellow","orange","red"))(length(grich_brks)-2))
#gap richness map
tiff(paste(crop_dir,"/figures/gap_richness.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(gap_rich,col=grich_cols,zlim=c(min(grich_brks),max(grich_brks)),useRaster=F,
breaks=grich_brks,lab.breaks=grich_brks,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
###############
#gsd_brks <- unique(quantile(gap_sdmax[],probs=seq(0,1,by=0.05),na.rm=T))
# mask out cells with zero gap richness before deriving uncertainty breaks
gap_sdmax[which(gap_rich[]==0)] <- NA
gsd_brks <- c(seq(0,max(gap_sdmax[],na.rm=T),by=0.05),max(gap_sdmax[],na.rm=T))
gsd_cols <- colorRampPalette(c("light green","green","light blue","blue"))(length(gsd_brks)-1)
gsd_labs <- round(gsd_brks,2)
#gap uncertainty map (standard deviation)
tiff(paste(crop_dir,"/figures/gap_richness_sd.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(gap_sdmax,col=gsd_cols,zlim=c(min(gsd_brks),max(gsd_brks)),useRaster=F,
breaks=gsd_brks,lab.breaks=gsd_labs,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
# same masking for the population-distance uncertainty surface; breaks are
# 5%-quantiles of the remaining cell values
gap_dpmax[which(gap_rich[]==0)] <- NA
gdp_brks <- unique(quantile(gap_dpmax[],probs=seq(0,1,by=0.05),na.rm=T))
gdp_cols <- colorRampPalette(c("yellow","green","blue"))(length(gdp_brks)-1)
gdp_labs <- round(gdp_brks,2)
#gap uncertainty map (popdist)
tiff(paste(crop_dir,"/figures/gap_richness_dp.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(gap_dpmax,col=gdp_cols,zlim=c(min(gdp_brks),max(gdp_brks)),useRaster=F,
breaks=gdp_brks,lab.breaks=gdp_labs,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
#comparison of expert and gap scores
# eps holds, per taxon, the gap-analysis final priority score (FPS) and the
# expert-assigned priority score (EPS)
eps <- read.csv(paste(crop_dir,"/priorities/expert_gap_comparison.csv",sep=""))
# rank correlation between the two scorings
# NOTE(review): `spear` is computed but never used below -- presumably it was
# inspected interactively; confirm before removing
spear <- cor(eps$FPS,eps$EPS,method="spearman")
# relative difference; *10 presumably maps a 0-10 score difference to percent
# -- TODO confirm scale
eps$RD <- (eps$FPS-eps$EPS)*10
fit <- lm(eps$EPS~eps$FPS)
tiff(paste(crop_dir,"/figures/expert_evaluation.tif",sep=""),
res=300,pointsize=12,width=1500,height=1000,units="px",compression="lzw")
par(mar=c(5,5,1,1),cex=0.8)
plot(eps$FPS,eps$EPS,xlab="Gap Analysis Final priority score",
ylab="Expert priority score",pch=20,xlim=c(0,8),ylim=c(0,8))
# fitted trend plus the 1:1 (perfect agreement) reference line
lines(eps$FPS,fit$fitted.values)
abline(0,1,lty=2)
grid()
dev.off()
# histogram of the relative differences between the two scorings
tiff(paste(crop_dir,"/figures/expert_evaluation_RD.tif",sep=""),
res=300,pointsize=12,width=1500,height=1000,units="px",compression="lzw")
par(mar=c(5,5,1,1),cex=0.8)
hist(eps$RD,xlab="Relative difference (%)",
ylab="Frequency (number of taxa)",
breaks=20,xlim=c(-100,100),col="grey 70",main=NA)
abline(v=0,col="red")
grid()
dev.off()
## Figures: species-richness map and its uncertainty surface -------------------
## Reads the stacked species-richness raster and its per-cell maximum standard
## deviation, then renders both as TIFF maps with country outlines overlaid.
source(paste(src.dir,"/000.zipRead.R",sep=""))
sp_rich <- zipRead(paste(crop_dir,"/species_richness",sep=""),"species-richness.asc.gz")
sdmax <- zipRead(paste(crop_dir,"/species_richness",sep=""),"species-richness-sdmax.asc.gz")
library(maptools); data(wrld_simpl)
# figure aspect ratio from the raster extent (factor 1.2 leaves legend room)
z <- extent(sp_rich)
aspect <- (z@ymax-z@ymin)*1.2/(z@xmax-z@xmin)
# one colour class per observed richness value; grey for the lowest class
rich_brks <- unique(sp_rich[!is.na(sp_rich[])])
rich_cols <- c("grey 80",colorRampPalette(c("yellow","orange","red"))(length(rich_brks)-2))
#species richness map
tiff(paste(crop_dir,"/figures/sp_richness.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(sp_rich,col=rich_cols,zlim=c(min(rich_brks),max(rich_brks)),useRaster=F,
breaks=rich_brks,lab.breaks=rich_brks,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
###############
# mask out cells with zero richness before deriving uncertainty breaks
sdmax[which(sp_rich[]==0)] <- NA
sd_brks <- c(seq(0,max(sdmax[],na.rm=T),by=0.05),max(sdmax[],na.rm=T))
sd_cols <- colorRampPalette(c("light green","green","light blue","blue"))(length(sd_brks)-1)
sd_labs <- round(sd_brks,2)
#species richness uncertainty map (standard deviation)
tiff(paste(crop_dir,"/figures/sp_richness_sd.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
# fix: this previously plotted gap_sdmax (the gap-richness uncertainty raster),
# so the sp_richness_sd figure showed the wrong surface; sdmax is intended
plot(sdmax,col=sd_cols,zlim=c(min(sd_brks),max(sd_brks)),useRaster=F,
breaks=sd_brks,lab.breaks=sd_labs,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
|
/gap-analysis/lathyrus/gap-lathyrus.R
|
no_license
|
CIAT-DAPA/cwr_gap-analysis-cwr
|
R
| false
| false
| 16,970
|
r
|
#Phaseolus case study
# NOTE(review): header says "Phaseolus" but every path below refers to
# lathyrus -- presumably copied from a sibling crop script; confirm
#Julian Ramirez-Villegas
#CIAT
#March 2012
# Guard against sourcing the whole file: the pipeline below is meant to be run
# section by section, interactively.
stop("Warning: do not run the whole thing")
#basic stuff
src.dir <- "D:/_tools/gap-analysis-cwr/trunk/gap-analysis/lathyrus"
#crop details
crop_dir <- "D:/CIAT_work/Gap_analysis/ICARDA-collab/lathyrus"; setwd(crop_dir)
#here first run the occurrence splitter (H/G) and the script to count and plot records
#extract climate data
# attach bioclim values (bio_*) to each occurrence record, producing SWD files
source(paste(src.dir,"/001.extractClimates.R",sep=""))
occ_dir <- paste(crop_dir,"/occurrences",sep="")
cli_dir <- "D:/CIAT_work/climate_change/wcl_2_5min/bio"
swd_dir <- paste(crop_dir,"/swd",sep="")
if (!file.exists(swd_dir)) {dir.create(swd_dir)}
x <- extractClimates(input_dir=occ_dir,sample_file="lathyrus.csv",env_dir=cli_dir,
env_prefix="bio_",env_ext="",lonfield="lon",
latfield="lat",taxfield="Taxon",output_dir=swd_dir)
#splitting the occurrence files
# one occurrence file per taxon, used as MaxEnt input
source(paste(src.dir,"/003.createOccurrenceFiles.R",sep=""))
oDir <- paste(crop_dir,"/maxent_modelling/occurrence_files",sep="")
if (!file.exists(oDir)) {dir.create(oDir)}
x <- createOccFiles(occ=paste(crop_dir,"/swd/occurrences_swd_ok.csv",sep=""),taxfield="Taxon",outDir=oDir)
#making the pseudo-absences
# draw a background (pseudo-absence) sample per taxon from the selection mask
source(paste(src.dir,"/002.selectBackgroundArea.R",sep=""))
fList <- list.files("./maxent_modelling/occurrence_files",pattern=".csv")
bkDir <- paste(crop_dir,"/maxent_modelling/background",sep="")
if (!file.exists(bkDir)) {dir.create(bkDir)}
for (f in fList) {
cat("Processing",paste(f),"\n")
iFile <- paste("./maxent_modelling/occurrence_files/",f,sep="")
oFile <- paste("./maxent_modelling/background/",f,sep="")
x <- selectBack(occFile=iFile, outBackName=oFile,
msk="D:/CIAT_work/GBIF_project/backgroundFiles/backselection.asc",
backFilesDir="D:/CIAT_work/GBIF_project/backgroundFiles")
}
#perform the maxent modelling in parallel
# OSys="NT" selects Windows command conventions; ncpu = parallel workers
source(paste(src.dir,"/005.modelingApproach.R",sep=""))
GapProcess(inputDir=paste(crop_dir,"/maxent_modelling",sep=""), OSys="NT", ncpu=3)
#summarise the metrics
source(paste(src.dir,"/006.summarizeMetricsThresholds.R",sep=""))
x <- summarizeMetrics(idir=paste(crop_dir,"/maxent_modelling",sep=""))
#calculate area with SD<0.15 (aSD15)
source(paste(src.dir,"/007.calcASD15.R",sep=""))
x <- summarizeASD15(idir=paste(crop_dir,"/maxent_modelling",sep=""))
#calculate size of distributional range
source(paste(src.dir,"/008.sizeDR.R",sep=""))
x <- summarizeDR(crop_dir)
#select which taxa are of use for species richness
#get the following modelling metrics:
# a. 25-fold average test AUC (ATAUC)
# b. 25-fold stdev of test AUC (STAUC)
# c. proportion of potential distribution with SD>15 (ASD15)
#isValid==1 if ATAUC>0.7, STAUC<0.15, ASD15<10%
acc <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/accuracy.csv",sep=""))
asd <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/ASD15.csv",sep=""))
## Per-taxon model-quality table -----------------------------------------------
## A model is considered valid for richness stacking when ATAUC >= 0.7,
## STAUC <= 0.15 and ASD15 <= 10%. Missing metrics are replaced by worst-case
## values for the validity check only; the original (possibly NA) values are
## kept in the output table.
## Rewritten from a grow-by-rbind loop (O(n^2) copying, plus a fragile
## `spp == acc$SPID[1]` first-iteration test) to lapply + do.call(rbind, ...).
res_all <- do.call(rbind, lapply(acc$SPID, function(spp) {
cat("Processing taxon",paste(spp),"\n")
#getting the quality metrics
atauc <- acc$TestAUC[which(acc$SPID==spp)]
stauc <- acc$TestAUCSD[which(acc$SPID==spp)]
asd15 <- asd$rateThresholded[which(asd$taxon==paste(spp))]
#putting everything onto a row for appending
row_res <- data.frame(Taxon=paste(spp),ATAUC=atauc,STAUC=stauc,ASD15=asd15,ValidModel=NA)
#checking if any is na and correcting consequently (worst-case substitution)
if (is.na(atauc)) {atauc <- 0}
if (is.na(stauc)) {stauc <- 1}
if (is.na(asd15)) {asd15 <- 100}
#reporting model quality
if (atauc>=0.7 & stauc<=0.15 & asd15<=10) {
row_res$ValidModel <- 1
} else {
row_res$ValidModel <- 0
}
row_res
}))
write.csv(res_all,paste(crop_dir,"/maxent_modelling/summary-files/taxaForRichness.csv",sep=""),quote=F,row.names=F)
#calculate species richness
source(paste(src.dir,"/010.speciesRichness.R",sep=""))
x <- speciesRichness(bdir=crop_dir)
#create the priorities table
#1. SRS=GS/(GS+HS)*10
# table_base: one row per taxon, filled column-by-column in the loop below.
# Column legend: HS/GS = herbarium/genebank sample counts (_RP = with reference
# point/coordinates), SRS/GRS/ERS = sampling/geographic/environmental
# representativeness scores (0-10), FPS = final priority score, FPCAT = class.
table_base <- read.csv(paste(crop_dir,"/sample_counts/sample_count_table.csv",sep=""))
table_base <- data.frame(Taxon=table_base$TAXON)
table_base$HS <- NA; table_base$HS_RP <- NA
table_base$GS <- NA; table_base$GS_RP <- NA
table_base$TOTAL <- NA; table_base$TOTAL_RP <- NA
table_base$ATAUC <- NA; table_base$STAUC <- NA; table_base$ASD15 <- NA; table_base$IS_VALID <- NA
table_base$SRS <- NA; table_base$CA50_G <- NA; table_base$PD_COV <- NA
table_base$GRS <- NA; table_base$NC_G_PC1 <- NA; table_base$NC_PD_PC1 <- NA
table_base$NC_G_PC2 <- NA; table_base$NC_PD_PC2 <- NA; table_base$ERS <- NA
table_base$ERTS <- NA; table_base$FPS <- NA; table_base$FPCAT <- NA
#reading specific tables
samples <- read.csv(paste(crop_dir,"/sample_counts/sample_count_table.csv",sep=""))
model_met <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/taxaForRichness.csv",sep=""))
rsize <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/areas.csv",sep=""))
edist <- read.csv(paste(crop_dir,"/maxent_modelling/summary-files/edist.csv",sep=""))
#read principal components weights and scale them to match 1
#!!!!!!
# hard-coded PC weights (sum to 1) used to combine the two ERS components
w_pc1 <- 0.7
w_pc2 <- 0.3
for (spp in table_base$Taxon) {
cat("Processing species",paste(spp),"\n")
#sampling and SRS
hs <- samples$HNUM[which(samples$TAXON==paste(spp))]
hs_rp <- samples$HNUM_RP[which(samples$TAXON==paste(spp))]
gs <- samples$GNUM[which(samples$TAXON==paste(spp))]
gs_rp <- samples$GNUM_RP[which(samples$TAXON==paste(spp))]
total <- samples$TOTAL[which(samples$TAXON==paste(spp))]
total_rp <- samples$TOTAL_RP[which(samples$TAXON==paste(spp))]
# sampling representativeness: genebank share of all samples, scaled to 0-10
# NOTE(review): the comment above says GS/(GS+HS)*10 -- this assumes
# TOTAL == GS + HS; confirm against the sample_count table
srs <- gs/total*10
table_base$HS[which(table_base$Taxon==paste(spp))] <- hs
table_base$HS_RP[which(table_base$Taxon==paste(spp))] <- hs_rp
table_base$GS[which(table_base$Taxon==paste(spp))] <- gs
table_base$GS_RP[which(table_base$Taxon==paste(spp))] <- gs_rp
table_base$TOTAL[which(table_base$Taxon==paste(spp))] <- total
table_base$TOTAL_RP[which(table_base$Taxon==paste(spp))] <- total_rp
table_base$SRS[which(table_base$Taxon==paste(spp))] <- srs
#modelling metrics
atauc <- model_met$ATAUC[which(model_met$Taxon==paste(spp))]
stauc <- model_met$STAUC[which(model_met$Taxon==paste(spp))]
asd15 <- model_met$ASD15[which(model_met$Taxon==paste(spp))]
isval <- model_met$ValidModel[which(model_met$Taxon==paste(spp))]
table_base$ATAUC[which(table_base$Taxon==paste(spp))] <- atauc
table_base$STAUC[which(table_base$Taxon==paste(spp))] <- stauc
table_base$ASD15[which(table_base$Taxon==paste(spp))] <- asd15
table_base$IS_VALID[which(table_base$Taxon==paste(spp))] <- isval
#grs
# geographic representativeness: CA50 of genebank accessions relative to the
# potential range (modelled range if the model is valid, else convex hull),
# scaled to 0-10 and capped at 10
g_ca50 <- rsize$GBSize[which(rsize$taxon==paste(spp))]
if (isval==1) {
drsize <- rsize$DRSize[which(rsize$taxon==paste(spp))]
} else {
drsize <- rsize$CHSize[which(rsize$taxon==paste(spp))]
}
grs <- g_ca50/drsize*10
if (!is.na(grs)) {
if (grs>10) {grs <- 10}
}
table_base$CA50_G[which(table_base$Taxon==paste(spp))] <- g_ca50
table_base$PD_COV[which(table_base$Taxon==paste(spp))] <- drsize
table_base$GRS[which(table_base$Taxon==paste(spp))] <- grs
#ers
# environmental representativeness: genebank vs. range environmental spread
# on the first two principal components, each capped at 10, then combined
# with the w_pc1/w_pc2 weights
ecg_ca50_pc1 <- edist$GBDist.PC1[which(edist$taxon==paste(spp))]
ecg_ca50_pc2 <- edist$GBDist.PC2[which(edist$taxon==paste(spp))]
dr_pc1 <- edist$DRDist.PC1[which(edist$taxon==paste(spp))]
dr_pc2 <- edist$DRDist.PC2[which(edist$taxon==paste(spp))]
ers_pc1 <- ecg_ca50_pc1/dr_pc1*10
if (!is.na(ers_pc1)) {
if (ers_pc1 > 10) {ers_pc1 <- 10}
}
ers_pc2 <- ecg_ca50_pc2/dr_pc2*10
if (!is.na(ers_pc2)) {
if (ers_pc2 > 10) {ers_pc2 <- 10}
}
ers <- ers_pc1*w_pc1 + ers_pc2*w_pc2
# braceless if: the cap applies only when ers is not NA
if (!is.na(ers))
if (ers > 10) {ers <- 10}
table_base$NC_G_PC1[which(table_base$Taxon==paste(spp))] <- ecg_ca50_pc1
table_base$NC_PD_PC1[which(table_base$Taxon==paste(spp))] <- dr_pc1
table_base$NC_G_PC2[which(table_base$Taxon==paste(spp))] <- ecg_ca50_pc2
table_base$NC_PD_PC2[which(table_base$Taxon==paste(spp))] <- dr_pc2
table_base$ERS[which(table_base$Taxon==paste(spp))] <- ers
#Final priority score
# FPS = mean of the three sub-scores; forced to 0 (highest priority) when the
# taxon has no genebank samples, or none from herbaria and fewer than 10 from
# genebanks
if (gs==0) {
fps <- 0
} else if (hs==0 & gs<10) {
fps <- 0
} else {
fps <- mean(c(srs,grs,ers),na.rm=T)
}
table_base$FPS[which(table_base$Taxon==paste(spp))] <- fps
# priority class: HPS (high) / MPS / LPS / NFCR (no further collection req.)
if (fps>=0 & fps<=3) {
fpcat <- "HPS"
} else if (fps>3 & fps<=5) {
fpcat <- "MPS"
} else if (fps>5 & fps<=7.5) {
fpcat <- "LPS"
} else {
fpcat <- "NFCR"
}
table_base$FPCAT[which(table_base$Taxon==paste(spp))] <- fpcat
}
if (!file.exists(paste(crop_dir,"/priorities",sep=""))) {
dir.create(paste(crop_dir,"/priorities",sep=""))
}
write.csv(table_base,paste(crop_dir,"/priorities/priorities.csv",sep=""),row.names=F,quote=F)
#sub-select hps
table_hps <- table_base[which(table_base$FPCAT=="HPS"),]
write.csv(table_hps,paste(crop_dir,"/priorities/hps.csv",sep=""),row.names=F,quote=F)
#calculate distance to populations
source(paste(src.dir,"/011.distanceToPopulations.R",sep=""))
summarizeDistances(crop_dir)
#calculate final gap richness
source(paste(src.dir,"/012.gapRichness.R",sep=""))
x <- gapRichness(bdir=crop_dir)
#plot the CA50 vs Potential coverage thing
## Figure: genebank CA50 coverage vs. potential geographic coverage ------------
## Scatter of each taxon's genebank-accession CA50 area (CA50_G) against its
## potential distribution coverage (PD_COV), with a 1:1 reference line, a
## linear trend line, and labels for selected Lathyrus taxa.
prior <- read.csv(paste(crop_dir,"/priorities/priorities.csv",sep=""))
fit <- lm(prior$CA50_G~prior$PD_COV)
# shared axis limits, rescaled from sq-km to thousands of sq-km
lims <- c(min(prior$PD_COV,prior$CA50_G),max(prior$CA50_G,prior$PD_COV))/1000
#do the plot
tiff(paste(crop_dir,"/figures/geographic_coverage.tif",sep=""),
res=300,pointsize=12,width=1500,height=1000,units="px",compression="lzw")
par(mar=c(5,5,1,1),cex=0.8)
# fix: the y-axis label was missing its closing ")"
plot(prior$PD_COV/1000,prior$CA50_G/1000,pch=20,cex=0.75,xlim=lims,ylim=c(0,1000),
xlab="Potential geographic coverage (sq-km * 1000)",
ylab="Genebank accessions CA50 (sq-km * 1000)")
# 1:1 line (genebank coverage equal to potential coverage) plus fitted trend
abline(0,1,lwd=0.75,lty=2)
lines(prior$PD_COV/1000,fit$fitted.values/1000)
grid(lwd=0.75)
# taxon labels; the +/- offsets (in sq-km) nudge each label away from its point
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_sativus")]/1000+2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_sativus")]/1000,
"L. sativus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_cicera")]/1000+1500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_cicera")]/1000,
"L. cicera",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_aphaca")]/1000+2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_aphaca")]/1000,
"L. aphaca",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_pratensis")]/1000-2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_pratensis")]/1000,
"L. pratensis",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_ochrus")]/1000+1500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_ochrus")]/1000,
"L. ochrus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_clymenum")]/1000+2000,
prior$CA50_G[which(prior$Taxon=="Lathyrus_clymenum")]/1000,
"L. clymenum",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_inconspicuus")]/1000+2500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_inconspicuus")]/1000,
"L. inconspicuus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_annuus")]/1000+1500,
prior$CA50_G[which(prior$Taxon=="Lathyrus_annuus")]/1000,
"L. annuus",cex=0.5)
text(prior$PD_COV[which(prior$Taxon=="Lathyrus_pseudocicera")]/1000+2400,
prior$CA50_G[which(prior$Taxon=="Lathyrus_pseudocicera")]/1000-10,
"L. pseudocicera",cex=0.5)
dev.off()
#plot the gap richness maps, uncertainty and related stuff
# zipRead loads a gzip-compressed ESRI ASCII grid as a raster layer
source(paste(src.dir,"/000.zipRead.R",sep=""))
gap_rich <- zipRead(paste(crop_dir,"/gap_richness/",sep=""),"gap-richness.asc.gz")
gap_dpmax <- zipRead(paste(crop_dir,"/gap_richness/",sep=""),"gap-richness-dpmax.asc.gz")
gap_sdmax <- zipRead(paste(crop_dir,"/gap_richness/",sep=""),"gap-richness-sdmax.asc.gz")
# country outlines used as an overlay on every map
library(maptools); data(wrld_simpl)
# figure aspect ratio from the raster extent (factor 1.2 leaves legend room)
z <- extent(gap_rich)
aspect <- (z@ymax-z@ymin)*1.2/(z@xmax-z@xmin)
# one colour class per observed gap-richness value; grey for the lowest class
grich_brks <- unique(gap_rich[!is.na(gap_rich[])])
grich_cols <- c("grey 80",colorRampPalette(c("yellow","orange","red"))(length(grich_brks)-2))
#gap richness map
tiff(paste(crop_dir,"/figures/gap_richness.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(gap_rich,col=grich_cols,zlim=c(min(grich_brks),max(grich_brks)),useRaster=F,
breaks=grich_brks,lab.breaks=grich_brks,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
###############
#gsd_brks <- unique(quantile(gap_sdmax[],probs=seq(0,1,by=0.05),na.rm=T))
# mask out cells with zero gap richness before deriving uncertainty breaks
gap_sdmax[which(gap_rich[]==0)] <- NA
gsd_brks <- c(seq(0,max(gap_sdmax[],na.rm=T),by=0.05),max(gap_sdmax[],na.rm=T))
gsd_cols <- colorRampPalette(c("light green","green","light blue","blue"))(length(gsd_brks)-1)
gsd_labs <- round(gsd_brks,2)
#gap uncertainty map (standard deviation)
tiff(paste(crop_dir,"/figures/gap_richness_sd.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(gap_sdmax,col=gsd_cols,zlim=c(min(gsd_brks),max(gsd_brks)),useRaster=F,
breaks=gsd_brks,lab.breaks=gsd_labs,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
# same masking for the population-distance uncertainty surface; breaks are
# 5%-quantiles of the remaining cell values
gap_dpmax[which(gap_rich[]==0)] <- NA
gdp_brks <- unique(quantile(gap_dpmax[],probs=seq(0,1,by=0.05),na.rm=T))
gdp_cols <- colorRampPalette(c("yellow","green","blue"))(length(gdp_brks)-1)
gdp_labs <- round(gdp_brks,2)
#gap uncertainty map (popdist)
tiff(paste(crop_dir,"/figures/gap_richness_dp.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(gap_dpmax,col=gdp_cols,zlim=c(min(gdp_brks),max(gdp_brks)),useRaster=F,
breaks=gdp_brks,lab.breaks=gdp_labs,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
#comparison of expert and gap scores
# eps holds, per taxon, the gap-analysis final priority score (FPS) and the
# expert-assigned priority score (EPS)
eps <- read.csv(paste(crop_dir,"/priorities/expert_gap_comparison.csv",sep=""))
# rank correlation between the two scorings
# NOTE(review): `spear` is computed but never used below -- presumably it was
# inspected interactively; confirm before removing
spear <- cor(eps$FPS,eps$EPS,method="spearman")
# relative difference; *10 presumably maps a 0-10 score difference to percent
# -- TODO confirm scale
eps$RD <- (eps$FPS-eps$EPS)*10
fit <- lm(eps$EPS~eps$FPS)
tiff(paste(crop_dir,"/figures/expert_evaluation.tif",sep=""),
res=300,pointsize=12,width=1500,height=1000,units="px",compression="lzw")
par(mar=c(5,5,1,1),cex=0.8)
plot(eps$FPS,eps$EPS,xlab="Gap Analysis Final priority score",
ylab="Expert priority score",pch=20,xlim=c(0,8),ylim=c(0,8))
# fitted trend plus the 1:1 (perfect agreement) reference line
lines(eps$FPS,fit$fitted.values)
abline(0,1,lty=2)
grid()
dev.off()
# histogram of the relative differences between the two scorings
tiff(paste(crop_dir,"/figures/expert_evaluation_RD.tif",sep=""),
res=300,pointsize=12,width=1500,height=1000,units="px",compression="lzw")
par(mar=c(5,5,1,1),cex=0.8)
hist(eps$RD,xlab="Relative difference (%)",
ylab="Frequency (number of taxa)",
breaks=20,xlim=c(-100,100),col="grey 70",main=NA)
abline(v=0,col="red")
grid()
dev.off()
#plot species richness
## Figures: species-richness map and its uncertainty surface -------------------
## Reads the stacked species-richness raster and its per-cell maximum standard
## deviation, then renders both as TIFF maps with country outlines overlaid.
source(paste(src.dir,"/000.zipRead.R",sep=""))
sp_rich <- zipRead(paste(crop_dir,"/species_richness",sep=""),"species-richness.asc.gz")
sdmax <- zipRead(paste(crop_dir,"/species_richness",sep=""),"species-richness-sdmax.asc.gz")
library(maptools); data(wrld_simpl)
# figure aspect ratio from the raster extent (factor 1.2 leaves legend room)
z <- extent(sp_rich)
aspect <- (z@ymax-z@ymin)*1.2/(z@xmax-z@xmin)
# one colour class per observed richness value; grey for the lowest class
rich_brks <- unique(sp_rich[!is.na(sp_rich[])])
rich_cols <- c("grey 80",colorRampPalette(c("yellow","orange","red"))(length(rich_brks)-2))
#species richness map
tiff(paste(crop_dir,"/figures/sp_richness.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
plot(sp_rich,col=rich_cols,zlim=c(min(rich_brks),max(rich_brks)),useRaster=F,
breaks=rich_brks,lab.breaks=rich_brks,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
###############
# mask out cells with zero richness before deriving uncertainty breaks
sdmax[which(sp_rich[]==0)] <- NA
sd_brks <- c(seq(0,max(sdmax[],na.rm=T),by=0.05),max(sdmax[],na.rm=T))
sd_cols <- colorRampPalette(c("light green","green","light blue","blue"))(length(sd_brks)-1)
sd_labs <- round(sd_brks,2)
#species richness uncertainty map (standard deviation)
tiff(paste(crop_dir,"/figures/sp_richness_sd.tif",sep=""),
res=300,pointsize=7,width=1500,height=1500*aspect,units="px",compression="lzw")
par(mar=c(2.5,2.5,1,1),cex=0.8)
# fix: this previously plotted gap_sdmax (the gap-richness uncertainty raster),
# so the sp_richness_sd figure showed the wrong surface; sdmax is intended
plot(sdmax,col=sd_cols,zlim=c(min(sd_brks),max(sd_brks)),useRaster=F,
breaks=sd_brks,lab.breaks=sd_labs,
horizontal=T,
legend.width=1,
legend.shrink=0.99)
plot(wrld_simpl,add=T,lwd=0.5)
grid()
dev.off()
|
# Tests for the bundled FCWB.demo template brain and the templatebrain API.
context("Template brain data")

data(FCWB.demo)

test_that("origin returns correct result", {
  expect_equal(origin(FCWB.demo), c(0, 0, 0))
})

test_that("dim returns correct result", {
  expect_equal(dim(FCWB.demo), c(1769, 1026, 108))
})

test_that("voxdims returns correct result", {
  expect_equal(voxdims(FCWB.demo), c(0.318967307692308, 0.318427024390244, 1))
})

test_that("boundingbox returns correct result", {
  expected_bb <- structure(matrix(c(0, 563.9342, 0, 326.3877, 0, 107), nrow = 2),
                           class = 'boundingbox')
  expect_equivalent(boundingbox(FCWB.demo), expected_bb)
})

context("Template brain")

test_that("is.templatebrain works", {
  expect_true(is.templatebrain(FCWB.demo))
  expect_false(is.templatebrain("FCWB.demo"))
})

test_that("as.character.templatebrain works", {
  expect_equal(as.character(FCWB.demo), "FCWB")
  expect_equal(as.character(FCWB.demo, 'name'), FCWB.demo$name)
  expect_error(as.character(FCWB.demo, 'rhubarb'))
  # one templatebrain per letter; round-trips back through as.character
  brains <- lapply(LETTERS, templatebrain)
  expect_equal(sapply(brains, as.character), LETTERS)
})

test_that("as.templatebrain.im3d works", {
  nhdr_path <- system.file("images", "FCWB.nhdr", package = 'nat.templatebrains')
  expect_is(FCWB.test <- as.templatebrain(nhdr_path,
                                          name = "FlyCircuit Whole Brain (demonstration purposes)",
                                          sex = "Intersex", type = "Average"),
            'templatebrain')
  # the converted object must agree with the shipped FCWB.demo on these fields
  shared_fields <- c("name", "sex", "regName", "type", "dims", "voxdims",
                     "origin", "BoundingBox", "units")
  expect_equal(FCWB.test[shared_fields], FCWB.demo[shared_fields])
})
|
/nat.templatebrains/tests/testthat/test-template-brain-data.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,683
|
r
|
# Tests for the bundled FCWB.demo template brain and the templatebrain API.
context("Template brain data")

data(FCWB.demo)

test_that("origin returns correct result", {
  expect_equal(origin(FCWB.demo), c(0, 0, 0))
})

test_that("dim returns correct result", {
  expect_equal(dim(FCWB.demo), c(1769, 1026, 108))
})

test_that("voxdims returns correct result", {
  expect_equal(voxdims(FCWB.demo), c(0.318967307692308, 0.318427024390244, 1))
})

test_that("boundingbox returns correct result", {
  expected_bb <- structure(matrix(c(0, 563.9342, 0, 326.3877, 0, 107), nrow = 2),
                           class = 'boundingbox')
  expect_equivalent(boundingbox(FCWB.demo), expected_bb)
})

context("Template brain")

test_that("is.templatebrain works", {
  expect_true(is.templatebrain(FCWB.demo))
  expect_false(is.templatebrain("FCWB.demo"))
})

test_that("as.character.templatebrain works", {
  expect_equal(as.character(FCWB.demo), "FCWB")
  expect_equal(as.character(FCWB.demo, 'name'), FCWB.demo$name)
  expect_error(as.character(FCWB.demo, 'rhubarb'))
  # one templatebrain per letter; round-trips back through as.character
  brains <- lapply(LETTERS, templatebrain)
  expect_equal(sapply(brains, as.character), LETTERS)
})

test_that("as.templatebrain.im3d works", {
  nhdr_path <- system.file("images", "FCWB.nhdr", package = 'nat.templatebrains')
  expect_is(FCWB.test <- as.templatebrain(nhdr_path,
                                          name = "FlyCircuit Whole Brain (demonstration purposes)",
                                          sex = "Intersex", type = "Average"),
            'templatebrain')
  # the converted object must agree with the shipped FCWB.demo on these fields
  shared_fields <- c("name", "sex", "regName", "type", "dims", "voxdims",
                     "origin", "BoundingBox", "units")
  expect_equal(FCWB.test[shared_fields], FCWB.demo[shared_fields])
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/usair.R
\docType{data}
\name{usair}
\alias{usair}
\title{US Air Pollution}
\format{A data frame with 7 columns and 41 rows. The columns are:
\itemize{
\item{SO2}{Sulphur dioxide content of air in micrograms per cubic meter.}
\item{negtemp}{Negative value of Average annual temperature in fahrenheit.}
\item{manuf}{Number of manufacturing enterprises employing 20 or more workers.}
\item{pop}{Population size (1970 census) in thousands.}
\item{wind}{Average annual wind speed in miles per hour.}
\item{precip}{Average annual precipitation in inches.}
\item{days}{Average number of days with precipitation per year}
}}
\source{
Sokal and Rohlf (1981) Biometry: The Principles and Practices of Statistics in Biological Research
}
\usage{
data(usair)
}
\description{
The data were collected to investigate the determinants of pollution by considering SO2
level as the dependent variable and the remaining variables being potential explanatory variables.
}
\details{
Note that the column \code{negtemp} holds the negative of the average annual temperature.
}
\examples{
data(usair)
}
\references{
Hand, D. J., Daly, F., Lunn, A. D., McConway, K. J. and Ostrowski, E. (1994),
A handbook of small data sets, Chapman and Hall, London.
Brian S Everitt. An R and S-PLUS companion to multivariate analysis.
Springer Science & Business Media, 2006.
}
\keyword{datasets}
|
/man/usair.Rd
|
no_license
|
nemochina2008/brinla
|
R
| false
| true
| 1,427
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/usair.R
\docType{data}
\name{usair}
\alias{usair}
\title{US Air Pollution}
\format{A data frame with 7 columns and 41 rows. The columns are:
\itemize{
\item{SO2}{Sulphur dioxide content of air in micrograms per cubic meter.}
\item{negtemp}{Negative value of Average annual temperature in fahrenheit.}
\item{manuf}{Number of manufacturing enterprises employing 20 or more workers.}
\item{pop}{Population size (1970 census) in thousands.}
\item{wind}{Average annual wind speed in miles per hour.}
\item{precip}{Average annual precipitation in inches.}
\item{days}{Average number of days with precipitation per year}
}}
\source{
Sokal and Rohlf (1981) Biometry: The Principles and Practices of Statistics in Biological Research
}
\usage{
data(usair)
}
\description{
The data were collected to investigate the determinants of pollution by considering SO2
level as the dependent variable and the remaining variables being potential explanatory variables.
}
\details{
Note that the column \code{negtemp} holds the negative of the average annual temperature.
}
\examples{
data(usair)
}
\references{
Hand, D. J., Daly, F., Lunn, A. D., McConway, K. J. and Ostrowski, E. (1994),
A handbook of small data sets, Chapman and Hall, London.
Brian S Everitt. An R and S-PLUS companion to multivariate analysis.
Springer Science & Business Media, 2006.
}
\keyword{datasets}
|
## This script was developed with Dean C. Adams, Professor at Iowa State.
## Purpose: multivariate phylogenetic least squares (pGLS) analysis on
## size-corrected morphometric measurements.
#Burnaby's size-correction
#Find isometric size vector
# `data` is expected to be a (specimens x traits) table of positive
# measurements defined earlier in the workflow -- TODO confirm
f.iso <- array(1, dim = ncol(data)) / sqrt(ncol(data))
# calculate shape based on isometric size vector: project log-measurements
# onto the orthogonal complement of the isometric size vector
shape <- log(data) %*% (diag(ncol(data)) - (f.iso %*% solve(t(f.iso) %*% f.iso) %*% t(f.iso)))
#perform pca
# fix: the original called the non-existent function `prcopm`
shape.pca <- prcomp(shape, scale. = TRUE)
# Multivariate pGLS
# For a Brownian-motion model of trait evolution.
library(ape)

# phy:   a phylogeny in ape's phylo format
# y.mat: matrix of response variables (e.g. shape morphology)
# x.mat: matrix of predictor variables (e.g. microhabitat measurements)
# Returns a list with the MANOVA summary and the AIC of the transformed fit.
mult.pgls <- function(phy, y.mat, x.mat) {
  # expected among-species covariance under Brownian motion
  C <- vcv(phy)
  # prepend an intercept column, then align rows with the covariance matrix
  preds <- cbind(matrix(1, length(phy$tip.label)), x.mat)
  preds <- preds[rownames(C), ]
  resp <- y.mat[rownames(C), ]
  # whitening transform: inverse square root of C via its SVD
  dec <- svd(C)
  W <- solve(dec$u %*% diag(sqrt(dec$d)) %*% t(dec$u))
  ynew <- W %*% resp
  xnew <- W %*% preds
  # no-intercept fit: the intercept is already the first column of xnew
  list(summary = summary(manova(lm(ynew ~ xnew - 1))),
       AIC = extractAIC(lm(ynew ~ xnew - 1), k = 2, scale = 0))
}
# For an Ornstein-Uhlenbeck model of trait evolution.
library(ape)
#phy is a phylogeny in ape's phylo format,
#y.mat is a matrix with response variables (e.g. shape morphology),
#x.mat is a matrix with predictor variables (e.g. microhabitat measurements)
library(geiger)
# estimate the OU attraction strength, then build the matching correlation
# structure for the phylogeny
# fix: the original used typographic quotes (method=”OU”), which do not parse
alpha <- fitContinuous(phy, x.mat, method = "OU")$alpha
phy.ou <- corMartins(alpha, phy)
# OU variant of mult.pgls: the covariance matrix is taken from the OU
# correlation structure (phy.ou); phy supplies the tip labels.
# Returns a list with the MANOVA summary and the AIC of the transformed fit.
mult.pgls <- function(phy.ou, y.mat, x.mat, phy) {
  # among-species covariance implied by the OU correlation structure
  C <- vcv(phy.ou)
  # prepend an intercept column, then align rows with the covariance matrix
  preds <- cbind(matrix(1, length(phy$tip.label)), x.mat)
  preds <- preds[rownames(C), ]
  resp <- y.mat[rownames(C), ]
  # whitening transform: inverse square root of C via its SVD
  dec <- svd(C)
  W <- solve(dec$u %*% diag(sqrt(dec$d)) %*% t(dec$u))
  ynew <- W %*% resp
  xnew <- W %*% preds
  # no-intercept fit: the intercept is already the first column of xnew
  list(summary = summary(manova(lm(ynew ~ xnew - 1))),
       AIC = extractAIC(lm(ynew ~ xnew - 1), k = 2, scale = 0))
}
|
/Multivariate_PGLS.r
|
no_license
|
thomasblankers/statistics
|
R
| false
| false
| 1,990
|
r
|
## This script was developed with Dean C. Adams, Professor at Iowa State.
## Purpose: multivariate phylogenetic least squares (pGLS) analysis on
## size-corrected morphometric measurements.
#Burnaby's size-correction
#Find isometric size vector
# `data` is expected to be a (specimens x traits) table of positive
# measurements defined earlier in the workflow -- TODO confirm
f.iso <- array(1, dim = ncol(data)) / sqrt(ncol(data))
# calculate shape based on isometric size vector: project log-measurements
# onto the orthogonal complement of the isometric size vector
shape <- log(data) %*% (diag(ncol(data)) - (f.iso %*% solve(t(f.iso) %*% f.iso) %*% t(f.iso)))
#perform pca
# fix: the original called the non-existent function `prcopm`
shape.pca <- prcomp(shape, scale. = TRUE)
# Multivariate pGLS
# For a Brownian-motion model of trait evolution.
library(ape)

# phy:   a phylogeny in ape's phylo format
# y.mat: matrix of response variables (e.g. shape morphology)
# x.mat: matrix of predictor variables (e.g. microhabitat measurements)
# Returns a list with the MANOVA summary and the AIC of the transformed fit.
mult.pgls <- function(phy, y.mat, x.mat) {
  # expected among-species covariance under Brownian motion
  C <- vcv(phy)
  # prepend an intercept column, then align rows with the covariance matrix
  preds <- cbind(matrix(1, length(phy$tip.label)), x.mat)
  preds <- preds[rownames(C), ]
  resp <- y.mat[rownames(C), ]
  # whitening transform: inverse square root of C via its SVD
  dec <- svd(C)
  W <- solve(dec$u %*% diag(sqrt(dec$d)) %*% t(dec$u))
  ynew <- W %*% resp
  xnew <- W %*% preds
  # no-intercept fit: the intercept is already the first column of xnew
  list(summary = summary(manova(lm(ynew ~ xnew - 1))),
       AIC = extractAIC(lm(ynew ~ xnew - 1), k = 2, scale = 0))
}
# For an Ornstein-Uhlenbeck model of trait evolution.
library(ape)
#phy is a phylogeny in ape's phylo format,
#y.mat is a matrix with response variables (e.g. shape morphology),
#x.mat is a matrix with predictor variables (e.g. microhabitat measurements)
library(geiger)
# estimate the OU attraction strength, then build the matching correlation
# structure for the phylogeny
# fix: the original used typographic quotes (method=”OU”), which do not parse
alpha <- fitContinuous(phy, x.mat, method = "OU")$alpha
phy.ou <- corMartins(alpha, phy)
# Multivariate phylogenetic GLS under an Ornstein-Uhlenbeck model.
# phy.ou: corMartins correlation structure built from the fitted alpha;
# y.mat:  response matrix (rows = taxa); x.mat: predictor matrix (rows = taxa);
# phy:    the phylogeny itself (only used for its tip labels).
# Returns a list with the MANOVA summary and the AIC of the transformed fit.
mult.pgls<-function(phy.ou,y.mat,x.mat,phy) {
# Expected among-taxon covariance implied by the OU correlation structure
phy.mat<-vcv(phy.ou)
# Prepend an intercept column, then align X and Y row order with phy.mat
x.mat<-cbind(matrix(1,length(phy$tip.label)),x.mat)
x.mat<-x.mat[rownames(phy.mat),]
y.mat<-y.mat[rownames(phy.mat),]
# Inverse square root of the covariance matrix via SVD; premultiplying by it
# de-correlates the observations (GLS "whitening" transform)
mDnew<-solve(svd(phy.mat)$u%*%diag(sqrt(svd(phy.mat)$d))%*%t(svd(phy.mat)$u))
ynew<-mDnew %*% y.mat
xnew<-mDnew %*% x.mat
# OLS on the transformed data equals GLS on the original; "-1" because the
# intercept column is already included in xnew
summary=summary(manova(lm(ynew~xnew-1)))
AIC=extractAIC(lm(ynew~xnew-1), k=2, scale=0)
return(list(summary=summary, AIC=AIC))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Interface.R
\docType{methods}
\name{runRFclust}
\alias{runRFclust}
\alias{runRFclust,RFclust.SGE-method}
\title{description of function runRFclust}
\usage{
runRFclust(x, ntree = 500, nforest = 500, name = "RFrun", force = FALSE)
}
\arguments{
\item{x}{the RFclust.SGE object}
\item{ntree}{the number of trees to grow}
\item{nforest}{the number of forests to create}
\item{name}{the name of the random forest clustering run (if you want to run multiple)}
}
\value{
a distRF object to be analyzed by pamNew
}
\description{
run the random forest calculations returning the density matrix
the clusters will be created for the columns of the data.frame
}
|
/man/runRFclust-methods.Rd
|
permissive
|
stela2502/RFclust.SGE
|
R
| false
| true
| 731
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Interface.R
\docType{methods}
\name{runRFclust}
\alias{runRFclust}
\alias{runRFclust,RFclust.SGE-method}
\title{description of function runRFclust}
\usage{
runRFclust(x, ntree = 500, nforest = 500, name = "RFrun", force = FALSE)
}
\arguments{
\item{x}{the RFclust.SGE object}
\item{ntree}{the number of trees to grow}
\item{nforest}{the number of forests to create}
\item{name}{the name of the random forest clustering run (if you want to run multiple)}
}
\value{
a distRF object to be analyzed by pamNew
}
\description{
run the random forest calculations returning the density matrix
the clusters will be created for the columns of the data.frame
}
|
# Plot household global active power for 2007-02-01..2007-02-02 (plot 2).
# download and load the data into R
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', 'power.zip')
unzip('power.zip')
power <- read.table('household_power_consumption.txt', sep =';', header=TRUE, stringsAsFactors=FALSE, dec='.')
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
date1 <- power[power$Date == '2/2/2007', ]
date2 <- power[power$Date == '1/2/2007', ]
ds <- rbind(date2,date1)
# Missing readings are coded "?" so the column reads as character;
# as.numeric() converts them to NA (with a warning)
gap <- as.numeric(ds$Global_active_power)
# FIX: paste() joins Date and Time with a space, so the strptime format needs
# a matching space between %Y and %H (the original "%d/%m/%Y%H:%M:%S" relied
# on lenient whitespace handling in the numeric conversions)
times <- strptime(paste(ds$Date, ds$Time), format="%d/%m/%Y %H:%M:%S")
# plot 2: line chart of global active power over time, saved as 480x480 PNG
png("plot2.png", width = 480, height = 480, units = "px")
plot(times, gap, type='l', xlab = '', ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
matthew-kruse/ExData_Plotting1
|
R
| false
| false
| 748
|
r
|
# Plot household global active power for 2007-02-01..2007-02-02 (plot 2).
# download and load the data into R
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip', 'power.zip')
unzip('power.zip')
power <- read.table('household_power_consumption.txt', sep =';', header=TRUE, stringsAsFactors=FALSE, dec='.')
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
date1 <- power[power$Date == '2/2/2007', ]
date2 <- power[power$Date == '1/2/2007', ]
ds <- rbind(date2,date1)
# Missing readings are coded "?" so the column reads as character;
# as.numeric() converts them to NA (with a warning)
gap <- as.numeric(ds$Global_active_power)
# FIX: paste() joins Date and Time with a space, so the strptime format needs
# a matching space between %Y and %H (the original "%d/%m/%Y%H:%M:%S" relied
# on lenient whitespace handling in the numeric conversions)
times <- strptime(paste(ds$Date, ds$Time), format="%d/%m/%Y %H:%M:%S")
# plot 2: line chart of global active power over time, saved as 480x480 PNG
png("plot2.png", width = 480, height = 480, units = "px")
plot(times, gap, type='l', xlab = '', ylab="Global Active Power (kilowatts)")
dev.off()
|
# Plot relative mRNA expression from qPCR data as grouped bar plots.
# CT values are linearized (2^-CT), divided by the `normalizer` (housekeeping)
# gene, and scaled so the mean of `ref_group` equals 1; t-test significance
# labels vs. `ref_group` are added when `pvalue = TRUE`.
# data:      long table with columns Figure, Gene, Sample, Replicate, CT
#            (and Cells when facet_by is used)
# panel:     value of data$Figure selecting the rows to plot
# facet_by:  optional faceting variable; also triggers per-Cells normalization
# normalizer: housekeeping gene used as denominator
# ref_group: Sample level used as baseline and t-test reference
# levels:    ordering of the Sample factor
# chip:      NOTE(review): unused in this function -- looks vestigial, confirm
# Returns a ggpubr/ggplot object.
plotqPCR <- function(data, panel, facet_by = NULL, normalizer, ref_group, levels, chip = FALSE,
                     pvalue = F, pvalues_y = NULL, remove_y = F, print.p = F, ...) {
  data <- dplyr::filter(data, Figure == panel)
  all_genes <- unique(data$Gene)
  experimental_genes <- all_genes[-which(all_genes == normalizer)]
  samples <- unique(data$Sample)
  # Linearize CT and reshape so each gene becomes a column
  data_wide <- data %>%
    mutate(Value = 2^-as.numeric(CT)) %>%
    dplyr::select(-CT) %>%
    spread(Gene, Value) %>%
    mutate(Sample = factor(Sample, levels = levels))
  if(!is.null(facet_by)){
    # Faceted case: normalize within each cell line separately
    data_norm <- list()
    for (i in unique(data_wide$Cells)) {
      data_cells <- filter(data_wide, Cells == i)
      normalizer_values <- data_cells[,normalizer,drop = T]
      ratios <- data_cells[,all_genes]/normalizer_values
      ratios <- ratios[ ,experimental_genes, drop = F]
      data_ratios <- cbind(data_cells[,c("Cells", "Sample", "Replicate")],ratios)
      # Mean ratio of the reference group, used to scale that cell line to 1
      mean_ctrl <- data_ratios %>%
        dplyr::filter(Sample == ref_group) %>%
        dplyr::select(-Sample, -Replicate) %>%
        group_by(Cells) %>%
        summarise_all(mean)
      data_norm[[i]] <- data.frame(data_cells[,c("Cells", "Sample", "Replicate")],
                                   scale(data_ratios[ ,experimental_genes, drop = F], center = FALSE, scale = mean_ctrl[[2]]))
    }
    data_norm <- bind_rows(data_norm)
  } else {
    # Single-panel case: one normalization across the whole data set
    normalizer_values <- data_wide[,normalizer,drop = T]
    ratios <- data_wide[,all_genes]/normalizer_values
    ratios <- ratios[ ,experimental_genes, drop = F]
    data_norm <- cbind(data_wide[,c("Sample", "Replicate")],ratios)
    mean_ctrl <- data_norm %>%
      dplyr::filter(Sample == ref_group) %>%
      dplyr::select(-Sample, -Replicate) %>%
      summarise_all(mean)
    data_norm <- data.frame(data_wide[,c("Sample", "Replicate")],
                            scale(data_norm[ ,experimental_genes, drop = F], center = FALSE, scale = mean_ctrl))
  }
  # Back to long format for plotting; drop missing measurements
  data_plot <- melt(data_norm) %>%
    na.omit(.)
  # Significance symbols for the p-value cutpoints
  symnum.args <- list(cutpoints = c(0, 0.001, 0.01, 0.05, 1),
                      symbols = c("***", "**", "*","ns"))
  # Per-gene equal-variance t-tests of each group against ref_group
  pvalues <- data_plot %>%
    compare_means(value ~ Sample, data = ., method = "t.test",
                  ref.group = ref_group, group.by = "variable",
                  symnum.args = symnum.args, na.rm = T, var.equal = TRUE)
  if(pvalue) {
    if(length(experimental_genes) == 1){
      # One gene: let ggpubr place the significance labels itself
      plot <- data_plot %>%
        ggbarplot(x = "Sample", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        stat_compare_means(method = "t.test", label = "p.signif", symnum.args = symnum.args, method.args = list(var.equal = T),
                           ref.group = ref_group, na.rm = T) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    } else {
      # Several genes: place labels manually via annotate()
      if(is.null(pvalues_y)) {
        pvalues_y <- max(data_plot$value) + max(data_plot$value)/10
      }
      # Slight x-offset when more than two sample groups share a gene position
      if(length(samples) == 2) {
        pvalues_x <- seq_along(1:length(experimental_genes))
      } else {
        pvalues_x <- seq_along(1:length(experimental_genes))+0.15
      }
      # Split the flat p.signif vector into one chunk of symbols per gene
      .chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
      pvalues_sig <- .chunk2(pvalues$p.signif, length(experimental_genes))
      pvalues_sig <- lapply(pvalues_sig, paste, collapse = " ")
      plot <- data_plot %>%
        ggbarplot(x = "variable", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        theme(axis.text.x = element_text(face = "italic")) +
        annotate("text", x = pvalues_x, y = pvalues_y, label = pvalues_sig, size = 3) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    }
  } else {
    # No significance labels requested
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", ylab = "mRNA (A.U.)",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  }
  if(print.p){
    print(pvalues)
  }
  # Apply any extra ggpar() tweaks passed through "..."
  p2 <- ggpar(plot,xlab = FALSE,...)
  if(remove_y){
    p2 <- p2 + rremove("y.axis") + rremove("y.text") + rremove("y.ticks")
  }
  return(p2)
}
# Plot already-normalized qPCR values (column CT holds the plotted value, no
# 2^-CT transform or housekeeping normalization is applied here) as grouped
# bar plots with t-test significance labels vs. ref_group.
# data:      long table with columns Figure, Cells, Sample, Replicate, Gene, CT
# panel:     value of data$Figure selecting the rows to plot
# ref_group: Sample level used as t-test reference
# facet_by:  faceting variable passed to ggbarplot
# Returns a ggpubr/ggplot object.
plot_normqPCR <- function(data, panel, ref_group, facet_by, pvalue = F,
                          pvalues_y = NULL, remove_y = F, print.p = F, ...) {
  # Select the requested panel and rename to the generic variable/value pair
  data_plot <- dplyr::filter(data, Figure == panel) %>%
    dplyr::select(Cells, Sample, Replicate,variable = Gene, value = CT)
  all_genes <- experimental_genes <- unique(data_plot$variable)
  samples <- unique(data_plot$Sample)
  # Significance symbols for the p-value cutpoints
  symnum.args <- list(cutpoints = c(0, 0.001, 0.01, 0.05, 1),
                      symbols = c("***", "**", "*","ns"))
  # Per-gene t-tests of each group against ref_group
  # NOTE(review): unlike plotqPCR this does not pass var.equal = TRUE --
  # confirm whether Welch vs. pooled-variance t-tests is intentional here
  pvalues <- compare_means(value ~ Sample,
                           data = data_plot,
                           method = "t.test",
                           ref.group = ref_group,
                           group.by = "variable",
                           symnum.args = symnum.args,
                           na.rm = T)
  if(pvalue) {
    if(length(experimental_genes) == 1){
      # One gene: let ggpubr place the significance labels itself
      plot <- data_plot %>%
        ggbarplot(x = "Sample", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        stat_compare_means(method = "t.test", label = "p.signif", symnum.args = symnum.args,
                           ref.group = ref_group, na.rm = T) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    } else {
      # Several genes: place labels manually via annotate()
      if(is.null(pvalues_y)) {
        pvalues_y <- max(data_plot$value) + max(data_plot$value)/10
      }
      # Slight x-offset when more than two sample groups share a gene position
      if(length(samples) == 2) {
        pvalues_x <- seq_along(1:length(experimental_genes))
      } else {
        pvalues_x <- seq_along(1:length(experimental_genes))+0.15
      }
      # Split the flat p.signif vector into one chunk of symbols per gene
      .chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
      pvalues_sig <- .chunk2(pvalues$p.signif, length(experimental_genes))
      pvalues_sig <- lapply(pvalues_sig, paste, collapse = " ")
      # NOTE(review): the annotate() below uses pvalues$p.signif rather than
      # the chunked pvalues_sig computed above -- confirm which is intended
      plot <- data_plot %>%
        ggbarplot(x = "variable", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        theme(axis.text.x = element_text(face = "italic")) +
        annotate("text", x = pvalues_x, y = pvalues_y, label = pvalues$p.signif, size = 3) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    }
  } else {
    # No significance labels requested
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", facet.by = facet_by,
                ylab = "mRNA (A.U.)",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  }
  if(print.p){
    print(pvalues)
  }
  # Apply any extra ggpar() tweaks passed through "..."
  p2 <- ggpar(plot,xlab = FALSE,...)
  if(remove_y){
    p2 <- p2 + rremove("y.axis") + rremove("y.text") + rremove("y.ticks")
  }
  return(p2)
}
# Plot ChIP-qPCR enrichment as "% of Input" bar plots.
# CT values are linearized (2^-CT) and divided by the mean of the "Input"
# sample for each amplicon; Input rows are then dropped from the plot.
# data:      long table with columns Figure, Sample, Replicate, Gene, CT
# panel:     value of data$Figure selecting the rows to plot
# levels:    ordering of the Sample factor (Input excluded)
# ref_group: Sample level used as t-test reference
# Returns a ggpubr/ggplot object.
plotChIP <- function(data, panel, levels, ref_group, pvalue = F, pvalues_y = NULL,
                     facet_by = NULL, remove_y = F, print.p = F, ...) {
  data <- dplyr::filter(data, Figure == panel) %>%
    dplyr::select(Sample,Replicate,Gene,CT)
  experimental_genes <- unique(data$Gene)
  # Linearize CT and reshape so each amplicon becomes a column
  data_wide <- data %>%
    mutate(Value = 2^-CT) %>%
    dplyr::select(-CT) %>%
    spread(Gene, Value)
  # Mean Input signal per amplicon, used as the normalization denominator
  mean_input <- data_wide %>%
    dplyr::filter(Sample == "Input") %>%
    dplyr::select(-Sample, -Replicate) %>%
    summarise_all(mean)
  mat <- as.matrix(data_wide[ ,experimental_genes])
  vec <- as.numeric(mean_input[experimental_genes])
  # Divide every column by its amplicon's mean Input value
  ratios <- sweep(mat, 2, vec, `/`)
  data_norm <- cbind(data_wide[,c("Sample", "Replicate")], ratios) %>% filter(Sample != "Input")
  # FIX: `samples` was used below (pvalues_x offset) but never defined in this
  # function, causing "object 'samples' not found" when pvalue = TRUE.
  # Computed from the normalized data so "Input" is not counted as a group.
  samples <- unique(data_norm$Sample)
  # Back to long format for plotting; drop missing measurements
  data_plot <- melt(data_norm) %>%
    na.omit(.) %>%
    mutate(Sample = factor(Sample, levels = levels))
  # Significance symbols for the p-value cutpoints
  symnum.args <- list(cutpoints = c(0, 0.001, 0.01, 0.05, 1),
                      symbols = c("***", "**", "*","ns"))
  # Per-amplicon equal-variance t-tests of each group against ref_group
  pvalues <- data_plot %>%
    compare_means(value ~ Sample, data = ., method = "t.test",
                  ref.group = ref_group, group.by = "variable",
                  symnum.args = symnum.args, na.rm = T, var.equal = TRUE)
  if(pvalue) {
    if(is.null(pvalues_y)) {
      pvalues_y <- max(data_plot$value) + max(data_plot$value)/10
    }
    # Slight x-offset when more than two sample groups share an amplicon position
    if(length(samples) == 2) {
      pvalues_x <- seq_along(1:length(experimental_genes))
    } else {
      pvalues_x <- seq_along(1:length(experimental_genes))+0.15
    }
    # Split the flat p.signif vector into one chunk of symbols per amplicon
    .chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
    pvalues_sig <- .chunk2(pvalues$p.signif, length(experimental_genes))
    pvalues_sig <- lapply(pvalues_sig, paste, collapse = " ")
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", facet.by = facet_by,
                ylab = "% of Input",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      annotate("text", x = pvalues_x, y = pvalues_y, label = pvalues_sig, size = 3) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  } else {
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", ylab = "% of Input",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  }
  if(print.p){
    print(pvalues)
  }
  # Apply any extra ggpar() tweaks passed through "..."
  p2 <- ggpar(plot,xlab = FALSE,...)
  if(remove_y){
    p2 <- p2 + rremove("y.axis") + rremove("y.text") + rremove("y.ticks")
  }
  return(p2)
}
|
/Scripts/plotqPCR.R
|
no_license
|
squatrim/marques2020
|
R
| false
| false
| 11,079
|
r
|
# Plot relative mRNA expression from qPCR data as grouped bar plots.
# CT values are linearized (2^-CT), divided by the `normalizer` (housekeeping)
# gene, and scaled so the mean of `ref_group` equals 1; t-test significance
# labels vs. `ref_group` are added when `pvalue = TRUE`.
# data:      long table with columns Figure, Gene, Sample, Replicate, CT
#            (and Cells when facet_by is used)
# panel:     value of data$Figure selecting the rows to plot
# facet_by:  optional faceting variable; also triggers per-Cells normalization
# normalizer: housekeeping gene used as denominator
# ref_group: Sample level used as baseline and t-test reference
# levels:    ordering of the Sample factor
# chip:      NOTE(review): unused in this function -- looks vestigial, confirm
# Returns a ggpubr/ggplot object.
plotqPCR <- function(data, panel, facet_by = NULL, normalizer, ref_group, levels, chip = FALSE,
                     pvalue = F, pvalues_y = NULL, remove_y = F, print.p = F, ...) {
  data <- dplyr::filter(data, Figure == panel)
  all_genes <- unique(data$Gene)
  experimental_genes <- all_genes[-which(all_genes == normalizer)]
  samples <- unique(data$Sample)
  # Linearize CT and reshape so each gene becomes a column
  data_wide <- data %>%
    mutate(Value = 2^-as.numeric(CT)) %>%
    dplyr::select(-CT) %>%
    spread(Gene, Value) %>%
    mutate(Sample = factor(Sample, levels = levels))
  if(!is.null(facet_by)){
    # Faceted case: normalize within each cell line separately
    data_norm <- list()
    for (i in unique(data_wide$Cells)) {
      data_cells <- filter(data_wide, Cells == i)
      normalizer_values <- data_cells[,normalizer,drop = T]
      ratios <- data_cells[,all_genes]/normalizer_values
      ratios <- ratios[ ,experimental_genes, drop = F]
      data_ratios <- cbind(data_cells[,c("Cells", "Sample", "Replicate")],ratios)
      # Mean ratio of the reference group, used to scale that cell line to 1
      mean_ctrl <- data_ratios %>%
        dplyr::filter(Sample == ref_group) %>%
        dplyr::select(-Sample, -Replicate) %>%
        group_by(Cells) %>%
        summarise_all(mean)
      data_norm[[i]] <- data.frame(data_cells[,c("Cells", "Sample", "Replicate")],
                                   scale(data_ratios[ ,experimental_genes, drop = F], center = FALSE, scale = mean_ctrl[[2]]))
    }
    data_norm <- bind_rows(data_norm)
  } else {
    # Single-panel case: one normalization across the whole data set
    normalizer_values <- data_wide[,normalizer,drop = T]
    ratios <- data_wide[,all_genes]/normalizer_values
    ratios <- ratios[ ,experimental_genes, drop = F]
    data_norm <- cbind(data_wide[,c("Sample", "Replicate")],ratios)
    mean_ctrl <- data_norm %>%
      dplyr::filter(Sample == ref_group) %>%
      dplyr::select(-Sample, -Replicate) %>%
      summarise_all(mean)
    data_norm <- data.frame(data_wide[,c("Sample", "Replicate")],
                            scale(data_norm[ ,experimental_genes, drop = F], center = FALSE, scale = mean_ctrl))
  }
  # Back to long format for plotting; drop missing measurements
  data_plot <- melt(data_norm) %>%
    na.omit(.)
  # Significance symbols for the p-value cutpoints
  symnum.args <- list(cutpoints = c(0, 0.001, 0.01, 0.05, 1),
                      symbols = c("***", "**", "*","ns"))
  # Per-gene equal-variance t-tests of each group against ref_group
  pvalues <- data_plot %>%
    compare_means(value ~ Sample, data = ., method = "t.test",
                  ref.group = ref_group, group.by = "variable",
                  symnum.args = symnum.args, na.rm = T, var.equal = TRUE)
  if(pvalue) {
    if(length(experimental_genes) == 1){
      # One gene: let ggpubr place the significance labels itself
      plot <- data_plot %>%
        ggbarplot(x = "Sample", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        stat_compare_means(method = "t.test", label = "p.signif", symnum.args = symnum.args, method.args = list(var.equal = T),
                           ref.group = ref_group, na.rm = T) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    } else {
      # Several genes: place labels manually via annotate()
      if(is.null(pvalues_y)) {
        pvalues_y <- max(data_plot$value) + max(data_plot$value)/10
      }
      # Slight x-offset when more than two sample groups share a gene position
      if(length(samples) == 2) {
        pvalues_x <- seq_along(1:length(experimental_genes))
      } else {
        pvalues_x <- seq_along(1:length(experimental_genes))+0.15
      }
      # Split the flat p.signif vector into one chunk of symbols per gene
      .chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
      pvalues_sig <- .chunk2(pvalues$p.signif, length(experimental_genes))
      pvalues_sig <- lapply(pvalues_sig, paste, collapse = " ")
      plot <- data_plot %>%
        ggbarplot(x = "variable", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        theme(axis.text.x = element_text(face = "italic")) +
        annotate("text", x = pvalues_x, y = pvalues_y, label = pvalues_sig, size = 3) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    }
  } else {
    # No significance labels requested
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", ylab = "mRNA (A.U.)",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  }
  if(print.p){
    print(pvalues)
  }
  # Apply any extra ggpar() tweaks passed through "..."
  p2 <- ggpar(plot,xlab = FALSE,...)
  if(remove_y){
    p2 <- p2 + rremove("y.axis") + rremove("y.text") + rremove("y.ticks")
  }
  return(p2)
}
# Plot already-normalized qPCR values (column CT holds the plotted value, no
# 2^-CT transform or housekeeping normalization is applied here) as grouped
# bar plots with t-test significance labels vs. ref_group.
# data:      long table with columns Figure, Cells, Sample, Replicate, Gene, CT
# panel:     value of data$Figure selecting the rows to plot
# ref_group: Sample level used as t-test reference
# facet_by:  faceting variable passed to ggbarplot
# Returns a ggpubr/ggplot object.
plot_normqPCR <- function(data, panel, ref_group, facet_by, pvalue = F,
                          pvalues_y = NULL, remove_y = F, print.p = F, ...) {
  # Select the requested panel and rename to the generic variable/value pair
  data_plot <- dplyr::filter(data, Figure == panel) %>%
    dplyr::select(Cells, Sample, Replicate,variable = Gene, value = CT)
  all_genes <- experimental_genes <- unique(data_plot$variable)
  samples <- unique(data_plot$Sample)
  # Significance symbols for the p-value cutpoints
  symnum.args <- list(cutpoints = c(0, 0.001, 0.01, 0.05, 1),
                      symbols = c("***", "**", "*","ns"))
  # Per-gene t-tests of each group against ref_group
  # NOTE(review): unlike plotqPCR this does not pass var.equal = TRUE --
  # confirm whether Welch vs. pooled-variance t-tests is intentional here
  pvalues <- compare_means(value ~ Sample,
                           data = data_plot,
                           method = "t.test",
                           ref.group = ref_group,
                           group.by = "variable",
                           symnum.args = symnum.args,
                           na.rm = T)
  if(pvalue) {
    if(length(experimental_genes) == 1){
      # One gene: let ggpubr place the significance labels itself
      plot <- data_plot %>%
        ggbarplot(x = "Sample", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        stat_compare_means(method = "t.test", label = "p.signif", symnum.args = symnum.args,
                           ref.group = ref_group, na.rm = T) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    } else {
      # Several genes: place labels manually via annotate()
      if(is.null(pvalues_y)) {
        pvalues_y <- max(data_plot$value) + max(data_plot$value)/10
      }
      # Slight x-offset when more than two sample groups share a gene position
      if(length(samples) == 2) {
        pvalues_x <- seq_along(1:length(experimental_genes))
      } else {
        pvalues_x <- seq_along(1:length(experimental_genes))+0.15
      }
      # Split the flat p.signif vector into one chunk of symbols per gene
      .chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
      pvalues_sig <- .chunk2(pvalues$p.signif, length(experimental_genes))
      pvalues_sig <- lapply(pvalues_sig, paste, collapse = " ")
      # NOTE(review): the annotate() below uses pvalues$p.signif rather than
      # the chunked pvalues_sig computed above -- confirm which is intended
      plot <- data_plot %>%
        ggbarplot(x = "variable", y = "value",
                  add = c("mean_sd", "jitter"),
                  color = "Sample", facet.by = facet_by,
                  ylab = "mRNA (A.U.)",
                  position = position_dodge(0.8)) +
        theme_classic() + scale_y_continuous(expand = c(0, 0)) +
        theme(axis.text.x = element_text(face = "italic")) +
        annotate("text", x = pvalues_x, y = pvalues_y, label = pvalues$p.signif, size = 3) +
        font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
    }
  } else {
    # No significance labels requested
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", facet.by = facet_by,
                ylab = "mRNA (A.U.)",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  }
  if(print.p){
    print(pvalues)
  }
  # Apply any extra ggpar() tweaks passed through "..."
  p2 <- ggpar(plot,xlab = FALSE,...)
  if(remove_y){
    p2 <- p2 + rremove("y.axis") + rremove("y.text") + rremove("y.ticks")
  }
  return(p2)
}
# Plot ChIP-qPCR enrichment as "% of Input" bar plots.
# CT values are linearized (2^-CT) and divided by the mean of the "Input"
# sample for each amplicon; Input rows are then dropped from the plot.
# data:      long table with columns Figure, Sample, Replicate, Gene, CT
# panel:     value of data$Figure selecting the rows to plot
# levels:    ordering of the Sample factor (Input excluded)
# ref_group: Sample level used as t-test reference
# Returns a ggpubr/ggplot object.
plotChIP <- function(data, panel, levels, ref_group, pvalue = F, pvalues_y = NULL,
                     facet_by = NULL, remove_y = F, print.p = F, ...) {
  data <- dplyr::filter(data, Figure == panel) %>%
    dplyr::select(Sample,Replicate,Gene,CT)
  experimental_genes <- unique(data$Gene)
  # Linearize CT and reshape so each amplicon becomes a column
  data_wide <- data %>%
    mutate(Value = 2^-CT) %>%
    dplyr::select(-CT) %>%
    spread(Gene, Value)
  # Mean Input signal per amplicon, used as the normalization denominator
  mean_input <- data_wide %>%
    dplyr::filter(Sample == "Input") %>%
    dplyr::select(-Sample, -Replicate) %>%
    summarise_all(mean)
  mat <- as.matrix(data_wide[ ,experimental_genes])
  vec <- as.numeric(mean_input[experimental_genes])
  # Divide every column by its amplicon's mean Input value
  ratios <- sweep(mat, 2, vec, `/`)
  data_norm <- cbind(data_wide[,c("Sample", "Replicate")], ratios) %>% filter(Sample != "Input")
  # FIX: `samples` was used below (pvalues_x offset) but never defined in this
  # function, causing "object 'samples' not found" when pvalue = TRUE.
  # Computed from the normalized data so "Input" is not counted as a group.
  samples <- unique(data_norm$Sample)
  # Back to long format for plotting; drop missing measurements
  data_plot <- melt(data_norm) %>%
    na.omit(.) %>%
    mutate(Sample = factor(Sample, levels = levels))
  # Significance symbols for the p-value cutpoints
  symnum.args <- list(cutpoints = c(0, 0.001, 0.01, 0.05, 1),
                      symbols = c("***", "**", "*","ns"))
  # Per-amplicon equal-variance t-tests of each group against ref_group
  pvalues <- data_plot %>%
    compare_means(value ~ Sample, data = ., method = "t.test",
                  ref.group = ref_group, group.by = "variable",
                  symnum.args = symnum.args, na.rm = T, var.equal = TRUE)
  if(pvalue) {
    if(is.null(pvalues_y)) {
      pvalues_y <- max(data_plot$value) + max(data_plot$value)/10
    }
    # Slight x-offset when more than two sample groups share an amplicon position
    if(length(samples) == 2) {
      pvalues_x <- seq_along(1:length(experimental_genes))
    } else {
      pvalues_x <- seq_along(1:length(experimental_genes))+0.15
    }
    # Split the flat p.signif vector into one chunk of symbols per amplicon
    .chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
    pvalues_sig <- .chunk2(pvalues$p.signif, length(experimental_genes))
    pvalues_sig <- lapply(pvalues_sig, paste, collapse = " ")
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", facet.by = facet_by,
                ylab = "% of Input",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      annotate("text", x = pvalues_x, y = pvalues_y, label = pvalues_sig, size = 3) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  } else {
    plot <- data_plot %>%
      ggbarplot(x = "variable", y = "value",
                add = c("mean_sd", "jitter"),
                color = "Sample", ylab = "% of Input",
                position = position_dodge(0.8)) +
      theme_classic() + scale_y_continuous(expand = c(0, 0)) +
      theme(axis.text.x = element_text(face = "italic")) +
      font("xy.text", size = 8) + font("xlab", size = 10) + font("ylab", size = 10)
  }
  if(print.p){
    print(pvalues)
  }
  # Apply any extra ggpar() tweaks passed through "..."
  p2 <- ggpar(plot,xlab = FALSE,...)
  if(remove_y){
    p2 <- p2 + rremove("y.axis") + rremove("y.text") + rremove("y.ticks")
  }
  return(p2)
}
|
# Plot 4: total PM2.5 emissions from coal-combustion sources per year.
library(dplyr)
library(ggplot2)
# Reading datasets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# filtering the dataset for coal combustion-related sources from 1999 to 2008
CoalComp<-NEI[NEI$SCC %in% SCC[grep("Coal", SCC$EI.Sector),"SCC"],]
# Finding total PM2.5 emission from coal combustion sources for each of the years 1999, 2002, 2005, and 2008
SumByYear <- CoalComp %>% group_by(year) %>% summarise(sumEmissions=sum(Emissions))
# FIX: "soures" typo in the title corrected to "sources"; `fill` dropped from
# the aesthetics because it has no effect on geom_line()
ggplot(SumByYear, aes(x=year, y=sumEmissions)) +
  geom_line()+
  labs(x="year")+
  labs(y="Emission")+
  labs(title="Emission across years for coal combustion sources.")
ggsave("plot4.png", plot = last_plot())
# NOTE(review): dev.off() removed -- ggsave() manages its own graphics device,
# and calling dev.off() in a fresh session with no open device is an error
#From the plot we can see that the emissions from coal combustion-related sources
# has decreased from the year 1999 to 2008.
|
/exdata Assignment w4/plot4.R
|
no_license
|
yousefbasel/ExData_Plotting1
|
R
| false
| false
| 845
|
r
|
# Plot 4: total PM2.5 emissions from coal-combustion sources per year.
library(dplyr)
library(ggplot2)
# Reading datasets.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# filtering the dataset for coal combustion-related sources from 1999 to 2008
CoalComp<-NEI[NEI$SCC %in% SCC[grep("Coal", SCC$EI.Sector),"SCC"],]
# Finding total PM2.5 emission from coal combustion sources for each of the years 1999, 2002, 2005, and 2008
SumByYear <- CoalComp %>% group_by(year) %>% summarise(sumEmissions=sum(Emissions))
# FIX: "soures" typo in the title corrected to "sources"; `fill` dropped from
# the aesthetics because it has no effect on geom_line()
ggplot(SumByYear, aes(x=year, y=sumEmissions)) +
  geom_line()+
  labs(x="year")+
  labs(y="Emission")+
  labs(title="Emission across years for coal combustion sources.")
ggsave("plot4.png", plot = last_plot())
# NOTE(review): dev.off() removed -- ggsave() manages its own graphics device,
# and calling dev.off() in a fresh session with no open device is an error
#From the plot we can see that the emissions from coal combustion-related sources
# has decreased from the year 1999 to 2008.
|
# Exploratory dplyr queries on ENEM exam microdata.
# NOTE(review): assumes `enem_pe` and `enem_2014` data frames are already
# loaded in the session -- confirm the upstream ETL step provides them.
library(dplyr)
# Number of rows (test takers) per municipality of residence
count(enem_pe, NO_MUNICIPIO_RESIDENCIA)
# Average of the mean grade, grouped by sex
enem_pe %>% group_by(TP_SEXO) %>% summarise(avg = mean(nota_media))
# Subset for the municipality of Garanhuns, keeping grade, income and sex
enem_pe_GUS <- enem_2014 %>%
  filter(NO_MUNICIPIO_RESIDENCIA == 'GARANHUNS') %>%
  select(nota_media, renda_pc, TP_SEXO)
|
/scripts/dplyr.R
|
no_license
|
fabiana-costa/etl_r_ppgcp
|
R
| false
| false
| 250
|
r
|
# Exploratory dplyr queries on ENEM exam microdata.
# NOTE(review): assumes `enem_pe` and `enem_2014` data frames are already
# loaded in the session -- confirm the upstream ETL step provides them.
library(dplyr)
# Number of rows (test takers) per municipality of residence
count(enem_pe, NO_MUNICIPIO_RESIDENCIA)
# Average of the mean grade, grouped by sex
enem_pe %>% group_by(TP_SEXO) %>% summarise(avg = mean(nota_media))
# Subset for the municipality of Garanhuns, keeping grade, income and sex
enem_pe_GUS <- enem_2014 %>%
  filter(NO_MUNICIPIO_RESIDENCIA == 'GARANHUNS') %>%
  select(nota_media, renda_pc, TP_SEXO)
|
#' dann
#'
#' Run Discriminant Adaptive Nearest Neighbors
#'
#' @param x covariates matrix
#' @param testx test covariate matrix
#' @param y labels
#' @param k number of clusters
#' @param kmetric metric
#' @param epsilon epsilon
#' @param fullw Boolean
#' @param scalar Boolean
#' @param iter maximum number of iterations
#' @param covmin cov
#' @param cv boolean reflecting whether to cross-validate or not
#'
#'
#' @return An integer matrix of predicted class labels with one row per test
#' observation (or per training observation when \code{cv = TRUE}) and one
#' column per value of \code{epsilon}; columns are named by the rounded
#' epsilon values.
#'
#' @export
#'
#' @examples \dontrun{
#' dann(x <- matrix(rnorm(120,1,.2)), testx <- glass.test$x, y <- matrix(rnorm(120,1,.5)),
#' epsilon = 1, fullw = FALSE, iter = 100, covmin = 1e-04, cv = FALSE)
#' }
dann <- function(x, testx = matrix(nrow = 1, ncol = p), y, k = 5,
kmetric = max(50, 0.2 * n), epsilon = 1, fullw = FALSE, scalar = FALSE, iter = 1,
covmin = 1e-04, cv = FALSE) {
# Coerce inputs to the storage modes expected by the Fortran routine
storage.mode(x) <- "double"
storage.mode(testx) <- "double"
storage.mode(y) <- "integer"
np <- dim(x)
p <- np[2]
n <- np[1]
storage.mode(epsilon) <- "double"
neps <- length(epsilon)
nclass <- length(table(y))
# Under cross-validation, every training point is also a test point
if (cv) {
ntest <- n
} else {
ntest <- nrow(testx)
}
# Output container: one prediction per test point per epsilon value
pred <- matrix(integer(ntest * neps), nrow = ntest, ncol = neps,
dimnames = list(NULL, format(round(epsilon, 5))))
# Call the compiled "dann" routine; the trailing arguments are scratch
# work arrays sized for the Fortran code. NOTE(review): the argument order
# must match the Fortran signature exactly -- do not reorder.
.Fortran("dann",
np[1], np[2], x, y, nclass, t(testx), cv, ntest,
pred, kmetric, k, iter, fullw, scalar, epsilon,
neps, integer(n), double(n), matrix(double(p^2), nrow = p, ncol = p),
covmin, matrix(double(nclass * p), nrow = nclass, ncol = p),
double(n), as.single(runif(ntest)),
double(n + 2 * p^2 + 3 * p), PACKAGE = "dann")$pred
}
|
/R/dann.R
|
permissive
|
soodoku/dann
|
R
| false
| false
| 1,896
|
r
|
#' dann
#'
#' Run Discriminant Adaptive Nearest Neighbors
#'
#' @param x covariates matrix
#' @param testx test covariate matrix
#' @param y labels
#' @param k number of clusters
#' @param kmetric metric
#' @param epsilon epsilon
#' @param fullw Boolean
#' @param scalar Boolean
#' @param iter maximum number of iterations
#' @param covmin cov
#' @param cv boolean reflecting whether to cross-validate or not
#'
#'
#' @return An integer matrix of predicted class labels with one row per test
#' observation (or per training observation when \code{cv = TRUE}) and one
#' column per value of \code{epsilon}; columns are named by the rounded
#' epsilon values.
#'
#' @export
#'
#' @examples \dontrun{
#' dann(x <- matrix(rnorm(120,1,.2)), testx <- glass.test$x, y <- matrix(rnorm(120,1,.5)),
#' epsilon = 1, fullw = FALSE, iter = 100, covmin = 1e-04, cv = FALSE)
#' }
dann <- function(x, testx = matrix(nrow = 1, ncol = p), y, k = 5,
kmetric = max(50, 0.2 * n), epsilon = 1, fullw = FALSE, scalar = FALSE, iter = 1,
covmin = 1e-04, cv = FALSE) {
# Coerce inputs to the storage modes expected by the Fortran routine
storage.mode(x) <- "double"
storage.mode(testx) <- "double"
storage.mode(y) <- "integer"
np <- dim(x)
p <- np[2]
n <- np[1]
storage.mode(epsilon) <- "double"
neps <- length(epsilon)
nclass <- length(table(y))
# Under cross-validation, every training point is also a test point
if (cv) {
ntest <- n
} else {
ntest <- nrow(testx)
}
# Output container: one prediction per test point per epsilon value
pred <- matrix(integer(ntest * neps), nrow = ntest, ncol = neps,
dimnames = list(NULL, format(round(epsilon, 5))))
# Call the compiled "dann" routine; the trailing arguments are scratch
# work arrays sized for the Fortran code. NOTE(review): the argument order
# must match the Fortran signature exactly -- do not reorder.
.Fortran("dann",
np[1], np[2], x, y, nclass, t(testx), cv, ntest,
pred, kmetric, k, iter, fullw, scalar, epsilon,
neps, integer(n), double(n), matrix(double(p^2), nrow = p, ncol = p),
covmin, matrix(double(nclass * p), nrow = nclass, ncol = p),
double(n), as.single(runif(ntest)),
double(n + 2 * p^2 + 3 * p), PACKAGE = "dann")$pred
}
|
# Shiny dashboard: donor-segmentation explorer.
# Tab 1 renders a Sankey "transition matrix" between segments for a chosen
# year pair; tab 2 plots the evolution of a selected variable for a segment.
# NOTE(review): relies on data frames `data` and `data2` existing in the
# global environment before the app is launched -- confirm they are loaded
# by a companion script.
library(shiny)
library(shinydashboard)
library(networkD3)
library(ggplot2)
library(dplyr)
# d3 ordinal colour scale mapping each segment name to a fixed colour
my_color <- 'd3.scaleOrdinal() .domain(["_NULL", "_AUTO","_BOTTOM","_COLD","_NEW","_TOP","_LOST","_WARM","my_unique_group"]) .range(["aquamarine","darkorange","darkmagenta","blue","gold","yellow","green","red","grey"])'
#SHINY APP
# UI: year-pair selector, segment selector, variable radio buttons, two tabs
ui <- dashboardPage(
  dashboardHeader(title = "Assignment 3"),
  dashboardSidebar(),
  dashboardBody(
    fluidPage(
      selectInput("year", "Transition Matrix Pedriod:",
                  c("2008-2009" = "2009",
                    "2009-2010" = "2010",
                    "2010-2011" = "2011",
                    "2011-2012" = "2012",
                    "2012-2013" = "2013",
                    "2013-2014" = "2014",
                    "2014-2015" = "2015",
                    "2015-2016" = "2016",
                    "2016-2017" = "2017",
                    "2017-2018" = "2018")),
      selectInput("Segment", "Query:",
                  c("New DO" = 2,
                    "New PA" = 3,
                    "Top" = 4,
                    "Bottom" = 5,
                    "Cold" = 6,
                    "Warm" = 7,
                    "Lost" = 8,
                    "Auto" = 9)),
      radioButtons("Variable",label="Number of (/amount):",
                   choices = c("WOMEN"="WOMEN","MEN"="MEN",
                               "NOTSURE"="NOTSURE","ZOMBIE"="ZOMBIE","MAILING"="MAILING","PRELEVEMENT"="PRELEVEMENT","INTERNET"="INTERNET",
                               "CHEQUE"="CHEQUE","CARTE_BANCAIRE"="CARTE_BANCAIRE","Average Amount"="avg","Max Amount"="max"),inline=TRUE)),
    mainPanel(
      # Output: Tabset w/ plot, summary, and table ----
      tabsetPanel(type = "tabs",
                  tabPanel("Transition Matrix", sankeyNetworkOutput("plot")),
                  tabPanel("Evolution of Segments", plotOutput("plot2"))
      )
    )
  )
)
server <- function(input, output) {
  # Sankey diagram of segment-to-segment transitions for the selected year
  output$plot <- renderSankeyNetwork({
    datainput = data[which(data$year == input$year),]
    links=data.frame(source = datainput$old_period, target = datainput$new_period, value = datainput$value)
    # One node per distinct segment name appearing as source or target
    nodes=data.frame(name=c(as.character(links$source), as.character(links$target)) %>% unique())
    nodes$group=as.factor(c("my_unique_group"))
    # networkD3 uses 0-based node indices, hence the "-1"
    links$IDsource=match(links$source, nodes$name)-1
    links$IDtarget=match(links$target, nodes$name)-1
    links$group=as.factor(datainput$old_period)
    sankeyNetwork(Links = links, Nodes = nodes, Source = "IDsource", Target = "IDtarget", Value = "value", NodeID = "name", colourScale=my_color, LinkGroup="group", NodeGroup="group")
  })
  # Time series of the chosen variable for the chosen segment
  output$plot2 <- renderPlot({
    y=input$Variable
    # Render a barplot
    ggplot(data2[which(data2$query == input$Segment),], aes_string(x="period_id", y=y)) + geom_line() +
      labs(x="Year", y="Variable Selected")+ scale_x_continuous(breaks=c(2008:2018))
  })
}
shinyApp(ui, server)
|
/shiny.r
|
no_license
|
Charlesbdlt/project
|
R
| false
| false
| 3,017
|
r
|
library(shiny)
library(shinydashboard)
library(networkD3)
library(ggplot2)
library(dplyr)

# Fixed d3 colour scale: one colour per donor segment plus the single node group.
my_color <- 'd3.scaleOrdinal() .domain(["_NULL", "_AUTO","_BOTTOM","_COLD","_NEW","_TOP","_LOST","_WARM","my_unique_group"]) .range(["aquamarine","darkorange","darkmagenta","blue","gold","yellow","green","red","grey"])'

# SHINY APP
# NOTE(review): the server below reads the global data frames `data` and
# `data2`, which are not defined in this script -- they must be loaded into
# the session before shinyApp() runs; confirm against the companion
# data-preparation script.
ui <- dashboardPage(
  dashboardHeader(title = "Assignment 3"),
  dashboardSidebar(),
  dashboardBody(
    fluidPage(
      # Year pair of the transition matrix to display.
      # (Fixed label typo: "Pedriod" -> "Period".)
      selectInput("year", "Transition Matrix Period:",
                  c("2008-2009" = "2009",
                    "2009-2010" = "2010",
                    "2010-2011" = "2011",
                    "2011-2012" = "2012",
                    "2012-2013" = "2013",
                    "2013-2014" = "2014",
                    "2014-2015" = "2015",
                    "2015-2016" = "2016",
                    "2016-2017" = "2017",
                    "2017-2018" = "2018")),
      # Segment to plot in the evolution tab; values index columns of `data2`.
      selectInput("Segment", "Query:",
                  c("New DO" = 2,
                    "New PA" = 3,
                    "Top" = 4,
                    "Bottom" = 5,
                    "Cold" = 6,
                    "Warm" = 7,
                    "Lost" = 8,
                    "Auto" = 9)),
      # Variable (count or amount) plotted on the y axis of the line chart.
      radioButtons("Variable", label = "Number of (/amount):",
                   choices = c("WOMEN"="WOMEN","MEN"="MEN",
                               "NOTSURE"="NOTSURE","ZOMBIE"="ZOMBIE","MAILING"="MAILING","PRELEVEMENT"="PRELEVEMENT","INTERNET"="INTERNET",
                               "CHEQUE"="CHEQUE","CARTE_BANCAIRE"="CARTE_BANCAIRE","Average Amount"="avg","Max Amount"="max"), inline = TRUE)),
    mainPanel(
      # Output: tabset with the Sankey transition matrix and the evolution plot.
      tabsetPanel(type = "tabs",
                  tabPanel("Transition Matrix", sankeyNetworkOutput("plot")),
                  tabPanel("Evolution of Segments", plotOutput("plot2"))
      )
    )
  )
)

server <- function(input, output) {
  # Sankey diagram of segment transitions for the selected year.
  output$plot <- renderSankeyNetwork({
    datainput = data[which(data$year == input$year),]
    links = data.frame(source = datainput$old_period, target = datainput$new_period, value = datainput$value)
    nodes = data.frame(name = c(as.character(links$source), as.character(links$target)) %>% unique())
    nodes$group = as.factor(c("my_unique_group"))
    # networkD3 uses zero-based node indices.
    links$IDsource = match(links$source, nodes$name) - 1
    links$IDtarget = match(links$target, nodes$name) - 1
    links$group = as.factor(datainput$old_period)
    sankeyNetwork(Links = links, Nodes = nodes, Source = "IDsource", Target = "IDtarget", Value = "value", NodeID = "name", colourScale = my_color, LinkGroup = "group", NodeGroup = "group")
  })
  # Line chart of the selected variable over time for the selected segment.
  output$plot2 <- renderPlot({
    y = input$Variable
    # Render a line graph
    ggplot(data2[which(data2$query == input$Segment),], aes_string(x = "period_id", y = y)) + geom_line() +
      labs(x = "Year", y = "Variable Selected") + scale_x_continuous(breaks = c(2008:2018))
  })
}
shinyApp(ui, server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weightedQ.R
\name{weightedQ}
\alias{weightedQ}
\title{make GMRF precision matrix with weights}
\usage{
weightedQ(weights, nn)
}
\arguments{
\item{weights}{weights}
\item{nn}{matrix with pair of neighbours per row.}
}
\value{
sparseMatrix of dimension max(nn)
}
\description{
make GMRF precision matrix with weights
}
\examples{
Q<-weightedQ(1:4,matrix(c(1,1,2,3,2,3,3,4),ncol=2))
print(Q)
}
|
/man/weightedQ.Rd
|
no_license
|
bioimaginggroup/BayGMRF
|
R
| false
| true
| 470
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weightedQ.R
\name{weightedQ}
\alias{weightedQ}
\title{make GMRF precision matrix with weights}
\usage{
weightedQ(weights, nn)
}
\arguments{
\item{weights}{weights}
\item{nn}{matrix with pair of neighbours per row.}
}
\value{
sparseMatrix of dimension max(nn)
}
\description{
make GMRF precision matrix with weights
}
\examples{
Q<-weightedQ(1:4,matrix(c(1,1,2,3,2,3,3,4),ncol=2))
print(Q)
}
|
# R data frame operations
### Adding rows and columns to a data frame
### Create vector objects
city <- c("Tampa","Seattle","Hartford","Denver")
state <- c("FL","WA","CT","CO")
zipcode <- c(33602,98104,16161,80294)
### Combine the three vectors into one object
### (note: cbind() on mixed-type vectors yields a character matrix,
### so zipcode is coerced to character here, not a data frame column)
addresses <- cbind(city,state,zipcode)
print(addresses)
### Create a data frame with the same column structure
new.address <- data.frame(
   city = c("Lowry","Charlotte"),
   state = c("CO","FL"),
   zipcode = c("80230","33949"),
   stringsAsFactors = FALSE
)
print(new.address)
### Append the newly created rows to the original data
all.addresses <- rbind(addresses,new.address)
print(all.addresses)
### Merging data frames
### Merge two data sets on blood pressure ("bp") and body mass index ("bmi").
### Rows whose values for both variables match in the two data sets are
### combined into a single data frame.
library(MASS)
merged.Pima <- merge(x = Pima.te, y = Pima.tr,
                     by.x = c("bp", "bmi"),
                     by.y = c("bp", "bmi")
)
print(merged.Pima)
nrow(merged.Pima)
|
/R-Program/RdataFrame.R
|
permissive
|
Johnwei386/Warehouse
|
R
| false
| false
| 1,001
|
r
|
# R data frame operations
### Adding rows and columns to a data frame
### Create vector objects
city <- c("Tampa","Seattle","Hartford","Denver")
state <- c("FL","WA","CT","CO")
zipcode <- c(33602,98104,16161,80294)
### Combine the three vectors into one object
### (note: cbind() on mixed-type vectors yields a character matrix,
### so zipcode is coerced to character here, not a data frame column)
addresses <- cbind(city,state,zipcode)
print(addresses)
### Create a data frame with the same column structure
new.address <- data.frame(
   city = c("Lowry","Charlotte"),
   state = c("CO","FL"),
   zipcode = c("80230","33949"),
   stringsAsFactors = FALSE
)
print(new.address)
### Append the newly created rows to the original data
all.addresses <- rbind(addresses,new.address)
print(all.addresses)
### Merging data frames
### Merge two data sets on blood pressure ("bp") and body mass index ("bmi").
### Rows whose values for both variables match in the two data sets are
### combined into a single data frame.
library(MASS)
merged.Pima <- merge(x = Pima.te, y = Pima.tr,
                     by.x = c("bp", "bmi"),
                     by.y = c("bp", "bmi")
)
print(merged.Pima)
nrow(merged.Pima)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.cmfrec}
\alias{predict.cmfrec}
\title{Predict entries in the factorized `X` matrix}
\usage{
\method{predict}{cmfrec}(object, user, item = NULL, nthreads = object$info$nthreads, ...)
}
\arguments{
\item{object}{A collective matrix factorization model from this package - see
\link{fit_models} for details.}
\item{user}{The user IDs for which to make predictions. If `X` to which the model
was fit was a `data.frame`, should pass IDs matching to the first column of `X`
(the user indices, should be a character vector),
otherwise should pass row numbers for `X`, with numeration
starting at 1 (should be an integer vector).
If passing a single entry for `user` and `item` has more entries, will
predict all the entries in `item` for that single `user.`
Alternatively, might instead pass a sparse matrix in COO/triplets formats,
for which the \bold{non-missing} entries will be predicted, in which case
it is not necessary to pass `item`.
If passing a sparse matrix, can be from package `Matrix` (class `dgTMatrix` or `ngTMatrix`)
or from package `SparseM` (class `matrix.coo`). If using the package `softImpute`,
its objects of class `incomplete` might be convertible to `Matrix` objects through
e.g. `as(as(X, "TsparseMatrix"), "nMatrix")`.}
\item{item}{The item IDs for which to make predictions - see the documentation
about `user` for details about the indexing.
If passing a single entry for `item` and `user` has more entries, will
predict all the entries in `user` for that single `item`.
If passing a sparse matrix as `user`, `item` will be ignored.}
\item{nthreads}{Number of parallel threads to use.}
\item{...}{Not used.}
}
\value{
A numeric vector with the predicted values at the requested combinations.
If the `user` passed was a sparse matrix, and it was not of class `ngTMatrix`,
will instead return a sparse matrix of the same format, with the non-missing entries
set to the predicted values.
}
\description{
Predict entries in the `X` matrix according to the model
at the combinations [row,column] given by the entries in
`user` and `item` (e.g. passing `user=c(1,2,3), item=c(1,1,1)` will predict
X[1,1], X[2,1], X[3,1]).
Alternatively, might pass a sparse matrix, in which case it will make
predictions for all of its non-missing entries.
Invalid combinations (e.g. rows and columns outside of the range of `X` to
which the model was fit) will be filled with global mean plus biases if applicable
for `CMF_explicit`, and with NAs for the other models.
For example usage, see the main section \link{fit_models}.
}
\seealso{
\link{predict_new} \link{topN}
}
|
/man/predict.cmfrec.Rd
|
permissive
|
david-cortes/cmfrec
|
R
| false
| true
| 2,689
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.cmfrec}
\alias{predict.cmfrec}
\title{Predict entries in the factorized `X` matrix}
\usage{
\method{predict}{cmfrec}(object, user, item = NULL, nthreads = object$info$nthreads, ...)
}
\arguments{
\item{object}{A collective matrix factorization model from this package - see
\link{fit_models} for details.}
\item{user}{The user IDs for which to make predictions. If `X` to which the model
was fit was a `data.frame`, should pass IDs matching to the first column of `X`
(the user indices, should be a character vector),
otherwise should pass row numbers for `X`, with numeration
starting at 1 (should be an integer vector).
If passing a single entry for `user` and `item` has more entries, will
predict all the entries in `item` for that single `user.`
Alternatively, might instead pass a sparse matrix in COO/triplets formats,
for which the \bold{non-missing} entries will be predicted, in which case
it is not necessary to pass `item`.
If passing a sparse matrix, can be from package `Matrix` (class `dgTMatrix` or `ngTMatrix`)
or from package `SparseM` (class `matrix.coo`). If using the package `softImpute`,
its objects of class `incomplete` might be convertible to `Matrix` objects through
e.g. `as(as(X, "TsparseMatrix"), "nMatrix")`.}
\item{item}{The item IDs for which to make predictions - see the documentation
about `user` for details about the indexing.
If passing a single entry for `item` and `user` has more entries, will
predict all the entries in `user` for that single `item`.
If passing a sparse matrix as `user`, `item` will be ignored.}
\item{nthreads}{Number of parallel threads to use.}
\item{...}{Not used.}
}
\value{
A numeric vector with the predicted values at the requested combinations.
If the `user` passed was a sparse matrix, and it was not of class `ngTMatrix`,
will instead return a sparse matrix of the same format, with the non-missing entries
set to the predicted values.
}
\description{
Predict entries in the `X` matrix according to the model
at the combinations [row,column] given by the entries in
`user` and `item` (e.g. passing `user=c(1,2,3), item=c(1,1,1)` will predict
X[1,1], X[2,1], X[3,1]).
Alternatively, might pass a sparse matrix, in which case it will make
predictions for all of its non-missing entries.
Invalid combinations (e.g. rows and columns outside of the range of `X` to
which the model was fit) will be filled with global mean plus biases if applicable
for `CMF_explicit`, and with NAs for the other models.
For example usage, see the main section \link{fit_models}.
}
\seealso{
\link{predict_new} \link{topN}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrap.r
\name{kalman}
\alias{kalman}
\title{Run a Kalman filter}
\usage{
kalman(ssm, dt = NULL, id = 0, root = NULL, n_obs = NULL,
eps_abs_integ = NULL, eps_rel_integ = NULL, like_min = NULL,
freeze_forcing = NULL, interpolator = NULL, verbose = FALSE,
warning = FALSE, no_dem_sto = FALSE, no_white_noise = FALSE,
no_diff = FALSE, traj = TRUE, hat = FALSE, trace = TRUE,
diag = TRUE, prior = FALSE, seed_time = TRUE)
}
\arguments{
\item{ssm}{a \code{ssm} object, returned by \code{\link{new_ssm}}.}
\item{dt}{numeric, integration time step.}
\item{id}{integer, unique integer identifier that will be appended to the output.}
\item{root}{character, root path for output files (if any) (no trailing slash). If \code{NULL} (default), outputs are written in "your_model_path/the_name_of_the_wrapper".}
\item{n_obs}{numeric, number of observations to be fitted (for tempering). If \code{NULL} (default), all observations are fitted.}
\item{eps_abs_integ}{numeric, absolute error for adaptive step-size control.}
\item{eps_rel_integ}{numeric, relative error for adaptive step-size control.}
\item{like_min}{numeric, particles with likelihood smaller than \code{like_min} are considered lost. If \code{NULL} (default) lower bound on likelihood based on machine precision.}
\item{freeze_forcing}{character, freeze covariates to their value at specified date (in \code{YYYY-MM-DD} format).}
\item{interpolator}{character, gsl interpolator for covariates}
\item{verbose}{logical, print logs (verbose). Default to \code{FALSE}.}
\item{warning}{logical, print warnings. Default to \code{FALSE}.}
\item{no_dem_sto}{logical, turn off demographic stochasticity (if any). Default to \code{FALSE}.}
\item{no_white_noise}{logical, turn off white noises (if any). Default to \code{FALSE}.}
\item{no_diff}{logical, turn off diffusions (if any). Default to \code{FALSE}.}
\item{traj}{logical, print the trajectories. Default to \code{TRUE}.}
\item{hat}{logical, print the state estimates. Default to \code{FALSE}.}
\item{trace}{logical, print the trace. Default to \code{TRUE}.}
\item{diag}{logical, print the diagnostics outputs (e.g. prediction residuals). Default to \code{TRUE}.}
\item{prior}{logical, add log(prior) to the estimated log-likelihood. Default to \code{FALSE}.}
\item{seed_time}{logical, seed the random number generator with the current time. Default to \code{TRUE}.}
}
\value{
a \code{ssm} object updated with latest SSM output and ready to be piped into another SSM block.
}
\description{
Function to run a Kalman filter on the \code{sde} approximation of a \code{ssm}.
}
|
/man/kalman.Rd
|
no_license
|
kath-o-reilly/ssminr
|
R
| false
| true
| 2,679
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrap.r
\name{kalman}
\alias{kalman}
\title{Run a Kalman filter}
\usage{
kalman(ssm, dt = NULL, id = 0, root = NULL, n_obs = NULL,
eps_abs_integ = NULL, eps_rel_integ = NULL, like_min = NULL,
freeze_forcing = NULL, interpolator = NULL, verbose = FALSE,
warning = FALSE, no_dem_sto = FALSE, no_white_noise = FALSE,
no_diff = FALSE, traj = TRUE, hat = FALSE, trace = TRUE,
diag = TRUE, prior = FALSE, seed_time = TRUE)
}
\arguments{
\item{ssm}{a \code{ssm} object, returned by \code{\link{new_ssm}}.}
\item{dt}{numeric, integration time step.}
\item{id}{integer, unique integer identifier that will be appended to the output.}
\item{root}{character, root path for output files (if any) (no trailing slash). If \code{NULL} (default), outputs are written in "your_model_path/the_name_of_the_wrapper".}
\item{n_obs}{numeric, number of observations to be fitted (for tempering). If \code{NULL} (default), all observations are fitted.}
\item{eps_abs_integ}{numeric, absolute error for adaptive step-size control.}
\item{eps_rel_integ}{numeric, relative error for adaptive step-size control.}
\item{like_min}{numeric, particles with likelihood smaller than \code{like_min} are considered lost. If \code{NULL} (default) lower bound on likelihood based on machine precision.}
\item{freeze_forcing}{character, freeze covariates to their value at specified date (in \code{YYYY-MM-DD} format).}
\item{interpolator}{character, gsl interpolator for covariates}
\item{verbose}{logical, print logs (verbose). Default to \code{FALSE}.}
\item{warning}{logical, print warnings. Default to \code{FALSE}.}
\item{no_dem_sto}{logical, turn off demographic stochasticity (if any). Default to \code{FALSE}.}
\item{no_white_noise}{logical, turn off white noises (if any). Default to \code{FALSE}.}
\item{no_diff}{logical, turn off diffusions (if any). Default to \code{FALSE}.}
\item{traj}{logical, print the trajectories. Default to \code{TRUE}.}
\item{hat}{logical, print the state estimates. Default to \code{FALSE}.}
\item{trace}{logical, print the trace. Default to \code{TRUE}.}
\item{diag}{logical, print the diagnostics outputs (e.g. prediction residuals). Default to \code{TRUE}.}
\item{prior}{logical, add log(prior) to the estimated log-likelihood. Default to \code{TRUE}.}
\item{seed_time}{logical, seed the random number generator with the current time. Default to \code{TRUE}.}
}
\value{
a \code{ssm} object updated with latest SSM output and ready to be piped into another SSM block.
}
\description{
Function to run a Kalman filter on the \code{sde} approximation of a \code{ssm}.
}
|
#!/usr/bin/env Rscript
# Meeyoung Park, 04/13/2016
# Command: Rscript <script> <input csv> <num controls> <num cases>
# Input:   "_Final.csv" -- first column `Sample` is "<Class> <Carbon>",
#          remaining columns are control replicates followed by case replicates
# Process: group lipid species by class and compute per-class summary
#          statistics plus the case/control fold change
# Output:  "<tissue>_groupedInfo.csv"
#
# NOTE(review): the original file contained unresolved git merge-conflict
# markers (<<<<<<< / ======= / >>>>>>>), which made it invalid R.  The
# conflict is resolved here in favour of the more general branch that takes
# the group sizes from the command line instead of hard-coding them; the
# group-size arguments are also coerced to numeric (commandArgs() returns
# character strings, so the original column arithmetic would have failed).
library(plyr)
library(ggplot2)
library(gridExtra)
library(reshape2)

args <- commandArgs(TRUE)
print(args[1])
df <- read.csv(args[1], header = TRUE, sep = ",")

# Separate the lipid class from the carbon annotation in the Sample column.
lipid.name <- t(as.data.frame(strsplit(as.character(df$Sample), " ")))
colnames(lipid.name) <- c("Class", "Carbon")
lipid.name.class <- data.frame('Class' = lipid.name[, 1])
LipidData <- data.frame('Class' = lipid.name.class, df[, 2:length(df)])

# Group sizes from the command line; coerce to numeric for column arithmetic.
NumCtl <- as.numeric(args[2])
NumCase <- as.numeric(args[3])

new_df <- as.matrix(LipidData[, 2:length(df)])
data.ctrl <- new_df[, 1:NumCtl]
data.case <- new_df[, (NumCtl + 1):(NumCtl + NumCase)]

# Mean of each row (mean of replicates).
RowM.ctrl <- rowMeans(data.ctrl, na.rm = FALSE, dims = 1)
RowM.case <- rowMeans(data.case, na.rm = FALSE, dims = 1)
mean.df <- data.frame("Ctrl_Mean" = RowM.ctrl, "Case_Mean" = RowM.case)

# Per-class summary over both groups combined.
final.df <- data.frame('Class' = LipidData$Class, mean.df)
df.melted <- melt(final.df, id.vars = "Class")
df.grouped <- ddply(df.melted, "Class", summarise,
                    N = length(value),
                    SUM = sum(value),
                    Mean = mean(value),
                    SD = sd(value),
                    SE = SD / sqrt(N)
)

# Per-class summary, controls only.
ctrl.df <- data.frame('Class' = LipidData$Class, RowM.ctrl)
ctrl.melted <- melt(ctrl.df, id.vars = "Class")
ctrl.grouped <- ddply(ctrl.melted, "Class", summarise,
                      N = length(value),
                      SUM = sum(value),
                      Mean = mean(value),
                      SD = sd(value),
                      SE = SD / sqrt(N)
)

# Per-class summary, cases only.
case.df <- data.frame('Class' = LipidData$Class, RowM.case)
case.melted <- melt(case.df, id.vars = "Class")
case.grouped <- ddply(case.melted, "Class", summarise,
                      N = length(value),
                      SUM = sum(value),
                      Mean = mean(value),
                      SD = sd(value),
                      SE = SD / sqrt(N)
)

# Fold change (case over control) per class, raw and log2.
raw.FC <- case.grouped$Mean / ctrl.grouped$Mean
log.FC <- log2(raw.FC)
Grouped.Info <- data.frame('Class' = ctrl.grouped$Class, 'N' = ctrl.grouped$N,
                           'Ctrl.Sum' = ctrl.grouped$SUM, 'Ctrl.Mean' = ctrl.grouped$Mean,
                           'Ctrl.SD' = ctrl.grouped$SD, 'Ctrl.SE' = ctrl.grouped$SE,
                           'Case.Sum' = case.grouped$SUM, 'Case.Mean' = case.grouped$Mean,
                           'Case.SD' = case.grouped$SD, 'Case.SE' = case.grouped$SE,
                           'FC' = raw.FC, 'logFC' = log.FC)

# Build the output file name from the input path: "<a>_<b>_<c>_groupedInfo.csv".
name_temp <- strsplit(args[1], "/")
tmp <- strsplit(name_temp[[1]][length(name_temp[[1]])], "\\.")  # strip extension
tissue_name <- strsplit(tmp[[1]][1], "\\_")
filename1 <- paste(tissue_name[[1]][1], tissue_name[[1]][2], tissue_name[[1]][3], sep = "_")
write.csv(Grouped.Info, paste(filename1, '_groupedInfo.csv', sep = ""), row.names = FALSE)
|
/Scripts/7_GroupStatistics.R
|
no_license
|
mpark-bioinfo/HF11
|
R
| false
| false
| 5,190
|
r
|
#!/usr/bin/env Rscript
# Meeyoung Park, 04/13/2016
# Command: Rscript <script> <input csv> <num controls> <num cases>
# Input:   "_Final.csv" -- first column `Sample` is "<Class> <Carbon>",
#          remaining columns are control replicates followed by case replicates
# Process: group lipid species by class and compute per-class summary
#          statistics plus the case/control fold change
# Output:  "<tissue>_groupedInfo.csv"
#
# NOTE(review): the original file contained unresolved git merge-conflict
# markers (<<<<<<< / ======= / >>>>>>>), which made it invalid R.  The
# conflict is resolved here in favour of the more general branch that takes
# the group sizes from the command line instead of hard-coding them; the
# group-size arguments are also coerced to numeric (commandArgs() returns
# character strings, so the original column arithmetic would have failed).
library(plyr)
library(ggplot2)
library(gridExtra)
library(reshape2)

args <- commandArgs(TRUE)
print(args[1])
df <- read.csv(args[1], header = TRUE, sep = ",")

# Separate the lipid class from the carbon annotation in the Sample column.
lipid.name <- t(as.data.frame(strsplit(as.character(df$Sample), " ")))
colnames(lipid.name) <- c("Class", "Carbon")
lipid.name.class <- data.frame('Class' = lipid.name[, 1])
LipidData <- data.frame('Class' = lipid.name.class, df[, 2:length(df)])

# Group sizes from the command line; coerce to numeric for column arithmetic.
NumCtl <- as.numeric(args[2])
NumCase <- as.numeric(args[3])

new_df <- as.matrix(LipidData[, 2:length(df)])
data.ctrl <- new_df[, 1:NumCtl]
data.case <- new_df[, (NumCtl + 1):(NumCtl + NumCase)]

# Mean of each row (mean of replicates).
RowM.ctrl <- rowMeans(data.ctrl, na.rm = FALSE, dims = 1)
RowM.case <- rowMeans(data.case, na.rm = FALSE, dims = 1)
mean.df <- data.frame("Ctrl_Mean" = RowM.ctrl, "Case_Mean" = RowM.case)

# Per-class summary over both groups combined.
final.df <- data.frame('Class' = LipidData$Class, mean.df)
df.melted <- melt(final.df, id.vars = "Class")
df.grouped <- ddply(df.melted, "Class", summarise,
                    N = length(value),
                    SUM = sum(value),
                    Mean = mean(value),
                    SD = sd(value),
                    SE = SD / sqrt(N)
)

# Per-class summary, controls only.
ctrl.df <- data.frame('Class' = LipidData$Class, RowM.ctrl)
ctrl.melted <- melt(ctrl.df, id.vars = "Class")
ctrl.grouped <- ddply(ctrl.melted, "Class", summarise,
                      N = length(value),
                      SUM = sum(value),
                      Mean = mean(value),
                      SD = sd(value),
                      SE = SD / sqrt(N)
)

# Per-class summary, cases only.
case.df <- data.frame('Class' = LipidData$Class, RowM.case)
case.melted <- melt(case.df, id.vars = "Class")
case.grouped <- ddply(case.melted, "Class", summarise,
                      N = length(value),
                      SUM = sum(value),
                      Mean = mean(value),
                      SD = sd(value),
                      SE = SD / sqrt(N)
)

# Fold change (case over control) per class, raw and log2.
raw.FC <- case.grouped$Mean / ctrl.grouped$Mean
log.FC <- log2(raw.FC)
Grouped.Info <- data.frame('Class' = ctrl.grouped$Class, 'N' = ctrl.grouped$N,
                           'Ctrl.Sum' = ctrl.grouped$SUM, 'Ctrl.Mean' = ctrl.grouped$Mean,
                           'Ctrl.SD' = ctrl.grouped$SD, 'Ctrl.SE' = ctrl.grouped$SE,
                           'Case.Sum' = case.grouped$SUM, 'Case.Mean' = case.grouped$Mean,
                           'Case.SD' = case.grouped$SD, 'Case.SE' = case.grouped$SE,
                           'FC' = raw.FC, 'logFC' = log.FC)

# Build the output file name from the input path: "<a>_<b>_<c>_groupedInfo.csv".
name_temp <- strsplit(args[1], "/")
tmp <- strsplit(name_temp[[1]][length(name_temp[[1]])], "\\.")  # strip extension
tissue_name <- strsplit(tmp[[1]][1], "\\_")
filename1 <- paste(tissue_name[[1]][1], tissue_name[[1]][2], tissue_name[[1]][3], sep = "_")
write.csv(Grouped.Info, paste(filename1, '_groupedInfo.csv', sep = ""), row.names = FALSE)
|
library(dplyr)
library(tidyr)
library(readr)

# Locate the raw data file.  The original pattern "^household.*[.txt]$" used a
# character class, which matches any file ending in '.', 't' or 'x';
# "\\.txt$" matches the literal extension instead.
a <- grep("^household.*\\.txt$", list.files(), value = TRUE)

# '?' encodes missing values in this data set.
electric_consumption <- read.table(a, header = TRUE, sep = ";", na.strings = c("NA", "?"))
electric_consumption$Date <- as.Date(electric_consumption$Date, "%d/%m/%Y")

# Combine date and time into one timestamp.  as.POSIXct is used instead of
# strptime because strptime returns POSIXlt, which misbehaves as a
# data.frame column (and with dplyr verbs).
electric_consumption$Time <- as.POSIXct(
  paste(electric_consumption$Date, electric_consumption$Time),
  format = "%Y-%m-%d %H:%M:%S", tz = "CET"
)

# Keep only 2007-02-01 and 2007-02-02.
electric_consumption <- filter(electric_consumption,
                               between(Date, as.Date("2007-02-01"), as.Date("2007-02-02")))

# Plot 3 - three sub-metering series on one line graph.
png(filename = "plot3.png")
plot(electric_consumption$Time, electric_consumption$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy Sub Metering")
lines(electric_consumption$Time, electric_consumption$Sub_metering_2, col = "red")
lines(electric_consumption$Time, electric_consumption$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), cex = 1, pt.cex = 0.9)
dev.off()
|
/plot3.R
|
no_license
|
Antara-Sarkar114/ExData_Plotting1
|
R
| false
| false
| 1,006
|
r
|
library(dplyr)
library(tidyr)
library(readr)

# Locate the raw data file.  The original pattern "^household.*[.txt]$" used a
# character class, which matches any file ending in '.', 't' or 'x';
# "\\.txt$" matches the literal extension instead.
a <- grep("^household.*\\.txt$", list.files(), value = TRUE)

# '?' encodes missing values in this data set.
electric_consumption <- read.table(a, header = TRUE, sep = ";", na.strings = c("NA", "?"))
electric_consumption$Date <- as.Date(electric_consumption$Date, "%d/%m/%Y")

# Combine date and time into one timestamp.  as.POSIXct is used instead of
# strptime because strptime returns POSIXlt, which misbehaves as a
# data.frame column (and with dplyr verbs).
electric_consumption$Time <- as.POSIXct(
  paste(electric_consumption$Date, electric_consumption$Time),
  format = "%Y-%m-%d %H:%M:%S", tz = "CET"
)

# Keep only 2007-02-01 and 2007-02-02.
electric_consumption <- filter(electric_consumption,
                               between(Date, as.Date("2007-02-01"), as.Date("2007-02-02")))

# Plot 3 - three sub-metering series on one line graph.
png(filename = "plot3.png")
plot(electric_consumption$Time, electric_consumption$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy Sub Metering")
lines(electric_consumption$Time, electric_consumption$Sub_metering_2, col = "red")
lines(electric_consumption$Time, electric_consumption$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), cex = 1, pt.cex = 0.9)
dev.off()
|
#' Coal tits breeding survey data
#'
#'
#' The Swiss coal tit (\emph{Parus ater}) annual territory counts from
#' the Swiss breeding bird survey MHB from 1999 to 2007.
#'
#' The variables are as follows:
#' \itemize{
#'   \item \code{site} the site code (factor with 235 levels).
#'   \item \code{spec} the species (factor with one level = "Coaltit").
#'   \item \code{elevation} the elevation in masl (metres above sea level; integer).
#'   \item \code{forest} the percent forest cover (integer).
#'   \item \code{y1999, y2000, ..., y2007} the site count by year (integer).
#'   \item \code{obs1999, obs2000, ..., obs2007} the observer code by year (integer).
#'   \item \code{first1999, first2000, ..., first2007} the first-time observer indicator by year (integer with two values 0 or 1).
#' }
#'
#' @format A data frame with 235 rows and 31 columns
#' @references
#' Kery M & Schaub M Bayesian Population Analysis
#' using WinBUGS. Academic Press. (\url{http://www.vogelwarte.ch/bpa})
"tits"
|
/R/data-tits.R
|
no_license
|
poissonconsulting/bauw
|
R
| false
| false
| 972
|
r
|
#' Coal tits breeding survey data
#'
#'
#' The Swiss coal tit (\emph{Parus ater}) annual territory counts from
#' the Swiss breeding bird survey MHB from 1999 to 2007.
#'
#' The variables are as follows:
#' \itemize{
#'   \item \code{site} the site code (factor with 235 levels).
#'   \item \code{spec} the species (factor with one level = "Coaltit").
#'   \item \code{elevation} the elevation in masl (metres above sea level; integer).
#'   \item \code{forest} the percent forest cover (integer).
#'   \item \code{y1999, y2000, ..., y2007} the site count by year (integer).
#'   \item \code{obs1999, obs2000, ..., obs2007} the observer code by year (integer).
#'   \item \code{first1999, first2000, ..., first2007} the first-time observer indicator by year (integer with two values 0 or 1).
#' }
#'
#' @format A data frame with 235 rows and 31 columns
#' @references
#' Kery M & Schaub M Bayesian Population Analysis
#' using WinBUGS. Academic Press. (\url{http://www.vogelwarte.ch/bpa})
"tits"
|
context("Common API")
# Silence the warning the keyring package emits when it falls back to the
# insecure environment-variable backend; restore the option once this test
# file finishes running.
opts <- options(keyring_warn_for_env_fallback = FALSE)
on.exit(options(opts), add = TRUE)
# Round trip: a key that is set can be read back and then removed.
test_that("set, get, delete", {
  skip_on_cran()
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent(key_set_with_value(service, username, password))
  expect_equal(key_get(service, username), password)
  expect_silent(key_delete(service, username))
})
# Same round trip with the username argument omitted entirely.
test_that("set, get, delete without username", {
  skip_on_cran()
  service <- random_service()
  password <- random_password()
  expect_silent(key_set_with_value(service, password = password))
  expect_equal(key_get(service), password)
  expect_silent(key_delete(service))
})
# Setting the same service/username twice overwrites the stored password.
test_that("set can update", {
  skip_on_cran()
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent({
    key_set_with_value(service, username, "foobar")
    key_set_with_value(service, username, password)
  })
  expect_equal(key_get(service, username), password)
  expect_silent(key_delete(service, username))
})
# key_list() reports a stored key, both unfiltered and filtered by service.
test_that("list", {
  skip_on_cran()
  if (default_backend()$name == "env") skip("'env' backend has no 'list' support")
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent({
    key_set_with_value(service, username, password)
    list <- key_list()
  })
  expect_equal(list$username[match(service, list$service)], username)
  list2 <- key_list(service = service)
  expect_equal(nrow(list2), 1)
  expect_equal(list2$username, username)
  expect_silent(key_delete(service, username))
})
|
/tests/testthat/test-common.R
|
permissive
|
aecoleman/keyring
|
R
| false
| false
| 1,647
|
r
|
context("Common API")
# Silence the warning the keyring package emits when it falls back to the
# insecure environment-variable backend; restore the option once this test
# file finishes running.
opts <- options(keyring_warn_for_env_fallback = FALSE)
on.exit(options(opts), add = TRUE)
# Round trip: a key that is set can be read back and then removed.
test_that("set, get, delete", {
  skip_on_cran()
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent(key_set_with_value(service, username, password))
  expect_equal(key_get(service, username), password)
  expect_silent(key_delete(service, username))
})
# Same round trip with the username argument omitted entirely.
test_that("set, get, delete without username", {
  skip_on_cran()
  service <- random_service()
  password <- random_password()
  expect_silent(key_set_with_value(service, password = password))
  expect_equal(key_get(service), password)
  expect_silent(key_delete(service))
})
# Setting the same service/username twice overwrites the stored password.
test_that("set can update", {
  skip_on_cran()
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent({
    key_set_with_value(service, username, "foobar")
    key_set_with_value(service, username, password)
  })
  expect_equal(key_get(service, username), password)
  expect_silent(key_delete(service, username))
})
# key_list() reports a stored key, both unfiltered and filtered by service.
test_that("list", {
  skip_on_cran()
  if (default_backend()$name == "env") skip("'env' backend has no 'list' support")
  service <- random_service()
  username <- random_username()
  password <- random_password()
  expect_silent({
    key_set_with_value(service, username, password)
    list <- key_list()
  })
  expect_equal(list$username[match(service, list$service)], username)
  list2 <- key_list(service = service)
  expect_equal(nrow(list2), 1)
  expect_equal(list2$username, username)
  expect_silent(key_delete(service, username))
})
|
## vim: tw=120 shiftwidth=4 softtabstop=4 expandtab:
#' Class to Store adp (ADCP) Data
#'
#' This class stores data from acoustic Doppler profilers. Some manufacturers
#' call these ADCPs, while others call them ADPs; here the shorter form is
#' used by analogy to ADVs.
#'
#' @templateVar class adp
#'
#' @templateVar dataExample The key items stored in this slot include `time`, `distance`, and `v`, along with angles `heading`, `pitch` and `roll`.
#'
#' @templateVar metadataExample Examples that are of common interest include `oceCoordinate`, `orientation`, `frequency`, and `beamAngle`.
#'
#' @template slot_summary
#'
#' @template slot_put
#'
#' @template slot_get
#'
#' @section Reading/creating `adp` objects:
#'
#' The `metadata` slot contains various
#' items relating to the dataset, including source file name, sampling rate,
#' velocity resolution, velocity maximum value, and so on. Some of these are
#' particular to particular instrument types, and prudent researchers will take
#' a moment to examine the whole contents of the metadata, either in summary
#' form (with `str(adp[["metadata"]])`) or in detail (with
#' `adp[["metadata"]]`). Perhaps the most useful general properties are
#' `adp[["bin1Distance"]]` (the distance, in metres, from the sensor to
#' the bottom of the first bin), `adp[["cellSize"]]` (the cell height, in
#' metres, in the vertical direction, *not* along the beam), and
#' `adp[["beamAngle"]]` (the angle, in degrees, between beams and an
#' imaginary centre line that bisects all beam pairs).
#'
#' The diagram provided below indicates the coordinate-axis and beam-numbering
#' conventions for three- and four-beam ADP devices, viewed as though the
#' reader were looking towards the beams being emitted from the transducers.
#'
#' \if{html}{\figure{adp_beams.png}{options: width=400px alt="Figure: adp_beams.png"}}
#'
#' The bin geometry of a four-beam profiler is illustrated below, for
#' `adp[["beamAngle"]]` equal to 20 degrees, `adp[["bin1Distance"]]`
#' equal to 2m, and `adp[["cellSize"]]` equal to 1m. In the diagram, the
#' viewer is in the plane containing two beams that are not shown, so the two
#' visible beams are separated by 40 degrees. Circles indicate the centres of
#' the range-gated bins within the beams. The lines enclosing those circles
#' indicate the coverage of beams that spread plus and minus 2.5 degrees from
#' their centreline.
#'
#' \if{html}{\figure{adpgeometry2.png}{options: width=400px alt="Figure: adpgeometry2.png"}}
#'
#' Note that `adp[["oceCoordinate"]]` stores the present coordinate system
#' of the object, and it has possible values `"beam"`, `"xyz"`, `"sfm"` or
#' `"enu"`. (This should not be confused with
#' `adp[["originalCoordinate"]]`, which stores the coordinate system used
#' in the original data file.)
#'
#' The `data` slot holds some standardized items, and
#' many that vary from instrument to instrument. One standard item is
#' `adp[["v"]]`, a three-dimensional numeric array of velocities in
#' m/s. In this matrix, the first index indicates time, the second bin
#' number, and the third beam number. The meaning of beams number depends on
#' whether the object is in beam coordinates, frame coordinates, or earth
#' coordinates. For example, if in earth coordinates, then beam 1 is
#' the eastward component of velocity.
#' Thus, for example,
#' \preformatted{
#' library(oce)
#' data(adp)
#' t <- adp[['time']]
#' d <- adp[['distance']]
#' eastward <- adp[['v']][,,1]
#' imagep(t, d, eastward, missingColor="gray")
#' }
#' plots an image of the eastward component of velocity as a function of time (the x axis)
#' and distance from sensor (y axis), since the `adp` dataset is
#' in earth coordinates. Note the semidiurnal tidal signal, and the pattern of missing
#' data at the ocean surface (gray blotches at the top).
#'
#' Corresponding to the velocity array are two arrays of type raw, and
#' identical dimension, accessed by `adp[["a"]]` and `adp[["q"]]`,
#' holding measures of signal strength and data quality,
#' respectively. (The exact meanings of these depend on the particular type
#' of instrument, and it is assumed that users will be familiar enough with
#' instruments to know both the meanings and their practical consequences in
#' terms of data-quality assessment, etc.)
#'
#' In addition to the arrays, there are time-based vectors. The vector
#' `adp[["time"]]` (of length equal to the first index of
#' `adp[["v"]]`, etc.) holds times of observation. Depending on type of
#' instrument and its configuration, there may also be corresponding vectors
#' for sound speed (`adp[["soundSpeed"]]`), pressure
#' (`adp[["pressure"]]`), temperature (`adp[["temperature"]]`),
#' heading (`adp[["heading"]]`) pitch (`adp[["pitch"]]`), and roll
#' (`adp[["roll"]]`), depending on the setup of the instrument.
#'
#' The precise meanings of the data items depend on the instrument type. All
#' instruments have `v` (for velocity), `q` (for a measure of data
#' quality) and `a` (for a measure of backscatter amplitude, also called
#' echo intensity).
#' Teledyne-RDI profilers have an additional item `g` (for
#' percent-good).
#'
#' VmDas-equipped Teledyne-RDI profilers record additional navigation data, with
#' details listed in the table below; note that the RDI documentation (reference 2) and
#' the RDI gui use inconsistent names for most items.
#'
#' \tabular{lll}{
#' **Oce name**\tab **RDI doc name**\tab **RDI GUI name**\cr
#' `avgSpeed`\tab Avg Speed\tab Speed/Avg/Mag\cr
#' `avgMagnitudeVelocityEast`\tab Avg Mag Vel East\tab ?\cr
#' `avgMagnitudeVelocityNorth`\tab Avg Mag Vel North\tab ?\cr
#' `avgTrackMagnetic`\tab Avg Track Magnetic\tab Speed/Avg/Dir (?)\cr
#' `avgTrackTrue`\tab Avg Track True\tab Speed/Avg/Dir (?)\cr
#' `avgTrueVelocityEast`\tab Avg True Vel East\tab ?\cr
#' `avgTrueVelocityNorth`\tab Avg True Vel North\tab ?\cr
#' `directionMadeGood`\tab Direction Made Good\tab Speed/Made Good/Dir\cr
#' `firstLatitude`\tab First latitude\tab Start Lat\cr
#' `firstLongitude`\tab First longitude\tab Start Lon\cr
#' `firstTime`\tab UTC Time of first fix\tab Start Time\cr
#' `lastLatitude`\tab Last latitude\tab End Lat\cr
#' `lastLongitude`\tab Last longitude\tab End Lon\cr
#' `lastTime`\tab UTC Time of last fix\tab End Time\cr
#' `numberOfHeadingSamplesAveraged`\tab Number heading samples averaged\tab ?\cr
#' `numberOfMagneticTrackSamplesAveraged`\tab Number of magnetic track samples averaged\tab ? \cr
#' `numberOfPitchRollSamplesAvg`\tab Number of magnetic track samples averaged\tab ? \cr
#' `numberOfSpeedSamplesAveraged`\tab Number of speed samples averaged\tab ? \cr
#' `numberOfTrueTrackSamplesAvg`\tab Number of true track samples averaged\tab ? \cr
#' `primaryFlags`\tab Primary Flags\tab ?\cr
#' `shipHeading`\tab Heading\tab ?\cr
#' `shipPitch`\tab Pitch\tab ?\cr
#' `shipRoll`\tab Roll\tab ?\cr
#' `speedMadeGood`\tab Speed Made Good\tab Speed/Made Good/Mag\cr
#' `speedMadeGoodEast`\tab Speed MG East\tab ?\cr
#' `speedMadeGoodNorth`\tab Speed MG North\tab ?\cr
#' }
#'
#' For Teledyne-RDI profilers, there are four three-dimensional arrays
#' holding beamwise data. In these, the first index indicates time, the
#' second bin number, and the third beam number (or coordinate number, for
#' data in `xyz`, `sfm`, `enu` or `other` coordinate systems). In
#' the list below, the quoted phrases are quantities as defined in Figure 9
#' of reference 1.
#'
#' * `v` is ``velocity'' in m/s, inferred from two-byte signed
#' integer values (multiplied by the scale factor that is stored in
#' `velocityScale` in the metadata).
#'
#' * `q` is ``correlation magnitude'' a one-byte quantity stored
#' as type `raw` in the object. The values may range from 0 to 255.
#'
#' * `a` is ``backscatter amplitude'', also known as ``echo
#' intensity'', a one-byte quantity stored as type `raw` in the object.
#' The values may range from 0 to 255.
#'
#' * `g` is ``percent good'' a one-byte quantity stored as `raw`
#' in the object. The values may range from 0 to 100.
#'
#' Finally, there is a vector `adp[["distance"]]` that indicates the bin
#' distances from the sensor, measured in metres along an imaginary centre
#' line bisecting beam pairs. The length of this vector equals
#' `dim(adp[["v"]])[2]`.
#'
#' @section Teledyne-RDI Sentinel V ADCPs: As of 2016-09-27 there is
#' provisional support for the TRDI "SentinelV" ADCPs, which are 5
#' beam ADCPs with a vertical centre beam. Relevant vertical beam
#' fields are called `adp[["vv"]]`, `adp[["va"]]`,
#' `adp[["vq"]]`, and `adp[["vg"]]` in analogy with the
#' standard 4-beam fields.
#'
#' @section Accessing and altering information within [adp-class] objects:
#' *Extracting values* Matrix data may be accessed as illustrated
#' above, e.g. for an adp object named `adp`, the data are provided by
#' `adp[["v"]]`, `adp[["a"]]`, and `adp[["q"]]`. As a
#' convenience, the last two of these can be accessed as numeric (as opposed to
#' raw) values by e.g. `adp[["a", "numeric"]]`. The vectors are accessed
#' in a similar way, e.g. `adp[["heading"]]`, etc. Quantities in the
#' `metadata` slot are also available by name, e.g.
#' `adp[["velocityResolution"]]`, etc.
#'
#' *Assigning values.* This follows the standard form, e.g. to increase
#' all velocity data by 1 cm/s, use `adp[["v"]] <- 0.01 + adp[["v"]]`.
#'
#' *Overview of contents* The `show` method (e.g.
#' `show(d)`) displays information about an ADP object named `d`.
#'
#' @section Dealing with suspect data:
#' There are many possibilities for confusion
#' with `adp` devices, owing partly to the flexibility that manufacturers
#' provide in the setup. Prudent users will undertake many tests before trusting
#' the details of the data. Are mean currents in the expected direction, and of
#' the expected magnitude, based on other observations or physical constraints?
#' Is the phasing of currents as expected? If the signals are suspect, could an
#' incorrect scale account for it? Could the transformation matrix be incorrect?
#' Might the data have exceeded the maximum value, and then ``wrapped around'' to
#' smaller values? Time spent on building confidence in data quality is seldom
#' time wasted.
#'
#' @section References:
#' 1. Teledyne-RDI, 2007.
#' *WorkHorse commands and output data format.*
#' P/N 957-6156-00 (November 2007).
#'
#' 2. Teledyne-RDI, 2012. *VmDas User's Guide, Ver. 1.46.5*.
#'
#' @seealso
#' A file containing ADP data is usually recognized by Oce, and so
#' [read.oce()] will usually read the data. If not, one may use the
#' general ADP function [read.adp()] or specialized variants
#' [read.adp.rdi()], [read.adp.nortek()],
#' [read.adp.ad2cp()],
#' [read.adp.sontek()] or [read.adp.sontek.serial()].
#'
#' ADP data may be plotted with [plot,adp-method()], which is a
#' generic function so it may be called simply as `plot`.
#'
#' Statistical summaries of ADP data are provided by the generic function
#' `summary`, while briefer overviews are provided with `show`.
#'
#' Conversion from beam to xyz coordinates may be done with
#' [beamToXyzAdp()], and from xyz to enu (east north up) may be done
#' with [xyzToEnuAdp()]. [toEnuAdp()] may be used to
#' transfer either beam or xyz to enu. Enu may be converted to other coordinates
#' (e.g. aligned with a coastline) with [enuToOtherAdp()].
#'
#' @family classes provided by oce
#' @family things related to adp data
setClass("adp", contains="oce")
#' ADP (acoustic-doppler profiler) dataset
#'
#' This is degraded subsample of measurements that were made with an
#' upward-pointing ADP manufactured by Teledyne-RDI, as part of the St Lawrence
#' Internal Wave Experiment (SLEIWEX).
#'
#' @name adp
#'
#' @docType data
#'
#' @usage data(adp)
#'
#' @examples
#'\donttest{
#' library(oce)
#' data(adp)
#'
#' # Velocity components. (Note: we should probably trim some bins at top.)
#' plot(adp)
#'
#' # Note that tides have moved the mooring.
#' plot(adp, which=15:18)
#'}
#'
#'
#' @source This file came from the SLEIWEX-2008 experiment.
#'
#' @family datasets provided with oce
#' @family things related to adp data
NULL
setMethod(f="initialize",
signature="adp",
definition=function(.Object, time, distance, v, a, q, oceCoordinate="enu", orientation="upward", ...) {
.Object <- callNextMethod(.Object, ...)
if (!missing(time)) .Object@data$time <- time
if (!missing(distance)) {
.Object@data$distance <- distance
.Object@metadata$cellSize <- tail(diff(distance), 1) # first one has blanking, perhaps
}
if (!missing(v)) {
.Object@data$v <- v
.Object@metadata$numberOfBeams <- dim(v)[3]
.Object@metadata$numberOfCells <- dim(v)[2]
}
if (!missing(a)) .Object@data$a <- a
if (!missing(q)) .Object@data$q <- q
.Object@metadata$units$v <- list(unit=expression(m/s), scale="")
.Object@metadata$units$distance <- list(unit=expression(m), scale="")
.Object@metadata$oceCoordinate <- oceCoordinate # FIXME: should check that it is allowed
.Object@metadata$orientation <- orientation # FIXME: should check that it is allowed
.Object@processingLog$time <- presentTime()
.Object@processingLog$value <- "create 'adp' object"
return(.Object)
})
## DEVELOPERS: please pattern functions and documentation on this, for uniformity.
## DEVELOPERS: You will need to change the docs, and the 3 spots in the code
## DEVELOPERS: marked '# DEVELOPER 1:', etc.
#' @title Handle Flags in adp Objects
#'
#' @details
#' If `flags` and `actions` are not provided, the
#' default is to consider a flag value of 1 to indicate bad data,
#' and 0 to indicate good data. Note that it only makes sense to use
#' velocity (`v`) flags, because other flags are, at least
#' for some instruments, stored as `raw` quantities, and such
#' quantities may not be set to `NA`.
#'
#' @param object an [adp-class] object.
#'
#' @template handleFlagsTemplate
#'
#' @examples
#' # Flag low "goodness" or high "error beam" values.
#' library(oce)
#' data(adp)
#' # Same as Example 2 of ?'setFlags,adp-method'
#' v <- adp[["v"]]
#' i2 <- array(FALSE, dim=dim(v))
#' g <- adp[["g", "numeric"]]
#' # Thresholds on percent "goodness" and error "velocity"
#' G <- 25
#' V4 <- 0.45
#' for (k in 1:3)
#' i2[,,k] <- ((g[,,k]+g[,,4]) < G) | (v[,,4] > V4)
#' adpQC <- initializeFlags(adp, "v", 2)
#' adpQC <- setFlags(adpQC, "v", i2, 3)
#' adpClean <- handleFlags(adpQC, flags=list(3), actions=list("NA"))
#' # Demonstrate (subtle) change graphically.
#' par(mfcol=c(2, 1))
#' plot(adp, which="u1")
#' plot(adpClean, which="u1")
#'
#' @family things related to adp data
setMethod("handleFlags", signature=c(object="adp", flags="ANY", actions="ANY", where="ANY", debug="ANY"),
definition=function(object, flags=NULL, actions=NULL, where=NULL, debug=getOption("oceDebug")) {
## DEVELOPER 1: alter the next comment to explain your setup
## Flag=1 means bad velocity; 0 means good
if (is.null(flags)) {
flags <- defaultFlags(object)
if (is.null(flags))
stop("must supply 'flags', or use initializeFlagScheme() on the adp object first")
}
if (is.null(actions)) {
actions <- list("NA") # DEVELOPER 3: alter this line to suit a new data class
names(actions) <- names(flags)
}
if (any(names(actions)!=names(flags)))
stop("names of flags and actions must match")
handleFlagsInternal(object=object, flags=flags, actions=actions, where=where, debug=debug)
})
#' @templateVar class adp
#' @templateVar details There are no agreed-upon flag schemes for adp data.
#' @template initializeFlagsTemplate
setMethod("initializeFlags",
c(object="adp", name="ANY", value="ANY", debug="ANY"),
function(object, name=NULL, value=NULL, debug=getOption("oceDebug")) {
oceDebug(debug, "setFlags,adp-method name=", name, ", value=", value, "\n")
if (is.null(name))
stop("must supply 'name'")
if (name != "v")
stop("the only flag that adp objects can handle is for \"v\"")
res <- initializeFlagsInternal(object, name, value, debug-1)
res
})
#' @templateVar class adp
#' @templateVar note The only flag that may be set is `v`, for the array holding velocity. See \dQuote{Indexing rules}, noting that adp data are stored in 3D arrays; Example 1 shows using a data frame for `i`, while Example 2 shows using an array.
#' @template setFlagsTemplate
#' @examples
#' library(oce)
#' data(adp)
#'
#' ## Example 1: flag first 10 samples in a mid-depth bin of beam 1
#' i1 <- data.frame(1:20, 40, 1)
#' adpQC <- initializeFlags(adp, "v", 2)
#' adpQC <- setFlags(adpQC, "v", i1, 3)
#' adpClean1 <- handleFlags(adpQC, flags=list(3), actions=list("NA"))
#' par(mfrow=c(2, 1))
#' ## Top: original, bottom: altered
#' plot(adp, which="u1")
#' plot(adpClean1, which="u1")
#'
#' ## Example 2: percent-good and error-beam scheme
#' v <- adp[["v"]]
#' i2 <- array(FALSE, dim=dim(v))
#' g <- adp[["g", "numeric"]]
#' # Thresholds on percent "goodness" and error "velocity"
#' G <- 25
#' V4 <- 0.45
#' for (k in 1:3)
#' i2[,,k] <- ((g[,,k]+g[,,4]) < G) | (v[,,4] > V4)
#' adpQC2 <- initializeFlags(adp, "v", 2)
#' adpQC2 <- setFlags(adpQC2, "v", i2, 3)
#' adpClean2 <- handleFlags(adpQC2, flags=list(3), actions=list("NA"))
#' ## Top: original, bottom: altered
#' plot(adp, which="u1")
#' plot(adpClean2, which="u1") # differs at 8h and 20h
#'
#' @family things related to adp data
setMethod("setFlags",
c(object="adp", name="ANY", i="ANY", value="ANY", debug="ANY"),
function(object, name=NULL, i=NULL, value=NULL, debug=getOption("oceDebug")) {
if (is.null(name))
stop("must specify 'name'")
if (name != "v")
stop("in adp objects, the only flag that can be set is for \"v\"")
setFlagsInternal(object, name, i, value, debug-1)
})
#' Summarize an ADP Object
#'
#' Summarize data in an `adp` object.
#'
#' Pertinent summary information is presented.
#'
#' @aliases summary.adp summary,adp,missing-method summary,adp-method
#'
#' @param object an object of class `"adp"`, usually, a result of a call
#' to [read.oce()], [read.adp.rdi()], or
#' [read.adp.nortek()].
#'
#' @param \dots further arguments passed to or from other methods.
#'
#' @return A matrix containing statistics of the elements of the `data`
#' slot.
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
setMethod(f="summary",
signature="adp",
definition=function(object, ...) {
mnames <- names(object@metadata)
cat("ADP Summary\n-----------\n\n", ...)
if ("instrumentType" %in% mnames)
cat(paste("* Instrument: ", object@metadata$instrumentType, "\n", sep=""), ...)
if ("manufacturere" %in% mnames)
cat("* Manufacturer: ", object@metadata$manufacturer, "\n")
if ("serialNumber" %in% mnames)
cat(paste("* Serial number: ", object@metadata$serialNumber, "\n", sep=""), ...)
if ("firmwareVersion" %in% mnames)
cat(paste("* Firmware version: ", object@metadata$firmwareVersion, "\n", sep=""), ...)
if ("filename" %in% mnames)
cat(paste("* Source filename: ``", object@metadata$filename, "``\n", sep=""), ...)
if ("latitude" %in% names(object@metadata)) {
cat(paste("* Location: ",
if (is.na(object@metadata$latitude)) "unknown latitude" else sprintf("%.5f N", object@metadata$latitude), ", ",
if (is.na(object@metadata$longitude)) "unknown longitude" else sprintf("%.5f E",
object@metadata$longitude),
"\n", sep=''))
}
v.dim <- dim(object[["v"]])
if (!is.ad2cp(object)) {
cat("* Number of profiles:", v.dim[1], "\n")
cat("* Number of cells: ", v.dim[2], "\n")
cat("* Number of beams: ", v.dim[3], "\n")
cat("* Cell size: ", object[["cellSize"]], "m\n")
}
if ("time" %in% names(object@data)) {
cat("* Summary of times between profiles:\n")
print(summary(diff(as.numeric(object@data$time))))
}
if (1 == length(agrep("nortek", object@metadata$manufacturer, ignore.case=TRUE))) {
resSpecific <- list(internalCodeVersion=object@metadata$internalCodeVersion,
hardwareRevision=object@metadata$hardwareRevision,
recSize=object@metadata$recSize*65536/1024/1024,
velocityRange=object@metadata$velocityRange,
firmwareVersion=object@metadata$firmwareVersion,
config=object@metadata$config,
configPressureSensor=object@metadata$configPressureSensor,
configMagnetometerSensor=object@metadata$configMagnetometerSensor,
configPressureSensor=object@metadata$configPressureSensor,
configTiltSensor=object@metadata$configTiltSensor,
configTiltSensorOrientation=object@metadata$configTiltSensorOrientation,
serialNumberHead=object@metadata$serialNumberHead,
blankingDistance=object@metadata$blankingDistance,
measurementInterval=object@metadata$measurementInterval,
deploymentName=object@metadata$deploymentName,
velocityScale=object@metadata$velocityScale)
} else if (1 == length(agrep("rdi", object@metadata$manufacturer, ignore.case=TRUE))) {
resSpecific <- list(instrumentSubtype=object@metadata[["instrumentSubtype"]],
manufacturer=object@metadata$manufacturer,
numberOfDataTypes=object@metadata$numberOfDataTypes,
ensembleInFile=object@metadata$ensembleInFile,
headingAlignment=object@metadata$headingAlignment,
headingBias=object@metadata$headingBias,
pingsPerEnsemble=object@metadata$pingsPerEnsemble,
bin1Distance=object@metadata$bin1Distance,
xmitPulseLength=object@metadata$xmitPulseLength,
oceBeamSpreaded=object@metadata$oceBeamSpreaded,
beamConfig=object@metadata$beamConfig)
} else if (1 == length(agrep("sontek", object@metadata$manufacturer, ignore.case=TRUE))) {
resSpecific <- list(cpuSoftwareVerNum=object@metadata$cpuSoftwareVerNum,
dspSoftwareVerNum=object@metadata$dspSoftwareVerNum,
boardRev=object@metadata$boardRev,
adpType=object@metadata$adpType,
slantAngle=object@metadata$slantAngle,
orientation=object@metadata$orientation)
} else {
resSpecific <- list(orientation=object@metadata$orientation)
#stop("can only summarize ADP objects of sub-type \"rdi\", \"sontek\", or \"nortek\", not class ", paste(class(object),collapse=","))
}
## 20170107: drop the printing of these. In the new scheme, we can subsample
## 20170107: files, and therefore do not read to the end, and it seems silly
## 20170107: to use time going through the whole file to find this out. If we
## 20170107: decide that this is needed, we could do a seek() to the end of the
## 20170107: and then go back to find the final time.
## cat(sprintf("* Measurements: %s %s to %s %s sampled at %.4g Hz\n",
## format(object@metadata$measurementStart), attr(object@metadata$measurementStart, "tzone"),
## format(object@metadata$measurementEnd), attr(object@metadata$measurementEnd, "tzone"),
## 1 / object@metadata$measurementDeltat))
## subsampleStart <- object@data$time[1]
## subsampleDeltat <- as.numeric(object@data$time[2]) - as.numeric(object@data$time[1])
## subsampleEnd <- object@data$time[length(object@data$time)]
## cat(sprintf("* Subsample: %s %s to %s %s sampled at %.4g Hz\n",
## format(subsampleStart), attr(subsampleStart, "tzone"),
## format(subsampleEnd), attr(subsampleEnd, "tzone"),
## 1 / subsampleDeltat))
metadataNames <- names(object@metadata)
cat("* Frequency: ", object[["frequency"]], "kHz\n", ...)
if ("ensembleNumber" %in% names(object@metadata)) {
cat(paste("* Ensemble Numbers: ", vectorShow(object@metadata$ensembleNumber, msg="")))
}
isAD2CP <- is.ad2cp(object)
if (!isAD2CP) {
if ("numberOfCells" %in% metadataNames) {
dist <- object[["distance"]]
if (object[["numberOfCells"]] > 1) {
cat(sprintf("* Cells: %d, centered at %.3f m to %.3f m, spaced by %.3f m\n",
object[["numberOfCells"]], dist[1], tail(dist, 1), diff(dist[1:2])), ...)
} else {
cat(sprintf("* Cells: one cell, centered at %.3f m\n", dist[1]), ...)
}
}
originalCoordinate <- object[["originalCoordinate"]]
oceCoordinate <- object[["oceCoordinate"]]
cat("* Coordinate system: ",
if (is.null(originalCoordinate)) "?" else originalCoordinate, "[originally],",
if (is.null(oceCoordinate)) "?" else oceCoordinate, "[presently]\n", ...)
numberOfBeams <- object[["numberOfBeams"]]
beamAngle <- object[["beamAngle"]]
## As of Aug 10, 2019, orientation may be a vector, so we summarize
## a table of values, if so.
orientation <- object[["orientation"]]
if (length(orientation) > 1) {
torientation <- table(orientation)
orientation <- paste(unlist(lapply(names(torientation),
function(x)
paste(x, torientation[[x]], sep=":"))),
collapse=", ")
}
beamUnspreaded <- object[["oceBeamUnspreaded"]]
cat("* Beams::\n")
cat(" Number: ", if (is.null(numberOfBeams)) "?" else numberOfBeams, "\n")
cat(" Slantwise Angle: ", if (is.null(beamAngle)) "?" else beamAngle , "\n")
cat(" Orientation: ", if (is.null(orientation)) "?" else orientation, "\n")
cat(" Unspreaded: ", if (is.null(beamUnspreaded)) "?" else beamUnspreaded, "\n")
}
transformationMatrix <- object[["transformationMatrix"]]
if (!is.null(transformationMatrix) && dim(transformationMatrix)[2] >= 3) {
digits <- 4
cat("* Transformation matrix::\n")
cat(" ", format(transformationMatrix[1, ], width=digits+4, digits=digits, justify="right"), "\n")
cat(" ", format(transformationMatrix[2, ], width=digits+4, digits=digits, justify="right"), "\n")
cat(" ", format(transformationMatrix[3, ], width=digits+4, digits=digits, justify="right"), "\n")
if (object[["numberOfBeams"]] > 3)
cat(" ", format(transformationMatrix[4, ], width=digits+4, digits=digits, justify="right"), "\n")
}
if (isAD2CP) {
default <- ad2cpDefaultDataItem(object)
for (rt in object[["recordTypes"]]) {
if (rt != "text") {
isTheDefault <- rt == default
cat("* Record type '", rt, "'", if (isTheDefault) " (the default item)::\n" else "::\n", sep="")
cat(" Number of profiles: ", length(object[["time", rt]]), "\n")
cat(" Number of cells: ", object[["numberOfCells", rt]], "\n")
cat(" Blanking distance: ", object[["blankingDistance", rt]], "\n")
cat(" Cell size: ", object[["cellSize", rt]], "\n")
numberOfBeams <- object[["numberOfBeams", rt]]
cat(" Number of beams: ", numberOfBeams, "\n")
cat(" Beam angle: ", if (numberOfBeams == 1) 0 else object[["beamAngle"]], "\n")
cat(" Coordinate system: ", object[["oceCoordinate", rt]], "\n")
}
}
processingLogShow(object)
invisible()
} else {
invisible(callNextMethod()) # summary
}
})
#' Concatenate adp objects
#'
#' @templateVar class adp
#'
#' @template concatenateTemplate
setMethod(f="concatenate",
signature="adp",
definition=function(object, ...) {
rval <- callNextMethod() # do general work
## Make the metadata profile count match the data array dimensions.
rval@metadata$numberOfSamples <- dim(rval@data$v)[1] # FIXME: handle AD2CP
## The general method didn't know that 'distance' was special, and should
## not be concatenated, so undo that.
rval@data$distance <- object@data$distance # FIXME: handle AD2CP
rval
})
#' @title Extract Something from an adp Object
#'
#' @param x an [adp-class] object.
#'
#' @examples
#' data(adp)
#' # Tests for beam 1, distance bin 1, first 5 observation times
#' adp[["v"]][1:5,1,1]
#' adp[["a"]][1:5,1,1]
#' adp[["a", "numeric"]][1:5,1,1]
#' as.numeric(adp[["a"]][1:5,1,1]) # same as above
#'
#' @template sub_subTemplate
#'
#' @section Details of the specialized `adp` method:
#'
#' In addition to the usual extraction of elements by name, some shortcuts
#' are also provided, e.g. `x[["u1"]]` retrieves `v[,1]`, and similarly
#' for the other velocity components. The `a` and `q`
#' data can be retrieved in [raw()] form or numeric
#' form (see examples). The coordinate system may be
#' retrieved with e.g. `x[["coordinate"]]`.
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
setMethod(f="[[",
signature(x="adp", i="ANY", j="ANY"),
definition=function(x, i, j, ...) {
##>message("top: i='", i, "'")
## if (i == "a") {
## if (!missing(j) && j == "numeric") {
## res <- x@data$a
## dim <- dim(res)
## res <- as.numeric(res)
## dim(res) <- dim
## } else {
## res <- x@data$a
## }
## res
## } else if (i == "q") {
## if (!missing(j) && j == "numeric") {
## res <- x@data$q
## dim <- dim(res)
## res <- as.numeric(res)
## dim(res) <- dim
## } else {
## res <- x@data$q
## }
## res
## if (i == "g") {
## if (!missing(j) && 1 == length("numeric", j)) {
## res <- x@data$g
## dim <- dim(res)
## res <- as.numeric(res)
## dim(res) <- dim
## } else {
## res <- x@data$g
## }
## res
##} else
ISAD2CP <- is.ad2cp(x)
##>message("ISAD2CP=", ISAD2CP)
if (i == "distance") {
##>message("asking for 'distance'")
if (ISAD2CP) {
## AD2CP is stored in a tricky way.
j <- if (missing(j)) ad2cpDefaultDataItem(x) else ad2cpDefaultDataItem(x, j)
res <- x@data[[j]]$blankingDistance + x@data[[j]]$cellSize*seq(1, x@data[[j]]$numberOfCells)
} else {
res <- x@data$distance
}
res
} else if (i %in% c("originalCoordinate", "oceCoordinate",
"cellSize", "blankingDistance", "orientation",
"beamUnspreaded", # Note: beamAngle is handled later since it is in metadata
"accelerometerx", "accelerometery", "accelerometerz",
"orientation", "heading", "pitch", "roll",
"ensemble", "time", "pressure", "soundSpeed",
"temperature", "temperatureMagnetometer", "temperatureRTC",
"nominalCorrelation",
"powerLevel", "transmitEnergy",
"v", "a", "q", "g",
"echosounder", "AHRS", "altimeterDistance", "altimeterFigureOfMerit")) {
##>message("asking for i='", i, "' which is in that long list")
##message("i='", i, "'")
metadataNames <- names(x@metadata)
##. dataNames <- names(x@data)
if (ISAD2CP) {
## AD2CP has 'burst' data records in one list, with 'average' records in another one.
## Permit e.g. "burst:numeric" and "burst numeric" ## FIXME: document this
returnNumeric <- FALSE # defult: leave 'raw' data as 'raw'.
if (missing(j)) {
##>message("0 a")
j <- ad2cpDefaultDataItem(x)
returnNumeric <- FALSE
jorig <- "(missing)"
##>message("'[[' is defaulting to '", j, "' type of data-record, since 'j' not specified", sep="")
} else {
jorig <- j
##>message("0 a. j='", j, "'")
## find out if numeric or raw, and clean 'j' of that flag once it is known
if (length(grep("numeric", j))) {
returnNumeric <- TRUE
j <- gsub("numeric", "", j)
##>message("0 b. j='", j, "'")
} else if (length(grep("raw", j))) {
returnNumeric <- FALSE
j <- gsub("raw", "", j)
##>message("0 c. j='", j, "'")
}
j <- gsub("[ :]+", "", j) # clean spaces or colons, if any
## Look up this name
##>message("0 d. j='", j, "'")
j <- ad2cpDefaultDataItem(x, j)
##>message("0 e. j='", j, "'")
}
##message("1. j = '", j, "'; jorig='", jorig, "'", sep="")
##numericMode <- 1 == length(grep("numeric", j))
##message("2. numericMode=", numericMode)
##j <- gsub("[: ]?numeric", "", j)
##message("3. j = '", j, "'", sep="")
#if (missing(j)) { # default to 'average', if it exists, or to 'burst' if that exists, or fail.
# j <- if (length(x@data$average)) "average" else if (length(x@data$burst))
# "burst" else stop("object's data slot does not contain either 'average' or 'burst'")
#}
##message("4. j = '", j, "'", sep="")
## Default to "average" if no j specified
if (1 == length(grep("^[ ]*$", j)))
j <- "average"
##>message("5. j = '", j, "'", sep="")
j <- ad2cpDefaultDataItem(x, j)
##>message("6. i='", i, "', j='", j, "', returnNumeric=", returnNumeric)
res <- x@data[[j]][[i]]
if (returnNumeric) {
##>message("6-a.")
dimres <- dim(res)
res <- as.numeric(res)
dim(res) <- dimres
}
##>message("7 res=", res)
res
} else {
if (!missing(j) && 1 == length(grep("numeric", j))) {
res <- x@data[[i]]
dim <- dim(res)
res <- as.numeric(res)
dim(res) <- dim
res
} else {
if (i %in% metadataNames) x@metadata[[i]] else x@data[[i]]
}
}
} else if (i %in% c("numberOfBeams", "numberOfCells")) {
##>message("asking for 'numberOfBeams' or 'numberOfCells'")
##message("AA i=", i)
if (ISAD2CP) {
j <- if (missing(j)) ad2cpDefaultDataItem(x) else ad2cpDefaultDataItem(x, j)
x@data[[j]][[i]]
} else {
x@metadata[[i]]
}
} else if (i == "transformationMatrix") {
##>message("0000")
if (ISAD2CP) {
##>message("AD2CP tm...")
theta <- x@metadata$beamAngle * atan2(1, 1) / 45
## The creation of a transformation matrix is covered in Section 5.3 of
## RD Instruments. ADCP Coordinate Transformation. RD Instruments, July 1998.
TMc <- 1 # for convex (diverging) beam setup; use -1 for concave
TMa <- 1 / (2 * sin(theta))
TMb <- 1 / (4 * cos(theta))
TMd <- TMa / sqrt(2)
rbind(c(TMc*TMa, -TMc*TMa, 0, 0),
c( 0, 0, -TMc*TMa, TMc*TMa),
c( TMb, TMb, TMb, TMb),
c( TMd, TMd, -TMd, -TMd))
} else {
## message("normal tm...")
x@metadata$transformationMatrix
}
} else if (i == "recordTypes") {
##>message("asking for 'recordTypes'")
## FIXME: _AD2CPrecordtype_ update if new record types added to read.adp.ad2cp()
if (ISAD2CP) {
allowed <- c("burst", "average", "bottomTrack", "interleavedBurst", "burstAltimeter",
"DVLBottomTrack", "echosounder", "waterTrack", "altimeter", "averageAltimeter", "text")
res <- allowed[allowed %in% names(x@data)]
} else {
res <- "depends on the data setup"
}
res
} else if (i == "va") {
##>message("asking for 'va'")
if (!"va" %in% names(x@data)) {
res <- NULL
} else {
if (!missing(j) && 1 == length(grep("numeric", j))) {
res <- x@data$va
dim <- dim(res)
res <- as.numeric(res)
dim(res) <- dim
} else {
res <- x@data$va
}
}
res
} else if (i == "vq") {
##>message("asking for 'vq'")
if (!"vq" %in% names(x@data)) {
res <- NULL
} else {
if (!missing(j) && 1 == length(grep("numeric", j))) {
res <- x@data$vq
dim <- dim(res)
res <- as.numeric(res)
dim(res) <- dim
} else {
res <- x@data$vq
}
}
res
} else if (i == "vg") {
##>message("asking for 'vg'")
if (!"vg" %in% names(x@data)) {
res <- NULL
} else {
if (!missing(j) && 1 == length(grep("numeric", j))) {
res <- x@data$vg
dim <- dim(res)
res <- as.numeric(res)
dim(res) <- dim
} else {
res <- x@data$vg
}
}
res
} else if (i == "vv") {
##>message("asking for 'vv'")
if (!"vv" %in% names(x@data)) {
res <- NULL
} else {
if (!missing(j) && 1 == length(grep("numeric", j))) {
res <- x@data$vv
dim <- dim(res)
res <- as.numeric(res)
dim(res) <- dim
} else {
res <- x@data$vv
}
}
res
} else {
callNextMethod() # [[
}
})
#' Replace Parts of an ADP Object
#'
#' In addition to the usual insertion of elements by name, note
#' that e.g. `pitch` gets stored into `pitchSlow`.
#'
#' @param x an [adp-class] object.
#'
#' @template sub_subsetTemplate
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
setMethod(f="[[<-",
          signature="adp",
          definition=function(x, i, j, ..., value) {
              ## FIXME: use j for e.g. times
              ## Store 'value' under name 'i', preferring an existing slot
              ## location: metadata first, then data, otherwise defer to the
              ## inherited replacement method.
              inMetadata <- i %in% names(x@metadata)
              inData <- i %in% names(x@data)
              if (inMetadata) {
                  x@metadata[[i]] <- value
              } else if (inData) {
                  x@data[[i]] <- value
              } else {
                  x <- callNextMethod(x=x, i=i, j=j, ...=..., value=value) # [[<-
              }
              ## Validity is deliberately not checked here, because a user may
              ## want to shorten items one by one, checking validity later.
              ## validObject(x)
              invisible(x)
          })
setValidity("adp",
            function(object) {
                ## Validity check for adp objects: the data slot must hold
                ## velocity ('v'), amplitude ('a') and quality ('q') arrays of
                ## matching dimension, and any per-profile vectors must match
                ## the length of the time vector.  Returns TRUE if valid;
                ## otherwise prints a diagnostic with cat() and returns FALSE.
                if (!("v" %in% names(object@data))) {
                    cat("object@data$v is missing")
                    return(FALSE)
                }
                if (!("a" %in% names(object@data))) {
                    cat("object@data$a is missing")
                    return(FALSE)
                }
                if (!("q" %in% names(object@data))) {
                    cat("object@data$q is missing")
                    return(FALSE)
                }
                mdim <- dim(object@data$v)
                ## isTRUE() is required because all.equal() returns a character
                ## string (not FALSE) on mismatch, and '!' applied to a string
                ## is an error.
                if ("a" %in% names(object@data) && !isTRUE(all.equal(mdim, dim(object@data$a)))) {
                    cat("dimension of 'a' is (", dim(object@data$a), "), which does not match that of 'v' (", mdim, ")\n")
                    return(FALSE)
                }
                ## BUG FIX: this message previously reported on 'a', not 'q'.
                if ("q" %in% names(object@data) && !isTRUE(all.equal(mdim, dim(object@data$q)))) {
                    cat("dimension of 'q' is (", dim(object@data$q), "), which does not match that of 'v' (", mdim, ")\n")
                    return(FALSE)
                }
                if ("time" %in% names(object@data)) {
                    n <- length(object@data$time)
                    for (item in c("pressure", "temperature", "salinity", "depth", "heading", "pitch", "roll")) {
                        if (item %in% names(object@data) && length(object@data[[item]]) != n) {
                            cat("length of time vector is ", n, " but the length of ", item, " is ",
                                length(object@data[[item]]), "\n")
                            return(FALSE)
                        }
                    }
                }
                ## Explicit TRUE (previously the function fell off the end,
                ## returning NULL, whenever 'time' was absent).
                TRUE
            })
#' Subset an ADP Object
#'
#' Subset an adp (acoustic Doppler profile) object, in a manner that is
#' somewhat analogous to [subset.data.frame()].
#'
#' For any data type,
#' subsetting can be by `time`, `ensembleNumber`, or `distance`.
#' These may not be combined, but it is easy to use a string of calls to
#' carry out combined operations, e.g.
#' `subset(subset(adp,distance<d0), time<t0)`
#'
#' For the special
#' case of AD2CP data (see [read.adp.ad2cp()]), it is possible to subset
#' to the "average" data records with `subset="average"`, to the
#' "burst" records with `subset="burst"`, or to the "interleavedBurst"
#' with `subset="interleavedBurst"`; note that no warning is issued,
#' if this leaves an object with no useful data.
#'
#' @param x an [adp-class] object.
#'
#' @param subset A condition to be applied to the `data` portion of
#' `x`. See \sQuote{Details}.
#'
#' @param ... Ignored.
#'
#' @return An [adp-class] object.
#'
#' @examples
#' library(oce)
#' data(adp)
#' # 1. Look at first part of time series, organized by time
#' earlyTime <- subset(adp, time < mean(range(adp[['time']])))
#' plot(earlyTime)
#'
#' # 2. Look at first ten ensembles (AKA profiles)
#' en <- adp[["ensembleNumber"]]
#' firstTen <- subset(adp, ensembleNumber < en[11])
#' plot(firstTen)
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
#' @family functions that subset oce objects
setMethod(f="subset",
          signature="adp",
          definition=function(x, subset, ...) {
              ## Subset an adp object by time, ensembleNumber, distance,
              ## pressure, or (for AD2CP data) record type.  The subset
              ## expression is captured as text so that we can tell which of
              ## those quantities it refers to; only one may be used per call.
              subsetString <- paste(deparse(substitute(subset)), collapse=" ")
              res <- x
              dots <- list(...)
              debug <- getOption("oceDebug")
              if (length(dots) && ("debug" %in% names(dots)))
                  debug <- dots$debug
              if (missing(subset))
                  stop("must give 'subset'")
              if (grepl("time", subsetString) || grepl("ensembleNumber", subsetString)) {
                  if (grepl("time", subsetString)) {
                      oceDebug(debug, "subsetting an adp by time\n")
                      if (length(grep("distance", subsetString)))
                          stop("cannot subset by both time and distance; split into multiple calls")
                      ## Evaluate the expression with data-slot items visible.
                      keep <- eval(substitute(subset), x@data, parent.frame(2))
                  } else if (grepl("ensembleNumber", subsetString)) {
                      oceDebug(debug, "subsetting an adp by ensembleNumber\n")
                      if (length(grep("distance", subsetString)))
                          stop("cannot subset by both ensembleNumber and distance; split into multiple calls")
                      if (!"ensembleNumber" %in% names(x@metadata))
                          stop("cannot subset by ensembleNumber because this adp object lacks that information")
                      ## ensembleNumber lives in the metadata slot.
                      keep <- eval(substitute(subset), x@metadata, parent.frame(2))
                  } else {
                      stop("internal coding error -- please report to developers")
                  }
                  names <- names(x@data)
                  haveDia <- "timeDia" %in% names
                  if (haveDia) {
                      ## Diagnostic ("Dia") items are sampled on their own time
                      ## base, so re-express the condition in terms of timeDia.
                      subsetDiaString <- gsub("time", "timeDia", subsetString)
                      keepDia <- eval(parse(text=subsetDiaString), x@data)
                      oceDebug(debug, "for diagnostics, keeping ", 100*sum(keepDia) / length(keepDia), "% of data\n")
                  }
                  oceDebug(debug, vectorShow(keep, "keeping bins:"))
                  oceDebug(debug, "number of kept bins:", sum(keep), "\n")
                  if (sum(keep) < 2)
                      stop("must keep at least 2 profiles")
                  res <- x
                  ## Update those metadata that have one value per ensemble
                  mnames <- names(x@metadata)
                  for (name in c("ensembleNumber", "orientation")) {
                      if (name %in% mnames)
                          res@metadata[[name]] <- x@metadata[[name]][keep]
                  }
                  ## FIXME: check to see if we handling slow timescale data properly
                  for (name in names(x@data)) {
                      if (length(grep("Dia$", name))) {
                          ## NOTE(review): keepDia is only defined when a
                          ## 'timeDia' item exists; a '*Dia' item without
                          ## 'timeDia' would fail here -- verify upstream.
                          if ("distance" == name)
                              next
                          if (name == "timeDia" || is.vector(x@data[[name]])) {
                              oceDebug(debug, "subsetting x@data$", name, ", which is a vector\n", sep="")
                              res@data[[name]] <- x@data[[name]][keepDia]
                          } else if (is.matrix(x@data[[name]])) {
                              oceDebug(debug, "subsetting x@data$", name, ", which is a matrix\n", sep="")
                              res@data[[name]] <- x@data[[name]][keepDia, ]
                          } else if (is.array(x@data[[name]])) {
                              oceDebug(debug, "subsetting x@data$", name, ", which is an array\n", sep="")
                              res@data[[name]] <- x@data[[name]][keepDia, , , drop=FALSE]
                          }
                      } else {
                          if (name == "time" || is.vector(x@data[[name]])) {
                              if ("distance" == name)
                                  next
                              oceDebug(debug, "subsetting x@data$", name, ", which is a vector\n", sep="")
                              res@data[[name]] <- x@data[[name]][keep] # FIXME: what about fast/slow
                          } else if (is.matrix(x@data[[name]])) {
                              oceDebug(debug, "subsetting x@data$", name, ", which is a matrix\n", sep="")
                              res@data[[name]] <- x@data[[name]][keep, ]
                          } else if (is.array(x@data[[name]])) {
                              oceDebug(debug, "subsetting x@data$", name, ", which is an array\n", sep="")
                              res@data[[name]] <- x@data[[name]][keep, , , drop=FALSE]
                          }
                      }
                  }
                  if ("v" %in% names(x@metadata$flags)) {
                      dim <- dim(x@metadata$flags$v)
                      res@metadata$flags$v <- x@metadata$flags$v[keep, , , drop=FALSE]
                      oceDebug(debug, "subsetting flags$v original dim=",
                               paste(dim, collapse="x"), "; new dim=",
                               paste(dim(res@metadata$flags$v), collapse="x"))
                  }
              } else if (length(grep("distance", subsetString))) {
                  oceDebug(debug, "subsetting an adp by distance\n")
                  if (length(grep("time", subsetString)))
                      stop("cannot subset by both time and distance; split into multiple calls")
                  keep <- eval(substitute(subset), x@data, parent.frame(2))
                  oceDebug(debug, vectorShow(keep, "keeping bins:"), "\n")
                  if (sum(keep) < 2)
                      stop("must keep at least 2 bins")
                  res <- x
                  res@data$distance <- x@data$distance[keep] # FIXME: broken for AD2CP
                  ## Distance is the second index of the 3-D arrays.
                  for (name in names(x@data)) {
                      if (name == "time")
                          next
                      if (is.array(x@data[[name]]) && 3 == length(dim(x@data[[name]]))) {
                          oceDebug(debug, "subsetting array data[[", name, "]] by distance\n")
                          oceDebug(debug, "before, dim(", name, ") =", dim(res@data[[name]]), "\n")
                          res@data[[name]] <- x@data[[name]][, keep, , drop=FALSE]
                          oceDebug(debug, "after, dim(", name, ") =", dim(res@data[[name]]), "\n")
                      }
                  }
                  oceDebug(debug, "names of flags: ", paste(names(x@metadata$flags), collapse=" "), "\n")
                  if ("v" %in% names(x@metadata$flags)) {
                      vdim <- dim(x@metadata$flags$v)
                      res@metadata$flags$v <- x@metadata$flags$v[, keep, , drop=FALSE]
                      oceDebug(debug, "subsetting flags$v original dim=",
                               paste(vdim, collapse="x"), "; new dim=",
                               paste(dim(res@metadata$flags$v), collapse="x"), "\n")
                  }
              } else if (length(grep("pressure", subsetString))) {
                  keep <- eval(substitute(subset), x@data, parent.frame(2))
                  res <- x
                  res@data$v <- res@data$v[keep, , ]
                  res@data$a <- res@data$a[keep, , ]
                  res@data$q <- res@data$q[keep, , ]
                  res@data$time <- res@data$time[keep]
                  if ("v" %in% names(x@metadata$flags)) {
                      dim <- dim(x@metadata$flags$v)
                      ## BUG FIX: flags$v is a 3-D array (as in the time and
                      ## distance branches above), so three index positions are
                      ## needed; the previous code used only two, which raised
                      ## "incorrect number of dimensions".
                      res@metadata$flags$v <- x@metadata$flags$v[keep, , , drop=FALSE]
                      oceDebug(debug, "subsetting flags$v original dim=",
                               paste(dim, collapse="x"), "; new dim=",
                               paste(dim(res@metadata$flags$v), collapse="x"))
                  }
                  ## the items below may not be in the dataset
                  names <- names(res@data)
                  if ("bottomRange" %in% names) res@data$bottomRange <- res@data$bottomRange[keep, ]
                  if ("pressure" %in% names) res@data$pressure <- res@data$pressure[keep]
                  if ("temperature" %in% names) res@data$temperature <- res@data$temperature[keep]
                  if ("salinity" %in% names) res@data$salinity <- res@data$salinity[keep]
                  if ("depth" %in% names) res@data$depth <- res@data$depth[keep]
                  if ("heading" %in% names) res@data$heading <- res@data$heading[keep]
                  if ("pitch" %in% names) res@data$pitch <- res@data$pitch[keep]
                  if ("roll" %in% names) res@data$roll <- res@data$roll[keep]
              } else if (length(grep("average", subsetString))) {
                  ## AD2CP: retain only the 'average' records.
                  res@data$burst <- NULL
                  res@data$interleavedBurst <- NULL
              } else if (length(grep("burst", subsetString))) {
                  ## AD2CP: retain only the 'burst' records.
                  res@data$average <- NULL
                  res@data$interleavedBurst <- NULL
              } else if (length(grep("interleavedBurst", subsetString))) {
                  ## AD2CP: retain only the 'interleavedBurst' records.
                  res@data$average <- NULL
                  res@data$burst <- NULL
              } else {
                  ## Error message now lists all the options actually handled
                  ## above (it previously omitted time, ensembleNumber and
                  ## pressure).
                  stop('subset should be "time", "ensembleNumber", "distance", "pressure", "average", "burst", or "interleavedBurst"; "',
                       subsetString, '" is not permitted')
              }
              res@metadata$numberOfSamples <- dim(res@data$v)[1] # FIXME: handle AD2CP
              res@metadata$numberOfCells <- dim(res@data$v)[2] # FIXME: handle AD2CP
              res@processingLog <- processingLogAppend(res@processingLog, paste("subset.adp(x, subset=", subsetString, ")", sep=""))
              res
          })
#' Create an ADP Object
#'
#' @details
#' Construct an [adp-class] object. Only a basic
#' subset of the typical `data` slot is represented in the arguments
#' to this function, on the assumption that typical usage in reading data
#' is to set up a nearly-blank [adp-class] object, the `data`
#' slot of which is then inserted. However, in some testing situations it
#' can be useful to set up artificial `adp` objects, so the other
#' arguments may be useful.
#'
#' @param time of observations in POSIXct format
#'
#' @param distance to centre of bins
#'
#' @param v array of velocities, with first index for time, second for bin number, and third for beam number
#'
#' @param a amplitude, a [raw()] array with dimensions matching `u`
#'
#' @param q quality, a [raw()] array with dimensions matching `u`
#'
#' @param orientation a string indicating sensor orientation, e.g. `"upward"` and `"downward"`
#'
#' @param coordinate a string indicating the coordinate system, `"enu"`, `"beam"`, `"xy"`, or `"other"`
#'
#' @return An [adp-class] object.
#'
#' @examples
#' data(adp)
#' t <- adp[["time"]]
#' d <- adp[["distance"]]
#' v <- adp[["v"]]
#' a <- as.adp(time=t, distance=d, v=v)
#'\donttest{
#' plot(a)
#'}
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
as.adp <- function(time, distance, v, a=NULL, q=NULL, orientation="upward", coordinate="enu")
{
    ## Build a minimal adp object from the given fields, then record the
    ## metadata items that downstream code expects (beam/cell counts,
    ## coordinate system, orientation, cell size, and units).
    res <- new("adp", time=time, distance=distance, v=v, a=a, q=q)
    if (!missing(v)) {
        velocityDim <- dim(v)
        res@metadata[["numberOfBeams"]] <- velocityDim[3] # FIXME: handle AD2CP
        res@metadata[["numberOfCells"]] <- velocityDim[2] # FIXME: handle AD2CP
    }
    res@metadata[["oceCoordinate"]] <- coordinate # FIXME: handle AD2CP
    res@metadata[["orientation"]] <- orientation # FIXME: handle AD2CP
    ## Cell size is inferred from the spacing of the first two distances.
    res@metadata[["cellSize"]] <- if (missing(distance)) NA else diff(distance[1:2]) # FIXME: handle AD2CP
    res@metadata[["units"]] <- list(v="m/s", distance="m")
    res
}
## head.adp <- function(x, n=6L, ...)
## {
## numberOfProfiles <- dim(x[["v"]])[1]
## if (n < 0)
## look <- seq.int(max(1, (1 + numberOfProfiles + n)), numberOfProfiles)
## else
## look <- seq.int(1, min(n, numberOfProfiles))
## res <- x
## for (name in names(x@data)) {
## if ("distance" == name)
## next
## if (is.vector(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look]
## } else if (is.matrix(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,]
## } else if (is.array(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,,]
## } else {
## res@data[[name]] <- x@data[[name]][look] # for reasons unknown, 'time' is not a vector
## }
## }
## res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
## res
## }
## tail.adp <- function(x, n = 6L, ...)
## {
## numberOfProfiles <- dim(x[["v"]])[1]
## if (n < 0)
## look <- seq.int(1, min(numberOfProfiles, numberOfProfiles + n))
## else
## look <- seq.int(max(1, (1 + numberOfProfiles - n)), numberOfProfiles)
## res <- x
## for (name in names(x@data)) {
## if (is.vector(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look]
## } else if (is.matrix(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,]
## } else if (is.array(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,,]
## } else {
## res@data[[name]] <- x@data[[name]][look] # for reasons unknown, 'time' is not a vector
## }
## }
## res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
## res
## }
#' Get names of Acoustic-Doppler Beams
#'
#' @param x an [adp-class] object.
#'
#' @param which an integer indicating beam number.
#'
#' @return A character string containing a reasonable name for the beam, of the
#' form `"beam 1"`, etc., for beam coordinates, `"east"`, etc. for
#' enu coordinates, `"u"`, etc. for `"xyz"`, or `"u'"`, etc.,
#' for `"other"` coordinates. The coordinate system is determined
#' with `x[["coordinate"]]`.
#'
#' @author Dan Kelley
#'
#' @seealso This is used by [read.oce()].
#' @family things related to adp data
#' @family things related to adv data
beamName <- function(x, which)
{
    ## Translate beam number(s) into display names appropriate to the
    ## coordinate system reported by x[["oceCoordinate"]].
    ##
    ## x: an object supporting [["oceCoordinate"]] (e.g. adp or adv).
    ## which: integer index (or indices) of the beam(s).
    ##
    ## Returns a character value: "beam 1" etc. for beam coordinates,
    ## "east"/"north"/"up"/"error" for enu, "u"/"v"/"w"/"e" for xyz,
    ## primed variants for "other", or a blank string otherwise.
    coordinate <- x[["oceCoordinate"]]
    if (coordinate == "beam")
        return(paste(gettext("beam", domain="R-oce"), 1:4)[which])
    if (coordinate == "enu")
        return(c(gettext("east", domain="R-oce"),
                 gettext("north", domain="R-oce"),
                 gettext("up", domain="R-oce"),
                 gettext("error", domain="R-oce"))[which])
    if (coordinate == "xyz")
        return(c("u", "v", "w", "e")[which])
    if (coordinate == "other")
        return(c("u'", "v'", "w'", "e")[which])
    ## Unknown coordinate system: return a blank label.
    " "
}
#' Read an ADP File
#'
#' Read an ADP data file, producing an [adp-class] object.
#'
#' Several file types can be handled. Some of
#' these functions are wrappers that map to device names, e.g.
#' `read.aquadoppProfiler` does its work by calling
#' `read.adp.nortek`; in this context, it is worth noting that the
#' ``aquadopp'' instrument is a one-cell profiler that might just as well have
#' been documented under the heading [read.adv()].
#'
#' @param manufacturer an optional character string indicating the manufacturer, used by
#' the general function `read.adp` to select a subsidiary function to use. If this
#' is not given, then [oceMagic()] is used to try to infer the type. If this
#' is provided, then the value `"rdi"` will cause [read.adp.rdi()]
#' to be used, `"nortek"` will cause [read.adp.nortek()] to be used,
#' and `"sontek"` will cause [read.adp.sontek()] to be used.
#'
#' @param despike if `TRUE`, [despike()] will be used to clean
#' anomalous spikes in heading, etc.
#' @template adpTemplate
#'
#' @author Dan Kelley and Clark Richards
#'
#' @family things related to adp data
read.adp <- function(file, from, to, by, tz=getOption("oceTz"),
                     longitude=NA, latitude=NA,
                     manufacturer,
                     monitor=FALSE, despike=FALSE, processingLog,
                     debug=getOption("oceDebug"),
                     ...)
{
    ## Read an ADP file, dispatching on 'manufacturer' if it is supplied
    ## (partial matches of "rdi", "nortek" or "sontek" are accepted), or
    ## falling back to read.oce(), which infers the file type.
    fromGiven <- !missing(from) # FIXME document THIS
    toGiven <- !missing(to) # FIXME document THIS
    byGiven <- !missing(by) # FIXME document THIS
    oceDebug(debug, "read.adp(\"", file, "\"",
             ", from=", if (fromGiven) format(from) else "(missing)",
             ", to=", if (toGiven) format(to) else "(missing)",
             ", by=", if (byGiven) format(by) else "(missing)",
             ", manufacturer=\"", if (missing(manufacturer)) "(missing)" else manufacturer, "\", ...) {\n",
             sep="", unindent=1)
    if (!fromGiven)
        from <- 1
    if (!byGiven)
        by <- 1
    if (!toGiven)
        to <- 0
    if (is.character(file)) {
        ## Check existence first: file.info() on a nonexistent file yields NA,
        ## which previously produced an obscure "missing value" error.
        if (!file.exists(file))
            stop("cannot find file \"", file, "\"")
        if (0 == file.info(file)$size)
            stop("empty file")
    }
    if (missing(manufacturer)) {
        oceDebug(debug, "using read.oce() since 'manufacturer' argument is missing\n")
        res <- read.oce(file=file, from=from, to=to, by=by, tz=tz,
                        longitude=longitude, latitude=latitude,
                        debug=debug-1, monitor=monitor, despike=despike,
                        ...)
    } else {
        ## BUG FIX: pmatch() returns an integer index (or NA), not a string.
        ## The previous code compared that integer against "rdi" etc., which
        ## never matched, so 'res' was left undefined.  Translate the index
        ## back to the matched name, and reject unrecognized values.
        choices <- c("rdi", "nortek", "sontek")
        manufacturer <- choices[pmatch(manufacturer, choices)]
        if (is.na(manufacturer))
            stop("manufacturer must (partially) match one of \"rdi\", \"nortek\" or \"sontek\"")
        oceDebug(debug, "inferred manufacturer to be \"", manufacturer, "\"\n")
        res <- if (manufacturer == "rdi") {
            read.adp.rdi(file=file, from=from, to=to, by=by, tz=tz,
                         longitude=longitude, latitude=latitude,
                         debug=debug-1, monitor=monitor, despike=despike,
                         processingLog=processingLog, ...)
        } else if (manufacturer == "nortek") {
            read.adp.nortek(file=file, from=from, to=to, by=by, tz=tz,
                            longitude=longitude, latitude=latitude,
                            debug=debug-1, monitor=monitor, despike=despike,
                            processingLog=processingLog, ...)
        } else if (manufacturer == "sontek") {
            read.adp.sontek(file=file, from=from, to=to, by=by, tz=tz,
                            longitude=longitude, latitude=latitude,
                            debug=debug-1, monitor=monitor, despike=despike,
                            processingLog=processingLog, ...)
        }
    }
    oceDebug(debug, "} # read.adp()\n", unindent=1)
    res
}
#' Plot ADP Data
#'
#' Create a summary plot of data measured by an acoustic doppler profiler.
#'
#' The plot may have one or more panels, with the content being controlled by
#' the `which` argument.
#'
#' * `which=1:4` (or `which="u1"` to `"u4"`) yield a
#' distance-time image plot of a velocity component. If `x` is in
#' `beam` coordinates (signalled by
#' `metadata$oce.coordinate=="beam"`), this will be the beam velocity,
#' labelled `b[1]` etc. If `x` is in xyz coordinates (sometimes
#' called frame coordinates, or ship coordinates), it will be the velocity
#' component to the right of the frame or ship (labelled `u` etc).
#' Finally, if `x` is in `"enu"` coordinates, the image will show the
#' eastward component (labelled `east`). If `x` is in
#' `"other"` coordinates, it will be component corresponding to east,
#' after rotation (labelled `u\'`). Note that the coordinate is set by
#' [read.adp()], or by [beamToXyzAdp()],
#' [xyzToEnuAdp()], or [enuToOtherAdp()].
#'
#' * `which=5:8` (or `which="a1"` to `"a4"`) yield
#' distance-time images of backscatter intensity of the respective beams. (For
#' data derived from Teledyne-RDI instruments, this is the item called ``echo
#' intensity.'')
#'
#' * `which=9:12` (or `which="q1"` to `"q4"`) yield
#' distance-time images of signal quality for the respective beams. (For data
#' derived from Teledyne-RDI instruments, this is the item called ``correlation
#' magnitude.'')
#'
#' * `which=60` or `which="map"` draw a map of location(s).
#'
#' * `which=70:73` (or `which="g1"` to `"g4"`) yield
#' distance-time images of percent-good for the respective beams. (For data
#' derived from Teledyne-RDI instruments, which are the only instruments that
#' yield this item, it is called ``percent good.'')
#'
#' * `which=80:83` (or `which="vv"`, `which="va"`,
#' `which="vq"`, and `which="vg"`) yield distance-time
#' images of the vertical beam fields for a 5 beam "SentinelV" ADCP
#' from Teledyne RDI.
#'
#' * `which="vertical"` yields a two panel distance-time
#' image of vertical beam velocity and amplitude.
#'
#' * `which=13` (or `which="salinity"`) yields a time-series plot
#' of salinity.
#'
#' * `which=14` (or `which="temperature"`) yields a time-series
#' plot of temperature.
#'
#' * `which=15` (or `which="pressure"`) yields a time-series plot
#' of pressure.
#'
#' * `which=16` (or `which="heading"`) yields a time-series plot
#' of instrument heading.
#'
#' * `which=17` (or `which="pitch"`) yields a time-series plot of
#' instrument pitch.
#'
#' * `which=18` (or `which="roll"`) yields a time-series plot of
#' instrument roll.
#'
#' * `which=19` yields a time-series plot of distance-averaged
#' velocity for beam 1, rightward velocity, eastward velocity, or
#' rotated-eastward velocity, depending on the coordinate system.
#'
#' * `which=20` yields a time-series of distance-averaged velocity for
#' beam 2, foreward velocity, northward velocity, or rotated-northward
#' velocity, depending on the coordinate system.
#'
#' * `which=21` yields a time-series of distance-averaged velocity for
#' beam 3, up-frame velocity, upward velocity, or rotated-upward velocity,
#' depending on the coordinate system.
#'
#' * `which=22` yields a time-series of distance-averaged velocity for
#' beam 4, for `beam` coordinates, or velocity estimate, for other
#' coordinates. (This is ignored for 3-beam data.)
#'
#' * `which="progressiveVector"` (or `which=23`) yields a progressive-vector diagram in the horizontal
#' plane, plotted with `asp=1`. Normally, the depth-averaged velocity
#' components are used, but if the `control` list contains an item named
#' `bin`, then the depth bin will be used (with an error resulting if the
#' bin is out of range).
#'
#' * `which=24` yields a time-averaged profile of the first component
#' of velocity (see `which=19` for the meaning of the component, in
#' various coordinate systems).
#'
#' * `which=25` as for 24, but the second component.
#'
#' * `which=26` as for 24, but the third component.
#'
#' * `which=27` as for 24, but the fourth component (if that makes
#' sense, for the given instrument).
#'
#' * `which=28` or `"uv"` yields velocity plot in the horizontal
#' plane, i.e. `u[2]` versus `u[1]`. If the number of data points is small, a
#' scattergraph is used, but if it is large, [smoothScatter()] is
#' used.
#'
#' * `which=29` or `"uv+ellipse"` as the `"uv"` case, but
#' with an added indication of the tidal ellipse, calculated from the eigen
#' vectors of the covariance matrix.
#'
#' * `which=30` or `"uv+ellipse+arrow"` as the
#' `"uv+ellipse"` case, but with an added arrow indicating the mean
#' current.
#'
#' * `which=40` or `"bottomRange"` for average bottom range from
#' all beams of the instrument.
#'
#' * `which=41` to `44` (or `"bottomRange1"` to
#' `"bottomRange4"`) for bottom range from beams 1 to 4.
#'
#' * `which=50` or `"bottomVelocity"` for average bottom velocity
#' from all beams of the instrument.
#'
#' * `which=51` to `54` (or `"bottomVelocity1"` to
#' `"bottomVelocity4"`) for bottom velocity from beams 1 to 4.
#'
#' * `which=55` (or `"heaving"`) for time-integrated,
#' depth-averaged, vertical velocity, i.e. a time series of heaving.
#'
#' * `which=100` (or `"soundSpeed"`) for a time series of sound speed.
#'
#'
#' In addition to the above, the following shortcuts are defined:
#'
#' * `which="velocity"` equivalent to `which=1:3` or `1:4`
#' (depending on the device) for velocity components.
#'
#' * `which="amplitude"` equivalent to `which=5:7`
#' or `5:8` (depending on the device) for backscatter intensity
#' components.
#'
#' * `which="quality"` equivalent to `which=9:11` or `9:12`
#' (depending on the device) for quality components.
#'
#' * `which="hydrography"` equivalent to `which=14:15`
#' for temperature and pressure.
#'
#' * `which="angles"` equivalent to `which=16:18` for
#' heading, pitch and roll.
#'
#' The color scheme for image plots (`which` in 1:12) is provided by the
#' `col` argument, which is passed to [image()] to do the actual
#' plotting. See \dQuote{Examples} for some comparisons.
#'
#' A common quick-look plot to assess mooring movement is to use
#' `which=15:18` (pressure being included to signal the tide, and tidal
#' currents may dislodge a mooring or cause it to settle).
#'
#' By default, `plot,adp-method` uses a `zlim` value for the
#' [image()] that is constructed to contain all the data, but to be
#' symmetric about zero. This is done on a per-panel basis, and the scale is
#' plotted at the top-right corner, along with the name of the variable being
#' plotted. You may also supply `zlim` as one of the \dots{} arguments,
#' but be aware that a reasonable limit on horizontal velocity components is
#' unlikely to be of much use for the vertical component.
#'
#' A good first step in the analysis of measurements made from a moored device
#' (stored in `d`, say) is to do `plot(d, which=14:18)`. This shows
#' time series of water properties and sensor orientation, which is helpful in
#' deciding which data to trim at the start and end of the deployment, because
#' they were measured on the dock or on the ship as it travelled to the mooring
#' site.
#'
#' @param x an [adp-class] object.
#'
#' @param which list of desired plot types. These are graphed in panels
#' running down from the top of the page. If `which` is not given,
#' the plot will show images of the distance-time dependence of velocity
#' for each beam. See \dQuote{Details} for the meanings of various values of `which`.
#'
#' @param j optional string specifying a sub-class of `which`. For
#' Nortek Aquadopp profilers, this may either be `"default"` (or missing)
#' to get the main signal, or `"diagnostic"` to get a diagnostic
#' signal. For Nortek AD2CP profiles, this may be any one of
#' `"average"` (or missing) for averaged data, `"burst"`
#' for burst data, or `"interleaved burst"` for interleaved burst data;
#' more data types are provided by that instrument, and may be added here
#' at some future time.
#'
#' @param col optional indication of color(s) to use. If not provided, the
#' default for images is `oce.colorsPalette(128,1)`, and for lines and
#' points is black.
#'
#' @param breaks optional breaks for color scheme
#'
#' @param zlim a range to be used as the `zlim` parameter to the
#' [imagep()] call that is used to create the image. If omitted,
#' `zlim` is set for each panel individually, to encompass the data of the
#' panel and to be centred around zero. If provided as a two-element vector,
#' then that is used for each panel. If provided as a two-column matrix, then
#' each panel of the graph uses the corresponding row of the matrix; for
#' example, setting `zlim=rbind(c(-1,1),c(-1,1),c(-.1,.1))` might make
#' sense for `which=1:3`, so that the two horizontal velocities have one
#' scale, and the smaller vertical velocity has another.
#'
#' @param titles optional vector of character strings to be used as labels for
#' the plot panels. For images, these strings will be placed in the right hand
#' side of the top margin. For timeseries, these strings are ignored. If this
#' is provided, its length must equal that of `which`.
#'
#' @param lwd if the plot is of a time-series or scattergraph format with
#' lines, this is used in the usual way; otherwise, e.g. for image formats,
#' this is ignored.
#'
#' @param type if the plot is of a time-series or scattergraph format, this is
#' used in the usual way, e.g. `"l"` for lines, etc.; otherwise, as for
#' image formats, this is ignored.
#'
#' @param ytype character string controlling the type of the y axis for images
#' (ignored for time series). If `"distance"`, then the y axis will be
#' distance from the sensor head, with smaller distances nearer the bottom of
#' the graph. If `"profile"`, then this will still be true for
#' upward-looking instruments, but the y axis will be flipped for
#' downward-looking instruments, so that in either case, the top of the graph
#' will represent the sample nearest the sea surface.
#'
#' @param drawTimeRange boolean that applies to panels with time as the
#' horizontal axis, indicating whether to draw the time range in the top-left
#' margin of the plot.
#'
#' @param useSmoothScatter boolean that indicates whether to use
#' [smoothScatter()] in various plots, such as `which="uv"`. If
#' not provided a default is used, with [smoothScatter()] being used
#' if there are more than 2000 points to plot.
#'
#' @param missingColor color used to indicate `NA` values in images (see
#' [imagep()]); set to `NULL` to avoid this indication.
#'
#' @template mgpTemplate
#'
#' @template marTemplate
#'
#' @param mai.palette margins, in inches, to be added to those calculated for
#' the palette; alter from the default only with caution
#'
#' @param tformat optional argument passed to [oce.plot.ts()], for
#' plot types that call that function. (See [strptime()] for the
#' format used.)
#'
#' @param marginsAsImage boolean, `TRUE` to put a wide margin to the right
#' of time-series plots, even if there are no images in the `which` list.
#' (The margin is made wide if there are some images in the sequence.)
#'
#' @param cex numeric character expansion factor for plot symbols; see [par()].
#'
#' @param cex.axis,cex.lab character expansion factors for axis numbers and axis names; see [par()].
#'
#' @param xlim optional 2-element list for `xlim`, or 2-column matrix, in
#' which case the rows are used, in order, for the panels of the graph.
#'
#' @param ylim optional 2-element list for `ylim`, or 2-column matrix, in
#' which case the rows are used, in order, for the panels of the graph.
#'
#' @param control optional list of parameters that may be used for different
#' plot types. Possibilities are `drawBottom` (a boolean that indicates
#' whether to draw the bottom) and `bin` (a numeric giving the index of
#' the bin on which to act, as explained in \dQuote{Details}).
#'
#' @param useLayout set to `FALSE` to prevent using [layout()]
#' to set up the plot. This is needed if the call is to be part of a sequence
#' set up by e.g. `par(mfrow)`.
#'
#' @param coastline a `coastline` object, or a character string naming
#' one. This is used only for `which="map"`. See notes at
#' [plot,ctd-method()] for more information on built-in coastlines.
#'
#' @param span approximate span of map in km
#'
#' @param main main title for plot, used just on the top panel, if there are
#' several panels.
#'
#' @param grid if `TRUE`, a grid will be drawn for each panel. (This
#' argument is needed because calling [grid()] after doing a
#' sequence of plots will not yield useful results for the individual
#' panels.)
#'
#' @param grid.col color of grid
#'
#' @param grid.lty line type of grid
#'
#' @param grid.lwd line width of grid
#' @template debugTemplate
#'
#' @param \dots optional arguments passed to plotting functions. For example,
#' supplying `despike=TRUE` will cause time-series panels to be de-spiked
#' with [despike()]. Another common action is to set the color for
#' missing values on image plots, with the argument `missingColor` (see
#' [imagep()]). Note that it is an error to give `breaks` in
#' \dots{}, if the formal argument `zlim` was also given, because they
#' could contradict each other.
#'
#' @return A list is silently returned, containing `xat` and `yat`,
#' values that can be used by [oce.grid()] to add a grid to the plot.
#'
#' @examples
#' library(oce)
#' data(adp)
#' plot(adp, which=1:3)
#' plot(adp, which='temperature', tformat='%H:%M')
#'
#' @author Dan Kelley
#'
#' @family functions that plot oce data
#' @family things related to adp data
#'
#' @aliases plot.adp
## DEVELOPER NOTE: update first test in tests/testthat/test_adp.R if a new 'which' is handled
setMethod(f="plot",
signature=signature("adp"),
definition=function(x, which, j,
col, breaks, zlim,
titles,
lwd=par('lwd'),
type='l',
ytype=c("profile", "distance"),
drawTimeRange=getOption("oceDrawTimeRange"),
useSmoothScatter,
missingColor="gray",
mgp=getOption("oceMgp"),
mar=c(mgp[1]+1.5, mgp[1]+1.5, 1.5, 1.5),
mai.palette=rep(0, 4),
tformat,
marginsAsImage=FALSE,
cex=par("cex"), cex.axis=par("cex.axis"), cex.lab=par("cex.lab"),
xlim, ylim,
control,
useLayout=FALSE,
coastline="coastlineWorld", span=300,
main="",
grid=FALSE, grid.col='darkgray', grid.lty='dotted', grid.lwd=1,
debug=getOption("oceDebug"),
...)
{
debug <- max(0, min(debug, 4))
oceDebug(debug, "plot,adp-method(x, ",
argShow(mar),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(mgp),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(which),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(cex),
argShow(cex.axis),
argShow(cex.lab),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(breaks),
argShow(j),
"...) {\n", sep="", unindent=1, style="bold")
## oceDebug(debug, "par(mar)=", paste(par('mar'), collapse=" "), "\n")
## oceDebug(debug, "par(mai)=", paste(par('mai'), collapse=" "), "\n")
## oceDebug(debug, "par(mfg)=", paste(par('mfg'), collapse=" "), "\n")
## oceDebug(debug, "mai.palette=", paste(mai.palette, collapse=" "), "\n")
if ("adorn" %in% names(list(...)))
warning("In plot,adp-method() : the 'adorn' argument was removed in November 2017", call.=FALSE)
instrumentType <- x[["instrumentType"]]
if (is.null(instrumentType))
instrumentType <- "" # simplifies later checks
oceDebug(debug, "instrumentType=\"", instrumentType, "\"\n", sep="")
## interpret mode, j
if (missing(j))
j <- ""
if (instrumentType == "aquadopp") {
if (!missing(j) && j == "diagnostic") {
if (x[["numberOfCells"]] != 1) {
warning("This object claims to be Nortek Aquadopp, but there is more than 1 cell, so it must not be; so j=\"diagnostic\" is being ignored")
j <- 'normal'
}
if (!("timeDia" %in% names(x@data))) {
warning("This instrument did not record Diagnostic data, so j=\"diagnostic\" is being ignored")
j <- "normal"
}
}
} else if (instrumentType == "AD2CP") {
jOrig <- j
j <- ad2cpDefaultDataItem(x, j)
if (j != jOrig)
oceDebug(debug, "given the object contents, 'j' was changed from \"", jOrig, "\" to \"", j, "\", for this Nortek AD2CP instrument\n", sep="")
}
if (missing(which)) {
## Note that j is ignored for e.g. RDI adp.
which <- 1:dim(x[["v", j]])[3]
oceDebug(debug, "setting which=c(", paste(which, collapse=","), "), based on the data\n", sep="")
}
colGiven <- !missing(col)
breaksGiven <- !missing(breaks)
zlimGiven <- !missing(zlim)
if (breaksGiven && zlimGiven)
stop("cannot supply both zlim and breaks")
ylimGiven <- !missing(ylim)
oceDebug(debug, 'ylimGiven=', ylimGiven, '\n')
res <- list(xat=NULL, yat=NULL)
if (ylimGiven)
oceDebug(debug, "ylim=c(", paste(ylim, collapse=", "), ")\n")
if (!inherits(x, "adp"))
stop("method is only for objects of class '", "adp", "'")
if (!(is.null(x@metadata$haveActualData) || x@metadata$haveActualData))
stop("In plot,adp-method() : there are no profiles in this dataset", call.=FALSE)
opar <- par(no.readonly = TRUE)
nw <- length(which)
fac <- if (nw < 3) 1 else 0.66 # try to emulate par(mfrow)
## par(cex=cex*fac, cex.axis=fac*cex.axis, cex.lab=fac*cex.lab) # BUILD-TEST FAILURE
## par(cex=cex*fac) # OK
oceDebug(debug, "adp.R:1759 cex=", cex, ", original par('cex')=", par('cex'), style="blue")
##par(cex=cex*fac, cex.axis=fac*cex.axis) # OK
par(cex.axis=fac*cex.axis, cex.lab=fac*cex.lab) # OK
oceDebug(debug, "adp.R:1761 ... after par() call, have par('cex')=", par('cex'), style="blue")
rm(fac)
numberOfBeams <- x[["numberOfBeams", j]]
oceDebug(debug, "numberOfBeams=", numberOfBeams, " (note: j=\"", j, "\")\n", sep="")
numberOfCells <- x[["numberOfCells", j]]
oceDebug(debug, "numberOfCells=", numberOfCells, " (note: j=\"", j, "\")\n", sep="")
if (nw == 1) {
pm <- pmatch(which, c("velocity", "amplitude", "quality", "hydrography", "angles"))
## FIXME: decide what to do about 5-beam ADCPs
if (!is.na(pm)) {
if (pm == 1)
which <- 0 + seq(1, min(4, numberOfBeams)) # 5th beam not included
else if (pm == 2)
which <- 4 + seq(1, min(4, numberOfBeams)) # 5th beam not included
else if (pm == 3)
which <- 8 + seq(1, min(4, numberOfBeams)) # 5th beam not included
else if (pm == 4)
which <- 14:15
else if (pm == 5)
which <- 16:18
nw <- length(which)
}
}
if (!missing(titles) && length(titles) != nw)
stop("length of 'titles' must equal length of 'which'")
if (nw > 1)
on.exit(par(opar))
if (is.numeric(which)) {
whichFraction <- which - floor(which)
which <- floor(which)
} else {
whichFraction <- rep(0, length(which))
}
par(mgp=mgp, mar=mar, cex=cex)
dots <- list(...)
ytype <- match.arg(ytype)
## user may specify a matrix for xlim and ylim
if (ylimGiven) {
if (is.matrix(ylim)) {
if (dim(ylim)[2] != nw) {
ylim2 <- matrix(ylim, ncol=2, nrow=nw, byrow=TRUE) # FIXME: is this what I want?
}
} else {
ylim2 <- matrix(ylim, ncol=2, nrow=nw, byrow=TRUE) # FIXME: is this what I want?
}
class(ylim2) <- class(ylim)
ylim <- ylim2
}
xlimGiven <- !missing(xlim)
if (xlimGiven) {
if (is.matrix(xlim)) {
if (dim(xlim)[2] != nw) {
xlim2 <- matrix(xlim, ncol=2, nrow=nw) # FIXME: is this what I want?
}
} else {
if (length(xlim) != 2)
stop("xlim must be a vector of length 2, or a 2-column matrix")
xlim2 <- matrix(xlim[1:2], ncol=2, nrow=nw, byrow=TRUE)
}
class(xlim2) <- class(xlim)
attr(xlim2, "tzone") <- attr(xlim, "tzone")
xlim <- xlim2
}
if (missing(zlim)) {
zlimGiven <- FALSE
zlimAsGiven <- NULL
} else {
zlimGiven <- TRUE
if (is.vector(zlim)) {
if (length(zlim) == 2) {
zlimAsGiven <- matrix(rep(zlim, length(which)), ncol=2, byrow=TRUE)
} else {
stop("zlim must be a vector of length 2, or a matrix with 2 columns")
}
} else {
## FIXME: should this be made into a matrix?
zlimAsGiven <- zlim
}
}
ylimAsGiven <- if (ylimGiven) ylim else NULL
if (missing(lwd))
lwd <- rep(par('lwd'), length.out=nw)
else
lwd <- rep(lwd, length.out=nw)
if (missing(main))
main <- rep('', length.out=nw)
else
main <- rep(main, length.out=nw)
## oceDebug(debug, "later on in plot,adp-method:\n")
## oceDebug(debug, " par(mar)=", paste(par('mar'), collapse=" "), "\n")
## oceDebug(debug, " par(mai)=", paste(par('mai'), collapse=" "), "\n")
oceDebug(debug, "which:", which, "\n")
whichOrig <- which
which <- oce.pmatch(which,
list(u1=1, u2=2, u3=3, u4=4,
a1=5, a2=6, a3=7, a4=8,
q1=9, q2=10, q3=11, q4=12,
g1=70, g2=71, g3=72, g4=73,
salinity=13,
temperature=14,
pressure=15,
heading=16,
pitch=17,
roll=18,
progressiveVector=23,
uv=28,
"uv+ellipse"=29,
"uv+ellipse+arrow"=30,
bottomRange=40,
bottomRange1=41, bottomRange2=42, bottomRange3=43, bottomRange4=44,
bottomVelocity=50,
bottomVelocity1=51, bottomVelocity2=52, bottomVelocity3=53, bottomVelocity4=54,
heaving=55,
map=60,
soundSpeed=100,
velocity=1:3,
amplitude=5:7,
quality=9:11,
hydrography=14:15,
angles=16:18,
vertical=80:81,
vv=80, va=81, vq=82, vg=83))
nw <- length(which) # may be longer with e.g. which='velocity'
if (any(is.na(which)))
stop("plot,adp-method(): unrecognized 'which' code: ", paste(whichOrig[is.na(which)], collapse=" "),
call.=FALSE)
oceDebug(debug, "which:", which, "(after conversion to numerical codes)\n")
## FIXME: delete this comment-block after key plot types are checked.
## I had this as a test, in early Nov 2018. But now, I prefer
##OLD if ("instrumentType" %in% names(x@metadata) && !is.null(x@metadata$instrumentType) && x@metadata$instrumentType == "AD2CP") {
##OLD if (!all(which %in% 1:4))
##OLD warning("In plot,adp-method() : only 'which' <5 has been tested", call.=FALSE)
##OLD }
images <- c(1:12, 70:73, 80:83)
timeseries <- c(13:22, 40:44, 50:54, 55, 100)
spatial <- 23:27
#speed <- 28
tt <- x[["time", j]]
##ttDia <- x@data$timeDia # may be null
class(tt) <- "POSIXct" # otherwise image() gives warnings
if (!zlimGiven && all(which %in% 5:8)) {
## single scale for all 'a' (amplitude) data
zlim <- range(abs(as.numeric(x[["a"]][, , which[1]-4])), na.rm=TRUE) # FIXME name of item missing, was ma
if (length(which) > 1) {
for (w in 2:length(which)) {
zlim <- range(abs(c(zlim, x[["a"]][, , which[w]-4])), na.rm=TRUE) # FIXME: check name
}
}
}
##oceDebug(debug, "useLayout=", useLayout, "\n")
showBottom <- ("bottomRange" %in% names(x@data)) && !missing(control) && !is.null(control["drawBottom"])
if (showBottom)
bottom <- apply(x@data$bottomRange, 1, mean, na.rm=TRUE)
oceDebug(debug, "showBottom=", showBottom, "\n")
oceDebug(debug, "cex=", cex, ", par('cex')=", par('cex'), style="blue")
if (useLayout) {
if (any(which %in% images) || marginsAsImage) {
w <- 1.5
lay <- layout(matrix(1:(2*nw), nrow=nw, byrow=TRUE), widths=rep(c(1, lcm(w)), nw))
oceDebug(debug, "calling layout(matrix...)\n")
oceDebug(debug, "using layout, since this is an image, or has marginsAsImage\n")
} else {
if (nw != 1 || which != 23) {
lay <- layout(cbind(1:nw))
oceDebug(debug, "calling layout(cbind(1:", nw, ")\n")
oceDebug(debug, "using layout\n")
}
}
} else {
if (nw > 1) {
par(mfrow=c(nw, 1))
oceDebug(debug, "calling par(mfrow=c(", nw, ", 1)\n")
}
}
flipy <- ytype == "profile" && x@metadata$orientation[1] == "downward"
##message("numberOfBeams=", numberOfBeams)
##message("numberOfCells=", numberOfCells)
haveTimeImages <- any(which %in% images) && 1 < numberOfCells
oceDebug(debug, 'haveTimeImages=', haveTimeImages, '(if TRUE, it means any timeseries graphs get padding on RHS)\n')
par(mar=mar, mgp=mgp)
if (haveTimeImages) {
oceDebug(debug, "setting up margin spacing before plotting\n", style="italic")
oceDebug(debug, " before: ", vectorShow(par("mar")), unindent=1, style="blue")
## Since zlim not given, this just does calculations
drawPalette(#cex.axis=cex * (1 - min(nw / 8, 1/4)),
debug=debug-1)
oceDebug(debug, " after: ", vectorShow(par("mar")), unindent=1, style="blue")
}
omar <- par("mar")
oceDebug(debug, vectorShow(omar), style="red")
##oceDebug(debug, "drawTimeRange=", drawTimeRange, "\n", sep="")
oceDebug(debug, "cex=", cex, ", par('cex')=", par('cex'), style="blue")
for (w in 1:nw) {
oceDebug(debug, "plot,adp-method top of loop (before setting par('mar'))\n", style="italic")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
oceDebug(debug, vectorShow(omar), style="blue")
par(mar=omar) # ensures all panels start with original mar
oceDebug(debug, "which[", w, "]=", which[w], "\n", sep="", style="red")
if (which[w] %in% images) {
## image types
skip <- FALSE
numberOfBeams <- x[["numberOfBeams", j]]
v <- x[["v"]]
if (which[w] %in% 1:4) {
## velocity
if (instrumentType == "aquadopp" && j == "diagnostic") {
oceDebug(debug, "a diagnostic velocity component image/timeseries\n")
z <- x@data$vDia[, , which[w]]
zlab <- if (missing(titles)) paste(beamName(x, which[w]), "Dia", sep="") else titles[w]
xdistance <- x[["distance", j]]
oceDebug(debug, vectorShow(xdistance))
y.look <- if (ylimGiven) (ylimAsGiven[w, 1] <= xdistance & xdistance <= ylimAsGiven[w, 2]) else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else
max(abs(x@data$vDia[, y.look, which[w]]), na.rm=TRUE) * c(-1, 1)
} else {
oceDebug(debug, "a velocity component image/timeseries\n")
z <- x[["v", j]][, , which[w]]
zlab <- if (missing(titles)) beamName(x, which[w]) else titles[w]
oceDebug(debug, "zlab:", zlab, "\n")
xdistance <- x[["distance", j]]
oceDebug(debug, vectorShow(xdistance))
y.look <- if (ylimGiven) ylimAsGiven[w, 1] <= xdistance & xdistance <= ylimAsGiven[w, 2] else rep(TRUE, length(xdistance))
oceDebug(debug, vectorShow(y.look))
if (0 == sum(y.look))
stop("no data in the provided ylim=c(", paste(ylimAsGiven[w, ], collapse=","), ")")
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else max(abs(z[, y.look]), na.rm=TRUE) * c(-1, 1)
}
oceDebug(debug, "zlim: ", paste(zlim, collapse=" "), "\n")
}
oceDebug(debug, "flipy =", flipy, "\n")
} else if (which[w] %in% 5:8) {
oceDebug(debug, "which[", w, "]=", which[w], "; this is some type of amplitude\n", sep="")
## amplitude
if (j == "diagnostic" && "aDia" %in% names(x@data)) {
oceDebug(debug, "a diagnostic amplitude component image/timeseries\n")
z <- x[["aDia", "numeric"]][, , which[w]-4]
xdistance <- x[["distance", j]]
oceDebug(debug, vectorShow(xdistance))
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
oceDebug(debug, vectorShow(y.look))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(z[, y.look], na.rm=TRUE)
}
zlab <- c(expression(aDia[1]), expression(a[2]), expression(aDia[3]), expression(aDia[4]))[which[w]-4]
} else {
oceDebug(debug, "an amplitude component image/timeseries\n")
a <- x[["a", paste(j, "numeric")]]
z <- a[, , which[w]-4]
dim(z) <- dim(a)[1:2]
oceDebug(debug, "accessed data, of dim=", paste(dim(z), collapse="x"), "\n")
##OLD dim(z) <- dim(x@data$a)[1:2] # FIXME: why was this here?
xdistance <- x[["distance", j]]
oceDebug(debug, vectorShow(xdistance))
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
oceDebug(debug, vectorShow(y.look))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(as.numeric(z[, y.look]), na.rm=TRUE)
}
oceDebug(debug, "zlim: ", paste(zlim, collapse=" "), "\n")
zlab <- c(expression(a[1]), expression(a[2]), expression(a[3]), expression(a[4]))[which[w]-4]
oceDebug(debug, "zlab: '", as.character(zlab), "'\n")
}
} else if (which[w] %in% 9:12) {
oceDebug(debug, " which[",w,"]=",which[w],": quality or correlation\n",sep="")
## correlation, or quality. First, try 'q', then 'amp'
q <- x[["q", paste(j, "numeric")]]
if (!is.null(q)) {
oceDebug(debug, "[['q']] works for this object\n")
z <- q[, , which[w]-8]
dim(z) <- dim(q)[1:2]
rm(q)
zlim <- c(0, 256)
zlab <- c(expression(q[1]), expression(q[2]), expression(q[3]))[which[w]-8]
} else {
amp <- x[["amp"]]
if (!is.null(amp)) {
oceDebug(debug, "[['amp']] works for this object\n")
z <- amp[, , which[w]-8]
dim(z) <- dim(amp)[1:2]
rm(amp)
zlim <- c(0, max(z, na.rm=TRUE))
zlab <- c(expression(amp[1]), expression(amp[2]), expression(amp[3]))[which[w]-8]
} else {
stop("In plot,adp-method() : ADP object lacks both 'q' and 'amp' data items", call.=FALSE)
}
}
} else if (which[w] %in% 70:(69+x[["numberOfBeams"]])) {
## correlation
xg <- x[["g", paste(j, "numeric")]]
if (!is.null(xg)) {
z <- as.numeric(xg[, , which[w]-69])
dim(z) <- dim(xg)[1:2]
rm(xg)
zlim <- c(0, 100)
zlab <- c(expression(g[1]), expression(g[2]), expression(g[3]))[which[w]-8]
} else {
stop("In plot,adp-method() : ADP object lacks a 'g' data item", call.=FALSE)
}
} else if (which[w] == 80) {
## vertical beam velocity
z <- x[["vv", j]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam velocity\n")
zlab <- if (missing(titles)) expression(w[vert]) else titles[w]
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[w, 1] <= xdistance & xdistance <= ylimAsGiven[w, 2] else rep(TRUE, length(xdistance))
if (0 == sum(y.look))
stop("no data in the provided ylim=c(", paste(ylimAsGiven[w, ], collapse=","), ")")
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else c(-1, 1)
}
} else {
stop("In plot,adp-method() : ADP object lacks a 'vv' data item, so which=80 and which=\"vv\" cannot work", call.=FALSE)
}
} else if (which[w] == 81) {
## vertical beam amplitude
z <- x[["va", paste(j, "numeric")]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam amplitude\n")
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(as.numeric(x@data$va[, y.look]), na.rm=TRUE)
}
zlab <- expression(a[vert])
} else {
stop("In plot,adp-method() : ADP object lacks a 'va' data item, so which=81 and which=\"va\" cannot work", call.=FALSE)
}
} else if (which[w] == 82) {
## vertical beam correlation
z <- x[["vq", paste(j, "numeric")]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam correlation\n")
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(as.numeric(x@data$vq[, y.look]), na.rm=TRUE)
}
zlab <- expression(q[vert])
} else {
stop("In plot,adp-method() : ADP object lacks a 'vq' data item, so which=82 and which=\"vq\" cannot work", call.=FALSE)
}
} else if (which[w] == 83) {
## vertical beam percent good
z <- x[["vg", paste(j, "numeric")]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam percent good\n")
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(x[["vg", "numeric"]][, y.look], na.rm=TRUE)
}
zlab <- expression(g[vert])
} else {
stop("In plot,adp-method() : ADP object lacks a 'vg' data item, so which=83 and which=\"vg\" cannot work", call.=FALSE)
}
} else {
skip <- TRUE
}
if (!skip) {
if (numberOfCells > 1) {
if (xlimGiven) {
oceDebug(debug, "about to call imagep() with xlim given: par('cex')=", par("cex"), ", cex=", cex, style="blue")
oceDebug(debug, "xlimGiven case\n")
ats <- imagep(x=tt, y=x[["distance", j]], z=z,
xlim=xlim[w, ],
zlim=zlim,
flipy=flipy,
col=if (colGiven) col else {
if (missing(breaks)) oce.colorsPalette(128, 1)
else oce.colorsPalette(length(breaks)-1, 1)
},
breaks=breaks,
ylab=resizableLabel("distance km"),
xlab="Time",
zlab=zlab,
tformat=tformat,
drawTimeRange=drawTimeRange,
drawContours=FALSE,
missingColor=missingColor,
mgp=mgp,
mar=omar,
mai.palette=mai.palette,
cex=1,
main=main[w],
debug=debug-1,
...)
} else {
oceDebug(debug, "about to call imagep() with no xlim. cex=", cex, ", par('cex')=", par("cex"), ", par('cex.axis')=", par("cex.axis"), style="blue")
oceDebug(debug, "about to do an image plot with no xlim given, with cex=", cex, ", par(\"cex\")=", par("cex"), ", nw=", nw, ", cex sent to oce.plots=", cex*(1-min(nw/8, 1/4)), "\n")
oceDebug(debug, " with par('mar')=c(", paste(par('mar'),collapse=","), ", mar=c(", paste(mar,collapse=","), ") and mgp=c(",paste(mgp,collapse=","),")", "\n")
oceDebug(debug, " with time[1]=", format(tt[[1]], "%Y-%m-%d %H:%M:%S"), "\n")
ats <- imagep(x=tt, y=x[["distance", j]], z=z,
zlim=zlim,
flipy=flipy,
ylim=if (ylimGiven) ylim[w, ] else range(x[["distance", j]], na.rm=TRUE),
col=if (colGiven) col else { if (missing(breaks)) oce.colorsPalette(128, 1) else oce.colorsPalette(length(breaks)-1, 1) },
breaks=breaks,
ylab=resizableLabel("distance"),
xaxs="i",
xlab="Time",
zlab=zlab,
tformat=tformat,
drawTimeRange=drawTimeRange,
drawContours=FALSE,
missingColor=missingColor,
mgp=mgp,
mar=mar,
mai.palette=mai.palette,
cex=1,
main=main[w],
debug=debug-1,
...)
}
if (showBottom)
lines(x[["time", j]], bottom)
} else {
col <- if (colGiven) rep(col, length.out=nw) else rep("black", length.out=nw)
time <- if (j== "diagnostic") x@data$timeDia else x[["time"]]
tlim <- range(time)
ats <- oce.plot.ts(time, z, ylab=zlab,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
res$xat <- ats$xat
res$yat <- ats$yat
}
}
drawTimeRange <- FALSE
} else if (which[w] %in% timeseries) {
## time-series types
col <- if (colGiven) rep(col, length.out=nw) else rep("black", length.out=nw)
oceDebug(debug, "graph ", w, " is a timeseries\n", sep="")
##par(mgp=mgp, mar=mar, cex=cex)
tlim <- range(x[["time", j]])
if (which[w] == 13) {
oceDebug(debug, "which[", w, "] == 13 (salinity)\n", sep="")
if (haveTimeImages) drawPalette(debug=debug-1)
ats <- oce.plot.ts(x[["time", j]], x[["salinity", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("S"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else if (which[w] == 14) {
oceDebug(debug, "which[", w, "] == 14 (temperature)\n", sep="")
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "temperatureDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$temperatureDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=expression(paste("Diagnostic T [ ", degree, "C ]")),
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["temperature", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=expression(paste("T [ ", degree, "C ]")),
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 15) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
oceDebug(debug, "which[", w, "] == 15 (pressure)\n", sep="")
if (j == "diagnostic" && "pressureDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$pressureDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="pDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
oceDebug(debug, "about to do non-diagnostic pressure plot, with cex=", cex, ", par(\"cex\")=", par("cex"), ", nw=", nw, ", cex sent to oce.plots=", cex*(1-min(nw/8, 1/4)), "\n", sep="", style="italic")
oceDebug(debug, vectorShow(mar), style="blue")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
oceDebug(debug, vectorShow(haveTimeImages), style="blue")
oceDebug(debug, "time[1]=", format(x[["time",j]][1], "%Y-%m-%d %H:%M:%S"), "\n", style="blue")
ats <- oce.plot.ts(x[["time", j]], x[["pressure", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("p"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 16) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "headingDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$headingDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="headingDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["heading", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("heading"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 17) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "pitchDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$pitchDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="pitchDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["pitch", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("pitch"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 18) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "rollDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$rollDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="rollDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["roll", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("roll"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 19) {
if (x[["numberOfBeams"]] > 0) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 1], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 1),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 1 because the device no beams", call.=FALSE)
}
} else if (which[w] == 20) {
if (x[["numberOfBeams"]] > 1) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 2], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 2),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 2 because the device has only ", x[["numberOfBeams"]], " beams", call.=FALSE)
}
} else if (which[w] == 21) {
if (x[["numberOfBeams"]] > 2) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 3], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 3),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 3 because the device has only", x[["numberOfBeams"]], "beams", call.=FALSE)
}
} else if (which[w] == 22) {
if (x[["numberOfBeams"]] > 3) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 4], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 4),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 4 because the device has only", x[["numberOfBeams"]], "beams", call.=FALSE)
}
} else if (which[w] == 55) {
## heaving
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
dt <- as.numeric(x[["time"]][2]) - as.numeric(x[["time"]][1])
ats <- oce.plot.ts(x[["time", j]], dt * cumsum(apply(x[["v", j]][, , 3], 1, mean, na.rm=TRUE)),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="Heaving [m]",
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
drawTimeRange <- FALSE
} else if (which[w] == 100) {
oceDebug(debug, "draw(ctd, ...) of type 'soundSpeed'\n")
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], x[["soundSpeed", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="Sound Speed [m/s]",
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
} else if (which[w] %in% 40:44) {
## bottomRange
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
n <- prod(dim(x[["v"]])[1:2])
if ("br" %in% names(x@data)) {
if (which[w] == 40) {
R <- apply(x@data$br, 1, mean, na.rm=TRUE)
ats <- oce.plot.ts(x[["time", j]], R,
ylab="Bottom range [m]",
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(R, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
tformat=tformat,
mar=omar,
debug=debug-1)
} else {
R <- x@data$br[, which[w]-40]
ats <- oce.plot.ts(x[["time"]], R,
ylab=paste("Beam", which[w]-40, "bottom range [m]"),
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(R, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
tformat=tformat,
mar=omar,
debug=debug-1)
}
} else {
stop("In plot,adp-method() : ADP object lacks bottom-tracking data, so which=40:44 and which=\"bottomRange[*]\" cannot work", call.=FALSE)
}
} else if (which[w] %in% 50:54) {
## bottom velocity
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
n <- prod(dim(x[["v"]])[1:2])
if ("bv" %in% names(x@data)) {
if (which[w] == 50) {
V <- apply(x@data$bv, 1, mean, na.rm=TRUE)
ats <- oce.plot.ts(x[["time"]], V,
ylab="Bottom speed [m/s]",
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(V, na.rm=TRUE),
tformat=tformat,
cex=1, cex.axis=1, cex.lab=1,
mar=omar,
debug=debug-1)
} else {
V <- x@data$bv[, which[w]-50]
ats <- oce.plot.ts(x[["time"]], V,
ylab=paste("Beam", which[w]-50, "bottom velocity [m/s]"),
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(V, na.rm=TRUE),
tformat=tformat,
cex=1, cex.axis=1, cex.lab=1,
mar=omar,
debug=debug-1)
}
} else {
stop("In plot,adp-method() : ADP object lacks bottom-tracking data, so which=50:54 and which=\"bottomVelocity[*]\" cannot work", call.=FALSE)
}
}
## FIXME delete the next block, after testing.
if (marginsAsImage && useLayout) {
## FIXME: I think this should be deleted
## blank plot, to get axis length same as for images
omar <- par("mar")
par(mar=c(mar[1], 1/4, mgp[2]+1/2, mgp[2]+1))
plot(1:2, 1:2, type='n', axes=FALSE, xlab="", ylab="", cex=1, cex.axis=1, cex.lab=1)
par(mar=omar)
}
} else if (which[w] %in% spatial) {
## various spatial types
if (which[w] == 23) {
## progressive vector
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
if (j == 'diagnostic')
dt <- as.numeric(difftime(x@data$timeDia[2], x@data$timeDia[1], units="sec")) # FIXME: should not assume all equal
else
dt <- as.numeric(difftime(x[["time"]][2], x[["time"]][1], units="sec")) # FIXME: should not assume all equal
mPerKm <- 1000
if (j == 'diagnostic') {
U <- x@data$vDia[, 1, 1]
V <- x@data$vDia[, 1, 2]
ttt <- x@data$timeDia
} else {
U <- x[["v", j]][, , 1]
V <- x[["v", j]][, , 2]
ttt <- x[["time", j]]
}
if (!missing(control) && !is.null(control$bin)) {
if (control$bin < 1)
stop("In plot,adp-method() : cannot have control$bin less than 1, but got ", control$bin, call.=FALSE)
max.bin <- dim(x[["v"]])[2]
if (control$bin > max.bin)
stop("In plot,adp-method() : cannot have control$bin larger than ", max.bin, " but got ", control$bin, call.=FALSE)
u <- U[, control$bin] #EAC: bug fix, attempt to subset 2D matrix by 3 dimensions
v <- V[, control$bin]
} else {
if (x[["numberOfCells", j]] > 1) {
u <- apply(U, 1, mean, na.rm=TRUE)
v <- apply(V, 1, mean, na.rm=TRUE)
} else {
u <- U
v <- V
}
}
u[is.na(u)] <- 0 # zero out missing
v[is.na(v)] <- 0
xDist <- integrateTrapezoid(ttt, u, 'cA') / mPerKm
yDist<- integrateTrapezoid(ttt, v, 'cA') / mPerKm
plot(xDist, yDist, xlab="km", ylab="km", type='l', asp=1,
col=if (colGiven) col else "black",
cex=1, cex.axis=1, cex.lab=1,
...)
xaxp <- par("xaxp")
xat <- seq(xaxp[1], xaxp[2], length.out=1+xaxp[3])
yaxp <- par("yaxp")
yat <- seq(yaxp[1], yaxp[2], length.out=1+yaxp[3])
ats <- list(xat=xat, yat=yat)
} else if (which[w] %in% 24:27) {
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
if (which[w] == 27 && x[["numberOfBeams"]] < 4) {
stop("In plot,adp-method() : cannot use which=27 for a 3-beam instrument", call.=FALSE)
} else {
value <- apply(x[["v", j]][, , which[w]-23], 2, mean, na.rm=TRUE)
yy <- x[["distance", j]]
if (ytype == "profile" && x@metadata$orientation[1] == "downward" && !ylimGiven) {
plot(value, yy, xlab=beamName(x, which[w]-23),
ylab=resizableLabel("distance"), type='l', ylim=rev(range(yy)),
cex=1, cex.axis=1, cex.lab=1,
...)
} else {
plot(value, yy, xlab=beamName(x, 1),
ylab=resizableLabel("distance"), type='l',
cex=1, cex.axis=1, cex.lab=1,
...)
}
xaxp <- par("xaxp")
xat <- seq(xaxp[1], xaxp[2], length.out=1+xaxp[3])
yaxp <- par("yaxp")
yat <- seq(yaxp[1], yaxp[2], length.out=1+yaxp[3])
ats <- list(xat=xat, yat=yat)
}
}
} else if (which[w] %in% 28:30) {
## "uv", "uv+ellipse", or "uv+ellipse+arrow"
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
n <- dim(x[["v", j]])[1]
if (!missing(control) && !is.null(control$bin)) {
if (control$bin < 1)
stop("In plot,adp-method() : cannot have control$bin less than 1, but got ", control$bin, call.=FALSE)
max.bin <- dim(x[["v"]])[2]
if (control$bin > max.bin)
stop("In plot,adp-method() : cannot have control$bin larger than ", max.bin, " but got ", control$bin, call.=FALSE)
u <- x[["v", j]][, control$bin, 1]
v <- x[["v", j]][, control$bin, 2]
} else {
if (x[["numberOfCells", j]] > 1) {
u <- apply(x[["v"]][, , 1], 1, mean, na.rm=TRUE)
v <- apply(x[["v"]][, , 2], 1, mean, na.rm=TRUE)
} else {
u <- x[["v", j]][, 1, 1]
v <- x[["v", j]][, 1, 2]
}
}
oceDebug(debug, "uv type plot\n")
if (n < 5000 || (!missing(useSmoothScatter) && !useSmoothScatter)) {
if ("type" %in% names(dots)) {
plot(u, v,
xlab=resizableLabel("u"),
ylab=resizableLabel("v"),
asp=1, col=if (colGiven) col else "black",
xlim=if (xlimGiven) xlim[w, ] else range(u, na.rm=TRUE),
ylim=if (ylimGiven) ylim[w, ] else range(v, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
...)
} else {
plot(u, v,
xlab=resizableLabel("u"),
ylab=resizableLabel("v"),
type='n', asp=1,
xlim=if (xlimGiven) xlim[w, ] else range(u, na.rm=TRUE),
ylim=if (ylimGiven) ylim[w, ] else range(v, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
...)
points(u, v, cex=cex/2, col=if (colGiven) col else "black")
}
} else {
smoothScatter(u, v,
xlab=resizableLabel("u"),
ylab=resizableLabel("v"),
asp=1,
xlim=if (xlimGiven) xlim[w, ] else range(u, na.rm=TRUE),
ylim=if (ylimGiven) ylim[w, ] else range(v, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
...)
}
xaxp <- par("xaxp")
xat <- seq(xaxp[1], xaxp[2], length.out=1+xaxp[3])
yaxp <- par("yaxp")
yat <- seq(yaxp[1], yaxp[2], length.out=1+yaxp[3])
ats <- list(xat=xat, yat=yat)
if (main[w] != "") {
oceDebug(debug, "about to title the plot with character size ", cex.lab*par("cex"), "\n")
mtext(main[w], adj=1, cex=cex.lab*par("cex"))
}
if (which[w] >= 29 && which[w] < 40) {
ok <- !is.na(u) & !is.na(v)
e <- eigen(cov(data.frame(u[ok], v[ok])))
major <- sqrt(e$values[1]) # major
minor <- sqrt(e$values[2]) # minor
theta <- seq(0, 2*pi, length.out=360/5)
xx <- major * cos(theta)
yy <- minor * sin(theta)
theta0 <- atan2(e$vectors[2, 1], e$vectors[1, 1])
##cat("major", major, "minor", minor, "theta0", theta0, "\n")
rotate <- rbind(c(cos(theta0), -sin(theta0)),
c(sin(theta0), cos(theta0)))
xxyy <- rotate %*% rbind(xx, yy)
col <- if (colGiven) col else "black"
lines(xxyy[1, ], xxyy[2, ], lwd=4, col="white")
lines(xxyy[1, ], xxyy[2, ], lwd=2, col=col)
res$ellipseMajor <- major
res$ellipseMinor <- minor
res$ellipseAngle <- theta
if (which[w] >= 30) {
if (!missing(control) && !is.null(control$bin)) {
if (control$bin < 1)
stop("In plot,adp-method() : cannot have control$bin less than 1, but got ", control$bin, call.=FALSE)
max.bin <- dim(x[["v"]])[2]
if (control$bin > max.bin)
stop("In plot,adp-method() : cannot have control$bin larger than ", max.bin, " but got ", control$bin, call.=FALSE)
umean <- mean(x[["v", j]][, control$bin, 2], na.rm=TRUE)
vmean <- mean(x[["v", j]][, control$bin, 2], na.rm=TRUE)
} else {
umean <- mean(x[["v", j]][, , 1], na.rm=TRUE)
vmean <- mean(x[["v", j]][, , 2], na.rm=TRUE)
}
res$meanU <- umean
res$meanV <- vmean
arrows(0, 0, umean, vmean, lwd=4, length=1/10, col="white")
arrows(0, 0, umean, vmean, lwd=2, length=1/10, col=col)
}
}
} else if (which[w] == 60) {
oceDebug(debug, "draw(adp, ...) of type MAP\n")
## get coastline file
if (is.character(coastline)) {
if (coastline == "none") {
if (!is.null(x@metadata$station) && !is.na(x@metadata$station)) {
plot(x@metadata$longitude, x@metadata$latitude, xlab="", ylab="",
cex=1, cex.axis=1, cex.lab=1)
} else {
stop("In plot,adp-method() : no latitude or longitude in object's metadata, so cannot draw map", call.=FALSE)
}
} else {
## named coastline
if (!exists(paste("^", coastline, "$", sep=""))) {
## load it, if necessary
if (requireNamespace("ocedata", quietly=TRUE)) {
if (coastline == "best") {
best <- coastlineBest(span=span, debug=debug-1)
data(list=best, package="oce", envir=environment())
coastline <- get(best)
} else if (coastline == "coastlineWorld") {
data("coastlineWorld", package="oce", envir=environment())
coastline <- get("coastlineWorld")
} else if (coastline == "coastlineWorldFine") {
data("coastlineWorldFine", package="ocedata", envir=environment())
coastline <- get("coastlineWorldFine")
} else if (coastline == "coastlineWorldMedium") {
data("coastlineWorldMedium", package="ocedata", envir=environment())
coastline <- get("coastlineWorldMedium")
} else {
stop("there is no built-in coastline file of name \"", coastline, "\"")
}
} else {
data("coastlineWorld", package="oce", envir=environment())
coastline <- get("coastlineWorld")
}
}
}
## FIXME: span should be an arg
if ("firstLatitude" %in% names(x@data)) {
lat <- x[["firstLatitude"]]
lon <- x[["firstLongitude"]]
##asp <- 1 / cos(mean(lat, na.rm=TRUE) * pi / 180)
plot(coastline, clatitude=mean(lat, na.rm=TRUE),
clongitude=mean(lon, na.rm=TRUE),
span=span,
cex=1, cex.axis=1, cex.lab=1)
points(lon, lat)
} else if ("latitude" %in% names(x@metadata)) {
lat <- x[["latitude"]]
lon <- x[["longitude"]]
if (is.finite(lat) && is.finite(lon)) {
plot(coastline, clatitude=lat, clongitude=lon, span=50,
cex=1, cex.axis=1, cex.lab=1)
points(x[["longitude"]], x[["latitude"]], cex=2*par('cex'))
} else {
stop("In plot,adp-method() : nothing to map", call.=FALSE)
}
} else {
stop("In plot,adp-method() : nothing to map", call.=FALSE)
}
}
} else {
stop("In plot,adp-method() : unknown value of which (", which[w], ")", call.=FALSE)
}
if (is.logical(grid[1]) && grid[1])
grid(col=grid.col, lty=grid.lty, lwd=grid.lwd)
oceDebug(debug, "plot,adp-method bottom of loop, before reseting par('mar'):\n", style="italic")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
par(mar=omar) # prevent margin creep if we have non-images after images (issue 1632 item 2)
oceDebug(debug, "...after reseting par('mar'):", style="italic")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
}
par(cex=opar$cex, cex.axis=opar$cex.axis, cex.lab=opar$cex.lab)
if (exists("ats")) {
res$xat <- ats$xat
res$yat <- ats$yat
}
oceDebug(debug, "} # plot,adp-method()\n", unindent=1, style="bold")
invisible(res)
})
#' Convert an ADP Object to ENU Coordinates
#'
#' Convert the velocity components of an [adp-class] object to the
#' east-north-up (ENU) coordinate system, whatever the present coordinate
#' system may be. Data in `"beam"` coordinates are first converted to
#' `"xyz"` with [beamToXyzAdp()]; data in `"xyz"` or `"sfm"` coordinates
#' are converted directly with [xyzToEnuAdp()]. Data already in `"enu"`
#' coordinates (or in an unrecognized system) are returned unaltered,
#' with a warning.
#'
#' @param x an [adp-class] object.
#'
#' @param declination magnetic declination to be added to the heading, to get
#' ENU with N as "true" north.
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object in ENU coordinates, if conversion was
#' possible; otherwise `x`, unaltered.
#'
#' @author Dan Kelley
#'
#' @seealso See [read.adp()] for notes on functions relating to
#' `"adp"` objects. Also, see [beamToXyzAdp()] and
#' [xyzToEnuAdp()].
#'
#' @references
#' \url{https://www.nortekgroup.com/faq/how-is-a-coordinate-transformation-done}
#' @family things related to adp data
toEnuAdp <- function(x, declination=0, debug=getOption("oceDebug"))
{
    debug <- if (debug > 0) 1 else 0
    oceDebug(debug, "toEnuAdp() {\n", unindent=1)
    coord <- x[["oceCoordinate"]]
    if (coord == "beam") {
        ## two-stage conversion: beam -> xyz -> enu
        x <- xyzToEnuAdp(beamToXyzAdp(x, debug=debug-1), declination=declination, debug=debug-1)
    } else if (coord %in% c("xyz", "sfm")) {
        ## xyz and sfm coordinates are both handled directly by xyzToEnuAdp()
        x <- xyzToEnuAdp(x, declination=declination, debug=debug-1)
    } else if (coord == "enu") {
        warning("toEnuAdp cannot convert, object is already in coordinate system ENU, returning argument as-is")
    } else {
        warning("toEnuAdp cannot convert from coordinate system ", coord, " to ENU, so returning argument as-is")
    }
    oceDebug(debug, "} # toEnuAdp()\n", unindent=1)
    x
}
#' Adjust ADP Signal for Spherical Spreading
#'
#' Compensate ADP signal strength for spherical spreading.
#'
#' First, beam echo intensity is converted from counts to decibels, by
#' multiplying by `count2db`. Then, the signal decrease owing to
#' spherical spreading is compensated for by adding the term
#' \eqn{20\log10(r)}{20*log10(r)}, where \eqn{r}{r} is the distance from the
#' sensor head to the water from which scattering is occurring. \eqn{r}{r} is
#' given by `x[["distance"]]`.
#'
#' @param x an [adp-class] object.
#'
#' @param count2db a set of coefficients, one per beam, to convert from beam
#' echo intensity to decibels.
#'
#' @param asMatrix a boolean that indicates whether to return a numeric matrix,
#' as opposed to returning an updated object (in which the matrix is cast to a
#' raw value).
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object (or a numeric array, if `asMatrix` is `TRUE`).
#'
#' @author Dan Kelley
#'
#' @references The coefficient to convert to decibels is a personal
#' communication. The logarithmic term is explained in textbooks on acoustics,
#' optics, etc.
#'
#' @examples
#' library(oce)
#' data(adp)
#' plot(adp, which=5) # beam 1 echo intensity
#' adp.att <- beamUnspreadAdp(adp)
#' plot(adp.att, which=5) # beam 1 echo intensity
#' ## Profiles
#' par(mar=c(4, 4, 1, 1))
#' a <- adp[["a", "numeric"]] # second arg yields matrix return value
#' distance <- adp[["distance"]]
#' plot(apply(a,2,mean), distance, type='l', xlim=c(0,256))
#' lines(apply(a,2,median), distance, type='l',col='red')
#' legend("topright",lwd=1,col=c("black","red"),legend=c("original","attenuated"))
#' ## Image
#' plot(adp.att, which="amplitude",col=oce.colorsJet(100))
#'
#' @family things related to adp data
beamUnspreadAdp <- function(x, count2db=c(0.45, 0.45, 0.45, 0.45), asMatrix=FALSE, debug=getOption("oceDebug"))
{
    oceDebug(debug, "beamUnspreadAdp(...) {\n", unindent=1)
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    ## Guard against double application; the first flag name was used by an
    ## older version of this function, and is retained for compatibility.
    if (!is.null(x@metadata$oceBeamUnattenuated) && x@metadata$oceBeamUnattenuated) {
        warning("the beams are already unspreaded in this dataset.")
        return(x)
    }
    if (!is.null(x@metadata$oceBeamUnspreaded) && x@metadata$oceBeamUnspreaded) {
        warning("the beams are already unspreaded in this dataset")
        return(x)
    }
    numberOfProfiles <- dim(x@data$a)[1]
    oceDebug(debug, "numberOfProfiles=", numberOfProfiles, "\n")
    ## The 20*log10(r) spherical-spreading term, replicated across profiles so
    ## it can be added directly to each profile-by-cell amplitude matrix.
    correction <- matrix(rep(20 * log10(x[["distance"]]), numberOfProfiles),
                         nrow=numberOfProfiles, byrow=TRUE)
    numberOfBeams <- x[["numberOfBeams"]] # hoisted out of the loops below
    if (asMatrix) {
        res <- array(double(), dim=dim(x@data$a))
        for (beam in seq_len(numberOfBeams)) {
            oceDebug(debug, "beam=", beam, "\n")
            res[, , beam] <- count2db[beam] * as.numeric(x@data$a[, , beam]) + correction
        }
    } else {
        res <- x
        for (beam in seq_len(numberOfBeams)) {
            oceDebug(debug, "beam=", beam, "\n")
            ## Clamp to 0..255, the range representable by a raw byte.
            tmp <- floor(count2db[beam] * as.numeric(x@data$a[, , beam]) + correction)
            tmp[tmp < 0] <- 0
            tmp[tmp > 255] <- 255
            res@data$a[, , beam] <- as.raw(tmp)
        }
        res@metadata$oceBeamUnspreaded <- TRUE
        res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    }
    oceDebug(debug, "} # beamUnspreadAdp()\n", unindent=1)
    res
}
#' Convert ADP From Beam to XYZ Coordinates
#'
#' Convert ADP velocity components from a beam-based coordinate system to a
#' xyz-based coordinate system. The action depends on the type of object.
#' Objects creating by reading RDI Teledyne, Sontek, and some Nortek
#' instruments are handled directly. However, Nortek
#' data stored in in the AD2CP format are handled by the specialized
#' function [beamToXyzAdpAD2CP()], the documentation for which
#' should be consulted, rather than the material given below.
#'
#' For a 3-beam Nortek `aquadopp` object, the beams are transformed into
#' velocities using the matrix stored in the header.
#'
#' For 4-beam objects (and for the slanted 4 beams of 5-beam
#' objects), the along-beam velocity components \eqn{B_1}{B1}
#' \eqn{B_2}{B2}, \eqn{B_3}{B3}, and \eqn{B_4}{B4}
#' are converted to Cartesian velocity components \eqn{u}{u}
#' \eqn{v}{v} and \eqn{w}{w}
#' using formulae from section 5.5 of *RD Instruments* (1998), viz. the
#' along-beam velocity components \eqn{B_1}{B1}, \eqn{B_2}{B2}, \eqn{B_3}{B3},
#' and \eqn{B_4}{B4} are used to calculate velocity components in a cartesian
#' system referenced to the instrument using the following formulae:
#' \eqn{u=ca(B_1-B_2)}{u=c*a*(B1-B2)}, \eqn{v=ca(B_4-B_3)}{v=c*a*(B4-B3)},
#' \eqn{w=-b(B_1+B_2+B_3+B_4)}{w=-b*(B1+B2+B3+B4)}. In addition to these,
#' an estimate of the
#' error in velocity is computed as
#' \eqn{e=d(B_1+B_2-B_3-B_4)}{e=d*(B1+B2-B3-B4)}.
#' The geometrical factors in these formulae are:
#' `c` is +1 for convex beam geometry or -1 for concave beam geometry,
#' \eqn{a=1/(2\sin\theta)}{a=1/(2*sin(theta))}
#' where \eqn{\theta}{theta} is the angle the beams make to the axial direction
#' (which is available as `x[["beamAngle"]]`),
#' \eqn{b=1/(4\cos\theta)}{b=1/(4*cos(theta))}, and
#' \eqn{d=a/\sqrt{2}}{d=a/sqrt(2)}.
#'
#' @param x an [adp-class] object.
#'
#' @template debugTemplate
#'
#' @return An object with the first 3 velocity indices having been altered to
#' represent velocity components in xyz (or instrument) coordinates. (For
#' `rdi` data, the values at the 4th velocity index are changed to
#' represent the "error" velocity.)
#' To indicate the change, the value of `x[["oceCoordinate"]]` is
#' changed from `beam` to `xyz`.
#'
#' @author Dan Kelley
#'
#' @seealso See [read.adp()] for other functions that relate to
#' objects of class `"adp"`.
#'
#' @references
#' 1. Teledyne RD Instruments. \dQuote{ADCP Coordinate Transformation: Formulas and
#' Calculations,} January 2010. P/N 951-6079-00.
#'
#' 2. WHOI/USGS-provided Matlab code for beam-enu transformation
#' \samp{http://woodshole.er.usgs.gov/pubs/of2005-1429/MFILES/AQDPTOOLS/beam2enu.m}
#'
#' @family things related to adp data
beamToXyzAdp <- function(x, debug=getOption("oceDebug"))
{
    if (!inherits(x, "adp"))
        stop("method is only for objects of class \"adp\"")
    if (x[["oceCoordinate"]] != "beam")
        stop("input must be in beam coordinates")
    if (is.ad2cp(x)) {
        ## AD2CP files are laid out quite differently, so a specialized
        ## function handles them.
        oceDebug(debug, "beamToXyzAdp(x, debug=", debug, ") {\n", sep="", unindent=1)
        res <- beamToXyzAdpAD2CP(x=x, debug=debug - 1)
        oceDebug(debug, "} # beamToXyzAdp()\n", unindent=1)
        return(res)
    }
    oceDebug(debug, "beamToXyzAdp(x, debug=", debug, ") {\n", sep="", unindent=1)
    nb <- x[["numberOfBeams"]]
    if (is.null(nb))
        stop("missing x[[\"numberOfBeams\"]]")
    tm <- x[["transformationMatrix"]]
    if (is.null(tm))
        stop("missing x[[\"transformationMatrix\"]]")
    ## NB. all.equal() returns either TRUE or a character description of the
    ## differences, so it must be wrapped in isTRUE(); negating a character
    ## value is an error, which previously masked the intended message.
    if (!isTRUE(all.equal(dim(tm), c(nb, nb))))
        stop("number of beams, ", nb, ", contradicts the ", dim(tm)[1], "x", dim(tm)[2], " transformationMatrix")
    manufacturer <- x[["manufacturer"]]
    if (is.null(manufacturer))
        stop("cannot rotate the data, since there is no 'manufacturer' entry in the metadata slot")
    oceDebug(debug, "transformation matrix follows\n")
    if (debug > 0)
        print(tm)
    ## Local helper: apply the first n rows/columns of tm to the velocity
    ## array V (profile x cell x beam), returning a copy with components 1..n
    ## replaced; any components beyond n are left untouched.
    rotateArray <- function(V, tm, n) {
        W <- V
        for (i in seq_len(n)) {
            s <- tm[i, 1] * V[, , 1]
            for (k in 2:n)
                s <- s + tm[i, k] * V[, , k]
            W[, , i] <- s
        }
        W
    }
    ## Local helper: as rotateArray(), but for a bottom-velocity matrix
    ## (profile x beam).
    rotateMatrix <- function(V, tm, n) {
        W <- V
        for (i in seq_len(n)) {
            s <- tm[i, 1] * V[, 1]
            for (k in 2:n)
                s <- s + tm[i, k] * V[, k]
            W[, i] <- s
        }
        W
    }
    res <- x
    haveBv <- "bv" %in% names(x@data) # bottom-tracking velocity, if present
    if (length(grep(".*rdi.*", manufacturer))) {
        if (nb != 4)
            stop("can only handle 4-beam ADP units from RDI")
        res@data$v <- rotateArray(x[["v"]], tm, 4)
        if (haveBv)
            res@data$bv <- rotateMatrix(x@data$bv, tm, 4)
        res@metadata$oceCoordinate <- "xyz"
    } else if (length(grep(".*nortek.*", manufacturer))) {
        if (nb == 3) {
            res@data$v <- rotateArray(x[["v"]], tm, 3)
            if (haveBv)
                res@data$bv <- rotateMatrix(x@data$bv, tm, 3)
            res@metadata$oceCoordinate <- "xyz"
        } else if (nb == 4) {
            stop("the only 4-beam Nortek format supported is AD2CP")
        } else {
            stop("can only handle 3-beam and 4-beam ADP units from nortek")
        }
    } else if (length(grep(".*sontek.*", manufacturer))) {
        res@data$v <- rotateArray(x[["v"]], tm, 3)
        if (haveBv)
            res@data$bv <- rotateMatrix(x@data$bv, tm, 3)
        res@metadata$oceCoordinate <- "xyz"
    } else {
        stop("adp type must be either \"rdi\" or \"nortek\" or \"sontek\"")
    }
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    oceDebug(debug, "} # beamToXyzAdp()\n", unindent=1)
    res
}
#' Convert AD2CP-style adp data From Beam to XYZ Coordinates
#'
#' This looks at all the items in the `data` slot of `x`, to
#' see if they contain an array named `v` that holds velocity.
#' If that velocity has 4 components, and if `oceCoordinate` for
#' the item is `"beam"`, then
#' along-beam velocity components \eqn{B_1}{B1}
#' \eqn{B_2}{B2}, \eqn{B_3}{B3}, and \eqn{B_4}{B4}
#' are converted to instrument-oriented Cartesian velocity components \eqn{u}{u}
#' \eqn{v}{v} and \eqn{w}{w}
#' using the convex-geometry formulae from section 5.5 of reference 1,
#' viz.
#' \eqn{u=ca(B_1-B_2)}{u=a*(B1-B2)}, \eqn{v=ca(B_4-B_3)}{v=a*(B4-B3)},
#' \eqn{w=-b(B_1+B_2+B_3+B_4)}{w=-b*(B1+B2+B3+B4)}. In addition to these,
#' an estimate of the
#' error in velocity is computed as
#' \eqn{e=d(B_1+B_2-B_3-B_4)}{e=d*(B1+B2-B3-B4)}.
#' The geometrical factors in these formulae are:
#' \eqn{a=1/(2\sin\theta)}{a=1/(2*sin(theta))}
#' where \eqn{\theta}{theta} is the angle the beams make to the axial direction
#' (which is available as `x[["beamAngle"]]`),
#' \eqn{b=1/(4\cos\theta)}{b=1/(4*cos(theta))}, and
#' \eqn{d=a/\sqrt{2}}{d=a/sqrt(2)}.
#'
#' @param x an [adp-class] object.
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object, with any 4-beam `"beam"`-coordinate items in
#' the `data` slot converted to `"xyz"` coordinates; other items are
#' unaltered.
#'
#' @references
#' 1. Teledyne RD Instruments.
#' \dQuote{ADCP Coordinate Transformation: Formulas and Calculations,}
#' January 2010. P/N 951-6079-00.
#
#' @family things related to adp data
beamToXyzAdpAD2CP <- function(x, debug=getOption("oceDebug"))
{
    debug <- if (debug > 0) 1 else 0
    oceDebug(debug, "beamToXyzAdpAD2CP(x, debug=", debug, ") {\n", sep="", unindent=1)
    if (!inherits(x, "adp"))
        stop("method is only for objects of class \"adp\"")
    if (!is.ad2cp(x))
        stop("method is only for AD2CP objects")
    res <- x
    for (item in names(x@data)) {
        oceDebug(debug, "item='", item, "'...\n", sep="")
        ## Do not try to alter unsuitable items, e.g. the vertical beam, the altimeter, etc.
        if (is.list(x@data[[item]]) && "v" %in% names(x@data[[item]])) {
            if (x@data[[item]]$oceCoordinate == "beam") {
                numberOfBeams <- x@data[[item]]$numberOfBeams
                oceDebug(debug, "    numberOfBeams=", numberOfBeams, "\n")
                if (4 == numberOfBeams) {
                    v <- x@data[[item]]$v
                    ## Extract the four beam components once, to avoid
                    ## repeated 3-d indexing in the transformation below.
                    v1 <- v[,,1]
                    v2 <- v[,,2]
                    v3 <- v[,,3]
                    v4 <- v[,,4]
                    rm(v) # perhaps help by reducing memory pressure a bit
                    beamAngle <- x@metadata$beamAngle
                    if (is.null(beamAngle))
                        stop("cannot look up beamAngle")
                    theta <- beamAngle * atan2(1, 1) / 45 # degrees to radians
                    TMc <- 1 # for convex (diverging) beam setup; use -1 for concave
                    TMa <- 1 / (2 * sin(theta))
                    TMb <- 1 / (4 * cos(theta))
                    TMd <- TMa / sqrt(2)
                    ## Transformation matrix, as in section 5.5 of reference 1.
                    tm <- rbind(c(TMc*TMa, -TMc*TMa,        0,       0),
                                c(      0,        0, -TMc*TMa, TMc*TMa),
                                c(    TMb,      TMb,      TMb,     TMb),
                                c(    TMd,      TMd,     -TMd,    -TMd))
                    res@data[[item]]$v[,,1] <- tm[1,1]*v1 + tm[1,2]*v2 + tm[1,3]*v3 + tm[1,4]*v4
                    res@data[[item]]$v[,,2] <- tm[2,1]*v1 + tm[2,2]*v2 + tm[2,3]*v3 + tm[2,4]*v4
                    res@data[[item]]$v[,,3] <- tm[3,1]*v1 + tm[3,2]*v2 + tm[3,3]*v3 + tm[3,4]*v4
                    res@data[[item]]$v[,,4] <- tm[4,1]*v1 + tm[4,2]*v2 + tm[4,3]*v3 + tm[4,4]*v4
                    res@data[[item]]$oceCoordinate <- "xyz"
                    res@metadata$oceCoordinate <- NULL # remove, just in case it got added by mistake
                    oceDebug(debug, "    converted from 'beam' to 'xyz'\n")
                } else {
                    oceDebug(debug, "    skipping, since not 4 beams\n")
                }
            } else {
                oceDebug(debug, "    skipping, since not in 'beam' coordinate\n")
            }
        } else {
            oceDebug(debug, "    skipping, since not a list\n")
        }
    }
    res@processingLog <- processingLogAppend(res@processingLog,
                                             paste("beamToXyzAdpAD2CP(x",
                                                   ", debug=", debug, ")", sep=""))
    oceDebug(debug, "} # beamToXyzAdpAD2CP()\n", unindent=1)
    res
}
#' Convert ADP From XYZ to ENU Coordinates
#'
#' Convert ADP velocity components from a xyz-based coordinate system to
#' an enu-based coordinate system, by using the instrument's recording of
#' information relating to heading, pitch, and roll. The action is based
#' on what is stored in the data, and so it depends greatly on instrument type
#' and the style of original data format. This function handles data from
#' RDI Teledyne, Sontek, and some Nortek instruments directly. However, Nortek
#' data stored in in the AD2CP format are handled by the specialized
#' function [xyzToEnuAdpAD2CP()], the documentation for which
#' should be consulted, rather than the material given below.
#'
#' The first step is to convert the (x,y,z) velocity components (stored in the
#' three columns of `x[["v"]][,,1:3]`) into what RDI (reference 1, pages 11 and 12)
#' calls "ship" (or "righted") components. For example, the z coordinate,
#' which may point upwards or downwards depending on instrument orientation, is
#' mapped onto a "mast" coordinate that points more nearly upwards than
#' downward. The other ship coordinates are called "starboard" and "forward",
#' the meanings of which will be clear to mariners. Once the (x,y,z)
#' velocities are converted to ship velocities, the orientation of the
#' instrument is extracted from heading, pitch, and roll vectors stored in the
#' object. These angles are defined differently for RDI and Sontek profilers.
#'
#' The code handles every case individually, based on the table given below.
#' The table comes from Clark Richards, a former PhD student at Dalhousie
#' University (reference 2), who developed it based on instrument documentation,
#' discussion on user groups, and analysis of measurements acquired with RDI
#' and Sontek acoustic current profilers in the SLEIWEX experiment. In the
#' table, (X, Y, Z) denote instrument-coordinate velocities, (S, F, M) denote
#' ship-coordinate velocities, and (H, P, R) denote heading, pitch, and roll.
#'
#' \tabular{rrrrrrrrrrrr}{ **Case** \tab **Mfr.** \tab
#' **Instr.** **Orient.** \tab **H** \tab **P** \tab
#' **R** \tab **S** \tab **F** \tab **M**\cr 1 \tab RDI
#' \tab ADCP \tab up \tab H \tab arctan(tan(P)*cos(R)) \tab R \tab -X \tab Y
#' \tab -Z\cr 2 \tab RDI \tab ADCP \tab down \tab H \tab arctan(tan(P)*cos(R))
#' \tab -R \tab X \tab Y \tab Z\cr 3 \tab Nortek \tab ADP \tab up \tab H-90
#' \tab R \tab -P \tab X \tab Y \tab Z\cr 4 \tab Nortek \tab ADP \tab down \tab
#' H-90 \tab R \tab -P \tab X \tab -Y \tab -Z\cr 5 \tab Sontek \tab ADP \tab up
#' \tab H-90 \tab -P \tab -R \tab X \tab Y \tab Z\cr 6 \tab Sontek \tab ADP
#' \tab down \tab H-90 \tab -P \tab -R \tab X \tab Y \tab Z\cr 7 \tab Sontek
#' \tab PCADP \tab up \tab H-90 \tab R \tab -P \tab X \tab Y \tab Z\cr 8 \tab
#' Sontek \tab PCADP \tab down \tab H-90 \tab R \tab -P \tab X \tab Y \tab Z\cr
#' }
#'
#' Finally, a standardized rotation matrix is used to convert from ship
#' coordinates to earth coordinates. As described in the RDI coordinate
#' transformation manual (reference 1, pages 13 and 14), this matrix is based on sines
#' and cosines of heading, pitch, and roll If `CH` and `SH` denote
#' cosine and sine of heading (after adjusting for declination), with similar
#' terms for pitch and roll using second letters `P` and `R`, the
#' rotation matrix is
#'
#' \preformatted{ rbind(c( CH*CR + SH*SP*SR, SH*CP, CH*SR - SH*SP*CR), c(-SH*CR
#' + CH*SP*SR, CH*CP, -SH*SR - CH*SP*CR), c( -CP*SR, SP, CP*CR)) }
#'
#' This matrix is left-multiplied by a matrix with three rows, the top a vector
#' of "starboard" values, the middle a vector of "forward" values, and the
#' bottom a vector of "mast" values. Finally, the columns of
#' `data$v[,,1:3]` are filled in with the result of the matrix
#' multiplication.
#'
#' @param x an [adp-class] object.
#'
#' @param declination magnetic declination to be added to the heading after
#' "righting" (see below), to get ENU with N as "true" north.
#'
#' @template debugTemplate
#'
#' @return An object with `data$v[,,1:3]` altered appropriately, and
#' `x[["oceCoordinate"]]` changed from `xyz` to `enu`.
#'
#' @author Dan Kelley and Clark Richards
#'
## @section Limitations:
## For AD2CP objects, created by[read.adp.ad2cp()],
## the transformation to ENU coordinates is only possible if the instrument
## orientation is `"AHRS"`. Other orientations may be added, if users
## indicate a need for them, and supply the developers with test files (including
## at least a few expected results).
#'
#' @references
#' 1. Teledyne RD Instruments. \dQuote{ADCP Coordinate Transformation: Formulas and Calculations,}
#' January 2010. P/N 951-6079-00.
#'
#' 2. Clark Richards, 2012, PhD Dalhousie University Department of
#' Oceanography.
#'
#' @family things related to adp data
xyzToEnuAdp <- function(x, declination=0, debug=getOption("oceDebug"))
{
    ## Convert adp velocities from instrument (xyz) or ship (sfm) coordinates
    ## to earth (enu) coordinates.  The mapping from instrument axes to
    ## starboard-forward-mast axes is manufacturer- and orientation-dependent;
    ## see the table in the roxygen documentation above.
    debug <- if (debug > 0) 1 else 0
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    ## Treat AD2CP differently because e.g. if it has AHRS, then there is may be need or
    ## benefit in extracting heading, etc., as for the other cases. Also, the orientation
    ## names are different for this type, so isolating the code makes things clearer
    ## and easier to maintain. (FIXME: consider splitting the RDI and Sontek cases, too.)
    if (is.ad2cp(x))
        return(xyzToEnuAdpAD2CP(x=x, declination=declination, debug=debug))
    oceDebug(debug, "xyzToEnuAdp(x, declination=", declination, ", debug=", debug, ") {\n", sep="", unindent=1)
    ## Now, address non-AD2CP cases.
    manufacturer <- x[["manufacturer"]]
    oceCoordinate <- x[["oceCoordinate"]]    # was assigned with '='; use '<-' per convention
    orientation <- x[["orientation"]][1]
    if (is.null(orientation)) {
        warning("instrument orientation is not stored in x; assuming it is \"upward\"")
        orientation <- "upward"
    }
    ## Scalar test, so use '&&' (the original '&' worked only by accident here).
    if (is.null(oceCoordinate) || (oceCoordinate != "xyz" && oceCoordinate != "sfm"))
        stop("input must be in xyz or sfm coordinates")
    heading <- x[["heading"]]
    pitch <- x[["pitch"]]
    roll <- x[["roll"]]
    res <- x
    ## NOTE: always FALSE at this point, since AD2CP objects returned above.
    isAD2CP <- is.ad2cp(x)
    haveBv <- "bv" %in% names(x@data)
    ## Case-by-case alteration of heading, pitch and roll, so we can use one formula for all.
    if (1 == length(agrep("rdi", manufacturer, ignore.case=TRUE))) {
        ## "teledyne rdi"
        ## h/p/r and s/f/m from Clark Richards pers. comm. 2011-03-14, revised 2011-03-15
        if (oceCoordinate == "sfm" && !res@metadata$tiltUsed) {
            oceDebug(debug, "Case 1: RDI ADCP in SFM coordinates.\n")
            oceDebug(debug, "  No coordinate changes required prior to ENU.\n")
            starboard <- res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]   # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (oceCoordinate == "sfm" && res@metadata$tiltUsed) {
            oceDebug(debug, "Case 2: RDI ADCP in SFM coordinates, but with tilts already applied.\n")
            oceDebug(debug, "  No coordinate changes required prior to ENU.\n")
            starboard <- res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]   # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            ## Tilts already applied by the instrument, so zero them here.
            pitch <- rep(0, length(heading))
            roll <- rep(0, length(heading))
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (orientation == "upward") {
            oceDebug(debug, "Case 3: RDI ADCP in XYZ coordinates with upward-pointing sensor.\n")
            oceDebug(debug, "  Using S=-X, F=Y, and M=-Z.\n")
            ## As an alternative to the next three lines, could just add 180 degrees to roll
            starboard <- -res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]    # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- -res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            if (haveBv) {
                ## bottom velocity
                starboardBv <- -res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- -res@data$bv[, 3]
            }
            oceDebug(debug, "  defined starboard, etc\n")
        } else if (orientation == "downward") {
            oceDebug(debug, "Case 4: RDI ADCP in XYZ coordinates with downward-pointing sensor.\n")
            oceDebug(debug, "  Using roll=-roll, S=X, F=Y, and M=Z.\n")
            roll <- -roll
            starboard <- res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]   # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else {
            stop("need orientation='upward' or 'downward', not '", orientation, "'")
        }
    } else if (1 == length(agrep("nortek", manufacturer))) {
        V <- x[["v"]]
        if (orientation == "upward") {
            ## h/p/r and s/f/m from Clark Richards pers. comm. 2011-03-14
            oceDebug(debug, "Case 3: Nortek ADP with upward-pointing sensor.\n")
            oceDebug(debug, "  Using heading=heading-90, pitch=roll, roll=-pitch, S=X, F=Y, and M=Z.\n")
            heading <- heading - 90
            tmp <- pitch
            pitch <- roll
            roll <- -tmp
            starboard <- V[, , 1]
            forward <- V[, , 2]
            mast <- V[, , 3]
            if (!isAD2CP && haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (orientation == "downward") {
            oceDebug(debug, "Case 4: Nortek ADP with downward-pointing sensor.\n")
            oceDebug(debug, "  Using heading=heading-90, pitch=roll, roll=-pitch, S=X, F=-Y, and M=-Z.\n")
            heading <- heading - 90
            tmp <- pitch
            pitch <- roll
            roll <- -tmp
            starboard <- V[, , 1]
            forward <- -V[, , 2]
            mast <- -V[, , 3]
            if (!isAD2CP && haveBv) {
                ## bottom velocity.  BUG FIX: mastBv now carries the same sign
                ## flip as 'mast' above; previously the negation was omitted,
                ## inconsistently with the treatment of the water velocities.
                starboardBv <- res@data$bv[, 1]
                forwardBv <- -res@data$bv[, 2]
                mastBv <- -res@data$bv[, 3]
            }
        } else {
            stop("need orientation='upward' or 'downward', not '", orientation, "'")
        }
    } else if (1 == length(agrep("sontek", manufacturer))) {
        ## "sontek"
        if (orientation == "upward") {
            oceDebug(debug, "Case 5: Sontek ADP with upward-pointing sensor.\n")
            oceDebug(debug, "  Using heading=heading-90, pitch=-pitch, roll=-roll, S=X, F=Y, and M=Z.\n")
            heading <- heading - 90
            pitch <- -pitch
            roll <- -roll
            starboard <- res@data$v[, , 1]
            forward <- res@data$v[, , 2]
            mast <- res@data$v[, , 3]
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (orientation == "downward") {
            oceDebug(debug, "Case 6: Sontek ADP with downward-pointing sensor.\n")
            oceDebug(debug, "  Using heading=heading-90, pitch=-pitch, roll=-roll, S=X, F=Y, and M=Z.\n")
            heading <- heading - 90
            pitch <- -pitch
            roll <- -roll
            starboard <- res@data$v[, , 1]
            forward <- res@data$v[, , 2]
            mast <- res@data$v[, , 3]
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else {
            stop("need orientation='upward' or 'downward', not '", orientation, "'")
        }
    } else {
        stop("unrecognized manufacturer; should be 'teledyne rdi', 'sontek', or 'nortek', but is '",
             manufacturer, "'")
    }
    oceDebug(debug, vectorShow(heading, "heading (after adjustment)"))
    oceDebug(debug, vectorShow(pitch, "pitch (after adjustment)"))
    oceDebug(debug, vectorShow(roll, "roll (after adjustment)"))
    nc <- dim(x@data$v)[2]             # numberOfCells
    np <- dim(x@data$v)[1]             # number of profiles
    ## Recycle the angles so there is one value per profile.
    if (length(heading) < np)
        heading <- rep(heading, length.out=np)
    if (length(pitch) < np)
        pitch <- rep(pitch, length.out=np)
    if (length(roll) < np)
        roll <- rep(roll, length.out=np)
    ## ADP and ADV calculations are both handled by sfm_enu for non-AD2CP.
    for (cell in seq_len(nc)) {
        enu <- do_sfm_enu(heading + declination, pitch, roll, starboard[, cell], forward[, cell], mast[, cell])
        res@data$v[, cell, 1] <- enu$east
        res@data$v[, cell, 2] <- enu$north
        res@data$v[, cell, 3] <- enu$up
    }
    if (haveBv) {
        enu <- do_sfm_enu(heading + declination, pitch, roll, starboardBv, forwardBv, mastBv)
        res@data$bv[, 1] <- enu$east
        res@data$bv[, 2] <- enu$north
        res@data$bv[, 3] <- enu$up
    }
    res@metadata$oceCoordinate <- "enu"
    res@processingLog <- processingLogAppend(res@processingLog,
                                             paste("xyzToEnuAdp(x", ", declination=", declination, ", debug=", debug, ")", sep=""))
    oceDebug(debug, "} # xyzToEnuAdp()\n", unindent=1)
    res
}
#' Convert ADP2CP adp object From XYZ to ENU Coordinates
#'
#' **This function will be in active development through the early
#' months of 2019, and both the methodology and user interface may change
#' without notice. Only developers (or invitees) should be trying to
#' use this function.**
#'
#' @param x an [adp-class] object created by [read.adp.ad2cp()].
#'
#' @param declination IGNORED at present, but will be used at some later time.
#' @template debugTemplate
#'
#' @return An object with `data$v[,,1:3]` altered appropriately, and
#' `x[["oceCoordinate"]]` changed from `xyz` to `enu`.
#'
#' @author Dan Kelley
#'
#' @section Limitations:
#' This only works if the instrument orientation is `"AHRS"`, and even
#' that is not tested yet. Plus, as noted, the declination is ignored.
#'
#' @references
#' 1. Nortek AS. \dQuote{Signature Integration 55|250|500|1000kHz.} Nortek AS, 2017.
#'
#' 2. Nortek AS. \dQuote{Signature Integration 55|250|500|1000kHz.} Nortek AS, 2018.
#' https://www.nortekgroup.com/assets/software/N3015-007-Integrators-Guide-AD2CP_1018.pdf.
#'
#' @family things related to adp data
xyzToEnuAdpAD2CP <- function(x, declination=0, debug=getOption("oceDebug"))
{
    ## Convert an AD2CP adp object from xyz to enu coordinates.  Only the
    ## "AHRS" orientation is handled, in which case the instrument supplies a
    ## per-profile 3x3 rotation matrix (stored column-wise in 9 columns).
    debug <- if (debug > 0) 1 else 0
    oceDebug(debug, "xyzToEnuAdpAD2CP(x, declination=", declination, ", debug=", debug, ") {\n", sep="", unindent=1)
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    if (!is.ad2cp(x))
        stop("this function only works for adp objects created by read.adp.ad2cp()")
    if (0 != declination)
        stop("nonzero declination is not handled yet; please contact the author if you need this") # FIXME (typo "ned" fixed)
    res <- x
    ## FIXME: deal with other ad2cp orientations. Can (should) we use a methodology
    ## similar to the non-ad2cp, for non-AHRS cases?
    ## FIXME: do a loop like this for beamToXyzAdpAD2CP() also.
    for (item in names(x@data)) {
        ## Do not try to rotate non-rotatable items, e.g. the vertical beam, the altimeter, etc.
        if (is.list(x@data[[item]])) {
            numberOfBeams <- x@data[[item]]$numberOfBeams
            if (!is.null(numberOfBeams) && numberOfBeams == 4) {
                orientation <- x@data[[item]]$orientation
                if (is.null(orientation))
                    stop("no known orientation for '", item, "' in the object data slot")
                ## FIXME: handle 'xup', 'xdown', 'yup', 'ydown', 'zup', 'zdown'
                if (orientation[1] != "AHRS")
                    stop("only the 'AHRS' orientation is handled, but '", item, "' has orientation '", orientation[1], "'")
                AHRS <- x@data[[item]]$AHRS
                if (is.null(AHRS))
                    stop("'", item, "' within the object data slot does not contain coordinate-change matrix 'AHRS'")
                oceCoordinate <- x@data[[item]]$oceCoordinate
                if (is.null(oceCoordinate))
                    stop("'", item, "' within the object data slot has no 'oceCoordinate'")
                ## If the item is already in 'enu', we just leave it alone
                if (oceCoordinate == "xyz") {
                    V <- x@data[[item]]$v
                    if (is.null(V))
                        stop("'", item, "' within the object data slot does not contain velocity 'v'")
                    nc <- dim(V)[2]
                    ## DEVELOPER NOTE: a C++ version of the calculation below
                    ## was tried and found markedly slower than this R version
                    ## (0.5s vs 3.5s on a 292M file), so the R form is kept;
                    ## it is also easier for others to modify.
                    ##
                    ## Apply the per-profile 3x3 AHRS rotation; rep() tiles the
                    ## matrix columns across the 'nc' cells of each profile.
                    e <- V[,,1]*rep(AHRS[,1], times=nc) + V[,,2]*rep(AHRS[,2], times=nc) + V[,,3]*rep(AHRS[,3], times=nc)
                    n <- V[,,1]*rep(AHRS[,4], times=nc) + V[,,2]*rep(AHRS[,5], times=nc) + V[,,3]*rep(AHRS[,6], times=nc)
                    u <- V[,,1]*rep(AHRS[,7], times=nc) + V[,,2]*rep(AHRS[,8], times=nc) + V[,,3]*rep(AHRS[,9], times=nc)
                    ## FIXME: perhaps use the declination now, rotating e and n. But first, we will need to know
                    ## what declination was used by the instrument, in its creation of AHRS.
                    res@data[[item]]$v[,,1] <- e
                    res@data[[item]]$v[,,2] <- n
                    res@data[[item]]$v[,,3] <- u
                    res@data[[item]]$oceCoordinate <- "enu"
                } else if (oceCoordinate == "beam") {
                    stop("cannot convert from beam to Enu coordinates; use beamToXyz() first")
                }
            }
        }
    }
    res@processingLog <- processingLogAppend(res@processingLog,
                                             paste("xyzToEnuAdpAD2CP(x",
                                                   ", declination=", declination,
                                                   ", debug=", debug, ")", sep=""))
    oceDebug(debug, "} # xyzToEnuAdpAD2CP()\n", unindent=1)
    res
}
#' Convert ADP ENU to Rotated Coordinate
#'
#' Convert ADP velocity components from an enu-based coordinate system to
#' another system, perhaps to align axes with the coastline.
#'
#' The supplied angles specify rotations to be made around the axes for which
#' heading, pitch, and roll are defined. For example, an eastward current will
#' point southeast if `heading=45` is used.
#'
#' The returned value has heading, pitch, and roll matching those of `x`,
#' so these angles retain their meaning as the instrument orientation.
#'
#' NOTE: this function works similarly to [xyzToEnuAdp()], except
#' that in the present function, it makes no difference whether the instrument
#' points up or down, etc.
#'
#' @param x an [adp-class] object.
#'
#' @param heading number or vector of numbers, giving the angle, in degrees, to
#' be added to the heading. See \dQuote{Details}.
#'
#' @param pitch as `heading` but for pitch.
#'
#' @param roll as `heading` but for roll.
#'
#' @return An object with `data$v[,,1:3]` altered appropriately, and
#' `metadata$oceCoordinate` changed from `enu` to `other`.
#'
#' @author Dan Kelley
#'
#' @seealso See [read.adp()] for other functions that relate to
#' objects of class `"adp"`.
#'
#' @references
#' 1. Teledyne RD Instruments. \dQuote{ADCP Coordinate Transformation: Formulas and
#' Calculations,} January 2010. P/N 951-6079-00.
#'
#' @examples
#'
#' library(oce)
#' data(adp)
#' o <- enuToOtherAdp(adp, heading=-31.5)
#' plot(o, which=1:3)
#'
#' @family things related to adp data
enuToOtherAdp <- function(x, heading=0, pitch=0, roll=0)
{
    ## Rotate enu-coordinate adp velocities (and bottom-track velocities, if
    ## present) into a user-specified "other" frame, using the same
    ## starboard-forward-mast rotation machinery as xyzToEnuAdp().
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    if (is.ad2cp(x))
        stop("this function does not work yet for AD2CP data")
    oceCoordinate <- x[["oceCoordinate"]]
    if (oceCoordinate != "enu")
        stop("input must be in enu coordinates, but it is in ", oceCoordinate, " coordinates")
    res <- x
    v <- x[["v"]]
    nprofiles <- dim(v)[1]
    ## Recycle the rotation angles so there is one value per profile.
    if (length(heading) != nprofiles)
        heading <- rep(heading, length.out=nprofiles)
    if (length(pitch) != nprofiles)
        pitch <- rep(pitch, length.out=nprofiles)
    if (length(roll) != nprofiles)
        roll <- rep(roll, length.out=nprofiles)
    ncells <- dim(v)[2]
    for (cell in seq_len(ncells)) {
        rotated <- do_sfm_enu(heading, pitch, roll, v[, cell, 1], v[, cell, 2], v[, cell, 3])
        res@data$v[, cell, 1] <- rotated$east
        res@data$v[, cell, 2] <- rotated$north
        res@data$v[, cell, 3] <- rotated$up
    }
    if ("bv" %in% names(x@data)) {
        ## Rotate the bottom-track velocity with the same angles.
        rotated <- do_sfm_enu(heading, pitch, roll, x@data$bv[, 1], x@data$bv[, 2], x@data$bv[, 3])
        res@data$bv[, 1] <- rotated$east
        res@data$bv[, 2] <- rotated$north
        res@data$bv[, 3] <- rotated$up
    }
    res@metadata$oceCoordinate <- "other"
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
peek.ahead <- function(file, bytes=2, debug=!TRUE)
{
    ## Read 'bytes' raw bytes from 'file' without advancing its position:
    ## remember the position, read, then seek back.
    savedPosition <- seek(file)
    peeked <- readBin(file, "raw", n=bytes, size=1)
    oceDebug(debug, "peeked at", paste("0x", paste(peeked, sep=" "), sep=""), "\n")
    seek(file, savedPosition)
    peeked
}
display.bytes <- function(b, label="", ...)
{
    ## Print a labelled dump of the raw vector 'b' (a debugging aid);
    ## extra arguments are forwarded to cat() and print().
    nbytes <- length(b)
    cat("\n", label, " (", nbytes, "bytes)\n", sep="", ...)
    print(b, ...)
}
#' Subtract Bottom Velocity from ADP
#'
#' Subtracts bottom tracking velocities from an `"adp"` object. Works for
#' all coordinate systems (`beam`, `xyz`, and `enu`).
#'
#' @param x an [adp-class] object that contains bottom-tracking velocities.
#'
#' @template debugTemplate
#'
#' @author Dan Kelley and Clark Richards
#'
#' @seealso See [read.adp()] for notes on functions relating to
#' `"adp"` objects, and [adp-class] for notes on the ADP
#' object class.
#'
#' @family things related to adp data
subtractBottomVelocity <- function(x, debug=getOption("oceDebug"))
{
    ## Subtract the bottom-track velocity from every beam (or velocity
    ## component, depending on coordinate system) of x[["v"]].  Returns the
    ## object unchanged (with a warning) if it has no "bv" data item.
    oceDebug(debug, "subtractBottomVelocity(x) {\n", unindent=1)
    if (!("bv" %in% names(x@data))) {
        warning("there is no bottom velocity in this object")
        return(x)
    }
    res <- x
    numberOfBeams <- dim(x[["v"]])[3]  # could also get from metadata but this is less brittle
    ## seq_len() (rather than 1:numberOfBeams) is safe even for degenerate input.
    for (beam in seq_len(numberOfBeams)) {
        oceDebug(debug, "beam #", beam, "\n")
        res@data$v[, , beam] <- x[["v"]][, , beam] - x@data$bv[, beam]
    }
    oceDebug(debug, "} # subtractBottomVelocity()\n", unindent=1)
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
#' Bin-map an ADP object
#'
#' Bin-map an ADP object, by interpolating velocities, backscatter amplitudes,
#' etc., to uniform depth bins, thus compensating for the pitch and roll of the
#' instrument. This only makes sense for ADP objects that are in beam
#' coordinates.
#'
#' @param x an [adp-class] object.
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object.
#'
#' @section Bugs: This only works for 4-beam RDI ADP objects.
#'
#' @author Dan Kelley and Clark Richards
#'
#' @seealso See [adp-class] for a discussion of `adp` objects
#' and notes on the many functions dealing with them.
#'
#' @references The method was devised by Clark Richards for use in his PhD work
#' at Department of Oceanography at Dalhousie University.
#'
#' @examples
#'\dontrun{
#' library(oce)
#' beam <- read.oce("/data/archive/sleiwex/2008/moorings/m09/adp/rdi_2615/raw/adp_rdi_2615.000",
#' from=as.POSIXct("2008-06-26", tz="UTC"),
#' to=as.POSIXct("2008-06-26 00:10:00", tz="UTC"),
#' longitude=-69.73433, latitude=47.88126)
#' beam2 <- binmapAdp(beam)
#' plot(enuToOther(toEnu(beam), heading=-31.5))
#' plot(enuToOther(toEnu(beam2), heading=-31.5))
#' plot(beam, which=5:8) # backscatter amplitude
#' plot(beam2, which=5:8)
#'}
#'
#' @family things related to adp data
binmapAdp <- function(x, debug=getOption("oceDebug"))
{
    ## Interpolate beam-wise data (v, a, q, g) onto uniform vertical bins,
    ## compensating for instrument pitch and roll ("bin mapping").  Only
    ## 4-beam instruments are handled.
    oceDebug(debug, "binmap(x, debug) {\n", unindent=1)
    if (!inherits(x, "adp"))
        stop("x must be an \"adp\" object")
    v <- x[["v"]]
    a <- x[["a"]] ## FIXME: should ensure that this exist
    q <- x[["q"]]
    g <- x[["g"]]
    if (4 != dim(v)[3])
        stop("binmap() only works for 4-beam instruments")
    theta <- x[['beamAngle']] # FIXME: check that not missing or weird
    distance <- x[["distance"]]
    roll <- x[["roll"]]
    pitch <- x[["pitch"]]
    ## Below, we loop through the profiles. I tried an experiment in
    ## vectorizing across the loop, by combining into a single vector
    ## for (distance, cr, ...), but it was no faster, and the code was
    ## more complicated to read.
    vbm <- array(double(), dim=dim(v))
    abm <- array(raw(), dim=dim(v))
    qbm <- array(raw(), dim=dim(v))
    gbm <- array(raw(), dim=dim(v))
    nprofile <- dim(v)[1]
    res <- x
    tt <- tan(theta * pi / 180)   # loop-invariant, so hoisted out of the loop
    rule <- 2                     # FIXME: is it OK to extend data to edges?
    for (profile in seq_len(nprofile)) {
        r <- roll[profile]
        p <- pitch[profile]
        cr <- cos(r * pi / 180)
        sr <- sin(r * pi / 180)
        cp <- cos(p * pi / 180)
        sp <- sin(p * pi / 180)
        ## Vertical projection of the along-beam bin distances, per beam.
        z1 <- distance * (cr - tt * sr) * cp
        z2 <- distance * (cr + tt * sr) * cp
        z3 <- distance * (cp + tt * sp) * cr
        z4 <- distance * (cp - tt * sp) * cr
        ## FIXME: check on whether we can speed things up by using e.g. x[["v"]]
        ## instead of v, which would lower the memory requirements.
        ## v=velocity
        ## Each beam's profile must have more than 1 non-NA value, since
        ## approx() needs at least two points; otherwise set the profile to NA.
        checkNA <- sum(!is.na(v[profile, , 1])) > 1 && sum(!is.na(v[profile, , 2])) > 1 &&
            sum(!is.na(v[profile, , 3])) > 1 && sum(!is.na(v[profile, , 4])) > 1
        if (checkNA) {
            vbm[profile, , 1] <- approx(z1, v[profile, , 1], distance)$y
            vbm[profile, , 2] <- approx(z2, v[profile, , 2], distance)$y
            vbm[profile, , 3] <- approx(z3, v[profile, , 3], distance)$y
            vbm[profile, , 4] <- approx(z4, v[profile, , 4], distance)$y
        } else {
            vbm[profile, , 1] <- NA
            vbm[profile, , 2] <- NA
            vbm[profile, , 3] <- NA
            vbm[profile, , 4] <- NA
        }
        ## a (amplitude).  BUG FIX: 'rule' was previously supplied to
        ## as.numeric(), where it was silently ignored; it is now supplied to
        ## approx(), so values really are extended to the profile edges.
        abm[profile, , 1] <- oce.as.raw(approx(z1, as.numeric(a[profile, , 1]), distance, rule=rule)$y)
        abm[profile, , 2] <- oce.as.raw(approx(z2, as.numeric(a[profile, , 2]), distance, rule=rule)$y)
        abm[profile, , 3] <- oce.as.raw(approx(z3, as.numeric(a[profile, , 3]), distance, rule=rule)$y)
        abm[profile, , 4] <- oce.as.raw(approx(z4, as.numeric(a[profile, , 4]), distance, rule=rule)$y)
        ## q (correlation magnitude)
        qbm[profile, , 1] <- oce.as.raw(approx(z1, as.numeric(q[profile, , 1]), distance, rule=rule)$y)
        qbm[profile, , 2] <- oce.as.raw(approx(z2, as.numeric(q[profile, , 2]), distance, rule=rule)$y)
        qbm[profile, , 3] <- oce.as.raw(approx(z3, as.numeric(q[profile, , 3]), distance, rule=rule)$y)
        qbm[profile, , 4] <- oce.as.raw(approx(z4, as.numeric(q[profile, , 4]), distance, rule=rule)$y)
        ## g (percent good)
        gbm[profile, , 1] <- oce.as.raw(approx(z1, as.numeric(g[profile, , 1]), distance, rule=rule)$y)
        gbm[profile, , 2] <- oce.as.raw(approx(z2, as.numeric(g[profile, , 2]), distance, rule=rule)$y)
        gbm[profile, , 3] <- oce.as.raw(approx(z3, as.numeric(g[profile, , 3]), distance, rule=rule)$y)
        gbm[profile, , 4] <- oce.as.raw(approx(z4, as.numeric(g[profile, , 4]), distance, rule=rule)$y)
    }
    res@data$v <- vbm
    res@data$a <- abm
    res@data$q <- qbm
    res@data$g <- gbm
    res
}
#' Ensemble Average an ADP Object in Time
#'
#' Ensemble averaging of `adp` objects is often necessary to
#' reduce the uncertainty in velocity estimates from single
#' pings. Many types of ADPs can be configured to perform the
#' ensemble averaging during the data collection, due to memory
#' limitations for long deployments. In cases where the instrument is
#' not memory limited, it may be desirable to perform the ensemble
#' averaging during post-processing, thereby reducing the overall
#' size of the data set and decreasing the uncertainty of the
#' velocity estimates (by averaging out Doppler noise).
#'
#' @param x an [adp-class] object.
#'
#' @param n number of pings to average together.
#'
#' @param leftover a logical value indicating how to proceed in cases
#' where `n` does not divide evenly into the number of ensembles
#' in `x`. If `leftover` is `FALSE` (the default) then any extra
#' ensembles at the end of `x` are ignored. Otherwise, they are used
#' to create a final ensemble in the returned value.
#'
#' @param na.rm a logical value indicating whether NA values should be stripped
#' before the computation proceeds
#'
#' @param ... extra arguments to be passed to the `mean()` function.
#'
#' @return A new [adp-class] object with ensembles averaged as specified. E.g. for an `adp` object with 100 pings and `n=5` the number of rows of the data arrays will be reduced by a factor of 5.
#'
#' @author Clark Richards and Dan Kelley
#'
#' @examples
#' library(oce)
#' data(adp)
#' adpAvg <- adpEnsembleAverage(adp, n=2)
#' plot(adpAvg)
#'
#' @family things related to adp data
adpEnsembleAverage <- function(x, n=5, leftover=FALSE, na.rm=TRUE, ...)
{
    ## Average groups of n successive pings (profiles), reducing Doppler
    ## noise and data volume.  Vectors, 2-D arrays (e.g. bottom range) and
    ## 3-D arrays (v, a, q, g) in the data slot are all averaged; raw-type
    ## fields are converted back to raw after averaging.
    if (!inherits(x, 'adp')) stop('Must be an object of class adp')
    res <- new('adp', distance=x[['distance']])
    res@metadata <- x@metadata
    d <- x@data
    t <- as.POSIXct(d$time)            # ensure POSIXct so next line works right
    ntx <- length(t)
    pings <- seq_along(t)
    ## Note the limits of the breaks, below. We start at 0 to catch the first
    ## pings value. If leftover is TRUE, we also extend at the right, to catch
    ## the fractional chunk that will exist at the end, if n does not divide into ntx.
    breaks <- if (leftover) seq(0, ntx+n, n) else seq(0, ntx, n)
    fac <- cut(pings, breaks=breaks, labels=FALSE) # used to split() data items
    res@data$time <- numberAsPOSIXct(as.numeric(lapply(split(as.numeric(t), fac), mean, na.rm=na.rm, ...)))
    for (field in names(d)) {
        ## Scalar membership test ('%in%'), rather than the vectorized '&'
        ## the original used in this scalar context.
        if (!(field %in% c('time', 'distance'))) {
            if (is.vector(d[[field]])) {
                res@data[[field]] <- as.numeric(lapply(split(as.numeric(d[[field]]), fac), mean, na.rm=na.rm, ...))
            } else if (is.array(d[[field]])) {
                fdim <- dim(d[[field]])
                res@data[[field]] <- array(NA, dim=c(length(res@data[['time']]), fdim[-1]))
                for (j in seq_len(tail(fdim, 1))) {
                    if (length(fdim) == 2) { # for fields like bottom range
                        res@data[[field]][, j] <- unlist(lapply(split(as.numeric(d[[field]][, j]), fac), mean, na.rm=na.rm, ...))
                    } else if (length(fdim) == 3) { # for array fields like v, a, q, etc
                        for (i in seq_len(fdim[2])) {
                            res@data[[field]][, i, j] <- unlist(lapply(split(as.numeric(d[[field]][, i, j]), fac), mean, na.rm=na.rm, ...))
                        }
                    }
                }
                if (is.raw(d[[field]])) {
                    ## Preserve raw storage type for fields that had it.
                    dims <- dim(res@data[[field]])
                    res@data[[field]] <- array(as.raw(res@data[[field]]), dim=dims)
                }
            }
        }
    }
    res@metadata$numberOfSamples <- length(res@data$time) # FIXME: handle AD2CP
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
|
/issuestests/oce/R/adp.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false
| false
| 210,382
|
r
|
## vim: tw=120 shiftwidth=4 softtabstop=4 expandtab:
#' Class to Store adp (ADCP) Data
#'
#' This class stores data from acoustic Doppler profilers. Some manufacturers
#' call these ADCPs, while others call them ADPs; here the shorter form is
#' used by analogy to ADVs.
#'
#' @templateVar class adp
#'
#' @templateVar dataExample The key items stored in this slot include `time`, `distance`, and `v`, along with angles `heading`, `pitch` and `roll`.
#'
#' @templateVar metadataExample Examples that are of common interest include `oceCoordinate`, `orientation`, `frequency`, and `beamAngle`.
#'
#' @template slot_summary
#'
#' @template slot_put
#'
#' @template slot_get
#'
#' @section Reading/creating `adp` objects:
#'
#' The `metadata` slot contains various
#' items relating to the dataset, including source file name, sampling rate,
#' velocity resolution, velocity maximum value, and so on. Some of these are
#' particular to particular instrument types, and prudent researchers will take
#' a moment to examine the whole contents of the metadata, either in summary
#' form (with `str(adp[["metadata"]])`) or in detail (with
#' `adp[["metadata"]]`). Perhaps the most useful general properties are
#' `adp[["bin1Distance"]]` (the distance, in metres, from the sensor to
#' the bottom of the first bin), `adp[["cellSize"]]` (the cell height, in
#' metres, in the vertical direction, *not* along the beam), and
#' `adp[["beamAngle"]]` (the angle, in degrees, between beams and an
#' imaginary centre line that bisects all beam pairs).
#'
#' The diagram provided below indicates the coordinate-axis and beam-numbering
#' conventions for three- and four-beam ADP devices, viewed as though the
#' reader were looking towards the beams being emitted from the transducers.
#'
#' \if{html}{\figure{adp_beams.png}{options: width=400px alt="Figure: adp_beams.png"}}
#'
#' The bin geometry of a four-beam profiler is illustrated below, for
#' `adp[["beamAngle"]]` equal to 20 degrees, `adp[["bin1Distance"]]`
#' equal to 2m, and `adp[["cellSize"]]` equal to 1m. In the diagram, the
#' viewer is in the plane containing two beams that are not shown, so the two
#' visible beams are separated by 40 degrees. Circles indicate the centres of
#' the range-gated bins within the beams. The lines enclosing those circles
#' indicate the coverage of beams that spread plus and minus 2.5 degrees from
#' their centreline.
#'
#' \if{html}{\figure{adpgeometry2.png}{options: width=400px alt="Figure: adpgeometry2.png"}}
#'
#' Note that `adp[["oceCoordinate"]]` stores the present coordinate system
#' of the object, and it has possible values `"beam"`, `"xyz"`, `"sfm"` or
#' `"enu"`. (This should not be confused with
#' `adp[["originalCoordinate"]]`, which stores the coordinate system used
#' in the original data file.)
#'
#' The `data` slot holds some standardized items, and
#' many that vary from instrument to instrument. One standard item is
#' `adp[["v"]]`, a three-dimensional numeric array of velocities in
#' m/s. In this matrix, the first index indicates time, the second bin
#' number, and the third beam number. The meaning of beams number depends on
#' whether the object is in beam coordinates, frame coordinates, or earth
#' coordinates. For example, if in earth coordinates, then beam 1 is
#' the eastward component of velocity.
#' Thus, for example,
#' \preformatted{
#' library(oce)
#' data(adp)
#' t <- adp[['time']]
#' d <- adp[['distance']]
#' eastward <- adp[['v']][,,1]
#' imagep(t, d, eastward, missingColor="gray")
#' }
#' plots an image of the eastward component of velocity as a function of time (the x axis)
#' and distance from sensor (y axis), since the `adp` dataset is
#' in earth coordinates. Note the semidurnal tidal signal, and the pattern of missing
#' data at the ocean surface (gray blotches at the top).
#'
#' Corresponding to the velocity array are two arrays of type raw, and
#' identical dimension, accessed by `adp[["a"]]` and `adp[["q"]]`,
#' holding measures of signal strength and data quality quality,
#' respectively. (The exact meanings of these depend on the particular type
#' of instrument, and it is assumed that users will be familiar enough with
#' instruments to know both the meanings and their practical consequences in
#' terms of data-quality assessment, etc.)
#'
#' In addition to the arrays, there are time-based vectors. The vector
#' `adp[["time"]]` (of length equal to the first index of
#' `adp[["v"]]`, etc.) holds times of observation. Depending on type of
#' instrument and its configuration, there may also be corresponding vectors
#' for sound speed (`adp[["soundSpeed"]]`), pressure
#' (`adp[["pressure"]]`), temperature (`adp[["temperature"]]`),
#' heading (`adp[["heading"]]`) pitch (`adp[["pitch"]]`), and roll
#' (`adp[["roll"]]`), depending on the setup of the instrument.
#'
#' The precise meanings of the data items depend on the instrument type. All
#' instruments have `v` (for velocity), `q` (for a measure of data
#' quality) and `a` (for a measure of backscatter amplitude, also called
#' echo intensity).
#' Teledyne-RDI profilers have an additional item `g` (for
#' percent-good).
#'
#' VmDas-equipped Teledyne-RDI profilers additional navigation data, with
#' details listed in the table below; note that the RDI documentation (reference 2) and
#' the RDI gui use inconsistent names for most items.
#'
#' \tabular{lll}{
#' **Oce name**\tab **RDI doc name**\tab **RDI GUI name**\cr
#' `avgSpeed`\tab Avg Speed\tab Speed/Avg/Mag\cr
#' `avgMagnitudeVelocityEast`\tab Avg Mag Vel East\tab ?\cr
#' `avgMagnitudeVelocityNorth`\tab Avg Mag Vel North\tab ?\cr
#' `avgTrackMagnetic`\tab Avg Track Magnetic\tab Speed/Avg/Dir (?)\cr
#' `avgTrackTrue`\tab Avg Track True\tab Speed/Avg/Dir (?)\cr
#' `avgTrueVelocityEast`\tab Avg True Vel East\tab ?\cr
#' `avgTrueVelocityNorth`\tab Avg True Vel North\tab ?\cr
#' `directionMadeGood`\tab Direction Made Good\tab Speed/Made Good/Dir\cr
#' `firstLatitude`\tab First latitude\tab Start Lat\cr
#' `firstLongitude`\tab First longitude\tab Start Lon\cr
#' `firstTime`\tab UTC Time of last fix\tab End Time\cr
#' `lastLatitude`\tab Last latitude\tab End Lat\cr
#' `lastLongitude`\tab Last longitude\tab End Lon\cr
#' `lastTime`\tab UTC Time of last fix\tab End Time\cr
#' `numberOfHeadingSamplesAveraged`\tab Number heading samples averaged\tab ?\cr
#' `numberOfMagneticTrackSamplesAveraged`\tab Number of magnetic track samples averaged\tab ? \cr
#' `numberOfPitchRollSamplesAvg`\tab Number of magnetic track samples averaged\tab ? \cr
#' `numberOfSpeedSamplesAveraged`\tab Number of speed samples averaged\tab ? \cr
#' `numberOfTrueTrackSamplesAvg`\tab Number of true track samples averaged\tab ? \cr
#' `primaryFlags`\tab Primary Flags\tab ?\cr
#' `shipHeading`\tab Heading\tab ?\cr
#' `shipPitch`\tab Pitch\tab ?\cr
#' `shipRoll`\tab Roll\tab ?\cr
#' `speedMadeGood`\tab Speed Made Good\tab Speed/Made Good/Mag\cr
#' `speedMadeGoodEast`\tab Speed MG East\tab ?\cr
#' `speedMadeGoodNorth`\tab Speed MG North\tab ?\cr
#' }
#'
#' For Teledyne-RDI profilers, there are four three-dimensional arrays
#' holding beamwise data. In these, the first index indicates time, the
#' second bin number, and the third beam number (or coordinate number, for
#' data in `xyz`, `sfm`, `enu` or `other` coordinate systems). In
#' the list below, the quoted phrases are quantities as defined in Figure 9
#' of reference 1.
#'
#' * `v` is ``velocity'' in m/s, inferred from two-byte signed
#' integer values (multiplied by the scale factor that is stored in
#' `velocityScale` in the metadata).
#'
#' * `q` is ``correlation magnitude'', a one-byte quantity stored
#' as type `raw` in the object. The values may range from 0 to 255.
#'
#' * `a` is ``backscatter amplitude'', also known as ``echo
#' intensity'', a one-byte quantity stored as type `raw` in the object.
#' The values may range from 0 to 255.
#'
#' * `g` is ``percent good'', a one-byte quantity stored as `raw`
#' in the object. The values may range from 0 to 100.
#'
#' Finally, there is a vector `adp[["distance"]]` that indicates the bin
#' distances from the sensor, measured in metres along an imaginary centre
#' line bisecting beam pairs. The length of this vector equals
#' `dim(adp[["v"]])[2]`.
#'
#' @section Teledyne-RDI Sentinel V ADCPs: As of 2016-09-27 there is
#' provisional support for the TRDI "SentinelV" ADCPs, which are 5
#' beam ADCPs with a vertical centre beam. Relevant vertical beam
#' fields are called `adp[["vv"]]`, `adp[["va"]]`,
#' `adp[["vq"]]`, and `adp[["vg"]]` in analogy with the
#' standard 4-beam fields.
#'
#' @section Accessing and altering information within [adp-class] objects:
#' *Extracting values* Matrix data may be accessed as illustrated
#' above; e.g. for an adp object named `adp`, the data are provided by
#' `adp[["v"]]`, `adp[["a"]]`, and `adp[["q"]]`. As a
#' convenience, the last two of these can be accessed as numeric (as opposed to
#' raw) values by e.g. `adp[["a", "numeric"]]`. The vectors are accessed
#' in a similar way, e.g. `adp[["heading"]]`, etc. Quantities in the
#' `metadata` slot are also available by name, e.g.
#' `adp[["velocityResolution"]]`, etc.
#'
#' *Assigning values.* This follows the standard form, e.g. to increase
#' all velocity data by 1 cm/s, use `adp[["v"]] <- 0.01 + adp[["v"]]`.
#'
#' *Overview of contents* The `show` method (e.g.
#' `show(d)`) displays information about an ADP object named `d`.
#'
#' @section Dealing with suspect data:
#' There are many possibilities for confusion
#' with `adp` devices, owing partly to the flexibility that manufacturers
#' provide in the setup. Prudent users will undertake many tests before trusting
#' the details of the data. Are mean currents in the expected direction, and of
#' the expected magnitude, based on other observations or physical constraints?
#' Is the phasing of currents as expected? If the signals are suspect, could an
#' incorrect scale account for it? Could the transformation matrix be incorrect?
#' Might the data have exceeded the maximum value, and then ``wrapped around'' to
#' smaller values? Time spent on building confidence in data quality is seldom
#' time wasted.
#'
#' @section References:
#' 1. Teledyne-RDI, 2007.
#' *WorkHorse commands and output data format.*
#' P/N 957-6156-00 (November 2007).
#'
#' 2. Teledyne-RDI, 2012. *VmDas User's Guide, Ver. 1.46.5*.
#'
#' @seealso
#' A file containing ADP data is usually recognized by Oce, and so
#' [read.oce()] will usually read the data. If not, one may use the
#' general ADP function [read.adp()] or specialized variants
#' [read.adp.rdi()], [read.adp.nortek()],
#' [read.adp.ad2cp()],
#' [read.adp.sontek()] or [read.adp.sontek.serial()].
#'
#' ADP data may be plotted with [plot,adp-method()], which is a
#' generic function so it may be called simply as `plot`.
#'
#' Statistical summaries of ADP data are provided by the generic function
#' `summary`, while briefer overviews are provided with `show`.
#'
#' Conversion from beam to xyz coordinates may be done with
#' [beamToXyzAdp()], and from xyz to enu (east north up) may be done
#' with [xyzToEnuAdp()]. [toEnuAdp()] may be used to
#' transfer either beam or xyz to enu. Enu may be converted to other coordinates
#' (e.g. aligned with a coastline) with [enuToOtherAdp()].
#'
#' @family classes provided by oce
#' @family things related to adp data
setClass("adp", contains = "oce")
#' ADP (acoustic-doppler profiler) dataset
#'
#' This is a degraded subsample of measurements that were made with an
#' upward-pointing ADP manufactured by Teledyne-RDI, as part of the St Lawrence
#' Internal Wave Experiment (SLEIWEX).
#'
#' @name adp
#'
#' @docType data
#'
#' @usage data(adp)
#'
#' @examples
#'\donttest{
#' library(oce)
#' data(adp)
#'
#' # Velocity components. (Note: we should probably trim some bins at top.)
#' plot(adp)
#'
#' # Note that tides have moved the mooring.
#' plot(adp, which=15:18)
#'}
#'
#'
#' @source This file came from the SLEIWEX-2008 experiment.
#'
#' @family datasets provided with oce
#' @family things related to adp data
NULL
setMethod(f = "initialize",
    signature = "adp",
    definition = function(.Object, time, distance, v, a, q, oceCoordinate = "enu", orientation = "upward", ...) {
        # Initialize an "adp" object, delegating slot construction to the
        # parent "oce" class before storing any supplied data fields.
        .Object <- callNextMethod(.Object, ...)
        if (!missing(time))
            .Object@data$time <- time
        if (!missing(distance)) {
            .Object@data$distance <- distance
            # Use the final inter-bin spacing as the cell size; the first
            # spacing may be affected by the blanking interval.
            .Object@metadata$cellSize <- tail(diff(distance), 1)
        }
        if (!missing(v)) {
            .Object@data$v <- v
            # Infer beam and cell counts from the velocity array shape
            # (dimensions are time x cell x beam).
            .Object@metadata$numberOfBeams <- dim(v)[3]
            .Object@metadata$numberOfCells <- dim(v)[2]
        }
        if (!missing(a))
            .Object@data$a <- a
        if (!missing(q))
            .Object@data$q <- q
        .Object@metadata$units$v <- list(unit = expression(m/s), scale = "")
        .Object@metadata$units$distance <- list(unit = expression(m), scale = "")
        # FIXME: neither the coordinate system nor the orientation value is
        # validated here; invalid strings are stored as-is.
        .Object@metadata$oceCoordinate <- oceCoordinate
        .Object@metadata$orientation <- orientation
        .Object@processingLog$time <- presentTime()
        .Object@processingLog$value <- "create 'adp' object"
        return(.Object)
    })
## DEVELOPERS: please pattern functions and documentation on this, for uniformity.
## DEVELOPERS: You will need to change the docs, and the 3 spots in the code
## DEVELOPERS: marked '# DEVELOPER 1:', etc.
#' @title Handle Flags in adp Objects
#'
#' @details
#' If `flags` and `actions` are not provided, the
#' default is to consider a flag value of 1 to indicate bad data,
#' and 0 to indicate good data. Note that it only makes sense to use
#' velocity (`v`) flags, because other flags are, at least
#' for some instruments, stored as `raw` quantities, and such
#' quantities may not be set to `NA`.
#'
#' @param object an [adp-class] object.
#'
#' @template handleFlagsTemplate
#'
#' @examples
#' # Flag low "goodness" or high "error beam" values.
#' library(oce)
#' data(adp)
#' # Same as Example 2 of ?'setFlags,adp-method'
#' v <- adp[["v"]]
#' i2 <- array(FALSE, dim=dim(v))
#' g <- adp[["g", "numeric"]]
#' # Thresholds on percent "goodness" and error "velocity"
#' G <- 25
#' V4 <- 0.45
#' for (k in 1:3)
#' i2[,,k] <- ((g[,,k]+g[,,4]) < G) | (v[,,4] > V4)
#' adpQC <- initializeFlags(adp, "v", 2)
#' adpQC <- setFlags(adpQC, "v", i2, 3)
#' adpClean <- handleFlags(adpQC, flags=list(3), actions=list("NA"))
#' # Demonstrate (subtle) change graphically.
#' par(mfcol=c(2, 1))
#' plot(adp, which="u1")
#' plot(adpClean, which="u1")
#'
#' @family things related to adp data
setMethod("handleFlags", signature=c(object="adp", flags="ANY", actions="ANY", where="ANY", debug="ANY"),
    definition=function(object, flags=NULL, actions=NULL, where=NULL, debug=getOption("oceDebug")) {
        # Flag convention for adp objects: 1 marks bad velocity data and 0
        # marks good data (see the roxygen block above).
        if (is.null(flags)) {
            # Fall back on the flag scheme stored within the object, if any.
            flags <- defaultFlags(object)
            if (is.null(flags))
                stop("must supply 'flags', or use initializeFlagScheme() on the adp object first")
        }
        if (is.null(actions)) {
            # By default, flagged values are replaced with NA.
            actions <- list("NA")
            names(actions) <- names(flags)
        }
        if (any(names(actions) != names(flags)))
            stop("names of flags and actions must match")
        # The shared internal routine does the actual substitution.
        handleFlagsInternal(object=object, flags=flags, actions=actions, where=where, debug=debug)
    })
#' @templateVar class adp
#' @templateVar details There are no agreed-upon flag schemes for adp data.
#' @template initializeFlagsTemplate
setMethod("initializeFlags",
    c(object="adp", name="ANY", value="ANY", debug="ANY"),
    function(object, name=NULL, value=NULL, debug=getOption("oceDebug")) {
        # Create a flag array for an adp object, filled with 'value'.  Only
        # the velocity field ("v") may be flagged, because other per-beam
        # fields may be stored as raw bytes, which cannot hold NA.
        #
        # (Fixed: the debug message previously said "setFlags,adp-method",
        # which misidentified this method in debugging output.)
        oceDebug(debug, "initializeFlags,adp-method name=", name, ", value=", value, "\n")
        if (is.null(name))
            stop("must supply 'name'")
        if (name != "v")
            stop("the only flag that adp objects can handle is for \"v\"")
        # Delegate storage to the shared internal routine.
        res <- initializeFlagsInternal(object, name, value, debug-1)
        res
    })
#' @templateVar class adp
#' @templateVar note The only flag that may be set is `v`, for the array holding velocity. See \dQuote{Indexing rules}, noting that adp data are stored in 3D arrays; Example 1 shows using a data frame for `i`, while Example 2 shows using an array.
#' @template setFlagsTemplate
#' @examples
#' library(oce)
#' data(adp)
#'
#' ## Example 1: flag first 10 samples in a mid-depth bin of beam 1
#' i1 <- data.frame(1:20, 40, 1)
#' adpQC <- initializeFlags(adp, "v", 2)
#' adpQC <- setFlags(adpQC, "v", i1, 3)
#' adpClean1 <- handleFlags(adpQC, flags=list(3), actions=list("NA"))
#' par(mfrow=c(2, 1))
#' ## Top: original, bottom: altered
#' plot(adp, which="u1")
#' plot(adpClean1, which="u1")
#'
#' ## Example 2: percent-good and error-beam scheme
#' v <- adp[["v"]]
#' i2 <- array(FALSE, dim=dim(v))
#' g <- adp[["g", "numeric"]]
#' # Thresholds on percent "goodness" and error "velocity"
#' G <- 25
#' V4 <- 0.45
#' for (k in 1:3)
#' i2[,,k] <- ((g[,,k]+g[,,4]) < G) | (v[,,4] > V4)
#' adpQC2 <- initializeFlags(adp, "v", 2)
#' adpQC2 <- setFlags(adpQC2, "v", i2, 3)
#' adpClean2 <- handleFlags(adpQC2, flags=list(3), actions=list("NA"))
#' ## Top: original, bottom: altered
#' plot(adp, which="u1")
#' plot(adpClean2, which="u1") # differs at 8h and 20h
#'
#' @family things related to adp data
setMethod("setFlags",
    c(object="adp", name="ANY", i="ANY", value="ANY", debug="ANY"),
    function(object, name=NULL, i=NULL, value=NULL, debug=getOption("oceDebug")) {
        # Set flags on an adp object.  Only the velocity field ("v") can
        # carry flags; delegate the actual storage to the internal routine.
        if (is.null(name))
            stop("must specify 'name'")
        if (name != "v")
            stop("in adp objects, the only flag that can be set is for \"v\"")
        setFlagsInternal(object, name, i, value, debug - 1)
    })
#' Summarize an ADP Object
#'
#' Summarize data in an `adp` object.
#'
#' Pertinent summary information is presented.
#'
#' @aliases summary.adp summary,adp,missing-method summary,adp-method
#'
#' @param object an object of class `"adp"`, usually, a result of a call
#' to [read.oce()], [read.adp.rdi()], or
#' [read.adp.nortek()].
#'
#' @param \dots further arguments passed to or from other methods.
#'
#' @return A matrix containing statistics of the elements of the `data`
#' slot.
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
setMethod(f="summary",
    signature="adp",
    definition=function(object, ...) {
        # Print a textual summary of an adp object: instrument identity,
        # location, array dimensions, sampling, beam geometry, the
        # transformation matrix and, for AD2CP data, per-record-type details.
        mnames <- names(object@metadata)
        cat("ADP Summary\n-----------\n\n", ...)
        if ("instrumentType" %in% mnames)
            cat(paste("* Instrument: ", object@metadata$instrumentType, "\n", sep=""), ...)
        # (Fixed: this test was previously on the misspelled name
        # "manufacturere", so the Manufacturer line never printed.)
        if ("manufacturer" %in% mnames)
            cat("* Manufacturer: ", object@metadata$manufacturer, "\n")
        if ("serialNumber" %in% mnames)
            cat(paste("* Serial number: ", object@metadata$serialNumber, "\n", sep=""), ...)
        if ("firmwareVersion" %in% mnames)
            cat(paste("* Firmware version: ", object@metadata$firmwareVersion, "\n", sep=""), ...)
        if ("filename" %in% mnames)
            cat(paste("* Source filename: ``", object@metadata$filename, "``\n", sep=""), ...)
        if ("latitude" %in% mnames) {
            cat(paste("* Location: ",
                if (is.na(object@metadata$latitude)) "unknown latitude" else sprintf("%.5f N", object@metadata$latitude), ", ",
                if (is.na(object@metadata$longitude)) "unknown longitude" else sprintf("%.5f E",
                    object@metadata$longitude),
                "\n", sep=''))
        }
        v.dim <- dim(object[["v"]])
        if (!is.ad2cp(object)) {
            # AD2CP objects report these per record type, further below.
            cat("* Number of profiles:", v.dim[1], "\n")
            cat("* Number of cells: ", v.dim[2], "\n")
            cat("* Number of beams: ", v.dim[3], "\n")
            cat("* Cell size: ", object[["cellSize"]], "m\n")
        }
        if ("time" %in% names(object@data)) {
            cat("* Summary of times between profiles:\n")
            print(summary(diff(as.numeric(object@data$time))))
        }
        # Gather manufacturer-specific metadata.  (Currently unused below,
        # but retained as documentation of the fields relevant to each
        # manufacturer.  A duplicated configPressureSensor entry in the
        # nortek list has been removed.)
        if (1 == length(agrep("nortek", object@metadata$manufacturer, ignore.case=TRUE))) {
            resSpecific <- list(internalCodeVersion=object@metadata$internalCodeVersion,
                hardwareRevision=object@metadata$hardwareRevision,
                recSize=object@metadata$recSize*65536/1024/1024,
                velocityRange=object@metadata$velocityRange,
                firmwareVersion=object@metadata$firmwareVersion,
                config=object@metadata$config,
                configPressureSensor=object@metadata$configPressureSensor,
                configMagnetometerSensor=object@metadata$configMagnetometerSensor,
                configTiltSensor=object@metadata$configTiltSensor,
                configTiltSensorOrientation=object@metadata$configTiltSensorOrientation,
                serialNumberHead=object@metadata$serialNumberHead,
                blankingDistance=object@metadata$blankingDistance,
                measurementInterval=object@metadata$measurementInterval,
                deploymentName=object@metadata$deploymentName,
                velocityScale=object@metadata$velocityScale)
        } else if (1 == length(agrep("rdi", object@metadata$manufacturer, ignore.case=TRUE))) {
            resSpecific <- list(instrumentSubtype=object@metadata[["instrumentSubtype"]],
                manufacturer=object@metadata$manufacturer,
                numberOfDataTypes=object@metadata$numberOfDataTypes,
                ensembleInFile=object@metadata$ensembleInFile,
                headingAlignment=object@metadata$headingAlignment,
                headingBias=object@metadata$headingBias,
                pingsPerEnsemble=object@metadata$pingsPerEnsemble,
                bin1Distance=object@metadata$bin1Distance,
                xmitPulseLength=object@metadata$xmitPulseLength,
                oceBeamSpreaded=object@metadata$oceBeamSpreaded,
                beamConfig=object@metadata$beamConfig)
        } else if (1 == length(agrep("sontek", object@metadata$manufacturer, ignore.case=TRUE))) {
            resSpecific <- list(cpuSoftwareVerNum=object@metadata$cpuSoftwareVerNum,
                dspSoftwareVerNum=object@metadata$dspSoftwareVerNum,
                boardRev=object@metadata$boardRev,
                adpType=object@metadata$adpType,
                slantAngle=object@metadata$slantAngle,
                orientation=object@metadata$orientation)
        } else {
            resSpecific <- list(orientation=object@metadata$orientation)
        }
        # Measurement start/end times are deliberately not printed: files may
        # be subsampled on reading (and so not read to the end), in which
        # case the full time range is unknown without a seek to end-of-file.
        cat("* Frequency: ", object[["frequency"]], "kHz\n", ...)
        if ("ensembleNumber" %in% mnames) {
            cat(paste("* Ensemble Numbers: ", vectorShow(object@metadata$ensembleNumber, msg="")))
        }
        isAD2CP <- is.ad2cp(object)
        if (!isAD2CP) {
            if ("numberOfCells" %in% mnames) {
                dist <- object[["distance"]]
                if (object[["numberOfCells"]] > 1) {
                    cat(sprintf("* Cells: %d, centered at %.3f m to %.3f m, spaced by %.3f m\n",
                        object[["numberOfCells"]], dist[1], tail(dist, 1), diff(dist[1:2])), ...)
                } else {
                    cat(sprintf("* Cells: one cell, centered at %.3f m\n", dist[1]), ...)
                }
            }
            originalCoordinate <- object[["originalCoordinate"]]
            oceCoordinate <- object[["oceCoordinate"]]
            cat("* Coordinate system: ",
                if (is.null(originalCoordinate)) "?" else originalCoordinate, "[originally],",
                if (is.null(oceCoordinate)) "?" else oceCoordinate, "[presently]\n", ...)
            numberOfBeams <- object[["numberOfBeams"]]
            beamAngle <- object[["beamAngle"]]
            # Since Aug 10, 2019, orientation may be a vector, in which case
            # summarize it as a table of value:count pairs.
            orientation <- object[["orientation"]]
            if (length(orientation) > 1) {
                torientation <- table(orientation)
                orientation <- paste(unlist(lapply(names(torientation),
                    function(x)
                        paste(x, torientation[[x]], sep=":"))),
                    collapse=", ")
            }
            beamUnspreaded <- object[["oceBeamUnspreaded"]]
            cat("* Beams::\n")
            cat("    Number:           ", if (is.null(numberOfBeams)) "?" else numberOfBeams, "\n")
            cat("    Slantwise Angle:  ", if (is.null(beamAngle)) "?" else beamAngle , "\n")
            cat("    Orientation:      ", if (is.null(orientation)) "?" else orientation, "\n")
            cat("    Unspreaded:       ", if (is.null(beamUnspreaded)) "?" else beamUnspreaded, "\n")
        }
        transformationMatrix <- object[["transformationMatrix"]]
        if (!is.null(transformationMatrix) && dim(transformationMatrix)[2] >= 3) {
            digits <- 4
            cat("* Transformation matrix::\n")
            cat("  ", format(transformationMatrix[1, ], width=digits+4, digits=digits, justify="right"), "\n")
            cat("  ", format(transformationMatrix[2, ], width=digits+4, digits=digits, justify="right"), "\n")
            cat("  ", format(transformationMatrix[3, ], width=digits+4, digits=digits, justify="right"), "\n")
            if (object[["numberOfBeams"]] > 3)
                cat("  ", format(transformationMatrix[4, ], width=digits+4, digits=digits, justify="right"), "\n")
        }
        if (isAD2CP) {
            # AD2CP data are stored per record type; summarize each in turn.
            default <- ad2cpDefaultDataItem(object)
            for (rt in object[["recordTypes"]]) {
                if (rt != "text") {
                    isTheDefault <- rt == default
                    cat("* Record type '", rt, "'", if (isTheDefault) " (the default item)::\n" else "::\n", sep="")
                    cat("    Number of profiles: ", length(object[["time", rt]]), "\n")
                    cat("    Number of cells:    ", object[["numberOfCells", rt]], "\n")
                    cat("    Blanking distance:  ", object[["blankingDistance", rt]], "\n")
                    cat("    Cell size:          ", object[["cellSize", rt]], "\n")
                    numberOfBeams <- object[["numberOfBeams", rt]]
                    cat("    Number of beams:    ", numberOfBeams, "\n")
                    # NOTE(review): beamAngle is read from global metadata,
                    # not per record type -- confirm this is intentional.
                    cat("    Beam angle:         ", if (numberOfBeams == 1) 0 else object[["beamAngle"]], "\n")
                    cat("    Coordinate system:  ", object[["oceCoordinate", rt]], "\n")
                }
            }
            processingLogShow(object)
            invisible()
        } else {
            invisible(callNextMethod()) # summary
        }
    })
#' Concatenate adp objects
#'
#' @templateVar class adp
#'
#' @template concatenateTemplate
setMethod(f="concatenate",
    signature="adp",
    definition=function(object, ...) {
        # Let the inherited method concatenate the data arrays, then repair
        # the fields that should not have been concatenated.
        res <- callNextMethod()
        # Keep the profile count in step with the velocity array.  FIXME: handle AD2CP
        res@metadata$numberOfSamples <- dim(res@data$v)[1]
        # 'distance' is a per-bin coordinate, not a time series, so restore
        # it from the first object.  FIXME: handle AD2CP
        res@data$distance <- object@data$distance
        res
    })
#' @title Extract Something from an adp Object
#'
#' @param x an [adp-class] object.
#'
#' @examples
#' data(adp)
#' # Tests for beam 1, distance bin 1, first 5 observation times
#' adp[["v"]][1:5,1,1]
#' adp[["a"]][1:5,1,1]
#' adp[["a", "numeric"]][1:5,1,1]
#' as.numeric(adp[["a"]][1:5,1,1]) # same as above
#'
#' @template sub_subTemplate
#'
#' @section Details of the specialized `adp` method:
#'
#' In addition to the usual extraction of elements by name, some shortcuts
#' are also provided, e.g. `x[["u1"]]` retrieves `v[,1]`, and similarly
#' for the other velocity components. The `a` and `q`
#' data can be retrieved in [raw()] form or numeric
#' form (see examples). The coordinate system may be
#' retrieved with e.g. `x[["coordinate"]]`.
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
setMethod(f="[[",
    signature(x="adp", i="ANY", j="ANY"),
    definition=function(x, i, j, ...) {
        # Extract item 'i' from an adp object.  For Nortek "AD2CP" objects,
        # data are stored in per-record-type sublists (e.g. "average" or
        # "burst"), which are selected through 'j'; 'j' may also carry a
        # "numeric" or "raw" token (e.g. "burst numeric" or "burst:numeric")
        # controlling whether raw byte data are converted to numeric form.
        # For other objects, 'j' containing "numeric" requests conversion of
        # raw data (e.g. 'a', 'q', 'g', and SentinelV 'va'/'vq'/'vg'/'vv').
        #
        # (Cleaned up: large blocks of dead commented-out code removed, the
        # four identical raw-to-numeric branches factored into one, and a
        # duplicated "orientation" entry in the name list removed.)
        #
        # Convert raw data to numeric, preserving array dimensions.
        asNumericKeepDim <- function(r) {
            d <- dim(r)
            r <- as.numeric(r)
            dim(r) <- d
            r
        }
        ISAD2CP <- is.ad2cp(x)
        if (i == "distance") {
            if (ISAD2CP) {
                # AD2CP objects store cell geometry rather than a distance
                # vector, so compute bin distances from it.
                j <- if (missing(j)) ad2cpDefaultDataItem(x) else ad2cpDefaultDataItem(x, j)
                res <- x@data[[j]]$blankingDistance + x@data[[j]]$cellSize*seq(1, x@data[[j]]$numberOfCells)
            } else {
                res <- x@data$distance
            }
            res
        } else if (i %in% c("originalCoordinate", "oceCoordinate",
                "cellSize", "blankingDistance", "orientation",
                "beamUnspreaded", # Note: beamAngle is handled later since it is in metadata
                "accelerometerx", "accelerometery", "accelerometerz",
                "heading", "pitch", "roll",
                "ensemble", "time", "pressure", "soundSpeed",
                "temperature", "temperatureMagnetometer", "temperatureRTC",
                "nominalCorrelation",
                "powerLevel", "transmitEnergy",
                "v", "a", "q", "g",
                "echosounder", "AHRS", "altimeterDistance", "altimeterFigureOfMerit")) {
            metadataNames <- names(x@metadata)
            if (ISAD2CP) {
                # Determine the record type and whether raw data should be
                # converted to numeric form.
                returnNumeric <- FALSE # default: leave 'raw' data as 'raw'
                if (missing(j)) {
                    j <- ad2cpDefaultDataItem(x)
                } else {
                    if (length(grep("numeric", j))) {
                        returnNumeric <- TRUE
                        j <- gsub("numeric", "", j)
                    } else if (length(grep("raw", j))) {
                        returnNumeric <- FALSE
                        j <- gsub("raw", "", j)
                    }
                    j <- gsub("[ :]+", "", j) # clean spaces or colons, if any
                    j <- ad2cpDefaultDataItem(x, j)
                }
                # Default to "average" if the record type came out blank.
                if (1 == length(grep("^[ ]*$", j)))
                    j <- "average"
                j <- ad2cpDefaultDataItem(x, j)
                res <- x@data[[j]][[i]]
                if (returnNumeric)
                    res <- asNumericKeepDim(res)
                res
            } else {
                if (!missing(j) && 1 == length(grep("numeric", j))) {
                    asNumericKeepDim(x@data[[i]])
                } else {
                    # Metadata takes precedence over data for these names.
                    if (i %in% metadataNames) x@metadata[[i]] else x@data[[i]]
                }
            }
        } else if (i %in% c("numberOfBeams", "numberOfCells")) {
            # For AD2CP, these are stored per record type; otherwise in metadata.
            if (ISAD2CP) {
                j <- if (missing(j)) ad2cpDefaultDataItem(x) else ad2cpDefaultDataItem(x, j)
                x@data[[j]][[i]]
            } else {
                x@metadata[[i]]
            }
        } else if (i == "transformationMatrix") {
            if (ISAD2CP) {
                # Construct a transformation matrix from the beam angle; see
                # Section 5.3 of RD Instruments, "ADCP Coordinate
                # Transformation" (July 1998).
                theta <- x@metadata$beamAngle * atan2(1, 1) / 45
                TMc <- 1 # for convex (diverging) beam setup; use -1 for concave
                TMa <- 1 / (2 * sin(theta))
                TMb <- 1 / (4 * cos(theta))
                TMd <- TMa / sqrt(2)
                rbind(c(TMc*TMa, -TMc*TMa, 0, 0),
                    c( 0, 0, -TMc*TMa, TMc*TMa),
                    c( TMb, TMb, TMb, TMb),
                    c( TMd, TMd, -TMd, -TMd))
            } else {
                x@metadata$transformationMatrix
            }
        } else if (i == "recordTypes") {
            # FIXME: _AD2CPrecordtype_ update if new record types added to read.adp.ad2cp()
            if (ISAD2CP) {
                allowed <- c("burst", "average", "bottomTrack", "interleavedBurst", "burstAltimeter",
                    "DVLBottomTrack", "echosounder", "waterTrack", "altimeter", "averageAltimeter", "text")
                allowed[allowed %in% names(x@data)]
            } else {
                "depends on the data setup"
            }
        } else if (i %in% c("va", "vq", "vg", "vv")) {
            # Vertical-beam fields of SentinelV instruments; NULL if absent.
            if (!i %in% names(x@data)) {
                NULL
            } else if (!missing(j) && 1 == length(grep("numeric", j))) {
                asNumericKeepDim(x@data[[i]])
            } else {
                x@data[[i]]
            }
        } else {
            callNextMethod() # [[
        }
    })
#' Replace Parts of an ADP Object
#'
#' In addition to the usual insertion of elements by name, note
#' that e.g. `pitch` gets stored into `pitchSlow`.
#'
#' @param x an [adp-class] object.
#'
#' @template sub_subsetTemplate
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
setMethod(f="[[<-",
    signature="adp",
    definition=function(x, i, j, ..., value) {
        # Store 'value' under name 'i': into metadata if the name already
        # exists there, otherwise into data if it exists there, otherwise
        # defer to the inherited method.
        # FIXME: use j for e.g. times
        if (i %in% names(x@metadata))
            x@metadata[[i]] <- value
        else if (i %in% names(x@data))
            x@data[[i]] <- value
        else
            x <- callNextMethod(x=x, i=i, j=j, ...=..., value=value) # [[<-
        # Validity is not checked here, because a user may wish to shorten
        # items one by one, checking validity only afterwards.
        invisible(x)
    })
setValidity("adp",
    function(object) {
        # Validity check for "adp" objects: the velocity ('v'), amplitude
        # ('a') and correlation ('q') arrays must exist, 'a' and 'q' must
        # match the dimensions of 'v', and any per-profile vectors must
        # match the length of 'time'.
        #
        # Fixes relative to the previous version: (1) all.equal() returns a
        # character description when its arguments differ, so negating its
        # result directly was an error; test with isTRUE() instead.  (2) The
        # 'q' mismatch message previously reported the dimensions of 'a'.
        # (3) The function now returns TRUE explicitly when "time" is absent
        # (it previously fell through, returning NULL).
        for (item in c("v", "a", "q")) {
            if (!(item %in% names(object@data))) {
                cat("object@data$", item, " is missing", sep="")
                return(FALSE)
            }
        }
        mdim <- dim(object@data$v)
        if ("a" %in% names(object@data) && !isTRUE(all.equal(mdim, dim(object@data$a)))) {
            cat("dimension of 'a' is (", dim(object@data$a), "), which does not match that of 'v' (", mdim, ")\n")
            return(FALSE)
        }
        if ("q" %in% names(object@data) && !isTRUE(all.equal(mdim, dim(object@data$q)))) {
            cat("dimension of 'q' is (", dim(object@data$q), "), which does not match that of 'v' (", mdim, ")\n")
            return(FALSE)
        }
        if ("time" %in% names(object@data)) {
            n <- length(object@data$time)
            for (item in c("pressure", "temperature", "salinity", "depth", "heading", "pitch", "roll")) {
                if (item %in% names(object@data) && length(object@data[[item]]) != n) {
                    cat("length of time vector is ", n, " but the length of ", item, " is ",
                        length(object@data[[item]]), "\n")
                    return(FALSE)
                }
            }
        }
        TRUE
    })
#' Subset an ADP Object
#'
#' Subset an adp (acoustic Doppler profile) object, in a manner that is
#' somewhat analogous to [subset.data.frame()].
#'
#' For any data type,
#' subsetting can be by `time`, `ensembleNumber`, or `distance`.
#' These may not be combined, but it is easy to use a string of calls to
#' carry out combined operations, e.g.
#' `subset(subset(adp,distance<d0), time<t0)`
#'
#' For the special
#' case of AD2CP data (see [read.adp.ad2cp()]), it is possible to subset
#' to the "average" data records with `subset="average"`, to the
#' "burst" records with `subset="burst"`, or to the "interleavedBurst"
#' with `subset="interleavedBurst"`; note that no warning is issued,
#' if this leaves an object with no useful data.
#'
#' @param x an [adp-class] object.
#'
#' @param subset A condition to be applied to the `data` portion of
#' `x`. See \sQuote{Details}.
#'
#' @param ... Ignored.
#'
#' @return An [adp-class] object.
#'
#' @examples
#' library(oce)
#' data(adp)
#' # 1. Look at first part of time series, organized by time
#' earlyTime <- subset(adp, time < mean(range(adp[['time']])))
#' plot(earlyTime)
#'
#' # 2. Look at first ten ensembles (AKA profiles)
#' en <- adp[["ensembleNumber"]]
#' firstTen <- subset(adp, ensembleNumber < en[11])
#' plot(firstTen)
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
#' @family functions that subset oce objects
setMethod(f="subset",
signature="adp",
definition=function(x, subset, ...) {
subsetString <- paste(deparse(substitute(subset)), collapse=" ")
res <- x
dots <- list(...)
debug <- getOption("oceDebug")
if (length(dots) && ("debug" %in% names(dots)))
debug <- dots$debug
if (missing(subset))
stop("must give 'subset'")
if (grepl("time", subsetString) || grepl("ensembleNumber", subsetString)) {
if (grepl("time", subsetString)) {
oceDebug(debug, "subsetting an adp by time\n")
if (length(grep("distance", subsetString)))
stop("cannot subset by both time and distance; split into multiple calls")
keep <- eval(substitute(subset), x@data, parent.frame(2))
} else if (grepl("ensembleNumber", subsetString)) {
oceDebug(debug, "subsetting an adp by ensembleNumber\n")
if (length(grep("distance", subsetString)))
stop("cannot subset by both ensembleNumber and distance; split into multiple calls")
if (!"ensembleNumber" %in% names(x@metadata))
stop("cannot subset by ensembleNumber because this adp object lacks that information")
keep <- eval(substitute(subset), x@metadata, parent.frame(2))
} else {
stop("internal coding error -- please report to developers")
}
names <- names(x@data)
haveDia <- "timeDia" %in% names
if (haveDia) {
subsetDiaString <- gsub("time", "timeDia", subsetString)
keepDia <- eval(parse(text=subsetDiaString), x@data)
oceDebug(debug, "for diagnostics, keeping ", 100*sum(keepDia) / length(keepDia), "% of data\n")
}
oceDebug(debug, vectorShow(keep, "keeping bins:"))
oceDebug(debug, "number of kept bins:", sum(keep), "\n")
if (sum(keep) < 2)
stop("must keep at least 2 profiles")
res <- x
## Update those metadata that have one value per ensemble
mnames <- names(x@metadata)
for (name in c("ensembleNumber", "orientation")) {
if (name %in% mnames)
res@metadata[[name]] <- x@metadata[[name]][keep]
}
## FIXME: check to see if we handling slow timescale data properly
for (name in names(x@data)) {
if (length(grep("Dia$", name))) {
if ("distance" == name)
next
if (name == "timeDia" || is.vector(x@data[[name]])) {
oceDebug(debug, "subsetting x@data$", name, ", which is a vector\n", sep="")
res@data[[name]] <- x@data[[name]][keepDia]
} else if (is.matrix(x@data[[name]])) {
oceDebug(debug, "subsetting x@data$", name, ", which is a matrix\n", sep="")
res@data[[name]] <- x@data[[name]][keepDia, ]
} else if (is.array(x@data[[name]])) {
oceDebug(debug, "subsetting x@data$", name, ", which is an array\n", sep="")
res@data[[name]] <- x@data[[name]][keepDia, , , drop=FALSE]
}
} else {
if (name == "time" || is.vector(x@data[[name]])) {
if ("distance" == name)
next
oceDebug(debug, "subsetting x@data$", name, ", which is a vector\n", sep="")
res@data[[name]] <- x@data[[name]][keep] # FIXME: what about fast/slow
} else if (is.matrix(x@data[[name]])) {
oceDebug(debug, "subsetting x@data$", name, ", which is a matrix\n", sep="")
res@data[[name]] <- x@data[[name]][keep, ]
} else if (is.array(x@data[[name]])) {
oceDebug(debug, "subsetting x@data$", name, ", which is an array\n", sep="")
res@data[[name]] <- x@data[[name]][keep, , , drop=FALSE]
}
}
}
if ("v" %in% names(x@metadata$flags)) {
dim <- dim(x@metadata$flags$v)
res@metadata$flags$v <- x@metadata$flags$v[keep, , , drop=FALSE]
oceDebug(debug, "subsetting flags$v original dim=",
paste(dim, collapse="x"), "; new dim=",
paste(dim(res@metadata$flags$v), collapse="x"))
}
} else if (length(grep("distance", subsetString))) {
oceDebug(debug, "subsetting an adp by distance\n")
if (length(grep("time", subsetString)))
stop("cannot subset by both time and distance; split into multiple calls")
keep <- eval(substitute(subset), x@data, parent.frame(2))
oceDebug(debug, vectorShow(keep, "keeping bins:"), "\n")
if (sum(keep) < 2)
stop("must keep at least 2 bins")
res <- x
res@data$distance <- x@data$distance[keep] # FIXME: broken for AD2CP
for (name in names(x@data)) {
if (name == "time")
next
if (is.array(x@data[[name]]) && 3 == length(dim(x@data[[name]]))) {
oceDebug(debug, "subsetting array data[[", name, "]] by distance\n")
oceDebug(debug, "before, dim(", name, ") =", dim(res@data[[name]]), "\n")
res@data[[name]] <- x@data[[name]][, keep, , drop=FALSE]
oceDebug(debug, "after, dim(", name, ") =", dim(res@data[[name]]), "\n")
}
}
oceDebug(debug, "names of flags: ", paste(names(x@metadata$flags), collapse=" "), "\n")
if ("v" %in% names(x@metadata$flags)) {
vdim <- dim(x@metadata$flags$v)
res@metadata$flags$v <- x@metadata$flags$v[, keep, , drop=FALSE]
oceDebug(debug, "subsetting flags$v original dim=",
paste(vdim, collapse="x"), "; new dim=",
paste(dim(res@metadata$flags$v), collapse="x"), "\n")
}
} else if (length(grep("pressure", subsetString))) {
keep <- eval(substitute(subset), x@data, parent.frame(2))
res <- x
res@data$v <- res@data$v[keep, , ]
res@data$a <- res@data$a[keep, , ]
res@data$q <- res@data$q[keep, , ]
res@data$time <- res@data$time[keep]
if ("v" %in% names(x@metadata$flags)) {
dim <- dim(x@metadata$flags$v)
res@metadata$flags$v <- x@metadata$flags$v[keep, , drop=FALSE]
oceDebug(debug, "subsetting flags$v original dim=",
paste(dim, collapse="x"), "; new dim=",
paste(dim(res@metadata$flags$v), collapse="x"))
}
## the items below may not be in the dataset
names <- names(res@data)
if ("bottomRange" %in% names) res@data$bottomRange <- res@data$bottomRange[keep, ]
if ("pressure" %in% names) res@data$pressure <- res@data$pressure[keep]
if ("temperature" %in% names) res@data$temperature <- res@data$temperature[keep]
if ("salinity" %in% names) res@data$salinity <- res@data$salinity[keep]
if ("depth" %in% names) res@data$depth <- res@data$depth[keep]
if ("heading" %in% names) res@data$heading <- res@data$heading[keep]
if ("pitch" %in% names) res@data$pitch <- res@data$pitch[keep]
if ("roll" %in% names) res@data$roll <- res@data$roll[keep]
} else if (length(grep("average", subsetString))) {
res@data$burst <- NULL
res@data$interleavedBurst <- NULL
} else if (length(grep("burst", subsetString))) {
res@data$average <- NULL
res@data$interleavedBurst <- NULL
} else if (length(grep("interleavedBurst", subsetString))) {
res@data$average <- NULL
res@data$burst <- NULL
} else {
stop('subset should be "distance", "time", "average", "burst", or "interleavedBurst"; "',
subsetString, '" is not permitted')
}
res@metadata$numberOfSamples <- dim(res@data$v)[1] # FIXME: handle AD2CP
res@metadata$numberOfCells <- dim(res@data$v)[2] # FIXME: handle AD2CP
res@processingLog <- processingLogAppend(res@processingLog, paste("subset.adp(x, subset=", subsetString, ")", sep=""))
res
})
#' Create an ADP Object
#'
#' @details
#' Construct an [adp-class] object. Only a basic
#' subset of the typical `data` slot is represented in the arguments
#' to this function, on the assumption that typical usage in reading data
#' is to set up a nearly-blank [adp-class] object, the `data`
#' slot of which is then inserted. However, in some testing situations it
#' can be useful to set up artificial `adp` objects, so the other
#' arguments may be useful.
#'
#' @param time of observations in POSIXct format
#'
#' @param distance to centre of bins
#'
#' @param v array of velocities, with first index for time, second for bin number, and third for beam number
#'
#' @param a amplitude, a [raw()] array with dimensions matching `v`
#'
#' @param q quality, a [raw()] array with dimensions matching `v`
#'
#' @param orientation a string indicating sensor orientation, e.g. `"upward"` and `"downward"`
#'
#' @param coordinate a string indicating the coordinate system, `"enu"`, `"beam"`, `"xy"`, or `"other"`
#'
#' @return An [adp-class] object.
#'
#' @examples
#' data(adp)
#' t <- adp[["time"]]
#' d <- adp[["distance"]]
#' v <- adp[["v"]]
#' a <- as.adp(time=t, distance=d, v=v)
#'\donttest{
#' plot(a)
#'}
#'
#' @author Dan Kelley
#'
#' @family things related to adp data
as.adp <- function(time, distance, v, a=NULL, q=NULL, orientation="upward", coordinate="enu")
{
    ## Delegate storage of the main fields to the class initializer.
    res <- new("adp", time=time, distance=distance, v=v, a=a, q=q)
    if (!missing(v)) {
        ## Beam and cell counts are inferred from the third and second
        ## dimensions of the velocity array, respectively.
        res@metadata$numberOfBeams <- dim(v)[3] # FIXME: handle AD2CP
        res@metadata$numberOfCells <- dim(v)[2] # FIXME: handle AD2CP
    }
    res@metadata$oceCoordinate <- coordinate # FIXME: handle AD2CP
    res@metadata$orientation <- orientation # FIXME: handle AD2CP
    ## Cell size is taken from the first distance increment; this assumes
    ## uniform bin spacing -- TODO confirm that this holds for all callers.
    res@metadata$cellSize <- if (missing(distance)) NA else diff(distance[1:2]) # FIXME: handle AD2CP
    res@metadata$units <- list(v="m/s", distance="m")
    res
}
## head.adp <- function(x, n=6L, ...)
## {
## numberOfProfiles <- dim(x[["v"]])[1]
## if (n < 0)
## look <- seq.int(max(1, (1 + numberOfProfiles + n)), numberOfProfiles)
## else
## look <- seq.int(1, min(n, numberOfProfiles))
## res <- x
## for (name in names(x@data)) {
## if ("distance" == name)
## next
## if (is.vector(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look]
## } else if (is.matrix(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,]
## } else if (is.array(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,,]
## } else {
## res@data[[name]] <- x@data[[name]][look] # for reasons unknown, 'time' is not a vector
## }
## }
## res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
## res
## }
## tail.adp <- function(x, n = 6L, ...)
## {
## numberOfProfiles <- dim(x[["v"]])[1]
## if (n < 0)
## look <- seq.int(1, min(numberOfProfiles, numberOfProfiles + n))
## else
## look <- seq.int(max(1, (1 + numberOfProfiles - n)), numberOfProfiles)
## res <- x
## for (name in names(x@data)) {
## if (is.vector(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look]
## } else if (is.matrix(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,]
## } else if (is.array(x@data[[name]])) {
## res@data[[name]] <- x@data[[name]][look,,]
## } else {
## res@data[[name]] <- x@data[[name]][look] # for reasons unknown, 'time' is not a vector
## }
## }
## res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
## res
## }
#' Get names of Acoustic-Doppler Beams
#'
#' @param x an [adp-class] object.
#'
#' @param which an integer indicating beam number.
#'
#' @return A character string containing a reasonable name for the beam, of the
#' form `"beam 1"`, etc., for beam coordinates, `"east"`, etc. for
#' enu coordinates, `"u"`, etc. for `"xyz"`, or `"u'"`, etc.,
#' for `"other"` coordinates; a blank string is returned for any other
#' coordinate system. The coordinate system is determined with
#' `x[["oceCoordinate"]]`.
#'
#' @author Dan Kelley
#'
#' @seealso This is used by [read.oce()].
#' @family things related to adp data
#' @family things related to adv data
beamName <- function(x, which)
{
    coordinate <- x[["oceCoordinate"]]
    ## Dispatch on the (exact) coordinate-system name; the unnamed final
    ## entry is the fallback for unrecognized systems.
    switch(coordinate,
           beam=paste(gettext("beam", domain="R-oce"), 1:4)[which],
           enu=c(gettext("east", domain="R-oce"),
                 gettext("north", domain="R-oce"),
                 gettext("up", domain="R-oce"),
                 gettext("error", domain="R-oce"))[which],
           xyz=c("u", "v", "w", "e")[which],
           other=c("u'", "v'", "w'", "e")[which],
           " ")
}
#' Read an ADP File
#'
#' Read an ADP data file, producing an [adp-class] object.
#'
#' Several file types can be handled. Some of
#' these functions are wrappers that map to device names, e.g.
#' `read.aquadoppProfiler` does its work by calling
#' `read.adp.nortek`; in this context, it is worth noting that the
#' ``aquadopp'' instrument is a one-cell profiler that might just as well have
#' been documented under the heading [read.adv()].
#'
#' @param manufacturer an optional character string indicating the manufacturer, used by
#' the general function `read.adp` to select a subsidiary function to use. If this
#' is not given, then [oceMagic()] is used to try to infer the type. If this
#' is provided, then the value `"rdi"` will cause [read.adp.rdi()]
#' to be used, `"nortek"` will cause [read.adp.nortek()] to be used,
#' and `"sontek"` will cause [read.adp.sontek()] to be used.
#'
#' @param despike if `TRUE`, [despike()] will be used to clean
#' anomalous spikes in heading, etc.
#' @template adpTemplate
#'
#' @author Dan Kelley and Clark Richards
#'
#' @family things related to adp data
read.adp <- function(file, from, to, by, tz=getOption("oceTz"),
                     longitude=NA, latitude=NA,
                     manufacturer,
                     monitor=FALSE, despike=FALSE, processingLog,
                     debug=getOption("oceDebug"),
                     ...)
{
    ## Record which optional trimming arguments were supplied, both for the
    ## debug trace and for the default substitution below.
    fromGiven <- !missing(from) # FIXME document THIS
    toGiven <- !missing(to) # FIXME document THIS
    byGiven <- !missing(by) # FIXME document THIS
    oceDebug(debug, "read.adp(\"", file, "\"",
             ", from=", if (fromGiven) format(from) else "(missing)",
             ", to=", if (toGiven) format(to) else "(missing)",
             ", by=", if (byGiven) format(by) else "(missing)",
             ", manufacturer=\"", if (missing(manufacturer)) "(missing)" else manufacturer, "\", ...) {\n",
             sep="", unindent=1)
    ## Defaults: start at the first profile, keep every profile, and read to
    ## the end of the file (to=0 is the sentinel for "read to end").
    if (!fromGiven)
        from <- 1
    if (!byGiven)
        by <- 1
    if (!toGiven)
        to <- 0
    if (is.character(file) && 0 == file.info(file)$size)
        stop("empty file")
    if (missing(manufacturer)) {
        ## With no manufacturer stated, let read.oce() infer the file type.
        oceDebug(debug, "using read.oce() since 'manufacturer' argument is missing\n")
        res <- read.oce(file=file, from=from, to=to, by=by, tz=tz,
                        longitude=longitude, latitude=latitude,
                        debug=debug-1, monitor=monitor, despike=despike,
                        ...)
    } else {
        ## BUG FIX: pmatch() returns an integer index (or NA), not the
        ## matched string, so the previous code -- which compared pmatch()'s
        ## result against "rdi" etc. -- never matched any branch, and res
        ## silently ended up NULL. Convert the index back to the name, and
        ## error clearly on an unrecognized manufacturer.
        knownManufacturers <- c("rdi", "nortek", "sontek")
        m <- pmatch(manufacturer, knownManufacturers)
        if (is.na(m))
            stop("unrecognized manufacturer \"", manufacturer,
                 "\"; it must (partially) match one of \"rdi\", \"nortek\" or \"sontek\"")
        manufacturer <- knownManufacturers[m]
        oceDebug(debug, "inferred manufacturer to be \"", manufacturer, "\"\n")
        res <- if (manufacturer == "rdi") {
            read.adp.rdi(file=file, from=from, to=to, by=by, tz=tz,
                         longitude=longitude, latitude=latitude,
                         debug=debug-1, monitor=monitor, despike=despike,
                         processingLog=processingLog, ...)
        } else if (manufacturer == "nortek") {
            read.adp.nortek(file=file, from=from, to=to, by=by, tz=tz,
                            longitude=longitude, latitude=latitude,
                            debug=debug-1, monitor=monitor, despike=despike,
                            processingLog=processingLog, ...)
        } else if (manufacturer == "sontek") {
            read.adp.sontek(file=file, from=from, to=to, by=by, tz=tz,
                            longitude=longitude, latitude=latitude,
                            debug=debug-1, monitor=monitor, despike=despike,
                            processingLog=processingLog, ...)
        }
    }
    oceDebug(debug, "} # read.adp()\n", unindent=1)
    res
}
#' Plot ADP Data
#'
#' Create a summary plot of data measured by an acoustic doppler profiler.
#'
#' The plot may have one or more panels, with the content being controlled by
#' the `which` argument.
#'
#' * `which=1:4` (or `which="u1"` to `"u4"`) yield a
#' distance-time image plot of a velocity component. If `x` is in
#' `beam` coordinates (signalled by
#' `metadata$oce.coordinate=="beam"`), this will be the beam velocity,
#' labelled `b[1]` etc. If `x` is in xyz coordinates (sometimes
#' called frame coordinates, or ship coordinates), it will be the velocity
#' component to the right of the frame or ship (labelled `u` etc).
#' Finally, if `x` is in `"enu"` coordinates, the image will show the
#' the eastward component (labelled `east`). If `x` is in
#' `"other"` coordinates, it will be component corresponding to east,
#' after rotation (labelled `u\'`). Note that the coordinate is set by
#' [read.adp()], or by [beamToXyzAdp()],
#' [xyzToEnuAdp()], or [enuToOtherAdp()].
#'
#' * `which=5:8` (or `which="a1"` to `"a4"`) yield
#' distance-time images of backscatter intensity of the respective beams. (For
#' data derived from Teledyne-RDI instruments, this is the item called ``echo
#' intensity.'')
#'
#' * `which=9:12` (or `which="q1"` to `"q4"`) yield
#' distance-time images of signal quality for the respective beams. (For RDI
#' data derived from instruments, this is the item called ``correlation
#' magnitude.'')
#'
#' * `which=60` or `which="map"` draw a map of location(s).
#'
#' * `which=70:73` (or `which="g1"` to `"g4"`) yield
#' distance-time images of percent-good for the respective beams. (For data
#' derived from Teledyne-RDI instruments, which are the only instruments that
#' yield this item, it is called ``percent good.'')
#'
#' * `which=80:83` (or `which="vv"`, `which="va"`,
#' `which="vq"`, and `which="vg"`) yield distance-time
#' images of the vertical beam fields for a 5 beam "SentinelV" ADCP
#' from Teledyne RDI.
#'
#' * `which="vertical"` yields a two panel distance-time
#' image of vertical beam velocity and amplitude.
#'
#' * `which=13` (or `which="salinity"`) yields a time-series plot
#' of salinity.
#'
#' * `which=14` (or `which="temperature"`) yields a time-series
#' plot of temperature.
#'
#' * `which=15` (or `which="pressure"`) yields a time-series plot
#' of pressure.
#'
#' * `which=16` (or `which="heading"`) yields a time-series plot
#' of instrument heading.
#'
#' * `which=17` (or `which="pitch"`) yields a time-series plot of
#' instrument pitch.
#'
#' * `which=18` (or `which="roll"`) yields a time-series plot of
#' instrument roll.
#'
#' * `which=19` yields a time-series plot of distance-averaged
#' velocity for beam 1, rightward velocity, eastward velocity, or
#' rotated-eastward velocity, depending on the coordinate system.
#'
#' * `which=20` yields a time-series of distance-averaged velocity for
#' beam 2, foreward velocity, northward velocity, or rotated-northward
#' velocity, depending on the coordinate system.
#'
#' * `which=21` yields a time-series of distance-averaged velocity for
#' beam 3, up-frame velocity, upward velocity, or rotated-upward velocity,
#' depending on the coordinate system.
#'
#' * `which=22` yields a time-series of distance-averaged velocity for
#' beam 4, for `beam` coordinates, or velocity estimate, for other
#' coordinates. (This is ignored for 3-beam data.)
#'
#' * `which="progressiveVector"` (or `which=23`) yields a progressive-vector diagram in the horizontal
#' plane, plotted with `asp=1`. Normally, the depth-averaged velocity
#' components are used, but if the `control` list contains an item named
#' `bin`, then the depth bin will be used (with an error resulting if the
#' bin is out of range).
#'
#' * `which=24` yields a time-averaged profile of the first component
#' of velocity (see `which=19` for the meaning of the component, in
#' various coordinate systems).
#'
#' * `which=25` as for 24, but the second component.
#'
#' * `which=26` as for 24, but the third component.
#'
#' * `which=27` as for 24, but the fourth component (if that makes
#' sense, for the given instrument).
#'
#' * `which=28` or `"uv"` yields velocity plot in the horizontal
#' plane, i.e. `u[2]` versus `u[1]`. If the number of data points is small, a
#' scattergraph is used, but if it is large, [smoothScatter()] is
#' used.
#'
#' * `which=29` or `"uv+ellipse"` as the `"uv"` case, but
#' with an added indication of the tidal ellipse, calculated from the eigen
#' vectors of the covariance matrix.
#'
#' * `which=30` or `"uv+ellipse+arrow"` as the
#' `"uv+ellipse"` case, but with an added arrow indicating the mean
#' current.
#'
#' * `which=40` or `"bottomRange"` for average bottom range from
#' all beams of the instrument.
#'
#' * `which=41` to `44` (or `"bottomRange1"` to
#' `"bottomRange4"`) for bottom range from beams 1 to 4.
#'
#' * `which=50` or `"bottomVelocity"` for average bottom velocity
#' from all beams of the instrument.
#'
#' * `which=51` to `54` (or `"bottomVelocity1"` to
#' `"bottomVelocity4"`) for bottom velocity from beams 1 to 4.
#'
#' * `which=55` (or `"heaving"`) for time-integrated,
#' depth-averaged, vertical velocity, i.e. a time series of heaving.
#'
#' * `which=100` (or `"soundSpeed"`) for a time series of sound speed.
#'
#'
#' In addition to the above, the following shortcuts are defined:
#'
#' * `which="velocity"` equivalent to `which=1:3` or `1:4`
#' (depending on the device) for velocity components.
#'
#' * `which="amplitude"` equivalent to `which=5:7`
#' or `5:8` (depending on the device) for backscatter intensity
#' components.
#'
#' * `which="quality"` equivalent to `which=9:11` or `9:12`
#' (depending on the device) for quality components.
#'
#' * `which="hydrography"` equivalent to `which=14:15`
#' for temperature and pressure.
#'
#' * `which="angles"` equivalent to `which=16:18` for
#' heading, pitch and roll.
#'
#' The color scheme for image plots (`which` in 1:12) is provided by the
#' `col` argument, which is passed to [image()] to do the actual
#' plotting. See \dQuote{Examples} for some comparisons.
#'
#' A common quick-look plot to assess mooring movement is to use
#' `which=15:18` (pressure being included to signal the tide, and tidal
#' currents may dislodge a mooring or cause it to settle).
#'
#' By default, `plot,adp-method` uses a `zlim` value for the
#' [image()] that is constructed to contain all the data, but to be
#' symmetric about zero. This is done on a per-panel basis, and the scale is
#' plotted at the top-right corner, along with the name of the variable being
#' plotted. You may also supply `zlim` as one of the \dots{} arguments,
#' but be aware that a reasonable limit on horizontal velocity components is
#' unlikely to be of much use for the vertical component.
#'
#' A good first step in the analysis of measurements made from a moored device
#' (stored in `d`, say) is to do `plot(d, which=14:18)`. This shows
#' time series of water properties and sensor orientation, which is helpful in
#' deciding which data to trim at the start and end of the deployment, because
#' they were measured on the dock or on the ship as it travelled to the mooring
#' site.
#'
#' @param x an [adp-class] object.
#'
#' @param which list of desired plot types. These are graphed in panels
#' running down from the top of the page. If `which` is not given,
#' the plot will show images of the distance-time dependence of velocity
#' for each beam. See \dQuote{Details} for the meanings of various values of `which`.
#'
#' @param j optional string specifying a sub-class of `which`. For
#' Nortek Aquadopp profilers, this may either be `"default"` (or missing)
#' to get the main signal, or `"diagnostic"` to get a diagnostic
#' signal. For Nortek AD2CP profiles, this may be any one of
#' `"average"` (or missing) for averaged data, `"burst"`
#' for burst data, or `"interleaved burst"` for interleaved burst data;
#' more data types are provided by that instrument, and may be added here
#' at some future time.
#'
#' @param col optional indication of color(s) to use. If not provided, the
#' default for images is `oce.colorsPalette(128,1)`, and for lines and
#' points is black.
#'
#' @param breaks optional breaks for color scheme
#'
#' @param zlim a range to be used as the `zlim` parameter to the
#' [imagep()] call that is used to create the image. If omitted,
#' `zlim` is set for each panel individually, to encompass the data of the
#' panel and to be centred around zero. If provided as a two-element vector,
#' then that is used for each panel. If provided as a two-column matrix, then
#' each panel of the graph uses the corresponding row of the matrix; for
#' example, setting `zlim=rbind(c(-1,1),c(-1,1),c(-.1,.1))` might make
#' sense for `which=1:3`, so that the two horizontal velocities have one
#' scale, and the smaller vertical velocity has another.
#'
#' @param titles optional vector of character strings to be used as labels for
#' the plot panels. For images, these strings will be placed in the right hand
#' side of the top margin. For timeseries, these strings are ignored. If this
#' is provided, its length must equal that of `which`.
#'
#' @param lwd if the plot is of a time-series or scattergraph format with
#' lines, this is used in the usual way; otherwise, e.g. for image formats,
#' this is ignored.
#'
#' @param type if the plot is of a time-series or scattergraph format, this is
#' used in the usual way, e.g. `"l"` for lines, etc.; otherwise, as for
#' image formats, this is ignored.
#'
#' @param ytype character string controlling the type of the y axis for images
#' (ignored for time series). If `"distance"`, then the y axis will be
#' distance from the sensor head, with smaller distances nearer the bottom of
#' the graph. If `"profile"`, then this will still be true for
#' upward-looking instruments, but the y axis will be flipped for
#' downward-looking instruments, so that in either case, the top of the graph
#' will represent the sample nearest the sea surface.
#'
#' @param drawTimeRange boolean that applies to panels with time as the
#' horizontal axis, indicating whether to draw the time range in the top-left
#' margin of the plot.
#'
#' @param useSmoothScatter boolean that indicates whether to use
#' [smoothScatter()] in various plots, such as `which="uv"`. If
#' not provided a default is used, with [smoothScatter()] being used
#' if there are more than 2000 points to plot.
#'
#' @param missingColor color used to indicate `NA` values in images (see
#' [imagep()]); set to `NULL` to avoid this indication.
#'
#' @template mgpTemplate
#'
#' @template marTemplate
#'
#' @param mai.palette margins, in inches, to be added to those calculated for
#' the palette; alter from the default only with caution
#'
#' @param tformat optional argument passed to [oce.plot.ts()], for
#' plot types that call that function. (See [strptime()] for the
#' format used.)
#'
#' @param marginsAsImage boolean, `TRUE` to put a wide margin to the right
#' of time-series plots, even if there are no images in the `which` list.
#' (The margin is made wide if there are some images in the sequence.)
#'
#' @param cex numeric character expansion factor for plot symbols; see [par()].
#'
#' @param cex.axis,cex.lab character expansion factors for axis numbers and axis names; see [par()].
#'
#' @param xlim optional 2-element list for `xlim`, or 2-column matrix, in
#' which case the rows are used, in order, for the panels of the graph.
#'
#' @param ylim optional 2-element list for `ylim`, or 2-column matrix, in
#' which case the rows are used, in order, for the panels of the graph.
#'
#' @param control optional list of parameters that may be used for different
#' plot types. Possibilities are `drawBottom` (a boolean that indicates
#' whether to draw the bottom) and `bin` (a numeric giving the index of
#' the bin on which to act, as explained in \dQuote{Details}).
#'
#' @param useLayout set to `FALSE` to prevent using [layout()]
#' to set up the plot. This is needed if the call is to be part of a sequence
#' set up by e.g. `par(mfrow)`.
#'
#' @param coastline a `coastline` object, or a character string naming
#' one. This is used only for `which="map"`. See notes at
#' [plot,ctd-method()] for more information on built-in coastlines.
#'
#' @param span approximate span of map in km
#'
#' @param main main title for plot, used just on the top panel, if there are
#' several panels.
#'
#' @param grid if `TRUE`, a grid will be drawn for each panel. (This
#' argument is needed, because calling [grid()] after doing a
#' sequence of plots will not result in useful results for the individual
#' panels.
#'
#' @param grid.col color of grid
#'
#' @param grid.lty line type of grid
#'
#' @param grid.lwd line width of grid
#' @template debugTemplate
#'
#' @param \dots optional arguments passed to plotting functions. For example,
#' supplying `despike=TRUE` will cause time-series panels to be de-spiked
#' with [despike()]. Another common action is to set the color for
#' missing values on image plots, with the argument `missingColor` (see
#' [imagep()]). Note that it is an error to give `breaks` in
#' \dots{}, if the formal argument `zlim` was also given, because they
#' could contradict each other.
#'
#' @return A list is silently returned, containing `xat` and `yat`,
#' values that can be used by [oce.grid()] to add a grid to the plot.
#'
#' @examples
#' library(oce)
#' data(adp)
#' plot(adp, which=1:3)
#' plot(adp, which='temperature', tformat='%H:%M')
#'
#' @author Dan Kelley
#'
#' @family functions that plot oce data
#' @family things related to adp data
#'
#' @aliases plot.adp
## DEVELOPER NOTE: update first test in tests/testthat/test_adp.R if a new 'which' is handled
setMethod(f="plot",
signature=signature("adp"),
definition=function(x, which, j,
col, breaks, zlim,
titles,
lwd=par('lwd'),
type='l',
ytype=c("profile", "distance"),
drawTimeRange=getOption("oceDrawTimeRange"),
useSmoothScatter,
missingColor="gray",
mgp=getOption("oceMgp"),
mar=c(mgp[1]+1.5, mgp[1]+1.5, 1.5, 1.5),
mai.palette=rep(0, 4),
tformat,
marginsAsImage=FALSE,
cex=par("cex"), cex.axis=par("cex.axis"), cex.lab=par("cex.lab"),
xlim, ylim,
control,
useLayout=FALSE,
coastline="coastlineWorld", span=300,
main="",
grid=FALSE, grid.col='darkgray', grid.lty='dotted', grid.lwd=1,
debug=getOption("oceDebug"),
...)
{
debug <- max(0, min(debug, 4))
oceDebug(debug, "plot,adp-method(x, ",
argShow(mar),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(mgp),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(which),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(cex),
argShow(cex.axis),
argShow(cex.lab),
"\n", sep="", unindent=1, style="bold")
oceDebug(debug, " ",
argShow(breaks),
argShow(j),
"...) {\n", sep="", unindent=1, style="bold")
## oceDebug(debug, "par(mar)=", paste(par('mar'), collapse=" "), "\n")
## oceDebug(debug, "par(mai)=", paste(par('mai'), collapse=" "), "\n")
## oceDebug(debug, "par(mfg)=", paste(par('mfg'), collapse=" "), "\n")
## oceDebug(debug, "mai.palette=", paste(mai.palette, collapse=" "), "\n")
if ("adorn" %in% names(list(...)))
warning("In plot,adp-method() : the 'adorn' argument was removed in November 2017", call.=FALSE)
instrumentType <- x[["instrumentType"]]
if (is.null(instrumentType))
instrumentType <- "" # simplifies later checks
oceDebug(debug, "instrumentType=\"", instrumentType, "\"\n", sep="")
## interpret mode, j
if (missing(j))
j <- ""
if (instrumentType == "aquadopp") {
if (!missing(j) && j == "diagnostic") {
if (x[["numberOfCells"]] != 1) {
warning("This object claims to be Nortek Aquadopp, but there is more than 1 cell, so it must not be; so j=\"diagnostic\" is being ignored")
j <- 'normal'
}
if (!("timeDia" %in% names(x@data))) {
warning("This instrument did not record Diagnostic data, so j=\"diagnostic\" is being ignored")
j <- "normal"
}
}
} else if (instrumentType == "AD2CP") {
jOrig <- j
j <- ad2cpDefaultDataItem(x, j)
if (j != jOrig)
oceDebug(debug, "given the object contents, 'j' was changed from \"", jOrig, "\" to \"", j, "\", for this Nortek AD2CP instrument\n", sep="")
}
if (missing(which)) {
## Note that j is ignored for e.g. RDI adp.
which <- 1:dim(x[["v", j]])[3]
oceDebug(debug, "setting which=c(", paste(which, collapse=","), "), based on the data\n", sep="")
}
colGiven <- !missing(col)
breaksGiven <- !missing(breaks)
zlimGiven <- !missing(zlim)
if (breaksGiven && zlimGiven)
stop("cannot supply both zlim and breaks")
ylimGiven <- !missing(ylim)
oceDebug(debug, 'ylimGiven=', ylimGiven, '\n')
res <- list(xat=NULL, yat=NULL)
if (ylimGiven)
oceDebug(debug, "ylim=c(", paste(ylim, collapse=", "), ")\n")
if (!inherits(x, "adp"))
stop("method is only for objects of class '", "adp", "'")
if (!(is.null(x@metadata$haveActualData) || x@metadata$haveActualData))
stop("In plot,adp-method() : there are no profiles in this dataset", call.=FALSE)
opar <- par(no.readonly = TRUE)
nw <- length(which)
fac <- if (nw < 3) 1 else 0.66 # try to emulate par(mfrow)
## par(cex=cex*fac, cex.axis=fac*cex.axis, cex.lab=fac*cex.lab) # BUILD-TEST FAILURE
## par(cex=cex*fac) # OK
oceDebug(debug, "adp.R:1759 cex=", cex, ", original par('cex')=", par('cex'), style="blue")
##par(cex=cex*fac, cex.axis=fac*cex.axis) # OK
par(cex.axis=fac*cex.axis, cex.lab=fac*cex.lab) # OK
oceDebug(debug, "adp.R:1761 ... after par() call, have par('cex')=", par('cex'), style="blue")
rm(fac)
numberOfBeams <- x[["numberOfBeams", j]]
oceDebug(debug, "numberOfBeams=", numberOfBeams, " (note: j=\"", j, "\")\n", sep="")
numberOfCells <- x[["numberOfCells", j]]
oceDebug(debug, "numberOfCells=", numberOfCells, " (note: j=\"", j, "\")\n", sep="")
if (nw == 1) {
pm <- pmatch(which, c("velocity", "amplitude", "quality", "hydrography", "angles"))
## FIXME: decide what to do about 5-beam ADCPs
if (!is.na(pm)) {
if (pm == 1)
which <- 0 + seq(1, min(4, numberOfBeams)) # 5th beam not included
else if (pm == 2)
which <- 4 + seq(1, min(4, numberOfBeams)) # 5th beam not included
else if (pm == 3)
which <- 8 + seq(1, min(4, numberOfBeams)) # 5th beam not included
else if (pm == 4)
which <- 14:15
else if (pm == 5)
which <- 16:18
nw <- length(which)
}
}
if (!missing(titles) && length(titles) != nw)
stop("length of 'titles' must equal length of 'which'")
if (nw > 1)
on.exit(par(opar))
if (is.numeric(which)) {
whichFraction <- which - floor(which)
which <- floor(which)
} else {
whichFraction <- rep(0, length(which))
}
par(mgp=mgp, mar=mar, cex=cex)
dots <- list(...)
ytype <- match.arg(ytype)
## user may specify a matrix for xlim and ylim
if (ylimGiven) {
if (is.matrix(ylim)) {
if (dim(ylim)[2] != nw) {
ylim2 <- matrix(ylim, ncol=2, nrow=nw, byrow=TRUE) # FIXME: is this what I want?
}
} else {
ylim2 <- matrix(ylim, ncol=2, nrow=nw, byrow=TRUE) # FIXME: is this what I want?
}
class(ylim2) <- class(ylim)
ylim <- ylim2
}
xlimGiven <- !missing(xlim)
if (xlimGiven) {
if (is.matrix(xlim)) {
if (dim(xlim)[2] != nw) {
xlim2 <- matrix(xlim, ncol=2, nrow=nw) # FIXME: is this what I want?
}
} else {
if (length(xlim) != 2)
stop("xlim must be a vector of length 2, or a 2-column matrix")
xlim2 <- matrix(xlim[1:2], ncol=2, nrow=nw, byrow=TRUE)
}
class(xlim2) <- class(xlim)
attr(xlim2, "tzone") <- attr(xlim, "tzone")
xlim <- xlim2
}
if (missing(zlim)) {
zlimGiven <- FALSE
zlimAsGiven <- NULL
} else {
zlimGiven <- TRUE
if (is.vector(zlim)) {
if (length(zlim) == 2) {
zlimAsGiven <- matrix(rep(zlim, length(which)), ncol=2, byrow=TRUE)
} else {
stop("zlim must be a vector of length 2, or a matrix with 2 columns")
}
} else {
## FIXME: should this be made into a matrix?
zlimAsGiven <- zlim
}
}
ylimAsGiven <- if (ylimGiven) ylim else NULL
if (missing(lwd))
lwd <- rep(par('lwd'), length.out=nw)
else
lwd <- rep(lwd, length.out=nw)
if (missing(main))
main <- rep('', length.out=nw)
else
main <- rep(main, length.out=nw)
## oceDebug(debug, "later on in plot,adp-method:\n")
## oceDebug(debug, " par(mar)=", paste(par('mar'), collapse=" "), "\n")
## oceDebug(debug, " par(mai)=", paste(par('mai'), collapse=" "), "\n")
oceDebug(debug, "which:", which, "\n")
whichOrig <- which
which <- oce.pmatch(which,
list(u1=1, u2=2, u3=3, u4=4,
a1=5, a2=6, a3=7, a4=8,
q1=9, q2=10, q3=11, q4=12,
g1=70, g2=71, g3=72, g4=73,
salinity=13,
temperature=14,
pressure=15,
heading=16,
pitch=17,
roll=18,
progressiveVector=23,
uv=28,
"uv+ellipse"=29,
"uv+ellipse+arrow"=30,
bottomRange=40,
bottomRange1=41, bottomRange2=42, bottomRange3=43, bottomRange4=44,
bottomVelocity=50,
bottomVelocity1=51, bottomVelocity2=52, bottomVelocity3=53, bottomVelocity4=54,
heaving=55,
map=60,
soundSpeed=100,
velocity=1:3,
amplitude=5:7,
quality=9:11,
hydrography=14:15,
angles=16:18,
vertical=80:81,
vv=80, va=81, vq=82, vg=83))
nw <- length(which) # may be longer with e.g. which='velocity'
if (any(is.na(which)))
stop("plot,adp-method(): unrecognized 'which' code: ", paste(whichOrig[is.na(which)], collapse=" "),
call.=FALSE)
oceDebug(debug, "which:", which, "(after conversion to numerical codes)\n")
## FIXME: delete this comment-block after key plot types are checked.
## I had this as a test, in early Nov 2018. But now, I prefer
##OLD if ("instrumentType" %in% names(x@metadata) && !is.null(x@metadata$instrumentType) && x@metadata$instrumentType == "AD2CP") {
##OLD if (!all(which %in% 1:4))
##OLD warning("In plot,adp-method() : only 'which' <5 has been tested", call.=FALSE)
##OLD }
images <- c(1:12, 70:73, 80:83)
timeseries <- c(13:22, 40:44, 50:54, 55, 100)
spatial <- 23:27
#speed <- 28
tt <- x[["time", j]]
##ttDia <- x@data$timeDia # may be null
class(tt) <- "POSIXct" # otherwise image() gives warnings
if (!zlimGiven && all(which %in% 5:8)) {
## single scale for all 'a' (amplitude) data
zlim <- range(abs(as.numeric(x[["a"]][, , which[1]-4])), na.rm=TRUE) # FIXME name of item missing, was ma
if (length(which) > 1) {
for (w in 2:length(which)) {
zlim <- range(abs(c(zlim, x[["a"]][, , which[w]-4])), na.rm=TRUE) # FIXME: check name
}
}
}
##oceDebug(debug, "useLayout=", useLayout, "\n")
showBottom <- ("bottomRange" %in% names(x@data)) && !missing(control) && !is.null(control["drawBottom"])
if (showBottom)
bottom <- apply(x@data$bottomRange, 1, mean, na.rm=TRUE)
oceDebug(debug, "showBottom=", showBottom, "\n")
oceDebug(debug, "cex=", cex, ", par('cex')=", par('cex'), style="blue")
if (useLayout) {
if (any(which %in% images) || marginsAsImage) {
w <- 1.5
lay <- layout(matrix(1:(2*nw), nrow=nw, byrow=TRUE), widths=rep(c(1, lcm(w)), nw))
oceDebug(debug, "calling layout(matrix...)\n")
oceDebug(debug, "using layout, since this is an image, or has marginsAsImage\n")
} else {
if (nw != 1 || which != 23) {
lay <- layout(cbind(1:nw))
oceDebug(debug, "calling layout(cbind(1:", nw, ")\n")
oceDebug(debug, "using layout\n")
}
}
} else {
if (nw > 1) {
par(mfrow=c(nw, 1))
oceDebug(debug, "calling par(mfrow=c(", nw, ", 1)\n")
}
}
flipy <- ytype == "profile" && x@metadata$orientation[1] == "downward"
##message("numberOfBeams=", numberOfBeams)
##message("numberOfCells=", numberOfCells)
haveTimeImages <- any(which %in% images) && 1 < numberOfCells
oceDebug(debug, 'haveTimeImages=', haveTimeImages, '(if TRUE, it means any timeseries graphs get padding on RHS)\n')
par(mar=mar, mgp=mgp)
if (haveTimeImages) {
oceDebug(debug, "setting up margin spacing before plotting\n", style="italic")
oceDebug(debug, " before: ", vectorShow(par("mar")), unindent=1, style="blue")
## Since zlim not given, this just does calculations
drawPalette(#cex.axis=cex * (1 - min(nw / 8, 1/4)),
debug=debug-1)
oceDebug(debug, " after: ", vectorShow(par("mar")), unindent=1, style="blue")
}
omar <- par("mar")
oceDebug(debug, vectorShow(omar), style="red")
##oceDebug(debug, "drawTimeRange=", drawTimeRange, "\n", sep="")
oceDebug(debug, "cex=", cex, ", par('cex')=", par('cex'), style="blue")
for (w in 1:nw) {
oceDebug(debug, "plot,adp-method top of loop (before setting par('mar'))\n", style="italic")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
oceDebug(debug, vectorShow(omar), style="blue")
par(mar=omar) # ensures all panels start with original mar
oceDebug(debug, "which[", w, "]=", which[w], "\n", sep="", style="red")
if (which[w] %in% images) {
## image types
skip <- FALSE
numberOfBeams <- x[["numberOfBeams", j]]
v <- x[["v"]]
if (which[w] %in% 1:4) {
## velocity
if (instrumentType == "aquadopp" && j == "diagnostic") {
oceDebug(debug, "a diagnostic velocity component image/timeseries\n")
z <- x@data$vDia[, , which[w]]
zlab <- if (missing(titles)) paste(beamName(x, which[w]), "Dia", sep="") else titles[w]
xdistance <- x[["distance", j]]
oceDebug(debug, vectorShow(xdistance))
y.look <- if (ylimGiven) (ylimAsGiven[w, 1] <= xdistance & xdistance <= ylimAsGiven[w, 2]) else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else
max(abs(x@data$vDia[, y.look, which[w]]), na.rm=TRUE) * c(-1, 1)
} else {
oceDebug(debug, "a velocity component image/timeseries\n")
z <- x[["v", j]][, , which[w]]
zlab <- if (missing(titles)) beamName(x, which[w]) else titles[w]
oceDebug(debug, "zlab:", zlab, "\n")
xdistance <- x[["distance", j]]
oceDebug(debug, vectorShow(xdistance))
y.look <- if (ylimGiven) ylimAsGiven[w, 1] <= xdistance & xdistance <= ylimAsGiven[w, 2] else rep(TRUE, length(xdistance))
oceDebug(debug, vectorShow(y.look))
if (0 == sum(y.look))
stop("no data in the provided ylim=c(", paste(ylimAsGiven[w, ], collapse=","), ")")
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else max(abs(z[, y.look]), na.rm=TRUE) * c(-1, 1)
}
oceDebug(debug, "zlim: ", paste(zlim, collapse=" "), "\n")
}
oceDebug(debug, "flipy =", flipy, "\n")
            } else if (which[w] %in% 5:8) {
                oceDebug(debug, "which[", w, "]=", which[w], "; this is some type of amplitude\n", sep="")
                ## Amplitude images, selected by codes 5:8 (a1..a4); the beam
                ## index is which[w]-4.
                if (j == "diagnostic" && "aDia" %in% names(x@data)) {
                    oceDebug(debug, "a diagnostic amplitude component image/timeseries\n")
                    z <- x[["aDia", "numeric"]][, , which[w]-4]
                    xdistance <- x[["distance", j]]
                    oceDebug(debug, vectorShow(xdistance))
                    ## NOTE(review): ylimAsGiven is indexed as a vector here
                    ## (ylimAsGiven[1], ylimAsGiven[2]) but as a matrix row
                    ## (ylimAsGiven[w, 1]) in the velocity branch above; with
                    ## more than one panel this grabs the wrong elements -- confirm.
                    y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
                    oceDebug(debug, vectorShow(y.look))
                    zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
                        if (breaksGiven) NULL else range(z[, y.look], na.rm=TRUE)
                    }
                    ## NOTE(review): the second label is a[2] while the others are
                    ## aDia[.]; presumably it should be aDia[2] -- confirm.
                    zlab <- c(expression(aDia[1]), expression(a[2]), expression(aDia[3]), expression(aDia[4]))[which[w]-4]
                } else {
                    oceDebug(debug, "an amplitude component image/timeseries\n")
                    a <- x[["a", paste(j, "numeric")]]
                    z <- a[, , which[w]-4]
                    dim(z) <- dim(a)[1:2]
                    oceDebug(debug, "accessed data, of dim=", paste(dim(z), collapse="x"), "\n")
                    ##OLD dim(z) <- dim(x@data$a)[1:2] # FIXME: why was this here?
                    xdistance <- x[["distance", j]]
                    oceDebug(debug, vectorShow(xdistance))
                    ## NOTE(review): same vector-vs-matrix indexing concern as in
                    ## the diagnostic case above -- confirm ylimAsGiven[w, 1] was intended.
                    y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
                    oceDebug(debug, vectorShow(y.look))
                    zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
                        if (breaksGiven) NULL else range(as.numeric(z[, y.look]), na.rm=TRUE)
                    }
                    oceDebug(debug, "zlim: ", paste(zlim, collapse=" "), "\n")
                    zlab <- c(expression(a[1]), expression(a[2]), expression(a[3]), expression(a[4]))[which[w]-4]
                    oceDebug(debug, "zlab: '", as.character(zlab), "'\n")
                }
} else if (which[w] %in% 9:12) {
oceDebug(debug, " which[",w,"]=",which[w],": quality or correlation\n",sep="")
## correlation, or quality. First, try 'q', then 'amp'
q <- x[["q", paste(j, "numeric")]]
if (!is.null(q)) {
oceDebug(debug, "[['q']] works for this object\n")
z <- q[, , which[w]-8]
dim(z) <- dim(q)[1:2]
rm(q)
zlim <- c(0, 256)
zlab <- c(expression(q[1]), expression(q[2]), expression(q[3]))[which[w]-8]
} else {
amp <- x[["amp"]]
if (!is.null(amp)) {
oceDebug(debug, "[['amp']] works for this object\n")
z <- amp[, , which[w]-8]
dim(z) <- dim(amp)[1:2]
rm(amp)
zlim <- c(0, max(z, na.rm=TRUE))
zlab <- c(expression(amp[1]), expression(amp[2]), expression(amp[3]))[which[w]-8]
} else {
stop("In plot,adp-method() : ADP object lacks both 'q' and 'amp' data items", call.=FALSE)
}
}
            } else if (which[w] %in% 70:(69+x[["numberOfBeams"]])) {
                ## Percent-good ('g') images, selected by codes 70:73 (g1..g4);
                ## the beam index is which[w]-69.  (The earlier "## correlation"
                ## label here was misleading: codes 9:12 handle correlation/quality.)
                xg <- x[["g", paste(j, "numeric")]]
                if (!is.null(xg)) {
                    z <- as.numeric(xg[, , which[w]-69])
                    dim(z) <- dim(xg)[1:2]
                    rm(xg)
                    zlim <- c(0, 100)    # percent-good spans 0 to 100
                    ## NOTE(review): the label subscript uses which[w]-8, but
                    ## which[w] is in 70:73 here, so the subscript is out of range
                    ## and zlab becomes NA; presumably it should be which[w]-69
                    ## (with a fourth g[4] entry added) -- confirm.
                    zlab <- c(expression(g[1]), expression(g[2]), expression(g[3]))[which[w]-8]
                } else {
                    stop("In plot,adp-method() : ADP object lacks a 'g' data item", call.=FALSE)
                }
} else if (which[w] == 80) {
## vertical beam velocity
z <- x[["vv", j]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam velocity\n")
zlab <- if (missing(titles)) expression(w[vert]) else titles[w]
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[w, 1] <= xdistance & xdistance <= ylimAsGiven[w, 2] else rep(TRUE, length(xdistance))
if (0 == sum(y.look))
stop("no data in the provided ylim=c(", paste(ylimAsGiven[w, ], collapse=","), ")")
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else c(-1, 1)
}
} else {
stop("In plot,adp-method() : ADP object lacks a 'vv' data item, so which=80 and which=\"vv\" cannot work", call.=FALSE)
}
} else if (which[w] == 81) {
## vertical beam amplitude
z <- x[["va", paste(j, "numeric")]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam amplitude\n")
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(as.numeric(x@data$va[, y.look]), na.rm=TRUE)
}
zlab <- expression(a[vert])
} else {
stop("In plot,adp-method() : ADP object lacks a 'va' data item, so which=81 and which=\"va\" cannot work", call.=FALSE)
}
} else if (which[w] == 82) {
## vertical beam correlation
z <- x[["vq", paste(j, "numeric")]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam correlation\n")
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(as.numeric(x@data$vq[, y.look]), na.rm=TRUE)
}
zlab <- expression(q[vert])
} else {
stop("In plot,adp-method() : ADP object lacks a 'vq' data item, so which=82 and which=\"vq\" cannot work", call.=FALSE)
}
} else if (which[w] == 83) {
## vertical beam percent good
z <- x[["vg", paste(j, "numeric")]]
if (!is.null(z)) {
oceDebug(debug, "vertical beam percent good\n")
xdistance <- x[["distance", j]]
y.look <- if (ylimGiven) ylimAsGiven[1] <= xdistance & xdistance <= ylimAsGiven[2] else rep(TRUE, length(xdistance))
zlim <- if (zlimGiven) zlimAsGiven[w, ] else {
if (breaksGiven) NULL else range(x[["vg", "numeric"]][, y.look], na.rm=TRUE)
}
zlab <- expression(g[vert])
} else {
stop("In plot,adp-method() : ADP object lacks a 'vg' data item, so which=83 and which=\"vg\" cannot work", call.=FALSE)
}
} else {
skip <- TRUE
}
if (!skip) {
if (numberOfCells > 1) {
if (xlimGiven) {
oceDebug(debug, "about to call imagep() with xlim given: par('cex')=", par("cex"), ", cex=", cex, style="blue")
oceDebug(debug, "xlimGiven case\n")
ats <- imagep(x=tt, y=x[["distance", j]], z=z,
xlim=xlim[w, ],
zlim=zlim,
flipy=flipy,
col=if (colGiven) col else {
if (missing(breaks)) oce.colorsPalette(128, 1)
else oce.colorsPalette(length(breaks)-1, 1)
},
breaks=breaks,
ylab=resizableLabel("distance km"),
xlab="Time",
zlab=zlab,
tformat=tformat,
drawTimeRange=drawTimeRange,
drawContours=FALSE,
missingColor=missingColor,
mgp=mgp,
mar=omar,
mai.palette=mai.palette,
cex=1,
main=main[w],
debug=debug-1,
...)
} else {
oceDebug(debug, "about to call imagep() with no xlim. cex=", cex, ", par('cex')=", par("cex"), ", par('cex.axis')=", par("cex.axis"), style="blue")
oceDebug(debug, "about to do an image plot with no xlim given, with cex=", cex, ", par(\"cex\")=", par("cex"), ", nw=", nw, ", cex sent to oce.plots=", cex*(1-min(nw/8, 1/4)), "\n")
oceDebug(debug, " with par('mar')=c(", paste(par('mar'),collapse=","), ", mar=c(", paste(mar,collapse=","), ") and mgp=c(",paste(mgp,collapse=","),")", "\n")
oceDebug(debug, " with time[1]=", format(tt[[1]], "%Y-%m-%d %H:%M:%S"), "\n")
ats <- imagep(x=tt, y=x[["distance", j]], z=z,
zlim=zlim,
flipy=flipy,
ylim=if (ylimGiven) ylim[w, ] else range(x[["distance", j]], na.rm=TRUE),
col=if (colGiven) col else { if (missing(breaks)) oce.colorsPalette(128, 1) else oce.colorsPalette(length(breaks)-1, 1) },
breaks=breaks,
ylab=resizableLabel("distance"),
xaxs="i",
xlab="Time",
zlab=zlab,
tformat=tformat,
drawTimeRange=drawTimeRange,
drawContours=FALSE,
missingColor=missingColor,
mgp=mgp,
mar=mar,
mai.palette=mai.palette,
cex=1,
main=main[w],
debug=debug-1,
...)
}
if (showBottom)
lines(x[["time", j]], bottom)
} else {
col <- if (colGiven) rep(col, length.out=nw) else rep("black", length.out=nw)
time <- if (j== "diagnostic") x@data$timeDia else x[["time"]]
tlim <- range(time)
ats <- oce.plot.ts(time, z, ylab=zlab,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
res$xat <- ats$xat
res$yat <- ats$yat
}
}
drawTimeRange <- FALSE
} else if (which[w] %in% timeseries) {
## time-series types
col <- if (colGiven) rep(col, length.out=nw) else rep("black", length.out=nw)
oceDebug(debug, "graph ", w, " is a timeseries\n", sep="")
##par(mgp=mgp, mar=mar, cex=cex)
tlim <- range(x[["time", j]])
if (which[w] == 13) {
oceDebug(debug, "which[", w, "] == 13 (salinity)\n", sep="")
if (haveTimeImages) drawPalette(debug=debug-1)
ats <- oce.plot.ts(x[["time", j]], x[["salinity", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("S"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else if (which[w] == 14) {
oceDebug(debug, "which[", w, "] == 14 (temperature)\n", sep="")
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "temperatureDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$temperatureDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=expression(paste("Diagnostic T [ ", degree, "C ]")),
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["temperature", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=expression(paste("T [ ", degree, "C ]")),
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 15) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
oceDebug(debug, "which[", w, "] == 15 (pressure)\n", sep="")
if (j == "diagnostic" && "pressureDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$pressureDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="pDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
oceDebug(debug, "about to do non-diagnostic pressure plot, with cex=", cex, ", par(\"cex\")=", par("cex"), ", nw=", nw, ", cex sent to oce.plots=", cex*(1-min(nw/8, 1/4)), "\n", sep="", style="italic")
oceDebug(debug, vectorShow(mar), style="blue")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
oceDebug(debug, vectorShow(haveTimeImages), style="blue")
oceDebug(debug, "time[1]=", format(x[["time",j]][1], "%Y-%m-%d %H:%M:%S"), "\n", style="blue")
ats <- oce.plot.ts(x[["time", j]], x[["pressure", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("p"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 16) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "headingDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$headingDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="headingDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["heading", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("heading"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 17) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "pitchDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$pitchDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="pitchDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["pitch", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("pitch"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 18) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
if (j == "diagnostic" && "rollDia" %in% names(x@data)) {
ats <- oce.plot.ts(x@data$timeDia, x@data$rollDia,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="rollDia",
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
ats <- oce.plot.ts(x[["time", j]], x[["roll", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=resizableLabel("roll"),
type=type,
mgp=mgp,
mar=omar,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
}
} else if (which[w] == 19) {
if (x[["numberOfBeams"]] > 0) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 1], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 1),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 1 because the device no beams", call.=FALSE)
}
} else if (which[w] == 20) {
if (x[["numberOfBeams"]] > 1) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 2], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 2),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 2 because the device has only ", x[["numberOfBeams"]], " beams", call.=FALSE)
}
} else if (which[w] == 21) {
if (x[["numberOfBeams"]] > 2) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 3], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 3),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 3 because the device has only", x[["numberOfBeams"]], "beams", call.=FALSE)
}
} else if (which[w] == 22) {
if (x[["numberOfBeams"]] > 3) {
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], apply(x[["v", j]][, , 4], 1, mean, na.rm=TRUE),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab=beamName(x, 4),
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
} else {
stop("In plot,adp-method() : cannot plot beam/velo 4 because the device has only", x[["numberOfBeams"]], "beams", call.=FALSE)
}
} else if (which[w] == 55) {
## heaving
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
dt <- as.numeric(x[["time"]][2]) - as.numeric(x[["time"]][1])
ats <- oce.plot.ts(x[["time", j]], dt * cumsum(apply(x[["v", j]][, , 3], 1, mean, na.rm=TRUE)),
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="Heaving [m]",
type=type,
mgp=mgp,
mar=omar,
#mai.palette=mai.palette,
drawTimeRange=drawTimeRange,
tformat=tformat,
debug=debug-1)
drawTimeRange <- FALSE
} else if (which[w] == 100) {
oceDebug(debug, "draw(ctd, ...) of type 'soundSpeed'\n")
if (haveTimeImages) drawPalette(debug=debug-1, mai=mai.palette)
ats <- oce.plot.ts(x[["time", j]], x[["soundSpeed", j]],
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ],
xaxs="i",
col=col[w],
lwd=lwd[w],
cex=1, cex.axis=1, cex.lab=1,
main=main[w],
ylab="Sound Speed [m/s]",
type=type,
mgp=mgp,
mar=omar,
tformat=tformat,
debug=debug-1)
} else if (which[w] %in% 40:44) {
## bottomRange
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
n <- prod(dim(x[["v"]])[1:2])
if ("br" %in% names(x@data)) {
if (which[w] == 40) {
R <- apply(x@data$br, 1, mean, na.rm=TRUE)
ats <- oce.plot.ts(x[["time", j]], R,
ylab="Bottom range [m]",
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(R, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
tformat=tformat,
mar=omar,
debug=debug-1)
} else {
R <- x@data$br[, which[w]-40]
ats <- oce.plot.ts(x[["time"]], R,
ylab=paste("Beam", which[w]-40, "bottom range [m]"),
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(R, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
tformat=tformat,
mar=omar,
debug=debug-1)
}
} else {
stop("In plot,adp-method() : ADP object lacks bottom-tracking data, so which=40:44 and which=\"bottomRange[*]\" cannot work", call.=FALSE)
}
} else if (which[w] %in% 50:54) {
## bottom velocity
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
n <- prod(dim(x[["v"]])[1:2])
if ("bv" %in% names(x@data)) {
if (which[w] == 50) {
V <- apply(x@data$bv, 1, mean, na.rm=TRUE)
ats <- oce.plot.ts(x[["time"]], V,
ylab="Bottom speed [m/s]",
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(V, na.rm=TRUE),
tformat=tformat,
cex=1, cex.axis=1, cex.lab=1,
mar=omar,
debug=debug-1)
} else {
V <- x@data$bv[, which[w]-50]
ats <- oce.plot.ts(x[["time"]], V,
ylab=paste("Beam", which[w]-50, "bottom velocity [m/s]"),
type=type,
xlim=if (xlimGiven) xlim[w, ] else tlim,
ylim=if (ylimGiven) ylim[w, ] else range(V, na.rm=TRUE),
tformat=tformat,
cex=1, cex.axis=1, cex.lab=1,
mar=omar,
debug=debug-1)
}
} else {
stop("In plot,adp-method() : ADP object lacks bottom-tracking data, so which=50:54 and which=\"bottomVelocity[*]\" cannot work", call.=FALSE)
}
}
## FIXME delete the next block, after testing.
if (marginsAsImage && useLayout) {
## FIXME: I think this should be deleted
## blank plot, to get axis length same as for images
omar <- par("mar")
par(mar=c(mar[1], 1/4, mgp[2]+1/2, mgp[2]+1))
plot(1:2, 1:2, type='n', axes=FALSE, xlab="", ylab="", cex=1, cex.axis=1, cex.lab=1)
par(mar=omar)
}
} else if (which[w] %in% spatial) {
## various spatial types
if (which[w] == 23) {
## progressive vector
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
if (j == 'diagnostic')
dt <- as.numeric(difftime(x@data$timeDia[2], x@data$timeDia[1], units="sec")) # FIXME: should not assume all equal
else
dt <- as.numeric(difftime(x[["time"]][2], x[["time"]][1], units="sec")) # FIXME: should not assume all equal
mPerKm <- 1000
if (j == 'diagnostic') {
U <- x@data$vDia[, 1, 1]
V <- x@data$vDia[, 1, 2]
ttt <- x@data$timeDia
} else {
U <- x[["v", j]][, , 1]
V <- x[["v", j]][, , 2]
ttt <- x[["time", j]]
}
if (!missing(control) && !is.null(control$bin)) {
if (control$bin < 1)
stop("In plot,adp-method() : cannot have control$bin less than 1, but got ", control$bin, call.=FALSE)
max.bin <- dim(x[["v"]])[2]
if (control$bin > max.bin)
stop("In plot,adp-method() : cannot have control$bin larger than ", max.bin, " but got ", control$bin, call.=FALSE)
u <- U[, control$bin] #EAC: bug fix, attempt to subset 2D matrix by 3 dimensions
v <- V[, control$bin]
} else {
if (x[["numberOfCells", j]] > 1) {
u <- apply(U, 1, mean, na.rm=TRUE)
v <- apply(V, 1, mean, na.rm=TRUE)
} else {
u <- U
v <- V
}
}
u[is.na(u)] <- 0 # zero out missing
v[is.na(v)] <- 0
xDist <- integrateTrapezoid(ttt, u, 'cA') / mPerKm
yDist<- integrateTrapezoid(ttt, v, 'cA') / mPerKm
plot(xDist, yDist, xlab="km", ylab="km", type='l', asp=1,
col=if (colGiven) col else "black",
cex=1, cex.axis=1, cex.lab=1,
...)
xaxp <- par("xaxp")
xat <- seq(xaxp[1], xaxp[2], length.out=1+xaxp[3])
yaxp <- par("yaxp")
yat <- seq(yaxp[1], yaxp[2], length.out=1+yaxp[3])
ats <- list(xat=xat, yat=yat)
} else if (which[w] %in% 24:27) {
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
if (which[w] == 27 && x[["numberOfBeams"]] < 4) {
stop("In plot,adp-method() : cannot use which=27 for a 3-beam instrument", call.=FALSE)
} else {
value <- apply(x[["v", j]][, , which[w]-23], 2, mean, na.rm=TRUE)
yy <- x[["distance", j]]
if (ytype == "profile" && x@metadata$orientation[1] == "downward" && !ylimGiven) {
plot(value, yy, xlab=beamName(x, which[w]-23),
ylab=resizableLabel("distance"), type='l', ylim=rev(range(yy)),
cex=1, cex.axis=1, cex.lab=1,
...)
} else {
plot(value, yy, xlab=beamName(x, 1),
ylab=resizableLabel("distance"), type='l',
cex=1, cex.axis=1, cex.lab=1,
...)
}
xaxp <- par("xaxp")
xat <- seq(xaxp[1], xaxp[2], length.out=1+xaxp[3])
yaxp <- par("yaxp")
yat <- seq(yaxp[1], yaxp[2], length.out=1+yaxp[3])
ats <- list(xat=xat, yat=yat)
}
}
} else if (which[w] %in% 28:30) {
## "uv", "uv+ellipse", or "uv+ellipse+arrow"
par(mar=c(mgp[1]+1, mgp[1]+1, 1, 1))
n <- dim(x[["v", j]])[1]
if (!missing(control) && !is.null(control$bin)) {
if (control$bin < 1)
stop("In plot,adp-method() : cannot have control$bin less than 1, but got ", control$bin, call.=FALSE)
max.bin <- dim(x[["v"]])[2]
if (control$bin > max.bin)
stop("In plot,adp-method() : cannot have control$bin larger than ", max.bin, " but got ", control$bin, call.=FALSE)
u <- x[["v", j]][, control$bin, 1]
v <- x[["v", j]][, control$bin, 2]
} else {
if (x[["numberOfCells", j]] > 1) {
u <- apply(x[["v"]][, , 1], 1, mean, na.rm=TRUE)
v <- apply(x[["v"]][, , 2], 1, mean, na.rm=TRUE)
} else {
u <- x[["v", j]][, 1, 1]
v <- x[["v", j]][, 1, 2]
}
}
oceDebug(debug, "uv type plot\n")
if (n < 5000 || (!missing(useSmoothScatter) && !useSmoothScatter)) {
if ("type" %in% names(dots)) {
plot(u, v,
xlab=resizableLabel("u"),
ylab=resizableLabel("v"),
asp=1, col=if (colGiven) col else "black",
xlim=if (xlimGiven) xlim[w, ] else range(u, na.rm=TRUE),
ylim=if (ylimGiven) ylim[w, ] else range(v, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
...)
} else {
plot(u, v,
xlab=resizableLabel("u"),
ylab=resizableLabel("v"),
type='n', asp=1,
xlim=if (xlimGiven) xlim[w, ] else range(u, na.rm=TRUE),
ylim=if (ylimGiven) ylim[w, ] else range(v, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
...)
points(u, v, cex=cex/2, col=if (colGiven) col else "black")
}
} else {
smoothScatter(u, v,
xlab=resizableLabel("u"),
ylab=resizableLabel("v"),
asp=1,
xlim=if (xlimGiven) xlim[w, ] else range(u, na.rm=TRUE),
ylim=if (ylimGiven) ylim[w, ] else range(v, na.rm=TRUE),
cex=1, cex.axis=1, cex.lab=1,
...)
}
xaxp <- par("xaxp")
xat <- seq(xaxp[1], xaxp[2], length.out=1+xaxp[3])
yaxp <- par("yaxp")
yat <- seq(yaxp[1], yaxp[2], length.out=1+yaxp[3])
ats <- list(xat=xat, yat=yat)
if (main[w] != "") {
oceDebug(debug, "about to title the plot with character size ", cex.lab*par("cex"), "\n")
mtext(main[w], adj=1, cex=cex.lab*par("cex"))
}
if (which[w] >= 29 && which[w] < 40) {
ok <- !is.na(u) & !is.na(v)
e <- eigen(cov(data.frame(u[ok], v[ok])))
major <- sqrt(e$values[1]) # major
minor <- sqrt(e$values[2]) # minor
theta <- seq(0, 2*pi, length.out=360/5)
xx <- major * cos(theta)
yy <- minor * sin(theta)
theta0 <- atan2(e$vectors[2, 1], e$vectors[1, 1])
##cat("major", major, "minor", minor, "theta0", theta0, "\n")
rotate <- rbind(c(cos(theta0), -sin(theta0)),
c(sin(theta0), cos(theta0)))
xxyy <- rotate %*% rbind(xx, yy)
col <- if (colGiven) col else "black"
lines(xxyy[1, ], xxyy[2, ], lwd=4, col="white")
lines(xxyy[1, ], xxyy[2, ], lwd=2, col=col)
res$ellipseMajor <- major
res$ellipseMinor <- minor
res$ellipseAngle <- theta
                ## For which codes >= 30 ("uv+ellipse+arrow"), overplot an arrow
                ## from the origin to the mean velocity.
                if (which[w] >= 30) {
                    if (!missing(control) && !is.null(control$bin)) {
                        if (control$bin < 1)
                            stop("In plot,adp-method() : cannot have control$bin less than 1, but got ", control$bin, call.=FALSE)
                        max.bin <- dim(x[["v"]])[2]
                        if (control$bin > max.bin)
                            stop("In plot,adp-method() : cannot have control$bin larger than ", max.bin, " but got ", control$bin, call.=FALSE)
                        ## NOTE(review): umean is taken from velocity component 2,
                        ## the same component as vmean; presumably it should use
                        ## component 1 (u), as in the else-branch below -- confirm.
                        umean <- mean(x[["v", j]][, control$bin, 2], na.rm=TRUE)
                        vmean <- mean(x[["v", j]][, control$bin, 2], na.rm=TRUE)
                    } else {
                        umean <- mean(x[["v", j]][, , 1], na.rm=TRUE)
                        vmean <- mean(x[["v", j]][, , 2], na.rm=TRUE)
                    }
                    res$meanU <- umean
                    res$meanV <- vmean
                    ## Draw a white under-stroke first so the arrow stands out
                    ## against the scatter of points.
                    arrows(0, 0, umean, vmean, lwd=4, length=1/10, col="white")
                    arrows(0, 0, umean, vmean, lwd=2, length=1/10, col=col)
                }
}
} else if (which[w] == 60) {
oceDebug(debug, "draw(adp, ...) of type MAP\n")
## get coastline file
if (is.character(coastline)) {
if (coastline == "none") {
if (!is.null(x@metadata$station) && !is.na(x@metadata$station)) {
plot(x@metadata$longitude, x@metadata$latitude, xlab="", ylab="",
cex=1, cex.axis=1, cex.lab=1)
} else {
stop("In plot,adp-method() : no latitude or longitude in object's metadata, so cannot draw map", call.=FALSE)
}
} else {
## named coastline
if (!exists(paste("^", coastline, "$", sep=""))) {
## load it, if necessary
if (requireNamespace("ocedata", quietly=TRUE)) {
if (coastline == "best") {
best <- coastlineBest(span=span, debug=debug-1)
data(list=best, package="oce", envir=environment())
coastline <- get(best)
} else if (coastline == "coastlineWorld") {
data("coastlineWorld", package="oce", envir=environment())
coastline <- get("coastlineWorld")
} else if (coastline == "coastlineWorldFine") {
data("coastlineWorldFine", package="ocedata", envir=environment())
coastline <- get("coastlineWorldFine")
} else if (coastline == "coastlineWorldMedium") {
data("coastlineWorldMedium", package="ocedata", envir=environment())
coastline <- get("coastlineWorldMedium")
} else {
stop("there is no built-in coastline file of name \"", coastline, "\"")
}
} else {
data("coastlineWorld", package="oce", envir=environment())
coastline <- get("coastlineWorld")
}
}
}
## FIXME: span should be an arg
if ("firstLatitude" %in% names(x@data)) {
lat <- x[["firstLatitude"]]
lon <- x[["firstLongitude"]]
##asp <- 1 / cos(mean(lat, na.rm=TRUE) * pi / 180)
plot(coastline, clatitude=mean(lat, na.rm=TRUE),
clongitude=mean(lon, na.rm=TRUE),
span=span,
cex=1, cex.axis=1, cex.lab=1)
points(lon, lat)
} else if ("latitude" %in% names(x@metadata)) {
lat <- x[["latitude"]]
lon <- x[["longitude"]]
if (is.finite(lat) && is.finite(lon)) {
plot(coastline, clatitude=lat, clongitude=lon, span=50,
cex=1, cex.axis=1, cex.lab=1)
points(x[["longitude"]], x[["latitude"]], cex=2*par('cex'))
} else {
stop("In plot,adp-method() : nothing to map", call.=FALSE)
}
} else {
stop("In plot,adp-method() : nothing to map", call.=FALSE)
}
}
} else {
stop("In plot,adp-method() : unknown value of which (", which[w], ")", call.=FALSE)
}
if (is.logical(grid[1]) && grid[1])
grid(col=grid.col, lty=grid.lty, lwd=grid.lwd)
oceDebug(debug, "plot,adp-method bottom of loop, before reseting par('mar'):\n", style="italic")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
par(mar=omar) # prevent margin creep if we have non-images after images (issue 1632 item 2)
oceDebug(debug, "...after reseting par('mar'):", style="italic")
oceDebug(debug, vectorShow(par("mar")), style="blue")
oceDebug(debug, vectorShow(par("mai")), style="blue")
}
par(cex=opar$cex, cex.axis=opar$cex.axis, cex.lab=opar$cex.lab)
if (exists("ats")) {
res$xat <- ats$xat
res$yat <- ats$yat
}
oceDebug(debug, "} # plot,adp-method()\n", unindent=1, style="bold")
invisible(res)
})
#' Convert an ADP Object to ENU Coordinates
#'
#' Convert the velocity components of an [adp-class] object to the
#' east-north-up (ENU) coordinate system, starting from whichever of the
#' beam, xyz, or sfm coordinate systems the object presently uses. If the
#' object is already in ENU coordinates (or is in an unrecognized
#' coordinate system), it is returned unaltered, with a warning.
#'
#' @param x an [adp-class] object.
#'
#' @param declination magnetic declination to be added to the heading, to get
#' ENU with N as "true" north.
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object in ENU coordinates, or `x` unaltered (with
#' a warning) if the conversion cannot be performed.
#'
#' @author Dan Kelley
#'
#' @seealso See [read.adp()] for notes on functions relating to
#' `"adp"` objects. Also, see [beamToXyzAdp()] and
#' [xyzToEnuAdp()].
#'
#' @references
#' \url{https://www.nortekgroup.com/faq/how-is-a-coordinate-transformation-done}
#' @family things related to adp data
toEnuAdp <- function(x, declination=0, debug=getOption("oceDebug"))
{
    ## Guard against an unset "oceDebug" option: getOption() then yields NULL,
    ## and 'NULL > 0' is logical(0), which would make the 'if' error.
    debug <- if (!is.null(debug) && debug > 0) 1 else 0
    oceDebug(debug, "toEnuAdp() {\n", unindent=1)
    coord <- x[["oceCoordinate"]]
    if (coord == "beam") {
        ## Beam coordinates require an intermediate conversion to xyz.
        x <- xyzToEnuAdp(beamToXyzAdp(x, debug=debug-1), declination=declination, debug=debug-1)
    } else if (coord == "xyz" || coord == "sfm") {
        ## xyz and sfm are handled identically by xyzToEnuAdp().
        x <- xyzToEnuAdp(x, declination=declination, debug=debug-1)
    } else if (coord == "enu") {
        warning("toEnuAdp cannot convert, object is already in coordinate system ENU, returning argument as-is")
    } else {
        warning("toEnuAdp cannot convert from coordinate system ", coord, " to ENU, so returning argument as-is")
    }
    oceDebug(debug, "} # toEnuAdp()\n", unindent=1)
    x
}
#' Adjust ADP Signal for Spherical Spreading
#'
#' Compensate ADP signal strength for spherical spreading.
#'
#' First, beam echo intensity is converted from counts to decibels, by
#' multiplying by `count2db`. Then, the signal decrease owing to
#' spherical spreading is compensated for by adding the term
#' \eqn{20\log10(r)}{20*log10(r)}, where \eqn{r}{r} is the distance from the
#' sensor head to the water from which scattering is occurring. \eqn{r}{r} is
#' given by `x[["distance"]]`.
#'
#' @param x an [adp-class] object.
#'
#' @param count2db a set of coefficients, one per beam, to convert from beam
#' echo intensity to decibels.
#'
#' @param asMatrix a boolean that indicates whether to return a numeric matrix,
#' as opposed to returning an updated object (in which the matrix is cast to a
#' raw value).
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object.
#'
#' @author Dan Kelley
#'
#' @references The coefficient to convert to decibels is a personal
#' communication. The logarithmic term is explained in textbooks on acoustics,
#' optics, etc.
#'
#' @examples
#' library(oce)
#' data(adp)
#' plot(adp, which=5) # beam 1 echo intensity
#' adp.att <- beamUnspreadAdp(adp)
#' plot(adp.att, which=5) # beam 1 echo intensity
#' ## Profiles
#' par(mar=c(4, 4, 1, 1))
#' a <- adp[["a", "numeric"]] # second arg yields matrix return value
#' distance <- adp[["distance"]]
#' plot(apply(a,2,mean), distance, type='l', xlim=c(0,256))
#' lines(apply(a,2,median), distance, type='l',col='red')
#' legend("topright",lwd=1,col=c("black","red"),legend=c("original","attenuated"))
#' ## Image
#' plot(adp.att, which="amplitude",col=oce.colorsJet(100))
#'
#' @family things related to adp data
beamUnspreadAdp <- function(x, count2db=c(0.45, 0.45, 0.45, 0.45), asMatrix=FALSE, debug=getOption("oceDebug"))
{
    oceDebug(debug, "beamUnspreadAdp(...) {\n", unindent=1)
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    ## Honour both the legacy flag name (oceBeamUnattenuated, from the old
    ## function name) and the current one, so the correction cannot be
    ## applied twice.
    if (!is.null(x@metadata$oceBeamUnattenuated) && x@metadata$oceBeamUnattenuated) {
        warning("the beams are already unspreaded in this dataset.")
        return(x)
    }
    if (!is.null(x@metadata$oceBeamUnspreaded) && x@metadata$oceBeamUnspreaded) {
        warning("the beams are already unspreaded in this dataset")
        return(x)
    }
    numberOfProfiles <- dim(x@data$a)[1]
    oceDebug(debug, "numberOfProfiles=", numberOfProfiles, "\n")
    ## 20*log10(r) compensates for spherical spreading; the same correction
    ## row applies to every profile, so build the full matrix by repetition.
    correction <- matrix(rep(20 * log10(x[["distance"]]), numberOfProfiles),
                         nrow=numberOfProfiles, byrow=TRUE)
    numberOfBeams <- x[["numberOfBeams"]]
    if (asMatrix) {
        ## Return corrected values in decibels, as a numeric array.
        res <- array(double(), dim=dim(x@data$a))
        for (beam in seq_len(numberOfBeams)) {
            oceDebug(debug, "beam=", beam, "\n")
            res[, , beam] <- count2db[beam] * as.numeric(x@data$a[, , beam]) + correction
        }
    } else {
        ## Return an updated adp object, with amplitudes stored as raw bytes.
        res <- x
        for (beam in seq_len(numberOfBeams)) {
            oceDebug(debug, "beam=", beam, "\n")
            ## Clamp to [0, 255] so the values fit in a raw byte.
            tmp <- floor(count2db[beam] * as.numeric(x@data$a[, , beam]) + correction)
            tmp[tmp < 0] <- 0
            tmp[tmp > 255] <- 255
            res@data$a[, , beam] <- as.raw(tmp)
        }
        res@metadata$oceBeamUnspreaded <- TRUE
        res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    }
    oceDebug(debug, "} # beamUnspreadAdp()\n", unindent=1)
    res
}
#' Convert ADP From Beam to XYZ Coordinates
#'
#' Convert ADP velocity components from a beam-based coordinate system to a
#' xyz-based coordinate system. The action depends on the type of object.
#' Objects creating by reading RDI Teledyne, Sontek, and some Nortek
#' instruments are handled directly. However, Nortek
#' data stored in in the AD2CP format are handled by the specialized
#' function [beamToXyzAdpAD2CP()], the documentation for which
#' should be consulted, rather than the material given below.
#'
#' For a 3-beam Nortek `aquadopp` object, the beams are transformed into
#' velocities using the matrix stored in the header.
#'
#' For 4-beam objects (and for the slanted 4 beams of 5-beam
#' objects), the along-beam velocity components \eqn{B_1}{B1}
#' \eqn{B_2}{B1}, \eqn{B_3}{B3}, and \eqn{B_4}{B4}
#' are converted to Cartesian velocity components \eqn{u}{u}
#' \eqn{v}{v} and \eqn{w}{w}
#' using formulae from section 5.5 of *RD Instruments* (1998), viz. the
#' along-beam velocity components \eqn{B_1}{B1}, \eqn{B_2}{B2}, \eqn{B_3}{B3},
#' and \eqn{B_4}{B4} are used to calculate velocity components in a cartesian
#' system referenced to the instrument using the following formulae:
#' \eqn{u=ca(B_1-B_2)}{u=c*a*(B1-B2)}, \eqn{v=ca(B_4-B_3)}{v=c*a*(B4-B3)},
#' \eqn{w=-b(B_1+B_2+B_3+B_4)}{w=-b*(B1+B2+B3+B4)}. In addition to these,
#' an estimate of the
#' error in velocity is computed as
#' \eqn{e=d(B_1+B_2-B_3-B_4)}{e=d*(B1+B2-B3-B4)}.
#' The geometrical factors in these formulae are:
#' `c` is +1 for convex beam geometry or -1 for concave beam geometry,
#' \eqn{a=1/(2\sin\theta)}{a=1/(2*sin(theta))}
#' where \eqn{\theta}{theta} is the angle the beams make to the axial direction
#' (which is available as `x[["beamAngle"]]`),
#' \eqn{b=1/(4\cos\theta)}{b=1/(4*cos(theta))}, and
#' \eqn{d=a/\sqrt{2}}{d=a/sqrt(2)}.
#'
#' @param x an [adp-class] object.
#'
#' @template debugTemplate
#'
#' @return An object with the first 3 velocity indices having been altered to
#' represent velocity components in xyz (or instrument) coordinates. (For
#' `rdi` data, the values at the 4th velocity index are changed to
#' represent the "error" velocity.)
#' To indicate the change, the value of `x[["oceCoordinate"]]` is
#' changed from `beam` to `xyz`.
#'
#' @author Dan Kelley
#'
#' @seealso See [read.adp()] for other functions that relate to
#' objects of class `"adp"`.
#'
#' @references
#' 1. Teledyne RD Instruments. \dQuote{ADCP Coordinate Transformation: Formulas and
#' Calculations,} January 2010. P/N 951-6079-00.
#'
#' 2. WHOI/USGS-provided Matlab code for beam-enu transformation
#' \samp{http://woodshole.er.usgs.gov/pubs/of2005-1429/MFILES/AQDPTOOLS/beam2enu.m}
#'
#' @family things related to adp data
beamToXyzAdp <- function(x, debug=getOption("oceDebug"))
{
    if (!inherits(x, "adp"))
        stop("method is only for objects of class \"adp\"")
    if (x[["oceCoordinate"]] != "beam")
        stop("input must be in beam coordinates")
    ## AD2CP files have a very different layout, so they are handled by a
    ## specialized function.
    if (is.ad2cp(x)) {
        oceDebug(debug, "beamToXyzAdp(x, debug=", debug, ") {\n", sep="", unindent=1)
        res <- beamToXyzAdpAD2CP(x=x, debug=debug - 1)
        oceDebug(debug, "} # beamToXyzAdp()\n", unindent=1)
        return(res)
    }
    oceDebug(debug, "beamToXyzAdp(x, debug=", debug, ") {\n", sep="", unindent=1)
    nb <- x[["numberOfBeams"]]
    if (is.null(nb))
        stop("missing x[[\"numberOfBeams\"]]")
    tm <- x[["transformationMatrix"]]
    if (is.null(tm))
        stop("missing x[[\"transformationMatrix\"]]")
    ## Note: all.equal() returns a character description (not FALSE) when the
    ## values differ, and `!` on a character vector is an error, so the result
    ## must be wrapped in isTRUE() to reach the informative stop() below.
    if (!isTRUE(all.equal(dim(tm), c(nb, nb))))
        stop("number of beams, ", nb, ", contradicts the ", dim(tm)[1], "x", dim(tm)[2], " transformationMatrix")
    manufacturer <- x[["manufacturer"]]
    if (is.null(manufacturer))
        stop("cannot rotate the data, since there is no 'manufacturer' entry in the metadata slot")
    oceDebug(debug, "transformation matrix follows\n")
    if (debug > 0)
        print(tm)
    res <- x
    V <- x[["v"]]
    if (length(grep(".*rdi.*", manufacturer))) {
        if (nb != 4)
            stop("can only handle 4-beam ADP units from RDI")
        ## Rows 1:3 of tm give u, v and w; row 4 gives the "error" velocity.
        res@data$v[,,1] <- tm[1,1]*V[,,1] + tm[1,2]*V[,,2] + tm[1,3]*V[,,3] + tm[1,4]*V[,,4]
        res@data$v[,,2] <- tm[2,1]*V[,,1] + tm[2,2]*V[,,2] + tm[2,3]*V[,,3] + tm[2,4]*V[,,4]
        res@data$v[,,3] <- tm[3,1]*V[,,1] + tm[3,2]*V[,,2] + tm[3,3]*V[,,3] + tm[3,4]*V[,,4]
        res@data$v[,,4] <- tm[4,1]*V[,,1] + tm[4,2]*V[,,2] + tm[4,3]*V[,,3] + tm[4,4]*V[,,4]
        if ("bv" %in% names(x@data)) {
            ## bottom velocity
            V <- x@data$bv
            res@data$bv[,1] <- tm[1,1]*V[,1] + tm[1,2]*V[,2] + tm[1,3]*V[,3] + tm[1,4]*V[,4]
            res@data$bv[,2] <- tm[2,1]*V[,1] + tm[2,2]*V[,2] + tm[2,3]*V[,3] + tm[2,4]*V[,4]
            res@data$bv[,3] <- tm[3,1]*V[,1] + tm[3,2]*V[,2] + tm[3,3]*V[,3] + tm[3,4]*V[,4]
            res@data$bv[,4] <- tm[4,1]*V[,1] + tm[4,2]*V[,2] + tm[4,3]*V[,3] + tm[4,4]*V[,4]
        }
        res@metadata$oceCoordinate <- "xyz"
    } else if (length(grep(".*nortek.*", manufacturer))) {
        if (nb == 3) {
            res@data$v[,,1] <- tm[1,1]*V[,,1] + tm[1,2]*V[,,2] + tm[1,3]*V[,,3]
            res@data$v[,,2] <- tm[2,1]*V[,,1] + tm[2,2]*V[,,2] + tm[2,3]*V[,,3]
            res@data$v[,,3] <- tm[3,1]*V[,,1] + tm[3,2]*V[,,2] + tm[3,3]*V[,,3]
            if ("bv" %in% names(x@data)) {
                ## bottom velocity
                V <- x@data$bv
                res@data$bv[,1] <- tm[1,1]*V[,1] + tm[1,2]*V[,2] + tm[1,3]*V[,3]
                res@data$bv[,2] <- tm[2,1]*V[,1] + tm[2,2]*V[,2] + tm[2,3]*V[,3]
                res@data$bv[,3] <- tm[3,1]*V[,1] + tm[3,2]*V[,2] + tm[3,3]*V[,3]
            }
            res@metadata$oceCoordinate <- "xyz"
        } else if (nb == 4) {
            ## 4-beam Nortek data in non-AD2CP format are not supported;
            ## AD2CP data were dispatched near the top of this function.
            stop("the only 4-beam Nortek format supported is AD2CP")
        } else {
            stop("can only handle 3-beam and 4-beam ADP units from nortek")
        }
    } else if (length(grep(".*sontek.*", manufacturer))) {
        res@data$v[,,1] <- tm[1,1]*V[,,1] + tm[1,2]*V[,,2] + tm[1,3]*V[,,3]
        res@data$v[,,2] <- tm[2,1]*V[,,1] + tm[2,2]*V[,,2] + tm[2,3]*V[,,3]
        res@data$v[,,3] <- tm[3,1]*V[,,1] + tm[3,2]*V[,,2] + tm[3,3]*V[,,3]
        if ("bv" %in% names(x@data)) {
            ## bottom velocity
            V <- x@data$bv
            res@data$bv[,1] <- tm[1,1]*V[,1] + tm[1,2]*V[,2] + tm[1,3]*V[,3]
            res@data$bv[,2] <- tm[2,1]*V[,1] + tm[2,2]*V[,2] + tm[2,3]*V[,3]
            res@data$bv[,3] <- tm[3,1]*V[,1] + tm[3,2]*V[,2] + tm[3,3]*V[,3]
        }
        res@metadata$oceCoordinate <- "xyz"
    } else {
        stop("adp type must be either \"rdi\" or \"nortek\" or \"sontek\"")
    }
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    oceDebug(debug, "} # beamToXyzAdp()\n", unindent=1)
    res
}
#' Convert AD2CP-style adp data From Beam to XYZ Coordinates
#'
#' This looks at all the items in the `data` slot of `x`, to
#' see if they contain an array named `v` that holds velocity.
#' If that velocity has 4 components, and if `oceCoordinate` for
#' the item is `"beam"`, then
#' along-beam velocity components \eqn{B_1}{B1}
#' \eqn{B_2}{B1}, \eqn{B_3}{B3}, and \eqn{B_4}{B4}
#' are converted to instrument-oriented Cartesian velocity components \eqn{u}{u}
#' \eqn{v}{v} and \eqn{w}{w}
#' using the convex-geometry formulae from section 5.5 of reference 1,
#' viz.
#' \eqn{u=ca(B_1-B_2)}{u=a*(B1-B2)}, \eqn{v=ca(B_4-B_3)}{v=a*(B4-B3)},
#' \eqn{w=-b(B_1+B_2+B_3+B_4)}{w=-b*(B1+B2+B3+B4)}. In addition to these,
#' an estimate of the
#' error in velocity is computed as
#' \eqn{e=d(B_1+B_2-B_3-B_4)}{e=d*(B1+B2-B3-B4)}.
#' The geometrical factors in these formulae are:
#' \eqn{a=1/(2\sin\theta)}{a=1/(2*sin(theta))}
#' where \eqn{\theta}{theta} is the angle the beams make to the axial direction
#' (which is available as `x[["beamAngle"]]`),
#' \eqn{b=1/(4\cos\theta)}{b=1/(4*cos(theta))}, and
#' \eqn{d=a/\sqrt{2}}{d=a/sqrt(2)}.
#'
#' @param x an [adp-class] object.
#'
#' @template debugTemplate
#'
#' @references
#' 1. Teledyne RD Instruments.
#' \dQuote{ADCP Coordinate Transformation: Formulas and Calculations,}
#' January 2010. P/N 951-6079-00.
#'
#' @family things related to adp data
beamToXyzAdpAD2CP <- function(x, debug=getOption("oceDebug"))
{
    debug <- if (debug > 0) 1 else 0
    oceDebug(debug, "beamToXyzAdpAD2CP(x, debug=", debug, ") {\n", sep="", unindent=1)
    if (!inherits(x, "adp"))
        stop("method is only for objects of class \"adp\"")
    if (!is.ad2cp(x))
        stop("method is only for AD2CP objects")
    res <- x
    for (item in names(x@data)) {
        oceDebug(debug, "item='", item, "'...\n", sep="")
        ## Do not try to alter unsuitable items, e.g. the vertical beam, the altimeter, etc.
        if (is.list(x@data[[item]]) && "v" %in% names(x@data[[item]])) {
            if (x@data[[item]]$oceCoordinate == "beam") {
                numberOfBeams <- x@data[[item]]$numberOfBeams
                oceDebug(debug, "  numberOfBeams=", numberOfBeams, "\n")
                if (4 == numberOfBeams) {
                    ## Extract each beam slice once: timing tests showed this
                    ## to be somewhat faster than indexing the 3-D array four
                    ## times per output component, and it reduces memory
                    ## pressure on large files.
                    v <- x@data[[item]]$v
                    v1 <- v[,,1]
                    v2 <- v[,,2]
                    v3 <- v[,,3]
                    v4 <- v[,,4]
                    rm(v) # perhaps help by reducing memory pressure a bit
                    beamAngle <- x@metadata$beamAngle
                    if (is.null(beamAngle))
                        stop("cannot look up beamAngle")
                    theta <- beamAngle * atan2(1, 1) / 45 # degrees to radians
                    TMc <- 1 # for convex (diverging) beam setup; use -1 for concave
                    TMa <- 1 / (2 * sin(theta))
                    TMb <- 1 / (4 * cos(theta))
                    TMd <- TMa / sqrt(2)
                    ## Rows 1:3 of the transformation matrix give u, v and w;
                    ## row 4 gives the "error" velocity.
                    tm <- rbind(c(TMc*TMa, -TMc*TMa,        0,       0),
                                c(      0,        0, -TMc*TMa, TMc*TMa),
                                c(    TMb,      TMb,      TMb,     TMb),
                                c(    TMd,      TMd,     -TMd,    -TMd))
                    res@data[[item]]$v[,,1] <- tm[1,1]*v1 + tm[1,2]*v2 + tm[1,3]*v3 + tm[1,4]*v4
                    res@data[[item]]$v[,,2] <- tm[2,1]*v1 + tm[2,2]*v2 + tm[2,3]*v3 + tm[2,4]*v4
                    res@data[[item]]$v[,,3] <- tm[3,1]*v1 + tm[3,2]*v2 + tm[3,3]*v3 + tm[3,4]*v4
                    res@data[[item]]$v[,,4] <- tm[4,1]*v1 + tm[4,2]*v2 + tm[4,3]*v3 + tm[4,4]*v4
                    res@data[[item]]$oceCoordinate <- "xyz"
                    ## The coordinate is stored per-item for AD2CP data; a
                    ## metadata-level entry would be a mistake, so remove it.
                    res@metadata$oceCoordinate <- NULL
                    oceDebug(debug, "  converted from 'beam' to 'xyz'\n")
                } else {
                    oceDebug(debug, "  skipping, since not 4 beams\n")
                }
            } else {
                oceDebug(debug, "  skipping, since not in 'beam' coordinate\n")
            }
        } else {
            oceDebug(debug, "  skipping, since not a list\n")
        }
    }
    res@processingLog <- processingLogAppend(res@processingLog,
                                             paste("beamToXyzAdpAD2CP(x",
                                                   ", debug=", debug, ")", sep=""))
    oceDebug(debug, "} # beamToXyzAdpAD2CP()\n", unindent=1)
    res
}
#' Convert ADP From XYZ to ENU Coordinates
#'
#' Convert ADP velocity components from a xyz-based coordinate system to
#' an enu-based coordinate system, by using the instrument's recording of
#' information relating to heading, pitch, and roll. The action is based
#' on what is stored in the data, and so it depends greatly on instrument type
#' and the style of original data format. This function handles data from
#' RDI Teledyne, Sontek, and some Nortek instruments directly. However, Nortek
#' data stored in in the AD2CP format are handled by the specialized
#' function [xyzToEnuAdpAD2CP()], the documentation for which
#' should be consulted, rather than the material given below.
#'
#' The first step is to convert the (x,y,z) velocity components (stored in the
#' three columns of `x[["v"]][,,1:3]`) into what RDI (reference 1, pages 11 and 12)
#' calls "ship" (or "righted") components. For example, the z coordinate,
#' which may point upwards or downwards depending on instrument orientation, is
#' mapped onto a "mast" coordinate that points more nearly upwards than
#' downward. The other ship coordinates are called "starboard" and "forward",
#' the meanings of which will be clear to mariners. Once the (x,y,z)
#' velocities are converted to ship velocities, the orientation of the
#' instrument is extracted from heading, pitch, and roll vectors stored in the
#' object. These angles are defined differently for RDI and Sontek profilers.
#'
#' The code handles every case individually, based on the table given below.
#' The table comes from Clark Richards, a former PhD student at Dalhousie
#' University (reference 2), who developed it based on instrument documentation,
#' discussion on user groups, and analysis of measurements acquired with RDI
#' and Sontek acoustic current profilers in the SLEIWEX experiment. In the
#' table, (X, Y, Z) denote instrument-coordinate velocities, (S, F, M) denote
#' ship-coordinate velocities, and (H, P, R) denote heading, pitch, and roll.
#'
#' \tabular{rrrrrrrrrrrr}{ **Case** \tab **Mfr.** \tab
#' **Instr.** **Orient.** \tab **H** \tab **P** \tab
#' **R** \tab **S** \tab **F** \tab **M**\cr 1 \tab RDI
#' \tab ADCP \tab up \tab H \tab arctan(tan(P)*cos(R)) \tab R \tab -X \tab Y
#' \tab -Z\cr 2 \tab RDI \tab ADCP \tab down \tab H \tab arctan(tan(P)*cos(R))
#' \tab -R \tab X \tab Y \tab Z\cr 3 \tab Nortek \tab ADP \tab up \tab H-90
#' \tab R \tab -P \tab X \tab Y \tab Z\cr 4 \tab Nortek \tab ADP \tab down \tab
#' H-90 \tab R \tab -P \tab X \tab -Y \tab -Z\cr 5 \tab Sontek \tab ADP \tab up
#' \tab H-90 \tab -P \tab -R \tab X \tab Y \tab Z\cr 6 \tab Sontek \tab ADP
#' \tab down \tab H-90 \tab -P \tab -R \tab X \tab Y \tab Z\cr 7 \tab Sontek
#' \tab PCADP \tab up \tab H-90 \tab R \tab -P \tab X \tab Y \tab Z\cr 8 \tab
#' Sontek \tab PCADP \tab down \tab H-90 \tab R \tab -P \tab X \tab Y \tab Z\cr
#' }
#'
#' Finally, a standardized rotation matrix is used to convert from ship
#' coordinates to earth coordinates. As described in the RDI coordinate
#' transformation manual (reference 1, pages 13 and 14), this matrix is based on sines
#' and cosines of heading, pitch, and roll. If `CH` and `SH` denote
#' cosine and sine of heading (after adjusting for declination), with similar
#' terms for pitch and roll using second letters `P` and `R`, the
#' rotation matrix is
#'
#' \preformatted{ rbind(c( CH*CR + SH*SP*SR, SH*CP, CH*SR - SH*SP*CR), c(-SH*CR
#' + CH*SP*SR, CH*CP, -SH*SR - CH*SP*CR), c( -CP*SR, SP, CP*CR)) }
#'
#' This matrix is left-multiplied by a matrix with three rows, the top a vector
#' of "starboard" values, the middle a vector of "forward" values, and the
#' bottom a vector of "mast" values. Finally, the columns of
#' `data$v[,,1:3]` are filled in with the result of the matrix
#' multiplication.
#'
#' @param x an [adp-class] object.
#'
#' @param declination magnetic declination to be added to the heading after
#' "righting" (see below), to get ENU with N as "true" north.
#'
#' @template debugTemplate
#'
#' @return An object with `data$v[,,1:3]` altered appropriately, and
#' `x[["oceCoordinate"]]` changed from `xyz` to `enu`.
#'
#' @author Dan Kelley and Clark Richards
#'
## @section Limitations:
## For AD2CP objects, created by[read.adp.ad2cp()],
## the transformation to ENU coordinates is only possible if the instrument
## orientation is `"AHRS"`. Other orientations may be added, if users
## indicate a need for them, and supply the developers with test file (including
## at least a few expected results).
#'
#' @references
#' 1. Teledyne RD Instruments. \dQuote{ADCP Coordinate Transformation: Formulas and Calculations,}
#' January 2010. P/N 951-6079-00.
#'
#' 2. Clark Richards, 2012, PhD Dalhousie University Department of
#' Oceanography.
#'
#' @family things related to adp data
xyzToEnuAdp <- function(x, declination=0, debug=getOption("oceDebug"))
{
    debug <- if (debug > 0) 1 else 0
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    ## Treat AD2CP differently because e.g. if it has AHRS, there may be no need
    ## or benefit in extracting heading, etc., as for the other cases. Also, the
    ## orientation names are different for this type, so isolating the code
    ## makes things clearer and easier to maintain.
    ## (FIXME: consider splitting the RDI and Sontek cases, too.)
    if (is.ad2cp(x))
        return(xyzToEnuAdpAD2CP(x=x, declination=declination, debug=debug))
    oceDebug(debug, "xyzToEnuAdp(x, declination=", declination, ", debug=", debug, ") {\n", sep="", unindent=1)
    ## Now, address non-AD2CP cases.
    manufacturer <- x[["manufacturer"]]
    oceCoordinate <- x[["oceCoordinate"]]
    orientation <- x[["orientation"]][1]
    if (is.null(orientation)) {
        warning("instrument orientation is not stored in x; assuming it is \"upward\"")
        orientation <- "upward"
    }
    ## Use short-circuiting && so the comparisons are never reached when
    ## oceCoordinate is NULL.
    if (is.null(oceCoordinate) || (oceCoordinate != "xyz" && oceCoordinate != "sfm"))
        stop("input must be in xyz or sfm coordinates")
    heading <- x[["heading"]]
    pitch <- x[["pitch"]]
    roll <- x[["roll"]]
    res <- x
    isAD2CP <- is.ad2cp(x) # always FALSE here (AD2CP returned above); kept for safety
    haveBv <- "bv" %in% names(x@data)
    ## Case-by-case alteration of heading, pitch and roll, so we can use one formula for all.
    if (1 == length(agrep("rdi", manufacturer, ignore.case=TRUE))) {
        ## "teledyne rdi"
        ## h/p/r and s/f/m from Clark Richards pers. comm. 2011-03-14, revised 2011-03-15
        if (oceCoordinate == "sfm" && !res@metadata$tiltUsed) {
            oceDebug(debug, "Case 1: RDI ADCP in SFM coordinates.\n")
            oceDebug(debug, "        No coordinate changes required prior to ENU.\n")
            starboard <- res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]   # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (oceCoordinate == "sfm" && res@metadata$tiltUsed) {
            oceDebug(debug, "Case 2: RDI ADCP in SFM coordinates, but with tilts already applied.\n")
            oceDebug(debug, "        No coordinate changes required prior to ENU.\n")
            starboard <- res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]   # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            ## Tilts were already applied by the instrument, so zero them here.
            pitch <- rep(0, length(heading))
            roll <- rep(0, length(heading))
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (orientation == "upward") {
            oceDebug(debug, "Case 3: RDI ADCP in XYZ coordinates with upward-pointing sensor.\n")
            oceDebug(debug, "        Using S=-X, F=Y, and M=-Z.\n")
            ## As an alternative to the next three lines, could just add 180 degrees to roll
            starboard <- -res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]    # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- -res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            if (haveBv) {
                ## bottom velocity
                starboardBv <- -res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- -res@data$bv[, 3]
            }
            oceDebug(debug, "  defined starboard, etc\n")
        } else if (orientation == "downward") {
            oceDebug(debug, "Case 4: RDI ADCP in XYZ coordinates with downward-pointing sensor.\n")
            oceDebug(debug, "        Using roll=-roll, S=X, F=Y, and M=Z.\n")
            roll <- -roll
            starboard <- res@data$v[, , 1] # p11 "RDI Coordinate Transformation Manual" (July 1998)
            forward <- res@data$v[, , 2]   # p11 "RDI Coordinate Transformation Manual" (July 1998)
            mast <- res@data$v[, , 3]      # p11 "RDI Coordinate Transformation Manual" (July 1998)
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else {
            stop("need orientation='upward' or 'downward', not '", orientation, "'")
        }
    } else if (1 == length(agrep("nortek", manufacturer))) {
        V <- x[["v"]]
        if (orientation == "upward") {
            ## h/p/r and s/f/m from Clark Richards pers. comm. 2011-03-14
            oceDebug(debug, "Case 3: Nortek ADP with upward-pointing sensor.\n")
            oceDebug(debug, "        Using heading=heading-90, pitch=roll, roll=-pitch, S=X, F=Y, and M=Z.\n")
            heading <- heading - 90
            tmp <- pitch
            pitch <- roll
            roll <- -tmp
            starboard <- V[, , 1]
            forward <- V[, , 2]
            mast <- V[, , 3]
            if (!isAD2CP && haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (orientation == "downward") {
            oceDebug(debug, "Case 4: Nortek ADP with downward-pointing sensor.\n")
            oceDebug(debug, "        Using heading=heading-90, pitch=roll, roll=-pitch, S=X, F=-Y, and M=-Z.\n")
            heading <- heading - 90
            tmp <- pitch
            pitch <- roll
            roll <- -tmp
            starboard <- V[, , 1]
            forward <- -V[, , 2]
            mast <- -V[, , 3]
            if (!isAD2CP && haveBv) {
                ## bottom velocity (mast component negated, to match the
                ## M=-Z mapping applied to the water velocities above)
                starboardBv <- res@data$bv[, 1]
                forwardBv <- -res@data$bv[, 2]
                mastBv <- -res@data$bv[, 3]
            }
        } else {
            stop("need orientation='upward' or 'downward', not '", orientation, "'")
        }
    } else if (1 == length(agrep("sontek", manufacturer))) {
        ## "sontek"
        if (orientation == "upward") {
            oceDebug(debug, "Case 5: Sontek ADP with upward-pointing sensor.\n")
            oceDebug(debug, "        Using heading=heading-90, pitch=-pitch, roll=-roll, S=X, F=Y, and M=Z.\n")
            heading <- heading - 90
            pitch <- -pitch
            roll <- -roll
            starboard <- res@data$v[, , 1]
            forward <- res@data$v[, , 2]
            mast <- res@data$v[, , 3]
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else if (orientation == "downward") {
            oceDebug(debug, "Case 6: Sontek ADP with downward-pointing sensor.\n")
            oceDebug(debug, "        Using heading=heading-90, pitch=-pitch, roll=-roll, S=X, F=Y, and M=Z.\n")
            heading <- heading - 90
            pitch <- -pitch
            roll <- -roll
            starboard <- res@data$v[, , 1]
            forward <- res@data$v[, , 2]
            mast <- res@data$v[, , 3]
            if (haveBv) {
                ## bottom velocity
                starboardBv <- res@data$bv[, 1]
                forwardBv <- res@data$bv[, 2]
                mastBv <- res@data$bv[, 3]
            }
        } else {
            stop("need orientation='upward' or 'downward', not '", orientation, "'")
        }
    } else {
        stop("unrecognized manufacturer; should be 'teledyne rdi', 'sontek', or 'nortek', but is '",
             manufacturer, "'")
    }
    oceDebug(debug, vectorShow(heading, "heading (after adjustment)"))
    oceDebug(debug, vectorShow(pitch, "pitch (after adjustment)"))
    oceDebug(debug, vectorShow(roll, "roll (after adjustment)"))
    nc <- dim(x@data$v)[2] # numberOfCells
    np <- dim(x@data$v)[1] # number of profiles
    ## Recycle the angles, if need be, so there is one value per profile.
    if (length(heading) < np)
        heading <- rep(heading, length.out=np)
    if (length(pitch) < np)
        pitch <- rep(pitch, length.out=np)
    if (length(roll) < np)
        roll <- rep(roll, length.out=np)
    ## ADP and ADV calculations are both handled by sfm_enu for non-AD2CP.
    for (cell in seq_len(nc)) {
        enu <- do_sfm_enu(heading + declination, pitch, roll, starboard[, cell], forward[, cell], mast[, cell])
        res@data$v[, cell, 1] <- enu$east
        res@data$v[, cell, 2] <- enu$north
        res@data$v[, cell, 3] <- enu$up
    }
    if (haveBv) {
        enu <- do_sfm_enu(heading + declination, pitch, roll, starboardBv, forwardBv, mastBv)
        res@data$bv[, 1] <- enu$east
        res@data$bv[, 2] <- enu$north
        res@data$bv[, 3] <- enu$up
    }
    res@metadata$oceCoordinate <- "enu"
    res@processingLog <- processingLogAppend(res@processingLog,
                                             paste("xyzToEnuAdp(x", ", declination=", declination, ", debug=", debug, ")", sep=""))
    oceDebug(debug, "} # xyzToEnuAdp()\n", unindent=1)
    res
}
#' Convert ADP2CP adp object From XYZ to ENU Coordinates
#'
#' **This function will be in active development through the early
#' months of 2019, and both the methodology and user interface may change
#' without notice. Only developers (or invitees) should be trying to
#' use this function.**
#'
#' @param x an [adp-class] object created by [read.adp.ad2cp()].
#'
#' @param declination IGNORED at present, but will be used at some later time.
#' @template debugTemplate
#'
#' @return An object with `data$v[,,1:3]` altered appropriately, and
#' `x[["oceCoordinate"]]` changed from `xyz` to `enu`.
#'
#' @author Dan Kelley
#'
#' @section Limitations:
#' This only works if the instrument orientation is `"AHRS"`, and even
#' that is not tested yet. Plus, as noted, the declination is ignored.
#'
#' @references
#' 1. Nortek AS. \dQuote{Signature Integration 55|250|500|1000kHz.} Nortek AS, 2017.
#'
#' 2. Nortek AS. \dQuote{Signature Integration 55|250|500|1000kHz.} Nortek AS, 2018.
#' https://www.nortekgroup.com/assets/software/N3015-007-Integrators-Guide-AD2CP_1018.pdf.
#'
#' @family things related to adp data
xyzToEnuAdpAD2CP <- function(x, declination=0, debug=getOption("oceDebug"))
{
    debug <- if (debug > 0) 1 else 0
    oceDebug(debug, "xyzToEnuAdpAD2CP(x, declination=", declination, ", debug=", debug, ") {\n", sep="", unindent=1)
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    if (!is.ad2cp(x))
        stop("this function only works for adp objects created by read.adp.ad2cp()")
    if (0 != declination)
        stop("nonzero declination is not handled yet; please contact the author if you need this") # FIXME
    res <- x
    ## FIXME: deal with other ad2cp orientations. Can (should) we use a methodology
    ## similar to the non-ad2cp, for non-AHRS cases?
    for (item in names(x@data)) {
        ## Do not try to rotate non-rotatable items, e.g. the vertical beam, the altimeter, etc.
        if (is.list(x@data[[item]])) {
            numberOfBeams <- x@data[[item]]$numberOfBeams
            if (!is.null(numberOfBeams) && numberOfBeams == 4) {
                orientation <- x@data[[item]]$orientation
                if (is.null(orientation))
                    stop("no known orientation for '", item, "' in the object data slot")
                ## FIXME: handle 'xup', 'xdown', 'yup', 'ydown', 'zup', 'zdown'
                if (orientation[1] != "AHRS")
                    stop("only the 'AHRS' orientation is handled, but '", item, "' has orientation '", orientation[1], "'")
                AHRS <- x@data[[item]]$AHRS
                if (is.null(AHRS))
                    stop("'", item, "' within the object data slot does not contain coordinate-change matrix 'AHRS'")
                oceCoordinate <- x@data[[item]]$oceCoordinate
                if (is.null(oceCoordinate))
                    stop("'", item, "' within the object data slot has no 'oceCoordinate'")
                ## If the item is already in 'enu', we just leave it alone.
                if (oceCoordinate == "xyz") {
                    V <- x@data[[item]]$v
                    if (is.null(V))
                        stop("'", item, "' within the object data slot does not contain velocity 'v'")
                    nc <- dim(V)[2]
                    ## DEVELOPER NOTE: a C++ version of this rotation was
                    ## tried, but plain R proved much faster in a timing test
                    ## on a 292M file (0.5s vs 3.5s elapsed), and is easier
                    ## for others to modify, so we use it.  The rep() calls
                    ## recycle each per-profile AHRS column across the nc
                    ## cells of a profile.
                    e <- V[,,1]*rep(AHRS[,1], times=nc) + V[,,2]*rep(AHRS[,2], times=nc) + V[,,3]*rep(AHRS[,3], times=nc)
                    n <- V[,,1]*rep(AHRS[,4], times=nc) + V[,,2]*rep(AHRS[,5], times=nc) + V[,,3]*rep(AHRS[,6], times=nc)
                    u <- V[,,1]*rep(AHRS[,7], times=nc) + V[,,2]*rep(AHRS[,8], times=nc) + V[,,3]*rep(AHRS[,9], times=nc)
                    ## FIXME: perhaps use the declination now, rotating e and n. But first, we will need to know
                    ## what declination was used by the instrument, in its creation of AHRS.
                    res@data[[item]]$v[,,1] <- e
                    res@data[[item]]$v[,,2] <- n
                    res@data[[item]]$v[,,3] <- u
                    res@data[[item]]$oceCoordinate <- "enu"
                } else if (oceCoordinate == "beam") {
                    stop("cannot convert from beam to Enu coordinates; use beamToXyz() first")
                }
            }
        }
    }
    res@processingLog <- processingLogAppend(res@processingLog,
                                             paste("xyzToEnuAdpAD2CP(x",
                                                   ", declination=", declination,
                                                   ", debug=", debug, ")", sep=""))
    oceDebug(debug, "} # xyzToEnuAdpAD2CP()\n", unindent=1)
    res
}
#' Convert ADP ENU to Rotated Coordinate
#'
#' Convert ADP velocity components from an enu-based coordinate system to
#' another system, perhaps to align axes with the coastline.
#'
#' The supplied angles specify rotations to be made around the axes for which
#' heading, pitch, and roll are defined. For example, an eastward current will
#' point southeast if `heading=45` is used.
#'
#' The returned value has heading, pitch, and roll matching those of `x`,
#' so these angles retain their meaning as the instrument orientation.
#'
#' NOTE: this function works similarly to [xyzToEnuAdp()], except
#' that in the present function, it makes no difference whether the instrument
#' points up or down, etc.
#'
#' @param x an [adp-class] object.
#'
#' @param heading number or vector of numbers, giving the angle, in degrees, to
#' be added to the heading. See \dQuote{Details}.
#'
#' @param pitch as `heading` but for pitch.
#'
#' @param roll as `heading` but for roll.
#'
#' @return An object with `data$v[,1:3,]` altered appropriately, and
#' `metadata$oce.coordinate` changed from `enu` to `other`.
#'
#' @author Dan Kelley
#'
#' @seealso See [read.adp()] for other functions that relate to
#' objects of class `"adp"`.
#'
#' @references
#' 1. Teledyne RD Instruments. \dQuote{ADCP Coordinate Transformation: Formulas and
#' Calculations,} January 2010. P/N 951-6079-00.
#'
#' @examples
#'
#' library(oce)
#' data(adp)
#' o <- enuToOtherAdp(adp, heading=-31.5)
#' plot(o, which=1:3)
#'
#' @family things related to adp data
enuToOtherAdp <- function(x, heading=0, pitch=0, roll=0)
{
    if (!inherits(x, "adp"))
        stop("method is only for objects of class '", "adp", "'")
    if (is.ad2cp(x))
        stop("this function does not work yet for AD2CP data")
    oceCoordinate <- x[["oceCoordinate"]]
    if (oceCoordinate != "enu")
        stop("input must be in enu coordinates, but it is in ", oceCoordinate, " coordinates")
    res <- x
    np <- dim(x[["v"]])[1] # number of profiles
    ## Recycle the rotation angles so that there is one value per profile.
    if (length(heading) != np)
        heading <- rep(heading, length.out=np)
    if (length(pitch) != np)
        pitch <- rep(pitch, length.out=np)
    if (length(roll) != np)
        roll <- rep(roll, length.out=np)
    nc <- dim(x[["v"]])[2] # number of cells
    ## Rotate each cell's velocity components.  Note: seq_len() is safe in the
    ## degenerate nc == 0 case (1:nc would yield c(1, 0)), and the loop variable
    ## is named 'cell' rather than 'c' to avoid shadowing base::c().
    for (cell in seq_len(nc)) {
        other <- do_sfm_enu(heading, pitch, roll, x[["v"]][, cell, 1], x[["v"]][, cell, 2], x[["v"]][, cell, 3])
        res@data$v[, cell, 1] <- other$east
        res@data$v[, cell, 2] <- other$north
        res@data$v[, cell, 3] <- other$up
    }
    ## Bottom-track velocities, if present, get the same rotation.
    if ("bv" %in% names(x@data)) {
        other <- do_sfm_enu(heading, pitch, roll, x@data$bv[, 1], x@data$bv[, 2], x@data$bv[, 3])
        res@data$bv[, 1] <- other$east
        res@data$bv[, 2] <- other$north
        res@data$bv[, 3] <- other$up
    }
    res@metadata$oceCoordinate <- "other"
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
## Read 'bytes' raw bytes from a connection without advancing it: remember the
## current position, read, then seek back to where we started.
peek.ahead <- function(file, bytes=2, debug=!TRUE)
{
    savedPosition <- seek(file)
    peeked <- readBin(file, "raw", n=bytes, size=1)
    ## paste0("0x", peeked) is equivalent to the original nested paste() call:
    ## each byte is shown with an "0x" prefix in the debug message.
    oceDebug(debug, "peeked at", paste0("0x", peeked), "\n")
    seek(file, savedPosition)
    peeked
}
## Debugging helper: print a labelled header line ("<label> (<n>bytes)") and
## then the raw vector itself.  Extra arguments in ... are forwarded to both
## cat() and print() (e.g. a 'file' argument).
display.bytes <- function(b, label="", ...)
{
    byteCount <- length(b)
    cat("\n", label, " (", byteCount, "bytes)\n", sep="", ...)
    print(b, ...)
}
#' Subtract Bottom Velocity from ADP
#'
#' Subtracts bottom tracking velocities from an `"adp"` object. Works for
#' all coordinate systems (`beam`, `xyz`, and `enu`).
#'
#' @param x an [adp-class] object that contains bottom-tracking velocities.
#'
#' @template debugTemplate
#'
#' @author Dan Kelley and Clark Richards
#'
#' @seealso See [read.adp()] for notes on functions relating to
#' `"adp"` objects, and [adp-class] for notes on the ADP
#' object class.
#'
#' @family things related to adp data
subtractBottomVelocity <- function(x, debug=getOption("oceDebug"))
{
    oceDebug(debug, "subtractBottomVelocity(x) {\n", unindent=1)
    ## Nothing to do if the instrument recorded no bottom-track velocity.
    if (!("bv" %in% names(x@data))) {
        warning("there is no bottom velocity in this object")
        return(x)
    }
    res <- x
    numberOfBeams <- dim(x[["v"]])[3] # could also get from metadata but this is less brittle
    ## Subtract the per-profile bottom velocity from every cell of each beam;
    ## bv[, beam] is recycled down the columns of the profiles-by-cells slab.
    ## seq_len() is used instead of 1:numberOfBeams, which would misbehave in a
    ## degenerate zero-beam case.
    for (beam in seq_len(numberOfBeams)) {
        oceDebug(debug, "beam #", beam, "\n")
        res@data$v[, , beam] <- x[["v"]][, , beam] - x@data$bv[, beam]
    }
    oceDebug(debug, "} # subtractBottomVelocity()\n", unindent=1)
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
#' Bin-map an ADP object
#'
#' Bin-map an ADP object, by interpolating velocities, backscatter amplitudes,
#' etc., to uniform depth bins, thus compensating for the pitch and roll of the
#' instrument. This only makes sense for ADP objects that are in beam
#' coordinates.
#'
#' @param x an [adp-class] object.
#'
#' @template debugTemplate
#'
#' @return An [adp-class] object.
#'
#' @section Bugs: This only works for 4-beam RDI ADP objects.
#'
#' @author Dan Kelley and Clark Richards
#'
#' @seealso See [adp-class] for a discussion of `adp` objects
#' and notes on the many functions dealing with them.
#'
#' @references The method was devised by Clark Richards for use in his PhD work
#' at Department of Oceanography at Dalhousie University.
#'
#' @examples
#'\dontrun{
#' library(oce)
#' beam <- read.oce("/data/archive/sleiwex/2008/moorings/m09/adp/rdi_2615/raw/adp_rdi_2615.000",
#'                  from=as.POSIXct("2008-06-26", tz="UTC"),
#'                  to=as.POSIXct("2008-06-26 00:10:00", tz="UTC"),
#'                  longitude=-69.73433, latitude=47.88126)
#' beam2 <- binmapAdp(beam)
#' plot(enuToOther(toEnu(beam), heading=-31.5))
#' plot(enuToOther(toEnu(beam2), heading=-31.5))
#' plot(beam, which=5:8) # backscatter amplitude
#' plot(beam2, which=5:8)
#'}
#'
#' @family things related to adp data
binmapAdp <- function(x, debug=getOption("oceDebug"))
{
    oceDebug(debug, "binmap(x, debug) {\n", unindent=1)
    if (!inherits(x, "adp"))
        stop("x must be an \"adp\" object")
    v <- x[["v"]]
    a <- x[["a"]] ## FIXME: should ensure that these exist
    q <- x[["q"]]
    g <- x[["g"]]
    if (4 != dim(v)[3])
        stop("binmap() only works for 4-beam instruments")
    theta <- x[['beamAngle']] # FIXME: check that not missing or weird
    distance <- x[["distance"]]
    roll <- x[["roll"]]
    pitch <- x[["pitch"]]
    ## Below, we loop through the profiles. I tried an experiment in
    ## vectorizing across the loop, by combining into a single vector
    ## for (distance, cr, ...), but it was no faster, and the code was
    ## more complicated to read.
    vbm <- array(double(), dim=dim(v))
    abm <- array(raw(), dim=dim(v))
    qbm <- array(raw(), dim=dim(v))
    gbm <- array(raw(), dim=dim(v))
    nprofile <- dim(v)[1]
    ## Loop-invariant quantities, hoisted out of the per-profile loop.
    tt <- tan(theta * pi / 180)
    rule <- 2 # approx() rule: extend a/q/g data to the edges. FIXME: is it OK to do so?
    res <- x
    for (profile in seq_len(nprofile)) {
        r <- roll[profile]
        p <- pitch[profile]
        cr <- cos(r * pi / 180)
        sr <- sin(r * pi / 180)
        cp <- cos(p * pi / 180)
        sp <- sin(p * pi / 180)
        ## Vertical coordinate of each bin along each of the four slanted beams,
        ## given the instrument's present pitch and roll.
        z1 <- distance * (cr - tt * sr) * cp
        z2 <- distance * (cr + tt * sr) * cp
        z3 <- distance * (cp + tt * sp) * cr
        z4 <- distance * (cp - tt * sp) * cr
        ## v=velocity.  Require more than 1 non-NA value in each beam's profile,
        ## since approx() needs at least two points to interpolate.
        checkNA <- sum(!is.na(v[profile, , 1])) > 1 & sum(!is.na(v[profile, , 2])) > 1 & sum(!is.na(v[profile, , 3])) > 1 & sum(!is.na(v[profile, , 4])) > 1
        if (checkNA) {
            vbm[profile, , 1] <- approx(z1, v[profile, , 1], distance)$y
            vbm[profile, , 2] <- approx(z2, v[profile, , 2], distance)$y
            vbm[profile, , 3] <- approx(z3, v[profile, , 3], distance)$y
            vbm[profile, , 4] <- approx(z4, v[profile, , 4], distance)$y
        } else {
            vbm[profile, , 1] <- NA
            vbm[profile, , 2] <- NA
            vbm[profile, , 3] <- NA
            vbm[profile, , 4] <- NA
        }
        ## a (amplitude).  BUG FIX: 'rule' is now given to approx(); previously
        ## it was (inadvertently) an argument to as.numeric(), where it was
        ## silently ignored, so the requested edge extrapolation never happened.
        abm[profile, , 1] <- oce.as.raw(approx(z1, as.numeric(a[profile, , 1]), distance, rule=rule)$y)
        abm[profile, , 2] <- oce.as.raw(approx(z2, as.numeric(a[profile, , 2]), distance, rule=rule)$y)
        abm[profile, , 3] <- oce.as.raw(approx(z3, as.numeric(a[profile, , 3]), distance, rule=rule)$y)
        abm[profile, , 4] <- oce.as.raw(approx(z4, as.numeric(a[profile, , 4]), distance, rule=rule)$y)
        ## q (correlation)
        qbm[profile, , 1] <- oce.as.raw(approx(z1, as.numeric(q[profile, , 1]), distance, rule=rule)$y)
        qbm[profile, , 2] <- oce.as.raw(approx(z2, as.numeric(q[profile, , 2]), distance, rule=rule)$y)
        qbm[profile, , 3] <- oce.as.raw(approx(z3, as.numeric(q[profile, , 3]), distance, rule=rule)$y)
        qbm[profile, , 4] <- oce.as.raw(approx(z4, as.numeric(q[profile, , 4]), distance, rule=rule)$y)
        ## g (percent good)
        gbm[profile, , 1] <- oce.as.raw(approx(z1, as.numeric(g[profile, , 1]), distance, rule=rule)$y)
        gbm[profile, , 2] <- oce.as.raw(approx(z2, as.numeric(g[profile, , 2]), distance, rule=rule)$y)
        gbm[profile, , 3] <- oce.as.raw(approx(z3, as.numeric(g[profile, , 3]), distance, rule=rule)$y)
        gbm[profile, , 4] <- oce.as.raw(approx(z4, as.numeric(g[profile, , 4]), distance, rule=rule)$y)
    }
    res@data$v <- vbm
    res@data$a <- abm
    res@data$q <- qbm
    res@data$g <- gbm
    res
}
#' Ensemble Average an ADP Object in Time
#'
#' Ensemble averaging of `adp` objects is often necessary to
#' reduce the uncertainty in velocity estimates from single
#' pings. Many types of ADPs can be configured to perform the
#' ensemble averaging during the data collection, due to memory
#' limitations for long deployments. In cases where the instrument is
#' not memory limited, it may be desirable to perform the ensemble
#' averaging during post-processing, thereby reducing the overall
#' size of the data set and decreasing the uncertainty of the
#' velocity estimates (by averaging out Doppler noise).
#'
#' @param x an [adp-class] object.
#'
#' @param n number of pings to average together.
#'
#' @param leftover a logical value indicating how to proceed in cases
#' where `n` does not divide evenly into the number of ensembles
#' in `x`. If `leftover` is `FALSE` (the default) then any extra
#' ensembles at the end of `x` are ignored. Otherwise, they are used
#' to create a final ensemble in the returned value.
#'
#' @param na.rm a logical value indicating whether NA values should be stripped
#' before the computation proceeds
#'
#' @param ... extra arguments to be passed to the `mean()` function.
#'
#' @return A new [adp-class] object with ensembles averaged as specified. E.g. for an `adp` object with 100 pings and `n=5` the number of rows of the data arrays will be reduced by a factor of 5.
#'
#' @author Clark Richards and Dan Kelley
#'
#' @examples
#' library(oce)
#' data(adp)
#' adpAvg <- adpEnsembleAverage(adp, n=2)
#' plot(adpAvg)
#'
#' @family things related to adp data
adpEnsembleAverage <- function(x, n=5, leftover=FALSE, na.rm=TRUE, ...)
{
    if (!inherits(x, 'adp')) stop('Must be an object of class adp')
    res <- new('adp', distance=x[['distance']])
    res@metadata <- x@metadata
    d <- x@data
    t <- as.POSIXct(d$time) # ensure POSIXct so next line works right
    ntx <- length(t)
    pings <- seq_along(t)
    ## Note the limits of the breaks, below. We start at 0 to catch the first
    ## pings value. If leftover is TRUE, we also extend at the right, to catch
    ## the fractional chunk that will exist at the end, if n does not divide into ntx.
    breaks <- if (leftover) seq(0, ntx+n, n) else seq(0, ntx, n)
    fac <- cut(pings, breaks=breaks, labels=FALSE) # used to split() data items
    ## Average the times within each ensemble, then convert back to POSIXct.
    res@data$time <- numberAsPOSIXct(as.numeric(lapply(split(as.numeric(t), fac), mean, na.rm=na.rm, ...)))
    for (field in names(d)) {
        ## Scalar condition, so use the short-circuit && rather than
        ## the element-wise & (an idiom fix; behavior is unchanged here).
        if (field != 'time' && field != 'distance') {
            if (is.vector(d[[field]])) {
                res@data[[field]] <- as.numeric(lapply(split(as.numeric(d[[field]]), fac), mean, na.rm=na.rm, ...))
            } else if (is.array(d[[field]])) {
                fdim <- dim(d[[field]])
                res@data[[field]] <- array(NA, dim=c(length(res@data[['time']]), fdim[-1]))
                ## seq_len() guards against zero-extent trailing dimensions,
                ## where 1:tail(fdim, 1) would yield c(1, 0).
                for (j in seq_len(tail(fdim, 1))) {
                    if (length(fdim) == 2) { # for fields like bottom range
                        res@data[[field]][, j] <- unlist(lapply(split(as.numeric(d[[field]][, j]), fac), mean, na.rm=na.rm, ...))
                    } else if (length(fdim) == 3) { # for array fields like v, a, q, etc
                        for (i in seq_len(fdim[2])) {
                            res@data[[field]][, i, j] <- unlist(lapply(split(as.numeric(d[[field]][, i, j]), fac), mean, na.rm=na.rm, ...))
                        }
                    }
                }
                ## Raw fields (a, q, g, ...) were coerced to numeric for the
                ## averaging; convert the averages back to raw.
                if (is.raw(d[[field]])) {
                    dims <- dim(res@data[[field]])
                    res@data[[field]] <- array(as.raw(res@data[[field]]), dim=dims)
                }
            }
        }
    }
    res@metadata$numberOfSamples <- length(res@data$time) # FIXME: handle AD2CP
    res@processingLog <- processingLogAppend(res@processingLog, paste(deparse(match.call()), sep="", collapse=""))
    res
}
|
#' @title Position Module
#'
#' @description Module function for establishing sexual role or position in each
#'              act on the discordant edgelist.
#'
#' @inheritParams aging_msm
#'
#' @details
#' The sexual role within each act is determined by each node's "role identity"
#' as exclusively receptive, exclusively insertive, or versatile. This function
#' determines whether the infected or the susceptible partner is the insertive
#' partner for that act. For the first two role identity types, that is
#' deterministic based on identity. For versatile-versatile pairs, this is
#' determined stochastically for each act.
#'
#' @return
#' This function returns the updated discordant edgelist with a \code{ins}
#' attribute for values of whether the infected node is insertive or the
#' susceptible node is insertive for that act.
#'
#' @keywords module msm
#'
#' @export
#'
position_msm <- function(dat, at) {

  ## Variables
  al <- dat$temp$al
  if (nrow(al) == 0) {
    return(dat)
  }
  status <- dat$attr$status
  ## Keep only serodiscordant acts: infected node (status == 1) in column 1,
  ## susceptible node (status == 0) in column 2.
  ## BUG FIX: drop = FALSE keeps dal a matrix when exactly one act matches;
  ## previously a single-row subset collapsed to a vector and dal[, 1] failed.
  dal <- al[which(status[al[, 1]] == 1 & status[al[, 2]] == 0), , drop = FALSE]
  dat$temp$al <- NULL

  role.class <- dat$attr$role.class
  ins.quot <- dat$attr$ins.quot
  race <- dat$attr$race
  ## Per race-combination probability of intra-event versatility for V-V pairs.
  vv.iev.BB.prob <- dat$param$vv.iev.BB.prob
  vv.iev.BW.prob <- dat$param$vv.iev.BW.prob
  vv.iev.WW.prob <- dat$param$vv.iev.WW.prob

  ## Process
  pos.role.class <- role.class[dal[, 1]]
  neg.role.class <- role.class[dal[, 2]]

  ## ins codes: 1 = infected (positive) partner insertive ("P"),
  ##            0 = susceptible (negative) partner insertive ("N"),
  ##            2 = both (intra-event versatility, "B").
  ins <- rep(NA, length(pos.role.class))
  ins[which(pos.role.class == "I")] <- 1  # "P"
  ins[which(pos.role.class == "R")] <- 0  # "N"
  ins[which(neg.role.class == "I")] <- 0  # "N"
  ins[which(neg.role.class == "R")] <- 1  # "P"

  ## Versatile-versatile pairs: first draw whether the act is intra-event
  ## versatile, with a probability depending on the racial combination.
  vv <- which(pos.role.class == "V" & neg.role.class == "V")
  vv.race.combo <- paste0(race[dal[, 1]][vv], race[dal[, 2]][vv])
  vv.race.combo[vv.race.combo == "WB"] <- "BW"
  vv.iev.prob <- (vv.race.combo == "BB") * vv.iev.BB.prob +
                 (vv.race.combo == "BW") * vv.iev.BW.prob +
                 (vv.race.combo == "WW") * vv.iev.WW.prob
  iev <- rbinom(length(vv), 1, vv.iev.prob)
  ins[vv[iev == 1]] <- 2  # "B"

  ## For the remaining V-V pairs, pick the insertive partner stochastically,
  ## weighted by each node's insertivity quotient.
  vv.remaining <- vv[iev == 0]
  inspos.prob <- ins.quot[dal[, 1][vv.remaining]] /
                 (ins.quot[dal[, 1][vv.remaining]] + ins.quot[dal[, 2][vv.remaining]])
  inspos <- rbinom(length(vv.remaining), 1, inspos.prob)
  ins[vv.remaining[inspos == 1]] <- 1  # "P"
  ins[vv.remaining[inspos == 0]] <- 0  # "N"

  ## Output
  dat$temp$dal <- cbind(dal, ins)
  return(dat)
}
|
/tempR/mod.position.R
|
no_license
|
dth2/EpiModelHIV_SHAMP
|
R
| false
| false
| 2,491
|
r
|
#' @title Position Module
#'
#' @description Module function for establishing sexual role or position in each
#'              act on the discordant edgelist.
#'
#' @inheritParams aging_msm
#'
#' @details
#' The sexual role within each act is determined by each node's "role identity"
#' as exclusively receptive, exclusively insertive, or versatile. This function
#' determines whether the infected or the susceptible partner is the insertive
#' partner for that act. For the first two role identity types, that is
#' deterministic based on identity. For versatile-versatile pairs, this is
#' determined stochastically for each act.
#'
#' @return
#' This function returns the updated discordant edgelist with a \code{ins}
#' attribute for values of whether the infected node is insertive or the
#' susceptible node is insertive for that act.
#'
#' @keywords module msm
#'
#' @export
#'
position_msm <- function(dat, at) {

  ## Variables
  al <- dat$temp$al
  if (nrow(al) == 0) {
    return(dat)
  }
  status <- dat$attr$status
  ## Keep only serodiscordant acts: infected node (status == 1) in column 1,
  ## susceptible node (status == 0) in column 2.
  ## NOTE(review): a single-row match collapses this subset to a vector because
  ## drop = FALSE is not given, which would break dal[, 1] below -- confirm
  ## whether callers guarantee more than one discordant act.
  dal <- al[which(status[al[, 1]] == 1 & status[al[, 2]] == 0), ]
  dat$temp$al <- NULL

  role.class <- dat$attr$role.class
  ins.quot <- dat$attr$ins.quot
  race <- dat$attr$race
  ## Per race-combination probability of intra-event versatility for V-V pairs.
  vv.iev.BB.prob <- dat$param$vv.iev.BB.prob
  vv.iev.BW.prob <- dat$param$vv.iev.BW.prob
  vv.iev.WW.prob <- dat$param$vv.iev.WW.prob

  ## Process
  pos.role.class <- role.class[dal[, 1]]
  neg.role.class <- role.class[dal[, 2]]

  ## ins codes: 1 = infected partner insertive ("P"), 0 = susceptible partner
  ## insertive ("N"), 2 = both (intra-event versatility, "B").
  ins <- rep(NA, length(pos.role.class))
  ins[which(pos.role.class == "I")] <- 1  # "P"
  ins[which(pos.role.class == "R")] <- 0  # "N"
  ins[which(neg.role.class == "I")] <- 0  # "N"
  ins[which(neg.role.class == "R")] <- 1  # "P"

  ## Versatile-versatile pairs: draw intra-event versatility with a
  ## probability that depends on the racial combination ("WB" folded to "BW").
  vv <- which(pos.role.class == "V" & neg.role.class == "V")
  vv.race.combo <- paste0(race[dal[, 1]][vv], race[dal[, 2]][vv])
  vv.race.combo[vv.race.combo == "WB"] <- "BW"
  vv.iev.prob <- (vv.race.combo == "BB") * vv.iev.BB.prob +
                 (vv.race.combo == "BW") * vv.iev.BW.prob +
                 (vv.race.combo == "WW") * vv.iev.WW.prob
  iev <- rbinom(length(vv), 1, vv.iev.prob)
  ins[vv[iev == 1]] <- 2  # "B"
  ## For remaining V-V pairs, pick the insertive partner stochastically,
  ## weighted by each node's insertivity quotient.
  vv.remaining <- vv[iev == 0]
  inspos.prob <- ins.quot[dal[, 1][vv.remaining]] /
                 (ins.quot[dal[, 1][vv.remaining]] + ins.quot[dal[, 2][vv.remaining]])
  inspos <- rbinom(length(vv.remaining), 1, inspos.prob)
  ins[vv.remaining[inspos == 1]] <- 1  # "P"
  ins[vv.remaining[inspos == 0]] <- 0  # "N"

  ## Output
  dat$temp$dal <- cbind(dal, ins)
  return(dat)
}
|
## Plot 2: Global Active Power vs. time for 2007-02-01 and 2007-02-02,
## written to Plot2.png.  "?" marks missing values in the source file, and
## only the first 70000 rows are read (the two target dates fall within them
## per the original script -- TODO confirm against the full data file).
power_raw <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE,
                      na.strings = "?", nrows = 70000)
two_days <- subset(power_raw, Date == "1/2/2007" | Date == "2/2/2007")
## Dates are d/m/Y strings; build a full timestamp from Date plus Time.
two_days$Date <- as.Date(two_days$Date, format = "%d/%m/%Y")
two_days$DateTime <- paste(two_days$Date, two_days$Time)
two_days$DateTime <- strptime(two_days$DateTime, format = "%Y-%m-%d %H:%M:%S")
## Render a 480x480 PNG line plot.
png("Plot2.png", 480, 480)
plot(two_days$DateTime, two_days$Global_active_power, type = "l", lwd = 1.5,
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/Plot2.R
|
no_license
|
gkhnusc/Coursera
|
R
| false
| false
| 477
|
r
|
## Plot 2: Global Active Power vs. time for 2007-02-01 and 2007-02-02.
## "?" marks missing values in this file.
## NOTE(review): nrows=70000 assumes the two target dates fall within the
## first 70000 records -- confirm against the full data file.
week1data<-read.csv("household_power_consumption.txt", sep=";",header=TRUE, na.strings="?", nrows=70000)
## Keep only the two days of interest (dates are d/m/Y strings here).
data<-subset(week1data, Date=="1/2/2007" | Date=="2/2/2007")
data$Date<-as.Date(data$Date, format = "%d/%m/%Y")
## Combine date and time into one timestamp for the x axis.
data$DateTime<- paste(data$Date, data$Time)
data$DateTime <- strptime(data$DateTime, format = "%Y-%m-%d %H:%M:%S")
## Render a 480x480 PNG line plot.
png("Plot2.png", 480, 480)
plot(data$DateTime,data$Global_active_power,type="l",lwd=1.5,xlab="",ylab="Global Active Power (kilowatts)")
dev.off()
|
#' @title Scatter Chart
#' @description Plots a Scatter Chart using Cartesian coordinates
#'              to display values of a given dataset
#' @param dataset List of numeric vectors containing the datasets to be plotted
#' @param colors A list of plot colors preferred for each dataset
#' @param axis_labels Named list of characters defining the preferred chart axis labels
#' @param labels_pos Named list of characters defining the preferred position of the axis labels
#'        e.g. for x-axis ( inner-center, inner-left, outer-right, outer-center, outer-left, inner-right [default] )
#'        and y-axis ( inner-middle, inner-bottom, outer-top, outer-middle, outer-bottom, inner-top [default] )
#' @param axis_regions Named list of character lists defining the properties of regions to be shaded
#' @param subchart Boolean option to show a sub chart for zoom and selection
#'        range. Default set to FALSE.
#' @param zoom Boolean option to enable zooming by mouse wheel event. Default set to TRUE.
#' @param width,height Fixed width for widget (in css units). The default is
#'        NULL, which results in intelligent automatic sizing
#'        based on the widget's container.
#' @param elementId Use an explicit element ID for the widget. Useful if you have
#'        other JavaScript that needs to explicitly discover and
#'        interact with a specific widget instance. Default NULL,
#'        which results in an automatically generated one.
#' @examples
#' dataset <- list(
#'  data1=c(30, 20, 50, 40, 60, 50),
#'  data2=c(200, 130, 90, 240, 130, 220),
#'  data3=c(300, 200, 160, 400, 250, 250))
#' axis_labels <- list(x_axis="Petals",y_axis="Sepals")
#' labels_pos <- list(xpos="outer-center",ypos="outer-middle")
#' colors <- list(data1="blue",data2="black",data3="red")
#' axis_regions <- list(list(axis='x',start= 1, end= 2,reg_class='regionX'),
#'                      list(axis='x',start=4, end=5,reg_class='regionX'))
#'
#'
#'
#' p3_scatter_chart(dataset,colors,axis_labels,labels_pos,axis_regions,TRUE,TRUE)
#' p3_scatter_chart(dataset,NULL,NULL,NULL,axis_regions,FALSE,TRUE,'80%','200%')
#' p3_scatter_chart(dataset,colors,axis_labels,labels_pos)
#' p3_scatter_chart(dataset,colors,axis_labels)
#' \dontrun{
#'
#' p3_scatter_chart(dataset,colors,'100%','500%')
#' }
#'
#' @export
p3_scatter_chart <- function(dataset, colors = NULL, axis_labels = NULL, labels_pos = NULL, axis_regions = NULL,
                             subchart = FALSE, zoom = TRUE, width = NULL,
                             height = NULL, elementId = NULL) {

  ## Fill unspecified options with defaults before forwarding them to the
  ## JavaScript binding via the widget payload 'x'.
  if (is.null(colors)) {
    colors <- list()
  }
  if (is.null(labels_pos)) {
    labels_pos <- list(xs = "outer-right", ys = "outer-bottom")
  }
  if (is.null(axis_labels)) {
    axis_labels <- list(x_axis = "x", y_axis = "y")
  }
  if (is.null(axis_regions)) {
    ## Degenerate region (start == end == -1) so that no shading is drawn.
    axis_regions <- list(list(axis = 'x', start = -1, end = -1, reg_class = 'regionX'))
  }

  ## Payload forwarded to the widget's JavaScript side.
  ## (Idiom fix: use <- rather than = for assignment.)
  x <- list(
    dataset = dataset,
    colors = colors,
    axis_labels = axis_labels,
    labels_pos = labels_pos,
    axis_regions = axis_regions,
    subchart = subchart,
    zoom = zoom,
    elementId = elementId
  )

  ## Create the htmlwidget.
  htmlwidgets::createWidget(
    name = 'p3_scatter_chart',
    x,
    width = width,
    height = height,
    package = 'PantheraWidgets'
  )
}
#' Shiny bindings for Scatter
#'
#' Output and render functions for using p3_scatter_chart within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#'   \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#'   string and have \code{'px'} appended.
#' @param expr An expression that generates a Scatter
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#'   is useful if you want to save an expression in a variable.
#'
#' @name p3_scatter_chart-shiny
#'
#' @export
p3_scatter_chartOutput <- function(outputId, width = '100%', height = '400px'){
  ## Thin wrapper that registers the widget output slot for Shiny UI code.
  htmlwidgets::shinyWidgetOutput(outputId, 'p3_scatter_chart',
                                 width, height,
                                 package = 'PantheraWidgets')
}
#' @rdname p3_scatter_chart-shiny
#' @export
renderp3_scatter_chart <- function(expr, env = parent.frame(), quoted = FALSE) {
  ## Quote the expression unless the caller already did so.
  if (!quoted) {
    expr <- substitute(expr)
  }
  htmlwidgets::shinyRenderWidget(expr, p3_scatter_chartOutput, env, quoted = TRUE)
}
|
/R/p3_scatter_chart.R
|
no_license
|
pantheracorp/PantheraWidgets
|
R
| false
| false
| 4,639
|
r
|
#' @title Scatter Chart
#' @description Plots a Scatter Chart using Cartesian coordinates
#'              to display values of a given dataset
#' @param dataset List of numeric vectors containing the datasets to be plotted
#' @param colors A list of plot colors preferred for each dataset
#' @param axis_labels Named list of characters defining the preferred chart axis labels
#' @param labels_pos Named list of characters defining the preferred position of the axis labels
#'        e.g. for x-axis ( inner-center, inner-left, outer-right, outer-center, outer-left, inner-right [default] )
#'        and y-axis ( inner-middle, inner-bottom, outer-top, outer-middle, outer-bottom, inner-top [default] )
#' @param axis_regions Named list of character lists defining the properties of regions to be shaded
#' @param subchart Boolean option to show a sub chart for zoom and selection
#'        range. Default set to FALSE.
#' @param zoom Boolean option to enable zooming by mouse wheel event. Default set to TRUE.
#' @param width,height Fixed width for widget (in css units). The default is
#'        NULL, which results in intelligent automatic sizing
#'        based on the widget's container.
#' @param elementId Use an explicit element ID for the widget. Useful if you have
#'        other JavaScript that needs to explicitly discover and
#'        interact with a specific widget instance. Default NULL,
#'        which results in an automatically generated one.
#' @examples
#' dataset <- list(
#'  data1=c(30, 20, 50, 40, 60, 50),
#'  data2=c(200, 130, 90, 240, 130, 220),
#'  data3=c(300, 200, 160, 400, 250, 250))
#' axis_labels <- list(x_axis="Petals",y_axis="Sepals")
#' labels_pos <- list(xpos="outer-center",ypos="outer-middle")
#' colors <- list(data1="blue",data2="black",data3="red")
#' axis_regions <- list(list(axis='x',start= 1, end= 2,reg_class='regionX'),
#'                      list(axis='x',start=4, end=5,reg_class='regionX'))
#'
#'
#'
#' p3_scatter_chart(dataset,colors,axis_labels,labels_pos,axis_regions,TRUE,TRUE)
#' p3_scatter_chart(dataset,NULL,NULL,NULL,axis_regions,FALSE,TRUE,'80%','200%')
#' p3_scatter_chart(dataset,colors,axis_labels,labels_pos)
#' p3_scatter_chart(dataset,colors,axis_labels)
#' \dontrun{
#'
#' p3_scatter_chart(dataset,colors,'100%','500%')
#' }
#'
#' @export
p3_scatter_chart <-function(dataset,colors = NULL,axis_labels = NULL,labels_pos = NULL,axis_regions=NULL,
                            subchart=FALSE,zoom=TRUE,width = NULL,
                            height = NULL,elementId=NULL) {
  ## Fill unspecified options with defaults before forwarding them to the
  ## JavaScript binding via the widget payload 'x'.
  if(is.null(colors)){
    colors <- list()
  }
  if(is.null(labels_pos))
  {
    labels_pos <- list(xs="outer-right",ys="outer-bottom")
  }
  if(is.null(axis_labels))
  {
    axis_labels <- list(x_axis="x",y_axis="y")
  }
  if(is.null(axis_regions))
  {
    ## Degenerate region (start == end == -1) so that no shading is drawn.
    axis_regions <- list(list(axis='x',start=-1,end=-1,reg_class='regionX'))
  }
  ## Payload forwarded to the widget's JavaScript side.
  x = list(
    dataset = dataset,
    colors = colors,
    axis_labels = axis_labels,
    labels_pos = labels_pos,
    axis_regions = axis_regions,
    subchart = subchart,
    zoom = zoom,
    elementId = elementId
  )
  # create widget
  htmlwidgets::createWidget(
    name = 'p3_scatter_chart',
    x,
    width = width,
    height = height,
    package = 'PantheraWidgets'
  )
}
#' Shiny bindings for Scatter
#'
#' Output and render functions for using p3_scatter_chart within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a Scatter
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name p3_scatter_chart-shiny
#'
#' @export
p3_scatter_chartOutput <- function(outputId, width = '100%', height = '400px'){
  # Shiny UI placeholder into which a p3_scatter_chart widget is rendered.
  htmlwidgets::shinyWidgetOutput(
    outputId,
    'p3_scatter_chart',
    width,
    height,
    package = 'PantheraWidgets'
  )
}
#' @rdname p3_scatter_chart-shiny
#' @export
renderp3_scatter_chart <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the expression unevaluated unless the caller already quoted it.
  quoted_expr <- if (quoted) expr else substitute(expr)
  htmlwidgets::shinyRenderWidget(quoted_expr, p3_scatter_chartOutput, env, quoted = TRUE)
}
}
|
## R code written by Elizabeth A. Bowman Oct. 27, 2016
## University of Arizona, School of Plant Sciences, eabowman@email.arizona.edu
## Analyses to evaluate whether the two fungal communities were sampled deeply enough.
#=========================================================================================
# Species accumulation curves
#=========================================================================================
#-----------------------------------------------------------------------------------------
# Read in data frames
#-----------------------------------------------------------------------------------------
#--read in EM site-by-species matrix; `dat.dir` must be defined upstream of this script
otu.data <- read.csv(paste0(dat.dir, 'SCM_EM_root_based_site_x_species_matrix.csv'),
                     as.is = TRUE, header = TRUE)
#-----------------------------------------------------------------------------------------
# EM species accumulation curve based on root tip abundance
#-----------------------------------------------------------------------------------------
#<< With singletons included, Overall EM >>-----------------------------------------------
#--Isolate OTU columns (columns 1-5 are assumed to hold sample metadata -- verify)
ov.em <- otu.data[6:length(otu.data)]
#--remove columns summing to 0 (OTUs with no occurrences)
ov.em <- ov.em[which(colSums(ov.em) != 0)]
#--abundance for each OTU, double check for discrepancies
#  (colSums replaces the deprecated dplyr::summarise_each(funs(sum)) idiom)
colSums(ov.em)
#<< With singletons removed, Overall EM >>------------------------------------------------
#--Using previous data frame, drop singleton OTUs (total abundance of 1)
ovns.em <- ov.em[which(colSums(ov.em) > 1)]
#--abundance for each OTU, double check for discrepancies
colSums(ovns.em)
#-----------------------------------------------------------------------------------------
# EM species accumulation curve based on root tip count, combined plots
#-----------------------------------------------------------------------------------------
#<< Combine overall EM plots, with singletons and without >>------------------------------
# FIX: the closing parenthesis of min() previously swallowed `permutations = 999`, so
# min() compared the row sums against the value 999 and specaccum() silently ran with
# its default permutation count. The argument is now passed to specaccum() as intended.
em.all <- specaccum(ov.em, sample = min(rowSums(ov.em)), permutations = 999)
em.all.df <- data.frame(Sites = em.all$sites,
                        Richness = em.all$richness,
                        SD = em.all$sd)
em.all.ns <- specaccum(ovns.em, sample = min(rowSums(ovns.em)), permutations = 999)
em.all.ns.df <- data.frame(Sites = em.all.ns$sites,
                           Richness = em.all.ns$richness,
                           SD = em.all.ns$sd)
# Black curve/ribbon: all OTUs; grey curve/ribbon: singletons removed.
# Ribbons span Richness +/- 2 SD.
ggplot() +
  geom_point(data = em.all.df, aes(x = Sites, y = Richness)) +
  geom_line(data = em.all.df, aes(x = Sites, y = Richness)) +
  geom_ribbon(data = em.all.df, aes(x = Sites,
                                    ymin = (Richness - 2 * SD),
                                    ymax = (Richness + 2 * SD)),
              alpha = 0.2) +
  geom_point(data = em.all.ns.df, aes(x = Sites, y = Richness), colour = 'darkgrey') +
  geom_line(data = em.all.ns.df, aes(x = Sites, y = Richness), colour = 'darkgrey') +
  geom_ribbon(data = em.all.ns.df, aes(x = Sites,
                                       ymin = (Richness - 2 * SD),
                                       ymax = (Richness + 2 * SD)),
              alpha = 0.2) +
  theme_bw() +
  expand_limits(y = c(0, 150)) +
  ylab('OTUs') +
  xlab('Trees sampled') +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
  theme(axis.text = element_text(size = 28, color = 'black'),
        axis.title = element_text(size = 28)) +
  theme(legend.position = "none")
ggsave('Appendix_S5_Fig1b.pdf', plot = last_plot(), device = 'pdf', path = fig.dir)
#<< Combine low and highelev. EM plots, with singletons and without >>--------------------
# em.low <- specaccum(le.em, sample = min(rowSums(le.em), permutations = 999))
# em.low.df <- data.frame(Sites=em.low$sites,
# Richness=em.low$richness,
# SD=em.low$sd)
# em.low.ns <- specaccum(lens.em, sample = min(rowSums(lens.em), permutations = 999))
# em.low.ns.df <- data.frame(Sites=em.low.ns$sites,
# Richness=em.low.ns$richness,
# SD=em.low.ns$sd)
# em.high <- specaccum(he.em, sample = min(rowSums(he.em), permutations = 999))
# em.high.df <- data.frame(Sites=em.high$sites,
# Richness=em.high$richness,
# SD=em.high$sd)
# em.high.ns <- specaccum(hens.em, sample = min(rowSums (hens.em), permutations = 999))
# em.high.ns.df <- data.frame(Sites=em.high.ns$sites,
# Richness=em.high.ns$richness,
# SD=em.high.ns$sd)
# ggplot() +
# geom_point(data=em.low.df, aes(x=Sites, y=Richness), size = 3) +
# geom_line(data=em.low.df, aes(x=Sites, y=Richness)) +
# geom_ribbon(data=em.low.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# geom_point(data=em.low.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey', size = 3) +
# geom_line(data=em.low.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey') +
# geom_ribbon(data=em.low.ns.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# geom_point(data=em.high.df, aes(x=Sites, y=Richness), shape = 5, size = 3) +
# geom_line(data=em.high.df, aes(x=Sites, y=Richness)) +
# geom_ribbon(data=em.high.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# geom_point(data=em.high.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey',
# shape = 5, size = 3) +
# geom_line(data=em.high.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey') +
# geom_ribbon(data=em.high.ns.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# theme_bw() +
# expand_limits(y=c(0,150)) +
# ylab('OTUs') +
# xlab('Samples') +
# theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
# theme(axis.text = element_text(size=28, color = 'black'),
# axis.title = element_text(size = 28)) +
# theme(legend.position = "none")
#
# ggsave('Appendix_S5_fig1c.pdf', plot = last_plot(), device = 'pdf', path = fig.dir)
|
/script/Species_accumulation_curves_based_on_EMmorphotype_data.R
|
no_license
|
eabowman/Bowman_and_Arnold_fungal_elev_grad
|
R
| false
| false
| 6,365
|
r
|
## R code written by Elizabeth A. Bowman Oct. 27, 2016
## University of Arizona, School of Plant Sciences, eabowman@email.arizona.edu
## Analyses to evaluate whether the two fungal communities were sampled deeply enough.
#=========================================================================================
# Species accumulation curves
#=========================================================================================
#-----------------------------------------------------------------------------------------
# Read in data frames
#-----------------------------------------------------------------------------------------
#--read in EM site-by-species matrix; `dat.dir` must be defined upstream of this script
otu.data <- read.csv(paste0(dat.dir, 'SCM_EM_root_based_site_x_species_matrix.csv'),
                     as.is = TRUE, header = TRUE)
#-----------------------------------------------------------------------------------------
# EM species accumulation curve based on root tip abundance
#-----------------------------------------------------------------------------------------
#<< With singletons included, Overall EM >>-----------------------------------------------
#--Isolate OTU columns (columns 1-5 are assumed to hold sample metadata -- verify)
ov.em <- otu.data[6:length(otu.data)]
#--remove columns summing to 0 (OTUs with no occurrences)
ov.em <- ov.em[which(colSums(ov.em) != 0)]
#--abundance for each OTU, double check for discrepancies
#  (colSums replaces the deprecated dplyr::summarise_each(funs(sum)) idiom)
colSums(ov.em)
#<< With singletons removed, Overall EM >>------------------------------------------------
#--Using previous data frame, drop singleton OTUs (total abundance of 1)
ovns.em <- ov.em[which(colSums(ov.em) > 1)]
#--abundance for each OTU, double check for discrepancies
colSums(ovns.em)
#-----------------------------------------------------------------------------------------
# EM species accumulation curve based on root tip count, combined plots
#-----------------------------------------------------------------------------------------
#<< Combine overall EM plots, with singletons and without >>------------------------------
# FIX: the closing parenthesis of min() previously swallowed `permutations = 999`, so
# min() compared the row sums against the value 999 and specaccum() silently ran with
# its default permutation count. The argument is now passed to specaccum() as intended.
em.all <- specaccum(ov.em, sample = min(rowSums(ov.em)), permutations = 999)
em.all.df <- data.frame(Sites = em.all$sites,
                        Richness = em.all$richness,
                        SD = em.all$sd)
em.all.ns <- specaccum(ovns.em, sample = min(rowSums(ovns.em)), permutations = 999)
em.all.ns.df <- data.frame(Sites = em.all.ns$sites,
                           Richness = em.all.ns$richness,
                           SD = em.all.ns$sd)
# Black curve/ribbon: all OTUs; grey curve/ribbon: singletons removed.
# Ribbons span Richness +/- 2 SD.
ggplot() +
  geom_point(data = em.all.df, aes(x = Sites, y = Richness)) +
  geom_line(data = em.all.df, aes(x = Sites, y = Richness)) +
  geom_ribbon(data = em.all.df, aes(x = Sites,
                                    ymin = (Richness - 2 * SD),
                                    ymax = (Richness + 2 * SD)),
              alpha = 0.2) +
  geom_point(data = em.all.ns.df, aes(x = Sites, y = Richness), colour = 'darkgrey') +
  geom_line(data = em.all.ns.df, aes(x = Sites, y = Richness), colour = 'darkgrey') +
  geom_ribbon(data = em.all.ns.df, aes(x = Sites,
                                       ymin = (Richness - 2 * SD),
                                       ymax = (Richness + 2 * SD)),
              alpha = 0.2) +
  theme_bw() +
  expand_limits(y = c(0, 150)) +
  ylab('OTUs') +
  xlab('Trees sampled') +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
  theme(axis.text = element_text(size = 28, color = 'black'),
        axis.title = element_text(size = 28)) +
  theme(legend.position = "none")
ggsave('Appendix_S5_Fig1b.pdf', plot = last_plot(), device = 'pdf', path = fig.dir)
#<< Combine low and highelev. EM plots, with singletons and without >>--------------------
# em.low <- specaccum(le.em, sample = min(rowSums(le.em), permutations = 999))
# em.low.df <- data.frame(Sites=em.low$sites,
# Richness=em.low$richness,
# SD=em.low$sd)
# em.low.ns <- specaccum(lens.em, sample = min(rowSums(lens.em), permutations = 999))
# em.low.ns.df <- data.frame(Sites=em.low.ns$sites,
# Richness=em.low.ns$richness,
# SD=em.low.ns$sd)
# em.high <- specaccum(he.em, sample = min(rowSums(he.em), permutations = 999))
# em.high.df <- data.frame(Sites=em.high$sites,
# Richness=em.high$richness,
# SD=em.high$sd)
# em.high.ns <- specaccum(hens.em, sample = min(rowSums (hens.em), permutations = 999))
# em.high.ns.df <- data.frame(Sites=em.high.ns$sites,
# Richness=em.high.ns$richness,
# SD=em.high.ns$sd)
# ggplot() +
# geom_point(data=em.low.df, aes(x=Sites, y=Richness), size = 3) +
# geom_line(data=em.low.df, aes(x=Sites, y=Richness)) +
# geom_ribbon(data=em.low.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# geom_point(data=em.low.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey', size = 3) +
# geom_line(data=em.low.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey') +
# geom_ribbon(data=em.low.ns.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# geom_point(data=em.high.df, aes(x=Sites, y=Richness), shape = 5, size = 3) +
# geom_line(data=em.high.df, aes(x=Sites, y=Richness)) +
# geom_ribbon(data=em.high.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# geom_point(data=em.high.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey',
# shape = 5, size = 3) +
# geom_line(data=em.high.ns.df, aes(x=Sites, y=Richness), colour = 'darkgrey') +
# geom_ribbon(data=em.high.ns.df ,aes(x=Sites,
# ymin=(Richness-2*SD),
# ymax=(Richness+2*SD)),
# alpha=0.2) +
# theme_bw() +
# expand_limits(y=c(0,150)) +
# ylab('OTUs') +
# xlab('Samples') +
# theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
# theme(axis.text = element_text(size=28, color = 'black'),
# axis.title = element_text(size = 28)) +
# theme(legend.position = "none")
#
# ggsave('Appendix_S5_fig1c.pdf', plot = last_plot(), device = 'pdf', path = fig.dir)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10078
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10078
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#140.A#48.c#.w#3.s#3.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3547
c no.of clauses 10078
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10078
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#140.A#48.c#.w#3.s#3.asp.qdimacs 3547 10078 E1 [] 0 140 3407 10078 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#140.A#48.c#.w#3.s#3.asp/ctrl.e#1.a#3.E#140.A#48.c#.w#3.s#3.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 730
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10078
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10078
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#140.A#48.c#.w#3.s#3.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3547
c no.of clauses 10078
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10078
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#140.A#48.c#.w#3.s#3.asp.qdimacs 3547 10078 E1 [] 0 140 3407 10078 NONE
|
library("LOLA")
library("ggplot2")
library("dplyr")
options(warn = -1)
#######################################################################
########## Function to perform enrichment analyis using LOLA ##########
#######################################################################
enrich <- function(assembly, type) {
  # Region database of pre-assembled loci for the tested genomic elements.
  region_db <- loadRegionDB(paste0("reference/LOLA/", assembly))
  # Query set (DISCREPs) and background universe for this assembly/type.
  query_set <- readBed(paste0("inputs/DISCREPs_", assembly, "_10kb.", type, ".bed"))
  background <- readBed(paste0("inputs/total.", assembly, ".bed.binCount.filtered"))
  results <- runLOLA(query_set, background, region_db, cores = 1)
  # Label rows: overlapping sets share one group; unique sets are tagged by assembly.
  results$group <- ifelse(type == "overlap", type, paste(assembly, type))
  results
}
###########################################################
########## Function to perform Fisher exact test ##########
###########################################################
fisher_test <- function(df, columns) {
  # Run one Fisher's exact test per row of `df`. `columns` names the four
  # count columns, filled column-wise into a 2x2 contingency table:
  #   [ columns[1]  columns[3] ]
  #   [ columns[2]  columns[4] ]
  # Returns `df` with the htest objects (`fisher`) plus the extracted
  # p-values, odds ratios, 95% CI bounds, and their log2 transforms appended.
  #
  # Rewritten from apply(df, 1, ...): apply() coerces the whole data frame to
  # a character matrix when non-numeric columns (description, group) are
  # present, and the original re-ran apply() five times over stored htest
  # objects; here each test is computed exactly once on the numeric columns.
  tests <- lapply(seq_len(nrow(df)), function(i) {
    counts <- c(df[[columns[1]]][i], df[[columns[2]]][i],
                df[[columns[3]]][i], df[[columns[4]]][i])
    fisher.test(matrix(as.numeric(counts), nrow = 2))
  })
  df$fisher <- tests
  df$pValue <- vapply(tests, function(t) t$p.value, numeric(1))
  df$oddsRatio <- vapply(tests, function(t) unname(t$estimate), numeric(1))
  df$CI95_lower <- vapply(tests, function(t) t$conf.int[1], numeric(1))
  df$CI95_higher <- vapply(tests, function(t) t$conf.int[2], numeric(1))
  df$log_OR <- log2(df$oddsRatio)
  df$log_CI_higher <- log2(df$CI95_higher)
  df$log_CI_lower <- log2(df$CI95_lower)
  df
}
##############################################################
########## Enrichment analyses for each DISCREP set ##########
##############################################################
# Run LOLA enrichment for every (assembly, DISCREP type) combination and
# stack the per-run result tables into one data frame.
allResults = NULL
for (assembly in c('GRCh37','GRCh38')) {
for (type in c('overlap','non-overlap')) {
allResults = rbind(allResults, enrich(assembly, type))
}
}
# Perform statistical tests
df_use = allResults[,c('support','b','c','d','description','group')] # b,c,d are the default columns names from LOLA
# Collapse the 2x2 contingency counts per (group, description) before testing.
df_use_ready = df_use %>%
group_by(group, description) %>%
summarize(support = sum(support), b = sum(b), c = sum(c), d = sum(d))
df_use_ready = fisher_test(df_use_ready, c('support','b','c','d')) # these are the column names from LOLA by default
# Order results by the odds ratio in the overlap DISCREPs set
df_use_ready_overlap = df_use_ready[df_use_ready$group == "overlap",]
df_use_ready$description = factor(df_use_ready$description,
levels = df_use_ready_overlap[order(df_use_ready_overlap$oddsRatio, decreasing=TRUE),]$description )
df_use_ready$group = factor(df_use_ready$group,
levels = c('GRCh38 non-overlap','GRCh37 non-overlap','overlap'))
# p.adjust() with no `method` argument applies the Holm correction.
df_use_ready$qValue = p.adjust(df_use_ready$pValue)
# apply() coerces the data frame to a character matrix, hence the as.numeric()
# on the extracted qValue; rows with q < 0.01 are flagged 'sig'.
df_use_ready$sig = apply(df_use_ready, 1, function(x) ifelse(as.numeric(x['qValue']) < 0.01, 'sig', "xno"))
######################################################################################
########## Plotting of the enrichment analyses results for each DISCREP set ##########
######################################################################################
# Forest plot: one facet per genomic element, one point range per DISCREP
# group, log2 odds ratio on the (flipped) value axis.
p = ggplot(data = df_use_ready,
aes(x = group, y = log_OR, ymin = log_CI_lower, ymax = log_CI_higher)) +
geom_pointrange(aes(col=group, ), linetype = 0)+
geom_hline(aes(fill=group),yintercept =0, linetype=2)+
xlab('')+ ylab("Odds Ratio and 95% CI (log scale)")+
# Linetype is mapped to `sig`; with the solid/dashed scale below, the
# non-significant rows ("xno") render as dashed error bars.
geom_errorbar(aes(ymin=log_CI_lower, ymax=log_CI_higher,col=group, linetype = sig, ),width=0.4,cex=1)+
facet_wrap(~description, strip.position="left",nrow=9, ) +
theme(plot.title=element_text(size=16,face="bold"),
plot.margin = margin(5, 5, 20, 5),
axis.text.y=element_blank(),
axis.text.x=element_text(face="bold", size = 15),
axis.ticks.y=element_blank(),
axis.title=element_text(size=12,face="bold"),
axis.title.x=element_text(face="bold", size = 20, hjust=-0.4, vjust = -0.4),
strip.text.y = element_text(hjust=0.5,vjust = 0.5,angle=180,face="bold"),
strip.text.y.left = element_text(angle = 0),
strip.text = element_text(size = 15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.text=element_text(size=22),
legend.title=element_blank(),
legend.position = c(0.7, 0.19))+
# Breaks are placed on the log2 scale but labelled with raw odds ratios.
scale_y_continuous(breaks=c( log2(1), log2(2), log2(5), log2(10), log2(20),log2(50), log2(100),log2(200)),
labels = c('1', '2','5','10','20','50','100','200'))+
scale_color_manual(values=c("#311F23", "#6666DD", "#DD8452" ),
breaks=c("overlap", "GRCh37 non-overlap", "GRCh38 non-overlap"),
labels=c("Overlapping DISCREPs", "Unique to GRCh37", "Unique to GRCh38"))+
scale_linetype_manual(values=c("solid", "dashed"))+
# NOTE(review): guides(linetype = FALSE) is deprecated in ggplot2 >= 3.3.4;
# the modern spelling is guides(linetype = "none").
guides(linetype=FALSE)+
coord_flip()
p
##################################################################
########## Compare between different groups of DISCREPs ##########
##################################################################
# construct data frame for pairwise comparisons
df_use_compare = df_use %>%
group_by(group, description) %>%
summarize(support = sum(support), not_support = sum(c))
df_unique_hg19 = df_use_compare[df_use_compare$group == "GRCh37 non-overlap", ]
df_unique_hg38 = df_use_compare[df_use_compare$group == "GRCh38 non-overlap", ]
df_overlap = df_use_compare[df_use_compare$group == "overlap", ]
# Pair up groups on the shared genomic element; merge() suffixes .x/.y
# distinguish the two sides of each comparison.
df_overlap_hg19 = merge(df_overlap, df_unique_hg19, by = 'description')
df_overlap_hg38 = merge(df_overlap, df_unique_hg38, by = 'description')
df_hg19_hg38 = merge(df_unique_hg19, df_unique_hg38, by = 'description')
fisherTests = lapply(list(df_overlap_hg19, df_overlap_hg38, df_hg19_hg38),
fisher_test, columns = c('support.x','not_support.x','support.y','not_support.y'))
df_overlap_hg19 = fisherTests[[1]]
df_overlap_hg38 = fisherTests[[2]]
df_hg19_hg38 = fisherTests[[3]]
df_hg19_hg38$group = 'hg19_vs_hg38'
df_overlap_hg38$group = 'overlap_vs_hg38'
df_overlap_hg19$group = 'overlap_vs_hg19'
# combine different groups together
selected_features = c(
'segmental duplication', 'assembly problems', 'fix patch sequences',
'alternate haplotype' ,'genome assemblies difference', 'gaps in assembly')
df_compare_combined = do.call("rbind", list(df_overlap_hg19, df_overlap_hg38, df_hg19_hg38))
df_compare_combined = df_compare_combined[df_compare_combined$description %in% selected_features,]
df_compare_combined$description = factor(df_compare_combined$description, levels = rev(selected_features))
# Holm-adjusted p-values across all pairwise tests (p.adjust default method).
df_compare_combined$qValue = p.adjust(df_compare_combined$pValue)
df_compare_combined$sig = apply(df_compare_combined, 1, function(x) ifelse(as.numeric(x['qValue']) < 0.01, 'sig', "xno"))
df_compare_combined$group = factor(df_compare_combined$group,
labels = c('Overlap vs GRCh37 Unique', 'GRCh37 Unique vs\nGRCh38 Unique', 'Overlap vs GRCh38 Unique'),
levels = c('overlap_vs_hg19','hg19_vs_hg38','overlap_vs_hg38'))
######################################################################################
########## Plotting of the pairwise comparisons between each DISCREP set #############
######################################################################################
# Forest plot of the pairwise odds ratios; the color scale below renders
# non-significant comparisons ("xno") in grey.
p = ggplot(data = df_compare_combined,
aes(x = description, y = log_OR, ymin = log_CI_lower, ymax = log_CI_higher)) +
geom_pointrange(aes(col=sig), linetype = 0)+
geom_hline(aes(fill=sig),yintercept =0, linetype=2)+
xlab('')+ ylab("Odds Ratio and 95% CI (log scale)")+
geom_errorbar(aes(ymin=log_CI_lower, ymax=log_CI_higher,col=sig, linetype = 'solid'),width=0.2,cex=1)+
facet_wrap(~group, strip.position="top",nrow=1) +
theme(plot.title=element_text(size=16,face="bold"),
axis.text.x=element_text(face="bold", size = 20, vjust = 1),
axis.text.y=element_text(face="bold", size = 18),
axis.title.x=element_text(face="bold", size = 20, vjust = 0),
strip.text.x = element_text(hjust=0.5,vjust = 0.5,angle=180,face="bold"),
strip.text.x.top = element_text(angle = 0, size = 20),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
)+
scale_y_continuous(breaks=c(log2(0.5), log2(1), log2(2), log2(5), log2(10)),
labels = c('0.5', '1', '2','5','10'))+
scale_color_manual(values=c("black", "grey70"))+
# NOTE(review): guides(linetype = FALSE) is deprecated in ggplot2 >= 3.3.4;
# the modern spelling is guides(linetype = "none").
guides(linetype=FALSE)+
coord_flip()
p
################################################
########## Output results in tables ############
################################################
# Persist the per-group enrichment table; the plots above are displayed,
# not written to disk here.
output = df_use_ready[c('group','description','pValue','qValue', 'oddsRatio','CI95_lower','CI95_higher')]
write.table(output, 'results/LOLA_enrichment.tsv', sep = '\t', row.names = F, quote = F)
|
/LOLA.R
|
permissive
|
MoezDawood/discreps
|
R
| false
| false
| 8,799
|
r
|
library("LOLA")
library("ggplot2")
library("dplyr")
options(warn = -1)
#######################################################################
########## Function to perform enrichment analyis using LOLA ##########
#######################################################################
enrich <- function(assembly, type) {
  # Region database of pre-assembled loci for the tested genomic elements.
  region_db <- loadRegionDB(paste0("reference/LOLA/", assembly))
  # Query set (DISCREPs) and background universe for this assembly/type.
  query_set <- readBed(paste0("inputs/DISCREPs_", assembly, "_10kb.", type, ".bed"))
  background <- readBed(paste0("inputs/total.", assembly, ".bed.binCount.filtered"))
  results <- runLOLA(query_set, background, region_db, cores = 1)
  # Label rows: overlapping sets share one group; unique sets are tagged by assembly.
  results$group <- ifelse(type == "overlap", type, paste(assembly, type))
  results
}
###########################################################
########## Function to perform Fisher exact test ##########
###########################################################
fisher_test <- function(df, columns) {
  # Run one Fisher's exact test per row of `df`. `columns` names the four
  # count columns, filled column-wise into a 2x2 contingency table:
  #   [ columns[1]  columns[3] ]
  #   [ columns[2]  columns[4] ]
  # Returns `df` with the htest objects (`fisher`) plus the extracted
  # p-values, odds ratios, 95% CI bounds, and their log2 transforms appended.
  #
  # Rewritten from apply(df, 1, ...): apply() coerces the whole data frame to
  # a character matrix when non-numeric columns (description, group) are
  # present, and the original re-ran apply() five times over stored htest
  # objects; here each test is computed exactly once on the numeric columns.
  tests <- lapply(seq_len(nrow(df)), function(i) {
    counts <- c(df[[columns[1]]][i], df[[columns[2]]][i],
                df[[columns[3]]][i], df[[columns[4]]][i])
    fisher.test(matrix(as.numeric(counts), nrow = 2))
  })
  df$fisher <- tests
  df$pValue <- vapply(tests, function(t) t$p.value, numeric(1))
  df$oddsRatio <- vapply(tests, function(t) unname(t$estimate), numeric(1))
  df$CI95_lower <- vapply(tests, function(t) t$conf.int[1], numeric(1))
  df$CI95_higher <- vapply(tests, function(t) t$conf.int[2], numeric(1))
  df$log_OR <- log2(df$oddsRatio)
  df$log_CI_higher <- log2(df$CI95_higher)
  df$log_CI_lower <- log2(df$CI95_lower)
  df
}
##############################################################
########## Enrichment analyses for each DISCREP set ##########
##############################################################
# Run LOLA enrichment for every (assembly, DISCREP type) combination and
# stack the per-run result tables into one data frame.
allResults = NULL
for (assembly in c('GRCh37','GRCh38')) {
for (type in c('overlap','non-overlap')) {
allResults = rbind(allResults, enrich(assembly, type))
}
}
# Perform statistical tests
df_use = allResults[,c('support','b','c','d','description','group')] # b,c,d are the default columns names from LOLA
# Collapse the 2x2 contingency counts per (group, description) before testing.
df_use_ready = df_use %>%
group_by(group, description) %>%
summarize(support = sum(support), b = sum(b), c = sum(c), d = sum(d))
df_use_ready = fisher_test(df_use_ready, c('support','b','c','d')) # these are the column names from LOLA by default
# Order results by the odds ratio in the overlap DISCREPs set
df_use_ready_overlap = df_use_ready[df_use_ready$group == "overlap",]
df_use_ready$description = factor(df_use_ready$description,
levels = df_use_ready_overlap[order(df_use_ready_overlap$oddsRatio, decreasing=TRUE),]$description )
df_use_ready$group = factor(df_use_ready$group,
levels = c('GRCh38 non-overlap','GRCh37 non-overlap','overlap'))
# p.adjust() with no `method` argument applies the Holm correction.
df_use_ready$qValue = p.adjust(df_use_ready$pValue)
# apply() coerces the data frame to a character matrix, hence the as.numeric()
# on the extracted qValue; rows with q < 0.01 are flagged 'sig'.
df_use_ready$sig = apply(df_use_ready, 1, function(x) ifelse(as.numeric(x['qValue']) < 0.01, 'sig', "xno"))
######################################################################################
########## Plotting of the enrichment analyses results for each DISCREP set ##########
######################################################################################
# Forest plot: one facet per genomic element, one point range per DISCREP
# group, log2 odds ratio on the (flipped) value axis.
p = ggplot(data = df_use_ready,
aes(x = group, y = log_OR, ymin = log_CI_lower, ymax = log_CI_higher)) +
geom_pointrange(aes(col=group, ), linetype = 0)+
geom_hline(aes(fill=group),yintercept =0, linetype=2)+
xlab('')+ ylab("Odds Ratio and 95% CI (log scale)")+
# Linetype is mapped to `sig`; with the solid/dashed scale below, the
# non-significant rows ("xno") render as dashed error bars.
geom_errorbar(aes(ymin=log_CI_lower, ymax=log_CI_higher,col=group, linetype = sig, ),width=0.4,cex=1)+
facet_wrap(~description, strip.position="left",nrow=9, ) +
theme(plot.title=element_text(size=16,face="bold"),
plot.margin = margin(5, 5, 20, 5),
axis.text.y=element_blank(),
axis.text.x=element_text(face="bold", size = 15),
axis.ticks.y=element_blank(),
axis.title=element_text(size=12,face="bold"),
axis.title.x=element_text(face="bold", size = 20, hjust=-0.4, vjust = -0.4),
strip.text.y = element_text(hjust=0.5,vjust = 0.5,angle=180,face="bold"),
strip.text.y.left = element_text(angle = 0),
strip.text = element_text(size = 15),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.text=element_text(size=22),
legend.title=element_blank(),
legend.position = c(0.7, 0.19))+
# Breaks are placed on the log2 scale but labelled with raw odds ratios.
scale_y_continuous(breaks=c( log2(1), log2(2), log2(5), log2(10), log2(20),log2(50), log2(100),log2(200)),
labels = c('1', '2','5','10','20','50','100','200'))+
scale_color_manual(values=c("#311F23", "#6666DD", "#DD8452" ),
breaks=c("overlap", "GRCh37 non-overlap", "GRCh38 non-overlap"),
labels=c("Overlapping DISCREPs", "Unique to GRCh37", "Unique to GRCh38"))+
scale_linetype_manual(values=c("solid", "dashed"))+
# NOTE(review): guides(linetype = FALSE) is deprecated in ggplot2 >= 3.3.4;
# the modern spelling is guides(linetype = "none").
guides(linetype=FALSE)+
coord_flip()
p
##################################################################
########## Compare between different groups of DISCREPs ##########
##################################################################
# construct data frame for pairwise comparisons
df_use_compare = df_use %>%
group_by(group, description) %>%
summarize(support = sum(support), not_support = sum(c))
df_unique_hg19 = df_use_compare[df_use_compare$group == "GRCh37 non-overlap", ]
df_unique_hg38 = df_use_compare[df_use_compare$group == "GRCh38 non-overlap", ]
df_overlap = df_use_compare[df_use_compare$group == "overlap", ]
# Pair up groups on the shared genomic element; merge() suffixes .x/.y
# distinguish the two sides of each comparison.
df_overlap_hg19 = merge(df_overlap, df_unique_hg19, by = 'description')
df_overlap_hg38 = merge(df_overlap, df_unique_hg38, by = 'description')
df_hg19_hg38 = merge(df_unique_hg19, df_unique_hg38, by = 'description')
fisherTests = lapply(list(df_overlap_hg19, df_overlap_hg38, df_hg19_hg38),
fisher_test, columns = c('support.x','not_support.x','support.y','not_support.y'))
df_overlap_hg19 = fisherTests[[1]]
df_overlap_hg38 = fisherTests[[2]]
df_hg19_hg38 = fisherTests[[3]]
df_hg19_hg38$group = 'hg19_vs_hg38'
df_overlap_hg38$group = 'overlap_vs_hg38'
df_overlap_hg19$group = 'overlap_vs_hg19'
# combine different groups together
selected_features = c(
'segmental duplication', 'assembly problems', 'fix patch sequences',
'alternate haplotype' ,'genome assemblies difference', 'gaps in assembly')
df_compare_combined = do.call("rbind", list(df_overlap_hg19, df_overlap_hg38, df_hg19_hg38))
df_compare_combined = df_compare_combined[df_compare_combined$description %in% selected_features,]
df_compare_combined$description = factor(df_compare_combined$description, levels = rev(selected_features))
# Holm-adjusted p-values across all pairwise tests (p.adjust default method).
df_compare_combined$qValue = p.adjust(df_compare_combined$pValue)
df_compare_combined$sig = apply(df_compare_combined, 1, function(x) ifelse(as.numeric(x['qValue']) < 0.01, 'sig', "xno"))
df_compare_combined$group = factor(df_compare_combined$group,
labels = c('Overlap vs GRCh37 Unique', 'GRCh37 Unique vs\nGRCh38 Unique', 'Overlap vs GRCh38 Unique'),
levels = c('overlap_vs_hg19','hg19_vs_hg38','overlap_vs_hg38'))
######################################################################################
########## Plotting of the pairwise comparisons between each DISCREP set #############
######################################################################################
# Forest plot of the pairwise odds ratios; the color scale below renders
# non-significant comparisons ("xno") in grey.
p = ggplot(data = df_compare_combined,
aes(x = description, y = log_OR, ymin = log_CI_lower, ymax = log_CI_higher)) +
geom_pointrange(aes(col=sig), linetype = 0)+
geom_hline(aes(fill=sig),yintercept =0, linetype=2)+
xlab('')+ ylab("Odds Ratio and 95% CI (log scale)")+
geom_errorbar(aes(ymin=log_CI_lower, ymax=log_CI_higher,col=sig, linetype = 'solid'),width=0.2,cex=1)+
facet_wrap(~group, strip.position="top",nrow=1) +
theme(plot.title=element_text(size=16,face="bold"),
axis.text.x=element_text(face="bold", size = 20, vjust = 1),
axis.text.y=element_text(face="bold", size = 18),
axis.title.x=element_text(face="bold", size = 20, vjust = 0),
strip.text.x = element_text(hjust=0.5,vjust = 0.5,angle=180,face="bold"),
strip.text.x.top = element_text(angle = 0, size = 20),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
)+
scale_y_continuous(breaks=c(log2(0.5), log2(1), log2(2), log2(5), log2(10)),
labels = c('0.5', '1', '2','5','10'))+
scale_color_manual(values=c("black", "grey70"))+
# NOTE(review): guides(linetype = FALSE) is deprecated in ggplot2 >= 3.3.4;
# the modern spelling is guides(linetype = "none").
guides(linetype=FALSE)+
coord_flip()
p
################################################
########## Output results in tables ############
################################################
# Persist the per-group enrichment table; the plots above are displayed,
# not written to disk here.
output = df_use_ready[c('group','description','pValue','qValue', 'oddsRatio','CI95_lower','CI95_higher')]
write.table(output, 'results/LOLA_enrichment.tsv', sep = '\t', row.names = F, quote = F)
|
# Aggregate a single-column xts price series into OHLC bars.
#   xts : xts object whose first column name supplies the output prefix
#   m   : number of base periods per bar (e.g. m = 5 with on = 'mins' -> 5-minute bars)
#   on  : base period passed to xts::endpoints ('mins', 'hours', ...)
as.ohlc <- function(xts, m=1, on='mins') {
  # Output columns are "<prefix>.Open" etc., e.g. 'AAPL.ASK' -> 'AAPL.ASK.Open'.
  prefix <- colnames(xts)[1]
  ohlc_names <- paste(prefix, c("Open", "High", "Low", "Close"), sep = ".")
  # Indices that end each base period.
  eps <- endpoints(xts, on)
  # Keep every m-th endpoint to form multi-period bars.
  if (m > 1) {
    eps <- eps[seq.int(1L, length(eps), m)]
  }
  first_obs <- period.apply(xts, INDEX = eps, FUN = head, 1)
  highs     <- period.apply(xts, INDEX = eps, FUN = max)
  lows      <- period.apply(xts, INDEX = eps, FUN = min)
  last_obs  <- period.apply(xts, INDEX = eps, FUN = tail, 1)
  bars <- merge(first_obs, highs, lows, last_obs)
  colnames(bars) <- ohlc_names
  return(bars)
}
|
/R-packages/hzc/R/utils.R
|
no_license
|
chungers/atp
|
R
| false
| false
| 922
|
r
|
# Aggregate a single-column xts price series into OHLC bars.
#   xts : xts object whose first column name supplies the output prefix
#   m   : number of base periods per bar (e.g. m = 5 with on = 'mins' -> 5-minute bars)
#   on  : base period passed to xts::endpoints ('mins', 'hours', ...)
as.ohlc <- function(xts, m=1, on='mins') {
  # Output columns are "<prefix>.Open" etc., e.g. 'AAPL.ASK' -> 'AAPL.ASK.Open'.
  prefix <- colnames(xts)[1]
  ohlc_names <- paste(prefix, c("Open", "High", "Low", "Close"), sep = ".")
  # Indices that end each base period.
  eps <- endpoints(xts, on)
  # Keep every m-th endpoint to form multi-period bars.
  if (m > 1) {
    eps <- eps[seq.int(1L, length(eps), m)]
  }
  first_obs <- period.apply(xts, INDEX = eps, FUN = head, 1)
  highs     <- period.apply(xts, INDEX = eps, FUN = max)
  lows      <- period.apply(xts, INDEX = eps, FUN = min)
  last_obs  <- period.apply(xts, INDEX = eps, FUN = tail, 1)
  bars <- merge(first_obs, highs, lows, last_obs)
  colnames(bars) <- ohlc_names
  return(bars)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/by_samples.R
\name{get_clinical_by_sample}
\alias{get_clinical_by_sample}
\title{Get clinical data by attribute, study ID and sample ID}
\usage{
get_clinical_by_sample(
study_id = NULL,
sample_id = NULL,
sample_study_pairs = NULL,
clinical_attribute = NULL,
base_url = NULL
)
}
\arguments{
\item{study_id}{A string indicating the study ID from which to pull data. If no study ID, will
guess the study ID based on your URL and inform. Only 1 study ID can be passed. If mutations/cna from
more than 1 study needed, see \code{sample_study_pairs}}
\item{sample_id}{a vector of sample IDs (character)}
\item{sample_study_pairs}{A dataframe with columns: \code{sample_id}, \code{study_id} and \code{molecular_profile_id} (optional). Variations in capitalization of column names are accepted.
This can be used in place of \code{sample_id}, \code{study_id}, \code{molecular_profile_id} arguments above if you
need to pull samples from several different studies at once. If passed, this will overwrite \code{sample_id}, \code{study_id}, and \code{molecular_profile_id} if those are also passed.}
\item{clinical_attribute}{one or more clinical attributes for your study.
If none provided, will return all attributes available for studies}
\item{base_url}{The database URL to query
If \code{NULL} will default to URL set with \verb{set_cbioportal_db(<your_db>)}}
}
\value{
a dataframe of a specific clinical attribute
}
\description{
Get clinical data by attribute, study ID and sample ID
}
\examples{
\dontrun{
get_clinical_by_sample(study_id = "acc_tcga", sample_id = "TCGA-OR-A5J2-01",
clinical_attribute = "CANCER_TYPE", base_url = 'www.cbioportal.org/api')
ex <- tibble::tribble(
~sample_id, ~study_id,
"P-0001453-T01-IM3", "blca_nmibc_2017",
"P-0002166-T01-IM3", "blca_nmibc_2017",
"P-0003238-T01-IM5", "blca_nmibc_2017",
"P-0000004-T01-IM3", "msk_impact_2017",
"P-0000023-T01-IM3", "msk_impact_2017")
x <- get_clinical_by_sample(sample_study_pairs = ex,
clinical_attribute = NULL, base_url = 'www.cbioportal.org/api')
}
}
|
/man/get_clinical_by_sample.Rd
|
permissive
|
karissawhiting/cbioportalR
|
R
| false
| true
| 2,105
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/by_samples.R
\name{get_clinical_by_sample}
\alias{get_clinical_by_sample}
\title{Get clinical data by attribute, study ID and sample ID}
\usage{
get_clinical_by_sample(
study_id = NULL,
sample_id = NULL,
sample_study_pairs = NULL,
clinical_attribute = NULL,
base_url = NULL
)
}
\arguments{
\item{study_id}{A string indicating the study ID from which to pull data. If no study ID, will
guess the study ID based on your URL and inform. Only 1 study ID can be passed. If mutations/cna from
more than 1 study needed, see \code{sample_study_pairs}}
\item{sample_id}{a vector of sample IDs (character)}
\item{sample_study_pairs}{A dataframe with columns: \code{sample_id}, \code{study_id} and \code{molecular_profile_id} (optional). Variations in capitalization of column names are accepted.
This can be used in place of \code{sample_id}, \code{study_id}, \code{molecular_profile_id} arguments above if you
need to pull samples from several different studies at once. If passed, this will overwrite \code{sample_id}, \code{study_id}, and \code{molecular_profile_id} if those are also passed.}
\item{clinical_attribute}{one or more clinical attributes for your study.
If none provided, will return all attributes available for studies}
\item{base_url}{The database URL to query
If \code{NULL} will default to URL set with \verb{set_cbioportal_db(<your_db>)}}
}
\value{
a dataframe of a specific clinical attribute
}
\description{
Get clinical data by attribute, study ID and sample ID
}
\examples{
\dontrun{
get_clinical_by_sample(study_id = "acc_tcga", sample_id = "TCGA-OR-A5J2-01",
clinical_attribute = "CANCER_TYPE", base_url = 'www.cbioportal.org/api')
ex <- tibble::tribble(
~sample_id, ~study_id,
"P-0001453-T01-IM3", "blca_nmibc_2017",
"P-0002166-T01-IM3", "blca_nmibc_2017",
"P-0003238-T01-IM5", "blca_nmibc_2017",
"P-0000004-T01-IM3", "msk_impact_2017",
"P-0000023-T01-IM3", "msk_impact_2017")
x <- get_clinical_by_sample(sample_study_pairs = ex,
clinical_attribute = NULL, base_url = 'www.cbioportal.org/api')
}
}
|
# Change to the directory of this script so relative paths resolve.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Test H2O GLM's "PlugValues" missing-value handling: plugging in the
# per-column means must reproduce the default fit's coefficients, while
# plugging in different values must change them.
test.glm.plug_values <- function() {
  cars <- h2o.importFile(locate("smalldata/junit/cars_20mpg.csv"))
  # Drop the free-text name column before modeling.
  cars$name <- NULL
  # Baseline GLM with default missing-value handling.
  glm1 <- h2o.glm(training_frame = cars, y = "cylinders")
  # Per-column means (NAs ignored), returned as an H2O frame.
  means <- h2o.mean(cars, na.rm = TRUE, return_frame = TRUE)
  glm2 <- h2o.glm(training_frame = cars, y = "cylinders", missing_values_handling="PlugValues", plug_values=means)
  # Same coefficients expected when the plug values are the column means.
  expect_equal(h2o.coef(glm1), h2o.coef(glm2))
  # Shifted/scaled plug values must yield different coefficients.
  glm3 <- h2o.glm(training_frame = cars, y = "cylinders", missing_values_handling="PlugValues", plug_values=0.1+2*means)
  expect_false(isTRUE(all.equal(h2o.coef(glm2), h2o.coef(glm3))))
}
doTest("Test Plug Values in GLM", test.glm.plug_values)
|
/h2o-r/tests/testdir_algos/glm/runit_GLM_plug_values.R
|
permissive
|
h2oai/h2o-3
|
R
| false
| false
| 789
|
r
|
# Change to the directory of this script so relative paths resolve.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# Test H2O GLM's "PlugValues" missing-value handling: plugging in the
# per-column means must reproduce the default fit's coefficients, while
# plugging in different values must change them.
test.glm.plug_values <- function() {
  cars <- h2o.importFile(locate("smalldata/junit/cars_20mpg.csv"))
  # Drop the free-text name column before modeling.
  cars$name <- NULL
  # Baseline GLM with default missing-value handling.
  glm1 <- h2o.glm(training_frame = cars, y = "cylinders")
  # Per-column means (NAs ignored), returned as an H2O frame.
  means <- h2o.mean(cars, na.rm = TRUE, return_frame = TRUE)
  glm2 <- h2o.glm(training_frame = cars, y = "cylinders", missing_values_handling="PlugValues", plug_values=means)
  # Same coefficients expected when the plug values are the column means.
  expect_equal(h2o.coef(glm1), h2o.coef(glm2))
  # Shifted/scaled plug values must yield different coefficients.
  glm3 <- h2o.glm(training_frame = cars, y = "cylinders", missing_values_handling="PlugValues", plug_values=0.1+2*means)
  expect_false(isTRUE(all.equal(h2o.coef(glm2), h2o.coef(glm3))))
}
doTest("Test Plug Values in GLM", test.glm.plug_values)
|
library(deldir)
library(sf)
library(sp)
library(tidyverse)
# Idea: sample pi in groups of 3 digits; each digit is an angle, the sum is
# the size (or the 1st number is the size).
piChar <- read.table("data/PI_10000.txt", stringsAsFactors=F, colClasses = c("character"))[1,1]
# Split the digit string into a numeric vector, one element per digit.
piVec <- as.numeric(strsplit(piChar, "")[[1]])
################################################
# Random points to experiment with Voronoi/Delaunay tessellation.
data <- data.frame(x = runif(20), y = runif(20))
x <- data[,"x"]
y <- data[,"y"]
pts = SpatialPointsDataFrame(cbind(x, y), data, match.ID = T)
# Voronoi tiles of the point set, one tile per input point.
vor_desc = tile.list(deldir(pts@coords[, 1], pts@coords[, 2],
                            suppressMsge = TRUE))
# Turn each tile into a closed sp Polygons object.
vor_polygons <- lapply(1:(length(vor_desc)), function(i) {
  tmp <- cbind(vor_desc[[i]]$x, vor_desc[[i]]$y)
  # Close the ring by repeating the first vertex.
  tmp <- rbind(tmp, tmp[1, ])
  Polygons(list(Polygon(tmp)), ID = i)
})
# Align the data rows with the polygon IDs before building the SPDF.
rownames(pts@data) = sapply(slot(SpatialPolygons(vor_polygons),
                                 "polygons"), slot, "ID")
vor_spdf = SpatialPolygonsDataFrame(SpatialPolygons(vor_polygons),
                                    data = pts@data)
# NOTE(review): fortify()'s argument is `region`, not `id` -- `id` is
# likely ignored here; confirm intent.
polys <- fortify(vor_spdf, id = "ID")
ggplot(polys, aes(x = long, y = lat)) + geom_polygon()
# NOTE(review): `points` here resolves to the base graphics function, not
# a data object -- this was probably meant to be `pts@coords` (or `data`).
triangles<-triang.list(deldir(points, plotit = TRUE))
for(tri in triangles) {
  polygon(as.list(tri))
}
# NOTE(review): dplyr::rbind_list() was deprecated/defunct in favor of
# bind_rows() -- confirm the dplyr version in use.
tri_list <- rbind_list(as.list(triangles))
ggplot(data = tri_list, aes(x = x, y = y)) + geom_polygon()
?tile.list
|
/triangles_test.R
|
no_license
|
will-r-chase/artinpi
|
R
| false
| false
| 1,355
|
r
|
library(deldir)
library(sf)
library(sp)
library(tidyverse)
# Idea: sample pi in groups of 3 digits; each digit is an angle, the sum is
# the size (or the 1st number is the size).
piChar <- read.table("data/PI_10000.txt", stringsAsFactors=F, colClasses = c("character"))[1,1]
# Split the digit string into a numeric vector, one element per digit.
piVec <- as.numeric(strsplit(piChar, "")[[1]])
################################################
# Random points to experiment with Voronoi/Delaunay tessellation.
data <- data.frame(x = runif(20), y = runif(20))
x <- data[,"x"]
y <- data[,"y"]
pts = SpatialPointsDataFrame(cbind(x, y), data, match.ID = T)
# Voronoi tiles of the point set, one tile per input point.
vor_desc = tile.list(deldir(pts@coords[, 1], pts@coords[, 2],
                            suppressMsge = TRUE))
# Turn each tile into a closed sp Polygons object.
vor_polygons <- lapply(1:(length(vor_desc)), function(i) {
  tmp <- cbind(vor_desc[[i]]$x, vor_desc[[i]]$y)
  # Close the ring by repeating the first vertex.
  tmp <- rbind(tmp, tmp[1, ])
  Polygons(list(Polygon(tmp)), ID = i)
})
# Align the data rows with the polygon IDs before building the SPDF.
rownames(pts@data) = sapply(slot(SpatialPolygons(vor_polygons),
                                 "polygons"), slot, "ID")
vor_spdf = SpatialPolygonsDataFrame(SpatialPolygons(vor_polygons),
                                    data = pts@data)
# NOTE(review): fortify()'s argument is `region`, not `id` -- `id` is
# likely ignored here; confirm intent.
polys <- fortify(vor_spdf, id = "ID")
ggplot(polys, aes(x = long, y = lat)) + geom_polygon()
# NOTE(review): `points` here resolves to the base graphics function, not
# a data object -- this was probably meant to be `pts@coords` (or `data`).
triangles<-triang.list(deldir(points, plotit = TRUE))
for(tri in triangles) {
  polygon(as.list(tri))
}
# NOTE(review): dplyr::rbind_list() was deprecated/defunct in favor of
# bind_rows() -- confirm the dplyr version in use.
tri_list <- rbind_list(as.list(triangles))
ggplot(data = tri_list, aes(x = x, y = y)) + geom_polygon()
?tile.list
|
############# R Functions for parsing NCCA ASCII header information ##############
# 3-10-2014 Raymond Nelson
#
#
# this script contains the following functions
#
# eventCSVNames()
# to make a character vector of the names of "*events.csv" files
#
# eventMatrix()
# to make a csv table of the stimulus events for all charts
# also make a csv table of the names of charts containing CQs and RQs
#
#########################################
library(stringr)
##########
# Build a character vector of the names of all "*_events.csv" files in the
# current working directory.
#
# x: regular expression identifying the events.csv files.
#    NOTE: the original default, "*_events.csv$", used a glob-style leading
#    "*" that is not valid regex syntax; "_events\\.csv$" matches the same
#    files and is well-formed (the "." is also escaped so it no longer
#    matches any character).
#
# Side effect: also publishes the result as the global variable
# `eventFiles`, which the other functions in this script read.
eventCSVNames <- function(x = "_events\\.csv$") {
  y <- list.files(path = ".",
                  pattern = x,
                  all.files = FALSE,
                  full.names = FALSE,
                  recursive = FALSE,
                  ignore.case = FALSE,
                  include.dirs = FALSE)
  # keep the historical side effect for downstream code
  assign("eventFiles", y, pos = 1)
  return(y)
} # eventCSVNames end
# Initialize the global list of event files on load.
eventFiles <- eventCSVNames()
#########################################
# Derive the unique exam names from a vector of "*_events.csv" file names.
#
# Each file name is assumed to look like "<exam><6-char chart id>_events.csv";
# the "_events.csv" suffix and the 6-character chart identifier are stripped
# and the remaining prefixes are de-duplicated.
#
# x: character vector of file names (defaults to the global `eventFiles`
#    created by eventCSVNames()).
# Returns a character vector of unique exam-name prefixes.
uniqueExamsE <- function(x = eventFiles) {
  # Strip the "_events.csv" suffix.  The original used
  # stringr::str_replace_all() with the malformed pattern "*_events.csv$"
  # (a leading "*" is invalid regex); base sub() with an escaped pattern
  # performs the intended replacement and drops the stringr dependency.
  y <- sub("_events\\.csv$", "", x)
  # Drop the trailing 6-character chart identifier (substr, like
  # stringr::str_sub, returns "" when the string is shorter than 6).
  y <- substr(y, 1, nchar(y) - 6)
  unique(y)
}
uniqueExams <- uniqueExamsE()
#########################################
# Build a CSV matrix of all stimulus events across charts for each exam.
#
# Side effects (files written to the current working directory):
#   *_CQTcharts.csv   - names of charts containing >= 2 CQs and >= 2 RQs
#   *_eventMatrix.csv - presentation-order matrix of events for those charts
#
# x: pattern identifying the "*_events.csv" files.
#    NOTE(review): the leading "*" is glob syntax, not valid regex;
#    "_events\\.csv$" is presumably the intended pattern -- confirm.
eventMatrix <- function(x = "*_events.csv$") {
  # function to make a csv table of all stimulus events for all charts for each exam
  # will also parse the total unique events and missing events
  # x needs to be the name of a character string to identify the events.csv files
  #
  eventFiles <- list.files(path = ".", pattern = x,
                           all.files = FALSE,
                           full.names = FALSE,
                           recursive = FALSE,
                           ignore.case = FALSE,
                           include.dirs = FALSE)
  # assign("eventFiles", eventFiles, pos = 1)
  #
  # get the vector of file names for events
  # eventFiles <- get(eventFiles, pos = 1)
  #
  # a loop to read the info from all CSV files in the eventVector
  numberEvents <- NULL # the number of stim events in each chart in the loop
  eventVectorNames <- NULL # used in the following loop
  for (i in 1:length(eventFiles)) {
    fileName <- eventFiles[i]
    CSVdata <- read.csv(fileName)
    # a nested loop to make a vector of event labels for each chart
    # (labels are taken from column 2 of each events.csv file)
    eventLabels <- NULL
    for (j in 1:nrow(CSVdata)) {
      eventLabels <- c(eventLabels, as.character(CSVdata[j,2]))
    } # end of nested loop to read event labels for each chart
    # remove "" elements
    eventLabels <- eventLabels[which(eventLabels != "")]
    # determine the number of events and concatenate it to a vector
    numberEvents <- as.character(c(numberEvents, length(eventLabels)))
    # create the unique eventVector name by stripping the ".csv" extension
    eventVectorName <- str_sub(fileName, 1, nchar(fileName) - 4)
    # eventVectorName <- paste("eventVector", i, sep = "")
    # eventVectorName <- eventFiles[i]
    # assign the events to a dynamically named vector in this function's frame
    assign(eventVectorName, eventLabels)
    # make a vector of unique event vector names
    eventVectorNames <- c(eventVectorNames, eventVectorName)
  } # end of loop to read each "_events.csv$" file for events
  #
  # determine the max length of the eventVector for all charts
  # maxEvents <- 0
  maxEvents <- max(as.integer(numberEvents))
  numberEventVectors <- length(eventVectorNames)
  #
  # a loop to pad every event vector with "-" up to the max length
  for (k in 1:numberEventVectors) {
    tempVector <- get(eventVectorNames[k])
    tempVector <- c(tempVector, rep("-", maxEvents - length(tempVector)))
    # assign(paste("eventVector", k, sep = ""), tempVector)
    assign(eventVectorNames[k], tempVector)
    # maybe write the csv to include the padded events
  } # end loop to fix the length of all event vectors to the max for each exam
  #
  #
  ############## maybe move the rest of this to a separate function
  # a loop to retain charts that include min 2 CQs and min 2 RQs
  # NOTE(review): grep("R", ...) / grep("C", ...) with ignore.case = TRUE
  # match ANY label containing that letter, not just question labels like
  # "R1" or "C3" -- confirm other labels cannot contain stray Rs/Cs.
  retainEventVectorNames <- NULL
  allEventNames <- NULL
  for (l in 1:numberEventVectors) {
    eventVectorName <- eventVectorNames[l]
    RQs <- length(grep("R", get(eventVectorName),
                       ignore.case = TRUE,
                       perl = FALSE,
                       value = FALSE,
                       fixed = FALSE,
                       useBytes = FALSE,
                       invert = FALSE)) >= 2 # make a logical scalar for RQs >= 2
    #
    CQs <- length(grep("C", get(eventVectorName),
                       ignore.case = TRUE,
                       perl = FALSE,
                       value = FALSE,
                       fixed = FALSE,
                       useBytes = FALSE,
                       invert = FALSE)) >= 2 # make a logical scalar for CQs >= 2
    #
    if (CQs & RQs) {
      # eventVectorName <- paste("eventVector", l, sep = "") # change the eventVectorName to get the padded vector
      retainEventVectorNames <- c(retainEventVectorNames, eventVectorNames[l])
      # make a long vector of all events from all charts with CQs and RQs
      allEventNames <- c(allEventNames, get(eventVectorName))
      # allEventNames <- c(allEventNames, get(paste("eventVector", 1, sep= "")))
    }
  } # end loop retain charts with min 2 CQs and min 2 RQs
  #
  #
  # numberCharts <- length(retainEventVectorNames)
  #
  # make a vector of unique event names (still includes the "-" padding)
  uniqueEventNames <- unique(allEventNames)
  numberUniqueEvents <- length(uniqueEventNames)
  #
  # loop to make a combined matrix of event labels for charts with >=2 RQs and >= 2 CQs
  eventMatrixOut <- NULL
  for (m in 1:length(retainEventVectorNames)) {
    # nested loop to verify each event in each chart
    # (records the position(s) of each unique event within the chart)
    eventVector <- NULL
    for (n in 1:numberUniqueEvents) {
      includedEvents <- which(uniqueEventNames[n] == get(retainEventVectorNames[m]))
      eventVector <- c(eventVector, includedEvents)
      # eventMatrix <- rbind(eventMatrix, includedEvents)
    }
    #
    # construct the eventMatrix
    # NOTE(review): rbind() assumes every eventVector has the same length;
    # charts with missing or repeated events would make rows ragged and
    # trigger recycling -- confirm inputs are uniform.
    eventMatrixOut <- rbind(eventMatrixOut, eventVector)
    #
    # retainChartNames <- tempName
    # retainChartNames <- c(retainChartNames, tempName)
    #
  }
  #
  # derive the output file name from the first input file name
  # (the fixed-width trim of 17 characters assumes a consistent
  # "_NN_events.csv"-style suffix -- TODO confirm)
  retainEventVectorNamesCSV <- paste(strtrim(eventFiles[1], nchar(eventFiles[1]) - 17), "_CQTcharts.csv", sep = "")
  write.table(retainEventVectorNames, file = retainEventVectorNamesCSV,
              append = FALSE,
              quote = TRUE,
              sep = ",",
              eol = "\n",
              na = "NA",
              dec = ".",
              row.names = FALSE,
              col.names = FALSE,
              qmethod = "double",
              fileEncoding = "UTF-8")
  #
  #
  # set the column and row names for the matrix that describes the sequence of stimulus presentation
  rownames(eventMatrixOut) <- retainEventVectorNames
  # remove the padding event holders
  # NOTE(review): after dropping "-", uniqueEventNames may be shorter than
  # ncol(eventMatrixOut); the next assignment would then fail -- confirm.
  uniqueEventNames <- uniqueEventNames[c(which(uniqueEventNames != "-"))]
  colnames(eventMatrixOut) <- uniqueEventNames
  # set the CSV file name
  CSVName <- paste(strtrim(eventFiles[1], nchar(eventFiles[1]) - 17), "_eventMatrix.csv", sep = "")
  # no need to assign a unique name because this will need to be done for all exams
  # assign(CSVName, eventMatrix) # using assign() allows the use of the variable name that is held by another variable
  # need to save the CSV with a unique name for each exam
  # save the .csv file (note: row names set above are NOT written out)
  write.table(eventMatrixOut, file = CSVName,
              append = FALSE,
              quote = TRUE,
              sep = ",",
              eol = "\n",
              na = "NA",
              dec = ".",
              row.names = FALSE,
              col.names = TRUE,
              qmethod = "double",
              fileEncoding = "UTF-8")
  #
  # rm(list = eventFiles)
  #
} # eventMatrix end
# eventMatrix()
|
/backup/eventParse.r
|
no_license
|
raymondnelson/NCCA_ASCII_Parse
|
R
| false
| false
| 8,090
|
r
|
############# R Functions for parsing NCCA ASCII header information ##############
# 3-10-2014 Raymond Nelson
#
#
# this script contains the following functions
#
# eventCSVNames()
# to make a character vector of the names of "*events.csv" files
#
# eventMatrix()
# to make a csv table of the stimulus events for all charts
# also make a csv table of the names of charts containing CQs and RQs
#
#########################################
library(stringr)
##########
# Build a character vector of the names of all "*_events.csv" files in the
# current working directory.
#
# x: regular expression identifying the events.csv files.
#    NOTE: the original default, "*_events.csv$", used a glob-style leading
#    "*" that is not valid regex syntax; "_events\\.csv$" matches the same
#    files and is well-formed (the "." is also escaped so it no longer
#    matches any character).
#
# Side effect: also publishes the result as the global variable
# `eventFiles`, which the other functions in this script read.
eventCSVNames <- function(x = "_events\\.csv$") {
  y <- list.files(path = ".",
                  pattern = x,
                  all.files = FALSE,
                  full.names = FALSE,
                  recursive = FALSE,
                  ignore.case = FALSE,
                  include.dirs = FALSE)
  # keep the historical side effect for downstream code
  assign("eventFiles", y, pos = 1)
  return(y)
} # eventCSVNames end
# Initialize the global list of event files on load.
eventFiles <- eventCSVNames()
#########################################
# Derive the unique exam names from a vector of "*_events.csv" file names.
#
# Each file name is assumed to look like "<exam><6-char chart id>_events.csv";
# the "_events.csv" suffix and the 6-character chart identifier are stripped
# and the remaining prefixes are de-duplicated.
#
# x: character vector of file names (defaults to the global `eventFiles`
#    created by eventCSVNames()).
# Returns a character vector of unique exam-name prefixes.
uniqueExamsE <- function(x = eventFiles) {
  # Strip the "_events.csv" suffix.  The original used
  # stringr::str_replace_all() with the malformed pattern "*_events.csv$"
  # (a leading "*" is invalid regex); base sub() with an escaped pattern
  # performs the intended replacement and drops the stringr dependency.
  y <- sub("_events\\.csv$", "", x)
  # Drop the trailing 6-character chart identifier (substr, like
  # stringr::str_sub, returns "" when the string is shorter than 6).
  y <- substr(y, 1, nchar(y) - 6)
  unique(y)
}
uniqueExams <- uniqueExamsE()
#########################################
# Build a CSV matrix of all stimulus events across charts for each exam.
#
# Side effects (files written to the current working directory):
#   *_CQTcharts.csv   - names of charts containing >= 2 CQs and >= 2 RQs
#   *_eventMatrix.csv - presentation-order matrix of events for those charts
#
# x: pattern identifying the "*_events.csv" files.
#    NOTE(review): the leading "*" is glob syntax, not valid regex;
#    "_events\\.csv$" is presumably the intended pattern -- confirm.
eventMatrix <- function(x = "*_events.csv$") {
  # function to make a csv table of all stimulus events for all charts for each exam
  # will also parse the total unique events and missing events
  # x needs to be the name of a character string to identify the events.csv files
  #
  eventFiles <- list.files(path = ".", pattern = x,
                           all.files = FALSE,
                           full.names = FALSE,
                           recursive = FALSE,
                           ignore.case = FALSE,
                           include.dirs = FALSE)
  # assign("eventFiles", eventFiles, pos = 1)
  #
  # get the vector of file names for events
  # eventFiles <- get(eventFiles, pos = 1)
  #
  # a loop to read the info from all CSV files in the eventVector
  numberEvents <- NULL # the number of stim events in each chart in the loop
  eventVectorNames <- NULL # used in the following loop
  for (i in 1:length(eventFiles)) {
    fileName <- eventFiles[i]
    CSVdata <- read.csv(fileName)
    # a nested loop to make a vector of event labels for each chart
    # (labels are taken from column 2 of each events.csv file)
    eventLabels <- NULL
    for (j in 1:nrow(CSVdata)) {
      eventLabels <- c(eventLabels, as.character(CSVdata[j,2]))
    } # end of nested loop to read event labels for each chart
    # remove "" elements
    eventLabels <- eventLabels[which(eventLabels != "")]
    # determine the number of events and concatenate it to a vector
    numberEvents <- as.character(c(numberEvents, length(eventLabels)))
    # create the unique eventVector name by stripping the ".csv" extension
    eventVectorName <- str_sub(fileName, 1, nchar(fileName) - 4)
    # eventVectorName <- paste("eventVector", i, sep = "")
    # eventVectorName <- eventFiles[i]
    # assign the events to a dynamically named vector in this function's frame
    assign(eventVectorName, eventLabels)
    # make a vector of unique event vector names
    eventVectorNames <- c(eventVectorNames, eventVectorName)
  } # end of loop to read each "_events.csv$" file for events
  #
  # determine the max length of the eventVector for all charts
  # maxEvents <- 0
  maxEvents <- max(as.integer(numberEvents))
  numberEventVectors <- length(eventVectorNames)
  #
  # a loop to pad every event vector with "-" up to the max length
  for (k in 1:numberEventVectors) {
    tempVector <- get(eventVectorNames[k])
    tempVector <- c(tempVector, rep("-", maxEvents - length(tempVector)))
    # assign(paste("eventVector", k, sep = ""), tempVector)
    assign(eventVectorNames[k], tempVector)
    # maybe write the csv to include the padded events
  } # end loop to fix the length of all event vectors to the max for each exam
  #
  #
  ############## maybe move the rest of this to a separate function
  # a loop to retain charts that include min 2 CQs and min 2 RQs
  # NOTE(review): grep("R", ...) / grep("C", ...) with ignore.case = TRUE
  # match ANY label containing that letter, not just question labels like
  # "R1" or "C3" -- confirm other labels cannot contain stray Rs/Cs.
  retainEventVectorNames <- NULL
  allEventNames <- NULL
  for (l in 1:numberEventVectors) {
    eventVectorName <- eventVectorNames[l]
    RQs <- length(grep("R", get(eventVectorName),
                       ignore.case = TRUE,
                       perl = FALSE,
                       value = FALSE,
                       fixed = FALSE,
                       useBytes = FALSE,
                       invert = FALSE)) >= 2 # make a logical scalar for RQs >= 2
    #
    CQs <- length(grep("C", get(eventVectorName),
                       ignore.case = TRUE,
                       perl = FALSE,
                       value = FALSE,
                       fixed = FALSE,
                       useBytes = FALSE,
                       invert = FALSE)) >= 2 # make a logical scalar for CQs >= 2
    #
    if (CQs & RQs) {
      # eventVectorName <- paste("eventVector", l, sep = "") # change the eventVectorName to get the padded vector
      retainEventVectorNames <- c(retainEventVectorNames, eventVectorNames[l])
      # make a long vector of all events from all charts with CQs and RQs
      allEventNames <- c(allEventNames, get(eventVectorName))
      # allEventNames <- c(allEventNames, get(paste("eventVector", 1, sep= "")))
    }
  } # end loop retain charts with min 2 CQs and min 2 RQs
  #
  #
  # numberCharts <- length(retainEventVectorNames)
  #
  # make a vector of unique event names (still includes the "-" padding)
  uniqueEventNames <- unique(allEventNames)
  numberUniqueEvents <- length(uniqueEventNames)
  #
  # loop to make a combined matrix of event labels for charts with >=2 RQs and >= 2 CQs
  eventMatrixOut <- NULL
  for (m in 1:length(retainEventVectorNames)) {
    # nested loop to verify each event in each chart
    # (records the position(s) of each unique event within the chart)
    eventVector <- NULL
    for (n in 1:numberUniqueEvents) {
      includedEvents <- which(uniqueEventNames[n] == get(retainEventVectorNames[m]))
      eventVector <- c(eventVector, includedEvents)
      # eventMatrix <- rbind(eventMatrix, includedEvents)
    }
    #
    # construct the eventMatrix
    # NOTE(review): rbind() assumes every eventVector has the same length;
    # charts with missing or repeated events would make rows ragged and
    # trigger recycling -- confirm inputs are uniform.
    eventMatrixOut <- rbind(eventMatrixOut, eventVector)
    #
    # retainChartNames <- tempName
    # retainChartNames <- c(retainChartNames, tempName)
    #
  }
  #
  # derive the output file name from the first input file name
  # (the fixed-width trim of 17 characters assumes a consistent
  # "_NN_events.csv"-style suffix -- TODO confirm)
  retainEventVectorNamesCSV <- paste(strtrim(eventFiles[1], nchar(eventFiles[1]) - 17), "_CQTcharts.csv", sep = "")
  write.table(retainEventVectorNames, file = retainEventVectorNamesCSV,
              append = FALSE,
              quote = TRUE,
              sep = ",",
              eol = "\n",
              na = "NA",
              dec = ".",
              row.names = FALSE,
              col.names = FALSE,
              qmethod = "double",
              fileEncoding = "UTF-8")
  #
  #
  # set the column and row names for the matrix that describes the sequence of stimulus presentation
  rownames(eventMatrixOut) <- retainEventVectorNames
  # remove the padding event holders
  # NOTE(review): after dropping "-", uniqueEventNames may be shorter than
  # ncol(eventMatrixOut); the next assignment would then fail -- confirm.
  uniqueEventNames <- uniqueEventNames[c(which(uniqueEventNames != "-"))]
  colnames(eventMatrixOut) <- uniqueEventNames
  # set the CSV file name
  CSVName <- paste(strtrim(eventFiles[1], nchar(eventFiles[1]) - 17), "_eventMatrix.csv", sep = "")
  # no need to assign a unique name because this will need to be done for all exams
  # assign(CSVName, eventMatrix) # using assign() allows the use of the variable name that is held by another variable
  # need to save the CSV with a unique name for each exam
  # save the .csv file (note: row names set above are NOT written out)
  write.table(eventMatrixOut, file = CSVName,
              append = FALSE,
              quote = TRUE,
              sep = ",",
              eol = "\n",
              na = "NA",
              dec = ".",
              row.names = FALSE,
              col.names = TRUE,
              qmethod = "double",
              fileEncoding = "UTF-8")
  #
  # rm(list = eventFiles)
  #
} # eventMatrix end
# eventMatrix()
|
## pre code {
## ----style, echo = FALSE, results = 'asis'-------------------------------
BiocStyle::markdown()
options(width=60, max.print=1000)
knitr::opts_chunk$set(
eval=as.logical(Sys.getenv("KNITR_EVAL", "TRUE")),
cache=as.logical(Sys.getenv("KNITR_CACHE", "TRUE")),
tidy.opts=list(width.cutoff=60), tidy=TRUE)
## ----setup, echo=FALSE, messages=FALSE, warnings=FALSE-------------------
suppressPackageStartupMessages({
library(systemPipeR)
library(BiocParallel)
library(Biostrings)
library(Rsamtools)
library(GenomicRanges)
library(ggplot2)
library(GenomicAlignments)
library(ShortRead)
library(ape)
library(batchtools)
})
## ----genVAR_workflow, eval=FALSE-----------------------------------------
## library(systemPipeRdata)
## genWorkenvir(workflow="varseq")
## setwd("varseq")
## Rscript -e "systemPipeRdata::genWorkenvir(workflow='varseq')"
## ----closeR, eval=FALSE--------------------------------------------------
## q("no") # closes R session on head node
## srun --x11 --partition=short --mem=2gb --cpus-per-task 4 --ntasks 1 --time 2:00:00 --pty bash -l
## ----r_environment, eval=FALSE-------------------------------------------
## system("hostname") # should return name of a compute node starting with i or c
## getwd() # checks current working directory of R session
## dir() # returns content of current working directory
## ----load_systempiper, eval=TRUE-----------------------------------------
library(systemPipeR)
## ----load_custom_fct, eval=FALSE-----------------------------------------
## source("systemPipeVARseq_Fct.R")
## ----load_targets_file, eval=TRUE----------------------------------------
targetspath <- system.file("extdata", "targetsPE.txt", package="systemPipeR")
targets <- read.delim(targetspath, comment.char = "#")
targets[1:4, 1:4]
## ----preprocess_reads, eval=FALSE----------------------------------------
## args <- systemArgs(sysma="param/trimPE.param", mytargets="targetsPE.txt")[1:4]
## # Note: subsetting!
## filterFct <- function(fq, cutoff=20, Nexceptions=0) {
## qcount <- rowSums(as(quality(fq), "matrix") <= cutoff, na.rm=TRUE)
## fq[qcount <= Nexceptions]
## # Retains reads where Phred scores are >= cutoff with N exceptions
## }
## preprocessReads(args=args, Fct="filterFct(fq, cutoff=20, Nexceptions=0)",
## batchsize=100000)
## writeTargetsout(x=args, file="targets_PEtrim.txt", overwrite=TRUE)
## ----fastq_report, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/tophat.param", mytargets="targets.txt")
## fqlist <- seeFastq(fastq=infile1(args), batchsize=100000, klength=8)
## pdf("./results/fastqReport.pdf", height=18, width=4*length(fqlist))
## seeFastqPlot(fqlist)
## dev.off()
## ----load_sysargs, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/bwa.param", mytargets="targets.txt")
## sysargs(args)[1] # Command-line parameters for first FASTQ file
## ----bwa_serial, eval=FALSE----------------------------------------------
## moduleload(modules(args))
## system("bwa index -a bwtsw ./data/tair10.fasta")
## bampaths <- runCommandline(args=args)
## writeTargetsout(x=args, file="targets_bam.txt", overwrite=TRUE)
## ----bwa_parallel, eval=FALSE--------------------------------------------
## moduleload(modules(args))
## system("bwa index -a bwtsw ./data/tair10.fasta")
## resources <- list(walltime=120, ntasks=1, ncpus=cores(args), memory=1024)
## reg <- clusterRun(args, conffile = ".batchtools.conf.R", Njobs=18, template = "batchtools.slurm.tmpl", runid="01", resourceList=resources)
## getStatus(reg=reg)
## waitForJobs(reg=reg)
## writeTargetsout(x=args, file="targets_bam.txt", overwrite=TRUE)
## ----check_file_presence, eval=FALSE-------------------------------------
## file.exists(outpaths(args))
## ----gsnap_parallel, eval=FALSE------------------------------------------
## library(gmapR); library(BiocParallel); library(batchtools)
## args <- systemArgs(sysma="param/gsnap.param", mytargets="targetsPE.txt")
## gmapGenome <- GmapGenome(systemPipeR::reference(args), directory="data",
## name="gmap_tair10chr", create=TRUE)
## f <- function(x) {
## library(gmapR); library(systemPipeR)
## args <- systemArgs(sysma="param/gsnap.param", mytargets="targetsPE.txt")
## gmapGenome <- GmapGenome(reference(args), directory="data", name="gmap_tair10chr", create=FALSE)
## p <- GsnapParam(genome=gmapGenome, unique_only=TRUE, molecule="DNA", max_mismatches=3)
## o <- gsnap(input_a=infile1(args)[x], input_b=infile2(args)[x], params=p, output=outfile1(args)[x])
## }
## resources <- list(walltime=120, ntasks=1, ncpus=cores(args), memory=1024)
## param <- BatchtoolsParam(workers = 4, cluster = "slurm", template = "batchtools.slurm.tmpl", resources = resources)
## d <- bplapply(seq(along=args), f, BPPARAM = param)
## writeTargetsout(x=args, file="targets_gsnap_bam.txt", overwrite=TRUE)
## ----align_stats, eval=FALSE---------------------------------------------
## read_statsDF <- alignStats(args=args)
## write.table(read_statsDF, "results/alignStats.xls", row.names=FALSE, quote=FALSE, sep="\t")
## ----symbolic_links, eval=FALSE------------------------------------------
## symLink2bam(sysargs=args, htmldir=c("~/.html/", "projects/gen242/"),
## urlbase="http://biocluster.ucr.edu/~tgirke/",
## urlfile="./results/IGVurl.txt")
## ----run_gatk, eval=FALSE------------------------------------------------
## moduleload("picard/1.130"); moduleload("samtools/1.3")
## system("picard CreateSequenceDictionary R=./data/tair10.fasta O=./data/tair10.dict")
## system("samtools faidx data/tair10.fasta")
## args <- systemArgs(sysma="param/gatk.param", mytargets="targets_bam.txt")
## resources <- list(walltime=120, ntasks=1, ncpus=4, memory=1024)
## reg <- clusterRun(args, conffile = ".batchtools.conf.R", Njobs=18, template = "batchtools.slurm.tmpl", runid="01", resourceList=resources)
## getStatus(reg=reg)
## waitForJobs(reg=reg)
## # unlink(outfile1(args), recursive = TRUE, force = TRUE)
## writeTargetsout(x=args, file="targets_gatk.txt", overwrite=TRUE)
## ----run_bcftools, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/sambcf.param", mytargets="targets_bam.txt")
## resources <- list(walltime=120, ntasks=1, ncpus=4, memory=1024)
## reg <- clusterRun(args, conffile = ".batchtools.conf.R", Njobs=18, template = "batchtools.slurm.tmpl", runid="01", resourceList=resources)
## getStatus(reg=reg)
## waitForJobs(reg=reg)
## # unlink(outfile1(args), recursive = TRUE, force = TRUE)
## writeTargetsout(x=args, file="targets_sambcf.txt", overwrite=TRUE)
## ----run_varianttools, eval=FALSE----------------------------------------
## library(gmapR); library(BiocParallel); library(batchtools)
## args <- systemArgs(sysma="param/vartools.param",
## mytargets="targets_gsnap_bam.txt")
## f <- function(x) {
## library(VariantTools); library(gmapR); library(systemPipeR)
## args <- systemArgs(sysma="param/vartools.param", mytargets="targets_gsnap_bam.txt")
## gmapGenome <- GmapGenome(systemPipeR::reference(args), directory="data", name="gmap_tair10chr", create=FALSE)
## tally.param <- TallyVariantsParam(gmapGenome, high_base_quality = 23L, indels = TRUE)
## bfl <- BamFileList(infile1(args)[x], index=character())
## var <- callVariants(bfl[[1]], tally.param)
## sampleNames(var) <- names(bfl)
## writeVcf(asVCF(var), outfile1(args)[x], index = TRUE)
## }
## resources <- list(walltime=120, ntasks=1, ncpus=cores(args), memory=1024)
## param <- BatchtoolsParam(workers = 4, cluster = "slurm", template = "batchtools.slurm.tmpl", resources = resources)
## d <- bplapply(seq(along=args), f, BPPARAM = param)
## writeTargetsout(x=args, file="targets_vartools.txt", overwrite=TRUE)
## ----inspect_vcf, eval=FALSE---------------------------------------------
## library(VariantAnnotation)
## args <- systemArgs(sysma="param/filter_gatk.param", mytargets="targets_gatk.txt")
## vcf <- readVcf(infile1(args)[1], "A. thaliana")
## vcf
## vr <- as(vcf, "VRanges")
## vr
## ----filter_gatk, eval=FALSE---------------------------------------------
## library(VariantAnnotation)
## library(BBmisc) # Defines suppressAll()
## args <- systemArgs(sysma="param/filter_gatk.param", mytargets="targets_gatk.txt")[1:4]
## filter <- "totalDepth(vr) >= 2 & (altDepth(vr) / totalDepth(vr) >= 0.8) & rowSums(softFilterMatrix(vr))>=1"
## # filter <- "totalDepth(vr) >= 20 & (altDepth(vr) / totalDepth(vr) >= 0.8) & rowSums(softFilterMatrix(vr))==6"
## suppressAll(filterVars(args, filter, varcaller="gatk", organism="A. thaliana"))
## writeTargetsout(x=args, file="targets_gatk_filtered.txt", overwrite=TRUE)
## ----filter_bcftools, eval=FALSE-----------------------------------------
## args <- systemArgs(sysma="param/filter_sambcf.param", mytargets="targets_sambcf.txt")[1:4]
## filter <- "rowSums(vr) >= 2 & (rowSums(vr[,3:4])/rowSums(vr[,1:4]) >= 0.8)"
## # filter <- "rowSums(vr) >= 20 & (rowSums(vr[,3:4])/rowSums(vr[,1:4]) >= 0.8)"
## suppressAll(filterVars(args, filter, varcaller="bcftools", organism="A. thaliana"))
## writeTargetsout(x=args, file="targets_sambcf_filtered.txt", overwrite=TRUE)
## ----filter_varianttools, eval=FALSE-------------------------------------
## library(VariantAnnotation)
## library(BBmisc) # Defines suppressAll()
## args <- systemArgs(sysma="param/filter_vartools.param", mytargets="targets_vartools.txt")[1:4]
## filter <- "(values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 2 & (values(vr)$n.read.pos / (values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 0.8)"
## # filter <- "(values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 20 & (values(vr)$n.read.pos / (values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 0.8)"
## filterVars(args, filter, varcaller="vartools", organism="A. thaliana")
## writeTargetsout(x=args, file="targets_vartools_filtered.txt", overwrite=TRUE)
## ----check_filter, eval=FALSE--------------------------------------------
## length(as(readVcf(infile1(args)[1], genome="Ath"), "VRanges")[,1])
## length(as(readVcf(outpaths(args)[1], genome="Ath"), "VRanges")[,1])
## ----annotate_basics, eval=FALSE-----------------------------------------
## library("GenomicFeatures")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## vcf <- readVcf(infile1(args)[1], "A. thaliana")
## locateVariants(vcf, txdb, CodingVariants())
## ----annotate_basics_non-synon, eval=FALSE-------------------------------
## fa <- FaFile(systemPipeR::reference(args))
## predictCoding(vcf, txdb, seqSource=fa)
## ----annotate_gatk, eval=FALSE-------------------------------------------
## library("GenomicFeatures")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## fa <- FaFile(systemPipeR::reference(args))
## suppressAll(variantReport(args=args, txdb=txdb, fa=fa, organism="A. thaliana"))
## ----annotate_bcftools, eval=FALSE---------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## fa <- FaFile(systemPipeR::reference(args))
## suppressAll(variantReport(args=args, txdb=txdb, fa=fa, organism="A. thaliana"))
## ----annotate_varianttools, eval=FALSE-----------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## fa <- FaFile(systemPipeR::reference(args))
## suppressAll(variantReport(args=args, txdb=txdb, fa=fa, organism="A. thaliana"))
## ----view_annotation, eval=FALSE-----------------------------------------
## read.delim(outpaths(args)[1])[38:40,]
## ----combine_gatk, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## combineDF <- combineVarReports(args, filtercol=c(Consequence="nonsynonymous"))
## write.table(combineDF, "./results/combineDF_nonsyn_gatk.xls", quote=FALSE, row.names=FALSE, sep="\t")
## ----combine_bcftools, eval=FALSE----------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## combineDF <- combineVarReports(args, filtercol=c(Consequence="nonsynonymous"))
## write.table(combineDF, "./results/combineDF_nonsyn_sambcf.xls", quote=FALSE, row.names=FALSE, sep="\t")
## ----combine_varianttools, eval=FALSE------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## combineDF <- combineVarReports(args, filtercol=c(Consequence="nonsynonymous"))
## write.table(combineDF, "./results/combineDF_nonsyn_vartools.xls", quote=FALSE, row.names=FALSE, sep="\t")
## combineDF[2:4,]
## ----summary_gatk, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## varSummary(args)
## write.table(varSummary(args), "./results/variantStats_gatk.xls", quote=FALSE, col.names = NA, sep="\t")
## ----summary_bcftools, eval=FALSE----------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## varSummary(args)
## write.table(varSummary(args), "./results/variantStats_sambcf.xls", quote=FALSE, col.names = NA, sep="\t")
## ----summary_varianttools, eval=FALSE------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## varSummary(args)
## write.table(varSummary(args), "./results/variantStats_vartools.xls", quote=FALSE, col.names = NA, sep="\t")
## ----venn_diagram, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## varlist <- sapply(names(outpaths(args))[1:4], function(x) as.character(read.delim(outpaths(args)[x])$VARID))
## vennset_gatk <- overLapper(varlist, type="vennsets")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## varlist <- sapply(names(outpaths(args))[1:4], function(x) as.character(read.delim(outpaths(args)[x])$VARID))
## vennset_bcf <- overLapper(varlist, type="vennsets")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## varlist <- sapply(names(outpaths(args))[1:4], function(x) as.character(read.delim(outpaths(args)[x])$VARID))
## vennset_vartools <- overLapper(varlist, type="vennsets")
## pdf("./results/vennplot_var.pdf")
## vennPlot(list(vennset_gatk, vennset_bcf, vennset_vartools), mymain="", mysub="GATK: red; BCFtools: blue; VariantTools: green", colmode=2, ccol=c("red", "blue", "green"))
## dev.off()
## ----plot_variant, eval=FALSE--------------------------------------------
## library(ggbio)
## mychr <- "ChrC"; mystart <- 11000; myend <- 13000
## args <- systemArgs(sysma="param/bwa.param", mytargets="targets.txt")
## ga <- readGAlignments(outpaths(args)[1], use.names=TRUE, param=ScanBamParam(which=GRanges(mychr, IRanges(mystart, myend))))
## p1 <- autoplot(ga, geom = "rect")
## p2 <- autoplot(ga, geom = "line", stat = "coverage")
## p3 <- autoplot(vcf[seqnames(vcf)==mychr], type = "fixed") +
## xlim(mystart, myend) + theme(legend.position = "none",
## axis.text.y = element_blank(), axis.ticks.y=element_blank())
## p4 <- autoplot(txdb, which=GRanges(mychr, IRanges(mystart, myend)), names.expr = "gene_id")
## png("./results/plot_variant.png")
## tracks(Reads=p1, Coverage=p2, Variant=p3, Transcripts=p4, heights = c(0.3, 0.2, 0.1, 0.35)) + ylab("")
## dev.off()
## ----sessionInfo---------------------------------------------------------
sessionInfo()  # record R version and loaded packages for reproducibility
|
/varseq/version-current/systemPipeVARseq.R
|
no_license
|
dcassol/systemPipeR_workflows
|
R
| false
| false
| 16,098
|
r
|
## pre code {
## ----style, echo = FALSE, results = 'asis'-------------------------------
# Configure Bioconductor-flavored markdown output and global knitr options.
# The KNITR_EVAL / KNITR_CACHE environment variables let chunk evaluation and
# caching be toggled externally; both default to "TRUE" when unset.
BiocStyle::markdown()
options(width=60, max.print=1000)
knitr::opts_chunk$set(
    eval=as.logical(Sys.getenv("KNITR_EVAL", "TRUE")),
    cache=as.logical(Sys.getenv("KNITR_CACHE", "TRUE")),
    tidy.opts=list(width.cutoff=60), tidy=TRUE)
## ----setup, echo=FALSE, messages=FALSE, warnings=FALSE-------------------
# Load all workflow dependencies up front, suppressing startup messages so
# they do not clutter the rendered report.
suppressPackageStartupMessages({
    library(systemPipeR)
    library(BiocParallel)
    library(Biostrings)
    library(Rsamtools)
    library(GenomicRanges)
    library(ggplot2)
    library(GenomicAlignments)
    library(ShortRead)
    library(ape)
    library(batchtools)
})
## ----genVAR_workflow, eval=FALSE-----------------------------------------
## library(systemPipeRdata)
## genWorkenvir(workflow="varseq")
## setwd("varseq")
## Rscript -e "systemPipeRdata::genWorkenvir(workflow='varseq')"
## ----closeR, eval=FALSE--------------------------------------------------
## q("no") # closes R session on head node
## srun --x11 --partition=short --mem=2gb --cpus-per-task 4 --ntasks 1 --time 2:00:00 --pty bash -l
## ----r_environment, eval=FALSE-------------------------------------------
## system("hostname") # should return name of a compute node starting with i or c
## getwd() # checks current working directory of R session
## dir() # returns content of current working directory
## ----load_systempiper, eval=TRUE-----------------------------------------
library(systemPipeR)
## ----load_custom_fct, eval=FALSE-----------------------------------------
## source("systemPipeVARseq_Fct.R")
## ----load_targets_file, eval=TRUE----------------------------------------
# Read the paired-end targets file shipped with systemPipeR; lines starting
# with "#" inside the file are treated as comments and skipped.
targetspath <- system.file("extdata", "targetsPE.txt", package="systemPipeR")
targets <- read.delim(targetspath, comment.char = "#")
targets[1:4, 1:4]  # preview the first four rows and columns
## ----preprocess_reads, eval=FALSE----------------------------------------
## args <- systemArgs(sysma="param/trimPE.param", mytargets="targetsPE.txt")[1:4]
## # Note: subsetting!
## filterFct <- function(fq, cutoff=20, Nexceptions=0) {
## qcount <- rowSums(as(quality(fq), "matrix") <= cutoff, na.rm=TRUE)
## fq[qcount <= Nexceptions]
## # Retains reads where Phred scores are >= cutoff with N exceptions
## }
## preprocessReads(args=args, Fct="filterFct(fq, cutoff=20, Nexceptions=0)",
## batchsize=100000)
## writeTargetsout(x=args, file="targets_PEtrim.txt", overwrite=TRUE)
## ----fastq_report, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/tophat.param", mytargets="targets.txt")
## fqlist <- seeFastq(fastq=infile1(args), batchsize=100000, klength=8)
## pdf("./results/fastqReport.pdf", height=18, width=4*length(fqlist))
## seeFastqPlot(fqlist)
## dev.off()
## ----load_sysargs, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/bwa.param", mytargets="targets.txt")
## sysargs(args)[1] # Command-line parameters for first FASTQ file
## ----bwa_serial, eval=FALSE----------------------------------------------
## moduleload(modules(args))
## system("bwa index -a bwtsw ./data/tair10.fasta")
## bampaths <- runCommandline(args=args)
## writeTargetsout(x=args, file="targets_bam.txt", overwrite=TRUE)
## ----bwa_parallel, eval=FALSE--------------------------------------------
## moduleload(modules(args))
## system("bwa index -a bwtsw ./data/tair10.fasta")
## resources <- list(walltime=120, ntasks=1, ncpus=cores(args), memory=1024)
## reg <- clusterRun(args, conffile = ".batchtools.conf.R", Njobs=18, template = "batchtools.slurm.tmpl", runid="01", resourceList=resources)
## getStatus(reg=reg)
## waitForJobs(reg=reg)
## writeTargetsout(x=args, file="targets_bam.txt", overwrite=TRUE)
## ----check_file_presence, eval=FALSE-------------------------------------
## file.exists(outpaths(args))
## ----gsnap_parallel, eval=FALSE------------------------------------------
## library(gmapR); library(BiocParallel); library(batchtools)
## args <- systemArgs(sysma="param/gsnap.param", mytargets="targetsPE.txt")
## gmapGenome <- GmapGenome(systemPipeR::reference(args), directory="data",
## name="gmap_tair10chr", create=TRUE)
## f <- function(x) {
## library(gmapR); library(systemPipeR)
## args <- systemArgs(sysma="param/gsnap.param", mytargets="targetsPE.txt")
## gmapGenome <- GmapGenome(reference(args), directory="data", name="gmap_tair10chr", create=FALSE)
## p <- GsnapParam(genome=gmapGenome, unique_only=TRUE, molecule="DNA", max_mismatches=3)
## o <- gsnap(input_a=infile1(args)[x], input_b=infile2(args)[x], params=p, output=outfile1(args)[x])
## }
## resources <- list(walltime=120, ntasks=1, ncpus=cores(args), memory=1024)
## param <- BatchtoolsParam(workers = 4, cluster = "slurm", template = "batchtools.slurm.tmpl", resources = resources)
## d <- bplapply(seq(along=args), f, BPPARAM = param)
## writeTargetsout(x=args, file="targets_gsnap_bam.txt", overwrite=TRUE)
## ----align_stats, eval=FALSE---------------------------------------------
## read_statsDF <- alignStats(args=args)
## write.table(read_statsDF, "results/alignStats.xls", row.names=FALSE, quote=FALSE, sep="\t")
## ----symbolic_links, eval=FALSE------------------------------------------
## symLink2bam(sysargs=args, htmldir=c("~/.html/", "projects/gen242/"),
## urlbase="http://biocluster.ucr.edu/~tgirke/",
## urlfile="./results/IGVurl.txt")
## ----run_gatk, eval=FALSE------------------------------------------------
## moduleload("picard/1.130"); moduleload("samtools/1.3")
## system("picard CreateSequenceDictionary R=./data/tair10.fasta O=./data/tair10.dict")
## system("samtools faidx data/tair10.fasta")
## args <- systemArgs(sysma="param/gatk.param", mytargets="targets_bam.txt")
## resources <- list(walltime=120, ntasks=1, ncpus=4, memory=1024)
## reg <- clusterRun(args, conffile = ".batchtools.conf.R", Njobs=18, template = "batchtools.slurm.tmpl", runid="01", resourceList=resources)
## getStatus(reg=reg)
## waitForJobs(reg=reg)
## # unlink(outfile1(args), recursive = TRUE, force = TRUE)
## writeTargetsout(x=args, file="targets_gatk.txt", overwrite=TRUE)
## ----run_bcftools, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/sambcf.param", mytargets="targets_bam.txt")
## resources <- list(walltime=120, ntasks=1, ncpus=4, memory=1024)
## reg <- clusterRun(args, conffile = ".batchtools.conf.R", Njobs=18, template = "batchtools.slurm.tmpl", runid="01", resourceList=resources)
## getStatus(reg=reg)
## waitForJobs(reg=reg)
## # unlink(outfile1(args), recursive = TRUE, force = TRUE)
## writeTargetsout(x=args, file="targets_sambcf.txt", overwrite=TRUE)
## ----run_varianttools, eval=FALSE----------------------------------------
## library(gmapR); library(BiocParallel); library(batchtools)
## args <- systemArgs(sysma="param/vartools.param",
## mytargets="targets_gsnap_bam.txt")
## f <- function(x) {
## library(VariantTools); library(gmapR); library(systemPipeR)
## args <- systemArgs(sysma="param/vartools.param", mytargets="targets_gsnap_bam.txt")
## gmapGenome <- GmapGenome(systemPipeR::reference(args), directory="data", name="gmap_tair10chr", create=FALSE)
## tally.param <- TallyVariantsParam(gmapGenome, high_base_quality = 23L, indels = TRUE)
## bfl <- BamFileList(infile1(args)[x], index=character())
## var <- callVariants(bfl[[1]], tally.param)
## sampleNames(var) <- names(bfl)
## writeVcf(asVCF(var), outfile1(args)[x], index = TRUE)
## }
## resources <- list(walltime=120, ntasks=1, ncpus=cores(args), memory=1024)
## param <- BatchtoolsParam(workers = 4, cluster = "slurm", template = "batchtools.slurm.tmpl", resources = resources)
## d <- bplapply(seq(along=args), f, BPPARAM = param)
## writeTargetsout(x=args, file="targets_vartools.txt", overwrite=TRUE)
## ----inspect_vcf, eval=FALSE---------------------------------------------
## library(VariantAnnotation)
## args <- systemArgs(sysma="param/filter_gatk.param", mytargets="targets_gatk.txt")
## vcf <- readVcf(infile1(args)[1], "A. thaliana")
## vcf
## vr <- as(vcf, "VRanges")
## vr
## ----filter_gatk, eval=FALSE---------------------------------------------
## library(VariantAnnotation)
## library(BBmisc) # Defines suppressAll()
## args <- systemArgs(sysma="param/filter_gatk.param", mytargets="targets_gatk.txt")[1:4]
## filter <- "totalDepth(vr) >= 2 & (altDepth(vr) / totalDepth(vr) >= 0.8) & rowSums(softFilterMatrix(vr))>=1"
## # filter <- "totalDepth(vr) >= 20 & (altDepth(vr) / totalDepth(vr) >= 0.8) & rowSums(softFilterMatrix(vr))==6"
## suppressAll(filterVars(args, filter, varcaller="gatk", organism="A. thaliana"))
## writeTargetsout(x=args, file="targets_gatk_filtered.txt", overwrite=TRUE)
## ----filter_bcftools, eval=FALSE-----------------------------------------
## args <- systemArgs(sysma="param/filter_sambcf.param", mytargets="targets_sambcf.txt")[1:4]
## filter <- "rowSums(vr) >= 2 & (rowSums(vr[,3:4])/rowSums(vr[,1:4]) >= 0.8)"
## # filter <- "rowSums(vr) >= 20 & (rowSums(vr[,3:4])/rowSums(vr[,1:4]) >= 0.8)"
## suppressAll(filterVars(args, filter, varcaller="bcftools", organism="A. thaliana"))
## writeTargetsout(x=args, file="targets_sambcf_filtered.txt", overwrite=TRUE)
## ----filter_varianttools, eval=FALSE-------------------------------------
## library(VariantAnnotation)
## library(BBmisc) # Defines suppressAll()
## args <- systemArgs(sysma="param/filter_vartools.param", mytargets="targets_vartools.txt")[1:4]
## filter <- "(values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 2 & (values(vr)$n.read.pos / (values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 0.8)"
## # filter <- "(values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 20 & (values(vr)$n.read.pos / (values(vr)$n.read.pos.ref + values(vr)$n.read.pos) >= 0.8)"
## filterVars(args, filter, varcaller="vartools", organism="A. thaliana")
## writeTargetsout(x=args, file="targets_vartools_filtered.txt", overwrite=TRUE)
## ----check_filter, eval=FALSE--------------------------------------------
## length(as(readVcf(infile1(args)[1], genome="Ath"), "VRanges")[,1])
## length(as(readVcf(outpaths(args)[1], genome="Ath"), "VRanges")[,1])
## ----annotate_basics, eval=FALSE-----------------------------------------
## library("GenomicFeatures")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## vcf <- readVcf(infile1(args)[1], "A. thaliana")
## locateVariants(vcf, txdb, CodingVariants())
## ----annotate_basics_non-synon, eval=FALSE-------------------------------
## fa <- FaFile(systemPipeR::reference(args))
## predictCoding(vcf, txdb, seqSource=fa)
## ----annotate_gatk, eval=FALSE-------------------------------------------
## library("GenomicFeatures")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## fa <- FaFile(systemPipeR::reference(args))
## suppressAll(variantReport(args=args, txdb=txdb, fa=fa, organism="A. thaliana"))
## ----annotate_bcftools, eval=FALSE---------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## fa <- FaFile(systemPipeR::reference(args))
## suppressAll(variantReport(args=args, txdb=txdb, fa=fa, organism="A. thaliana"))
## ----annotate_varianttools, eval=FALSE-----------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## txdb <- loadDb("./data/tair10.sqlite")
## fa <- FaFile(systemPipeR::reference(args))
## suppressAll(variantReport(args=args, txdb=txdb, fa=fa, organism="A. thaliana"))
## ----view_annotation, eval=FALSE-----------------------------------------
## read.delim(outpaths(args)[1])[38:40,]
## ----combine_gatk, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## combineDF <- combineVarReports(args, filtercol=c(Consequence="nonsynonymous"))
## write.table(combineDF, "./results/combineDF_nonsyn_gatk.xls", quote=FALSE, row.names=FALSE, sep="\t")
## ----combine_bcftools, eval=FALSE----------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## combineDF <- combineVarReports(args, filtercol=c(Consequence="nonsynonymous"))
## write.table(combineDF, "./results/combineDF_nonsyn_sambcf.xls", quote=FALSE, row.names=FALSE, sep="\t")
## ----combine_varianttools, eval=FALSE------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## combineDF <- combineVarReports(args, filtercol=c(Consequence="nonsynonymous"))
## write.table(combineDF, "./results/combineDF_nonsyn_vartools.xls", quote=FALSE, row.names=FALSE, sep="\t")
## combineDF[2:4,]
## ----summary_gatk, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## varSummary(args)
## write.table(varSummary(args), "./results/variantStats_gatk.xls", quote=FALSE, col.names = NA, sep="\t")
## ----summary_bcftools, eval=FALSE----------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## varSummary(args)
## write.table(varSummary(args), "./results/variantStats_sambcf.xls", quote=FALSE, col.names = NA, sep="\t")
## ----summary_varianttools, eval=FALSE------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## varSummary(args)
## write.table(varSummary(args), "./results/variantStats_vartools.xls", quote=FALSE, col.names = NA, sep="\t")
## ----venn_diagram, eval=FALSE--------------------------------------------
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_gatk_filtered.txt")
## varlist <- sapply(names(outpaths(args))[1:4], function(x) as.character(read.delim(outpaths(args)[x])$VARID))
## vennset_gatk <- overLapper(varlist, type="vennsets")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_sambcf_filtered.txt")
## varlist <- sapply(names(outpaths(args))[1:4], function(x) as.character(read.delim(outpaths(args)[x])$VARID))
## vennset_bcf <- overLapper(varlist, type="vennsets")
## args <- systemArgs(sysma="param/annotate_vars.param", mytargets="targets_vartools_filtered.txt")
## varlist <- sapply(names(outpaths(args))[1:4], function(x) as.character(read.delim(outpaths(args)[x])$VARID))
## vennset_vartools <- overLapper(varlist, type="vennsets")
## pdf("./results/vennplot_var.pdf")
## vennPlot(list(vennset_gatk, vennset_bcf, vennset_vartools), mymain="", mysub="GATK: red; BCFtools: blue; VariantTools: green", colmode=2, ccol=c("red", "blue", "green"))
## dev.off()
## ----plot_variant, eval=FALSE--------------------------------------------
## library(ggbio)
## mychr <- "ChrC"; mystart <- 11000; myend <- 13000
## args <- systemArgs(sysma="param/bwa.param", mytargets="targets.txt")
## ga <- readGAlignments(outpaths(args)[1], use.names=TRUE, param=ScanBamParam(which=GRanges(mychr, IRanges(mystart, myend))))
## p1 <- autoplot(ga, geom = "rect")
## p2 <- autoplot(ga, geom = "line", stat = "coverage")
## p3 <- autoplot(vcf[seqnames(vcf)==mychr], type = "fixed") +
## xlim(mystart, myend) + theme(legend.position = "none",
## axis.text.y = element_blank(), axis.ticks.y=element_blank())
## p4 <- autoplot(txdb, which=GRanges(mychr, IRanges(mystart, myend)), names.expr = "gene_id")
## png("./results/plot_variant.png")
## tracks(Reads=p1, Coverage=p2, Variant=p3, Transcripts=p4, heights = c(0.3, 0.2, 0.1, 0.35)) + ylab("")
## dev.off()
## ----sessionInfo---------------------------------------------------------
sessionInfo()  # record R version and loaded packages for reproducibility
|
# MakeoverMonday 2017 week 45: life expectancy at birth.
# Assumes the workbook has columns Gender, Country.Name, Year, Life.Expectancy,
# Region and Income.Group -- TODO confirm against the source spreadsheet.
library(openxlsx)
library(tidyverse)
data_df <- read.xlsx("17-45/LifeExpectancyAtBirth.xlsx", sheet = 1)
# Year-over-year change in life expectancy per country (absolute and percent),
# computed with lag() within each country after ordering by year.
total <- data_df %>%
  filter(Gender == "Total") %>%
  group_by(Country.Name) %>%
  arrange(Country.Name, Year) %>%
  mutate(prior.Year = lag(Year), prior.Life.Expectancy = lag(Life.Expectancy)) %>%
  mutate(change_in_Life_Exp_Total = Life.Expectancy - prior.Life.Expectancy ,
         change_in_Life_Exp_perc = change_in_Life_Exp_Total/prior.Life.Expectancy) %>%
  ungroup()
# NOTE(review): country_count is assigned twice -- the per-region summary below
# overwrites the distinct-country total computed on the previous line.
country_count <- data_df %>% select(Country.Name) %>% distinct() %>% nrow
country_count <- data_df %>% filter(Gender == "Total") %>% select(Region, Country.Name) %>%
  distinct() %>% group_by(Region) %>% summarise(n=n())
count_ECA <- data_df %>% filter(Region == "Europe & Central Asia") %>% select(Country.Name) %>% distinct() %>% nrow()
qregion_count <- data_df %>% select(Region) %>% distinct() %>% nrow()
# Life expectancy trend per country, faceted by region.
total %>% ggplot(aes(x=Year,y = Life.Expectancy, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  # scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Region, scales = "free")
# Percent change in life expectancy per country, faceted by region.
total %>% ggplot(aes(x=Year,y = change_in_Life_Exp_perc, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Region, scales = "free")
# Europe & Central Asia only: percent change faceted per country.
total %>% filter(Region == "Europe & Central Asia") %>% ggplot(aes(x=Year,y = change_in_Life_Exp_perc)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Country.Name, scales = "free")
# Absolute year-over-year change per country, faceted by region.
total %>% ggplot(aes(x=Year,y = change_in_Life_Exp_Total, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  # scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Region, scales = "free")
# Percent change per country, faceted by income group.
total %>% ggplot(aes(x=Year,y = change_in_Life_Exp_perc, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~ Income.Group, scales = "free")
|
/17-45/makeovermondayW45.R
|
no_license
|
tbobin/MakeOverMonday
|
R
| false
| false
| 2,250
|
r
|
# MakeoverMonday 2017 week 45: life expectancy at birth (duplicate copy).
# Assumes the workbook has columns Gender, Country.Name, Year, Life.Expectancy,
# Region and Income.Group -- TODO confirm against the source spreadsheet.
library(openxlsx)
library(tidyverse)
data_df <- read.xlsx("17-45/LifeExpectancyAtBirth.xlsx", sheet = 1)
# Year-over-year change in life expectancy per country (absolute and percent),
# computed with lag() within each country after ordering by year.
total <- data_df %>%
  filter(Gender == "Total") %>%
  group_by(Country.Name) %>%
  arrange(Country.Name, Year) %>%
  mutate(prior.Year = lag(Year), prior.Life.Expectancy = lag(Life.Expectancy)) %>%
  mutate(change_in_Life_Exp_Total = Life.Expectancy - prior.Life.Expectancy ,
         change_in_Life_Exp_perc = change_in_Life_Exp_Total/prior.Life.Expectancy) %>%
  ungroup()
# NOTE(review): country_count is assigned twice -- the per-region summary below
# overwrites the distinct-country total computed on the previous line.
country_count <- data_df %>% select(Country.Name) %>% distinct() %>% nrow
country_count <- data_df %>% filter(Gender == "Total") %>% select(Region, Country.Name) %>%
  distinct() %>% group_by(Region) %>% summarise(n=n())
count_ECA <- data_df %>% filter(Region == "Europe & Central Asia") %>% select(Country.Name) %>% distinct() %>% nrow()
qregion_count <- data_df %>% select(Region) %>% distinct() %>% nrow()
# Life expectancy trend per country, faceted by region.
total %>% ggplot(aes(x=Year,y = Life.Expectancy, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  # scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Region, scales = "free")
# Percent change in life expectancy per country, faceted by region.
total %>% ggplot(aes(x=Year,y = change_in_Life_Exp_perc, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Region, scales = "free")
# Europe & Central Asia only: percent change faceted per country.
total %>% filter(Region == "Europe & Central Asia") %>% ggplot(aes(x=Year,y = change_in_Life_Exp_perc)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Country.Name, scales = "free")
# Absolute year-over-year change per country, faceted by region.
total %>% ggplot(aes(x=Year,y = change_in_Life_Exp_Total, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  # scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~Region, scales = "free")
# Percent change per country, faceted by income group.
total %>% ggplot(aes(x=Year,y = change_in_Life_Exp_perc, group = Country.Name, color = Country.Name)) +
  geom_line() +
  theme_minimal() +
  theme(legend.position = "none") +
  scale_y_continuous(labels = scales::percent) +
  facet_wrap( ~ Income.Group, scales = "free")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
context("To/from Python")
# Installs pyarrow into a dedicated "arrow-test" virtualenv and activates it
# for the remaining tests. Skipped on CRAN, outside dev mode, on Apple
# silicon, or when reticulate / virtualenv support is unavailable.
test_that("install_pyarrow", {
  skip_on_cran()
  skip_if_not_dev_mode()
  # Python problems on Apple M1 still
  skip_if(grepl("arm-apple|aarch64.*darwin", R.Version()$platform))
  skip_if_not_installed("reticulate")
  venv <- try(reticulate::virtualenv_create("arrow-test"))
  # Bail out if virtualenv isn't available
  skip_if(inherits(venv, "try-error"))
  # expect_error(expr, NA) asserts that NO error is raised
  expect_error(install_pyarrow("arrow-test", nightly = TRUE), NA)
  # Set this up for the following tests
  reticulate::use_virtualenv("arrow-test")
})
# A pyarrow Array built in Python should convert to an equivalent R Array.
test_that("Array from Python", {
  skip_if_no_pyarrow()
  pyarrow_mod <- reticulate::import("pyarrow")
  from_python <- pyarrow_mod$array(c(1, 2, 3))
  expected <- Array$create(c(1, 2, 3))
  expect_equal(from_python, expected)
})
# An R Array passed into a pyarrow call should round-trip back unchanged.
test_that("Array to Python", {
  skip_if_no_pyarrow()
  pyarrow_mod <- reticulate::import("pyarrow", convert = FALSE)
  r_array <- Array$create(c(1, 2, 3))
  py_array <- pyarrow_mod$concat_arrays(list(r_array))
  expect_s3_class(py_array, "pyarrow.lib.Array")
  expect_equal(reticulate::py_to_r(py_array), r_array)
})
# Explicit r_to_py()/py_to_r() conversions should round-trip a RecordBatch.
test_that("RecordBatch to/from Python", {
  skip_if_no_pyarrow()
  pa_mod <- reticulate::import("pyarrow", convert = FALSE)  # keep pyarrow loaded
  rb <- record_batch(col1 = c(1, 2, 3), col2 = letters[1:3])
  py_rb <- reticulate::r_to_py(rb)
  expect_s3_class(py_rb, "pyarrow.lib.RecordBatch")
  expect_equal(reticulate::py_to_r(py_rb), rb)
})
# A pyarrow Table assembled from converted batches should match the
# equivalent R Table, both as a whole and per ChunkedArray column.
test_that("Table and ChunkedArray from Python", {
  skip_if_no_pyarrow()
  pyarrow_mod <- reticulate::import("pyarrow", convert = FALSE)
  rb <- record_batch(col1 = c(1, 2, 3), col2 = letters[1:3])
  expected_tab <- Table$create(rb, rb)
  py_rb <- reticulate::r_to_py(rb)
  py_tab <- pyarrow_mod$Table$from_batches(list(py_rb, py_rb))
  expect_s3_class(py_tab, "pyarrow.lib.Table")
  # py_tab[0] uses Python's zero-based indexing to pull the first column
  expect_s3_class(py_tab[0], "pyarrow.lib.ChunkedArray")
  expect_equal(reticulate::py_to_r(py_tab[0]), expected_tab$col1)
  expect_equal(reticulate::py_to_r(py_tab), expected_tab)
})
# An R Table and one of its ChunkedArray columns should each convert to the
# corresponding pyarrow class and round-trip back unchanged.
test_that("Table and ChunkedArray to Python", {
  skip_if_no_pyarrow()
  rb <- record_batch(col1 = c(1, 2, 3), col2 = letters[1:3])
  r_tab <- Table$create(rb, rb)
  py_chunked <- reticulate::r_to_py(r_tab$col1)
  expect_s3_class(py_chunked, "pyarrow.lib.ChunkedArray")
  expect_equal(reticulate::py_to_r(py_chunked), r_tab$col1)
  py_tab <- reticulate::r_to_py(r_tab)
  expect_s3_class(py_tab, "pyarrow.lib.Table")
  expect_equal(reticulate::py_to_r(py_tab), r_tab)
})
# Round-tripping through Python must preserve R metadata. example_with_times
# is a fixture defined elsewhere in the test suite -- presumably containing
# date/time columns whose attributes must survive the conversion; verify
# against the package's helper files.
test_that("RecordBatch with metadata roundtrip", {
  skip_if_no_pyarrow()
  batch <- RecordBatch$create(example_with_times)
  pybatch <- reticulate::r_to_py(batch)
  expect_s3_class(pybatch, "pyarrow.lib.RecordBatch")
  expect_equal(reticulate::py_to_r(pybatch), batch)
  # identical (not just equal) data frame after the full roundtrip
  expect_identical(as.data.frame(reticulate::py_to_r(pybatch)), example_with_times)
})
# Same metadata-preservation check as above, but for a Table built from the
# example_with_times fixture (defined elsewhere in the test suite).
test_that("Table with metadata roundtrip", {
  skip_if_no_pyarrow()
  tab <- Table$create(example_with_times)
  pytab <- reticulate::r_to_py(tab)
  expect_s3_class(pytab, "pyarrow.lib.Table")
  expect_equal(reticulate::py_to_r(pytab), tab)
  # identical (not just equal) data frame after the full roundtrip
  expect_identical(as.data.frame(reticulate::py_to_r(pytab)), example_with_times)
})
|
/r/tests/testthat/test-python.R
|
permissive
|
abs-tudelft/arrow
|
R
| false
| false
| 3,764
|
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
context("To/from Python")
test_that("install_pyarrow", {
skip_on_cran()
skip_if_not_dev_mode()
# Python problems on Apple M1 still
skip_if(grepl("arm-apple|aarch64.*darwin", R.Version()$platform))
skip_if_not_installed("reticulate")
venv <- try(reticulate::virtualenv_create("arrow-test"))
# Bail out if virtualenv isn't available
skip_if(inherits(venv, "try-error"))
expect_error(install_pyarrow("arrow-test", nightly = TRUE), NA)
# Set this up for the following tests
reticulate::use_virtualenv("arrow-test")
})
test_that("Array from Python", {
  skip_if_no_pyarrow()
  # With the default convert = TRUE, a pyarrow array comes back as an
  # equivalent R-side arrow Array.
  pyarrow <- reticulate::import("pyarrow")
  from_py <- pyarrow$array(c(1, 2, 3))
  expect_equal(from_py, Array$create(c(1, 2, 3)))
})
test_that("Array to Python", {
  skip_if_no_pyarrow()
  pyarrow <- reticulate::import("pyarrow", convert = FALSE)
  r_arr <- Array$create(c(1, 2, 3))
  # Passing the R Array into a pyarrow function converts it on the way in;
  # the result stays a Python-side object because convert = FALSE.
  py_arr <- pyarrow$concat_arrays(list(r_arr))
  expect_s3_class(py_arr, "pyarrow.lib.Array")
  expect_equal(reticulate::py_to_r(py_arr), r_arr)
})
test_that("RecordBatch to/from Python", {
  skip_if_no_pyarrow()
  # Import kept for parity with the original test: it ensures pyarrow is
  # loaded on the Python side before conversion.
  pyarrow <- reticulate::import("pyarrow", convert = FALSE)
  rb <- record_batch(col1 = c(1, 2, 3), col2 = letters[1:3])
  rb_py <- reticulate::r_to_py(rb)
  expect_s3_class(rb_py, "pyarrow.lib.RecordBatch")
  # Round-tripping back to R must reproduce the original batch.
  expect_equal(reticulate::py_to_r(rb_py), rb)
})
test_that("Table and ChunkedArray from Python", {
  skip_if_no_pyarrow()
  pa <- reticulate::import("pyarrow", convert = FALSE)
  batch <- record_batch(col1 = c(1, 2, 3), col2 = letters[1:3])
  # Reference table: two copies of the batch stacked on the R side.
  tab <- Table$create(batch, batch)
  # Build the equivalent table on the Python side from the same two batches.
  pybatch <- reticulate::r_to_py(batch)
  pytab <- pa$Table$from_batches(list(pybatch, pybatch))
  expect_s3_class(pytab, "pyarrow.lib.Table")
  # Python uses 0-based indexing: pytab[0] is the first column, a ChunkedArray.
  expect_s3_class(pytab[0], "pyarrow.lib.ChunkedArray")
  expect_equal(reticulate::py_to_r(pytab[0]), tab$col1)
  expect_equal(reticulate::py_to_r(pytab), tab)
})
test_that("Table and ChunkedArray to Python", {
  skip_if_no_pyarrow()
  rb <- record_batch(col1 = c(1, 2, 3), col2 = letters[1:3])
  tab <- Table$create(rb, rb)
  # A ChunkedArray column converts to pyarrow.lib.ChunkedArray and
  # round-trips intact.
  chunked_py <- reticulate::r_to_py(tab$col1)
  expect_s3_class(chunked_py, "pyarrow.lib.ChunkedArray")
  expect_equal(reticulate::py_to_r(chunked_py), tab$col1)
  # The whole Table round-trips as well.
  tab_py <- reticulate::r_to_py(tab)
  expect_s3_class(tab_py, "pyarrow.lib.Table")
  expect_equal(reticulate::py_to_r(tab_py), tab)
})
test_that("RecordBatch with metadata roundtrip", {
  skip_if_no_pyarrow()
  # example_with_times is defined in the test helpers; presumably it contains
  # time-typed columns whose R attributes are stored as schema metadata and
  # must survive the trip through pyarrow unchanged.
  batch <- RecordBatch$create(example_with_times)
  pybatch <- reticulate::r_to_py(batch)
  expect_s3_class(pybatch, "pyarrow.lib.RecordBatch")
  expect_equal(reticulate::py_to_r(pybatch), batch)
  expect_identical(as.data.frame(reticulate::py_to_r(pybatch)), example_with_times)
})
test_that("Table with metadata roundtrip", {
  skip_if_no_pyarrow()
  # Same metadata-preservation check as the RecordBatch test above, but for
  # a Table: convert to pyarrow and back, then compare both the arrow object
  # and the fully materialised data frame.
  tab <- Table$create(example_with_times)
  pytab <- reticulate::r_to_py(tab)
  expect_s3_class(pytab, "pyarrow.lib.Table")
  expect_equal(reticulate::py_to_r(pytab), tab)
  expect_identical(as.data.frame(reticulate::py_to_r(pytab)), example_with_times)
})
|
# Sentiment analysis of the marked-word corpus for document 277 ("The Iliad").
# NOTE(review): setwd() in a script is fragile; kept for behavioural parity.
setwd("~/Desktop/git-space/ranalysis/corpus")
library(XML)
library(tidytext)
library(dplyr)
library(stringr)
library(glue)
library(tidyverse)
# stick together the path to the file & 1st file name
hmMarked <- glue("277-words.txt", sep = " ")
# Read the corpus file line by line, tag each row with its source file and
# line number, then tokenise into one word per row.
# FIX: data_frame() is deprecated -> tibble(); bare unnest() is deprecated ->
# name the list-column to unnest explicitly.
words <- tibble(file = paste0("~/Desktop/git-space/ranalysis/corpus/",
                              c("277-words.txt"))) %>%
  mutate(text = map(file, read_lines)) %>%
  unnest(text) %>%
  group_by(file = str_sub(basename(file), 1, -5)) %>%  # strip the ".txt" suffix
  mutate(line_number = row_number()) %>%
  ungroup() %>%
  unnest_tokens(word, text)
# Net sentiment (positive minus negative counts) per 5%-of-document bucket,
# using the Bing sentiment lexicon.
words_sentiment <- inner_join(words,
                              get_sentiments("bing")) %>%
  count(file, index = round(line_number / max(line_number) * 100 / 5) * 5, sentiment) %>%
  spread(sentiment, n, fill = 0) %>%
  mutate(net_sentiment = positive - negative)
# Net-sentiment profile across the document.
words_sentiment %>% ggplot(aes(x = index, y = net_sentiment, fill = file)) +
  geom_bar(stat = "identity", show.legend = FALSE) +
  facet_wrap(~ file) +
  scale_x_continuous("Location in the 'Iliad'") +
  scale_y_continuous("Bing net Sentiment")
# Most frequent sentiment-bearing words overall...
bing_word_counts <- words %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
# ...and the top 10 per sentiment, as horizontal bars.
bing_word_counts %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scales = "free_y") +
  labs(y = "Marked Words in 'The Iliad'",
       x = NULL) +
  coord_flip()
library(wordcloud)
library(reshape2)
# create a sentiment wordcloud
words %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  acast(word ~ sentiment, value.var = "n", fill = 0) %>%
  comparison.cloud(max.words = 10000, scale = c(2.5, .5),
                   random.order = FALSE,
                   colors = c("red", "blue"))
|
/code/277-sentiment-analysis.R
|
no_license
|
melvillesmarks/ranalysis
|
R
| false
| false
| 1,913
|
r
|
setwd("~/Desktop/git-space/ranalysis/corpus")
library(XML)
library(tidytext)
library(dplyr)
library(stringr)
library(glue)
library(tidyverse)
# stick together the path to the file & 1st file name
hmMarked <- glue("277-words.txt", sep = " ")
words <- data_frame(file = paste0("~/Desktop/git-space/ranalysis/corpus/",
c("277-words.txt"))) %>%
mutate(text = map(file, read_lines)) %>%
unnest() %>%
group_by(file = str_sub(basename(file), 1, -5)) %>%
mutate(line_number = row_number()) %>%
ungroup() %>%
unnest_tokens(word, text)
words_sentiment <- inner_join(words,
get_sentiments("bing")) %>%
count(file, index = round(line_number/ max(line_number) * 100 / 5) * 5, sentiment) %>%
spread(sentiment, n, fill = 0) %>%
mutate(net_sentiment = positive - negative)
words_sentiment %>% ggplot(aes(x = index, y = net_sentiment, fill = file)) +
geom_bar(stat = "identity", show.legend = FALSE) +
facet_wrap(~ file) +
scale_x_continuous("Location in the 'Iliad'") +
scale_y_continuous("Bing net Sentiment")
bing_word_counts <- words %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
ungroup()
bing_word_counts %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill = sentiment)) +
geom_col(show.legend = FALSE) +
facet_wrap(~sentiment, scales = "free_y") +
labs(y = "Marked Words in 'The Iliad'",
x = NULL) +
coord_flip()
library(wordcloud)
library(reshape2)
# create a sentiment wordcloud
words %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(max.words = 10000, scale = c(2.5,.5),
random.order = FALSE,
colors = c("red", "blue"))
|
#' @importFrom assertthat assert_that
NULL
|
/R/imports.R
|
permissive
|
tzakharko/executr
|
R
| false
| false
| 42
|
r
|
#' @importFrom assertthat assert_that
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_home_away.R
\name{model_home_away}
\alias{model_home_away}
\title{Run Home Away model}
\usage{
model_home_away(data, variable)
}
\arguments{
\item{data}{The data frame on which the model is run.
It should come from \code{\link{format_data_PPBstats.data_agro_HA}}.}
\item{variable}{variable to analyse}
}
\value{
The function returns a list with three elements :
\itemize{
\item info : a list with variable
\item ANOVA a list with two elements :
\itemize{
\item model
\item anova_model
}
}
}
\description{
\code{model_home_away} runs home away model
}
\details{
Find details in the book \href{https://priviere.github.io/PPBstats_book/family-4.html#family-4}{here}.
}
\seealso{
\itemize{
\item \code{\link{check_model}}
\item \code{\link{check_model.fit_model_home_away}}
}
}
\author{
Pierre Riviere and Gaelle Van Frank and Baptiste Rouger
}
|
/man/model_home_away.Rd
|
no_license
|
gaelleVF/PPBstats-PPBmelange
|
R
| false
| true
| 942
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_home_away.R
\name{model_home_away}
\alias{model_home_away}
\title{Run Home Away model}
\usage{
model_home_away(data, variable)
}
\arguments{
\item{data}{The data frame on which the model is run.
It should come from \code{\link{format_data_PPBstats.data_agro_HA}}.}
\item{variable}{variable to analyse}
}
\value{
The function returns a list with three elements :
\itemize{
\item info : a list with variable
\item ANOVA a list with two elements :
\itemize{
\item model
\item anova_model
}
}
}
\description{
\code{model_home_away} runs home away model
}
\details{
Find details in the book \href{https://priviere.github.io/PPBstats_book/family-4.html#family-4}{here}.
}
\seealso{
\itemize{
\item \code{\link{check_model}}
\item \code{\link{check_model.fit_model_home_away}}
}
}
\author{
Pierre Riviere and Gaelle Van Frank and Baptiste Rouger
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/directconnect_operations.R
\name{directconnect_confirm_transit_virtual_interface}
\alias{directconnect_confirm_transit_virtual_interface}
\title{Accepts ownership of a transit virtual interface created by another
Amazon Web Services account}
\usage{
directconnect_confirm_transit_virtual_interface(
virtualInterfaceId,
directConnectGatewayId
)
}
\arguments{
\item{virtualInterfaceId}{[required] The ID of the virtual interface.}
\item{directConnectGatewayId}{[required] The ID of the Direct Connect gateway.}
}
\description{
Accepts ownership of a transit virtual interface created by another Amazon Web Services account.
See \url{https://www.paws-r-sdk.com/docs/directconnect_confirm_transit_virtual_interface/} for full documentation.
}
\keyword{internal}
|
/cran/paws.networking/man/directconnect_confirm_transit_virtual_interface.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 842
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/directconnect_operations.R
\name{directconnect_confirm_transit_virtual_interface}
\alias{directconnect_confirm_transit_virtual_interface}
\title{Accepts ownership of a transit virtual interface created by another
Amazon Web Services account}
\usage{
directconnect_confirm_transit_virtual_interface(
virtualInterfaceId,
directConnectGatewayId
)
}
\arguments{
\item{virtualInterfaceId}{[required] The ID of the virtual interface.}
\item{directConnectGatewayId}{[required] The ID of the Direct Connect gateway.}
}
\description{
Accepts ownership of a transit virtual interface created by another Amazon Web Services account.
See \url{https://www.paws-r-sdk.com/docs/directconnect_confirm_transit_virtual_interface/} for full documentation.
}
\keyword{internal}
|
#' @title get_hipster
#' @description Plots an image of a hipster. What else? Scrapes an image
#'   search results page and plots one randomly chosen thumbnail.
#' @examples
#'# Get a hipster
#'get_hipster()
#' @author Mike McMahon, \email{Mike.McMahon@@dfo-mpo.gc.ca}
#' @export
get_hipster <- function() {
  # Each query returns 20 results; randomise the start offset (a multiple of
  # 20) so results from any of the first result pages can appear.
  randomStart <- 20 * round(sample(1:50, 1) / 20)
  searchURLs <- c(
    "https://www.google.com/search?q=%22hipster%22+beard+OR+mustache&tbm=isch&safe=active&start="
    #, "https://www.bing.com/images/search?q=%22hipster%22&FORM=HDRSC2&safeSearch=Strict&count=20&offset="
  )
  # NOTE(review): sample() on a length-1 character vector returns that element,
  # so this stays correct if more engines are re-enabled later.
  this <- sample(searchURLs, 1)
  # read_html()/html_nodes()/html_attr() are assumed to come from rvest and
  # the %>% pipe from magrittr, attached by the package -- TODO confirm.
  search <- read_html(paste0(this, randomStart))
  # Grab all <img> tags and their "src" attribute, a URL to an image.
  urls <- search %>% html_nodes("img") %>% html_attr("src")
  # Filter out garbage: NAs, non-http entries, implausibly short strings.
  urls <- urls[!is.na(urls)]
  urls <- urls[grepl("http", urls)]
  urls <- urls[nchar(urls) > 13]
  # FIX: fail with a clear message instead of a cryptic sample()/image_read()
  # error when the page layout changes and no URLs survive filtering.
  if (length(urls) == 0) {
    stop("No candidate image URLs found; the search page layout may have changed.",
         call. = FALSE)
  }
  # FIX: sample the vector directly, avoiding the 1:length(urls) footgun.
  im <- magick::image_read(sample(urls, 1))
  plot(im)
}
|
/R/get_hipster.R
|
permissive
|
Maritimes/hipsteR
|
R
| false
| false
| 1,010
|
r
|
#' @title get_hipster
#' @description Plots an image of a hipster. What else?
#' @examples
#'# Get a hipster
#'get_hipster()
#' @author Mike McMahon, \email{Mike.McMahon@@dfo-mpo.gc.ca}
#' @export
get_hipster<-function(){
#each query gets 20 results - use the randomizer allow results from any of the first 50 pages of results
randomStart = 20*round(sample(1:50,1)/20)
searchURLs <- c("https://www.google.com/search?q=%22hipster%22+beard+OR+mustache&tbm=isch&safe=active&start="
#, "https://www.bing.com/images/search?q=%22hipster%22&FORM=HDRSC2&safeSearch=Strict&count=20&offset="
)
this = sample(searchURLs,1)
search <- read_html(paste0(this,randomStart))
#Grab all <img> tags, get their "src" attribute, a URL to an image
urls <- search %>% html_nodes("img") %>% html_attr("src")
#do some filtering to remove garbage
urls<-urls[!is.na(urls)]
urls <- urls[grepl("http", urls)]
urls <- urls[nchar(urls)>13]
im <- magick::image_read(urls[sample(1:length(urls),1)])
plot(im)
}
|
library(dplyr)
library(ggplot2)
library(tidyverse)
source('helper.R')

# Raw employee reviews; keep strings as character, not factors.
review <- read.csv("employee_reviews.csv", stringsAsFactors = FALSE)

# job.title packs several pieces of information; split it into the
# employment status and the position held.
review1 <- separate(review, job.title, c("employee.status", "position"), sep = "-")
head(review1)

# Flag whether the response comes from an anonymous employee.
review2 <- review1 %>%
  mutate(position = trimws(position, which = "both")) %>%
  mutate(is.anonymous = ifelse(position == "Anonymous Employee", TRUE, FALSE))

# Parse the dates column into Date objects and derive year / month fields.
review3 <- review2 %>%
  mutate(dates = trimws(dates, which = "both")) %>%
  mutate(dates = as.Date(as.character(dates), "%b %d, %Y")) %>%
  mutate(year = as.numeric(format(dates, "%Y"))) %>%
  mutate(month = as.character(format(dates, "%b")))

# Persist the cleaned data frame.
write.csv(review3, file = "review_data.csv", row.names = FALSE)
|
/app2/data_cleaning.r
|
no_license
|
Capstone-Projects-2020-Fall/jobscanner
|
R
| false
| false
| 1,021
|
r
|
library(dplyr)
library(ggplot2)
library(tidyverse)
source('helper.R')
review = read.csv("employee_reviews.csv", stringsAsFactors = FALSE)
# seperate job.title column in to several column since there is many information contained in this column.
review1 = separate(review, job.title, c('employee.status', 'position'), sep = '-')
head(review1)
# create boolean for if the response is from anonymous employee or not
review2 =
review1 %>%
mutate(., position = trimws(position, which = c('both'))) %>%
mutate(is.anonymous = ifelse(position == "Anonymous Employee", TRUE, FALSE))
# change the dates column to dates formate
review3 = review2 %>%
mutate(., dates = trimws(dates, which = c('both'))) %>%
mutate(dates = as.Date(as.character(dates), '%b %d, %Y')) %>%
mutate(year = as.numeric(format(dates,'%Y'))) %>%
mutate(month = as.character(format(dates, '%b')))
# write cleaned dataframe into csv file
write.csv(review3, file='review_data.csv', row.names=F)
|
#' Get reference model structure
#'
#' Generic function that can be used to create and fetch the reference model structure
#' for all those objects that have this method. All these implementations are wrappers
#' to the \code{\link{init_refmodel}}-function so the returned object has the same type.
#'
#' @name get-refmodel
#'
#' @param object Object based on which the reference model is created. See possible types below.
#' @param ... Arguments passed to the methods.
#'
#' @return An object of type \code{refmodel} (the same type as returned by \link{init_refmodel})
#' that can be passed to all the functions that
#' take the reference fit as the first argument, such as \link{varsel}, \link{cv_varsel}, \link{project},
#' \link[=proj-pred]{proj_predict} and \link[=proj-pred]{proj_linpred}.
#'
#' @examples
#' \donttest{
#' ### Usage with stanreg objects
#' fit <- stan_glm(y~x, binomial())
#' ref <- get_refmodel(fit)
#' print(class(ref))
#'
#' # variable selection, use the already constructed reference model
#' vs <- varsel(ref)
#' # this will first construct the reference model and then execute
#' # exactly the same way as the previous command (the result is identical)
#' vs <- varsel(fit)
#' }
#'
NULL
#' @rdname get-refmodel
#' @export
get_refmodel <- function(object, ...) {
  # S3 generic: dispatch on the class of `object`.
  UseMethod("get_refmodel", object)
}
#' @rdname get-refmodel
#' @export
get_refmodel.refmodel <- function(object, ...) {
  # Already a reference model: return it unchanged.
  object
}
#' @rdname get-refmodel
#' @export
get_refmodel.vsel <- function(object, ...) {
  # varsel() results carry the reference model in the `refmodel` field.
  object$refmodel
}
#' @rdname get-refmodel
#' @export
get_refmodel.cvsel <- function(object, ...) {
  # cv_varsel() results carry the reference model in the `refmodel` field.
  object$refmodel
}
#' @rdname get-refmodel
#' @export
get_refmodel.stanreg <- function(object, ...) {
  # Build a refmodel from a fitted rstanarm model: extract posterior draws,
  # data, family and a prediction function, then delegate to init_refmodel.
  # the fit is an rstanarm-object
  if (!requireNamespace("rstanarm", quietly = TRUE)) {
    stop("You need package \"rstanarm\". Please install it.",
         call. = FALSE)
  }
  if ('lmerMod' %in% class(object))
    stop('stan_lmer and stan_glmer are not yet supported.')
  families <- c('gaussian','binomial','poisson')
  if (!(family(object)$family %in% families))
    stop(paste0('Only the following families are currently supported:\n',
                paste(families, collapse = ', '), '.'))
  # fetch the draws
  samp <- as.data.frame(object)
  ndraws <- nrow(samp)
  # data, family and the predictor matrix x
  data <- object$data
  # Locate the response column by comparing each column to get_y();
  # NOTE(review): this misfires if a predictor duplicates the response values.
  y_ind <- which(apply(data, 2, function(col) all(col==rstanarm::get_y(object))))
  z <- data[,-y_ind,drop=F]
  fam <- kl_helpers(family(object))
  x <- rstanarm::get_x(object)
  rownames(x) <- NULL # ignore the rownames
  x <- x[, as.logical(attr(x, 'assign')), drop=F] # drop the column of ones
  attr(x, 'assign') <- NULL
  y <- unname(rstanarm::get_y(object))
  dis <- samp$sigma %ORifNULL% rep(0, ndraws) # TODO: handle other than gaussian likelihoods..
  offset <- object$offset %ORifNULL% rep(0, nobs(object))
  intercept <- as.logical(attr(object$terms,'intercept') %ORifNULL% 0)
  # Draw-wise expected values on the response scale; offsets are zeroed here
  # because init_refmodel applies them itself.
  predfun <- function(zt) t(rstanarm::posterior_linpred(object, newdata=data.frame(zt), transform=T, offset=rep(0,nrow(zt))))
  wsample <- rep(1/ndraws, ndraws) # equal sample weights by default
  wobs <- unname(weights(object)) # observation weights
  if (length(wobs)==0) wobs <- rep(1,nrow(z))
  # cvfun for k-fold cross-validation
  cvfun <- function(folds) {
    cvres <- rstanarm::kfold(object, K = max(folds), save_fits = T, folds = folds)
    fits <- cvres$fits[,'fit']
    lapply(fits, function (fit) {
      dis <- as.data.frame(fit)$sigma # NOTE: this works only for Gaussian family
      predfun <- function(zt) t(rstanarm::posterior_linpred(fit, newdata=data.frame(zt), transform=T, offset=rep(0,nrow(zt))))
      list(predfun=predfun, dis=dis)
    })
  }
  init_refmodel(z=z, y=y, family=fam, x=x, predfun=predfun, dis=dis, offset=offset,
                wobs=wobs, wsample=wsample, intercept=intercept, cvfits=NULL, cvfun=cvfun)
}
#' Custom reference model initialization
#'
#' Initializes a structure that can be used as a reference fit for the
#' projective variable selection. This function is provided to allow construction
#' of the reference fit from arbitrary fitted models, because only limited
#' information is needed for the actual projection and variable selection.
#'
#' @param z Predictor matrix of dimension \code{n}-by-\code{dz} containing the training
#' features for the reference model. Rows denote the observations and columns the different features.
#' @param y Vector of length \code{n} giving the target variable values.
#' @param family \link{family} object giving the model family
#' @param x Predictor matrix of dimension \code{n}-by-\code{dx} containing the candidate
#' features for selection (i.e. variables from which to select the submodel). Rows denote
#' the observations and columns the different features. Notice that this can
#' different from \code{z}. If missing, same as \code{z} by default.
#' @param predfun Function that takes a \code{nt}-by-\code{dz} test predictor matrix \code{zt} as an input
#' (\code{nt} = # test points, \code{dz} = number of features in the reference model) and outputs
#' a \code{nt}-by-\code{S} matrix of expected values for the target variable \code{y},
#' each column corresponding to one posterior draw for the parameters in the reference model
#' (the number of draws \code{S} can also be 1). Notice that the output should be computed without
#' any offsets, these are automatically taken into account internally, e.g. in cross-validation.
#' If omitted, then the returned object will be 'data reference', that is, it can be used to compute
#' penalized maximum likelihood solutions such as Lasso (see examples below and in the quickstart vignette.)
#' @param dis Vector of length \code{S} giving the posterior draws for the dispersion parameter
#' in the reference model if there is such a parameter in the model family. For Gaussian
#' observation model this is the noise std \code{sigma}.
#' @param offset Offset to be added to the linear predictor in the projection. (Same as in
#' function \code{glm}.)
#' @param wobs Observation weights. If omitted, equal weights are assumed.
#' @param wsample vector of length \code{S} giving the weights for the posterior draws.
#' If omitted, equal weights are assumed.
#' @param intercept Whether to use intercept. Default is \code{TRUE}.
#' @param cvfun Function for performing K-fold cross-validation. The input is an \code{n}-element
#' vector where each value is an integer between 1 and K denoting the fold for each observation.
#' Should return a list with K elements, each of which is a list with fields \code{predfun} and
#' \code{dis} (if the model has a dispersion parameter) which are defined the same way as the arguments
#' \code{predfun} and \code{dis} above but are computed using only the corresponding subset of the data.
#' More precisely, if \code{cvres} denotes
#' the list returned by \code{cvfun}, then \code{cvres[[k]]$predfun} and \code{cvres[[k]]$dis} must be computed
#' using only data from indices \code{folds != k}, where \code{folds} is the \code{n}-element input for
#' \code{cvfun}. Can be omitted but either \code{cvfun} or \code{cvfits} is needed for K-fold cross-validation
#' for genuine reference models. See example below.
#' @param cvfits A list with K elements, that has the same format as the value returned by \code{cvind} but
#' each element of \code{cvfits} must also contain a field \code{omitted} which indicates the indices that
#' were left out for the corresponding fold. Usually it is easier to specify \code{cvfun} but this can be useful
#' if you have already computed the cross-validation for the reference model and would like to avoid
#' recomputing it. Can be omitted but either \code{cvfun} or \code{cvfits} is needed for K-fold cross-validation
#' for genuine reference models.
#' @param ... Currently ignored.
#'
#' @return An object that can be passed to all the functions that
#' take the reference fit as the first argument, such as \link{varsel}, \link{cv_varsel},
#' \link[=proj-pred]{proj_predict} and \link[=proj-pred]{proj_linpred}.
#'
#' @examples
#' \donttest{
#'
#' # generate some toy data
#' set.seed(1)
#' n <- 100
#' d <- 10
#' x <- matrix(rnorm(n*d), nrow=n, ncol=d)
#' b <- c(c(1,1),rep(0,d-2)) # first two variables are relevant
#' y <- x %*% b + rnorm(n)
#'
#' # fit the model (this uses rstanarm for posterior inference,
#' # but any other tool could also be used)
#' fit <- stan_glm(y~x, family=gaussian(), data=data.frame(x=I(x),y=y))
#' draws <- as.matrix(fit)
#' a <- draws[,1] # intercept
#' b <- draws[,2:(ncol(draws)-1)] # regression coefficients
#' sigma <- draws[,ncol(draws)] # noise std
#'
#' # initialize the reference model structure
#' predfun <- function(xt) t( b %*% t(xt) + a )
#' ref <- init_refmodel(x,y, gaussian(), predfun=predfun, dis=sigma)
#'
#' # variable selection based on the reference model
#' vs <- cv_varsel(ref)
#' varsel_plot(vs)
#'
#'
#' # pass in the original data as 'reference'; this allows us to compute
#' # traditional estimates like Lasso
#' dref <- init_refmodel(x,y,gaussian())
#' lasso <- cv_varsel(dref, method='l1') # lasso
#' varsel_plot(lasso, stat='rmse')
#'
#' }
#'
#' @export
init_refmodel <- function(z, y, family, x=NULL, predfun=NULL, dis=NULL, offset=NULL,
                          wobs=NULL, wsample=NULL, intercept=TRUE, cvfun=NULL, cvfits=NULL, ...) {
  # Construct a 'refmodel' object (see the roxygen block above for full
  # argument documentation). With predfun = NULL the result is a 'datafit':
  # the observed data itself plays the role of the reference model.
  n <- NROW(z)
  family <- kl_helpers(family)
  if (is.null(x))
    x <- z
  if (is.null(offset))
    offset <- rep(0, n)
  # y and the observation weights in a standard form
  target <- .get_standard_y(y, wobs, family)
  y <- target$y
  wobs <- target$weights
  if (is.null(predfun)) {
    # no prediction function given, so the 'reference model' will simply contain the
    # observed data as the fitted values
    predmu <- function(z,offset=0) matrix(rep(NA, NROW(z)))
    mu <- y
    proper_model <- FALSE
  } else {
    # genuine reference model. add impact of offset to the prediction function
    predmu <- function(z,offset=0) family$linkinv( family$linkfun(predfun(z)) + offset )
    mu <- predmu(z,offset)
    if (NROW(y)!=NROW(mu))
      stop(paste0('The number of rows in the output of predfun(z) does not match with the given y;',
                  'predfun seems to be misspecified.'))
    proper_model <- TRUE
  }
  # A genuine model with a dispersion family must supply the draws for it.
  if (proper_model)
    if (.has.dispersion(family) && is.null(dis))
      stop(sprintf('Family %s needs a dispersion parameter so you must specify input argument \'dis\'.', family$family))
  mu <- unname(as.matrix(mu))
  S <- NCOL(mu) # number of samples in the reference model
  # Defaults: zero dispersion, equal observation weights, equal draw weights.
  if (is.null(dis))
    dis <- rep(0, S)
  if (is.null(wobs))
    wobs <- rep(1, n)
  if (is.null(wsample))
    wsample <- rep(1, S)
  if (is.null(intercept))
    intercept <- TRUE
  wsample <- wsample/sum(wsample)
  # compute log-likelihood (only possible for a genuine reference model)
  if (proper_model)
    loglik <- t(family$ll_fun(mu,dis,y,wobs))
  else
    loglik <- NULL
  # figure out column names for the variables
  if (!is.null(colnames(x)))
    coefnames <- colnames(x)
  else
    coefnames <- paste0('x',1:ncol(x))
  if (!proper_model) {
    # this is a dummy definition for cvfun, but it will lead to standard cross-validation
    # for datafit reference; see cv_varsel and get_kfold
    cvfun <- function(folds) lapply(1:max(folds), function(k) list())
  }
  # NOTE(review): the family object is stored under the name 'fam';
  # downstream code must index object$fam, not object$family.
  refmodel <- list(z=z, x=x, y=y, fam=family, mu=mu, dis=dis, nobs=n, coefnames=coefnames,
                   offset=offset, wobs=wobs, wsample=wsample, intercept=intercept,
                   predfun=predmu, loglik=loglik, cvfits=cvfits, cvfun=cvfun)
  # define the class of the returned object to be 'refmodel' and additionally 'datafit'
  # if only the observed data was provided and no actual function for predicting test data
  class(refmodel) <- 'refmodel'
  if (!proper_model)
    class(refmodel) <- c(class(refmodel),'datafit')
  return(refmodel)
}
#' Predict method for reference model objects
#'
#' Compute the predictions using the reference model, that is, compute the
#' expected value for the next observation, or evaluate the log-predictive
#' density at a given point.
#'
#' @param object The object of class \code{refmodel}.
#' @param znew Matrix of predictor values used in the prediction.
#' @param ynew New (test) target variables. If given, then the log predictive density
#' for the new observations is computed.
#' @param offsetnew Offsets for the new observations. By default a vector of
#' zeros.
#' @param weightsnew Weights for the new observations. For binomial model,
#' corresponds to the number trials per observation. Has effect only if \code{ynew} is specified.
#' By default a vector of ones.
#' @param type Scale on which the predictions are returned. Either 'link' (the latent function
#' value, from -inf to inf) or 'response' (the scale on which the target \code{y} is measured,
#' obtained by taking the inverse-link from the latent value).
#' @param ... Currently ignored.
#'
#' @return Returns either a vector of predictions, or vector of log predictive densities evaluated
#' at \code{ynew} if \code{ynew} is not \code{NULL}.
#' @export
predict.refmodel <- function(object, znew, ynew = NULL, offsetnew = NULL,
                             weightsnew = NULL, type = 'response', ...) {
  # Predictions from a reference model: the posterior-averaged expected value
  # for new points (ynew = NULL), or the log predictive density at ynew.
  #
  # object     - refmodel (as built by init_refmodel; family lives in $fam).
  # znew       - matrix of new predictor values, one row per observation.
  # ynew       - optional observed targets; if given, lpd values are returned.
  # offsetnew  - offsets for the new observations (default: zeros).
  # weightsnew - observation weights, e.g. binomial trials (default: ones).
  # type       - 'response' (default) or 'link' scale for the predictions.
  #
  # FIX: use inherits() instead of `%in% class(...)` for the class test.
  if (inherits(object, 'datafit'))
    stop('Cannot make predictions with data reference only.')
  if (is.null(offsetnew)) offsetnew <- rep(0, nrow(znew))
  if (is.null(weightsnew)) weightsnew <- rep(1, nrow(znew))
  mu <- object$predfun(znew, offsetnew)
  if (is.null(ynew)) {
    if (type == 'link')
      # BUG FIX: init_refmodel stores the family under 'fam', not 'family';
      # the old object$family$linkfun indexed a nonexistent field and crashed.
      pred <- object$fam$linkfun(mu)
    else
      pred <- mu
    # integrate over the posterior draws
    if (NCOL(pred) > 1)
      pred <- rowMeans(pred)
    return(pred)
  } else {
    # evaluate the log predictive density at the given ynew values
    loglik <- object$fam$ll_fun(mu, object$dis, ynew, weightsnew)
    S <- ncol(loglik)
    lpd <- apply(loglik, 1, log_sum_exp) - log(S)
    return(lpd)
  }
}
|
/R/refmodel.R
|
no_license
|
yinsenm/projpred
|
R
| false
| false
| 14,065
|
r
|
#' Get reference model structure
#'
#' Generic function that can be used to create and fetch the reference model structure
#' for all those objects that have this method. All these implementations are wrappers
#' to the \code{\link{init_refmodel}}-function so the returned object has the same type.
#'
#' @name get-refmodel
#'
#' @param object Object based on which the reference model is created. See possible types below.
#' @param ... Arguments passed to the methods.
#'
#' @return An object of type \code{refmodel} (the same type as returned by \link{init_refmodel})
#' that can be passed to all the functions that
#' take the reference fit as the first argument, such as \link{varsel}, \link{cv_varsel}, \link{project},
#' \link[=proj-pred]{proj_predict} and \link[=proj-pred]{proj_linpred}.
#'
#' @examples
#' \donttest{
#' ### Usage with stanreg objects
#' fit <- stan_glm(y~x, binomial())
#' ref <- get_refmodel(fit)
#' print(class(ref))
#'
#' # variable selection, use the already constructed reference model
#' vs <- varsel(ref)
#' # this will first construct the reference model and then execute
#' # exactly the same way as the previous command (the result is identical)
#' vs <- varsel(fit)
#' }
#'
NULL
#' @rdname get-refmodel
#' @export
get_refmodel <- function (object, ...) {
UseMethod("get_refmodel", object)
}
#' @rdname get-refmodel
#' @export
get_refmodel.refmodel <- function(object, ...) {
# if the object is reference model already, then simply return it as is
object
}
#' @rdname get-refmodel
#' @export
get_refmodel.vsel <- function(object, ...) {
# the reference model is stored in vsel-object
object$refmodel
}
#' @rdname get-refmodel
#' @export
get_refmodel.cvsel <- function(object, ...) {
# the reference model is stored in cvsel object
object$refmodel
}
#' @rdname get-refmodel
#' @export
get_refmodel.stanreg <- function(object, ...) {
# the fit is an rstanarm-object
if (!requireNamespace("rstanarm", quietly = TRUE)) {
stop("You need package \"rstanarm\". Please install it.",
call. = FALSE)
}
if ('lmerMod' %in% class(object))
stop('stan_lmer and stan_glmer are not yet supported.')
families <- c('gaussian','binomial','poisson')
if (!(family(object)$family %in% families))
stop(paste0('Only the following families are currently supported:\n',
paste(families, collapse = ', '), '.'))
# fetch the draws
samp <- as.data.frame(object)
ndraws <- nrow(samp)
# data, family and the predictor matrix x
data <- object$data
y_ind <- which(apply(data, 2, function(col) all(col==rstanarm::get_y(object))))
z <- data[,-y_ind,drop=F]
fam <- kl_helpers(family(object))
x <- rstanarm::get_x(object)
rownames(x) <- NULL # ignore the rownames
x <- x[, as.logical(attr(x, 'assign')), drop=F] # drop the column of ones
attr(x, 'assign') <- NULL
y <- unname(rstanarm::get_y(object))
dis <- samp$sigma %ORifNULL% rep(0, ndraws) # TODO: handle other than gaussian likelihoods..
offset <- object$offset %ORifNULL% rep(0, nobs(object))
intercept <- as.logical(attr(object$terms,'intercept') %ORifNULL% 0)
predfun <- function(zt) t(rstanarm::posterior_linpred(object, newdata=data.frame(zt), transform=T, offset=rep(0,nrow(zt))))
wsample <- rep(1/ndraws, ndraws) # equal sample weights by default
wobs <- unname(weights(object)) # observation weights
if (length(wobs)==0) wobs <- rep(1,nrow(z))
# cvfun for k-fold cross-validation
cvfun <- function(folds) {
cvres <- rstanarm::kfold(object, K = max(folds), save_fits = T, folds = folds)
fits <- cvres$fits[,'fit']
lapply(fits, function (fit) {
dis <- as.data.frame(fit)$sigma # NOTE: this works only for Gaussian family
predfun <- function(zt) t(rstanarm::posterior_linpred(fit, newdata=data.frame(zt), transform=T, offset=rep(0,nrow(zt))))
list(predfun=predfun, dis=dis)
})
}
init_refmodel(z=z, y=y, family=fam, x=x, predfun=predfun, dis=dis, offset=offset,
wobs=wobs, wsample=wsample, intercept=intercept, cvfits=NULL, cvfun=cvfun)
}
#' Custom reference model initialization
#'
#' Initializes a structure that can be used as a reference fit for the
#' projective variable selection. This function is provided to allow construction
#' of the reference fit from arbitrary fitted models, because only limited
#' information is needed for the actual projection and variable selection.
#'
#' @param z Predictor matrix of dimension \code{n}-by-\code{dz} containing the training
#' features for the reference model. Rows denote the observations and columns the different features.
#' @param y Vector of length \code{n} giving the target variable values.
#' @param family \link{family} object giving the model family
#' @param x Predictor matrix of dimension \code{n}-by-\code{dx} containing the candidate
#' features for selection (i.e. variables from which to select the submodel). Rows denote
#' the observations and columns the different features. Notice that this can
#' different from \code{z}. If missing, same as \code{z} by default.
#' @param predfun Function that takes a \code{nt}-by-\code{dz} test predictor matrix \code{zt} as an input
#' (\code{nt} = # test points, \code{dz} = number of features in the reference model) and outputs
#' a \code{nt}-by-\code{S} matrix of expected values for the target variable \code{y},
#' each column corresponding to one posterior draw for the parameters in the reference model
#' (the number of draws \code{S} can also be 1). Notice that the output should be computed without
#' any offsets, these are automatically taken into account internally, e.g. in cross-validation.
#' If omitted, then the returned object will be 'data reference', that is, it can be used to compute
#' penalized maximum likelihood solutions such as Lasso (see examples below and in the quickstart vignette.)
#' @param dis Vector of length \code{S} giving the posterior draws for the dispersion parameter
#' in the reference model if there is such a parameter in the model family. For Gaussian
#' observation model this is the noise std \code{sigma}.
#' @param offset Offset to be added to the linear predictor in the projection. (Same as in
#' function \code{glm}.)
#' @param wobs Observation weights. If omitted, equal weights are assumed.
#' @param wsample vector of length \code{S} giving the weights for the posterior draws.
#' If omitted, equal weights are assumed.
#' @param intercept Whether to use intercept. Default is \code{TRUE}.
#' @param cvfun Function for performing K-fold cross-validation. The input is an \code{n}-element
#' vector where each value is an integer between 1 and K denoting the fold for each observation.
#' Should return a list with K elements, each of which is a list with fields \code{predfun} and
#' \code{dis} (if the model has a dispersion parameter) which are defined the same way as the arguments
#' \code{predfun} and \code{dis} above but are computed using only the corresponding subset of the data.
#' More precisely, if \code{cvres} denotes
#' the list returned by \code{cvfun}, then \code{cvres[[k]]$predfun} and \code{cvres[[k]]$dis} must be computed
#' using only data from indices \code{folds != k}, where \code{folds} is the \code{n}-element input for
#' \code{cvfun}. Can be omitted but either \code{cvfun} or \code{cvfits} is needed for K-fold cross-validation
#' for genuine reference models. See example below.
#' @param cvfits A list with K elements that has the same format as the value returned by \code{cvfun}, but
#' each element of \code{cvfits} must also contain a field \code{omitted} which indicates the indices that
#' were left out for the corresponding fold. Usually it is easier to specify \code{cvfun} but this can be useful
#' if you have already computed the cross-validation for the reference model and would like to avoid
#' recomputing it. Can be omitted but either \code{cvfun} or \code{cvfits} is needed for K-fold cross-validation
#' for genuine reference models.
#' @param ... Currently ignored.
#'
#' @return An object that can be passed to all the functions that
#' take the reference fit as the first argument, such as \link{varsel}, \link{cv_varsel},
#' \link[=proj-pred]{proj_predict} and \link[=proj-pred]{proj_linpred}.
#'
#' @examples
#' \donttest{
#'
#' # generate some toy data
#' set.seed(1)
#' n <- 100
#' d <- 10
#' x <- matrix(rnorm(n*d), nrow=n, ncol=d)
#' b <- c(c(1,1),rep(0,d-2)) # first two variables are relevant
#' y <- x %*% b + rnorm(n)
#'
#' # fit the model (this uses rstanarm for posterior inference,
#' # but any other tool could also be used)
#' fit <- stan_glm(y~x, family=gaussian(), data=data.frame(x=I(x),y=y))
#' draws <- as.matrix(fit)
#' a <- draws[,1] # intercept
#' b <- draws[,2:(ncol(draws)-1)] # regression coefficients
#' sigma <- draws[,ncol(draws)] # noise std
#'
#' # initialize the reference model structure
#' predfun <- function(xt) t( b %*% t(xt) + a )
#' ref <- init_refmodel(x,y, gaussian(), predfun=predfun, dis=sigma)
#'
#' # variable selection based on the reference model
#' vs <- cv_varsel(ref)
#' varsel_plot(vs)
#'
#'
#' # pass in the original data as 'reference'; this allows us to compute
#' # traditional estimates like Lasso
#' dref <- init_refmodel(x,y,gaussian())
#' lasso <- cv_varsel(dref, method='l1') # lasso
#' varsel_plot(lasso, stat='rmse')
#'
#' }
#'
#' @export
init_refmodel <- function(z, y, family, x=NULL, predfun=NULL, dis=NULL, offset=NULL,
                          wobs=NULL, wsample=NULL, intercept=TRUE, cvfun=NULL, cvfits=NULL, ...) {
  n <- NROW(z)
  family <- kl_helpers(family)

  # defaults: candidate features are the reference features, no offset
  if (is.null(x))
    x <- z
  if (is.null(offset))
    offset <- rep(0, n)

  # put y and the observation weights into a standard form
  target <- .get_standard_y(y, wobs, family)
  y <- target$y
  wobs <- target$weights

  if (is.null(predfun)) {
    # no prediction function given, so the 'reference model' will simply contain the
    # observed data as the fitted values (a 'datafit')
    predmu <- function(z, offset=0) matrix(rep(NA, NROW(z)))
    mu <- y
    proper_model <- FALSE
  } else {
    # genuine reference model; fold the offset into the prediction function on
    # the link scale, then map back to the response scale
    predmu <- function(z, offset=0) family$linkinv( family$linkfun(predfun(z)) + offset )
    mu <- predmu(z, offset)
    if (NROW(y) != NROW(mu))
      stop(paste0('The number of rows in the output of predfun(z) does not match with the given y; ',
                  'predfun seems to be misspecified.'))
    proper_model <- TRUE
  }

  if (proper_model)
    if (.has.dispersion(family) && is.null(dis))
      stop(sprintf('Family %s needs a dispersion parameter so you must specify input argument \'dis\'.', family$family))

  mu <- unname(as.matrix(mu))
  S <- NCOL(mu) # number of posterior draws in the reference model

  # fill in defaults for anything still missing
  if (is.null(dis))
    dis <- rep(0, S)
  if (is.null(wobs))
    wobs <- rep(1, n)
  if (is.null(wsample))
    wsample <- rep(1, S)
  if (is.null(intercept))
    intercept <- TRUE
  wsample <- wsample/sum(wsample) # normalize the draw weights

  # log-likelihood of the training data under each draw (NULL for a datafit)
  if (proper_model)
    loglik <- t(family$ll_fun(mu, dis, y, wobs))
  else
    loglik <- NULL

  # figure out column names for the candidate variables
  if (!is.null(colnames(x)))
    coefnames <- colnames(x)
  else
    coefnames <- paste0('x', seq_len(ncol(x))) # seq_len is safe also when ncol(x) == 0

  if (!proper_model) {
    # dummy definition for cvfun; it will lead to standard cross-validation
    # for a datafit reference, see cv_varsel and get_kfold
    cvfun <- function(folds) lapply(seq_len(max(folds)), function(k) list())
  }

  refmodel <- list(z=z, x=x, y=y, fam=family, mu=mu, dis=dis, nobs=n, coefnames=coefnames,
                   offset=offset, wobs=wobs, wsample=wsample, intercept=intercept,
                   predfun=predmu, loglik=loglik, cvfits=cvfits, cvfun=cvfun)

  # define the class of the returned object to be 'refmodel' and additionally 'datafit'
  # if only the observed data was provided and no actual function for predicting test data
  class(refmodel) <- 'refmodel'
  if (!proper_model)
    class(refmodel) <- c(class(refmodel), 'datafit')

  return(refmodel)
}
#' Predict method for reference model objects
#'
#' Compute the predictions using the reference model, that is, compute the
#' expected value for the next observation, or evaluate the log-predictive
#' density at a given point.
#'
#' @param object The object of class \code{refmodel}.
#' @param znew Matrix of predictor values used in the prediction.
#' @param ynew New (test) target variables. If given, then the log predictive density
#' for the new observations is computed.
#' @param offsetnew Offsets for the new observations. By default a vector of
#' zeros.
#' @param weightsnew Weights for the new observations. For binomial model,
#' corresponds to the number trials per observation. Has effect only if \code{ynew} is specified.
#' By default a vector of ones.
#' @param type Scale on which the predictions are returned. Either 'link' (the latent function
#' value, from -inf to inf) or 'response' (the scale on which the target \code{y} is measured,
#' obtained by taking the inverse-link from the latent value).
#' @param ... Currently ignored.
#'
#' @return Returns either a vector of predictions, or vector of log predictive densities evaluated
#' at \code{ynew} if \code{ynew} is not \code{NULL}.
#' @export
predict.refmodel <- function(object, znew, ynew = NULL, offsetnew = NULL,
                             weightsnew = NULL, type = 'response', ...) {
  # Predictions with a data-only reference make no sense: predfun returns NAs.
  if ('datafit' %in% class(object))
    stop('Cannot make predictions with data reference only.')
  if (is.null(offsetnew)) offsetnew <- rep(0, nrow(znew))
  if (is.null(weightsnew)) weightsnew <- rep(1, nrow(znew))

  # expected values per posterior draw (nt-by-S), on the response scale
  mu <- object$predfun(znew, offsetnew)

  if (is.null(ynew)) {
    # point predictions on the requested scale
    if (type == 'link')
      # BUGFIX: the family object is stored under 'fam' (see init_refmodel),
      # not 'family'; object$family was NULL so this branch always errored
      pred <- object$fam$linkfun(mu)
    else
      pred <- mu
    # integrate over the posterior draws
    if (NCOL(pred) > 1)
      pred <- rowMeans(pred)
    return(pred)
  } else {
    # evaluate the log predictive density at the given ynew values,
    # averaging the per-draw likelihoods via log-sum-exp
    loglik <- object$fam$ll_fun(mu, object$dis, ynew, weightsnew)
    S <- ncol(loglik)
    lpd <- apply(loglik, 1, log_sum_exp) - log(S)
    return(lpd)
  }
}
|
# Plot 4: a 2x2 panel of time series from the UCI "Individual household
# electric power consumption" data set, restricted to 2007-02-01/02.
# BUGFIX: use library() instead of require() for a hard dependency --
# require() only returns FALSE on failure, so the script would continue
# and fail later with a confusing error.
library(sqldf)

# Filter the two relevant days straight from the raw file via SQL, which
# avoids loading the full data set into memory first.
data <- read.csv.sql( file='./household_power_consumption.txt',
                      sep=";",
                      sql="select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
                      header=TRUE)

# Combine Date and Time into a single timestamp for the x-axes.
data$Timestamp <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")

png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2)) # 2x2 grid, filled row by row

# top-left: global active power
plot(data$Timestamp, data$Global_active_power, type="l", xlab="", ylab="Global Active Power")
# top-right: voltage
plot(data$Timestamp, data$Voltage, type="l", xlab="datetime", ylab="Voltage")
# bottom-left: the three sub-metering series overlaid
plot(data$Timestamp, data$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(data$Timestamp, data$Sub_metering_2, type = "l", col = "red")
lines(data$Timestamp, data$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, bty='n', col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# bottom-right: global reactive power
plot(data$Timestamp, data$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")

dev.off()
|
/ExploratoryDataAnalysis/project1/plot4.R
|
no_license
|
clauraliu/myrepo
|
R
| false
| false
| 1,022
|
r
|
# Plot 4: a 2x2 panel of time series from the UCI "Individual household
# electric power consumption" data set, restricted to 2007-02-01/02.
# BUGFIX: use library() instead of require() for a hard dependency --
# require() only returns FALSE on failure, so the script would continue
# and fail later with a confusing error.
library(sqldf)

# Filter the two relevant days straight from the raw file via SQL, which
# avoids loading the full data set into memory first.
data <- read.csv.sql( file='./household_power_consumption.txt',
                      sep=";",
                      sql="select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
                      header=TRUE)

# Combine Date and Time into a single timestamp for the x-axes.
data$Timestamp <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")

png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2)) # 2x2 grid, filled row by row

# top-left: global active power
plot(data$Timestamp, data$Global_active_power, type="l", xlab="", ylab="Global Active Power")
# top-right: voltage
plot(data$Timestamp, data$Voltage, type="l", xlab="datetime", ylab="Voltage")
# bottom-left: the three sub-metering series overlaid
plot(data$Timestamp, data$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(data$Timestamp, data$Sub_metering_2, type = "l", col = "red")
lines(data$Timestamp, data$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, bty='n', col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# bottom-right: global reactive power
plot(data$Timestamp, data$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")

dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.