blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M β | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 β | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 β | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71a02cbc5548f1b94d3153410c8f31acdd39c109 | 59000449dc5950d0bd0c1c41cf3613748ce6d01c | /R/get.loadings.R | 37edf18c770afe54ad474581319923b6f5d0973f | [] | no_license | JMMaronge/medals | ff04cd980304aef4c42431b820f1e3f2d29fc9f4 | 543243e12f386ee3e6b8a8cd8b0b6e998a315e5e | refs/heads/master | 2020-05-21T13:47:35.370634 | 2018-11-13T04:08:34 | 2018-11-13T04:08:34 | 64,878,710 | 3 | 2 | null | 2016-09-24T16:34:18 | 2016-08-03T20:55:17 | HTML | UTF-8 | R | false | false | 394 | r | get.loadings.R | #' A function to create the loadings matrix for MEDALS.
#'
#' This function allows you to create the loadings matrix for PC scores in the MEDALS pipeline.
#' @param cov.mat The crossproduct matrix ($X^TX$) for decomposition to get loadings. Should be the output of imaging.cp.mat()
#' @export
#' Compute the loadings matrix from a cross-product matrix.
#'
#' Decomposes the cross-product matrix (X'X) with a singular value
#' decomposition and returns the right-singular vectors, which serve as
#' the PC loadings.  `nu = 0` skips computing the unneeded left-singular
#' vectors.
#'
#' @param cov.mat Square cross-product matrix, e.g. output of imaging.cp.mat().
#' @return Numeric matrix whose columns are the loadings.
get.loadings <- function(cov.mat){
  svd(cov.mat, nu = 0)$v
}
|
1499a77d6ac1740fbc82a0f184102f2e4ea533cc | ba3f90e83341174a1970706c7ce54c2d570ac828 | /AnalyticsEdge/Complete/Week 2/PS2 - Climate Change/climateChange.R | cea4d5c00f07df775f2cfe81d13908a8689f771f | [] | no_license | sivansasidharan/Edx-References | 1dd9eff813cc560c59e51e70bdb5888a462a022f | 43b36fbd79f57d6ffb50714b580250cb0cf8c214 | refs/heads/master | 2020-03-27T23:30:31.343412 | 2018-09-04T09:40:32 | 2018-09-04T09:40:32 | 147,321,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,073 | r | climateChange.R | #!/usr/bin/Rscript
# Loading data
# The file climate_change.csv contains climate data from May 1983 to December 2008.
# (Expects the csv in the current working directory.)
climate <- read.csv("climate_change.csv")
str(climate)
# PROBLEM 1.1 - CREATING OUR FIRST MODEL
# Split the data into a training set, consisting of all the observations up to
# and including 2006, and a testing set consisting of the remaining years.
climateTrain <- subset(climate, climate$Year <= 2006)
climateTest <- subset(climate, climate$Year > 2006)
# Build a linear regression model using all of the independent variables
# (except Year and Month) to predict the dependent variable Temp; report R^2.
climateReg <- lm(Temp ~ MEI + CO2 + CH4 + N2O + CFC.11 + CFC.12 + TSI + Aerosols,
                 data = climateTrain)
# Which variables are significant in the model? We will consider a variable
# significant only if the p-value is below 0.05.
summary(climateReg)
# PROBLEM 2.1 - UNDERSTANDING THE MODEL
# Which of the following is the simplest correct explanation for this contradiction?
## Think yourself! :)
# PROBLEM 2.2 - UNDERSTANDING THE MODEL
# Compute the correlations between all the variables in the training set. Which
# of the following independent variables is N2O highly correlated with
# (absolute correlation greater than 0.7)?
cor(climateTrain)
# PROBLEM 3 - SIMPLIFYING THE MODEL
# Given that the correlations are so high, focus on the N2O variable and build
# a model with only MEI, TSI, Aerosols and N2O, using the training set.
climateReg2 <- lm(Temp ~ MEI + N2O + TSI + Aerosols, data = climateTrain)
# Enter the coefficient of N2O in this reduced model:
summary(climateReg2)
# PROBLEM 4 - AUTOMATICALLY BUILDING THE MODEL
# Enter the R^2 value of the model produced by the step function:
stepModel <- step(climateReg)
summary(stepModel)
# PROBLEM 5 - TESTING ON UNSEEN DATA
# Enter the testing set R^2:
predictTest <- predict(stepModel, newdata = climateTest)
summary(predictTest)
# Compute R-squared on the test set (SST uses the training-set mean as baseline).
SSE <- sum((climateTest$Temp - predictTest)^2)
SST <- sum((climateTest$Temp - mean(climateTrain$Temp))^2)
1 - SSE/SST |
f90f91e7e2c03e76056e444e18d01949f098e8d6 | a5ced02be5ef57cfc093b9a77fbb71cdb18d9d76 | /misc/man-deprecated/createStaticNASIS.Rd | f1c95933edaf4fcc3c0bd0be7226523717ca2a98 | [] | no_license | Emory-ENVS-SihiLab/soilDB | e882de8337a3f3bd9943046c781f42a473723669 | fca026cc1039f3f8936b70d0efe8c092950db4ee | refs/heads/master | 2023-08-13T16:25:03.372504 | 2021-09-18T00:48:57 | 2021-09-18T00:48:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,262 | rd | createStaticNASIS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createStaticNASIS.R
\name{createStaticNASIS}
\alias{createStaticNASIS}
\title{Create a memory or file-based instance of NASIS database (for selected tables)}
\usage{
createStaticNASIS(
tables = NULL,
SS = FALSE,
systables = FALSE,
static_path = NULL,
output_path = NULL
)
}
\arguments{
\item{tables}{Character vector of target tables. Default: \code{NULL} is all tables meeting the following criteria.}
\item{SS}{Logical. Include "selected set" tables (ending with suffix \code{"_View1"}). Default: \code{FALSE}}
\item{systables}{Logical. Include "system" tables (starting with prefix \code{"system"}). Default: \code{FALSE}}
\item{static_path}{Optional: path to SQLite database containing NASIS table structure; Default: \code{NULL}}
\item{output_path}{Optional: path to new/existing SQLite database to write tables to. Default: \code{NULL} returns table results as named list.}
}
\value{
A named list of results from calling \code{dbQueryNASIS} for all columns in each NASIS table.
}
\description{
Create a memory or file-based instance of NASIS database (for selected tables)
}
\examples{
\dontrun{
str(createStaticNASIS(tables = c("calculation","formtext")))
}
}
|
c856f6807e644824d48c87948c1d3ab0455b8cf3 | 956f493986a2e4836cd7d5565fb23be636645a24 | /man/CTFDeserializer.Rd | 47a4329769837f64699100990ae3c96a005b335e | [
"MIT"
] | permissive | Bhaskers-Blu-Org2/CNTK-R | d2fcb0eab33a5f6a9134fa20937622b308590b4a | 5e2b8f5320a48dc492fa7dd1654532dd9e3e856a | refs/heads/master | 2021-08-20T10:36:09.764093 | 2017-11-28T22:55:53 | 2017-11-28T22:55:53 | 276,122,540 | 0 | 1 | MIT | 2020-06-30T14:28:11 | 2020-06-30T14:28:10 | null | UTF-8 | R | false | true | 732 | rd | CTFDeserializer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{CTFDeserializer}
\alias{CTFDeserializer}
\title{CTFDeserializer}
\usage{
CTFDeserializer(filename, streams)
}
\arguments{
\item{filename}{A string containing the path to the data location}
\item{streams}{A python dictionary-like object that contains a mapping from
stream names to StreamDef objects. Each StreamDef object configures an input
stream.}
}
\description{
Configures the CNTK text-format reader that reads text-based files.
}
\details{
Form: `[Sequence_Id] (Sample)+` where `Sample=|Input_Name (Value )*`
}
\references{
See also \url{https://www.cntk.ai/pythondocs/cntk.io.html?highlight=ctfdeserializer#cntk.io.CTFDeserializer}
}
|
9d4f12b0c31fa3e6b2acb467cb47f2ef1ce5aff4 | 9512fa6d10e69e32f6b14e68ec6086d09909f537 | /Health Index/Functions/HIfunctions.r | 98cdcd648a0fcb27417b68b70b41cc2883b71891 | [] | no_license | KiriakosXanthopoulos/BioArchaeology_In_R | 701797c08c79a981f525d858d29dbef85c67ad95 | c09567908f9c11d6075ec7ff83209840a649d4c0 | refs/heads/master | 2021-01-17T21:29:13.918766 | 2016-05-24T17:56:27 | 2016-05-24T17:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,555 | r | HIfunctions.r | #Collection of all the functions used in the HealthIndex
#####################################################################################
#1. LovejoyAdj
#' Adjust a skeletal age-at-death estimate for known ageing bias.
#' Applies the decade-specific corrections from Lovejoy et al. (1985);
#' ages under 18 (and NA) pass through unchanged.  With info = TRUE the
#' function instead returns a wrapped description of the method.
#'
#' @param age Single numeric age estimate.
#' @param info Logical; TRUE returns help text instead of a value.
LovejoyAdj <- function(age, info = FALSE) {
  if (info == TRUE) {
    # Return the wrapped help text instead of an adjusted age.
    msg <- "Age at death estimations from skeletal remains have been show to be inaccurate.
Spefically there is a tendency to underestimate, particularly in older individuals.
This function adjusts reported age using the method outlined in lovejoy et al. (1985).
The function takes a single numeric input. To apply to dataframe the following steps are required:
Dataframe$Adjusted<-NA
for(i in 1:length(Dataframe$Age)){Dataframe$Adjusted[i]<-LovejoyAdj(Dataframe$Age[i])}"
    return(strwrap(msg))
  }
  # Numeric branch: flat guard-clause ladder replacing the nested if/else tree.
  if (is.na(age)) return(NA)
  if (age >= 18 & age < 30) return(age - 1.4)
  if (age >= 30 & age < 40) return(age + 0.8)
  if (age >= 40 & age < 50) return(age + 7.4)
  if (age >= 50 & age < 60) return(age + 6.8)
  if (age >= 60) return(age + 13.6)
  age  # under 18: no correction applied
}
#####################################################################################
#2.StatureLogic
# Convert a long-bone measurement to a 0-100 stature score: full marks at
# or above the modern standard A, zero at or below the 3-SD floor B, and
# a linear interpolation in between.
StatureLogic <- function(x, A, B) {
  if (x >= A) {
    return(100)                      # meets or exceeds the modern standard
  }
  if (x <= B) {
    return(0)                        # at or below three SD under the standard
  }
  100 - ((A - x) / (A - B) * 100)    # graded score strictly between 0 and 100
}
#####################################################################################
#3. Stature
# Score stature (0-100) against the Maresh reference data.
#
# Args:
#   femur:  femur length measurement.
#   Age:    estimated age in years; ages rounding into 12-17 are excluded
#           (not covered by the reference data) and return NA.
#   Sex:    coded 1/2 = female, 3/4 = male, 5/6 = unknown (per original
#           comments); NA is treated as unknown.
#   Maresh: reference data frame with columns age, UL/U3SD (unknown sex),
#           FL/F3SD (female), ML/M3SD (male).
# Returns:
#   Numeric score from StatureLogic(), or NA when inputs are missing, the
#   age band is excluded, or Sex is an unrecognised code.
Stature <- function(femur, Age, Sex, Maresh) {
  if (anyNA(c(Age, femur)) | round(Age) %in% 12:17) {
    # missing data, or the 12-17 band excluded in S&R
    s <- NA
  } else if (round(Age) >= 18) {
    # Adults: compare against the age-18 reference row for the given sex.
    # The NA/unknown case is tested first so the later numeric comparisons
    # never see an NA Sex (the original sequential ifs errored on
    # `if (Sex == 1 | Sex == 2)` when Sex was NA).
    if (is.na(Sex) || Sex %in% c(5, 6)) {
      s <- StatureLogic(femur, Maresh$UL[Maresh$age == 18], Maresh$U3SD[Maresh$age == 18])
    } else if (Sex %in% c(1, 2)) {
      s <- StatureLogic(femur, Maresh$FL[Maresh$age == 18], Maresh$F3SD[Maresh$age == 18])
    } else if (Sex %in% c(3, 4)) {
      s <- StatureLogic(femur, Maresh$ML[Maresh$age == 18], Maresh$M3SD[Maresh$age == 18])
    } else {
      s <- NA  # unrecognised sex code (original left s undefined and errored)
    }
  } else {
    # Juveniles: use the reference row whose age is closest to Age.
    s <- StatureLogic(femur, Maresh$UL[which.min(abs(Maresh$age - Age))], Maresh$U3SD[which.min(abs(Maresh$age - Age))])
  }
  s
}
#####################################################################################
#4. Cat2Percent
# Map an ordinal category score (0-5) to a percentage, where 'c' selects
# the scale column ("two".."five"), by name or by position, exactly as the
# original data-frame lookup did.  Scores outside the scale yield NA.
Cat2Percent <- function(x, c) {
  lookup <- data.frame(
    cat   = 0:5,
    two   = c(NA, 100, 0, NA, NA, NA),
    three = c(NA, 100, 50, 0, NA, NA),
    four  = c(NA, 100, 67, 33, 0, NA),
    five  = c(NA, 100, 75, 50, 25, 0)
  )
  lookup[lookup$cat == x, c]
}
#####################################################################################
#5.DentalHealth
# Dental health score (0-100) from tooth counts.  Returns NA when any
# count is missing or when fewer than 8 tooth positions (present +
# expected) are observable.  75% of the score reflects dentition
# completeness, 25% an abscess penalty.
DentalHealth <- function(SUMTET, SUMPRE, SUMCAV, SUMABS) {
  counts <- c(SUMTET, SUMPRE, SUMCAV, SUMABS)
  if (sum(is.na(counts)) > 0 | SUMTET + SUMPRE < 8) {
    return(NA)
  }
  # Fraction of the dentition that is neither missing nor carious.
  completeness <- 1 - ((SUMPRE + SUMCAV) / (SUMPRE + SUMTET))
  # Abscess penalty: none = full credit, one = half credit, more = none.
  if (SUMABS == 0) penalty <- 1
  if (SUMABS == 1) penalty <- 0.5
  if (SUMABS > 1) penalty <- 0
  (completeness * 75) + (penalty * 25)
}
#####################################################################################
#6.Fact2NUM
# Convert a factor (or character) vector to numeric via its labels, not
# its underlying integer codes.  Non-numeric labels such as "." become
# NA by coercion (with a warning), as in the original two-step version.
Fact2Num <- function(x) {
  as.numeric(as.character(x))
}
#####################################################################################
#7.B2DTotal
# Accumulate per-age-period totals of column 'D1column' into row 'D2row'
# of the summary frame D2, using the birth-to-death period code D1$B2D
# (1 = died in 0-4, ..., 6 = died at 45+).  An individual contributes to
# every period it was alive in.  D2 is returned unchanged (apart from the
# contributor count) when nobody has a non-NA value in the chosen column.
B2DTotal <- function(D1, D2, D2row, D1column) {
  vals <- D1[, D1column]
  D2$contributors[D2row] <- sum(!is.na(vals))
  if (D2$contributors[D2row] > 0) {
    D2$Age0_4[D2row]   <- sum(vals, na.rm = TRUE)                   # everyone reaches 0-4
    D2$Age5_14[D2row]  <- sum(vals[D1$B2D %in% 2:6], na.rm = TRUE)  # survived past 0-4
    D2$Age15_24[D2row] <- sum(vals[D1$B2D %in% 3:6], na.rm = TRUE)
    D2$Age25_34[D2row] <- sum(vals[D1$B2D %in% 4:6], na.rm = TRUE)
    D2$Age35_44[D2row] <- sum(vals[D1$B2D %in% 5:6], na.rm = TRUE)
    D2$Age45[D2row]    <- sum(vals[D1$B2D == 6], na.rm = TRUE)      # survived to 45+
  }
  D2
}
#####################################################################################
#8.TenTotal
#' Accumulate per-age-period totals for a 0-100 indicator when each
#' individual contributes only a ten-year observation window, encoded in
#' D1$Ten (codes 01, 12, 23, 34, 45, 56, 60 — presumably the pair of age
#' periods the window spans; confirm against the caller).  For each
#' period the total is: full credit (100) for every contributor whose
#' window does NOT cover the period, plus the observed scores of those
#' whose window does.  D2row selects the target row of the summary frame
#' D2; D1column selects the indicator column of D1.
TenTotal<-function(D1,D2,D2row,D1column){
# contributors = individuals with a non-NA value in the chosen column
D2$contributors[D2row]<-length(D1[!(is.na(D1[,D1column])),D1column])
# drop the NA rows before computing the per-period totals
D1<- D1[!(is.na(D1[,D1column])),]
if(D2$contributors[D2row]==0){D2<-D2# if no contributors don't change
}else{
# NOTE(review): the length() term below uses c(01,12) but the matching
# sum() uses c(01,10) — asymmetric with every other row; confirm that 10
# is not a typo for 12.
D2$Age0_4[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12),D1column]))+sum(D1[D1$Ten %in% c(01,10),D1column])
D2$Age5_14[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23),D1column]))+sum(D1[D1$Ten %in% c(12,23),D1column])
D2$Age15_24[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23,34),D1column]))+sum(D1[D1$Ten %in% c(23,34),D1column])
D2$Age25_34[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23,34,45),D1column]))+sum(D1[D1$Ten %in% c(34,45),D1column])
D2$Age35_44[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23,34,45,56),D1column]))+sum(D1[D1$Ten %in% c(45,56),D1column])
# 45+: only windows touching the last periods (56, 60) can contribute
D2$Age45[D2row]<-sum(D1[D1$Ten %in% c(56,60),D1column])
}
D2
}
#####################################################################################
#9.TenTotal2
#' Variant of TenTotal for indicators that cannot be scored directly in
#' the two youngest periods: every contributor receives full credit (100)
#' for 0-4; for 5-14 only individuals whose window is 01 are excluded
#' from full credit; from 15-24 onward the arithmetic matches TenTotal.
#' (The Ten codes appear to identify the ten-year observation window —
#' the exact coding is not visible here; confirm against the caller.)
TenTotal2<-function(D1,D2,D2row,D1column){
# contributors = individuals with a non-NA value in the chosen column
D2$contributors[D2row]<-length(D1[!(is.na(D1[,D1column])),D1column])
# drop the NA rows before computing the per-period totals
D1<- D1[!(is.na(D1[,D1column])),]
if(D2$contributors[D2row]==0){D2<-D2
}else{
D2$Age0_4[D2row]<-(100*D2$contributors[D2row])#rm contributors, i.e those whose 10 years include this period
D2$Age5_14[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten== 01,D1column]))# rm contributors
D2$Age15_24[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23,34),D1column]))+sum(D1[D1$Ten %in% c(23,34),D1column])
D2$Age25_34[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23,34,45),D1column]))+sum(D1[D1$Ten %in% c(34,45),D1column])
D2$Age35_44[D2row]<-(100*D2$contributors[D2row])-(100*length(D1[D1$Ten %in% c(01,12,23,34,45,56),D1column]))+sum(D1[D1$Ten %in% c(45,56),D1column])
# 45+: only windows touching the last periods (56, 60) can contribute
D2$Age45[D2row]<-sum(D1[D1$Ten %in% c(56,60),D1column])
}
D2
}
#####################################################################################
#10. PYL
# Person-years lived (PYL) per age category, from estimated age at death
# (D1$Age) and the birth-to-death period code D1$B2D (1 = died in 0-4,
# 2 = died in 5-14, ..., 6 = died at 45+).  Per category:
# (category width x survivors) + partial years lived by those who died
# within it.  Averagepyl divides by the total number of individuals.
PYL <- function(D1) {
  age <- D1$Age
  period <- D1$B2D
  out <- data.frame(age = c("0_4", "5_14", "15_24", "25_34", "35_44", "45+"),
                    pyl = NA, Averagepyl = NA)
  out$pyl[1] <- (4 * length(age[period != 1])) + sum(age[period == 1])
  out$pyl[2] <- (10 * length(age[period %in% 3:6])) + sum(age[period == 2] - 4)
  out$pyl[3] <- (10 * length(age[period %in% 4:6])) + sum(age[period == 3] - 14)
  out$pyl[4] <- (10 * length(age[period %in% 5:6])) + sum(age[period == 4] - 24)
  out$pyl[5] <- (10 * length(age[period %in% 6])) + sum(age[period == 5] - 34)
  out$pyl[6] <- sum(age[period == 6] - 44)  # no survivors: everyone left dies in 45+
  out$Averagepyl <- out$pyl / length(age)   # years lived here by the average person
  out
}
#####################################################################################
|
f0fbc516b4019b7dd9d1c2c0bdc161c37223102c | 82116dcffc5fc59f80f143ca7b99902aa02a2e3e | /man/getSkylineSubset.Rd | 7b8896cf4b812bfea3c0747741db922fa78133e5 | [] | no_license | laduplessis/bdskytools | e4c6d719d07f439f284ab10dc2076c467ab8164a | c474052357f136080de9fb998c3a6447a0a28088 | refs/heads/master | 2021-01-17T20:32:09.107012 | 2018-11-28T16:28:25 | 2018-11-28T16:28:25 | 62,892,428 | 11 | 2 | null | null | null | null | UTF-8 | R | false | true | 361 | rd | getSkylineSubset.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Logfile_Utilities.R
\name{getSkylineSubset}
\alias{getSkylineSubset}
\title{Extract all matching parameters from the logfile}
\usage{
getSkylineSubset(logfile, par)
}
\description{
if par="R0" extract (R0s.1 R0s.2 R0s.3 etc.)
}
\details{
par needs to be at the start of the string
}
|
571daa37182e28f1c6c7c463892cd605dd911ca2 | 3de0f7ed52c248f04f1308573eec012fa76cc82b | /data/Scripts/config.r | aa4530e35b458a99722b4bacc3e5348c9949ab1c | [] | no_license | ozway/FinalLog | e3f684cf4b1f33250a023255ad286a20be86a637 | f18ed3d29d96c30e64bc9b45dee3dc2aeb20f9a6 | refs/heads/master | 2020-06-01T14:35:20.706408 | 2015-02-12T22:08:18 | 2015-02-12T22:08:18 | 30,726,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,778 | r | config.r | print.config <- function(config)
{
  # S3 print method for "config" objects: dumps the MCMC settings, the
  # simulation settings and the convergence criteria to the console.
  # Purely a side-effect function; the value of the final cat() is NULL.
  cat("MCMC parameters:\n")
  cat(paste("Number of samples between checks:", config$n.samples, "\n"))
  cat(paste("Min samples:", config$min.samples, "\n"))
  cat(paste("Max samples:", config$max.samples, "\n"))
  cat(paste("Reset QR until:", config$reset.qr, "samples\n"))
  cat(paste("Thining: store every", config$chain.thin, "iteration as sample\n"))
  cat(paste("Swap", config$swap * 100, "% of b matrix\n"))
  cat(paste("Swap b matrix if L1-norm is <", config$swapAt, "\n"))
  cat("\n")
  cat("Simulation parameters\n")
  cat(paste("Number of Cores", config$n.cores, "\n"))
  cat(paste("Number of Chains:", config$n.chains, "\n"))
  cat(paste("Parallel mode within chain:", config$parallel, "\n"))
  cat(paste("Samples used:", config$use.n.samples, "\n"))
  cat(paste("First", config$rm.first.aa, " AAs removed from sequence\n"))
  cat(paste("Sequences with less than", config$rm.short, "AAs ignored\n"))
  cat("List of AAs taken into account:\n\t");cat(config$aa);cat("\n")
  cat("\n")
  cat("Convergence criteria\n")
  # Geweke diagnostics apply to a single chain; Gelman & Rubin to multiple.
  if(config$n.chains < 2)
  {
    cat("Convergence test: Geweke\n")
    cat(paste("Convergence criterium: Z Score <", config$eps, "\n"))
    cat(paste("% chain used at the begining:", config$frac1, "\n"))
    cat(paste("% chain used at the end:", config$frac2, "\n"))
  }else{
    cat("Convergence test: Gelman & Rubin\n")
    cat(paste("Convergence criterium: Gelman Score <", config$eps, "\n"))
  }
  cat(paste("Use every", config$conv.thin, "sample for convergence test\n"))
  cat("\n")
}
# Shared constant keeping the sample-size settings in sync; removed again
# below once the config list has been built.
NUMBER <- 6000
config <- list(
  delta.a_12 = 0,
  a_2 = 1,
  # Number of samples between convergence checks.  Set equal to max/min
  # samples to avoid resetting the scale.
  n.samples = NUMBER,
  # Sample size for testing for convergence.  Once the convergence
  # threshold (eps) is reached, the posterior sample size equals this
  # value.  IMPORTANT: must be smaller than the thinned chain, otherwise
  # saving will crash!
  use.n.samples = NUMBER/5,
  # n.samples = 1000,
  # use.n.samples = 1000,
  n.chains = 1,       # number of chains
  n.cores = 1,        # total cpu cores (about 5*n.chains when parallel != "lapply")
  selected.env = 1,   # deprecated (used if the csv stores several datasets, e.g. conditions)
  min.samples=NUMBER, # minimum samples per chain; convergence criterion ignored until reached
  max.samples= NUMBER,# maximum samples per chain; MCMC stops here regardless of convergence
  reset.qr=0,         # stop resetting the QR matrix after this many (thinned) samples
  conv.thin=1,        # thinning for the convergence test (1 recommended if chain.thin != 1)
  chain.thin=10,      # runtime chain thinning, applied before the convergence-test sample
  rm.first.aa=0,      # remove the first rm.first.aa AAs (after the expected start codon)
  rm.short=0,         # ignore sequences shorter than rm.short AAs after trimming
  parallel="lapply",  # within-chain method: "lapply" = none, "mclapply" = parallel, others possible
  # Convergence threshold: multi-chain uses |Gelman score - 1| < eps,
  # single chain uses Geweke score < eps.
  eps=0.15,
  gf=0.4,             # growth factor for the convergence-test window (n.samples + gf*currSamples)
  frac1=0.1,          # Geweke (single chain): fraction at the start of the window used for the mean
  frac2=0.5,          # Geweke (single chain): fraction at the end of the window used for the mean
  swap=0.0,           # proportion of the b matrix (deltat, logmu) swapped; parameters chosen at random
  swapAt=0.0,         # Manhattan distance between consecutive convergence tests below which b is swapped
  #aa = c("D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "W", "Y", "Z"),
  #aa = c("A", "V"),
  aa = c("A", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y", "Z"), # AAs to take into account
  # TRUE: use SCUO initial conditions; FALSE: use empirical data instead.
  # (Was `T`, which is reassignable in R; TRUE is the safe literal.)
  use.scuo = TRUE
)
rm(NUMBER)
|
8cb59f469dd81a86bceb43f69b08a1246c5bd568 | f89ffc588173d602176706659bf0529e52f0ed8a | /Week7_Exercises.R | 0fc6ee1221d7930a19cb00a57c7949ecdca7f665 | [] | no_license | Viveniac/Advanced-R | cc7980d1629fe24510ff11ef7cd6a8d566f7b1af | ba0148478500687b9f4e958ff274e9347b3a91e2 | refs/heads/main | 2023-06-08T02:54:56.085732 | 2021-06-23T14:18:15 | 2021-06-23T14:18:15 | 379,626,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 403 | r | Week7_Exercises.R | # Week 7 Exercises #
## EX 1 --------------------------------------------------------------
# See if you can combine the gganimate and ggmap packages to create an animated map
# of violent crimes (from Week 7 material).
# Start with the point version of the data.
## EX 2 --------------------------------------------------------------
# Which of the offenses has the smallest number of points?
|
ff1a1c35ca484815e998adf7ff817c35c9a29ac0 | 17e0b4e4c0fddaa71ce2b137b7f59d17fa47243b | /res/and_zpx.r | 10e6088e508a33a8d2e3f21331d9202e50c836b9 | [
"MIT"
] | permissive | JSpuri/EmuParadise | 6f6d26c43d9dce8f05448b6c07db133d691e39b2 | b8f6cf8823f8553f28dab5c6b44df20978ad6ba0 | refs/heads/master | 2020-06-28T18:33:56.341244 | 2019-11-22T22:49:53 | 2019-11-22T22:49:53 | 200,309,043 | 0 | 0 | MIT | 2019-09-27T15:59:31 | 2019-08-02T23:26:20 | C | UTF-8 | R | false | false | 1,928 | r | and_zpx.r | | pc = 0xc002 | a = 0x00 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x00 | x = 0x02 | y = 0x0a | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc006 | a = 0x00 | x = 0x02 | y = 0x0a | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0015] = 0x0a |
| pc = 0xc008 | a = 0x05 | x = 0x02 | y = 0x0a | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00a | a = 0x00 | x = 0x02 | y = 0x0a | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x0015] = 0x0a |
| pc = 0xc00c | a = 0x00 | x = 0x03 | y = 0x0a | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00e | a = 0x00 | x = 0x03 | y = 0x0f | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc010 | a = 0x00 | x = 0x03 | y = 0x0f | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0015] = 0x0f |
| pc = 0xc012 | a = 0x05 | x = 0x03 | y = 0x0f | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc014 | a = 0x05 | x = 0x03 | y = 0x0f | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0015] = 0x0f |
| pc = 0xc016 | a = 0x05 | x = 0x01 | y = 0x0f | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc018 | a = 0x05 | x = 0x01 | y = 0x0e | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc01a | a = 0x05 | x = 0x01 | y = 0x0e | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0015] = 0x0e |
| pc = 0xc01c | a = 0x03 | x = 0x01 | y = 0x0e | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc01e | a = 0x02 | x = 0x01 | y = 0x0e | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0015] = 0x0e |
| pc = 0xc020 | a = 0x02 | x = 0xff | y = 0x0e | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc022 | a = 0x02 | x = 0xff | y = 0xc8 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc024 | a = 0x02 | x = 0xff | y = 0xc8 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0015] = 0xc8 |
| pc = 0xc026 | a = 0xca | x = 0xff | y = 0xc8 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc028 | a = 0xc8 | x = 0xff | y = 0xc8 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0015] = 0xc8 |
|
aaa9959027da66c4a243eab3fa03746ed5055b14 | e1c1e6a06250330968fe40ce6a1a8d7ac9ad1649 | /RScript/Run.R | 811d3754d400d45aa557b16c557e6c67e6c48cc8 | [] | no_license | nathanjenx/data_Mining | 778c0cf50ac0d4961e6860f6f9cfcf6b81026728 | 3dbfd7def03c9cebca98ea63f5a1f088c33b59c2 | refs/heads/master | 2020-04-02T22:48:03.934541 | 2018-10-28T15:53:25 | 2018-10-28T15:53:25 | 154,844,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,381 | r | Run.R | source("~/Documents/Uni/Data Mining/Project/RScript/Data.R")
source("~/Documents/Uni/Data Mining/Project/RScript/Analysis.R")
trainingData <- importTrainingData("~/Documents/Uni/Data Mining/Project/Original Data Download/all/train.csv")
mixes <- getMixes(trainingData)
#testData <- importTestData("~/Documents/Uni/Data Mining/Project/Original Data Download/all/test.csv")
testData <- importTrainingData("~/Documents/Uni/Data Mining/Project/Extra Data/testInstrumentCol.csv")
extraTrainingData <- importTrainingData("~/Documents/Uni/Data Mining/Project/Extra Data/singleInstrumentsTrain.csv")
instruments <- getDistinctInstruments(extraTrainingData)
# Pulls the instrument subset for "Accordian" from the script-level
# globals trainingData and mixes.  NOTE(review): the instrument name is
# hard-coded despite the generic function name, and the result is the
# value of an assignment, so it is returned invisibly — confirm callers
# capture it explicitly.
getByInstrument <- function(){
  inst <- getInstruments(trainingData, mixes, "Accordian")
}
# plotTrueMatrices(inst, "Accordian") ----- Too many data points to run pairs on
# Computes naive-Bayes-model accuracy over all single-instrument tracks.
# Relies on the script-level globals trainingData and mixes loaded at the
# top of this file; "?" (unknown) entries are dropped from the mix labels
# before scoring.
getNbmAccuracy <- function(){
  # individual (non-mixed) tracks extracted from the training data
  singleTrack <- getIndividualTracks(trainingData)
  # remove unknown mix labels before computing accuracy
  cleanedMixes <- mixes[!mixes %in% c("?")]
  accuracys <- getAccuracyForAllnbmV1(singleTrack, cleanedMixes)
  return(accuracys)
}
# Lookup table mapping each instrument to its top-level Hornbostel-Sachs
# class (Class1).  Replaces the original grow-by-rbind construction,
# which started from a zero-column seed frame (silently dropped by
# rbind.data.frame) and appended one row at a time; building the frame in
# a single call yields the same columns, values and default row names,
# and returns the result visibly.
# NOTE(review): "Accordian" and "precussion" spellings are kept as-is
# because downstream code matches on these exact strings.
mixes2 <- function(){
  data.frame(
    Instrument = c("Accordian", "AcousticBass", "AltoSaxophone",
                   "B-flatclarinet", "B-FlatTrumpet", "BaritoneSaxophone",
                   "BassSaxophone", "Cello", "CTrumpet", "DoubleBass",
                   "DTrumpet", "ElectricGuitar", "Marimba", "Oboe",
                   "Piano", "SopranoSaxophone", "TenorSaxophone",
                   "TenorTrombone", "Tuba", "Vibraphone", "Viola", "Violin"),
    Class1 = c("aerophone", "chordophone", "aerophone",
               "aerophone", "aerophone", "aerophone",
               "aerophone", "chordophone", "aerophone", "chordophone",
               "aerophone", "chordophone", "precussion", "aerophone",
               "chordophone", "aerophone", "aerophone",
               "aerophone", "aerophone", "idiophone", "chordophone", "Violin" == "x")
  )
}
# Lookup table mapping each top-level instrument class (Class1) to its
# Hornbostel-Sachs sub-class (Class2).  Built in one data.frame call
# instead of the original row-by-row rbind onto a zero-column seed frame;
# the result (columns, values, default row names) is unchanged and is
# returned visibly rather than as the invisible value of an assignment.
mapClasses <- function(){
  data.frame(
    Class1 = c("chordophone", "chordophone", "aerophone", "aerophone",
               "aerophone", "aerophone", "aerophone"),
    Class2 = c("chrd_simple", "chrd_composite", "aero_lip-vibrated",
               "aero_side", "aero_single-reed", "aero_double-reed",
               "aero_free-reed")
  )
}
# Lookup table mapping each instrument to its Hornbostel-Sachs sub-class
# (Class2).  Built directly rather than via 20 incremental map2Df()/rbind
# calls (quadratic grow-in-a-loop); columns, values and default row names
# are unchanged.  Marimba ("precussion") and Vibraphone ("idiophone")
# were commented out of the original and remain excluded here.
mapInst2Class <- function(){
  inst2Class <- data.frame(
    Instrument = c("Accordian", "AcousticBass", "AltoSaxophone",
                   "B-flatclarinet", "B-FlatTrumpet", "BaritoneSaxophone",
                   "BassSaxophone", "Cello", "CTrumpet", "DoubleBass",
                   "DTrumpet", "ElectricGuitar", "Oboe", "Piano",
                   "SopranoSaxophone", "TenorSaxophone", "TenorTrombone",
                   "Tuba", "Viola", "Violin"),
    Class2 = c("aero_free-reed", "chrd_composite", "aero_single-reed",
               "aero_single-reed", "aero_lip-vibrated", "aero_single-reed",
               "aero_single-reed", "chrd_composite", "aero_lip-vibrated",
               "chrd_composite", "aero_lip-vibrated", "chrd_composite",
               "aero_double-reed", "chrd_simple",
               "aero_single-reed", "aero_single-reed", "aero_side",
               "aero_lip-vibrated", "chrd_composite", "chrd_composite")
  )
  return(inst2Class)
}
map2Df <- function (names, d1, d2, outDf){
df <- data.frame(d1, d2)
names(df) <- names
return(rbind(outDf, df))
}
generatePredictionsFromNbmModel <- function(){
predictionsFull <- geNBMModels(extraTrainingData, testData, instruments)
pClean <- predicitionsCleansed(testData, predictionsFull)
prediction <- generatePredictions(pClean)
write_csv(prediction, "~/Desktop/rPredictAll2.csv")
} |
0506941b3c01eca4a5e3d238eb77d0e36cb3002f | 3ed202cbe6aa396223fcdbdd6031608d01acb1db | /yo_2019_calculate_weighted_points.R | be97f2c1d6d102e1fefd3900b6caa0487902a4d1 | [] | no_license | vkarhula/Data_Analysis | 7e79fef72439ff9ec015d2d64ecd2ab8c4d498ce | d94bae2fdff86d968a5fce3045ebb77dbea86e06 | refs/heads/master | 2022-09-25T05:07:18.520512 | 2020-06-06T18:21:15 | 2020-06-06T18:21:15 | 270,046,764 | 0 | 0 | null | 2020-06-06T18:18:00 | 2020-06-06T17:03:41 | null | UTF-8 | R | false | false | 7,379 | r | yo_2019_calculate_weighted_points.R | #########################################################################################
#
# This code calculates weighted points for medical faculty entrance exam for all students,
# who have grade in chemistry and biology.
# Data is sorted and stored in data frame, which is written in csv file.
# Some visualization implemented, further in Power BI.
#
# Code written by Virpi Karhula, 11st of May, 2020.
#
#########################################################################################
#--------------------- Reading data from csv file ---------------------------------------
# Read in grades of students from spring 2019
# Data file available in https://www.ylioppilastutkinto.fi/ext/data/FT2019KD4001.csv
yo2019k <- FT2019KD4001
#--------------------- Calculating weighted points, functions --------------------------------------
# Function retuns grade value weighted with corresponting coefficient for medical faculty
# grade is student's grade, 1 means I, 2 is A, 3 is B,... 7 is L
# co1 equals to i, co2 to A, co3 to B,... co7 to L
add_weighted_grade <- function(grade, co1, co2, co3, co4, co5, co6, co7){
weighted_grade_value <- 0
switch(grade,
"1" = weighted_grade_value <- co1,
"2" = weighted_grade_value <- co2,
"3" = weighted_grade_value <- co3,
"4" = weighted_grade_value <- co4,
"5" = weighted_grade_value <- co5,
"6" = weighted_grade_value <- co6,
"7" = weighted_grade_value <- co7
)
# print(weighted_grade_value)
return(weighted_grade_value)
}
# Testing
# add_weighted_grade(3,1,2,3,4,5,6,7) # testing
# Function calculates weighted points for one student
# Most important grades for medical faculty are included in calculations in this demo
# Max six subjects may be taken into account in calculations
# Note: Not all subjects have been included for calcultations,
# because limitations in data cause bigger errors than this
# Coefficients for grades can be found in opintopolku:
# https://opintopolku.fi/wp/opo/korkeakoulujen-haku/mika-korkeakoulujen-opiskelijavalinnoissa-muuttuu-vuoteen-2020-menessa/yliopistojen-todistusvalinnat-2020/
calculate_weighted_points <- function(index){
sum <- 0
sum_weighted <- 0
class(sum)
# A [,8], mother tongue, Finnish
# print(yo2019k[index,8])
if (!is.na(yo2019k[index,8])) {
# sum <- sum + yo2019k[index,8]
sum_weighted <- sum_weighted + add_weighted_grade(yo2019k[index,8], 0, 5.5, 11.0, 16.5, 22.0, 27.5, 33.0)
}
# O [,9], mother tongue, Swedish
if (!is.na(yo2019k[index,9])) {
sum_weighted <- sum_weighted + add_weighted_grade(yo2019k[index,9], 0, 5.5, 11.0, 16.5, 22.0, 27.5, 33.0)
}
# M [,16], mathematics, long
if (!is.na(yo2019k[index,16])) {
sum_weighted <- sum_weighted + add_weighted_grade(yo2019k[index,16], 0, 6.6, 13.2, 19.8, 26.4, 33.1, 39.7)
}
# N [,17], mathematics, short
if (!is.na(yo2019k[index,17])) {
sum_weighted <- sum_weighted + add_weighted_grade(yo2019k[index,17], 0, 4.7, 9.4, 14.1, 18.9, 23.6, 28.3)
}
# KE [,27], chemistry
if (!is.na(yo2019k[index,27])) {
sum_weighted <- sum_weighted + add_weighted_grade(yo2019k[index,27], 0, 5.7, 11.3, 17.0, 22.7, 28.3, 34.0)
}
# BI [,18], biology
if (!is.na(yo2019k[index,18])) {
sum_weighted <- sum_weighted + add_weighted_grade(yo2019k[index,18], 0, 5.4, 10.8, 16.2, 21.5, 26.9, 32.3)
}
# Two best points from the following three subjects are selected
max_three <- c()
#length(max_three)
# FY [,20], physics
if (!is.na(yo2019k[index,20])) {
max_three[length(max_three)+1] <- add_weighted_grade(yo2019k[index,20], 0, 5.3, 10.6, 15.9, 21.2, 26.5, 31.7)
}
# PS [,22], psychologia
if (!is.na(yo2019k[index,22])) {
max_three[length(max_three)+1] <- add_weighted_grade(yo2019k[index,22], 0, 4.1, 8.2, 12.3, 16.4, 20.5, 24.6)
}
# EA [34], long language, english
if (!is.na(yo2019k[index,34])) {
max_three[length(max_three)+1] <- add_weighted_grade(yo2019k[index,34], 0, 4.7, 9.4, 14.1, 18.9, 23.6, 28.3)
}
# Testing
# Third subject added for testing, because selected student has only two values
# max_three[length(max_three)+1] <- 33.3
# print(max_three)
sum_two_values <- 0
if(length(max_three) == 0) {sum_two_values <- 0}
if(length(max_three) == 1) {sum_two_values <- max_three[1]}
if(length(max_three) == 2) {sum_two_values <- max_three[1] + max_three[2]}
# Select two best values of (physist, pshycology and language)
if (length(max_three) == 3) {
sum[1] <- c(max_three[1] + max_three[2])
sum[2] <- c(max_three[2] + max_three[3])
sum[3] <- c(max_three[1] + max_three[1])
# print(sum)
# print(max(sum))
sum_two_values <- max(sum)
# print(sum_two_values)
}
# Add max two selected values to sum_weighted
sum_weighted <- sum_weighted + sum_two_values
return(sum_weighted)
}
# Test case
# print(calculate_weighted_points(25539)) #25540 index in table view
# result is 124
student_weighted <- as.double(calculate_weighted_points(25539))
#########################################################################################
# Calculate weighted points for students who have grade in chemistry and biology
# Variable Points_weighted_ke_bi is used to store weighted points for medical faculty
#########################################################################################
Points_weighted_ke_bi <- c()
count_ke_bi <- 0
print(paste("size:",nrow(yo2019k)))
for (i in 1:nrow(yo2019k)) {
if ((!is.na(yo2019k[i,27])) & (!is.na(yo2019k[i,18]))) {
count_ke_bi <- count_ke_bi + 1
# Calculate point using weighted coefficients for medical faculty
Points_weighted_ke_bi[count_ke_bi] <- calculate_weighted_points(i)
# print(paste("Points_weighted are: ", calculate_weighted_points(i), "for student index",i))
}
}
length(Points_weighted_ke_bi)
count_ke_bi
head(Points_weighted_ke_bi)
#----------- Sorting points and writing results to csv file -----------------------------
# Sort results from smallest to biggest
ordered_ke_bi <- sort(Points_weighted_ke_bi)
plot(ordered_ke_bi)
mean(ordered_ke_bi)
head(ordered_ke_bi, n=10)
tail(ordered_ke_bi, n=382)
# write to csv file (ordered values in one column)
write.table(ordered_ke_bi, file="ordered_ke_bi.csv", sep = ",", row.names = F)
# Set index and ke&bi points to data frame
length(ordered_ke_bi)
ind <- 1:length(ordered_ke_bi)
ordered_ke_bi_data_frame <- data.frame(ind, ordered_ke_bi)
head(ordered_ke_bi_data_frame)
# write to csv file (index and value columns)
write.table(ordered_ke_bi_data_frame, file="ordered_ke_bi_data_frame.csv", sep = ",", row.names = F)
#------------- Visualization ------------------------------------------------------------
# Weighted points for medical faculty for students, who wrote chemistry and biology
# Make histogram of ordered results
histogram_ke_bi <- hist(ordered_ke_bi, breaks = 60)
mean(ordered_ke_bi)
d <- density(ordered_ke_bi)
plot(d)
# normal deviation
x <- ordered_ke_bi
curve(dnorm(x, mean=mean(ordered_ke_bi), sd=sd(ordered_ke_bi)), add=TRUE, col='red', lwd=2)
# Empirical cumulative distribution function
P_ke_bi <- ecdf(Points_weighted_ke_bi)
plot(P_ke_bi)
|
d1965e7b234ab925ad649bb2979300b758a94cde | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googleadmindirectoryv1.auto/man/CustomerPostalAddress.Rd | 695cb81e5ad0bdb04e3e0fc3c2fe89a7f6a2803c | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,059 | rd | CustomerPostalAddress.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_objects.R
\name{CustomerPostalAddress}
\alias{CustomerPostalAddress}
\title{CustomerPostalAddress Object}
\usage{
CustomerPostalAddress(addressLine1 = NULL, addressLine2 = NULL,
addressLine3 = NULL, contactName = NULL, countryCode = NULL,
locality = NULL, organizationName = NULL, postalCode = NULL,
region = NULL)
}
\arguments{
\item{addressLine1}{A customer's physical address}
\item{addressLine2}{Address line 2 of the address}
\item{addressLine3}{Address line 3 of the address}
\item{contactName}{The customer contact's name}
\item{countryCode}{This is a required property}
\item{locality}{Name of the locality}
\item{organizationName}{The company or company division name}
\item{postalCode}{The postal code}
\item{region}{Name of the region}
}
\value{
CustomerPostalAddress object
}
\description{
CustomerPostalAddress Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
JSON template for postal address of a customer.
}
|
fa3b9d0ddab07cc1da3eb90437e50b45a9bc5296 | ec27cf3f57d324b6132d151c875112530390b617 | /cachematrix.R | cb3c036cb9401d3f6da92118db8bf19a4b007ebb | [] | no_license | laxano/ProgrammingAssignment2 | c7713e517775effc8f1a61888b099926d8ca2199 | 9594fdf661ed1898c75f61786315ac851a1b4a62 | refs/heads/master | 2021-01-18T07:16:04.215994 | 2015-09-22T09:02:28 | 2015-09-22T09:02:28 | 42,921,472 | 0 | 0 | null | 2015-09-22T08:31:11 | 2015-09-22T08:31:11 | null | UTF-8 | R | false | false | 1,178 | r | cachematrix.R | ## A function to create a vector of 4 functions:
## (1) set the value of the vector
## (2) get the value of the vector
## (3) set the inv matrix
## (4) get the inv mean
makeCacheMatrix <- function(x = matrix()) {
invx <- NULL
set <- function(y=matrix()) {
x <<- y
invx <<- NULL
}
get <- function() x
setinv <- function(ix) invx <<- ix
getinv <- function() invx
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## A function to invert the matrix created with makeCacheMatrix. However, it first checks to see
## if the inversion has already been calculated. If so, it gets the inv table from the cache
## and skips the computation. Otherwise, it calculates the inversion, and sets the value of
## via the setinv function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
datamatrix <- x$get()
m <- solve(datamatrix, ...)
x$setinv(m)
m
}
|
cb3012726c1680a60ae4ad83de5ebd1143706489 | 9e6542d620d7576c48bd49a911cb73484775d379 | /DownloadData.R | 40a9e330631b51e096fd6f3b2d9f2ac94c2ac172 | [] | no_license | hdnguyen88/HieuNguyen_CaseStudy1_MSDS6306 | e2508b602a2bc1ffd04f3b4547e90094a94187d6 | 9f230f2d79627541162d9dcc89126b21bbb1b8ad | refs/heads/master | 2021-01-23T01:01:27.261484 | 2017-03-23T18:43:44 | 2017-03-23T18:43:44 | 85,862,446 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 598 | r | DownloadData.R | ##Download the files
file1 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
download.file(file1, destfile = "GDP.csv")
file2 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
download.file(file2, destfile = "EDU.csv")
##Format the columns
##Skip column 3 because it is an empty column
gdpdata <- read.csv("GDP.csv", skip = 4, nrows = 190)
edudata <- read.csv("EDU.csv")
gdpdata <- gdpdata[, c(1,2,4,5)]
colnames(gdpdata) <- c("CountryCode", "Rank", "Country.Name", "GDP.Value")
gdpdata$GDP.Value <- as.numeric(gsub(",", "", gdpdata$GDP.Value))
|
890551338d8069cb35c86fcb9cd84af84ced6ada | 52694abcc9168ef0ffcd6a428382102c521278f8 | /SKRYPTY/MODELING/scripts/fcu/scripts/cases_avg_score_plot.R | ad3a9a5d4acb8ec042aa7289c5c4c8cbe060724c | [] | no_license | MMandziej/magisterka | 7d18fa0e1a9a437c4235a912aa0733530fb45e3f | 33453fbbd7ede2e5ccb2771e8a3029a927a844c5 | refs/heads/master | 2023-02-13T04:44:41.581638 | 2021-01-21T23:37:26 | 2021-01-21T23:37:26 | 322,721,319 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,474 | r | cases_avg_score_plot.R | cases_avg_score_plot = function(scored_cases_backlog) {
scored_cases_backlog <- scored_cases_backlog %>% filter(Stage == 'production')
scored_cases_backlog$week <- floor_date(scored_cases_backlog$pqc_timestamp, 'week')
#scored_cases_backlog$pqc <- as.numeric(as.character(scored_cases_backlog$pqc))
cases_sent_to_qc = scored_cases_backlog[scored_cases_backlog$Should_be_send_to_QC == "1", ]
avg_score <- aggregate(scored_cases_backlog$pqc,
by=list(Category=scored_cases_backlog$week), FUN=mean) # date of join to project
cases_scored <- data.frame(table(scored_cases_backlog$week))
cases_qc <- data.frame(table(cases_sent_to_qc$week))
avg_score$cases_scored <- cases_scored$Freq
avg_score$cases_qc <- cases_qc$Freq
plot_data <- avg_score
colnames(plot_data) <- c('week', 'avg_score', 'cases_scored', 'cases_qc')
#plot_data <- plot_data[which(as.Date(plot_data$week) > as.Date("2020-03-30")),]
plot_data['sent_qc_percentage'] <- plot_data['cases_qc'] / plot_data['cases_scored']
results = plot_ly(x = plot_data$week,
y = plot_data$avg_score,
name = 'Weekly average score',
type = 'scatter',
mode = 'lines',
line = list(color='#ffb366')) %>%
add_trace(x = plot_data$week,
y = plot_data$sent_qc_percentage,
name = '[%] of cases sent to QC',
type = 'scatter',
mode = 'lines',
line = list(color='#266EDE')) %>%
add_trace(yaxis='y2',
x = plot_data$week,
y = plot_data$cases_scored,
name = 'Number of scored cases',
type = 'bar') %>%
# added
add_trace(yaxis='y2',
x = plot_data$week,
y = plot_data$cases_qc,
name = 'Number of cases sent to QC',
type = 'bar') %>%
layout(legend = list(y = -0.05,orientation = "h", # show entries horizontally
xanchor = "center", # use center of legend as anchor
x = 0.5),
margin = list(r = 50),
xaxis = list(range = c(min(plot_data$week)-3*86400, max(plot_data$week)+8*86400)),
yaxis = list(side = 'left', overlaying = "y2", title='Weekly average score', showgrid = T, zeroline = FALSE), # ticksuffix ="%",
yaxis2 = list(side = 'right', title = 'Number of cases', showgrid = F, zeroline = FALSE))
return(results)
}
|
f5ada297bc459fa7d79c8dc2d9cec9193c268baf | 29585dff702209dd446c0ab52ceea046c58e384e | /WRS2/R/print.spp.R | d8522fd2fddd1184d3aae8d1682d75ff11bb9f27 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | print.spp.R | print.spp <-
function(x,...)
{
cat("Call:\n")
print(x$call)
cat("\nTest statistic:", round(x$test, 4),"\n")
cat("p-value:", round(x$p.value, 5), "\n")
cat("\n")
}
|
86927645443124ae0208b6802882b8377072607f | ce03150a7185ac44ffb974114f77d6e5f798b07c | /binary/realdata/ROML/cal.tsp.lumAB.R | 70b0c7d18ebfa8430aa3aa1ace436aa1ce8561dc | [] | no_license | chilampoon/ROML | d2ed4c00157d112c96cb745e04c5328d05816af8 | 85f765c27137119b113b8e21efa0f7004405db7c | refs/heads/master | 2021-06-11T09:39:12.007370 | 2021-05-07T19:03:01 | 2021-05-07T19:03:01 | 176,788,387 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,020 | r | cal.tsp.lumAB.R | ### Calculate KTSP scores
source('~/ktsp/script/function/cal.TSPscore.R')
# Balanced LumA vs LumB
data.dir <- '~/ktsp/data/binary/realdata/balanced'
load(file.path(data.dir, "TCGA.balance.LumAB.Rdata"))
load(file.path(data.dir, "MB.balance.LumAB.Rdata"))
lb.brca <- factor(brca.clin.sub$final_assign)
lb.mb <- factor(mb.clin.sub$Pam50Subtype)
# TCGA -> MB
load(file.path(data.dir, "baseline/tcga.DEG.Rdata"))
brca.train <- brca.expr.sub[match(rownames(tt.brca), rownames(brca.expr.sub)),]
mb.test <- mb.expr.sub[match(rownames(tt.brca), rownames(mb.expr.sub)),]
tcga.tsp <- cal.TSPscore(brca.train, lb.brca)
save(tcga.tsp, file = file.path(data.dir, 'ROML/tcga.TSPscore.Rdata'))
# MB -> TCGA
load(file.path(data.dir, "baseline/mb.DEG.Rdata"))
mb.train <- mb.expr.sub[match(rownames(tt.mb), rownames(mb.expr.sub)),]
brca.test <- brca.expr.sub[match(rownames(tt.mb), rownames(brca.expr.sub)),]
# Workflow
mb.tsp <- cal.TSPscore(mb.train, lb.mb)
save(mb.tsp, file = file.path(data.dir, 'ROML/mb.TSPscore.Rdata'))
|
aeda9ca937ebb915ea9e0860bc256b2c82787f45 | 30a03e76baf3efed2039af06a1aa6de8b01f15a0 | /src/r/pooled/functions/compute_PCA.r | eaf00e9e8e0cd70ac5077d0ab2475a6646705661 | [] | no_license | dbgoodman/tcsl-lenti | dbb2dd75209a2d7e7a45284383145f9745759901 | e0473acdb52114f8467ff70799beb538c859c395 | refs/heads/master | 2023-05-25T10:11:24.882572 | 2023-05-20T01:19:03 | 2023-05-20T01:19:03 | 249,501,469 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,893 | r | compute_PCA.r | compute_PCA <- function(data.dt, group) {
# Using car.bin.pct
cast.car.bin.pct <- dcast(data.dt[k.type != "NA"], sort.group + batch +
donor + timepoint + assay + t.type + CAR.align +
k.type ~ bin, value.var = 'car.bin.pct')
cast.car.bin.pct <- cast.car.bin.pct[, .(sort.group, batch, donor,
timepoint, assay, t.type, k.type,
CAR.align, bin.A = A, bin.B = B,
bin.C = C, bin.D = D)]
cast.car.bin.pct[is.na(cast.car.bin.pct)] <- 0
# compute principal components
pca <- prcomp(cast.car.bin.pct[sort.group == group,
.(bin.A, bin.B, bin.C, bin.D)],
center = T, scale. = T)
# project data onto principal components
projected.car.bin.pct <- scale(cast.car.bin.pct[sort.group == group,
.(bin.A, bin.B, bin.C,
bin.D)],
pca$center, pca$scale) %*% pca$rotation
projected.car.bin.pct <- cbind(cast.car.bin.pct[sort.group == group,
.(sort.group, batch,
donor, timepoint,
assay, t.type, k.type,
CAR.align)],
projected.car.bin.pct)
# merge CAR.scores with projected.rel.bin.ratio
ranks <- data.dt[sort.group == group &
bin == "A",
.(CAR.align, CAR.score.rank = rank(CAR.score))]
projected.car.bin.pct <- merge(projected.car.bin.pct, ranks, by = "CAR.align")
return(projected.car.bin.pct)
} |
137434b83a38ca53ad5627ae43440925533b14ce | 5328c09eacae1c89afaf2c7e13ac72d7f387681f | /p_parameters/02_parameters_CDM.R | 6d563a4a0f97632a5d4c3cf00a0edb6137e2ec44 | [] | no_license | ARS-toscana/CONSIGN | 02368d32f17fd5f9b507ef9ed28454974454ee5b | 03c6c1448107fa3c0d60fbc034ba19385e72b69d | refs/heads/master | 2023-06-15T21:39:04.575752 | 2021-07-06T08:46:48 | 2021-07-06T08:46:48 | 381,303,450 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,716 | r | 02_parameters_CDM.R | ###################################################################
# ASSIGN PARAMETERS DESCRIBING THE DATA MODEL OF THE INPUT FILES
###################################################################
# assign -ConcePTION_CDM_tables-: it is a 2-level list describing the ConcePTION CDM tables, and will enter the function as the first parameter. the first level is the data domain (in the example: 'Diagnosis' and 'Medicines') and the second level is the list of tables that has a column pertaining to that data domain
ConcePTION_CDM_tables <- vector(mode="list")
files<-sub('\\.csv$', '', list.files(dirinput))
for (i in 1:length(files)) {
if (str_detect(files[i],"^EVENTS")) { ConcePTION_CDM_tables[["Diagnosis"]][[(length(ConcePTION_CDM_tables[["Diagnosis"]]) + 1)]]<-files[i]
} else if (str_detect(files[i],"^MEDICINES")){ ConcePTION_CDM_tables[["Medicines"]][[(length(ConcePTION_CDM_tables[["Medicines"]]) + 1)]]<-files[i]
} else if (str_detect(files[i],"^PROCEDURES")) { ConcePTION_CDM_tables[["Procedures"]][[(length(ConcePTION_CDM_tables[["Procedures"]]) + 1)]]<-files[i]
} else if (str_detect(files[i],"^VACCINES")) { ConcePTION_CDM_tables[["VaccineATC"]][[(length(ConcePTION_CDM_tables[["VaccineATC"]]) + 1)]]<-files[i] }
}
# for (i in 1:length(files)) {
# if (str_detect(files[i],"^VACCINES")) ConcePTION_CDM_tables[["VaccineATC"]][[(length(ConcePTION_CDM_tables[["VaccineATC"]]) + 1)]]<-files[i]
# }
#define tables for createconceptset
ConcePTION_CDM_EAV_tables <- vector(mode="list")
EAV_table<-c()
for (i in 1:length(files)) {
if (str_detect(files[i],"^SURVEY_OB")) { ConcePTION_CDM_EAV_tables[["Diagnosis"]][[(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]) + 1)]]<-list(list(files[i], "so_source_table", "so_source_column"))
EAV_table<-append(EAV_table,files[i])
}
else{if (str_detect(files[i],"^MEDICAL_OB")){ ConcePTION_CDM_EAV_tables[["Diagnosis"]][[(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]) + 1)]]<-list(list(files[i], "mo_source_table", "mo_source_column"))
EAV_table<-append(EAV_table,files[i])}
}
}
ConcePTION_CDM_EAV_tables_retrieve_so <- vector(mode="list")
for (i in 1:length(files)) {
if (str_detect(files[i],"^SURVEY_OB")) { ConcePTION_CDM_EAV_tables_retrieve_so[[(length(ConcePTION_CDM_EAV_tables_retrieve_so) + 1)]]<-list(list(files[i], "so_source_table", "so_source_column"))
#EAV_table<-append(EAV_table,files[i])
}
}
ConcePTION_CDM_EAV_tables_retrieve_mo <- vector(mode="list")
for (i in 1:length(files)) {
if (str_detect(files[i],"^MEDICAL_OB")) { ConcePTION_CDM_EAV_tables_retrieve_mo[[(length(ConcePTION_CDM_EAV_tables_retrieve_mo) + 1)]]<-list(list(files[i], "mo_source_table", "mo_source_column"))
#EAV_table<-append(EAV_table,files[i])
}
}
alldomain<-names(ConcePTION_CDM_tables)
ConcePTION_CDM_codvar <- vector(mode="list")
ConcePTION_CDM_coding_system_cols <-vector(mode="list")
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (dom in alldomain) {
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
for (ds in append(ConcePTION_CDM_tables[[dom]],ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]])) {
if (ds==ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
if (str_detect(ds,"^SURVEY_OB")) ConcePTION_CDM_codvar[["Diagnosis"]][[ds]]="so_source_value"
if (str_detect(ds,"^MEDICAL_OB")) ConcePTION_CDM_codvar[["Diagnosis"]][[ds]]="mo_source_value"
}else{
if (dom=="Medicines") ConcePTION_CDM_codvar[[dom]][[ds]]="medicinal_product_atc_code"
if (dom=="Diagnosis") ConcePTION_CDM_codvar[[dom]][[ds]]="event_code"
if (dom=="Procedures") ConcePTION_CDM_codvar[[dom]][[ds]]="procedure_code"
if (dom=="VaccineATC") ConcePTION_CDM_codvar[[dom]][[ds]]="vx_atc"
}
}
}
}
}else{
for (dom in alldomain) {
for (ds in ConcePTION_CDM_tables[[dom]]) {
if (dom=="Medicines") ConcePTION_CDM_codvar[[dom]][[ds]]="medicinal_product_atc_code"
if (dom=="Diagnosis") ConcePTION_CDM_codvar[[dom]][[ds]]="event_code"
if (dom=="Procedures") ConcePTION_CDM_codvar[[dom]][[ds]]="procedure_code"
if (dom=="VaccineATC") ConcePTION_CDM_codvar[[dom]][[ds]]="vx_atc"
}
}
}
#coding system
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (dom in alldomain) {
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
for (ds in append(ConcePTION_CDM_tables[[dom]],ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]])) {
if (ds==ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
if (str_detect(ds,"^SURVEY_OB")) ConcePTION_CDM_coding_system_cols[["Diagnosis"]][[ds]]="so_unit"
if (str_detect(ds,"^MEDICAL_OB")) ConcePTION_CDM_coding_system_cols[["Diagnosis"]][[ds]]="mo_record_vocabulary"
}else{
# if (dom=="Medicines") ConcePTION_CDM_coding_system_cols[[dom]][[ds]]="product_ATCcode"
if (dom=="Diagnosis") ConcePTION_CDM_coding_system_cols[[dom]][[ds]]="event_record_vocabulary"
if (dom=="Procedures") ConcePTION_CDM_coding_system_cols[[dom]][[ds]]="procedure_code_vocabulary"
}
}
}
}
}else{
for (dom in alldomain) {
for (ds in ConcePTION_CDM_tables[[dom]]) {
if (dom=="Diagnosis") ConcePTION_CDM_coding_system_cols[[dom]][[ds]] = "event_record_vocabulary"
if (dom=="Procedures") ConcePTION_CDM_coding_system_cols[[dom]][[ds]] = "procedure_code_vocabulary"
# if (dom=="Medicines") ConcePTION_CDM_coding_system_cols[[dom]][[ds]] = "code_indication_vocabulary"
}
}
}
# assign 2 more 2-level lists: -id- -date-. They encode from the data model the name of the column(s) of each data table that contain, respectively, the personal identifier and the date. Those 2 lists are to be inputted in the rename_col option of the function.
#NB: GENERAL contains the names columns will have in the final datasets
person_id <- vector(mode="list")
date<- vector(mode="list")
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (dom in alldomain) {
for (i in 1:(length(ConcePTION_CDM_EAV_tables[[dom]]))){
for (ds in append(ConcePTION_CDM_tables[[dom]],ConcePTION_CDM_EAV_tables[[dom]][[i]][[1]][[1]])) {
person_id [[dom]][[ds]] = "person_id"
}
}
}
}else{
for (dom in alldomain) {
for (ds in ConcePTION_CDM_tables[[dom]]) {
person_id [[dom]][[ds]] = "person_id"
}
}
}
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (dom in alldomain) {
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
for (ds in append(ConcePTION_CDM_tables[[dom]],ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]])) {
if (ds==ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
if (str_detect(ds,"^SURVEY_OB")) date[["Diagnosis"]][[ds]]="so_date"
if (str_detect(ds,"^MEDICAL_OB")) date[["Diagnosis"]][[ds]]="mo_date"
}else{
if (dom=="Medicines") {
if (thisdatasource_has_prescriptions == TRUE){
date[[dom]][[ds]]="date_prescription"
}else{
date[[dom]][[ds]]="date_dispensing"
}
}
if (dom=="Diagnosis") date[[dom]][[ds]]="start_date_record"
if (dom=="Procedures") date[[dom]][[ds]]="procedure_date"
if (dom=="VaccineATC") date[[dom]][[ds]] <- "vx_admin_date"
}
}
}
}
}else{
for (dom in alldomain) {
for (ds in ConcePTION_CDM_tables[[dom]]) {
if (dom=="Medicines") {
if (thisdatasource_has_prescriptions == TRUE){
date[[dom]][[ds]]="date_prescription"
}else{
date[[dom]][[ds]]="date_dispensing"
}
}
if (dom=="Diagnosis") date[[dom]][[ds]]="start_date_record"
if (dom=="Procedures") date[[dom]][[ds]]="procedure_date"
if (dom=="VaccineATC") date[[dom]][[ds]]="vx_admin_date"
}
}
}
#NEW ATTRIBUTES DEFINITION
files_par<-sub('\\.RData$', '', list.files(dirpargen))
if(length(files_par)>0){
for (i in 1:length(files_par)) {
if (str_detect(files_par[i],"^ConcePTION_CDM_EAV_attributes")) {
load(paste0(dirpargen,files_par[i],".RData"))
load(paste0(dirpargen,"ConcePTION_CDM_coding_system_list.RData"))
print("upload existing EAV_attributes")
} else {
print("create EAV_attributes")
ConcePTION_CDM_coding_system_list<-vector(mode="list")
METADATA<-fread(paste0(dirinput,"METADATA.csv"))
#METADATA<-fread(paste0(dirinput,"METADATA_CPRD.csv"))
ConcePTION_CDM_coding_system_list<-unique(unlist(str_split(unique(METADATA[type_of_metadata=="list_of_values" & (columnname=="so_unit" | columnname=="mo_record_vocabulary"),values])," ")))
ConcePTION_CDM_EAV_attributes<-vector(mode="list")
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
for (ds in ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
temp <- fread(paste0(dirinput,ds,".csv"))
for( cod_syst in ConcePTION_CDM_coding_system_list) {
if ("mo_source_table" %in% names(temp) ) {
temp1<-unique(temp[mo_record_vocabulary %in% cod_syst,.(mo_source_table,mo_source_column)])
if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
} else{
temp1<-unique(temp[so_unit %in% cod_syst,.(so_source_table,so_source_column)])
if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
}
}
}
}
}
ConcePTION_CDM_EAV_attributes_this_datasource<-vector(mode="list")
if (length(ConcePTION_CDM_EAV_attributes)!=0 ){
for (t in names(ConcePTION_CDM_EAV_attributes)) {
for (f in names(ConcePTION_CDM_EAV_attributes[[t]])) {
for (s in names(ConcePTION_CDM_EAV_attributes[[t]][[f]])) {
if (s==thisdatasource ){
ConcePTION_CDM_EAV_attributes_this_datasource[[t]][[f]]<-ConcePTION_CDM_EAV_attributes[[t]][[f]][[s]]
}
}
}
}
}
save(ConcePTION_CDM_EAV_attributes_this_datasource, file = paste0(dirpargen,"ConcePTION_CDM_EAV_attributes.RData"))
save(ConcePTION_CDM_coding_system_list, file = paste0(dirpargen,"ConcePTION_CDM_coding_system_list.RData"))
}
}
} else {
# NOTE(review): this branch is a line-for-line duplicate of the EAV-attribute
# construction in the branch above; consider extracting a shared helper.
print("create EAV_attributes")
ConcePTION_CDM_coding_system_list<-vector(mode="list")
METADATA<-fread(paste0(dirinput,"METADATA.csv"))
#METADATA<-fread(paste0(dirinput,"METADATA_CPRD.csv"))
# Coding systems = space-separated values declared for so_unit / mo_record_vocabulary.
ConcePTION_CDM_coding_system_list<-unique(unlist(str_split(unique(METADATA[type_of_metadata=="list_of_values" & (columnname=="so_unit" | columnname=="mo_record_vocabulary"),values])," ")))
ConcePTION_CDM_EAV_attributes<-vector(mode="list")
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
for (ds in ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
temp <- fread(paste0(dirinput,ds,".csv"))
for( cod_syst in ConcePTION_CDM_coding_system_list) {
# mo_* columns when present, otherwise the so_* (survey) columns.
if ("mo_source_table" %in% names(temp) ) {
temp1<-unique(temp[mo_record_vocabulary %in% cod_syst,.(mo_source_table,mo_source_column)])
if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
} else{
temp1<-unique(temp[so_unit %in% cod_syst,.(so_source_table,so_source_column)])
if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
}
}
}
}
}
# Restrict to the attributes of the data source being processed.
ConcePTION_CDM_EAV_attributes_this_datasource<-vector(mode="list")
if (length(ConcePTION_CDM_EAV_attributes)!=0 ){
for (t in names(ConcePTION_CDM_EAV_attributes)) {
for (f in names(ConcePTION_CDM_EAV_attributes[[t]])) {
for (s in names(ConcePTION_CDM_EAV_attributes[[t]][[f]])) {
if (s==thisdatasource ){
ConcePTION_CDM_EAV_attributes_this_datasource[[t]][[f]]<-ConcePTION_CDM_EAV_attributes[[t]][[f]][[s]]
}
}
}
}
}
save(ConcePTION_CDM_EAV_attributes_this_datasource, file = paste0(dirpargen,"ConcePTION_CDM_EAV_attributes.RData"))
save(ConcePTION_CDM_coding_system_list, file = paste0(dirpargen,"ConcePTION_CDM_coding_system_list.RData"))
}
# ConcePTION_CDM_coding_system_list<-vector(mode="list")
# METADATA<-fread(paste0(dirinput,"METADATA.csv"))
# ConcePTION_CDM_coding_system_list<-unique(unlist(str_split(unique(METADATA[type_of_metadata=="list_of_values" & (columnname=="so_unit" | columnname=="mo_record_vocabulary"),values])," ")))
#
# ConcePTION_CDM_EAV_attributes<-vector(mode="list")
#
# if (length(ConcePTION_CDM_EAV_tables)!=0 ){
# for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
# for (ds in ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
# temp <- fread(paste0(dirinput,ds,".csv"))
# for( cod_syst in ConcePTION_CDM_coding_system_list) {
# if ("mo_source_table" %in% names(temp) ) {
# temp1<-unique(temp[mo_record_vocabulary %in% cod_syst,.(mo_source_table,mo_source_column)])
# if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
# } else{
# temp1<-unique(temp[so_unit %in% cod_syst,.(so_source_table,so_source_column)])
# if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
# }
#
# }
# }
# }
# }
# ConcePTION_CDM_coding_system_list<-vector(mode="list")
# ConcePTION_CDM_coding_system_list<-c("ICD9","ICD10","SNOMED","SNOMED3","READ","ICD10CM","ICD10GM","kg")
#
#
# ConcePTION_CDM_EAV_attributes<-vector(mode="list")
#
# if (length(ConcePTION_CDM_EAV_tables)!=0 ){
# for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
# for (ds in ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
# temp <- fread(paste0(dirinput,ds,".csv"))
# for( cod_syst in ConcePTION_CDM_coding_system_list) {
# if ("mo_source_table" %in% names(temp) ) {
# temp1<-unique(temp[mo_record_vocabulary %in% cod_syst,.(mo_source_table,mo_source_column)])
# if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
# } else{
# temp1<-unique(temp[so_unit %in% cod_syst,.(so_source_table,so_source_column)])
# if (nrow(temp1)!=0) ConcePTION_CDM_EAV_attributes[["Diagnosis"]][[ds]][[thisdatasource]][[cod_syst]]<-as.list(as.data.table(t(temp1)))
# }
#
# }
# }
# }
# }
# #DA CMD_SOURCE
# ConcePTION_CDM_EAV_attributes<-vector(mode="list")
# datasources<-c("ARS")
#
# if (length(ConcePTION_CDM_EAV_tables)!=0 ){
# for (dom in alldomain) {
# for (i in 1:(length(ConcePTION_CDM_EAV_tables[[dom]]))){
# for (ds in ConcePTION_CDM_EAV_tables[[dom]][[i]][[1]][[1]]) {
# for (dat in datasources) {
# if (dom=="Diagnosis") ConcePTION_CDM_EAV_attributes[[dom]][[ds]][[dat]][["ICD9"]] <- list(list("RMR","CAUSAMORTE"))
# ConcePTION_CDM_EAV_attributes[[dom]][[ds]][[dat]][["ICD10"]] <- list(list("RMR","CAUSAMORTE_ICDX"))
# ConcePTION_CDM_EAV_attributes[[dom]][[ds]][[dat]][["SNOMED"]] <- list(list("AP","COD_MORF_1"),list("AP","COD_MORF_2"),list("AP","COD_MORF_3"),list("AP","COD_TOPOG"))
# # if (dom=="Medicines") ConcePTION_CDM_EAV_attributes[[dom]][[ds]][[dat]][["ICD9"]] <- list(list("CAP1","SETTAMEN_ARSNEW"),list("CAP1","GEST_ECO"),list("AP","COD_MORF_1"),list("AP","COD_MORF_2"),list("AP","COD_MORF_3"),list("AP","COD_TOPOG"))
# }
# }
# }
# }
# }
# ConcePTION_CDM_EAV_attributes_this_datasource<-vector(mode="list")
#
# if (length(ConcePTION_CDM_EAV_attributes)!=0 ){
# for (t in names(ConcePTION_CDM_EAV_attributes)) {
# for (f in names(ConcePTION_CDM_EAV_attributes[[t]])) {
# for (s in names(ConcePTION_CDM_EAV_attributes[[t]][[f]])) {
# if (s==thisdatasource ){
# ConcePTION_CDM_EAV_attributes_this_datasource[[t]][[f]]<-ConcePTION_CDM_EAV_attributes[[t]][[f]][[s]]
# }
# }
# }
# }
# }
#
# save(ConcePTION_CDM_EAV_attributes_this_datasource, file = paste0(dirpargen,"ConcePTION_CDM_EAV_attributes.RData"))
# Map, per domain and per table, the column(s) holding the event date(s).
ConcePTION_CDM_datevar<-vector(mode="list")
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
for (dom in alldomain) {
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
# Walk both the regular CDM tables of the domain and the Diagnosis EAV tables.
for (ds in append(ConcePTION_CDM_tables[[dom]],ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]])) {
if (ds==ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
# EAV tables: the date column depends on the table-name prefix.
if (str_detect(ds,"^SURVEY_OB")) ConcePTION_CDM_datevar[["Diagnosis"]][[ds]]="so_date"
if (str_detect(ds,"^MEDICAL_OB")) ConcePTION_CDM_datevar[["Diagnosis"]][[ds]]="mo_date"
}else{
# Regular tables: fixed date columns per domain.
if (dom=="Medicines") ConcePTION_CDM_datevar[[dom]][[ds]]= list("date_dispensing","date_prescription")
if (dom=="Diagnosis") ConcePTION_CDM_datevar[[dom]][[ds]]=list("start_date_record","end_date_record")
if (dom=="Procedures") ConcePTION_CDM_datevar[[dom]][[ds]]=list("procedure_date")
if (dom=="VaccineATC") ConcePTION_CDM_datevar[[dom]][[ds]] <- "vx_admin_date"
}
}
}
}
}else{
# No EAV tables: only the regular CDM tables get a date-variable entry.
for (dom in alldomain) {
for (ds in ConcePTION_CDM_tables[[dom]]) {
if (dom=="Medicines") ConcePTION_CDM_datevar[[dom]][[ds]]= list("date_dispensing","date_prescription")
if (dom=="Diagnosis") ConcePTION_CDM_datevar[[dom]][[ds]]=list("start_date_record","end_date_record")
if (dom=="Procedures") ConcePTION_CDM_datevar[[dom]][[ds]]=list("procedure_date")
if (dom=="VaccineATC") ConcePTION_CDM_datevar[[dom]][[ds]] <- "vx_admin_date"
}
}
}
# Extract the Diagnosis date-variable map for retrieval.
ConcePTION_CDM_datevar_retrieve<-list()
if (length(ConcePTION_CDM_EAV_tables)!=0 ){
# NOTE(review): the loop body does not depend on i or ds, so the same
# assignment is executed repeatedly; a single assignment would be equivalent.
for (i in 1:(length(ConcePTION_CDM_EAV_tables[["Diagnosis"]]))){
for (ds in ConcePTION_CDM_EAV_tables[["Diagnosis"]][[i]][[1]][[1]]) {
ConcePTION_CDM_datevar_retrieve = ConcePTION_CDM_datevar [["Diagnosis"]]
}
}
}
#ConcePTION_CDM_datevar_retrieve<-ConcePTION_CDM_datevar_retrieveA
# Drop the scratch tables read while building the EAV attribute lists.
rm(temp, temp1)
|
a2b3b8fbc0ef86c72d3a9063932ae3ff0c9fd219 | 502c72395267d17d03f40e0b5f22c88237a1498e | /script/exemplo.R | be404f7fba8675df6accc7efe4fe4ec328fb6445 | [] | no_license | RafaMariano/sits.rep | c1d6a67be8e3720b7c6cb362e66ed380ee0b5caf | 9aa6f71caa45f8625237d236e25157ccf0d85a07 | refs/heads/master | 2021-06-28T05:05:39.722137 | 2021-01-19T00:13:36 | 2021-01-19T00:13:36 | 210,867,951 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,124 | r | exemplo.R | samples <- function(){
# Body of samples(): load the Mato Grosso reference time-series dataset
# shipped with inSitu and keep only the selected band(s).
library(inSitu)
library(sits)
data(br_mt_1_8K_9classes_6bands)
bands <- c("evi") # , "ndvi", "nir", "mir"
samples.tb <- sits_select_bands_(br_mt_1_8K_9classes_6bands, bands = bands)
return(samples.tb)
}
# svm_model <- sits::sits_svm(cost = 1,
# formula = sits_formula_linear())
# Hyper-parameters for the deep-learning classifier (three dense layers).
model <- list(units = c(512, 512, 512),
activation = 'relu',
dropout_rates = c(0.50, 0.45, 0.40),
epochs = 1,
batch_size = 128,
validation_split = 0.2)
# Data-cube description: MODIS MOD13Q1 EVI band, clipped to the shapefile geometry.
coverage <- list(service = "EOCUBES",
name = "MOD13Q1/006",
bands = c("evi"),
geom = "~/geom/geom.shp")
# system.file("extdata/MT/shape/MT.shp", package = "inSitu")
# Processing options for the cube classification step.
cubes <- list(multicores = 4, interval = "48 month", memsize = 1, filter = NULL)
sits.rep::classify("arv_1", samples, "deeplearning", model, coverage, "cubes", cubes)
# Post-processing hook: Bayesian smoothing of a classified raster.
# `rds` is the classified raster object, `output` the file to write to;
# `input` is part of the hook signature but unused here.
pos_p <- function(input, output, rds) {
  library(raster)
  library(sp)
  library(sits)
  # 3x3 all-ones neighbourhood used as the smoothing window.
  bayes_window <- matrix(1, nrow = 3, ncol = 3, byrow = TRUE)
  smoothed <- sits_bayes_postprocess(
    raster_class = rds,
    window = bayes_window,
    noise = 10,
    file = output
  )
  return(smoothed)
}
# Run the Bayesian post-processing step over the "tree_6" classification.
sits.rep::pos_processing("tree_6/classification", "pos_baseyan", pos_p)
# The last parameter, CUBES, could be an enum; the optional parameters of
# sits_classify_cubes would then fall back to their defaults.
# sits.rep::sits.rep_classify("tree_1", samples, svm_model, coverage, CUBES)
# Post-processing hook: mosaic per-tile GeoTIFFs into one raster per year.
# `input` is the folder holding the tiles, `output` the output file prefix;
# `rds` is part of the hook signature but unused here.
merge <- function(input, output, rds) {
  # Every GeoTIFF tile found in the input folder.
  tif_paths <- list.files(input, pattern = ".*\\.tif", full.names = TRUE)
  # Year token extracted from each tile file name.
  year_pattern <- "^.*_[^_]{6}_[0-9]+_[0-9]+_[0-9]+_[0-9]+_([0-9]+)_.*\\.tif"
  tile_years <- gsub(year_pattern, "\\1", tif_paths)
  for (yr in unique(tile_years)) {
    # Load every tile of this year and mosaic them into a single file.
    rasters <- lapply(tif_paths[tile_years == yr], raster::raster)
    rasters$filename <- paste0(output, "_", sprintf("MT_%s.tif", yr))
    do.call(raster::merge, rasters)
  }
}
# Mosaic the post-processed rasters of "arv_1" into one GeoTIFF per year.
sits.rep::pos_processing("arv_1/pos_baseyan", "mosaic", merge)
|
4fb979500245e359f052818ad773de2c577cfab6 | 6df71467ddd9483d708dd1ce6015e378333c2d54 | /code/new_format.R | 36582f5ec0b144a83e9a35a83f63dee6430fd220 | [] | no_license | kiernann/dc-bmp | 62bd4c32f671b49d66248dcc383c90f0d282179d | 7c71c86d43317aaa093e025a5e297e47004ddcb3 | refs/heads/master | 2021-10-25T01:33:47.471354 | 2019-03-30T19:58:25 | 2019-03-30T19:58:25 | 110,895,536 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,605 | r | new_format.R | library(tidyverse)
library(maptools)
library(sp)
library(sf)
# read data ---------------------------------------------------------------
# Census-tract polygons: KML coordinates converted to sp Polygon objects.
tract_shapes <-
getKMLcoordinates(kmlfile = "https://opendata.arcgis.com/datasets/b78d164649ad487db0f2b20a9261fb8c_17.kml",
ignoreAltitude = TRUE) %>%
map(Polygon)
# Tract-level attributes; TRACTNO read as character to preserve leading zeros.
tract_info <-
read_csv(file = "https://opendata.arcgis.com/datasets/b78d164649ad487db0f2b20a9261fb8c_17.csv",
col_types = cols(TRACTNO = col_character())) %>%
rename_all(tolower) %>%
select(-area) %>%
rename(tract = tractno,
area = areasqmi,
pop = total,
child = age0_17,
adult = age18plus,
houses = occupiedho,
total = fagi_total_2005,
median = fagi_median_2005) %>%
select(tract, area, total, median, pop,
child, adult, white, black, hispanic) %>%
# normalize for population
mutate(child = child / pop,
adult = adult / pop,
white = white / pop,
black = black / pop,
hispanic = hispanic / pop) %>%
# compute population density
mutate(density = pop / area)
# BMP point locations (with latitude/longitude columns after lower-casing).
bmp <-
read_csv(file = "https://opendata.arcgis.com/datasets/a973c2c7b7c14a918859f3e38bdffdd2_42.csv") %>%
rename_all(tolower)
# Return the tract number(s) whose polygon strictly contains the BMP point
# in row `row` of `bmp`. Relies on the globals `bmp`, `tract_shapes` and
# `tract_info` created above; `sp::point.in.polygon` returns 0 (outside),
# 1 (strictly inside), 2 (on an edge) or 3 (on a vertex).
return_tract <- function(row) {
  # Generalised: take the tract count from the data instead of hard-coding 188,
  # so the function keeps working if the polygon list changes.
  n_tracts <- length(tract_shapes)
  # `codes` (was `t`, which shadowed base::t) holds one location code per tract.
  codes <- integer(n_tracts)
  for (i in seq_len(n_tracts)) {
    codes[i] <- point.in.polygon(
      point.x = bmp$longitude[row],
      point.y = bmp$latitude[row],
      pol.x = tract_shapes[[i]]@coords[, 1],
      pol.y = tract_shapes[[i]]@coords[, 2]
    )
  }
  # Keep only tracts whose polygon strictly contains the point (code 1),
  # matching the original `t == 1` filter.
  return(tract_info$tract[codes == 1])
}
# Quick manual check: tract membership of the second BMP record.
return_tract(2)
|
73cf2c91cc154094f98541dab2ae5304f43b193c | e3d271f526cb0ce859f798034f9f1faaf1092908 | /code/12-plot-jlhd-mbm-models.R | 2e2a6aa256d31b131f97b172f97e3285c6a5f2f1 | [
"Apache-2.0"
] | permissive | jdyen/size-trophic | 406f25049ca5a0079cde03a36132e655db64e42e | 730bdf285c1a5d205edfd2ad68405ad47c022eb3 | refs/heads/master | 2020-11-25T05:35:16.415014 | 2020-08-18T04:19:11 | 2020-08-18T04:19:11 | 228,522,931 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,403 | r | 12-plot-jlhd-mbm-models.R | # the caret package should make our life easy
library(caret)
library(rstanarm)
library(edarf)
library(plotrix)
# load fitted model workspace
load("outputs/fitted-models-body-mass.RData")
# load some helper functions
source("code/helpers.R")
# Partial-dependence data for the jaw-length (JlHd) model.
pd_jlhd <- readRDS("outputs/pd_jlhd.RDS")
# plot Fig. 3 for JlHd model: partial regression coefficients
jpeg(file = "outputs/figs/Fig3-jlhd_bodymass.jpg", units = "in", width = 7, height = 7, res = 300)
par(mfrow = c(1, 1), mar = c(4.1, 4.1, 1.1, 1.1))
# Centre/scale attributes of the scaled length column, used to un-scale the x axis.
xmean <- attributes(sp_data[, "len"])$`scaled:center`
xsd <- attributes(sp_data[, "len"])$`scaled:scale`
pd_tmp <- pd_jlhd
tl_plot <- sp_data$jlhd
# Back-transform outcome and trait values to their pre-scaling units.
pd_tmp$.outcome <- (pd_tmp$.outcome * attributes(sp_data$jlhd)$`scaled:scale`) + attributes(sp_data$jlhd)$`scaled:center`
tl_plot <- (tl_plot * attributes(sp_data$jlhd)$`scaled:scale`) + attributes(sp_data$jlhd)$`scaled:center`
xplot <- sp_data$len
xplot <- (xplot * attributes(sp_data$len)$`scaled:scale`) + attributes(sp_data$len)$`scaled:center`
xplot <- 10 ^ xplot # undo a log10 transform of len -- TODO confirm upstream
pd_plot(pd_tmp, xlab = "Body mass (BM)", ylab = "log10(Jaw length (JlHd))",
mean = xmean,
sd = xsd, tl = tl_plot, var = xplot,
ylim = c(-1.5, 1),
log_x = TRUE, log_y = FALSE)
dev.off()
# plot Fig. 3 for JlHd model: partial regression coefficients
# without low TP species (herb/detritivores)
# load fitted model workspace
rm(sp_data)
# sp_data is presumably re-created by the load() below -- verify the RData contents.
load("outputs/fitted-models-body-mass-rm-low-tp.RData")
pd_jlhd_rmlowtp <- readRDS("outputs/pd_jlhd-rm-low-tp.RDS")
jpeg(file = "outputs/figs/Fig3-jlhd_bodymass-rm-low-tp.jpg", units = "in", width = 7, height = 7, res = 300)
par(mfrow = c(1, 1), mar = c(4.1, 4.1, 1.1, 1.1))
xmean <- attributes(sp_data[, "len"])$`scaled:center`
xsd <- attributes(sp_data[, "len"])$`scaled:scale`
pd_tmp <- pd_jlhd_rmlowtp
tl_plot <- sp_data$jlhd
# Same back-transformation as above, on the reduced dataset.
pd_tmp$.outcome <- (pd_tmp$.outcome * attributes(sp_data$jlhd)$`scaled:scale`) + attributes(sp_data$jlhd)$`scaled:center`
tl_plot <- (tl_plot * attributes(sp_data$jlhd)$`scaled:scale`) + attributes(sp_data$jlhd)$`scaled:center`
xplot <- sp_data$len
xplot <- (xplot * attributes(sp_data$len)$`scaled:scale`) + attributes(sp_data$len)$`scaled:center`
xplot <- 10 ^ xplot
pd_plot(pd_tmp, xlab = "Body mass (BM)", ylab = "log10(Jaw length (JlHd))",
mean = xmean,
sd = xsd, tl = tl_plot, var = xplot,
ylim = c(-1.5, 1),
log_x = TRUE, log_y = FALSE)
dev.off()
|
8583da3b70583e0f164a68dbccf7e50a25135b85 | c42f5671f9e09c868cc0e24e4e1accfff1a2c73b | /man/package_trial1.Rd | b60398ec95a298c21bde6773ba37be6049026c48 | [] | no_license | JerryTucay/mfdata | 1d23e5c5e186d207a5785bb74cdb0e21ccb2d5dd | f05e7ee66222a4b609c74bba3162cbf096c023d4 | refs/heads/master | 2020-04-03T22:35:33.402383 | 2019-05-06T17:33:13 | 2019-05-06T17:33:13 | 155,607,894 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,795 | rd | package_trial1.Rd | \name{mcar}
\alias{mcar}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
MCAR: Missing completely at random
}
\description{
mcar() allows the user to forcibly insert missing values (NA) in a way that replicates data that are missing completely at random. This kind of missing data is missing by random chance.
}
\usage{
mcar(data, p, column = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
The data that you want to give missing values to
}
\item{p}{
The percentage of data that you want to make missing.
}
\item{column}{
When NULL the function will run through your whole data set
Set a value to target specific columns
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## Inputs missing data in the first two columns of the data set df
df<- data.frame(x=rnorm(100, 10, 2), y=rpois(100,4), z=rbinom(100, 1, .4))
df_missing<- mcar(df, .25, 1:2)
sum(is.na(df_missing))/200
## Inputs missing data into all of the columns in df2
df2<- data.frame(x=rnorm(100, 10, 2), y=rpois(100,4), z=rbinom(100, 1, .4))
df_missing2<- mcar(df2, .25)
sum(is.na(df_missing2))/300
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
0bed0ffe67c2f907718f7a032e44f56486274451 | daa108df46230bc11ffafd679ec8e99125c88dd9 | /R/merge_files.R | b487723b726d59fbaf018c7ca00cb532031a5cca | [
"MIT"
] | permissive | ArnaudDroitLab/wgs | 42235346cdeb23214cff4fa331035bc99c27860d | c11f75c9b959e7c1e6dec2dffbf398dcb916fe23 | refs/heads/master | 2020-05-29T09:44:34.538744 | 2019-06-19T16:15:11 | 2019-06-19T16:15:11 | 189,075,967 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,418 | r | merge_files.R | #' Merge multiple assemblies
#'
#' @param n The names of the bacteria to merge, corresponding to the
#' \code{organism_name} column from the \code{metadata} parameter.
#' @param output The filename to save the merged fasta.
#' @param dir The download directory. Default: \code{.}.
#' @param metadata The metadata file downloaded with the
#' \code{fetch_bacteria_metadata} function. If \code{NULL}, the pre-computed
#' metadatas will be used. Default: \code{NULL}
#' @param strict If \code{TRUE}, the names must be identical to the
#' \code{organism_name} column of the metadata object (by default, it's the
#' \code{bacteria} object). Otherwise, partial match will be allowed and the
#' matching will not be case sensitive. Default: \code{FALSE}.
#' @param force If \code{TRUE}, remove file if it already exists. Default:
#' \code{FALSE}.
#'
#' @return Invisibly return the subset of metadata that was selected to
#' download.
#'
#' @examples
#' \dontrun{
#' merge("Campylobacter avium", output = "c_avium.fna")
#' }
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr filter
#' @importFrom purrr walk
#' @importFrom stringr str_extract
#' @importFrom stringr str_detect
#' @importFrom Biostrings readDNAStringSet
#' @importFrom Biostrings writeXStringSet
#'
#' @export
merge_files <- function(n, output, dir = ".", metadata = NULL, strict = FALSE,
force = FALSE) {
# Validate the destination path before doing any work.
stopifnot(is(output, "character"))
stopifnot(dir.exists(dirname(output)))
# Select the metadata rows matching the requested organism names.
current_metadata <- filter_metadata(metadata, n, strict)
# Expected local *_genomic.fna.gz files, named after each FTP path's basename.
fna_files <- paste0(dir, "/",
stringr::str_extract(current_metadata$ftp_path, "[^\\/]*$"),
"_genomic.fna.gz")
# Every assembly must already have been downloaded into `dir`.
stopifnot(all(purrr::map_lgl(fna_files, file.exists)))
if (file.exists(output)) {
if (!force) {
msg <- paste0("output file (", output, ") exists.\n")
msg <- paste0(msg, "Please change output value, remove file")
msg <- paste0(msg, " or use force = TRUE.")
stop(msg)
} else {
# force = TRUE: start from an empty file, since writing below appends.
file.remove(output)
}
}
# Right now it's very simple, but this implementation will make it easier
# to modify the files when merging in the future
merge_file <- function(x) {
readDNAStringSet(x) %>%
writeXStringSet(output, append = TRUE)
}
# Append every assembly, in metadata order, to the output fasta.
purrr::walk(fna_files, merge_file)
invisible(current_metadata)
}
dbbaf5c2e460a9c64f75736e85a58a82ae976216 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/aws.kms/examples/rotation.Rd.R | 3dd39a4d10be485386fb11923f86e0b5e3389112 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | rotation.Rd.R | library(aws.kms)
### Name: enable_kms_rotation
### Title: Enable/Disable Key Rotation
### Aliases: enable_kms_rotation disable_kms_rotation get_kms_rotation
### ** Examples
## Not run:
##D # create key
##D k <- create_kms_key(description = "example")
##D
##D # enable rotation
##D enable_kms_rotation(k)
##D
##D # disable rotation
##D disable_kms_rotation(k)
##D
##D # confirm rotation is disabled
##D get_kms_rotation(k)
##D
##D # delete in 7 days
##D delete_kms_key(k)
## End(Not run)
|
58cc14988682a5a62483b3a009c58abeea19436f | 8fe48f24b9405e4bb50be7411190b4753bb8a09f | /Genomics/Structure/Pop_structure.R | eb41da44ab5f05a1d741895470a7b7f2cbbcc1a0 | [] | no_license | joycepra/Association-Environment-Genome-and-Morphometry-analysis | da325067125be7adf2cef8af9f2ba01ccc857dd6 | 27bd4c26bf7898333f89166d365895423fec0bb3 | refs/heads/main | 2023-06-27T16:08:59.639859 | 2021-07-31T22:31:15 | 2021-07-31T22:31:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,166 | r | Pop_structure.R | #################################################################################################################
#install packages
# Install-on-demand guards for the analysis packages used below.
if("adegenet" %in% rownames(installed.packages()) == FALSE){install.packages("adegenet")
} else {print (paste0("'adegenet' has already been installed in library"))}
if("AssocTests" %in% rownames(installed.packages()) == FALSE){install.packages("AssocTests")
} else {print (paste0("'AssocTests' has already been installed in library"))}
if("fossil" %in% rownames(installed.packages()) == FALSE){install.packages("fossil")
} else {print (paste0("'fossil' has already been installed in library"))}
if("ecodist" %in% rownames(installed.packages()) == FALSE){install.packages("ecodist")
} else {print (paste0("'ecodist' has already been installed in library"))}
if("pacman" %in% rownames(installed.packages()) == FALSE){install.packages("pacman")
} else {print (paste0("'pacman' has already been installed in library"))}
#load packages
pacman::p_load("adegenet", "AssocTests", "fossil", "ecodist")
#################################################################################################################
############################################# INPUT FILES #######################################################
# 1- structure file with the genomic data
# 2- cvs file with the geographic coordinates of each individual
#################################################################################################################
################################################# PCA ###########################################################
setwd("~/") #set working directory
# NOTE(review): setwd() in scripts is discouraged; prefer project-relative paths.
# Read the STRUCTURE-format genotypes (20 individuals, 24264 loci; `str`
# shadows base::str here).
str <- read.structure('brasiliensis_final.str', n.ind = 20, n.loc = 24264, col.lab = 1,
col.pop = 2, onerowperind = FALSE, row.marknames = 0, NA.char= 0,
ask = FALSE)
# Centre and scale genotypes, mean-imputing missing calls, then run the PCA.
str2 <- scaleGen (str, cent=TRUE,scale=TRUE,NA.method = "mean")
pca <- prcomp(str2,center = TRUE,scale. =TRUE)
summary(pca)
pca$x
write.csv(pca$x, file = "scores_pca.csv") # save pca scores
###Plot PCA 1n2 ####
tiff("pca1n2_moleculas.tiff", width=20, height=15, unit="cm", res=300)
quartz.options(height=10, width=12, dpi=72);
plot.new();
par(oma=c(1,1,1,1));
par(mar=c(5,5,1,1));
plot.window(xlim=c(-100,300), ylim=c(-100, 200));
# Fixed: prcomp() objects store sample scores in $x ($li belongs to ade4's
# dudi.pca and is NULL here), matching the pca$x used above for the CSV export.
points(pca$x[14:20,1],pca$x[14:20,2], col = 'slategray4', bg = "#77ab59", cex = 1.5, pch=21) # ATLANTIC FOREST
points(pca$x[1:13,1],pca$x[1:13,2], col = 'slategray4', bg = "#E7B800", cex = 1.5, pch=21) #PAMPA
axis(1, at=seq(-100, 300, by=130), cex.axis=1.15);
axis(2, at=seq(-100, 200, by=100), cex.axis=1.15, las=1);
mtext(side=1, text='PC1 (10.52%)',line=2.5, cex=1)
mtext(side=2, text='PC2 (8.79%)', line=2.8, cex=1)
legend<- c("Pampas", "Atlantic Forest")
col=c("#E7B800", "#77ab59")
legend("topright", legend = legend, cex=1, bty="n", col = col, pch= 16)
dev.off()
#################################################################################################################
################################### TRACY-WIDOM TEST FOR EIGENVALUES ############################################
#Tracy CA and Widom H. (1994). Level spacing distributions and the bessel kernel. Commun Math Phys. 161 :289--309.
#Patterson N, Price AL and Reich D. (2006). Population structure and eigenanalysis. PLoS Genet. 2 :20.
# Fixed: prcomp() objects carry no $eig component (pca$eig is NULL); the
# eigenvalues are the squared singular values stored in pca$sdev.
eigenvalues <- pca$sdev^2
eigenL <- length(eigenvalues)
#criticalpoint: a numeric value corresponding to the significance level. If the significance level is 0.05, 0.01,
#0.005, or 0.001,the criticalpoint should be set to be 0.9793, 2.0234, 2.4224, or 3.2724, accordingly. The default
# is 2.0234
tw <- tw(eigenvalues, eigenL, criticalpoint = 0.9793) # AssocTests::tw
tw$SigntEigenL #the number of significant eigenvalues
#################################################################################################################
############################################# MANTEL TEST #######################################################
### Genomic distance #####
PCs<- pca$x[,1:11] ### ~70% of the variation
# Mahalanobis distances between individuals in PC space.
PCA_dist<-ecodist::distance(PCs, method = "mahalanobis", sprange=NULL, spweight=NULL)
# NOTE(review): dist() is applied to an object that is already a distance;
# confirm this double transformation is intended before reusing the CSV.
PCA_dist2<-as.matrix(dist(PCA_dist))
write.csv(PCA_dist2, "PCA_gen_dist.csv") # save the PCA genetic distance
### Geographic distance #####
coord <- read.csv("coord_ind_gen.csv", header = T)
coord <- coord[,2:3] #select only the Long/Lat column
# Pairwise geographic distances between sampling points (fossil::earth.dist).
mDIST <- earth.dist(coord, dist = TRUE)
mDIST <- as.dist(mDIST)
mDIST_vec <- as.vector(mDIST)
hist(mDIST_vec)
### Run the Mantel test #####
# Mantel test of isolation by distance, 10000 permutations.
mantel <- mantel.rtest(PCA_dist, mDIST, 10000)
plot(mDIST, PCA_dist, pch = 16, ylab = "PCA_dist", xlab = "mDIST", cex = 1.5, cex.lab=1.5, axes= T)
abline(lm(PCA_dist ~ mDIST), col = "gray30", lwd = 3)
### Run the Mantel test for each biome ####
### Genomic distance for the Pampas #####
# PCA restricted to the 13 Pampas individuals (rows 1:13).
pca_pampa <- prcomp(str2[1:13,])
summary(pca_pampa)
PCs_pampa<- pca_pampa$x[,1:8] ### ~70% of the variation
PCA_dist_pampa<-ecodist::distance(PCs_pampa, method = "mahalanobis", sprange=NULL, spweight=NULL)
### Geographic distance for the Pampas #####
# NOTE(review): this reads coord_ind.csv while the all-samples test above
# read coord_ind_gen.csv -- confirm both files share the same row order.
coord <- read.csv("coord_ind.csv", header = T)
coord_pampa <- coord[1:13,2:3]
mDIST_pampa <- earth.dist(coord_pampa, dist = TRUE)
mDIST_pampa <- as.dist(mDIST_pampa )
### Run the Mantel test for the Pampas #####
mantel_pampa <- mantel.rtest(PCA_dist_pampa, mDIST_pampa, 10000)
plot(mDIST_pampa, PCA_dist_pampa, pch = 16, ylab = "PCA_dist", xlab = "mDIST", cex = 1.5, cex.lab=1.5, axes= T)
abline(lm(PCA_dist_pampa ~ mDIST_pampa), col = "gray30", lwd = 3)
### Genomic distance for the Atlantic forest #####
# PCA restricted to the 7 Atlantic Forest individuals (rows 14:20).
pca_AF <- prcomp(str2[14:20,])
summary(pca_AF)
PCs_AF <- pca_AF$x[,1:4] ### ~70% of the variation
# Fixed: the original passed the undefined object PCs_FA here, which errored;
# the score matrix computed above is PCs_AF.
PCA_dist_FA <- ecodist::distance(PCs_AF, method = "mahalanobis", sprange=NULL, spweight=NULL)
### Geographic distance for the Atlantic forest #####
coord_FA <- coord[14:20,2:3]
mDIST_FA <- earth.dist(coord_FA, dist = TRUE)
mDIST_FA <- as.dist(mDIST_FA )
### Run the Mantel test for the Atlantic forest #####
mantel_FA <- mantel.rtest(PCA_dist_FA, mDIST_FA, 10000)
plot( mDIST_FA, PCA_dist_FA, pch = 16, ylab = "PCA_dist", xlab = "mDIST", cex = 1.5, cex.lab=1.5, axes= T)
abline(lm(PCA_dist_FA ~ mDIST_FA), col = "gray30", lwd = 3)
|
1f451c9cb52b828c5a8fea7daf88cdad88e3ffde | 3b9f656885e4822849d265a2e7b4476528ea3337 | /R/MTTF.OICI.M1.D2.R | 2a60ff3ca1c79d814acbb53a3ffd45930c99f01c | [] | no_license | cran/iDEMO | f31322c5e8c1680f84eb5320f7c1a87cad265570 | ce4fcdc6adefc20177e18f434ff0bab3534bff7a | refs/heads/master | 2020-05-17T11:12:52.543048 | 2012-06-06T00:00:00 | 2012-06-06T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,338 | r | MTTF.OICI.M1.D2.R | MTTF.OICI.M1.D2 <-
function(alpha, W, Eta.M1, sEta.M1, Cov.Mat.M1, h.val, ...){
# Confidence intervals (level 1 - alpha) for the mean time to failure (MTTF)
# under model M1, via an approximate and an "exact" expression. OICI
# presumably means observed-information confidence interval -- TODO confirm
# against the package documentation.
# Shrink the finite-difference step if it is too large relative to the
# parameters (keeps the +/- 2h evaluation points in a sensible range).
if( h.val > min(Eta.M1/2, sEta.M1^2/2) ) h.val <- min(Eta.M1/3, sEta.M1^2/3)
#approx.
# Delta-method CI around the first-order MTTF approximation W / eta.
MTTF.approx.M1 <- W / Eta.M1
var.MTTF.approx.M1 <- ( -W / Eta.M1^2 )^2 * Cov.Mat.M1[1,1]
OICI.lower.MTTF.approx.M1 <- MTTF.approx.M1 - qnorm( 1 - alpha / 2 ) * sqrt( var.MTTF.approx.M1 )
OICI.upper.MTTF.approx.M1 <- MTTF.approx.M1 + qnorm( 1 - alpha / 2 ) * sqrt( var.MTTF.approx.M1 )
# CI on the log scale, back-transformed with exp().
OICI.ln.lower.MTTF.approx.M1 <- exp( log( MTTF.approx.M1 ) - qnorm( 1 - alpha / 2 ) * sqrt( Eta.M1^(-2) * Cov.Mat.M1[1,1] ) )
OICI.ln.upper.MTTF.approx.M1 <- exp( log( MTTF.approx.M1 ) + qnorm( 1 - alpha / 2 ) * sqrt( Eta.M1^(-2) * Cov.Mat.M1[1,1] ) )
#Exact
# Dawson-function form of the exact MTTF (the commented-out expressions are
# the equivalent integral forms); skipped when the argument is so large the
# computation would not be representable.
if( (Eta.M1/sqrt(2)/sEta.M1) <= 10^307 ){
f1 <- function(x) exp( x^2 / ( 2 * sEta.M1^2 ) )
MTTF.exact.M1 <- sqrt(2)*W / sEta.M1 * dawson(Eta.M1/sqrt(2)/sEta.M1) #W / sEta.M1^2 * exp( - Eta.M1^2 / ( 2 * sEta.M1^2 ) ) * integrate( f1, 0, Eta.M1 )$value
# Gradient of the exact MTTF w.r.t. (eta, sEta^2); the third component is
# never assigned and stays 0 (numeric() initialises to zeros).
diff_MTTF.M1 <- numeric( length = 3 )
sEta2.M1 <- sEta.M1^2
MTTF.M1.Eta <- function(eta) sqrt(2) * W / sEta.M1 * dawson( eta / sqrt(2) / sEta.M1 )
#W / sEta.M1^2 * exp( - eta^2 / ( 2 * sEta.M1^2 ) ) *
#integrate( function(x) exp( x^2 / ( 2 * sEta.M1^2 ) ), 0, eta )$value
MTTF.M1.sEta2 <- function(seta2) sqrt(2) * W / sqrt(seta2) * dawson( Eta.M1 / sqrt(2) / sqrt(seta2) )
#W / seta2 * exp( - Eta.M1^2 / ( 2 * seta2 ) ) *
#integrate( function(x) exp( x^2 / ( 2 * seta2 ) ), 0, Eta.M1 )$value
# Five-point central finite differences with step h.val.
diff_MTTF.M1[1] <- ( MTTF.M1.Eta( Eta.M1 - 2 * h.val ) - 8 * MTTF.M1.Eta( Eta.M1 - h.val ) +
8 * MTTF.M1.Eta( Eta.M1 + h.val ) - MTTF.M1.Eta( Eta.M1 + 2 * h.val ) ) /
( 12 * h.val )
diff_MTTF.M1[2] <- ( MTTF.M1.sEta2( sEta2.M1 - 2 * h.val ) - 8 * MTTF.M1.sEta2( sEta2.M1 - h.val ) +
8 * MTTF.M1.sEta2( sEta2.M1 + h.val ) - MTTF.M1.sEta2( sEta2.M1 + 2 * h.val ) ) /
( 12 * h.val )
# Delta method with the full covariance matrix.
var.MTTF.exact.M1 <- t(diff_MTTF.M1) %*% Cov.Mat.M1 %*% diff_MTTF.M1
OICI.lower.MTTF.exact.M1 <- MTTF.exact.M1 - qnorm( 1 - alpha / 2 ) * sqrt( var.MTTF.exact.M1 )
OICI.upper.MTTF.exact.M1 <- MTTF.exact.M1 + qnorm( 1 - alpha / 2 ) * sqrt( var.MTTF.exact.M1 )
# Log-scale variant of the exact CI.
var2.MTTF.exact <- t( diff_MTTF.M1 / MTTF.exact.M1 ) %*% Cov.Mat.M1 %*% ( diff_MTTF.M1 / MTTF.exact.M1 )
OICI.ln.lower.MTTF.exact.M1 <- exp( log( MTTF.exact.M1 ) - qnorm( 1 - alpha / 2 ) * sqrt( var2.MTTF.exact ) )
OICI.ln.upper.MTTF.exact.M1 <- exp( log( MTTF.exact.M1 ) + qnorm( 1 - alpha / 2 ) * sqrt( var2.MTTF.exact ) )
}else{
# Fall back to the approximate results when the exact form is not computed.
MTTF.exact.M1 <- MTTF.approx.M1
OICI.lower.MTTF.exact.M1 <- OICI.lower.MTTF.approx.M1
OICI.upper.MTTF.exact.M1 <- OICI.upper.MTTF.approx.M1
OICI.ln.lower.MTTF.exact.M1 <- OICI.ln.lower.MTTF.approx.M1
OICI.ln.upper.MTTF.exact.M1 <- OICI.ln.upper.MTTF.approx.M1
}
# Return point estimates plus plain- and log-scale CIs for both variants.
list( MTTF.approx.M1 = MTTF.approx.M1,
CI.MTTF.approx.M1 = c(OICI.lower.MTTF.approx.M1, OICI.upper.MTTF.approx.M1),
CI.ln.MTTF.approx.M1 = c(OICI.ln.lower.MTTF.approx.M1, OICI.ln.upper.MTTF.approx.M1),
MTTF.exact.M1 = MTTF.exact.M1,
CI.MTTF.exact.M1 = c(OICI.lower.MTTF.exact.M1, OICI.upper.MTTF.exact.M1),
CI.ln.MTTF.exact.M1 = c(OICI.ln.lower.MTTF.exact.M1, OICI.ln.upper.MTTF.exact.M1) )
}
|
ceb82aaffaa63077a0dce6ae7ee055f94a37f98d | 2931c0b42f80075e212b4a28cd0b9b1586eac6af | /Talleres/Taller3/punto 6.R | b2a4a54d55bd74cbe6592d92cf21851ef5253a78 | [] | no_license | mgalvis60/Analisis_Numerico_1910 | f4edfe9091643c362d8366acb224c4e869024ca7 | d3a8def7b35956c3cf54efe4e9660a09439ef7b1 | refs/heads/master | 2020-04-20T04:32:38.040374 | 2019-05-15T12:09:06 | 2019-05-15T12:09:06 | 168,630,617 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 741 | r | punto 6.R | #Punto 6
library(pracma)
# Target functions for the degree-4 Taylor expansions about x0 = 0.
f1 <- function(x) exp(x)
f2 <- function(x) 1 / x
#a
r1 <- c(taylor(f1, 0, 4))
r2 <- c(taylor(f2, 0, 4))
x <- c(0, 1, 2, 3, 4)
# Round every coefficient to 5 decimals. The original looped over 0:4: in R
# the i = 0 iteration is a zero-length no-op (r1[0] selects nothing), so only
# elements 1..4 were rounded and the last coefficient of the length-5 vectors
# (used as daty1/daty2 below) was left unrounded. Vectorised round() covers
# every element regardless of length.
r1 <- round(r1, 5)
r2 <- round(r2, 5)
datx <- x[1:5]
daty1 <- r1[1:5]
daty2 <- r2[1:5]
#b
# Interpolating polynomials through the (x, coefficient) pairs.
polyAjuste1 <- poly.calc(datx, daty1)
polyAjuste2 <- poly.calc(datx, daty2)
polyAjuste1
polyAjuste2
#c
# (translated) The interpolating polynomial is considered a good interpolator
# because it always yields the data required of a function in every case,
# making interpolation straightforward. However, it is the least efficient
# interpolation method, outperformed by Lagrange, barycentric Lagrange and
# divided differences.
|
d007c1d6d43d52cf5cfc0a770c0ad4ec39a66452 | d4c98a25eeeaeb76b698a4499ce918a84a292572 | /Practice Diet/e.R | 0b19ff9eb302e9d59d2598363feedd6131628bfe | [] | no_license | RoelRotti/EDDA-Data_Analytics-2021 | e431422d29ddf0e6d3fb491c62c248a19d632b52 | 5f6b0f84aeac018ed72f13d7bad8ff827bcfd946 | refs/heads/main | 2023-06-10T22:50:57.141883 | 2021-07-01T11:03:00 | 2021-07-01T11:03:00 | 350,256,495 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 462 | r | e.R | ## e)
is.numeric(diet$height) # just to check height is numeric
mod4=lm(weight.lost~diet*height,data=diet)
anova(mod4) # no interaction between diet and height
## Is the effect of height the same for all 3 types of diet?
# Yes, because there is no interaction between diet and height.
# Now we test for the main effects by using the additive model
anova(lm(weight.lost~diet+height,data=diet))
# conclude that diet is significant and height is not.
|
487541c2f5bd1c677afeb7492253f5fb64146a3b | 1a0a84f21df6f7788bb80f5e0a38fa150121370d | /regimeSwitch_th/plotcode/weatherInclPanel.R | f5950182afa2038eb46e33c304e0a521f92d4b6b | [] | no_license | juetaoLim/regimeSwitchTH | aacc6495f84bb592aacdbac511a1f2a859aaf2fd | f6bbbf5fbdbfd5b7528c9c7b4940ee236bdbc4ef | refs/heads/main | 2023-01-15T15:44:18.723798 | 2020-11-24T09:00:48 | 2020-11-24T09:00:48 | 315,547,225 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,884 | r | weatherInclPanel.R | rm(list=ls())
load("~/nBox/regime_switch_hamfil_thailand/results_mapAll/out_cluster.RData")
clust <- ind
load("~/nBox/regime_switch_hamfil_thailand/results_lassoBoot_all/out.RData")
#preprocess
store <- list()
for (i in 1:length(coefs)){
temp <- coefs[[i]]
inclProb <- apply(temp,MARGIN=1,function(x)length(which(x==0))/length(x))
store[[i]]<-inclProb}
store <- Reduce(rbind,store)
store <- 1- store
store1 <- store[which(ind==1),]
store2 <- store[which(ind==2),]
ah <- 1:4
rh <- 5:8
tp <- 9:12
at <- 13:16
pdf(file="~/nBox/regime_switch_hamfil_thailand/plots/climInclu.pdf",height=10,width=7)
par(mfrow=c(4,2),pty='s',oma=c(4,4,0,4),mar=c(2,1,2,1),las=1)
boxplot(store1[,ah],ylab="",main="Absolute Humidity C1",col="lightblue", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store2[,ah],ylab="",main="Absolute Humidity C2",col="orangered", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store1[,rh],ylab="",main="Relative Humidity C1",col="lightblue", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store2[,rh],ylab="",main="Relative Humidity C2",col="orangered", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store1[,tp],ylab="",main="Total Precipitation C1",col="lightblue", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store2[,tp],ylab="",main="Total Precipitation C2",col="orangered", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store1[,at],ylab="",main="Average Temperature C1",col="lightblue", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
boxplot(store2[,at],ylab="",main="Average Temperature C2",col="orangered", outline = F,ylim=c(0,1))
abline(h=0.5,col="grey",lty=2)
par(las=0)
mtext(outer=T,text="Lag (Month)",side=1,cex=2,padj=0.7)
mtext(outer=T,text="Inclusion Probabilities",side=2,cex=2)
dev.off()
|
6459bbcf231f7278566f45c001cef460b2e99030 | 0c2380c77abb4bec2805460813b2e3ad8d7e0eda | /man/primate.continuous2.Rd | d7976df7c350db703e174fecc6aa97834c219ebf | [
"MIT"
] | permissive | balthasarbickel/btw | 832d4d289583a280677f31614fdb65ea161b5727 | 41efa42c9f92f96a9431a70fe225122e67ec02c5 | refs/heads/master | 2020-04-06T04:35:10.657763 | 2016-11-23T10:52:46 | 2016-11-23T10:52:46 | 54,259,996 | 0 | 1 | null | 2016-03-19T10:23:20 | 2016-03-19T10:23:20 | null | UTF-8 | R | false | false | 354 | rd | primate.continuous2.Rd | \docType{data}
\name{primate.continuous2}
\alias{primate.continuous2}
\title{Random pair of continuous traits for primates}
\format{Text file}
\source{
Random data generated in R
}
\description{
This is just random data to demonstrate how to use
BayesTraits functions.
}
\author{
Randi H. Griffin
}
\usage{primate.continuous2}
\keyword{datasets} |
becf28ce5be470e12a5fd1b0733be41b1fe2b0bb | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.storage/man/efs_create_mount_target.Rd | 9889bb4a5939ea4277862b1034fd04fbc21a8db1 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,111 | rd | efs_create_mount_target.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/efs_operations.R
\name{efs_create_mount_target}
\alias{efs_create_mount_target}
\title{Creates a mount target for a file system}
\usage{
efs_create_mount_target(
FileSystemId,
SubnetId,
IpAddress = NULL,
SecurityGroups = NULL
)
}
\arguments{
\item{FileSystemId}{[required] The ID of the file system for which to create the mount target.}
\item{SubnetId}{[required] The ID of the subnet to add the mount target in. For file systems that
use One Zone storage classes, use the subnet that is associated with the
file system's Availability Zone.}
\item{IpAddress}{Valid IPv4 address within the address range of the specified subnet.}
\item{SecurityGroups}{Up to five VPC security group IDs, of the form \code{sg-xxxxxxxx}. These must
be for the same VPC as subnet specified.}
}
\description{
Creates a mount target for a file system. You can then mount the file system on EC2 instances by using the mount target.
See \url{https://www.paws-r-sdk.com/docs/efs_create_mount_target/} for full documentation.
}
\keyword{internal}
|
95dcf81fbe9fe7dd488e67c9a4cb8643d6d21f5b | e5c5e9f55fc275e5c9b91f48f7b8d293df803083 | /Solutions/day_22.R | cb704c58f1283b1b764da149933185ee7cf7dfca | [] | no_license | zapateros/aoc2018 | 0eeddb7ae501c80613914aca6b3d4a0b0e85a81c | ba519df333bf76389f803209d8615e757a45e958 | refs/heads/master | 2020-04-09T15:09:50.199962 | 2019-01-13T12:56:23 | 2019-01-13T12:56:23 | 160,417,621 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,719 | r | day_22.R | # Today's problem was the first, and only, one where I had no clue how to do it (22.2). I looked into some solutions online
# and it was actually a small change in my own solution for 22.1. Small, but very helpfull, because it opened my eyes for
# solving many other (future) problems. Bytheway, the solution was to add an extra parameter to the grid for calculating the
# shortest path. So not just x1,y1 to x2,y2 but x1,y1,z1 to x2,y2,z2 with z the climbing gear parameter, where the weighting is 7
# when changing climbing gear (for example z1 to z2)
# The preparation (make edgelist) for calculating the shortest path in 22.2 is hardcoded (copy-paste). There is a much shorter solution possible
# 22.1
depth <- 4848
target <- c(16, 701)
mt <- matrix(rep(0, 11216), ncol = 16)
for(i in 1:ncol(mt)){
for(j in 1:nrow(mt)){
if((i == 1 & j == 1) | (i == 16 & j == 701)){
mt[j, i] <- depth %% 20183
}else if(i == 1){
mt[j, i] <- (((j - 1) * 48271) + depth) %% 20183
}else if(j == 1){
mt[j, i] <- (((i - 1) * 16807) + depth) %% 20183
}else{
mt[j, i] <- ((mt[j, i - 1] * mt[j - 1, i]) + depth) %% 20183
}
}
}
u <- mt %% 3
sum(u)
# 22.2
depth <- 4848
target <- c(16, 701)
mt <- matrix(rep(0, 48000), ncol = 60)
for(i in 1:60){
for(j in 1:800){
if((i == 1 & j == 1) | (i == 16 & j == 701)){
mt[j, i] <- depth %% 20183
}else if(i == 1){
mt[j, i] <- (((j - 1) * 48271) + depth) %% 20183
}else if(j == 1){
mt[j, i] <- (((i - 1) * 16807) + depth) %% 20183
}else{
mt[j, i] <- ((mt[j, i - 1] * mt[j - 1, i]) + depth) %% 20183
}
}
}
u <- mt %% 3
wd <- ncol(u)
ht <- nrow(u)
all <- NULL
for(i in 1:799){
for(j in 1:59){
pt <- u[i, j]
ptn <- u[i, j + 1]
ptu <- u[i + 1, j]
#point next to
if(pt == 0){
if(ptn == 0){
adn <- c(i, j, 1, i, j + 1, 1, 1)
adn1 <- c(i, j, 2, i, j + 1, 2, 1)
all <- rbind(all, adn, adn1)
}
if(ptn == 1){
adn <- c(i, j,1, i, j + 1, 1, 1)
adn1 <- c(i, j,1, i, j + 1, 3, 8)
adn2 <- c(i, j,2, i, j + 1, 1, 8)
adn3 <- c(i, j,2, i, j + 1, 3, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptn == 2){
adn <- c(i, j, 1, i, j + 1, 2, 8)
adn1 <- c(i, j, 1, i, j + 1, 3, 8)
adn2 <- c(i, j, 2, i, j + 1, 2, 1)
adn3 <- c(i, j, 2, i, j + 1, 3, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
}
if(pt == 1){
if(ptn == 0){
adn <- c(i, j, 1, i, j + 1, 1, 1)
adn1 <- c(i, j, 1, i, j + 1, 2, 8)
adn2 <- c(i, j, 3, i, j + 1, 1, 8)
adn3 <- c(i, j, 3, i, j + 1, 2, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptn == 1){
adn <- c(i, j, 1, i, j + 1, 1, 1)
adn1 <- c(i, j, 3, i, j + 1, 3, 1)
all <- rbind(all, adn, adn1)
}
if(ptn == 2){
adn <- c(i, j, 1, i, j + 1, 2, 8)
adn1 <- c(i, j, 1, i, j + 1, 3, 8)
adn2 <- c(i, j, 3, i, j + 1, 2, 8)
adn3 <- c(i, j, 3, i, j + 1, 3, 1)
all<-rbind(all, adn, adn1, adn2, adn3)
}
}
if(pt == 2){
if(ptn == 0){
adn <- c(i, j, 2, i, j + 1, 1, 8)
adn1 <- c(i, j, 2, i, j + 1, 2, 1)
adn2 <- c(i, j, 3, i, j + 1, 1, 8)
adn3 <- c(i, j, 3, i, j + 1, 2, 8)
all<-rbind(all, adn, adn1, adn2, adn3)
}
if(ptn == 1){
adn <- c(i, j, 2, i, j + 1, 1, 8)
adn1 <- c(i, j, 2, i, j + 1, 3, 8)
adn2 <- c(i, j, 3, i, j + 1, 1, 8)
adn3 <- c(i, j, 3, i, j + 1, 3, 1)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptn == 2){
adn <- c(i, j, 2, i, j + 1, 2, 1)
adn1 <- c(i, j, 3, i, j + 1, 3, 1)
all <- rbind(all, adn, adn1)
}
}
#point under
if(pt == 0){
if(ptu == 0){
adn <- c(i, j, 1, i + 1, j, 1, 1)
adn1 <- c(i, j, 2, i + 1, j, 2, 1)
all <- rbind(all, adn, adn1)
}
if(ptu == 1){
adn <- c(i, j, 1, i + 1, j, 1, 1)
adn1 <- c(i, j, 1, i + 1, j, 3, 8)
adn2 <- c(i, j, 2, i + 1, j, 1, 8)
adn3 <- c(i, j, 2, i + 1, j, 3, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptu == 2){
adn <- c(i, j, 1, i + 1, j, 2, 8)
adn1 <- c(i, j, 1, i + 1, j, 3, 8)
adn2 <- c(i, j, 2, i + 1, j, 2, 1)
adn3 <- c(i, j, 2, i + 1, j, 3, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
}
if(pt == 1){
if(ptu == 0){
adn <- c(i, j, 1, i + 1, j, 1, 1)
adn1 <- c(i, j, 1, i + 1, j, 2, 8)
adn2 <- c(i, j, 3, i + 1, j, 1, 8)
adn3 <- c(i, j, 3, i + 1, j, 2, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptu == 1){
adn <- c(i, j, 1, i + 1, j, 1, 1)
adn1 <- c(i, j, 3, i + 1, j, 3, 1)
all <- rbind(all, adn, adn1)
}
if(ptu == 2){
adn <- c(i, j, 1, i + 1, j, 2, 8)
adn1 <- c(i, j, 1, i + 1, j, 3, 8)
adn2 <- c(i, j, 3, i + 1, j, 2, 8)
adn3 <- c(i, j, 3, i + 1, j, 3, 1)
all <- rbind(all, adn, adn1, adn2, adn3)
}
}
if(pt == 2){
if(ptu == 0){
adn <- c(i, j, 2, i + 1, j, 1, 8)
adn1 <- c(i, j, 2, i + 1, j, 2, 1)
adn2 <- c(i, j, 3, i + 1, j, 1, 8)
adn3 <- c(i, j, 3, i + 1, j, 2, 8)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptu == 1){
adn <- c(i ,j ,2 ,i + 1, j, 1, 8)
adn1 <- c(i ,j ,2 ,i + 1, j, 3, 8)
adn2 <- c(i ,j ,3 ,i + 1, j, 1, 8)
adn3 <- c(i ,j ,3 ,i + 1, j, 3, 1)
all <- rbind(all, adn, adn1, adn2, adn3)
}
if(ptu == 2){
adn <- c(i, j, 2, i + 1, j, 2, 1)
adn1 <- c(i, j, 3, i + 1, j, 3, 1)
all <- rbind(all, adn, adn1)
}
}
}
cat(i,"\n")
}
library(igraph)
vc <- NULL
vc1 <- NULL
for(i in 1:nrow(all)){
v <- paste(all[i, 1], all[i, 2], all[i, 3], sep = ",")
v1 <- paste(all[i, 4], all[i, 5], all[i, 6], sep = ",")
vc <- c(vc, v)
vc1 <- c(vc1, v1)
if(round(i / 10000) == i / 10000){
cat(i, "\n")
}
}
t1 <- graph_from_edgelist(cbind(vc, vc1), directed = FALSE)
E(t1)$weight <- all[, 7]
el <- get.shortest.paths(t1, from = "1,1,2", to = "701,16,2")
tt <- names(unlist(el$vpath))
jj <- sapply(strsplit(tt, ","), function(x){x[3]})
jj1 <- sapply(strsplit(tt, ","), function(x){x[1]})
jj2 <- sapply(strsplit(tt, ","), function(x){x[2]})
h <- 0
re <- "2"
for(i in 1:length(jj)){
if(jj[i] != re){
h <- h + 1
re <- jj[i]
}
}
result <- (length(jj) - 1) + h * 7
result
|
aa2e4d19a09ec6a732d440d397d4f18c78db9b2e | f9d607dd79d2e3a1da2b32a319726cbf4ef36f80 | /R/analyse_log.R | cbe416eca64a390dc474964188c40e9c4ee209ac | [
"MIT"
] | permissive | stibu81/WordBox | 4652f7e0177a4ed421e5eccc0bd3d97727135509 | f9336ca69d6128caaaf7f9fc980f86b63f9ca0f2 | refs/heads/master | 2022-12-26T05:01:37.754675 | 2022-12-10T12:07:10 | 2022-12-10T12:07:10 | 205,691,037 | 6 | 0 | NOASSERTION | 2020-11-01T17:04:31 | 2019-09-01T14:51:42 | R | UTF-8 | R | false | false | 3,436 | r | analyse_log.R | #' Analyse the Log and Plot Quiz History
#'
#' Read in and analyse the log file that is written by the WordBox shiny
#' app. Create a plot of quiz history with various options.
#'
#' @param file path to the log file
#'
#' @return
#' a tibble with one row per quiz for `analyse_log()`. A `ggplot2` or `plotly`
#' plot for `plot_quiz_per_date()`.
#'
#' @export
analyse_log <- function(file) {
# guess the encoding, if not successful, use UTF-8
guess_enc <- readr::guess_encoding(file)
enc <- if (nrow(guess_enc) == 0) {
stop("The encoding of the file ", file, " could not be determined.")
} else {
guess_enc$encoding[1]
}
# read the log and split it up in section corresponding to quizzes
log <- readr::read_lines(file, locale = readr::locale(encoding = enc))
if (log[1] != "##### WordBox Log File #####") {
stop(file, " is not a valid WordBox log file.")
}
i_start <- stringr::str_which(log, "starting a new quiz from file")
i_end <- c(i_start[-1] - 1, length(log))
quiz_logs <- Map(function(i, j) log[i:j], i_start, i_end)
# compile the data
lapply(quiz_logs, analyse_one_quiz) %>%
dplyr::bind_rows()
}
# helper to analyse a single quiz
analyse_one_quiz <- function(ql) {
start_line <- ql %>%
stringr::str_subset("starting a new quiz")
q_file <- start_line %>%
stringr::str_extract("(?<= from file).*") %>%
stringr::str_trim() %>%
basename()
start_time <- extract_dttm(start_line)
end_time <- utils::tail(ql, 1) %>% extract_dttm()
duration <- as.numeric(end_time - start_time, unit = "mins")
direction <- ql %>%
stringr::str_subset(" : direction: ") %>%
stringr::str_extract("\\w+ [<>] \\w+")
q_type <- ql %>%
stringr::str_subset(" : quiz type: ") %>%
stringr::str_extract("(?<=quiz type:).*") %>%
stringr::str_trim()
q_mode <- ql %>%
stringr::str_subset(" : mode: ") %>%
stringr::str_extract("(?<=mode:).*") %>%
stringr::str_trim()
n_words <- ql %>%
stringr::str_subset(" : # words: ") %>%
stringr::str_extract("(?<=# words:).*") %>%
stringr::str_trim() %>%
as.numeric()
tcwr <- ql %>%
stringr::str_subset(" : total / correct / wrong / remaining: ") %>%
utils::tail(n = 1) %>%
stringr::str_extract("(?<=remaining:).*") %>%
stringr::str_split("/") %>%
unlist() %>%
as.numeric()
words_per_group <- ql %>%
# the word with unit is listed twice: once when it is quizzed ("quizzing word")
# and once after it was answered ("correct/wrong answer for word").
# => keep only the lines for answered words to avoid counting twice.
stringr::str_subset("answer for word:") %>%
stringr::str_trim() %>%
# the word may contain brackets and the pattern has to make sure that only
# the last word in brackets on each line is kept.
stringr::str_extract("(?<=\\()[^())]*(?=\\)$)") %>%
stringr::str_trim() %>%
table() %>%
as.list()
dplyr::tibble(
file = q_file,
direction = direction,
type = q_type,
mode = q_mode,
start = start_time,
duration = duration,
n_words = n_words,
n_quizzed = tcwr[1],
n_correct = tcwr[2],
n_wrong = tcwr[3],
n_remaining = tcwr[4],
words_per_group = list(words_per_group)
)
}
# extract a timestamp from a string
extract_dttm <- function(x) {
x %>%
stringr::str_extract("\\d{4}-\\d{2}-\\d{2} +\\d{2}:\\d{2}:\\d{2}") %>%
as.POSIXct(tz = "CET")
}
|
13dab6b12097c9fd1ba517ae94df0c5c6fc5c50b | e6b93a2c7a53c6dbc7c8af547c87205442311383 | /man/error_max_value.Rd | c7a24c80e29c5472a17c158ccce150c47503dd6c | [] | no_license | sarahlotspeich/stride | 9433272f8c1b318eca591209b2b81c24b72c95d8 | 5e135f50ce96666c21f8e2bb2f9affbf4ed44325 | refs/heads/master | 2023-03-19T22:09:29.138238 | 2021-03-07T01:41:04 | 2021-03-07T01:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 639 | rd | error_max_value.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{error_max_value}
\alias{error_max_value}
\title{Error from exceeding maximum}
\usage{
error_max_value(x, max_x, variable_name)
}
\arguments{
\item{x}{numeric vector or array object.}
\item{max_x}{maximum allowed value in the vector or array.}
\item{variable_name}{name of object x.}
}
\value{
Error message if any component of \code{x} is larger than \code{max_x}.
}
\description{
Reports error if x > max_x
}
\examples{
## Reports an error (not run)
##love <- c(1,2)
##max_love <- 1.5
##error_max_value(love,max_love,get_variable_name(love))
}
|
44650e2c65437d5815dd20810ed40a6af122d1f1 | 626e67873449847d250ff4da4068f12e7ba4fc33 | /analysis/redaction/infection_v2_AB.R | 08f898d741c618441264335325679fad943a4c34 | [
"MIT"
] | permissive | opensafely/amr-uom-brit | 26787eb94341dc34e8500d8947f841cfa8079193 | 59ef7ca778d4971d98d7c1f39f6ac3269f8b3645 | refs/heads/main | 2023-09-02T20:55:20.701664 | 2023-07-14T15:08:51 | 2023-07-14T15:08:51 | 404,285,598 | 1 | 0 | MIT | 2022-11-01T10:13:20 | 2021-09-08T09:21:36 | HTML | UTF-8 | R | false | false | 37,379 | r | infection_v2_AB.R | ##############
library("data.table")
library("dplyr")
library('here')
library("tidyverse")
library("lubridate")
library("ggpubr")
dir.create(here::here("output", "redacted_v2"))
rm(list=ls())
setwd(here::here("output", "measures"))
#setwd("/Users/user/Documents/GitHub/amr-uom-brit/output/measures")
########### UTI
df=readRDS("abtype_uti.rds")
df=bind_rows(df)
# remove last month data
last.date=max(df$date)
df=df%>% filter(date!=last.date)
first_mon <- (format(min(df$date), "%m-%Y"))
last_mon <- (format(max(df$date), "%m-%Y"))
# variable types
df$prevalent=as.factor(df$prevalent)
df$date=as.Date(df$date)
df$abtype=as.character(df$abtype)
## filter case with ab
df=df%>%filter(!is.na(abtype))
##select prevalent cases
# calculate ab types
df.1=df%>%filter(prevalent==1)%>%group_by(date)%>%mutate(total=n())
df.1=df.1%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.1=df.1%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.1$type=ifelse(df.1$abtype %in% DF.top10.1$abtype | is.na(df.1$abtype), df.1$abtype, "Others")
# recode NA -> no recorded antibiotics
df.1$type=ifelse(is.na(df.1$type),"No_antibiotics", df.1$type)
df.1$type <- factor(df.1$type, levels=c(DF.top10.1$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.1=df.1%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.1$percentage=df.1$count/df.1$total
##select incident cases
# calculate ab types
df.0=df%>%filter(prevalent==0)%>%group_by(date)%>%mutate(total=n())
df.0=df.0%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.0=df.0%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.0$type=ifelse(df.0$abtype %in% DF.top10.0$abtype | is.na(df.0$abtype), df.0$abtype, "Others")
# recode NA -> no recorded antibiotics
df.0$type=ifelse(is.na(df.0$type),"No_antibiotics", df.0$type)
df.0$type <- factor(df.0$type, levels=c(DF.top10.0$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.0=df.0%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.0$percentage=df.0$count/df.0$total
## csv check for plot
rm(DF.top10.0,DF.top10.1,df)
df.1$prevalent=as.factor(1)
df.0$prevalent=as.factor(0)
df=rbind(df.0,df.1)
write_csv(df, here::here("output","redacted_v2", "AB_uti_check.csv"))
### line graph
# prevalent
lineplot.1<- ggplot(df.1, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# incident
lineplot.0<- ggplot(df.0, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
lineplot=ggarrange(lineplot.0, lineplot.1,
labels = c("A", "B"),
nrow = 2)
lineplot=annotate_figure(lineplot,
top = text_grob(" ", face = "bold", size = 14),
bottom = text_grob("A= incident cases; B= prevalent cases.
Grey shading represents national lockdown time.",
hjust = 1, x = 1, size = 10),
fig.lab =paste0("Top10 antibiotic prescriptions issued - UTI ",
first_mon," - ",last_mon),
left = text_grob("", rot = 90),
)
ggsave(
plot= lineplot,
filename="AB_uti.jpeg", path=here::here("output","redacted_v2"))
### tables
# define covid date
breaks <- c(as.Date("2019-01-01"),as.Date("2019-12-31"),# 1=pre-covid, 2=exclusion
as.Date("2020-04-01"), as.Date("2021-12-31"),# 3= covid time
max(df$date)) # NA exclusion
df=df%>%mutate(covid=cut(date,breaks,labels = 1:4))
df=df%>% filter(covid==1 | covid==3)
df$covid= recode(df$covid, '1'="0", '3'="1") # precovid=0, covid=1
df$covid <- factor(df$covid, levels=c("0","1"))
# define seasons
df$month=format(df$date,"%m")
df=df%>% mutate(season= case_when( month=="03"|month=="04"|month=="05" ~ "spring",
month=="06"|month=="07"|month=="08" ~ "summer",
month=="09"|month=="10"|month=="11" ~ "autumn",
month=="12"|month=="01"|month=="02" ~ "winter"))
df.table.1=df%>%
group_by(covid,season,prevalent,type)%>%
summarise(count=sum(count))%>%
mutate(indic="uti")
df.table.1=df.table.1%>%
group_by(covid,season,prevalent)%>%
mutate(total=sum(count), percentage=count/total)
rm(df,df.0,df.1,lineplot,lineplot.0,lineplot.1)
########### LRTI
df=readRDS("abtype_lrti.rds")
df=bind_rows(df)
# remove last month data
last.date=max(df$date)
df=df%>% filter(date!=last.date)
first_mon <- (format(min(df$date), "%m-%Y"))
last_mon <- (format(max(df$date), "%m-%Y"))
# variable types
df$prevalent=as.factor(df$prevalent)
df$date=as.Date(df$date)
df$abtype=as.character(df$abtype)
## filter case with ab
df=df%>%filter(!is.na(abtype))
##select prevalent cases
# calculate ab types
df.1=df%>%filter(prevalent==1)%>%group_by(date)%>%mutate(total=n())
df.1=df.1%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.1=df.1%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.1$type=ifelse(df.1$abtype %in% DF.top10.1$abtype | is.na(df.1$abtype), df.1$abtype, "Others")
# recode NA -> no recorded antibiotics
df.1$type=ifelse(is.na(df.1$type),"No_antibiotics", df.1$type)
df.1$type <- factor(df.1$type, levels=c(DF.top10.1$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.1=df.1%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.1$percentage=df.1$count/df.1$total
##select incident cases
# calculate ab types
df.0=df%>%filter(prevalent==0)%>%group_by(date)%>%mutate(total=n())
df.0=df.0%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.0=df.0%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.0$type=ifelse(df.0$abtype %in% DF.top10.0$abtype | is.na(df.0$abtype), df.0$abtype, "Others")
# recode NA -> no recorded antibiotics
df.0$type=ifelse(is.na(df.0$type),"No_antibiotics", df.0$type)
df.0$type <- factor(df.0$type, levels=c(DF.top10.0$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.0=df.0%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.0$percentage=df.0$count/df.0$total
## csv check for plot
rm(DF.top10.0,DF.top10.1,df)
df.1$prevalent=as.factor(1)
df.0$prevalent=as.factor(0)
df=rbind(df.0,df.1)
write_csv(df, here::here("output","redacted_v2", "AB_lrti_check.csv"))
### line graph
# prevalent
lineplot.1<- ggplot(df.1, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# incident
lineplot.0<- ggplot(df.0, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
lineplot=ggarrange(lineplot.0, lineplot.1,
labels = c("A", "B"),
nrow = 2)
lineplot=annotate_figure(lineplot,
top = text_grob(" ", face = "bold", size = 14),
bottom = text_grob("A= incident cases; B= prevalent cases.
Grey shading represents national lockdown time.",
hjust = 1, x = 1, size = 10),
fig.lab =paste0("Top10 antibiotic prescriptions issued - LRTI ",
first_mon," - ",last_mon),
left = text_grob("", rot = 90),
)
ggsave(
plot= lineplot,
filename="AB_lrti.jpeg", path=here::here("output","redacted_v2"))
### tables
# define covid date
breaks <- c(as.Date("2019-01-01"),as.Date("2019-12-31"),# 1=pre-covid, 2=exclusion
as.Date("2020-04-01"), as.Date("2021-12-31"),# 3= covid time
max(df$date)) # NA exclusion
df=df%>%mutate(covid=cut(date,breaks,labels = 1:4))
df=df%>% filter(covid==1 | covid==3)
df$covid= recode(df$covid, '1'="0", '3'="1") # precovid=0, covid=1
df$covid <- factor(df$covid, levels=c("0","1"))
# define seasons
df$month=format(df$date,"%m")
df=df%>% mutate(season= case_when( month=="03"|month=="04"|month=="05" ~ "spring",
month=="06"|month=="07"|month=="08" ~ "summer",
month=="09"|month=="10"|month=="11" ~ "autumn",
month=="12"|month=="01"|month=="02" ~ "winter"))
df.table.2=df%>%
group_by(covid,season,prevalent,type)%>%
summarise(count=sum(count))%>%
mutate(indic="lrti")
df.table.2=df.table.2%>%
group_by(covid,season,prevalent)%>%
mutate(total=sum(count), percentage=count/total)
rm(df,df.0,df.1,lineplot,lineplot.0,lineplot.1)
###########URTI
# ---------------------------------------------------------------------------
# Upper respiratory tract infection (URTI) antibiotic-type analysis.
# Pipeline: load monthly consultation-level records -> split into incident
# (prevalent==0) and prevalent (prevalent==1) cohorts -> keep each cohort's
# top-10 antibiotics -> monthly percentage line plots (panel A = incident,
# panel B = prevalent) -> seasonal pre-COVID vs COVID summary table
# (df.table.3).  Percentages are shares among consultations that have a
# recorded antibiotic (NA abtype rows are filtered out below).
# ---------------------------------------------------------------------------
df=readRDS("abtype_urti.rds")
# The .rds holds a list of data frames; stack them into one long data frame.
df=bind_rows(df)
# remove last month data
# The most recent month is dropped (presumably incomplete -- TODO confirm).
last.date=max(df$date)
df=df%>% filter(date!=last.date)
# Month-year strings used in the figure label.
first_mon <- (format(min(df$date), "%m-%Y"))
last_mon <- (format(max(df$date), "%m-%Y"))
# variable types
df$prevalent=as.factor(df$prevalent)
df$date=as.Date(df$date)
df$abtype=as.character(df$abtype)
## filter case with ab
# Keep only consultations that have a recorded antibiotic type.
df=df%>%filter(!is.na(abtype))
##select prevalent cases
# calculate ab types
# Per-date denominator: number of antibiotic consultations on that date.
df.1=df%>%filter(prevalent==1)%>%group_by(date)%>%mutate(total=n())
# `total` is constant within each date, so mean(total) just carries it through.
df.1=df.1%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.1=df.1%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
# Antibiotics outside the top 10 are lumped into "Others".
df.1$type=ifelse(df.1$abtype %in% DF.top10.1$abtype | is.na(df.1$abtype), df.1$abtype, "Others")
# recode NA -> no recorded antibiotics
# NOTE(review): abtype NAs were filtered out above, so this branch looks
# unreachable here; kept as a defensive default.
df.1$type=ifelse(is.na(df.1$type),"No_antibiotics", df.1$type)
df.1$type <- factor(df.1$type, levels=c(DF.top10.1$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.1=df.1%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.1$percentage=df.1$count/df.1$total
##select incident cases
# calculate ab types
# Same aggregation for the incident cohort; the top 10 is recomputed, so
# panels A and B may show different antibiotic sets.
df.0=df%>%filter(prevalent==0)%>%group_by(date)%>%mutate(total=n())
df.0=df.0%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.0=df.0%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.0$type=ifelse(df.0$abtype %in% DF.top10.0$abtype | is.na(df.0$abtype), df.0$abtype, "Others")
# recode NA -> no recorded antibiotics
df.0$type=ifelse(is.na(df.0$type),"No_antibiotics", df.0$type)
df.0$type <- factor(df.0$type, levels=c(DF.top10.0$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.0=df.0%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.0$percentage=df.0$count/df.0$total
## csv check for plot
# Export the numbers behind the plot for disclosure checking.
rm(DF.top10.0,DF.top10.1,df)
df.1$prevalent=as.factor(1)
df.0$prevalent=as.factor(0)
df=rbind(df.0,df.1)
write_csv(df, here::here("output","redacted_v2", "AB_urti_check.csv"))
### line graph
# prevalent
# Grey rectangles mark the national lockdown periods (see figure footnote).
# 11 shapes/colours cover top-10 + "Others"; the unused "No_antibiotics"
# level is dropped by the discrete scales.
lineplot.1<- ggplot(df.1, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# incident
lineplot.0<- ggplot(df.0, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# Stack incident (A) over prevalent (B) and add the shared caption/label.
lineplot=ggarrange(lineplot.0, lineplot.1,
labels = c("A", "B"),
nrow = 2)
lineplot=annotate_figure(lineplot,
top = text_grob(" ", face = "bold", size = 14),
bottom = text_grob("A= incident cases; B= prevalent cases.
Grey shading represents national lockdown time.",
hjust = 1, x = 1, size = 10),
fig.lab =paste0("Top10 antibiotic prescriptions issued - URTI ",
first_mon," - ",last_mon),
left = text_grob("", rot = 90),
)
ggsave(
plot= lineplot,
filename="AB_urti.jpeg", path=here::here("output","redacted_v2"))
### tables
# define covid date
# cut() bins dates into 4 periods: 1 = pre-COVID (2019), 2 = transition
# (excluded), 3 = COVID (Apr 2020 - Dec 2021), 4 = later (excluded).
# NOTE(review): cut() requires strictly increasing breaks, so this assumes
# max(df$date) falls after 2021-12-31 -- confirm against the data window.
breaks <- c(as.Date("2019-01-01"),as.Date("2019-12-31"),# 1=pre-covid, 2=exclusion
as.Date("2020-04-01"), as.Date("2021-12-31"),# 3= covid time
max(df$date)) # NA exclusion
df=df%>%mutate(covid=cut(date,breaks,labels = 1:4))
# Keep only pre-COVID and COVID periods, recoded to 0/1.
df=df%>% filter(covid==1 | covid==3)
df$covid= recode(df$covid, '1'="0", '3'="1") # precovid=0, covid=1
df$covid <- factor(df$covid, levels=c("0","1"))
# define seasons
# Meteorological seasons derived from the calendar month string.
df$month=format(df$date,"%m")
df=df%>% mutate(season= case_when( month=="03"|month=="04"|month=="05" ~ "spring",
month=="06"|month=="07"|month=="08" ~ "summer",
month=="09"|month=="10"|month=="11" ~ "autumn",
month=="12"|month=="01"|month=="02" ~ "winter"))
# Counts per covid period x season x cohort x antibiotic type; summarise()
# drops the innermost grouping level (type).
df.table.3=df%>%
group_by(covid,season,prevalent,type)%>%
summarise(count=sum(count))%>%
mutate(indic="urti")
# Percentages within each covid x season x cohort stratum.
df.table.3=df.table.3%>%
group_by(covid,season,prevalent)%>%
mutate(total=sum(count), percentage=count/total)
# Clean up so the next indication section starts fresh.
rm(df,df.0,df.1,lineplot,lineplot.0,lineplot.1)
######### Sinusitis
# Same pipeline as the URTI section above, applied to sinusitis
# consultations: monthly top-10 antibiotic shares for incident vs prevalent
# cases, panel line plots, and the seasonal pre-COVID vs COVID summary
# table df.table.4.
df=readRDS("abtype_sinusitis.rds")
df=bind_rows(df)
# remove last month data
last.date=max(df$date)
df=df%>% filter(date!=last.date)
first_mon <- (format(min(df$date), "%m-%Y"))
last_mon <- (format(max(df$date), "%m-%Y"))
# variable types
df$prevalent=as.factor(df$prevalent)
df$date=as.Date(df$date)
df$abtype=as.character(df$abtype)
## filter case with ab
df=df%>%filter(!is.na(abtype))
##select prevalent cases
# calculate ab types
df.1=df%>%filter(prevalent==1)%>%group_by(date)%>%mutate(total=n())
df.1=df.1%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.1=df.1%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.1$type=ifelse(df.1$abtype %in% DF.top10.1$abtype | is.na(df.1$abtype), df.1$abtype, "Others")
# recode NA -> no recorded antibiotics
# NOTE(review): abtype NAs were filtered out above, so this branch looks
# unreachable here; kept as a defensive default.
df.1$type=ifelse(is.na(df.1$type),"No_antibiotics", df.1$type)
df.1$type <- factor(df.1$type, levels=c(DF.top10.1$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.1=df.1%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.1$percentage=df.1$count/df.1$total
##select incident cases
# calculate ab types
# Top 10 is recomputed for the incident cohort, so panels A and B may
# show different antibiotic sets.
df.0=df%>%filter(prevalent==0)%>%group_by(date)%>%mutate(total=n())
df.0=df.0%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.0=df.0%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.0$type=ifelse(df.0$abtype %in% DF.top10.0$abtype | is.na(df.0$abtype), df.0$abtype, "Others")
# recode NA -> no recorded antibiotics
df.0$type=ifelse(is.na(df.0$type),"No_antibiotics", df.0$type)
df.0$type <- factor(df.0$type, levels=c(DF.top10.0$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.0=df.0%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.0$percentage=df.0$count/df.0$total
## csv check for plot
rm(DF.top10.0,DF.top10.1,df)
df.1$prevalent=as.factor(1)
df.0$prevalent=as.factor(0)
df=rbind(df.0,df.1)
write_csv(df, here::here("output","redacted_v2", "AB_sinusitis_check.csv"))
### line graph
# prevalent
lineplot.1<- ggplot(df.1, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# incident
lineplot.0<- ggplot(df.0, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
lineplot=ggarrange(lineplot.0, lineplot.1,
labels = c("A", "B"),
nrow = 2)
lineplot=annotate_figure(lineplot,
top = text_grob(" ", face = "bold", size = 14),
bottom = text_grob("A= incident cases; B= prevalent cases.
Grey shading represents national lockdown time.",
hjust = 1, x = 1, size = 10),
fig.lab =paste0("Top10 antibiotic prescriptions issued - sinusitis ",
first_mon," - ",last_mon),
left = text_grob("", rot = 90),
)
ggsave(
plot= lineplot,
filename="AB_sinusitis.jpeg", path=here::here("output","redacted_v2"))
### tables
# define covid date
# NOTE(review): cut() requires strictly increasing breaks, so this assumes
# max(df$date) falls after 2021-12-31 -- confirm against the data window.
breaks <- c(as.Date("2019-01-01"),as.Date("2019-12-31"),# 1=pre-covid, 2=exclusion
as.Date("2020-04-01"), as.Date("2021-12-31"),# 3= covid time
max(df$date)) # NA exclusion
df=df%>%mutate(covid=cut(date,breaks,labels = 1:4))
df=df%>% filter(covid==1 | covid==3)
df$covid= recode(df$covid, '1'="0", '3'="1") # precovid=0, covid=1
df$covid <- factor(df$covid, levels=c("0","1"))
# define seasons
df$month=format(df$date,"%m")
df=df%>% mutate(season= case_when( month=="03"|month=="04"|month=="05" ~ "spring",
month=="06"|month=="07"|month=="08" ~ "summer",
month=="09"|month=="10"|month=="11" ~ "autumn",
month=="12"|month=="01"|month=="02" ~ "winter"))
# Seasonal counts and within-stratum percentages for the combined table.
df.table.4=df%>%
group_by(covid,season,prevalent,type)%>%
summarise(count=sum(count))%>%
mutate(indic="sinusitis")
df.table.4=df.table.4%>%
group_by(covid,season,prevalent)%>%
mutate(total=sum(count), percentage=count/total)
rm(df,df.0,df.1,lineplot,lineplot.0,lineplot.1)
####### otitis externa
# Same pipeline as the URTI section above, applied to otitis externa
# consultations: monthly top-10 antibiotic shares for incident vs prevalent
# cases, panel line plots, and the seasonal pre-COVID vs COVID summary
# table df.table.5.
df=readRDS("abtype_ot_externa.rds")
df=bind_rows(df)
# remove last month data
last.date=max(df$date)
df=df%>% filter(date!=last.date)
first_mon <- (format(min(df$date), "%m-%Y"))
last_mon <- (format(max(df$date), "%m-%Y"))
# variable types
df$prevalent=as.factor(df$prevalent)
df$date=as.Date(df$date)
df$abtype=as.character(df$abtype)
## filter case with ab
df=df%>%filter(!is.na(abtype))
##select prevalent cases
# calculate ab types
df.1=df%>%filter(prevalent==1)%>%group_by(date)%>%mutate(total=n())
df.1=df.1%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.1=df.1%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.1$type=ifelse(df.1$abtype %in% DF.top10.1$abtype | is.na(df.1$abtype), df.1$abtype, "Others")
# recode NA -> no recorded antibiotics
# NOTE(review): abtype NAs were filtered out above, so this branch looks
# unreachable here; kept as a defensive default.
df.1$type=ifelse(is.na(df.1$type),"No_antibiotics", df.1$type)
df.1$type <- factor(df.1$type, levels=c(DF.top10.1$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.1=df.1%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.1$percentage=df.1$count/df.1$total
##select incident cases
# calculate ab types
# Top 10 is recomputed for the incident cohort, so panels A and B may
# show different antibiotic sets.
df.0=df%>%filter(prevalent==0)%>%group_by(date)%>%mutate(total=n())
df.0=df.0%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.0=df.0%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.0$type=ifelse(df.0$abtype %in% DF.top10.0$abtype | is.na(df.0$abtype), df.0$abtype, "Others")
# recode NA -> no recorded antibiotics
df.0$type=ifelse(is.na(df.0$type),"No_antibiotics", df.0$type)
df.0$type <- factor(df.0$type, levels=c(DF.top10.0$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.0=df.0%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.0$percentage=df.0$count/df.0$total
## csv check for plot
rm(DF.top10.0,DF.top10.1,df)
df.1$prevalent=as.factor(1)
df.0$prevalent=as.factor(0)
df=rbind(df.0,df.1)
write_csv(df, here::here("output","redacted_v2", "AB_ot_externa_check.csv"))
### line graph
# prevalent
lineplot.1<- ggplot(df.1, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# incident
lineplot.0<- ggplot(df.0, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
lineplot=ggarrange(lineplot.0, lineplot.1,
labels = c("A", "B"),
nrow = 2)
lineplot=annotate_figure(lineplot,
top = text_grob(" ", face = "bold", size = 14),
bottom = text_grob("A= incident cases; B= prevalent cases.
Grey shading represents national lockdown time.",
hjust = 1, x = 1, size = 10),
fig.lab =paste0("Top10 antibiotic prescriptions issued - otitis externa ",
first_mon," - ",last_mon),
left = text_grob("", rot = 90),
)
ggsave(
plot= lineplot,
filename="AB_ot_externa.jpeg", path=here::here("output","redacted_v2"))
### tables
# define covid date
# NOTE(review): cut() requires strictly increasing breaks, so this assumes
# max(df$date) falls after 2021-12-31 -- confirm against the data window.
breaks <- c(as.Date("2019-01-01"),as.Date("2019-12-31"),# 1=pre-covid, 2=exclusion
as.Date("2020-04-01"), as.Date("2021-12-31"),# 3= covid time
max(df$date)) # NA exclusion
df=df%>%mutate(covid=cut(date,breaks,labels = 1:4))
df=df%>% filter(covid==1 | covid==3)
df$covid= recode(df$covid, '1'="0", '3'="1") # precovid=0, covid=1
df$covid <- factor(df$covid, levels=c("0","1"))
# define seasons
df$month=format(df$date,"%m")
df=df%>% mutate(season= case_when( month=="03"|month=="04"|month=="05" ~ "spring",
month=="06"|month=="07"|month=="08" ~ "summer",
month=="09"|month=="10"|month=="11" ~ "autumn",
month=="12"|month=="01"|month=="02" ~ "winter"))
# Seasonal counts and within-stratum percentages for the combined table.
df.table.5=df%>%
group_by(covid,season,prevalent,type)%>%
summarise(count=sum(count))%>%
mutate(indic="ot_externa")
df.table.5=df.table.5%>%
group_by(covid,season,prevalent)%>%
mutate(total=sum(count), percentage=count/total)
rm(df,df.0,df.1,lineplot,lineplot.0,lineplot.1)
########## otitis media
# Same pipeline as the URTI section above, applied to otitis media
# consultations: monthly top-10 antibiotic shares for incident vs prevalent
# cases, panel line plots, and the seasonal pre-COVID vs COVID summary
# table df.table.6.
df=readRDS("abtype_otmedia.rds")
df=bind_rows(df)
# remove last month data
last.date=max(df$date)
df=df%>% filter(date!=last.date)
first_mon <- (format(min(df$date), "%m-%Y"))
last_mon <- (format(max(df$date), "%m-%Y"))
# variable types
df$prevalent=as.factor(df$prevalent)
df$date=as.Date(df$date)
df$abtype=as.character(df$abtype)
## filter case with ab
df=df%>%filter(!is.na(abtype))
##select prevalent cases
# calculate ab types
df.1=df%>%filter(prevalent==1)%>%group_by(date)%>%mutate(total=n())
df.1=df.1%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.1=df.1%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.1$type=ifelse(df.1$abtype %in% DF.top10.1$abtype | is.na(df.1$abtype), df.1$abtype, "Others")
# recode NA -> no recorded antibiotics
# NOTE(review): abtype NAs were filtered out above, so this branch looks
# unreachable here; kept as a defensive default.
df.1$type=ifelse(is.na(df.1$type),"No_antibiotics", df.1$type)
df.1$type <- factor(df.1$type, levels=c(DF.top10.1$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.1=df.1%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.1$percentage=df.1$count/df.1$total
##select incident cases
# calculate ab types
# Top 10 is recomputed for the incident cohort, so panels A and B may
# show different antibiotic sets.
df.0=df%>%filter(prevalent==0)%>%group_by(date)%>%mutate(total=n())
df.0=df.0%>%group_by(date,abtype)%>%summarise(count=n(),total=mean(total))
#top 10 ab
DF.top10.0=df.0%>%
group_by(abtype)%>%
summarise(count=sum(count))%>%
arrange(desc(count))%>%
slice(1:10)
# sort ab type
# recode other types
df.0$type=ifelse(df.0$abtype %in% DF.top10.0$abtype | is.na(df.0$abtype), df.0$abtype, "Others")
# recode NA -> no recorded antibiotics
df.0$type=ifelse(is.na(df.0$type),"No_antibiotics", df.0$type)
df.0$type <- factor(df.0$type, levels=c(DF.top10.0$abtype,"Others","No_antibiotics"))# reorder
# consultation with AB
df.0=df.0%>%group_by(date,type)%>%summarise(count=sum(count),total=mean(total))
df.0$percentage=df.0$count/df.0$total
## csv check for plot
rm(DF.top10.0,DF.top10.1,df)
df.1$prevalent=as.factor(1)
df.0$prevalent=as.factor(0)
df=rbind(df.0,df.1)
write_csv(df, here::here("output","redacted_v2", "AB_otmedia_check.csv"))
### line graph
# prevalent
lineplot.1<- ggplot(df.1, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
# incident
lineplot.0<- ggplot(df.0, aes(x=date, y=percentage, group=type,color=type))+
annotate(geom = "rect", xmin = as.Date("2021-01-01"),xmax = as.Date("2021-04-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-11-01"),xmax = as.Date("2020-12-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
annotate(geom = "rect", xmin = as.Date("2020-03-01"),xmax = as.Date("2020-06-01"),ymin = -Inf, ymax = Inf,fill="grey80", alpha=0.5)+
geom_line(aes(linetype=type))+
geom_point(aes(shape=type))+
theme(legend.position = "right",legend.title =element_blank())+
scale_shape_manual(values = c(rep(1:11))) +
scale_color_manual(values = c("coral2","deeppink3","darkred","darkviolet","brown3","goldenrod2","blue3","green3","forestgreen","dodgerblue","black"))+
labs(
y = "" ,
x="")+
theme(axis.text.x=element_text(angle=60,hjust=1))+
scale_x_date(date_labels = "%m-%Y", date_breaks = "1 month")+
scale_y_continuous(labels = scales::percent)
lineplot=ggarrange(lineplot.0, lineplot.1,
labels = c("A", "B"),
nrow = 2)
lineplot=annotate_figure(lineplot,
top = text_grob(" ", face = "bold", size = 14),
bottom = text_grob("A= incident cases; B= prevalent cases.
Grey shading represents national lockdown time.",
hjust = 1, x = 1, size = 10),
fig.lab =paste0("Top10 antibiotic prescriptions issued - otitis media ",
first_mon," - ",last_mon),
left = text_grob("", rot = 90),
)
ggsave(
plot= lineplot,
filename="AB_otmedia.jpeg", path=here::here("output","redacted_v2"))
### tables
# define covid date
# NOTE(review): cut() requires strictly increasing breaks, so this assumes
# max(df$date) falls after 2021-12-31 -- confirm against the data window.
breaks <- c(as.Date("2019-01-01"),as.Date("2019-12-31"),# 1=pre-covid, 2=exclusion
as.Date("2020-04-01"), as.Date("2021-12-31"),# 3= covid time
max(df$date)) # NA exclusion
df=df%>%mutate(covid=cut(date,breaks,labels = 1:4))
df=df%>% filter(covid==1 | covid==3)
df$covid= recode(df$covid, '1'="0", '3'="1") # precovid=0, covid=1
df$covid <- factor(df$covid, levels=c("0","1"))
# define seasons
df$month=format(df$date,"%m")
df=df%>% mutate(season= case_when( month=="03"|month=="04"|month=="05" ~ "spring",
month=="06"|month=="07"|month=="08" ~ "summer",
month=="09"|month=="10"|month=="11" ~ "autumn",
month=="12"|month=="01"|month=="02" ~ "winter"))
# Seasonal counts and within-stratum percentages for the combined table.
df.table.6=df%>%
group_by(covid,season,prevalent,type)%>%
summarise(count=sum(count))%>%
mutate(indic="otmedia")
df.table.6=df.table.6%>%
group_by(covid,season,prevalent)%>%
mutate(total=sum(count), percentage=count/total)
rm(df,df.0,df.1,lineplot,lineplot.0,lineplot.1)
#### combine table
# Stack the six per-indication seasonal summary tables (df.table.1 is built
# earlier in this script) and write the combined antibiotic-type table.
df.table=rbind(df.table.1,df.table.2,df.table.3,df.table.4,df.table.5,df.table.6)
write_csv(df.table, here::here("output","redacted_v2", "AB.csv"))
|
e4099ba92d3a59315230add66b8cd39e846d82ee | ebd407af7f841a5abc89e6fb44391cf2c6dc40f5 | /R/preText.R | b7d3e284bdcc35441cd53dc5b38b1fcf600c1077 | [] | no_license | ttrann202/preText | 117ef8a73b69cd98cce09d9b487ccca451582261 | 4d40c44c680cfea921a55f5ded9940b3e6d1b6a6 | refs/heads/master | 2023-06-24T06:32:14.502689 | 2021-07-25T19:10:37 | 2021-07-25T19:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,854 | r | preText.R | #' @title preText Test
#' @description Calculates preText scores for each preprocessing specification.
#'
#' @param preprocessed_documents A list object generated by the
#' `factorial_preprocessing()` function.
#' @param dataset_name A string indicating the name to be associated with the
#' results. Defaults to "Documents".
#' @param distance_method The method that should be used for calculating
#' document distances. Defaults to "cosine".
#' @param num_comparisons The number of ranks to use in calculating the
#' average difference. Defaults to 50.
#' @param parallel Logical indicating whether factorial preprocessing should be
#' performed in parallel. Defaults to FALSE.
#' @param cores Defaults to 1, can be set to any number less than or equal to
#' the number of cores on one's computer
#' @param verbose Logical indicating whether more information should be printed
#' to the screen to let the user know about progress. Defaults to TRUE.
#' @return A result list object.
#' @examples
#' \dontrun{
#' # load the package
#' library(preText)
#' # load in the data
#' data("UK_Manifestos")
#' # preprocess data
#' preprocessed_documents <- factorial_preprocessing(
#' UK_Manifestos,
#' use_ngrams = TRUE,
#' infrequent_term_threshold = 0.02,
#' verbose = TRUE)
#' # run preText
#' preText_results <- preText(
#' preprocessed_documents,
#' dataset_name = "UK Manifestos",
#' distance_method = "cosine",
#' num_comparisons = 100,
#' verbose = TRUE)
#' }
#' @export
preText <- function(preprocessed_documents,
                    dataset_name = "Documents",
                    distance_method = "cosine",
                    num_comparisons = 50,
                    parallel = FALSE,
                    cores = 1,
                    verbose = TRUE){
    # Record wall-clock time so we can report total runtime at the end.
    start_time <- proc.time()
    # Pull out the list of document-feature matrices, one per
    # preprocessing specification, built by factorial_preprocessing().
    dfm_list <- preprocessed_documents$dfm_list
    cat("Generating document distances...\n")
    # Scale each dfm and compute pairwise document distance matrices.
    scaling_out <- scaling_comparison(dfm_list,
                                      dimensions = 2,
                                      distance_method = distance_method,
                                      verbose = verbose,
                                      cores = cores)
    dist_mats <- scaling_out$distance_matrices
    cat("Generating preText Scores...\n")
    # Score every specification, using the last specification as baseline.
    test_out <- preText_test(
        dist_mats,
        choices = preprocessed_documents$choices,
        labels = preprocessed_documents$labels,
        baseline_index = length(preprocessed_documents$labels),
        text_size = 1,
        num_comparisons = num_comparisons,
        parallel = parallel,
        cores = cores,
        verbose = verbose)
    scores <- test_out$dfm_level_results_unordered
    cat("Generating regression results..\n")
    # Regress preText scores on the preprocessing-choice indicators.
    regression_fit <- preprocessing_choice_regression(
        Y = scores$preText_score,
        choices = preprocessed_documents$choices,
        dataset = dataset_name,
        base_case_index = length(preprocessed_documents$labels))
    cat("Regression results (negative coefficients imply less risk):\n")
    # Round coefficients and standard errors on a copy, for display only;
    # the un-rounded table is what gets returned.
    display_fit <- regression_fit
    display_fit[, 1] <- round(display_fit[, 1], 3)
    display_fit[, 2] <- round(display_fit[, 2], 3)
    print(display_fit[, c(3, 1, 2)])
    elapsed <- proc.time() - start_time
    cat("Complete in:", elapsed[[3]], "seconds...\n")
    # Return the unordered and ranked score tables, the choice matrix,
    # and the regression results.
    return(list(preText_scores = scores,
                ranked_preText_scores = test_out$dfm_level_results,
                choices = preprocessed_documents$choices,
                regression_results = regression_fit))
}
|
2ceb2d3296764e378bb9c03f2b62dd0a4c14c1e4 | 29585dff702209dd446c0ab52ceea046c58e384e | /rAvis/tests/testthat_disabled/test-avisSpeciesSummary.R | 048be0e883dbf1a3ccf0744c9f9259506abe5bba | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 123 | r | test-avisSpeciesSummary.R | # test for avisSpeciesSummary in rAvis
# testthat context for avisSpeciesSummary() (species summary lookup in rAvis).
context ("avisSpeciesSummary")
# TODO: add tests based on .avisApiBusEspecie
|
7fe6de8dba7ebd063fa1f061176742360292c7fc | 163bf40d37623797f5922a8516b9a1d4d423974c | /R/CZUncertainty.R | 09b2cf9f3aa49a6d3746bdf3dc1045709ffc1fcc | [] | no_license | cran/kgc | 0d64870f3279a6be8a88510581db2c0594ce6707 | 9f3821df9921a0b957529c07a9521a3c55da4a30 | refs/heads/master | 2021-08-31T09:49:09.755738 | 2017-12-20T22:44:53 | 2017-12-20T22:44:53 | 111,388,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,472 | r | CZUncertainty.R | #' CZUncertainty
#'
#' This function will return the uncertainty associated with the predicted climate zone along with other potential climate zones.
#'
#' @name CZUncertainty
#' @param data The co-ordinates for the location you are trying to predict the climate zone of.
#' @return The uncertainty associated with the located climate zone along with other potential climate zones.
#' @export
#' @examples
#' data<- data.frame(Site = c("GC","UFS","NEG"),
#' Longitude = c(-15.42,10.98,34.78),
#' Latitude = c(27.82,47.42,30.86))
#' data <- data.frame(data,
#' rndCoord.lon = RoundCoordinates(data$Longitude),
#' rndCoord.lat = RoundCoordinates(data$Latitude))
#' data <- data.frame(data,ClimateZ=LookupCZ(data))
#' data <- data.frame(data, CZUncertainty(data))
CZUncertainty <- function(data){
  # Preallocate one result slot per input location.
  uncertainty <- rep(NA, nrow(data))
  possible.cz <- rep(NA, nrow(data))
  # seq_len() instead of 1:nrow(data): safe for zero-row input, where
  # 1:0 would iterate over c(1, 0) and fail on subscripting.
  for (i in seq_len(nrow(data))) {
    # Reference grid cells within +/- 0.5 degrees of the rounded
    # coordinates (climatezones is the package's lookup table).
    check <- subset(climatezones,
                    Lat <= data$rndCoord.lat[i] + 0.5 & Lat >= data$rndCoord.lat[i] - 0.5)
    check <- subset(check,
                    Lon <= data$rndCoord.lon[i] + 0.5 & Lon >= data$rndCoord.lon[i] - 0.5)
    check <- droplevels(check)
    tot <- nrow(check)
    # Neighbouring cells whose class differs from the assigned zone.
    check2 <- subset(check, as.character(Cls) != as.character(data$ClimateZ[i]))
    check2 <- droplevels(check2)
    n <- nrow(check2)
    # Fraction of surrounding cells that disagree with the assigned zone
    # (NaN when no reference cells fall inside the window).
    uncertainty[i] <- n / tot
    # Space-separated list of the other climate zones seen nearby.
    possible.cz[i] <- paste(levels(check2$Cls), collapse = " ")
  }
  dat <- data.frame(uncertainty, possible.cz)
  return(dat)
}
|
6ceca1f56916a6e7e8df5105a27b2650f4fa80b9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RSE/examples/Pred.Fk.Naive.Rd.R | 9dc4e9bf7637d89de9a0f851ae21ad460aee9d53 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 673 | r | Pred.Fk.Naive.Rd.R | library(RSE)
### Name: Pred.Fk.Naive
### Title: Abundance-based data: unweighted naive estimator
### Aliases: Pred.Fk.Naive
### ** Examples
## As an example, Herpetological assemblage data are used here.
data(HerpetologicalData)
## Two columns represent two samples of species abundance data.
X.merge = HerpetologicalData
## The first column is treated as the original sample.
X.col1 = X.merge[,1]
## The second column is treated as the additional (new) sample.
X.col2 = X.merge[,2]
Xi = X.col1
## Convert species abundance data to species frequency counts data.
f = X.to.f(Xi)
## m = total number of individuals in the additional sample.
m = sum(X.col2)
## Unweighted naive prediction of the species frequency counts.
Pred.Fk.Naive(f=f, m=m)
|
cd097e040beea4a318d9dfed878d0dfaab26ff79 | 5e8334b072ad5261403cc5ba390ef132b1ac8498 | /scratch.R | f9ea1561cfe59b3f9e7836a6f116dcd15abef07d | [
"MIT"
] | permissive | stephbuon/concept-lab-viewer-march | 8b2aa3d55c0409d49c2760b8571918a5d0fe1a97 | a072f9bf3eb668506f57297e752bb535b89dfcde | refs/heads/master | 2022-01-04T15:02:50.044410 | 2019-08-19T16:51:04 | 2019-08-19T16:51:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,496 | r | scratch.R | library(dplyr)
library(feather)
library(igraph)
library(readr)
library(xtable)
## Despotism tests
r1 <- read_feather('../data/ecco_90_dist_100_cut_10_2.feather')
r2<- read_feather('../data/ecco_90_dist_10_cut_10_2.feather')
rels <- read_feather('../data/times_coocs_unigrams_cutoff_d1.feather')
rels <- group_by(rels, focal) %>% arrange(desc(score)) %>%
mutate(rank = order(score, decreasing = TRUE))%>% ungroup
tmp <- filter(rels, focal=='democracy' | bound=='democracy', rank < 10)
t9 <- read_feather('../data/ecco_90_dist_100_cut_10_2.feather')
t80_100 <- bind_rows(t0,t9) %>% group_by(focal, bound) %>% summarise(focal_e=sum(focal_e),
bound_e=sum(bound_e),
Freq = sum(Freq)
)
t80_100 <- g
tmp <- read_feather('../data/times_coocs_unigrams_cutoff_d1.feather') %>% filter(dpf > 1)
tmp$log_dpf <- log(tmp$dpf)
tmp$score <- tmp$log_dpf
t2 <- read_feather('../data/ecco_100_dist_100_cut_10_2.feather')
write_feather(tmp, '../data/times_coocs_unigrams_cutoff_d1.feather')
prox <- read_feather('../data/ecco_50_dist_10_cut_10_2.feather')
dist <- read_feather('../data/ecco_50_dist_100_cut_10_2.feather')
conc <-
read_tsv('../data/concreteness.txt') %>% filter(Bigram == 0, Conc.M < 4.5)
relations <- data.frame(from=prox$focal, to=prox$bound, weight=prox$score)
relations <- filter(relations, from %in% conc$Word, to %in% conc$Word)
gprox <- graph_from_data_frame(relations, directed = FALSE) %>% simplify
relations <- data.frame(from=dist$focal, to=dist$bound, weight=dist$score)
relations <- filter(relations, from %in% conc$Word, to %in% conc$Word)
gdist <- graph_from_data_frame(relations, directed = FALSE) %>% simplify
bc <- estimate_betweenness(gdist, cutoff=4)
dist_bcdf <- data.frame(bc, names(bc)) %>% filter(bc > 200)
bc <- estimate_betweenness(gprox, cutoff=4)
prox_bcdf <- data.frame(bc, names(bc)) %>% filter(bc > 200)
tmp <- inner_join(prox_bcdf,dist_bcdf, by=('names.bc.'))
tmp$ratio <- tmp$bc.x/tmp$bc.y
tmp <- rename(tmp, prox_centrality = bc.x, dist_centrality = bc.y) %>% arrange(ratio)
top <- arrange(tmp, ratio) %>% head(50)
bottom <- arrange(tmp, desc(ratio)) %>% head(50)
ob <- xtable(top, caption = '1800_abs')
print(ob, 'html', '1750_abs_top.html')
ob <- xtable(bottom, caption = '1800_abs')
print(ob, 'html', '1750_abs_bottom.html')
|
b4b9f1825b558aaceb44de2f06701da118b69f0d | 769898772e7225264fd942b2e5a666af3105d3a1 | /R/insert.R | 8babb8cbc023ee7dee48c08d730b90d33da37f1a | [] | no_license | cran/spatialEco | 3fa4393496453b091c547cc7601a984e54bf2be6 | 22944d790b25451c848d420b61d386471073b1ee | refs/heads/master | 2023-07-08T05:04:12.117110 | 2023-06-30T07:40:02 | 2023-06-30T07:40:02 | 30,218,937 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,289 | r | insert.R | #' @title Insert a row or column into a data.frame
#' @description Inserts a new row or column into a data.frame
#' at a specified location
#'
#' @param x Existing data.frame
#' @param MARGIN Insert a 1 = row or 2 = column
#' @param value A vector of values equal to the length of MARGIN,
#' if nothing specified values with be NA
#' @param idx Index position to insert row or column
#' @param name Name of new column (not used for rows,
#' MARGIN=1)
#'
#' @return A data.frame with the new row or column inserted
#'
#' @note
#' Where there are methods to easily add a row/column to
#' the end or beginning of a data.frame, it is not straight
#' forward to insert data at a specific location within the
#' data.frame. This function allows for inserting a vector
#' at a specific location eg., between columns or rows 1 and 2
#' where row/column 2 is moved to the 3rd position and a new
#' vector of values is inserted into the 2nd position.
#'
#' @author Jeffrey S. Evans <jeffrey_evans@@tnc.org>
#'
#' @examples
#' d <- data.frame(ID=1:10, y=runif(10))
#'
#' # insert row
#' insert(d, idx=2)
#' insert(d, value=c(20,0), idx=2)
#'
#' # insert column
#' insert(d, MARGIN=2, idx=2)
#' insert(d, MARGIN = 2, value = rep(0,10), idx=2, name="x")
#'
#' @export insert
insert <- function(x, MARGIN = 1, value = NULL, idx, name=NULL) {
  # inherits() rather than class(x)[1]: also accepts data.frame subclasses
  # (e.g. tibbles) without changing behavior for plain data.frames.
  if(!inherits(x, "data.frame"))
    stop("x must be data.frame object")
  if(missing(idx))
    stop("idx argument must be supplied")
  idx <- idx[1]
  if(MARGIN == 1) {
    cat("Inserting row", "\n")
    # A row has one entry per column; default to NA.
    if(is.null(value)) value <- rep(NA, ncol(x))
    if(length(value) != ncol(x))
      stop("specified values not equal number of columns")
    # Shift rows idx..n down by one, then write the new row into slot idx.
    x[seq(idx+1, nrow(x)+1),] <- x[seq(idx, nrow(x)),]
    x[idx,] <- value
  } else if(MARGIN == 2) {
    cat("Inserting column", "\n")
    n <- names(x)
    # A column has one entry per row; default to NA.
    if(is.null(value)) value <- rep(NA, nrow(x))
    if(length(value) != nrow(x))
      stop("specified values not equal number of rows")
    # Fix: shift columns idx..ncol one position right (idx+1, mirroring the
    # row branch).  The original used seq(idx, ncol(x)+1) on the left-hand
    # side, a length mismatch that errored unless recycling happened to fit.
    x[,seq(idx+1, ncol(x)+1)] <- x[,seq(idx, ncol(x))]
    x[,idx] <- value
    # Restore the original names around the inserted slot, then name it.
    names(x)[-idx] <- n
    if(is.null(name)) name <- "V1"
    names(x)[idx] <- name
  }
  return(x)
}
|
d11a73d9ee99f570387cd33cc7e1283d70f1a3b9 | fdb8524680771ee42251161404df4e8a7a96315c | /vectorization/vectorized_logical_02.R | 4dcffb2cceb345711f68d8acbef1f32541a5bc46 | [] | no_license | ttedla/data_analysis | 92c2089d034360f8d0829e8b0f5b8014ebd5ab06 | 4881a3059b21872ace93b01ce588dcd0f4f83a4d | refs/heads/master | 2022-11-27T12:44:14.232561 | 2020-08-10T01:58:05 | 2020-08-10T01:58:05 | 198,594,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 338 | r | vectorized_logical_02.R |
# -------------------------------------------------------------------------
# E.g-02: logical comparision
# -------------------------------------------------------------------------
x <- 1:4
# we wanted to know which elements of x are greater than 2
x>2
# Of course, subtraction, multiplication and division are also vectorized.
|
e4b899bb112594b772b5339ce14f0c98c1961835 | 63caf4d9e0f4b9c9cb5ab101f5795a94f27d575d | /man/swN2.Rd | 035db1d08106198131a6e68d8d1ee5f94c88422b | [] | no_license | marie-geissler/oce | b2e596c29050c5e2076d02730adfc0c4f4b07bb4 | 2206aaef7c750d6c193b9c6d6b171a1bdec4f93d | refs/heads/develop | 2021-01-17T20:13:33.429798 | 2015-12-24T15:38:23 | 2015-12-24T15:38:23 | 48,561,769 | 1 | 0 | null | 2015-12-25T01:36:30 | 2015-12-25T01:36:30 | null | UTF-8 | R | false | false | 4,584 | rd | swN2.Rd | \name{swN2}
\alias{swN2}
\title{Squared buoyancy frequency for seawater}
\description{Compute
\eqn{N^2}{N^2}, the square of the buoyancy frequency for a seawater profile.
}
\usage{swN2(pressure, sigmaTheta=NULL, derivs, df, eos=getOption("oceEOS", default="gsw"), \dots)}
\arguments{
\item{pressure}{either pressure [dbar] (in which case \code{sigmaTheta} must
be provided) \strong{or} an object of class \code{ctd} object (in which
case \code{sigmaTheta} is inferred from the object.}
\item{sigmaTheta}{Surface-referenced potential density minus 1000
[kg/m\eqn{^3}{^3}]}
\item{derivs}{optional argument to control how the derivative
\eqn{d\sigma_\theta/dp}{d(sigmaTheta)/d(pressure)} is calculated. This
may be a character string or a function of two arguments. See
\dQuote{Details}.}
\item{df}{argument passed to \code{\link{smooth.spline}} if this function is
used for smoothing; set to \code{NA} to prevent smoothing.}
\item{eos}{equation of state, either \code{"unesco"} or \code{"gsw"}.}
\item{\dots}{additional argument, passed to \code{\link{smooth.spline}}, in
the case that \code{derivs="smoothing"}. See \dQuote{Details}.}
}
\details{If the first argument is a \code{ctd} object, then density is
inferred from it, and the \code{sigmaTheta} argument is ignored.
Smoothing is often useful prior to computing buoyancy frequency, and so
this may optionally be done with \code{\link{smooth.spline}}, unless
\code{df=NA}, in which case raw data are used. If \code{df} is not
  provided, a possibly reasonable value is computed from an analysis of the
profile, based on the number of pressure levels.
If \code{eos="gsw"}, then the first argument must be a \code{ctd} object,
and processing is done with \code{\link[gsw]{gsw_Nsquared}}, based on
extracted values of Absolute Salinity and Conservative Temperature
(possibly smoothed, depending on \code{df}).
If \code{eos="unesco"}, then the processing is as follows. The core of the
method involves differentiating potential density (referenced to median
pressure) with respect to pressure, and the \code{derivs} argument is used
to control how this is done, as follows.
\itemize{
\item if \code{derivs} is not supplied, the action is as though it were
given as the string \code{"smoothing"}
\item if \code{derivs} equals \code{"simple"}, then the derivative of
density with respect to pressure is calculated as the ratio of
first-order derivatives of density and pressure, each calculated using
\code{\link{diff}}. (A zero is appended at the top level.)
\item if \code{derivs} equals \code{"smoothing"}, then the processing
depends on the number of data in the profile, and on whether \code{df}
is given as an optional argument. When the number of points exceeds 4,
and when \code{df} exceeds 1, \code{\link{smooth.spline}} is used to
calculate smoothing spline representation the variation of density as a
function of pressure, and derivatives are extracted from the spline
using \code{predict}. Otherwise, density is smoothed using
\code{\link{smooth}}, and derivatives are calculated as with the
\code{"simple"} method.
\item if \code{derivs} is a function taking two arguments (first
pressure, then density) then that function is called directly to
calculate the derivative, and no smoothing is done before or after that
call.
}
For deep-sea work, the \code{eos="gsw"} option is the best scheme, because
it uses derivatives of density computed with \emph{local} reference
pressure.
For precise work, it makes sense to skip \code{swN2} entirely, choosing
whether, what, and how to smooth based on an understanding of fundamental
principles as well as data practicalities.
}
\value{Square of buoyancy frequency [\eqn{radian^2/s^2}{radian^2/s^2}].}
\examples{
library(oce)
data(ctd)
# Illustrate difference between UNESCO and GSW
p <- pressure(ctd)
ylim <- rev(range(p))
par(mfrow=c(1,3), mar=c(3, 3, 1, 1), mgp=c(2, 0.7, 0))
plot(ctd[["sigmaTheta"]], p, ylim=ylim, type='l', xlab=expression(sigma[theta]))
N2u <- swN2(ctd, eos="unesco")
N2g <- swN2(ctd, eos="gsw")
plot(N2u, p, ylim=ylim, xlab="N2 Unesco", ylab="p", type="l")
d <- 100 * (N2u - N2g) / N2g
plot(d, p, ylim=ylim, xlab="N2 UNESCO-GSW diff. [\%]", ylab="p", type="l")
abline(v=0)
}
\author{Dan Kelley}
\keyword{misc}
|
e5d8c2aa5b40f1324ca88dcfc792f934d3c8c3f7 | a4a658d367ddf2cf2ad2f2c381605573cc0228fb | /man/news.Rd | 19c861003e4eacb6d0c9f2d7e51208ae768e9a60 | [
"MIT"
] | permissive | Dschaykib/newsmd | 1054d016e48f25490906149a51b756f1b4501ffc | 1614d02eca9c35af7360de86ca1a5ce85251fd9a | refs/heads/master | 2023-04-19T19:56:02.042189 | 2023-04-19T09:54:56 | 2023-04-19T09:54:56 | 141,821,455 | 7 | 2 | NOASSERTION | 2023-04-19T09:55:52 | 2018-07-21T14:44:57 | R | UTF-8 | R | false | true | 4,998 | rd | news.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/newsmd.R
\docType{class}
\name{news}
\alias{news}
\title{Manipulate the NEWS.md file}
\format{
An R6 class.
}
\description{
Manipulate the NEWS.md file.
}
\examples{
## Create a template
my_news <- news$new()
my_news$add_subtitle("improved things 1")
my_news$add_bullet("point 1")
my_news$add_bullet("point 2")
}
\seealso{
\link{newsmd}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-news-new}{\code{news$new()}}
\item \href{#method-news-print}{\code{news$print()}}
\item \href{#method-news-get_text}{\code{news$get_text()}}
\item \href{#method-news-write}{\code{news$write()}}
\item \href{#method-news-add_version}{\code{news$add_version()}}
\item \href{#method-news-add_subtitle}{\code{news$add_subtitle()}}
\item \href{#method-news-add_bullet}{\code{news$add_bullet()}}
\item \href{#method-news-clone}{\code{news$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-new"></a>}}
\if{latex}{\out{\hypertarget{method-news-new}{}}}
\subsection{Method \code{new()}}{
Create a new news object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$new(
text = c(paste0("## version ", version), "", "---", "", "### NEWS.md setup", "",
"- added NEWS.md creation with [newsmd](https://github.com/Dschaykib/newsmd)", ""),
version = "0.0.0.9000",
file = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{text}}{vector with context for the news.md file.}
\item{\code{version}}{current version of the package.}
\item{\code{file}}{a text file with the current news.md file.
Use NULL to create new file.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new `news` object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-print"></a>}}
\if{latex}{\out{\hypertarget{method-news-print}{}}}
\subsection{Method \code{print()}}{
Print a news object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$print()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-get_text"></a>}}
\if{latex}{\out{\hypertarget{method-news-get_text}{}}}
\subsection{Method \code{get_text()}}{
Get the news object as a text.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$get_text()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
The context of the news file.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-write"></a>}}
\if{latex}{\out{\hypertarget{method-news-write}{}}}
\subsection{Method \code{write()}}{
Write and save a news object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$write(file = "NEWS.md", reduce_dev = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{file}}{A path and file to where the news file is saved.}
\item{\code{reduce_dev}}{A boolean, if TRUE dev version's points are combined
into the next version}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-add_version"></a>}}
\if{latex}{\out{\hypertarget{method-news-add_version}{}}}
\subsection{Method \code{add_version()}}{
Adds a version line to a news object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$add_version(x)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{x}}{A string with the version number.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-add_subtitle"></a>}}
\if{latex}{\out{\hypertarget{method-news-add_subtitle}{}}}
\subsection{Method \code{add_subtitle()}}{
Adds a subtitle line to a news object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$add_subtitle(x)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{x}}{A string with the subtitle.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-add_bullet"></a>}}
\if{latex}{\out{\hypertarget{method-news-add_bullet}{}}}
\subsection{Method \code{add_bullet()}}{
Adds a bullet points to the last subtitle.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$add_bullet(x)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{x}}{A vector with the bullet points.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-news-clone"></a>}}
\if{latex}{\out{\hypertarget{method-news-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{news$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
021ace3e55e49c9f9d8da12fbc778ebf3181abc3 | 7afcb6eac2fc59c8f536b10d581eee3e2b0002bd | /redPATH/R/data.R | 92e472ad7c66a468defd4f9acad97992622d5951 | [] | no_license | tinglab/redPATH | 06e6c08b5c5812193ca3c44fb86b5182c32f7d1a | 7737a3e26b036cffae90119d454056db97cece94 | refs/heads/master | 2020-04-24T11:14:54.975144 | 2019-05-08T14:32:57 | 2019-05-08T14:32:57 | 171,919,131 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,843 | r | data.R | #' MGH107 - single cell glioma dataset of MGH107 patient.
#'
#' A dataset of 252 cells.
#' Referenced from:
#' Venteicher, A.S., Tirosh, I., Hebert, C., Yizhak, K., Neftel, C., Filbin, M.G., Hovestadt, V., Escalante, L.E., Shaw, M.L., Rodman, C. et al. (2017)
#' Decoupling genetics, lineages, and microenvironment in IDH-mutant gliomas by single-cell RNA-seq. Science, 355
#'
#' @format A data frame with 23686 rows (genes) and 252 cells
"mgh107_portal_exprs"
#' MGH107 - single cell glioma dataset of MGH107 patient.
#'
#' A dataset of 252 cells.
#' Referenced from:
#' Venteicher, A.S., Tirosh, I., Hebert, C., Yizhak, K., Neftel, C., Filbin, M.G., Hovestadt, V., Escalante, L.E., Shaw, M.L., Rodman, C. et al. (2017)
#' Decoupling genetics, lineages, and microenvironment in IDH-mutant gliomas by single-cell RNA-seq. Science, 355
#'
#' @format A vector of 252 cell type labels
"mgh107_portal_labels"
#' Llorens-B - single cell neural stem cells dataset.
#'
#' A dataset of 145 cells.
#' Referenced from:
#' Llorens-Bobadilla, E., Zhao, S., Baser, A., Saiz-Castro, G., Zwadlo, K. and Martin-Villalba, A. (2015)
#' Single-Cell Transcriptomics Reveals a Population of Dormant Neural Stem Cells that Become Activated upon Brain Injury. Cell Stem Cell, 17, 329-340.
#'
#' @format A data frame with 8105 rows (genes) and 145 cells
"llorens_exprs"
#' Llorens-B - single cell neural stem cells dataset.
#'
#' A dataset of 145 cells.
#' Referenced from:
#' Llorens-Bobadilla, E., Zhao, S., Baser, A., Saiz-Castro, G., Zwadlo, K. and Martin-Villalba, A. (2015)
#' Single-Cell Transcriptomics Reveals a Population of Dormant Neural Stem Cells that Become Activated upon Brain Injury. Cell Stem Cell, 17, 329-340.
#'
#' @format A vector of 145 cell type labels
"llorens_labels"
|
1aa4fe7c2556eb36009eab8753f6c7a298797568 | bcc81487edbe00a92f5a076bef6ab70dea20c4c8 | /Advanced visualization.R | 1e1e4294a87022891c9f1a273bdf06228868258b | [] | no_license | ismailhm/R-programming | 625aa49a75d303bef93a0bf0993fa095c7d1c6b0 | 1b62c7859d241a799bf0b3b98c2906df148a046d | refs/heads/master | 2020-04-20T16:42:19.928870 | 2019-02-03T16:52:31 | 2019-02-03T16:52:31 | 168,966,306 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,601 | r | Advanced visualization.R |
getwd()
movies <- read.csv("Movie-Ratings.csv")
head(movies)
colnames(movies) <- c("Film","Genre","CriticRating","AudienceRating","BudgetMillions","Year")
head(movies)
tail(movies)
str(movies)
summary(movies)
#--------- factoring a numeric variable
#factor(movies$Year)
movies$Year <- factor(movies$Year)
#--------------------------------------
summary(movies)
str(movies)
#--------------------------- Asthetics
library(ggplot2)
ggplot(data = movies, aes(x=CriticRating, y=AudienceRating))
#----------------Add Geometry
ggplot(data = movies, aes(x=CriticRating, y=AudienceRating)) +
geom_point()
# --------------Add color
ggplot(data = movies, aes(x=CriticRating, y=AudienceRating, color= Genre)) +
geom_point()
#-----------------Add size
ggplot(data = movies, aes(x=CriticRating, y=AudienceRating,
color= Genre, size=Genre)) +
geom_point()
# That does not look good. Let's change the size variable
ggplot(data = movies, aes(x=CriticRating, y=AudienceRating,
color= Genre, size=BudgetMillions)) +
geom_point()
#>>>> This is #1 (We will improve it)
#------------------Plotting with Layers
# Creating an object(the initial plot)
p <- ggplot(data = movies, aes(x=CriticRating, y=AudienceRating,
color= Genre, size=BudgetMillions))
p
#point
p + geom_point()
# Line
p + geom_line()
# multiple layers
p + geom_point()+geom_line()
p + geom_line()+geom_point()
#------------Overriding Aesthetics
q <- ggplot(data = movies, aes(x=CriticRating, y=AudienceRating,
color= Genre, size=BudgetMillions))
q + geom_point()
#Overriding aes
#ex1
q + geom_point(aes(size=CriticRating))
#ex2
q + geom_point(aes(color=BudgetMillions))
# q remains the same
q + geom_point()
#ex3
# Mapping aesthetic
q + geom_point(aes(x=BudgetMillions))+
xlab("Budget Millions $$$$")
#ex4
p + geom_line()+geom_point()
#reduce line size
# setting aesthetic
p + geom_line(size=1)+geom_point()
#----------------Mapping vs Setting------------
r <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating))
r + geom_point()
#add color
#1. Mapping (what we have done so far)
r + geom_point(aes(color=Genre))
#2. setting
r + geom_point(color="DarkGreen")
#wrong if we write
r + geom_point(aes(color="DarkGreen"))
#1 Mapping
r + geom_point(aes(size=BudgetMillions))
# setting
r + geom_point(size=10)
#error
# r + geom_point(aes(size=10))
#------------- Histograms and Density Charts
s <- ggplot(data=movies, aes(x=BudgetMillions))
s + geom_histogram(binwidth = 10)
# add color
s + geom_histogram(binwidth = 10, aes(fill=Genre))
# add border
s + geom_histogram(binwidth = 10, aes(fill=Genre), color="Black")
#>>> 3 (We will improve it)
# sometimes you may need density charts
s + geom_density(aes(fill=Genre))
s + geom_density(aes(fill=Genre), position = "stack")
#--------------------Starting Layer Tips
t <- ggplot(data=movies,aes(x=AudienceRating))
t +geom_histogram(binwidth = 10,fill="White", color="Blue")
# another way(this is better in most cases)
t <- ggplot(data = movies)
t +geom_histogram(binwidth = 10,fill="White",
aes(x=AudienceRating),
color="Blue")
#>>>>> 4
t +geom_histogram(binwidth = 10, aes(x=CriticRating),
fill="White",
color="Blue")
#>>>>>> 5
# skelton plot (creates a new empty plot)
t <- ggplot()
#---------------------------------Statistical Transformation
library(ggplot2)
install.packages(ggplot2)
u <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating,
color=Genre))
u + geom_point() +geom_smooth()
u + geom_point() + geom_smooth(fill=NA)
# boxplot
u <- ggplot(data=movies, aes(x=Genre, y=AudienceRating,
color=Genre))
u +geom_boxplot()
u +geom_boxplot(size=1.2)
u +geom_boxplot(size=1.2) + geom_point()
# tip to fix it
u +geom_boxplot(size=1.2) + geom_jitter()
# another way
u + geom_jitter() + geom_boxplot(size=1.2, alpha=0.5)
#>>>>>>>>>> 6
u <- ggplot(data=movies, aes(x=Genre, y=CriticRating,
color=Genre))
u + geom_jitter() + geom_boxplot(size=1.2, alpha=0.5)
#-------------------------Using facets
v <- ggplot(data=movies, aes(x=BudgetMillions))
v + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black")
# facets
v + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black") +
facet_grid(Genre~.)
v + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black") +
facet_grid(Genre~., scales ="free")
# scatterplot
w <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating,
color=Genre))
w + geom_point(size=3)
# facets
w + geom_point(size=3) +
facet_grid(Genre~.)
w + geom_point(size=3) +
facet_grid(.~Year)
w + geom_point(size=3) +
facet_grid(Genre~Year)
w + geom_point(size=3) +
geom_smooth()+
facet_grid(Genre~Year)
w + geom_point(aes(size=BudgetMillions)) +
geom_smooth()+
facet_grid(Genre~Year)
#>>>>>> 1(but will improve it)
#Today
#limits
#zoom
m <- ggplot(data=movies, aes(x=CriticRating, y=AudienceRating,
size=BudgetMillions,
color=Genre))
m + geom_point()
m + geom_point()+
xlim(50,100)+
ylim(50,100)
# won't work well always
n <- ggplot(data=movies, aes(x=BudgetMillions))
n + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black")
n + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black")+
ylim(0,50)
# instead -- zoom:
n + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black")+
coord_cartesian(ylim = c(0,50))
# improve #1
w + geom_point(aes(size=BudgetMillions)) +
geom_smooth()+
facet_grid(Genre~Year) +
coord_cartesian(ylim = c(0,100))
#----------------Perfecting by adding themes
o <- ggplot(data=movies, aes(x=BudgetMillions))
h <- o + geom_histogram(binwidth = 10, aes(fill=Genre),
color="Black")
h
# axes labels
h + xlab("Money Axis")+
ylab("Number of Movies")+
theme(axis.title.x = element_text(color = "Black", size = 30),
axis.title.y = element_text(color = "DarkGreen", size = 30))
# tick mark formatting
h + xlab("Money Axis")+
ylab("Number of Movies")+
theme(axis.title.x = element_text(color = "Black", size = 30),
axis.title.y = element_text(color = "DarkGreen", size = 30),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 20))
?theme
# legend formatting
h + xlab("Money Axis")+
ylab("Number of Movies")+
theme(axis.title.x = element_text(color = "Black", size = 30),
axis.title.y = element_text(color = "DarkGreen", size = 30),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 20),
legend.title = element_text(size = 30),
legend.text = element_text(size = 20),
legend.position = c(1,1),
legend.justification = c(1,1))
# title
h + xlab("Money Axis")+
ylab("Number of Movies")+
ggtitle("Movie Budget Distribution") +
theme(axis.title.x = element_text(color = "Black", size = 30),
axis.title.y = element_text(color = "DarkGreen", size = 30),
axis.text.x = element_text(size = 20),
axis.text.y = element_text(size = 20),
legend.title = element_text(size = 30),
legend.text = element_text(size = 20),
legend.position = c(1,1),
legend.justification = c(1,1),
plot.title = element_text(hjust= 0.5, color = "DarkBlue",
size=30, family="Courier")) |
0b1b4c6bd9cdc8017b532bce6cda81dffbe5fe43 | e3b51a48fc6fccb16764a692668dd85c315a9e99 | /cegmonitor/R/bn_node_monitors.R | d7d64f6d06b223510ee00d0f09de5e41b2defca3 | [] | no_license | rachwhatsit/cegmonitor | 59c734b62b49728df363bc148458bbcefdc3ba0f | 7c4e458bb8386c3762a3aecc87db31d0cd7c9087 | refs/heads/master | 2020-04-08T08:55:55.891410 | 2019-06-13T15:55:11 | 2019-06-13T15:55:11 | 159,200,217 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,328 | r | bn_node_monitors.R | #' A function to compute the node monitors of the BN
#'
#' @param df data frame
#' @param col_name string with the node in question
#' @param prior vector prior set by modeller
#' @param n integer number of records in the dataset to consider
#' @param learn logical to include learning of prior or not
#' @keywords bn node monitor unconditional
#' @export
#' @examples
###############################################################
## UNCONDITIONAL NODE MONITOR FOR BNs
#df is the data in question, col_name is the stage in question, prior is the set prior (must have right number of iterations), n is the max sample size we wish to consider.
#df should be filtered first
bn.uncondtnl.node.monitor <- function(df, col_name="Events", prior, n=50, learn=FALSE) {
  # Unconditional node monitor: sequentially scores the Dirichlet-multinomial
  # marginal likelihood of the first n records of df for the node `col_name`.
  # NOTE(review): there is still no check that length(prior) matches the
  # number of observed levels of col_name -- mismatched lengths recycle
  # silently in the arithmetic below.
  Zm <- rep(NA, n)   # standardised monitor statistic
  Sm <- rep(NA, n)   # log penalty (negative log predictive probability)
  Em <- rep(NA, n)   # expected log penalty
  Vm <- rep(NA, n)   # variance of the log penalty
  p <- rep(NA, n)    # log marginal probability at each step
  # Template of zero counts over every level of col_name, so levels absent
  # from a subsample still contribute a zero cell.
  df %>% group_by_(col_name) %>% tally() -> empty
  empty$n <- rep(0,length(empty$n))
  if(learn==FALSE){
    for (i in 2:n){
      # NOTE(review): rows start at 2, so the first record is never scored --
      # confirm this is intentional.
      df_cut <- df[2:i,]
      df_cut %>%
        group_by_(col_name) %>% #groups by the stage of interest
        tally() -> u1 #stage1
      empty$n[which(empty[[col_name]] %in% u1[[col_name]])] <- u1$n
      counts = empty$n
      # Log marginal likelihood of counts under a Dirichlet(prior) prior.
      p[i] = (lgamma(sum(prior)) + sum(lgamma(prior+counts)) - (sum(lgamma(prior)) + lgamma(sum(prior)+sum(counts))))#logprobability
      #compute the z statistics
      Sm[i]=-p[i]
      Em[i]=sum((prior/sum(prior))*sum(counts))
      # NOTE(review): the RHS below is a vector (one term per category) but is
      # assigned into the scalar Vm[i], so only the first element survives
      # (with a warning).  A sum() over categories may have been intended --
      # confirm against the monitor theory before changing.
      Vm[i] = (sum(counts)*((sum(counts)+sum(prior))/(1+sum(prior))))*(prior/sum(prior))*(1-(prior/sum(prior)))
    }
    # Fix: parenthesise the numerator.  The original "Sm-Em /sqrt(Vm)"
    # computed Sm - (Em/sqrt(Vm)) because "/" binds tighter than "-"; the
    # standardised z statistic is (Sm - Em)/sqrt(Vm).
    Zm = (Sm-Em)/sqrt(Vm)
  }
  else{
    for (i in 2:n){
      df_cut <- df[2:i,]
      df_cut %>%
        group_by_(col_name) %>% #groups by the stage of interest
        tally() -> u1 #stage1
      empty$n[which(empty[[col_name]] %in% u1[[col_name]])] <- u1$n
      counts = empty$n
      p[i] = (lgamma(sum(prior)) + sum(lgamma(prior+counts)) - (sum(lgamma(prior)) + lgamma(sum(prior)+sum(counts))))#log probability
      #compute the z statistics
      Sm[i]=-p[i]
      Em[i]=sum(exp(na.omit(p[i]))*na.omit(Sm[i]))
      Vm[i]=sum(exp(na.omit(p[i]))*(na.omit(p[i])^2) - na.omit(Em[i])^2)
      # Sequentially update (learn) the prior from the running counts.
      prior <- (prior+counts)/sum(counts+1)
    }
    # Same precedence fix as in the non-learning branch above.
    Zm = (Sm-Em)/sqrt(Vm)
  }
  results <-data.frame(cbind(Sm, Zm, Em, Vm))
  colnames(results) <- c("Sm", "Zm", "Em", "Vm")
  return((results))
  }
bn.parent.child.monitor <- function(df, parents, parent.values, child, n=50, prior, learn=FALSE) {#dataframes should also be added for the counts
#add checks to make sure that prior has same number of items as counts in dataframe
#passing col names to the filtering bit
#p.sym <- sym(parents)
#p.sym <- lapply(parents, sym)
c.sym <- sym(child)
alpha.bar <- max(apply(df, 2, function(x){length(levels(as.factor(x)))})) #max number of categories at each level in the dataset
if(is.na(prior[1])==TRUE){
prior <- rep(alpha.bar, length(levels(df[[child]])))/length(levels(df[[child]]))
}
#initialize log penalty scores
Zm <- rep(NA, n)
Sm <- rep(NA, n)
Em <- rep(NA, n)
Vm <- rep(NA, n)
p <- rep(NA, n)
if(learn==FALSE){
for (i in 1:n){
df_cut <- df[1:i,]
#for each parent, filter it off
for (j in 1: length(parents)){
df_cut <- filter(df_cut, UQ(sym(parents[j])) == parent.values[j])
}
df_cut %>% count(!!c.sym) -> counts.tbl
counts = counts.tbl$n
counts <- rep(0,length(prior))
counts[as.numeric(rownames(counts.tbl))] <-counts.tbl$n
p[i] = (lgamma(sum(prior)) + sum(lgamma(prior+counts)) - (sum(lgamma(prior)) + lgamma(sum(prior)+sum(counts))))#logprobability
#compute the z statistics
Sm[i]=-p[i]
Em[i]=sum((prior/sum(prior))*sum(counts))#expected value
Vm[i] = (sum(counts)*((sum(counts)+sum(prior))/(1+sum(prior))))*(prior/sum(prior))*(1-(prior/sum(prior)))
#Zm[i]=sum(na.omit(Sm)) - sum(na.omit(Em)) / sqrt(sum(na.omit(Vm)))
}
}
else{
for (i in 1:n){
df_cut <- df[1:i,]
#for each parent, filter it off
for (j in 1: length(parents)){
df_cut <- filter(df_cut, UQ(sym(parents[j])) == parent.values[j])
}
df_cut %>% count(!!c.sym) -> counts.tbl
counts = counts.tbl$n
counts <- rep(0,length(prior))
counts[as.numeric(rownames(counts.tbl))] <-counts.tbl$n
p[i] = (lgamma(sum(prior)) + sum(lgamma(prior+counts)) - (sum(lgamma(prior)) + lgamma(sum(prior)+sum(counts))))#logprobability
#compute the z statistics
Sm[i]=-p[i]
Em[i]=sum((prior/sum(prior))*sum(counts))#expected value
Vm[i] = (sum(counts)*((sum(counts)+sum(prior))/(1+sum(prior))))*(prior/sum(prior))*(1-(prior/sum(prior)))
#Zm[i]=sum(na.omit(Sm)) - sum(na.omit(Em)) / sqrt(sum(na.omit(Vm)))
prior=prior+counts
}
}
Zm = Sm-Em /sqrt(Vm)
results <-data.frame(cbind(Sm, Zm, Em, Vm))
colnames(results) <- c("Sm", "Zm", "Em", "Vm")
return((results))
}
|
15462da63c30eb1cc490702734ea42475ef21b74 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/bestglm/examples/hivif.Rd.R | 3b025cf7d2b8bcab12a9702799761d6a69c875c3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,101 | r | hivif.Rd.R | library(bestglm)
### Name: hivif
### Title: Simulated Linear Regression (Train) with Nine Highly Correlated
### Inputs
### Aliases: hivif
### Keywords: datasets
### ** Examples
#Simple example
data(hivif)
lm(y ~ ., data=hivif)
#
#This example shows how the original data was simulated and
#how additional test data may be simulated.
## Not run:
##D set.seed(778851) #needed for original training data
##D n <- 100
##D p <- 9 #9 covariates plus intercept
##D sig <- toeplitz(0.9^(0:(p-1)))
##D X <- MASS::mvrnorm(n=n, rep(0, p), Sigma=sig)
##D colnames(X) <- paste0("x", 1:p)
##D b <- c(0,-0.3,0,0,-0.3,0,0,0.3,0.3) #
##D names(b) <- paste0("x", 1:p)
##D y <- 1 + X##D
##D Xy <- cbind(as.data.frame.matrix(X), y=y) #=hivif
##D #Test data
##D nTe <- 10^3
##D XTe <- MASS::mvrnorm(n=nTe, rep(0, p), Sigma=sig)
##D colnames(XTe) <- paste0("x", 1:p)
##D yTe <- 1 + XTe##D
##D XyTe <- cbind(as.data.frame.matrix(XTe), y=yTe) #test data
##D ans <- lm(y ~ ., data=Xy) #fit training data
##D mean((XyTe$y - predict(ans, newdata=XyTe))^2) #MSE on test data
##D
## End(Not run)
|
f317236668a3a8690659c4eccb7ab96cdb3750ea | edd52caf49c06554654462dc9df7e39878453a26 | /tests/testthat/test-common.R | 96bdeeaaed5aa2ab2c519d22d4bc63dbd84a9e0a | [
"MIT"
] | permissive | edgararuiz/packagedapis | ce09cac0f827c731595d51ecf9cccf4a5afe272e | 037dde691f797fc379e489abe1560926d7626b9a | refs/heads/master | 2023-05-22T20:08:17.421407 | 2021-06-01T14:36:55 | 2021-06-01T14:36:55 | 362,926,394 | 0 | 0 | NOASSERTION | 2021-05-10T13:48:13 | 2021-04-29T19:31:58 | R | UTF-8 | R | false | false | 223 | r | test-common.R | test_that("Functions work",{
expect_s3_class(
show_data(),
"data.frame"
)
expect_type(
show_model(),
"double"
)
expect_equal(
floor(get_prediction(30)),
-124,
ignore_attr = TRUE
)
})
|
b24319254a0ff3771a47337b56aa44dffe6b2885 | 8816c7990599e0566fb1288544b547ebf53d7cda | /cv.R | 8ca387da126492ce859f4958c68890767b60b0f6 | [] | no_license | dengwx11/GroupLasso_clean | 81678851ec23a03fb8c492ffa2c8916fece564db | 1352ed8951c1ae98898b0e6317c65bf1d29e7841 | refs/heads/master | 2020-03-28T16:29:52.065241 | 2018-09-13T21:46:55 | 2018-09-13T21:46:55 | 148,701,450 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,844 | r | cv.R | ########################## Cross Validation ##########################
divide<-function(K,n){
# Divide samples into K folds randomly
#
# Args:
# K = number of folds
# n = sample size
#
# Returns:
# folds = list of indices
i.mix = sample(1:n)
folds = vector(mode="list",length=K)
temp<-split(c(1:n),1:K)
for(i in 1:K){
folds[[i]]<-i.mix[temp[[i]]]
}
return(folds)
}
cv.FASTA<-function(X,y,f, gradf, g, proxg, x0, tau1, max_iters = 100, w = 10,
backtrack = TRUE, recordIterates = FALSE, stepsizeShrink = 0.5,
eps_n = 1e-15,m_X,m_W,m_G,m_I,lambda,lambda2,K=10,n=100,restart,truth){
# Summary of MSE, prediction errors, and model size in cross validation with different regularization parameters
#
# Args:
# X = design matrix
# y = vector of responses
# f = differentialble part in loss function
# gradf = gradient of f
# g = nondifferentialble part in loss function
# proxg = proximal operator for g
# x0 = start point
# tau1 = initial stepsize
# max_iters = maximum iterations before automatic termination
# w = lookback window for non-montone line search
# backtrack = logical; if TRUE, perform backtracking line search
# recordIterates = logical; If TRUE, record iterate sequence
# stepsizeShrink = multplier to decrease step size
# eps_n = epsilon to prevent normalized residual from dividing by zero
# m_X = number of baseline covariates
# m_W = number of treatment covariates. Default is 1
# m_G = number of biomarkers
# m_I = number of interaction terms, should equals m_G
# K = K-folds. Default is 10
# n = sample size
# lambda_candidate = list of regularization parameters for group lasso structure
# lambda_candidate2 = list of regularization parameters for ridge structure
# restart = logical; if TRUE (default), perform adaptive restart
# truth = vector of true beta
#
# Returns:
# mean_pred = Mean of prediction error in K-fold
# MSE_beta = Mean of MSE in K-fold
# SD_pred = SD of prediction error in K-fold
# Var_num = Variance of estimated model size in K-fold
# mean_num = Mean of estimated model size in K-fold
folds<-divide(K,n)
sol_cv<-list()
TestErr_pred<-rep(0,K)
TestErr_beta<-rep(0,K)
num<-rep(0,K)
for(k in 1:K){
# Generating training and test datasets for cross validation
test_X<-X[folds[[k]],]
train_X<-X[setdiff(c(1:n),folds[[k]]),]
test_y<-y[folds[[k]]]
train_y<-y[setdiff(c(1:n),folds[[k]])]
# Training model on training dataset
sol_cv[[k]]<-FASTA(train_X,train_y,f, gradf, g, proxg, x0, tau1, max_iters = 100, w = 10,
backtrack = TRUE, recordIterates = FALSE, stepsizeShrink = 0.5,
eps_n = 1e-15,m_X,m_W,m_G,m_I,lambda,lambda2,restart)
x0<-sol_cv[[k]]$x
# Test on the validation group
TestErr_pred[k]<-norm(f0(sol_cv[[k]]$x,test_X,test_y)-f0(truth,test_X,test_y),"2")/length(test_y) # Prediction Errors in Cross Validation
TestErr_beta[k]<-norm(sol_cv[[k]]$x-truth,"2")^2 # MSE
num[k]<-length(which(sol_cv[[k]]$x!=0)) # estimated model size
}
return(list(Err_pred=TestErr_pred,Err_beta=TestErr_beta,start=x0,num=num))
}
opt_lambda<-function(X,y,f, gradf, g, proxg, x0, tau1, max_iters = 100, w = 10,
backtrack = TRUE, recordIterates = FALSE, stepsizeShrink = 0.5,
eps_n = 1e-15,m_X,m_W,m_G,m_I,K=10,n=100,lamb_candidate,lamb_candidate2,restart,truth){
# Summary of MSE, prediction errors, and model size in cross validation with different regularization parameters
#
# Args:
# X = design matrix
# y = vector of responses
# f = differentialble part in loss function
# gradf = gradient of f
# g = nondifferentialble part in loss function
# proxg = proximal operator for g
# x0 = start point
# tau1 = initial stepsize
# max_iters = maximum iterations before automatic termination
# w = lookback window for non-montone line search
# backtrack = logical; if TRUE, perform backtracking line search
# recordIterates = logical; If TRUE, record iterate sequence
# stepsizeShrink = multplier to decrease step size
# eps_n = epsilon to prevent normalized residual from dividing by zero
# m_X = number of baseline covariates
# m_W = number of treatment covariates. Default is 1
# m_G = number of biomarkers
# m_I = number of interaction terms, should equals m_G
# K = K-folds. Default is 10
# n = sample size
# lambda_candidate = list of regularization parameters for group lasso structure
# lambda_candidate2 = list of regularization parameters for ridge structure
# restart = logical; if TRUE (default), perform adaptive restart
# truth = vector of true beta
#
# Returns:
# mean_pred = Mean of prediction error in K-fold
# MSE_beta = Mean of MSE in K-fold
# SD_pred = SD of prediction error in K-fold
# Var_num = Variance of estimated model size in K-fold
# mean_num = Mean of estimated model size in K-fold
lamb_candidate<-lamb_candidate[order(lamb_candidate,decreasing = T)]
lamb_candidate2<-lamb_candidate2[order(lamb_candidate2,decreasing = T)]
TestErr_pred<-matrix(0,nrow=length(lamb_candidate),ncol=length(lamb_candidate2))
MSE_beta<-matrix(0,nrow=length(lamb_candidate),ncol=length(lamb_candidate2))
SDErr_pred<-matrix(0,nrow=length(lamb_candidate),ncol=length(lamb_candidate2))
Mean_num<-matrix(0,nrow=length(lamb_candidate),ncol=length(lamb_candidate2))
Var_num<-matrix(0,nrow=length(lamb_candidate),ncol=length(lamb_candidate2))
for(i in seq_along(lamb_candidate)){
for(j in seq_along(lamb_candidate2)){
print(c(i,j))
rst<-cv.FASTA(X,y,f, gradf, g, proxg, x0, tau1, max_iters = max_iters, w = 10,
backtrack = TRUE, recordIterates = FALSE, stepsizeShrink = 0.5,
eps_n = 1e-15,m_X,m_W,m_G,m_I,lamb_candidate[i],lamb_candidate2[j],K,n,restart=TRUE,truth)
cv.Err_pred<-rst$Err_pred
cv.Err_beta<-rst$Err_beta
cv.num<-rst$num
x0<-rst$start
TestErr_pred[i,j]<-mean(cv.Err_pred)
MSE_beta[i,j]<-mean(cv.Err_beta)
Mean_num[i,j]<-mean(cv.num)
SDErr_pred[i,j]<-sqrt(var(cv.Err_pred)/K)
Var_num[i,j]<-var(cv.num)
print(c(paste("lambda 1=",lamb_candidate[i]),paste("lambda 2=",lamb_candidate2[j])))
print(cv.Err_pred)
print(cv.Err_beta)
}
TestErr_pred[i,]<-rev(TestErr_pred[i,])
MSE_beta[i,]<-rev(MSE_beta[i,])
SDErr_pred[i,]<-rev(SDErr_pred[i,])
Mean_num[i,]<-rev(Mean_num[i,])
Var_num[i,]<-rev(Var_num[i,])
}
TestErr_pred<-apply(TestErr_pred,2,rev)
MSE_beta<-apply(MSE_beta,2,rev)
SDErr_pred<-apply(SDErr_pred,2,rev)
Mean_num<-apply(Mean_num,2,rev)
Var_num<-apply(Var_num,2,rev)
return(list(mean_pred=TestErr_pred,MSE_beta=MSE_beta,SD_pred=SDErr_pred,Var_num=Var_num,mean_num=Mean_num))
}
get_lambda<-function(sol_cv_glasso,k_mean,k_var=2,lamb_candidate,lamb_candidate2){
# Get the optimal lambda and lambda2
#
# Args:
# sol_cv_glasso = results from opt_lambda()
# k_mean = weight for mean_num
# k_var = weight for var_num
# lambda_candidate = list of regularization parameters for group lasso structure
# lambda_candidate2 = list of regularization parameters for ridge structure
#
# Returns:
# lamb_opt1 = lambda1 optimal value
# lamb_opt2 = lambda2 optimal value
rst<-sol_cv_glasso$mean_pred+k_mean*sol_cv_glasso$mean_num+k_var*sol_cv_glasso$Var_num # Could be customized
lamb_loc <- which(rst==min(rst),arr.ind = T)
lamb_opt1<-lamb_candidate[lamb_loc[1]]
lamb_opt2<-lamb_candidate2[lamb_loc[2]]
return(list(lamb_opt1=lamb_opt1,lamb_opt2=lamb_opt2))
}
|
33b8bd386a96540581cddd94927c4c91b8c4041b | 85187ceeb4cfe98463f3054b352d5a04129ee5f8 | /cachematrix.R | 98264801f5464fe16e8fd767626dc94151e37e06 | [] | no_license | y33bai/ProgrammingAssignment2 | 65ecb010cae369485198c6113f078191c658678a | db6dc950225afb4e7a7f9d6adc5066e15b22513b | refs/heads/master | 2022-11-13T09:31:01.204438 | 2020-07-01T22:39:53 | 2020-07-01T22:39:53 | 276,476,681 | 0 | 0 | null | 2020-07-01T20:35:07 | 2020-07-01T20:35:06 | null | UTF-8 | R | false | false | 1,003 | r | cachematrix.R | ## This function in overall can firstly cache the calculated inverse of a matrix, and
## retrieve those cached value if needed.
## makeCacheMatrix function creates a matrix that primarily used to cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve function firstly checks whether the inverse already been stored or not
## If so then it pulls out the cached data
## If not then it will compute the inverse of a input matrix, cache it and output it
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)){
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m ## Return a matrix that is the inverse of 'x'
}
|
19dea2b7b1593636a736c7a2ac4e6f739d4ec313 | 33b7262af06cab5cd28c4821ead49b3a0c24bb9d | /models/files/pre.R | fcc4730d2cf560473e23d968e14cd2a9de6cda1f | [] | no_license | topepo/caret | d54ea1125ad41396fd86808c609aee58cbcf287d | 5f4bd2069bf486ae92240979f9d65b5c138ca8d4 | refs/heads/master | 2023-06-01T09:12:56.022839 | 2023-03-21T18:00:51 | 2023-03-21T18:00:51 | 19,862,061 | 1,642 | 858 | null | 2023-03-30T20:55:19 | 2014-05-16T15:50:16 | R | UTF-8 | R | false | false | 8,508 | r | pre.R | modelInfo <- list(
library = "pre",
type = c("Classification", "Regression"),
parameters = data.frame(parameter = c("sampfrac", "maxdepth",
"learnrate", "mtry",
"use.grad",
"penalty.par.val"),
class = c(rep("numeric", times = 4),
"logical", "character"),
label = c("Subsampling Fraction",
"Max Tree Depth",
"Shrinkage",
"# Randomly Selected Predictors",
"Employ Gradient Boosting",
"Regularization Parameter")),
grid = function(x, y, len = NULL, search = "grid",
sampfrac = .5, maxdepth = 3L, learnrate = .01,
mtry = Inf, use.grad = TRUE, penalty.par.val = "lambda.1se") {
if (search == "grid") {
if (!is.null(len)) {
maxdepth <- c(3L, 4L, 2L, 5L, 1L, 6:len)[1:len]
if (len > 2) {
sampfrac <- c(.5, .75, 1)
}
if (len > 1) {
penalty.par.val = c("lambda.min", "lambda.1se")
}
}
out <- expand.grid(sampfrac = sampfrac, maxdepth = maxdepth,
learnrate = learnrate, mtry = mtry,
use.grad = use.grad,
penalty.par.val = penalty.par.val)
} else if (search == "random") {
out <- data.frame(
sampfrac = sample(c(.5, .75, 1), size = len, replace = TRUE),
maxdepth = sample(2L:6L, size = len, replace = TRUE),
learnrate = sample(c(0.001, 0.01, 0.1), size = len, replace = TRUE),
mtry = sample(c(ceiling(sqrt(ncol(x))), ceiling(ncol(x)/3), ncol(x)), size = len, replace = TRUE),
use.grad = sample(c(TRUE, FALSE), size = len, replace = TRUE),
penalty.par.val = sample(c("lambda.1se", "lambda.min"), size = len, replace = TRUE))
}
return(out)
},
fit = function(x, y, wts = NULL, param, lev = NULL, last = NULL,
weights = NULL, classProbs, ...) {
theDots <- list(...)
if(!any(names(theDots) == "family")) {
theDots$family <- if (is.factor(y)) {
if (nlevels(y) == 2L) {
"binomial"
} else {
"multinomial"
}
} else {
"gaussian"
}
}
data <- data.frame(x, .outcome = y)
formula <- .outcome ~ .
if (is.null(weights)) { weights <- rep(1, times = nrow(x)) }
pre(formula = formula, data = data, weights = weights,
sampfrac = param$sampfrac, maxdepth = param$maxdepth,
learnrate = param$learnrate, mtry = param$mtry,
use.grad = param$use.grad, ...)
},
predict = function(modelFit, newdata, submodels = NULL) {
if (is.null(submodels)) {
if (modelFit$family %in% c("gaussian", "mgaussian")) {
out <- pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata))
} else if (modelFit$family == "poisson") {
out <- pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata), type = "response")
} else {
out <- factor(pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata), type = "class"))
}
} else {
out <- list()
for (i in seq(along.with = submodels$penalty.par.val)) {
if (modelFit$family %in% c("gaussian", "mgaussian")) {
out[[i]] <- pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata),
penalty.par.val = as.character(submodels$penalty.par.val[i]))
} else if (modelFit$family == "poisson") {
out[[i]] <- pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata),
type = "response",
penalty.par.val = as.character(submodels$penalty.par.val[i]))
} else {
out[[i]] <- factor(pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata),
type = "class",
penalty.par.val = as.character(submodels$penalty.par.val[i])))
}
}
}
out
},
prob = function(modelFit, newdata, submodels = NULL) {
if (is.null(submodels)) {
probs <- pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata),
type = "response")
# For binary classification, create matrix:
if (is.null(ncol(probs)) || ncol(probs) == 1) {
probs <- data.frame(1 - probs, probs)
colnames(probs) <- levels(modelFit$data[,modelFit$y_names])
}
} else {
probs <- list()
for (i in seq(along.with = submodels$penalty.par.val)) {
probs[[i]] <- pre:::predict.pre(object = modelFit,
newdata = as.data.frame(newdata),
type = "response",
penalty.par.val = as.character(submodels$penalty.par.val[i]))
# For binary classification, create matrix:
if (is.null(ncol(probs[[i]])) || ncol(probs[[i]]) == 1) {
probs[[i]] <- data.frame(1 - probs[[i]], probs[[i]])
colnames(probs[[i]]) <- levels(modelFit$data[,modelFit$y_names])
}
}
}
probs
},
sort = function(x) {
ordering <- order(x$maxdepth, # lower values are simpler
x$use.grad, # TRUE employs ctree (vs ctree), so simplest
max(x$mtry) - x$mtry, # higher values yield more similar tree, so simpler
x$sampfrac != 1L, # subsampling yields simpler trees than bootstrap sampling
x$learnrate, # lower learnrates yield more similar trees, so simpler
decreasing = FALSE)
x[ordering,]
},
loop = function(fullGrid) {
# loop should provide a grid containing models that can
# be looped over for tuning penalty.par.val
loop_rows <- rownames(unique(fullGrid[,-which(names(fullGrid) == "penalty.par.val")]))
loop <- fullGrid[rownames(fullGrid) %in% loop_rows, ]
## submodels should be a list and length(submodels == nrow(loop)
## each element of submodels should be a data.frame with column penalty.par.val, with a row for every value to loop over
submodels <- list()
## for every row of loop:
for (i in 1:nrow(loop)) {
lambda_vals <- character()
## check which rows in fullGrid without $penalty.par.val are equal to
## rows in loop without $penalty.par.val
for (j in 1:nrow(fullGrid)) {
if (all(loop[i, -which(colnames(loop) == "penalty.par.val")] ==
fullGrid[j, -which(colnames(fullGrid) == "penalty.par.val")])) {
lambda_vals <- c(lambda_vals, as.character(fullGrid[j, "penalty.par.val"]))
}
}
lambda_vals <- lambda_vals[-which(lambda_vals == loop$penalty.par.val[i])]
submodels[[i]] <- data.frame(penalty.par.val = lambda_vals)
}
list(loop = loop, submodels = submodels)
},
levels = function(x) { levels(x$data[,x$y_names]) },
tag = c("Rule-Based Model", "Tree-Based Model", "L1 regularization", "Bagging", "Boosting"),
label = "Prediction Rule Ensembles",
predictors = function(x, ...) {
if (x$family %in% c("gaussian", "poisson", "binomial")) {
return(suppressWarnings(importance(x, plot = FALSE, ...)$varimps$varname))
} else {
warning("Reporting the predictors in the model is not yet available for multinomial and multivariate responses")
return(NULL)
}
},
varImp = function(x, ...) {
if (x$family %in% c("gaussian","binomial","poisson")) {
varImp <- pre:::importance(x, plot = FALSE, ...)$varimps
varnames <- varImp$varname
varImp <- data.frame(Overall = varImp$imp)
rownames(varImp) <- varnames
return(varImp)
} else {
warning("Variable importances cannot be calculated for multinomial or mgaussian family")
return(NULL)
}
},
oob = NULL,
notes = NULL,
check = NULL,
tags = c("Rule-Based Model", "Regularization")
)
|
332999da6c42c501c7c6744b54abedc71bb6ffcf | 8b155f49fc6a93cc2a524dfb29d37404c5b7ab65 | /2014_Projection.R | ade3307efc42f926221cee6ab6dd0756eeefbadc | [] | no_license | isaacmiller2004/2014-MLB-Projection-Hitters | a076dfa0feb17663d0fd8f57bbc6fa677837fa59 | 9eb60e2cc854a2bc76c8f2c1b0518dad593951ad | refs/heads/master | 2021-01-22T23:48:37.127032 | 2015-05-12T22:44:29 | 2015-05-12T22:44:29 | 35,515,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,643 | r | 2014_Projection.R | library(plyr)
#Load data
batting <- read.csv("Batting_names.csv", header=TRUE)
Roster <- read.csv("roster2013.csv")
retro.2013 <- read.csv("all2013.csv", header=FALSE)
retro.2012 <- read.csv("all2012.csv", header=FALSE)
retro.2011 <- read.csv("all2011.csv", header=FALSE)
fields <- read.csv("fields.csv")
names(retro.2013) <- fields[, "Header"]
names(retro.2012) <- fields[, "Header"]
names(retro.2011) <- fields[, "Header"]
#function for collapsing 'stint' variable
sum.function <- function(d){
d1 <- d[, 8:26]
apply(d1, 2, sum)
}
#Collapse stints and add Plate Appearances and BABIP
batting <- ddply(batting, .(playerID, yearID, first, last), sum.function)
batting$PA <- with(batting, AB+BB+HBP+SF)
batting$BABIP <- with(batting, round((H-HR)/(AB-SO-HR+SF), 3))
## MAIN PROJECTION FUNCTION
projection <- function(First, Last, auto=FALSE){
#Get player name
if(First==""){
First <- readline("Enter the hitter's first name: ")
}
if(Last==""){
Last <- readline("Enter the hitter's last name: ")
}
#Get retrosheet ID
retro.id <- subset(Roster, First.Name == First &
Last.Name == Last)$Player.ID
retro.id <- as.character(retro.id)
#Generate subsets for player
batter <- subset(batting, first==First & last==Last)
batter.2013 <- subset(batter, yearID==2013)
batter.2012 <- subset(batter, yearID==2012)
batter.2011 <- subset(batter, yearID==2011)
batter.100 <- subset(batter, AB>99)
batter.retro.2013 <- subset(retro.2013, BAT_ID==retro.id & BATTEDBALL_CD!="")
batter.retro.2012 <- subset(retro.2012, BAT_ID==retro.id & BATTEDBALL_CD!="")
batter.retro.2011 <- subset(retro.2011, BAT_ID==retro.id & BATTEDBALL_CD!="")
#Plate appearances
PAs <- c(batter.2013$PA, batter.2012$PA, batter.2011$PA)
if(auto==FALSE){
cat("Last 3 years of PA:\n2013: ", PAs[1],
"\n2012: ", PAs[2],
"\n2011: ", PAs[3],
"\nMean: ", round(mean(PAs[]), 2))
proj.PA <- as.numeric(readline("Enter the hitter's projected PLATE APPEARANCES: "))
} else {
proj.PA <- mean(PAs)
}
#Runs
r.pa <- (batter.2013$R+batter.2012$R+batter.2011$R)/(batter.2013$PA+batter.2012$PA+batter.2011$PA)
proj.R <- r.pa*proj.PA*(sample(90:110, 1)/100)
#RBI
rbi.pa <- (batter.2013$RBI+batter.2012$RBI+batter.2011$RBI)/(batter.2013$PA+batter.2012$PA+batter.2011$PA)
proj.RBI <- rbi.pa*proj.PA*(sample(90:110, 1)/100)
#Doubles
X2B.rate <- c(batter.2013$AB/batter.2013$X2B, batter.2012$AB/batter.2012$X2B, batter.2011$AB/batter.2011$X2B)
if(auto==FALSE){
cat("Last 3 years of AB/2B:\n2013: ", round(X2B.rate[1], 2),
"\n2012: ", round(X2B.rate[2], 2),
"\n2011: ", round(X2B.rate[3], 2),
"\nMean: ", round((batter.2013$AB+batter.2012$AB+batter.2011$AB)/
(batter.2013$X2B+batter.2012$X2B+batter.2011$X2B), 2))
proj.2B.rate <- as.numeric(readline("Enter the hitter's projected AB/Double Rate: "))
} else {
proj.2B.rate <- (batter.2013$AB+batter.2012$AB+batter.2011$AB)/
(batter.2013$X2B+batter.2012$X2B+batter.2011$X2B)
}
#Triples
X3B.rate <- c(batter.2013$AB/batter.2013$X3B, batter.2012$AB/batter.2012$X3B, batter.2011$AB/batter.2011$X3B)
if(auto==FALSE){
cat("Last 3 years of AB/3B:\n2013: ", round(X3B.rate[1], 2),
"\n2012: ", round(X3B.rate[2], 2),
"\n2011: ", round(X3B.rate[3], 2),
"\nMean: ", round((batter.2013$AB+batter.2012$AB+batter.2011$AB)/
(batter.2013$X3B+batter.2012$X3B+batter.2011$X3B), 2))
proj.3B.rate <- as.numeric(readline("Enter the hitter's projected AB/Triple Rate: "))
} else {
proj.3B.rate <- (batter.2013$AB+batter.2012$AB+batter.2011$AB)/
(batter.2013$X3B+batter.2012$X3B+batter.2011$X3B)
}
#Hit by pitches
HBP.rate <- c(batter.2013$PA/batter.2013$HBP, batter.2012$PA/batter.2012$HBP, batter.2011$PA/batter.2011$HBP)
if(auto==FALSE){
cat("Last 3 years of PA/HBP:\n2013: ", round(HBP.rate[1], 2),
"\n2012: ", round(HBP.rate[2], 2),
"\n2011: ", round(HBP.rate[3], 2),
"\nMean: ", round((batter.2013$PA+batter.2012$PA+batter.2011$PA)/
(batter.2013$HBP+batter.2012$HBP+batter.2011$HBP), 2))
proj.HBP.rate <- as.numeric(readline("Enter the hitter's projected PA/HBP Rate: "))
} else {
proj.HBP.rate <- (batter.2013$PA+batter.2012$PA+batter.2011$PA)/
(batter.2013$HBP+batter.2012$HBP+batter.2011$HBP)
}
#Sacrifice flies
SF.rate <- c(batter.2013$PA/batter.2013$SF, batter.2012$PA/batter.2012$SF, batter.2011$PA/batter.2011$SF)
if(auto==FALSE){
cat("Last 3 years of PA/SF:\n2013: ", round(SF.rate[1], 2),
"\n2012: ", round(SF.rate[2], 2),
"\n2011: ", round(SF.rate[3], 2),
"\nMean: ", round((batter.2013$PA+batter.2012$PA+batter.2011$PA)/
(batter.2013$SF+batter.2012$SF+batter.2011$SF), 2))
proj.SF.rate <- as.numeric(readline("Enter the hitter's projected PA/SF Rate: "))
} else {
proj.SF.rate <- (batter.2013$PA+batter.2012$PA+batter.2011$PA)/
(batter.2013$SF+batter.2012$SF+batter.2011$SF)
}
#Stolen base attempts
SBA.rate <- c(batter.2013$PA/(batter.2013$SB+batter.2013$CS),
batter.2012$PA/(batter.2012$SB+batter.2012$CS),
batter.2011$PA/(batter.2011$SB+batter.2011$CS))
if(auto==FALSE){
cat("Last 3 years of PA/SBA:\n2013: ", round(SBA.rate[1], 2),
"\n2012: ", round(SBA.rate[2], 2),
"\n2011: ", round(SBA.rate[3], 2),
"\nMean: ", round((batter.2013$PA+batter.2012$PA+batter.2011$PA)/
(batter.2013$SB+batter.2012$SB+batter.2011$SB
+batter.2013$CS+batter.2012$CS+batter.2011$CS), 2))
proj.SBA.rate <- as.numeric(readline("Enter the hitter's projected PA/SBA Rate: "))
} else {
proj.SBA.rate <- (batter.2013$PA+batter.2012$PA+batter.2011$PA)/
(batter.2013$SB+batter.2012$SB+batter.2011$SB
+batter.2013$CS+batter.2012$CS+batter.2011$CS)
}
#Stolen base success rate
SB.succ.rate <- c(round(batter.2013$SB/(batter.2013$SB+batter.2013$CS), 2),
round(batter.2012$SB/(batter.2012$SB+batter.2012$CS), 2),
round(batter.2011$SB/(batter.2011$SB+batter.2011$CS), 2))
if(auto==FALSE){
cat("Last 3 years of SB Success Rate:\n2013: ", round(SB.succ.rate[1], 2),
"\n2012: ", round(SB.succ.rate[2], 2),
"\n2011: ", round(SB.succ.rate[3], 2),
"\nMean: ", round((batter.2013$SB+batter.2012$SB+batter.2011$SB)/
(batter.2013$SB+batter.2013$CS+batter.2012$SB+batter.2012$CS
+batter.2011$SB+batter.2011$CS), 2))
proj.SB.succ.rate <- as.numeric(readline("Enter the hitter's projected SB Success Rate: "))
} else {
proj.SB.succ.rate <- (batter.2013$SB+batter.2012$SB+batter.2011$SB)/
(batter.2013$SB+batter.2013$CS+batter.2012$SB+batter.2012$CS
+batter.2011$SB+batter.2011$CS)
}
#Walk rate
BB.rate <- c(batter.2013$BB/batter.2013$PA, batter.2012$BB/batter.2012$PA, batter.2011$BB/batter.2011$PA)
if(auto==FALSE){
cat("Last 3 years of BB Rate:\n2013: ", round(BB.rate[1], 4),
"\n2012: ", round(BB.rate[2], 4),
"\n2011: ", round(BB.rate[3], 4),
"\nMean: ", round((batter.2013$BB+batter.2012$BB+batter.2011$BB)/
(batter.2013$PA+batter.2012$PA+batter.2011$PA), 2))
proj.BB.rate <- as.numeric(readline("Enter the hitter's projected BB Rate: "))
} else {
proj.BB.rate <- (batter.2013$BB+batter.2012$BB+batter.2011$BB)/
(batter.2013$PA+batter.2012$PA+batter.2011$PA)
}
#Strikeout rate
SO.rate <- c(batter.2013$SO/batter.2013$PA, batter.2012$SO/batter.2012$PA, batter.2011$SO/batter.2011$PA)
if(auto==FALSE){
cat("Last 3 years of SO Rate:\n2013: ", round(SO.rate[1], 4),
"\n2012: ", round(SO.rate[2], 4),
"\n2011: ", round(SO.rate[3], 4),
"\nMean: ", round((batter.2013$SO+batter.2012$SO+batter.2011$SO)/
(batter.2013$PA+batter.2012$PA+batter.2011$PA), 2))
proj.SO.rate <- as.numeric(readline("Enter the hitter's projected SO Rate: "))
} else {
proj.SO.rate <- (batter.2013$SO+batter.2012$SO+batter.2011$SO)/
(batter.2013$PA+batter.2012$PA+batter.2011$PA)
}
#BABIP
BABIPs <- c(batter.2013$BABIP, batter.2012$BABIP, batter.2011$BABIP)
if(auto==FALSE){
cat("Last 3 years of BABIP:\n2013: ", BABIPs[1],
"\n2012: ", BABIPs[2],
"\n2011: ", BABIPs[3],
"\nMean: ", round((batter.2013$H-batter.2013$HR+batter.2012$H-batter.2012$HR+batter.2011$H-batter.2011$HR)
/(batter.2013$AB+batter.2012$AB+batter.2011$AB-batter.2013$SO-batter.2012$SO
-batter.2011$SO-batter.2013$HR-batter.2012$HR-batter.2011$HR+
batter.2013$SF+batter.2012$SF+batter.2011$SF
), 3))
proj.BABIP <- as.numeric(readline("Enter the hitter's projected BABIP: "))
} else {
proj.BABIP <- (batter.2013$H-batter.2013$HR+batter.2012$H-batter.2012$HR+batter.2011$H-batter.2011$HR)/
(batter.2013$AB+batter.2012$AB+batter.2011$AB-batter.2013$SO-batter.2012$SO-
batter.2011$SO-batter.2013$HR-batter.2012$HR-batter.2011$HR+
batter.2013$SF+batter.2012$SF+batter.2011$SF)
}
#Groundballs
GBs <- c(round(sum(ifelse(batter.retro.2013$BATTEDBALL_CD=="G", 1, 0)/length(batter.retro.2013[,1])), 3),
round(sum(ifelse(batter.retro.2012$BATTEDBALL_CD=="G", 1, 0)/length(batter.retro.2012[,1])), 3),
round(sum(ifelse(batter.retro.2011$BATTEDBALL_CD=="G", 1, 0)/length(batter.retro.2011[,1])), 3))
if(auto==FALSE){
cat("Last 3 years of GB rates:\n2013: ", GBs[1],
"\n2012: ", GBs[2],
"\n2011: ", GBs[3],
"\nMean: ", round((sum(ifelse(batter.retro.2013$BATTEDBALL_CD=="G", 1, 0))+
sum(ifelse(batter.retro.2012$BATTEDBALL_CD=="G", 1, 0))+
sum(ifelse(batter.retro.2011$BATTEDBALL_CD=="G", 1, 0)))/
(length(batter.retro.2013[,1])+length(batter.retro.2012[,1])+
length(batter.retro.2011[,1])), 3))
proj.GB <- as.numeric(readline("Enter the hitter's projected GB rate: "))
} else {
proj.GB <- (sum(ifelse(batter.retro.2013$BATTEDBALL_CD=="G", 1, 0))+
sum(ifelse(batter.retro.2012$BATTEDBALL_CD=="G", 1, 0))+
sum(ifelse(batter.retro.2011$BATTEDBALL_CD=="G", 1, 0)))/
(length(batter.retro.2013[,1])+length(batter.retro.2012[,1])+
length(batter.retro.2011[,1]))
}
#Flyballs
FBs <- c(round(sum(ifelse(batter.retro.2013$BATTEDBALL_CD=="F" |
batter.retro.2013$BATTEDBALL_CD=="P",
1, 0)/length(batter.retro.2013[,1])), 3),
round(sum(ifelse(batter.retro.2012$BATTEDBALL_CD=="F" |
batter.retro.2012$BATTEDBALL_CD=="P",
1, 0)/length(batter.retro.2012[,1])), 3),
round(sum(ifelse(batter.retro.2011$BATTEDBALL_CD=="F" |
batter.retro.2011$BATTEDBALL_CD=="P",
1, 0)/length(batter.retro.2011[,1])), 3))
FB1 <- sum(ifelse(batter.retro.2013$BATTEDBALL_CD=="F" |
batter.retro.2013$BATTEDBALL_CD=="P",
1, 0))
FB2 <- sum(ifelse(batter.retro.2012$BATTEDBALL_CD=="F" |
batter.retro.2012$BATTEDBALL_CD=="P",
1, 0))
FB3 <- sum(ifelse(batter.retro.2011$BATTEDBALL_CD=="F" |
batter.retro.2011$BATTEDBALL_CD=="P",
1, 0))
FB.tot <- FB1+FB2+FB3
BatBall.tot <- length(batter.retro.2013[,1])+length(batter.retro.2012[,1])+length(batter.retro.2011[,1])
FB.mean <- FB.tot/BatBall.tot
if(auto==FALSE){
cat("Last 3 years of FB rates:\n2013: ", FBs[1],
"\n2012: ", FBs[2],
"\n2011: ", FBs[3],
"\nMean: ", round(FB.mean, 2))
proj.FB <- as.numeric(readline("Enter the hitter's projected FB rate: "))
} else {
proj.FB <- FB.mean
}
#Home run/Flyball rate
HRFB <- c(round(batter.2013$HR/FB1, 3),
round(batter.2012$HR/FB2, 3),
round(batter.2011$HR/FB3, 3))
if(auto==FALSE){
cat("Last 3 years of HR/FB rates:\n2013: ", HRFB[1],
"\n2012: ", HRFB[2],
"\n2011: ", HRFB[3],
"\nMean: ", round((batter.2013$HR+batter.2012$HR+batter.2011$HR)/FB.tot, 3))
proj.HRFB <- as.numeric(readline("Enter the hitter's projected HR/FB rate: "))
} else {
proj.HRFB <- (batter.2013$HR+batter.2012$HR+batter.2011$HR)/FB.tot
}
#Final projections
final.bb <- proj.PA*proj.BB.rate
final.so <- proj.PA*proj.SO.rate
final.hbp <- ifelse(proj.HBP.rate>0, proj.PA/proj.HBP.rate, 0)
final.sf <- ifelse(proj.SF.rate>0, proj.PA/proj.SF.rate, 0)
final.sb <- ifelse(proj.SBA.rate>0, proj.PA/proj.SBA.rate*proj.SB.succ.rate, 0)
final.cs <- ifelse(proj.SBA.rate>0, proj.PA/proj.SBA.rate*(1-proj.SB.succ.rate), 0)
final.ldperc <- 1-proj.FB-proj.GB
final.ab <- proj.PA-final.bb-final.hbp-final.sf
final.bip <- final.ab-final.so+final.sf
final.2B <- ifelse(proj.2B.rate>0, final.ab/proj.2B.rate, 0)
final.3B <- ifelse(proj.3B.rate>0, final.ab/proj.3B.rate, 0)
final.hr <- proj.HRFB*proj.FB*final.bip
final.bip <- final.ab-final.so+final.sf-final.hr
final.hits <- proj.BABIP*final.bip+final.hr
final.1B <- final.hits-final.hr-final.2B-final.3B
final.avg <- ifelse(final.hits>0, final.hits/final.ab, 0)
final.obp <- (final.hits+final.bb+final.hbp)/(final.ab+final.bb+final.hbp+final.sf)
final.slg <- (final.1B+(2*final.2B)+(3*final.3B)+(4*final.hr))/final.ab
final.ops <- final.obp+final.slg
#Print results
cat("2014 Projection for ", First, " ", Last, ": ",
"\nPA: ", round(proj.PA, 0),
"\nAB: ", round(final.ab, 0),
"\nAVG: ", round(final.avg, 3),
"\nHR: ", round(final.hr, 0),
"\nR: ", round(proj.R, 0),
"\nRBI: ", round(proj.RBI, 0),
"\nOBP: ", round(final.obp, 3),
"\nSLG: ", round(final.slg, 3),
"\nOPS: ", round(final.ops, 3),
"\nBB: ", round(final.bb, 0),
"\nSO: ", round(final.so, 0),
"\nHits: ", round(final.hits, 0),
"\n1B: ", round(final.1B, 0),
"\n2B: ", round(final.2B, 0),
"\n3B: ", round(final.3B, 0),
"\nSB: ", round(final.sb, 0),
"\nCS: ", round(final.cs, 0),
"\nBABIP: ", round(proj.BABIP, 3),
sep=""
)
}
|
3c052cac749890ca8a1e47e386b600b9beff7506 | 9aa18283078113319752d6ea52003835905aad8b | /hw5/hw5_model_classification.R | c0fc8fa92d4a8d45cb3f9083607c19e49432bd8e | [] | no_license | boniu86/Stats780_data_science | 312f9dd905b2dd88711656aa2a3ceae2a74cc32b | c02f4c3a078e8975b72bf25da58bfbc3c4eed04f | refs/heads/master | 2021-05-12T08:48:40.005776 | 2018-04-17T20:24:59 | 2018-04-17T20:24:59 | 117,298,739 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,523 | r | hw5_model_classification.R | ### Mixture DA
## Compare Mixture Discriminant Analysis (mclust::MclustDA) with a
## classification tree (tree::tree) on the MASS `biopsy` breast-cancer data.
## Both models are evaluated on 10 random 75/25 train/test splits (seeds 1..10)
## using three agreement indices from e1071::classAgreement():
##   diag  = classification accuracy
##   rand  = Rand index
##   crand = corrected (adjusted) Rand index
library(mclust)
library(tree)
library(e1071)
library(MASS)
library(tidyverse)

## ---- Data preparation -------------------------------------------------------
data("biopsy")
user<-biopsy[,-1]                           # drop the sample-ID column
user<-na.omit(user)                         # remove rows with missing values
user[,-10]<-lapply(user[,-10], as.numeric)  # cols 1-9 predictors; col 10 = class

## MDA (MclustDA) over 10 random splits
x<-user[,-10]
x<-scale(x)   # standardise predictors before fitting MclustDA
B<-c()        # per-split `diag` scores
C<-c()        # per-split `rand` scores
D<-c()        # per-split `crand` scores
## NOTE: the original `Tab<-union()` line was removed here -- calling union()
## with no arguments is a runtime error, and `Tab` was never used afterwards.
for(i in 1:10){
  set.seed(i)
  ## hold out a random quarter of the observations as the test set
  user_delete<-sample (1: nrow(user), nrow(user)/4)
  userMclustDA <- MclustDA(x[-user_delete,], user[-user_delete,10])
  summary(userMclustDA, parameters = TRUE)
  a<-summary(userMclustDA, newdata = x[user_delete,], newclass = user[user_delete,10])
  b<-classAgreement(a$tab.newdata)$diag
  c<-classAgreement(a$tab.newdata)$rand
  d<-classAgreement(a$tab.newdata)$crand
  ## prepend, so scores are stored in reverse seed order (harmless: only the
  ## mean and sd of each column are reported below)
  B<-c(b,B)
  C<-c(c,C)
  D<-c(d,D)
}
## Refit once with a fixed seed to print one representative MDA fit and its
## out-of-sample confusion table.
set.seed(123)
user_delete<-sample (1: nrow(user), nrow(user)/4)
userMclustDA <- MclustDA(x[-user_delete,], user[-user_delete,10])
summary(userMclustDA, parameters = TRUE)
summary(userMclustDA, newdata = x[user_delete,], newclass = user[user_delete,10])
## Collect the 10 MDA scores into a labelled data frame
df1<-data.frame(cbind(B,C,D))
colnames(df1)<-c("diag","rand","crand")
df1$class<-rep("MDA",10)

## ---- Classification tree over the same 10 splits ---------------------------
BB<-c()
CC<-c()
DD<-c()
for(i in 1:10){
  set.seed(i)
  train = sample (1: nrow(user), nrow(user)*0.75)
  user$class<-factor(user$class)
  tree.user <- tree(class~., data =user, subset=train,method="class")
  user.test=user[-train,"class"]
  user.pred=predict(tree.user,user[-train,],type="class")
  tab<-table(user.test,user.pred)
  bb<-classAgreement(tab)$diag
  cc<-classAgreement(tab)$rand
  dd<-classAgreement(tab)$crand
  BB<-c(bb,BB)
  CC<-c(cc,CC)
  DD<-c(dd,DD)
}
## Refit once with a fixed seed for one representative tree fit
set.seed(123)
train = sample (1: nrow(user), nrow(user)*0.75)
user$class<-factor(user$class)
tree.user <- tree(class~., data =user, subset=train,method="class")
user.test=user[-train,"class"]
user.pred=predict(tree.user,user[-train,],type="class")
tab<-table(user.test,user.pred)
df2<-data.frame(cbind(BB,CC,DD))
colnames(df2)<-c("diag","rand","crand")
df2$class<-rep("tree",10)

## ---- Combine and summarise the two methods ---------------------------------
df<-union(df1,df2)   # dplyr::union row-binds the tables (dropping exact duplicates)
df<-(df%>%arrange(class))
t1<-(df%>%
       group_by(class)%>%
       summarise(mean_crand=mean(crand),sd_crand=sd(crand)))
t2<-(df%>%
       group_by(class)%>%
       summarise(mean_rand=mean(rand),sd_rand=sd(rand)))
t3<-(df%>%
       group_by(class)%>%
       summarise(mean_diag=mean(diag),sd_diag=sd(diag)))
table<-bind_cols(t1,t2,t3)
table

## ---- Boxplots of the per-split scores --------------------------------------
## NOTE(review): this reads a saved copy of `df` from disk, but nothing in this
## script writes "df.csv" -- confirm it was exported in an earlier session.
dd<-read_csv("df.csv")
ddf<-dd[,-1]   # drop the first column (presumably row numbers) -- TODO confirm
p1<-ggplot(ddf,aes(x=class,y=crand))+geom_boxplot()
p2<-ggplot(ddf,aes(x=class,y=rand))+geom_boxplot()
p3<-ggplot(ddf,aes(x=class,y=diag))+geom_boxplot()
library(gridExtra)
grid.arrange(p1,p2,p3,ncol=3)
|
e1f8034146fdac74855e9e66b59a7681f0c8d611 | 1ac4936de63f82c6865a4adfc1908da71e5a70c5 | /sample_net_w_second_3-component.R | c2c8bc3116881748ce6b63504eb18be9eb9b92f8 | [] | no_license | sjdayday/net | 64ddc2772901c2ea8c733251999c78b8e1c41a60 | 5ac574b9b02ed9e8d0be1ff39775496dc9d575b9 | refs/heads/master | 2020-04-06T04:30:56.353801 | 2013-03-26T16:41:00 | 2013-03-26T16:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 879 | r | sample_net_w_second_3-component.R | n1<-matrix(nrow=13,ncol=13,seq(0,0,length=169))
## Adjacency matrix for a 13-node undirected sample network with a second
## component: nodes 1-8 form one cluster, node 9 bridges to nodes 10-13.
## Built from an explicit edge list instead of one assignment pair per edge.
edge_list <- rbind(
  c(1, 2),  c(2, 3),   c(2, 8),   c(5, 7),   c(2, 7),   c(3, 4),
  c(4, 5),  c(4, 6),   c(4, 7),   c(7, 8),   c(2, 5),   c(2, 6),
  c(6, 8),  c(1, 8),   c(1, 7),   c(5, 8),   c(1, 9),   c(9, 10),
  c(10, 11), c(10, 12), c(10, 13), c(11, 12), c(11, 13), c(12, 13)
)
n1 <- matrix(0, nrow = 13, ncol = 13)
for (k in seq_len(nrow(edge_list))) {
  a <- edge_list[k, 1]
  b <- edge_list[k, 2]
  # undirected graph: mark the edge in both directions
  n1[a, b] <- 1
  n1[b, a] <- 1
}
# Labels reproduced verbatim from the original script.
# NOTE(review): the suffix looks like a degree annotation but does not match
# the edge list for every node (e.g. v6, v9) -- confirm intent.
node_labels <- c("v1:4","v2:6","v3:2","v4:4","v5:4","v6:4","v7:5","v8:5",
                 "v9:1","v10:?","v11:?","v12:?","v13:?")
colnames(n1) <- node_labels
rownames(n1) <- node_labels
|
981811e2ebae51c7d9b7d370aa4a9784c6dab5df | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/phylosim/examples/getTipLabels.PhyloSim.Rd.R | 3c57618504932649485d8a0bb6111b046fb9d948 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 410 | r | getTipLabels.PhyloSim.Rd.R | library(phylosim)
### Name: getTipLabels.PhyloSim
### Title: Get the tip labels from a phylo object aggregated by a PhyloSim
### object
### Aliases: getTipLabels.PhyloSim PhyloSim.getTipLabels
### getTipLabels,PhyloSim-method
### ** Examples
# create a PhyloSim object wrapping a random 5-tip coalescent tree (rcoal)
sim<-PhyloSim(phylo=rcoal(5));
# get the tip labels via the accessor method
getTipLabels(sim)
# get the tip labels via the virtual field of the same name
sim$tipLabels
|
a068ca9162a62e773feaca3b1b364c34ac3385e7 | bc1120b2904a035b5177e54f2dbd51ffbee51d75 | /Assignment2.R | 73da57c90bc8818eaf3e52d13d0f0d78acca24ba | [] | no_license | JeffreyLoo/Econometric | 13de873a6c9a8d5832edb943fb34c1031cc1e4ba | e1cb49f7baad8e8adba19ac37511f1b0249a3dd8 | refs/heads/master | 2020-07-25T21:03:18.611758 | 2019-10-17T07:44:46 | 2019-10-17T07:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,243 | r | Assignment2.R | library(fBasics)
library(rugarch)
library(forecast)
library(sos)
library(quantmod)
##################### P A R T A #######################
### Opening data files ###
# Each file is whitespace-delimited with a header row; the price column is
# named after the asset ("btc" / "eth").  Paths are relative to the working
# directory.  (`header = TRUE` spelled out: `T` is a reassignable alias.)
btc <- read.table("btc.txt", header = TRUE) # Load the data
eth <- read.table("eth.txt", header = TRUE) # Load the data
### Calculating the log difference ###
# Percentage log returns 100*(log p_t - log p_{t-1}), stored as `ts` objects.
logbtc <- as.ts(100 * diff(log(btc$btc)))
logeth <- as.ts(100 * diff(log(eth$eth)))
##################### P A R T B #######################
### predicted(): fit 8 volatility-forecast models on a rolling window and
### return the one-step-ahead variance forecast for each of 500 windows.
###   data: log-return series of a cryptocurrency
###   size: rolling-window parameter; window i uses observations i..(size+i),
###         i.e. size+1 points, so `data` needs at least size+501 observations
### Returns a list of 8 vectors (one per model, 500 forecasts each):
###   1. AR(1)-GARCH(1,1)          (rugarch, normal innovations)
###   2. AR(1)-GJR-GARCH(1,1)      (rugarch, normal innovations)
###   3. AR(5) fitted to squared returns
###   4. Naive (last squared return of the window)
###   5. Historical mean of squared returns in the window
###   6. Simple moving average over the last 20 squared returns
###   7. Simple moving average over the last 60 squared returns
###   8. Simple moving average over the last 180 squared returns
###      (NOTE(review): the original header said SMA(120) but the code uses 180)
predicted <- function(data, size ){
# one vector of 500 one-step-ahead forecasts per model
result =list(c(),c(),c(),c(),c(),c(),c(),c())
# squared returns r^2 -- the realised-variance proxy used by models 3-8
datasq =sapply(data, function(x) x^2)
# specification for the AR(1)-sGARCH(1,1) model with normal innovations
spec1 <- ugarchspec(variance.model = list(model = "sGARCH",
garchOrder = c(1,1)),
mean.model = list(armaOrder = c(1,0), arfima = FALSE,
include.mean = TRUE),
distribution.model = "norm")
# specification for the AR(1)-GJR-GARCH(1,1) model (asymmetric GARCH)
spec2 <- ugarchspec(variance.model = list(model = "gjrGARCH", garchOrder = c(1,1)),
mean.model = list(armaOrder = c(1,0), arfima = FALSE, include.mean = TRUE),
distribution.model = "norm")
# loop A: one pass per rolling window; each pass produces one forecast from
# each of the 8 models
for (i in seq(1,500,1)){
# model 1: AR(1)-GARCH(1,1); forecast variance is sigmaFor squared
sgarch.fit = ugarchfit(data[i:(size+i)], spec = spec1, solver = "hybrid") # Model estimation
forc1 = ugarchforecast(sgarch.fit, n.ahead=1)
result[[1]][[i]] =attributes(forc1)[[1]]$sigmaFor[1]^2
# model 2: AR(1)-GJR-GARCH(1,1)
gjrgarch.fit =ugarchfit( data[i:(size+i)], spec = spec2, solver = "hybrid")
forc2 = ugarchforecast(gjrgarch.fit, n.ahead=1)
result[[2]][[i]] =attributes(forc2)[[1]]$sigmaFor[1]^2
# model 3: AR(5) fitted to the squared returns in the window
ARmodel =arima(datasq[(i):(size+i)], order=c(5,0,0), method="ML", optim.method="BFGS")
result[[3]][[i]] =predict(ARmodel,1)$pred[1]
# model 4: naive -- forecast equals the last squared return of the window
result[[4]][[i]] =datasq[size+i]
# model 5: historical mean of squared returns over the whole window
result[[5]][[i]] =mean(datasq[(i):(size+i)])
# models 6-8: simple moving averages of the last j squared returns
counter = 6
# loop B: j = 20, 60, 180 fills result slots 6, 7, 8 respectively
for (j in c(20,60,180)){
result[[counter]][[i]] =mean(datasq[(size+i-j+1):(size+i)])
counter =counter +1
}
}
return (result)
}
### accurateModel(): score the 8 models' variance forecasts against realised
### squared returns using three loss functions and rank the models per loss.
###   predict: list of 8 forecast vectors, as returned by predicted()
###   data:    the 500 realised log returns aligned with the forecasts
### Returns a list of 3 named vectors (MSE, MAD, QLIKE-style losses), each
### sorted ascending, so the best (lowest-loss) model comes first.
accurateModel <- function(predict,data){
# realised squared returns -- the proxy each forecast is scored against
datasq =sapply(data, function(x) x^2)
# result[[k]]: 500 x 8 matrix of per-observation losses for loss function k
result=c()
# error[[k]]: the 8 column means of result[[k]], sorted ascending
error=c()
# loss 1: squared error (MSE components)
result[[1]] =cbind(cbind((predict[[1]]-datasq)^2),
cbind((predict[[2]]-datasq)^2),
cbind((predict[[3]]-datasq)^2),
cbind((predict[[4]]-datasq)^2),
cbind((predict[[5]]-datasq)^2),
cbind((predict[[6]]-datasq)^2),
cbind((predict[[7]]-datasq)^2),
cbind((predict[[8]]-datasq)^2))
# loss 2: absolute error (MAD components)
result[[2]] =cbind(cbind(abs(predict[[1]]-datasq)),
cbind(abs(predict[[2]]-datasq)),
cbind(abs(predict[[3]]-datasq)),
cbind(abs(predict[[4]]-datasq)),
cbind(abs(predict[[5]]-datasq))
,cbind(abs(predict[[6]]-datasq)),
cbind(abs(predict[[7]]-datasq)),
cbind(abs(predict[[8]]-datasq)))
# loss 3: QLIKE-style loss log(h^2) + h^-2 * (r^2)^2 with h the forecast.
# NOTE(review): the textbook QLIKE is log(h) + r^2/h; here both the forecast
# and the realised proxy are squared once more -- confirm this is intended.
result[[3]] =cbind(cbind(log((predict[[1]])^2)+((predict[[1]])^-2)*datasq^2),
cbind(log((predict[[2]])^2)+((predict[[2]])^-2)*datasq^2),
cbind(log((predict[[3]])^2)+((predict[[3]])^-2)*datasq^2),
cbind(log((predict[[4]])^2)+((predict[[4]])^-2)*datasq^2),
cbind(log((predict[[5]])^2)+((predict[[5]])^-2)*datasq^2),
cbind(log((predict[[6]])^2)+((predict[[6]])^-2)*datasq^2),
cbind(log((predict[[7]])^2)+((predict[[7]])^-2)*datasq^2),
cbind(log((predict[[8]])^2)+((predict[[8]])^-2)*datasq^2))
# label the 8 model columns, average the per-observation losses, then sort
# each loss vector ascending so the best model is listed first
for (i in 1:3){
colnames(result[[i]]) <- c("ARGARCH(1,1)","ARGJRGARCH(1,1)","AR(5)","Naive","Historical Mean","SMA(20)","SMA(60)","SMA(180)")
mean <- colMeans(result[[i]])
error[[i]] <- cbind(mean)
error[[i]] <- error[[i]][order(as.numeric(error[[i]][,1])), ]
}
return (error)
}
# (i)
### Run the pipeline: rolling-window fits and one-step forecasts for both
### currencies.  Window parameters: 1502 (BTC), 976 (ETH).
predicted_btc=predicted(logbtc,1502)
predicted_eth=predicted(logeth,976)
### Loss-function scores per model; forecasts are scored against the last 500
### returns of each series, and each loss vector is sorted ascending
### (best model first).
error_btc =accurateModel(predicted_btc,tail(logbtc,500))
error_eth =accurateModel(predicted_eth,tail(logeth,500))
# raise the print limit so the full result tables can be displayed
options("max.print" =100000)
|
247c197eb25a2f40dead8049ecccdda8b2cd3303 | 2cc8973ad90835f8d5a17cf0d7b87059bbc71f81 | /R/Match_cb.R | 46d454180cb35787f16d0649866fb0042f384baa | [] | no_license | my1120/Hurricane | 55d4d140a0136c5eefcf54a73c270b3c185a5289 | f35c5e6309f032c025a9167bd433113b6943b551 | refs/heads/master | 2021-01-11T18:37:09.821805 | 2017-10-11T20:59:07 | 2017-10-11T20:59:07 | 79,583,219 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,586 | r | Match_cb.R | #' Select matched observations from original $crossbasis$ matrix.
#'
#' Builds the DLNM cross-basis for a city's full exposure series, then keeps
#' only the rows whose dates appear in the matched (case/control) data set,
#' re-attaching the crossbasis attributes so downstream dlnm functions still
#' recognise the result.
#' @inheritParams readCity
#' @inheritParams readStorm
#' @inheritParams CityStorm
#' @inheritParams Match_data
#' @inheritParams dlnm::crossbasis
#'
#' @return A list with three elements: \code{cb} (the matched crossbasis
#'   object), \code{matrix} (the same values as a plain matrix), and
#'   \code{attributes} (the attribute list applied to \code{cb}).
#'
#' @importFrom dplyr %>%
#'
#' @export
Match_cb <- function(root = "~/Documents/NMMAPS/", criterion, city,
control_ratio = 10, lag_1 = -2, lag_2 = 7,
arglag = list(fun = "integer"),
collapseAge = TRUE, age_cat = NULL){
# crossbasis built from the whole (unmatched) city data set
orig_data <- CityStorm(root, criterion, city, collapseAge, age_cat)
orig_cb <- dlnm::crossbasis(orig_data$hurr, lag = c(lag_1, lag_2),
argvar = list(fun = "lin"),
arglag = arglag)
obs_n <- nrow(orig_data)
# flatten the crossbasis to a data frame and tag each row with its date so
# rows can be matched by date below
orig_cb_matr <- as.data.frame(subset(orig_cb, nrow = obs_n))
orig_cb_matr$date <- orig_data$date
# dates retained by the matched (case/control) data set
matched_date <- Match_data(root, criterion, city,
control_ratio, lag_1, lag_2) %>%
dplyr::select(date)
# keep only the rows whose date appears in matched_date
matched_cb_matrix <- orig_cb_matr %>%
dplyr::right_join(matched_date, by = "date") %>%
dplyr::select(-date) %>%
as.matrix()
# copy the original crossbasis attributes onto the reduced matrix, updating
# the dim attribute to the new size.
# NOTE(review): `attr` still carries the original attributes other than dim
# (e.g. any dimnames); confirm `attributes<-` accepts them for the smaller
# matrix.
matched_dim <- dim(matched_cb_matrix)
attr <- attributes(orig_cb)
attr$dim <- matched_dim
matched_cb <- matched_cb_matrix
attributes(matched_cb) <- attr
return(list("cb" = matched_cb, "matrix" = matched_cb_matrix,
"attributes" = attr))
}
|
44f438e8f63d5a017be99708d40d213e22f00fb3 | 05023be47e53613170dc2c5a52c602fa6e34276b | /R/invert.dataframe.R | 370e096593450390a2624ff9498ee5659df4733a | [] | no_license | NathanDeFruyt/WormTools | 087f7558dbf098ec965b0369840ef8ddb356ece0 | 5822d14e79816905c3b9166bec31c27bd82def45 | refs/heads/master | 2022-11-05T01:13:51.282285 | 2020-06-25T13:48:58 | 2020-06-25T13:48:58 | 274,924,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 433 | r | invert.dataframe.R | #' Invert your data frame
#'
#' Returns the rows of a data frame in reverse order (last row first),
#' keeping all columns and the original row names.
#' @param dataframe a data.frame which you'd like to see inverted.
#' @return a data.frame with the same columns and the rows reversed.
#' @keywords
#' @export
#' @examples
#' invert.dataframe(data.frame(x = 1:3))
invert.dataframe <- function(dataframe){
  # Vectorised reversal.  This replaces the original rbind() loop, which grew
  # the result one row at a time (quadratic) and contained a misplaced
  # parenthesis: `1:(nrow(dataframe-1))` subtracted 1 from every element
  # instead of from the row count, which also raised an error for any data
  # frame containing a non-numeric column.
  # `drop = FALSE` keeps the result a data.frame even for one-column input.
  dataframe[rev(seq_len(nrow(dataframe))), , drop = FALSE]
}
1851f8a815ef7e4a8e4ca5112860735fdd763cf1 | b74966ae3d4cadd8248a1182526ddb8fe467b024 | /man/RotpBART.Rd | 02e5ac51e9241f397f036f4e25f322b76cdcbcee | [] | no_license | DongyueXie/bCART | 5a4cdae4f865e610173fa6695df205a5f6b76b78 | ceb95ac4021a5b4d0e7711944cdd750960b3cc7c | refs/heads/master | 2022-03-30T15:03:47.528667 | 2020-02-03T17:18:07 | 2020-02-03T17:18:07 | 192,607,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 753 | rd | RotpBART.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RotpBART.R
\name{RotpBART}
\alias{RotpBART}
\title{Rotation BART for classification}
\usage{
RotpBART(X, y, x.test, cutoff = 0.5, k = 2, binaryOffset = NULL,
power = 2, base = 0.95, w = rep(1, length(y)), ntree = 50,
ndpost = 700, nskip = 300, Tmin = 2, printevery = 100,
p_modify = c(0.5, 0.5, 0), save_trees = F, rule = "bart",
p_split = "CGM", rotate = "rr", srpk = 2 * ncol(X))
}
\arguments{
\item{rotate}{'rr'='random rotation','rraug'='random rotation+augmentation','srp'='sparse random projection'}
\item{srpk}{the number of cols of sparse projection matrix}
\item{Others}{See ?BARTr}
}
\value{
See ?BARTr
}
\description{
Rotation BART for classification
}
|
aab7a72a2057610cbf75338f84cd3e19129643ea | ed1920915c1f7070c7cec39de8ca82672be18cc5 | /source/predict/mainPredictStripchart.R | 5b27a4308d1bf23234a36ed7e425fba45841a8c5 | [] | no_license | sthallor/miscRscripts | d28f7a9cdbc53fc7c7994c6000d1753b3679236d | c3a5a206c35cdbbb15f07a4ea9250ff861b2e7f1 | refs/heads/master | 2022-11-06T03:39:03.951539 | 2020-06-21T23:21:47 | 2020-06-21T23:21:47 | 273,998,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,085 | r | mainPredictStripchart.R | #######################################################################################
# mainPredictStripchart.R V3.0 - main routine for producing prediction strip chart
# Ensign Energy Services Inc. retains all rights to this software
# FHS, May 10, 2017
#######################################################################################
sourceDir <- "E:/Analytics/Rcode/190401_master/"
source(paste0(sourceDir,"source/predict/get.prm.predictStripchart.R"))
source(paste0(sourceDir,"source/util/prm.decode.R"))
source(paste0(sourceDir,"source/predict/predictStripchart.R"))
source(paste0(sourceDir,"source/predict/rfcutPlot.R"))
# Record run start time for the log banner printed below.
start.time <- Sys.time()
# get command line arguments: the only supported argument is the path to the
# parameters (.prm) file; otherwise a default development path is used
arguments <- commandArgs(trailingOnly=TRUE)
if (length(arguments)==1) {
  prmFilename <- arguments[1]
} else {
  prmFilename <- paste0(sourceDir,'parms/dev/predict/predictStripchart.prm')
  cat("\nNo command line argument ... using default parameters filename ",prmFilename,"\n\n")
}
# Get the predict stripchart parameters and echo the parameter file verbatim
# so the run log is self-describing
cat('\nLoading predict strip chart parameters from file ',prmFilename,'\n\n')
prm.ps <- get.prm.predictStripchart(prmFilename)
cat(prm.ps$version,'\nRun Started at : ',as.character(start.time),'\n')
cat('\nVerbatim listing of prediction parameters file from:\n',prmFilename,'\n\n')
cat(paste(rep('-',80),collapse=''),'\n')
# seq_along() is safe when prm.ps$text is empty (1:length() would give c(1,0))
for (i in seq_along(prm.ps$text)) { cat(prm.ps$text[i],'\n') }
cat(paste(rep('-',80),collapse=''),'\n')
# Load the file from which prediction plots are to be made.
# The read.csv argument is `nrows`; the original `nrow=` only worked through
# partial argument matching.
cat('\nReading predictions data file: \n',prm.ps$predictFilename)
dt <- read.csv(prm.ps$predictFilename,nrows=prm.ps$to)
cat('\nRead ',nrow(dt),' rows and ',ncol(dt),' columns.')
# If futureIgbtMax not present, but igbt_temp is, then create futureIgbtMax:
# the running maximum of igbt_temp over the following (rowFutureSpan-1) rows,
# left-aligned so each row looks strictly into the future
if (is.null(dt$futureIgbtMax) && !is.null(dt$igbt_temp)) {
  cat('\ncreating futureIgbtMax')
  library(caTools)
  dt$futureIgbtMax <- NA
  dt$futureIgbtMax[1:(nrow(dt)-1)] <-
    runmax(dt$igbt_temp[2:nrow(dt)],k=(prm.ps$rowFutureSpan-1),align='left')
  # -Inf results from runmax are treated as missing
  dt$futureIgbtMax[dt$futureIgbtMax==-Inf] <- NA
}
# Fail fast if any column required by the plots below is absent
requiredColumns <- c('time','block_height','igbt_temp','hookload',
                     'futureIgbtMax','rfGTcut','predicted','rfAlarm')
if (length(requiredColumns)>sum(requiredColumns %in% colnames(dt))) {
  cat('\npredictions data file missing columns:\n')
  cat(requiredColumns[which(!requiredColumns %in% colnames(dt))])
  cat('\nunable to continue...')
  stop('Missing Columns for predict plots in file')
}
#########################################################################
# Input data generated, now produce plots.
# If a plot filename was configured, write all plots to that PDF file;
# otherwise plot to the active device.
if(!(prm.ps$plotFilename=='')) pdf(file=prm.ps$plotFilename)
# First plot is rfcutoff chart: sweep the random-forest vote threshold from
# 0 to 1 and compute confusion counts and derived rates at each cutoff.
# Only consider observations where targetSource value at present time is below alarm cutoff
# (rows already in alarm, or with missing inputs, are excluded)
predict_select <- dt$igbt_temp < prm.ps$targetAlarmCutoff &
!is.na(dt$rfGTcut) & !is.na(dt$futureIgbtMax) & !is.na(dt$igbt_temp)
rfGTcutSelect <- dt$rfGTcut[predict_select]
cat('\nlength(rfGTcutSelect)=',length(rfGTcutSelect))
# indicator vectors for prediction target below and above alarm cutoff
targetBelowCutoff <- dt$futureIgbtMax[predict_select]<prm.ps$targetAlarmCutoff
targetAboveCutoff <- dt$futureIgbtMax[predict_select]>=prm.ps$targetAlarmCutoff
# the cutoff analysis needs both classes present; otherwise just report counts
if (sum(targetBelowCutoff,na.rm=T)==0 | sum(targetAboveCutoff,na.rm=T)==0) {
cat('\n\nCount of futureIgbtMax values below cutoff=',sum(targetBelowCutoff,na.rm=T))
cat('\nCount of futureIgbtMax values above cutoff=',sum(targetAboveCutoff,na.rm=T))
cat('\nThere must be futureIgbtMax values both above and below cutoff to proceed with cutoff analysis.\n')
} else {
# There are some values above cutoff and some values below cutoff
# can proceed with analysis
# one row per candidate vote-threshold cutoff (0.00, 0.01, ..., 1.00)
rfcutPredict <- data.frame(cutoff=seq(0,1.00,.01))
# confusion counts at each cutoff: an alarm fires when rfGTcut > cutoff
rfcutPredict$trueNegative <- apply(as.matrix(rfcutPredict$cutoff,ncol=1),MARGIN=1,function(x) {
return(sum(targetBelowCutoff & rfGTcutSelect<=x))})
rfcutPredict$falseNegative <- apply(as.matrix(rfcutPredict$cutoff,ncol=1),MARGIN=1,function(x) {
return(sum(targetAboveCutoff & rfGTcutSelect<=x))})
rfcutPredict$falsePositive <- apply(as.matrix(rfcutPredict$cutoff,ncol=1),MARGIN=1,function(x) {
return(sum(targetBelowCutoff & rfGTcutSelect>x))})
rfcutPredict$truePositive <- apply(as.matrix(rfcutPredict$cutoff,ncol=1),MARGIN=1,function(x) {
return(sum(targetAboveCutoff & rfGTcutSelect>x))})
# derived rates; 0/0 cases are coerced from NA to 0
rfcutPredict$sensitivity <- rfcutPredict$truePositive/(rfcutPredict$truePositive+rfcutPredict$falseNegative)
rfcutPredict$sensitivity[is.na(rfcutPredict$sensitivity)] <- 0
rfcutPredict$specificity <- rfcutPredict$trueNegative/(rfcutPredict$trueNegative+rfcutPredict$falsePositive)
rfcutPredict$specificity [is.na(rfcutPredict$specificity)] <- 0
rfcutPredict$posPredValue <- rfcutPredict$truePositive/(rfcutPredict$truePositive+rfcutPredict$falsePositive)
rfcutPredict$posPredValue[is.na(rfcutPredict$posPredValue)] <- 0
rfcutPredict$negPredValue <- rfcutPredict$trueNegative/(rfcutPredict$trueNegative+rfcutPredict$falseNegative)
rfcutPredict$negPredValue[is.na(rfcutPredict$negPredValue)] <- 0
# weighted F-score combining precision (posPredValue) and recall
# (sensitivity); ppvFscoreFactor is analogous to beta^2 in the F-beta formula
rfcutPredict$f_score <- (1+prm.ps$ppvFscoreFactor)*(rfcutPredict$posPredValue*rfcutPredict$sensitivity)/
(prm.ps$ppvFscoreFactor*rfcutPredict$posPredValue+rfcutPredict$sensitivity)
rfcutPredict$f_score[is.na(rfcutPredict$f_score)] <- 0
# report the cutoff row(s) with the maximum f_score, then draw the chart
cat('\nPrediction Data Optimum f_score rf tree threshold cutoff\n')
print(rfcutPredict[rfcutPredict$f_score==max(rfcutPredict$f_score),])
# prm.ps$maxF_scoreCutoff <- rfcutPredict$cutoff[rfcutPredict$f_score==max(rfcutPredict$f_score)][1]
# prm.ps$maxF_scoreCutoff <- prm.ps$rfGTCutoffAlarm
rfcutplot(rfcutPredict,sprintf('RF Vote Cutoff %i minute future %s',
round(prm.ps$rowFutureSpan/6),'igbt_temp'),prm.ps)
}
# second plot, scatterplot of actual futureIgbtMax vs rfGTcutAlarm.
# Each point is one observation, coloured by its confusion-matrix category:
# alarm fired (dt$rfAlarm) vs future max temperature above/below the cutoff.
# True negatives
selectedTN <- dt$futureIgbtMax<prm.ps$targetAlarmCutoff & !dt$rfAlarm
plot(dt$futureIgbtMax[selectedTN],dt$rfGTcut[selectedTN], pch='.',col='green',
main=sprintf('Actual futureIgbtMax vs rfGTcut alarm'),
xlab='actual futureIgbtMax',ylab='rfGTcut alarm',
xlim=c(min(dt$predicted,dt$futureIgbtMax,na.rm=T),max(dt$predicted,dt$futureIgbtMax,na.rm=T)),
ylim=c(-0.05, 1.05))
# False negatives
selectedFN <- dt$futureIgbtMax>=prm.ps$targetAlarmCutoff & !dt$rfAlarm
points(dt$futureIgbtMax[selectedFN],dt$rfGTcut[selectedFN], pch='.',col='black')
# True positives
selectedTP <- dt$futureIgbtMax>=prm.ps$targetAlarmCutoff & dt$rfAlarm
points(dt$futureIgbtMax[selectedTP],dt$rfGTcut[selectedTP], pch='.',col='red')
# False positives
selectedFP <- dt$futureIgbtMax<prm.ps$targetAlarmCutoff & dt$rfAlarm
points(dt$futureIgbtMax[selectedFP],dt$rfGTcut[selectedFP], pch='.',col='blue')
# legend reports each category's share of all classified observations
obsCount <- sum(selectedTN,na.rm=T)+sum(selectedFN,na.rm=T)+sum(selectedTP,na.rm=T)+sum(selectedFP,na.rm=T)
legend('bottomleft',c(sprintf('False Positive (%.1f%%)',round(100*sum(selectedFP,na.rm=T)/obsCount,digits=1)),
sprintf('True Positive (%.1f%%)',round(100*sum(selectedTP,na.rm=T)/obsCount,digits=1)),
sprintf('True Negative (%.1f%%)',round(100*sum(selectedTN,na.rm=T)/obsCount,digits=1)),
sprintf('False Negative (%.1f%%)',round(100*sum(selectedFN,na.rm=T)/obsCount,digits=1))),
fill=c('blue','red','green','black'),cex=0.8)
# Third plot is the high alarm event count.
# Analysis of prediction performance: label every row 'highp' (present
# igbt_temp at/above the alarm cutoff) or 'lowp', then work with runs of
# consecutive equal labels.
dt$presentTarget <- 'lowp'
dt$presentTarget[dt$igbt_temp>=prm.ps$targetAlarmCutoff & !is.na(dt$igbt_temp)] <- 'highp'
dt$presentTarget <- as.factor(dt$presentTarget)
# run-length encoding of the low/high labels, with each run's row range
targetrun <- data.frame(values=rle(as.vector(dt$presentTarget))$values,
lengths=rle(as.vector(dt$presentTarget))$lengths)
targetrun$from <- 0
targetrun$to <- 0
targetrun$to <- cumsum(targetrun$lengths)
targetrun$from <- targetrun$to - targetrun$lengths + 1
# Agglomerate highp runs that are separated by less than rowFutureSpan lowp values
# (short low gaps are relabelled high so nearby alarms count as one event)
dt$presentTargetAgglomerated <- dt$presentTarget
for (i in which(targetrun$values=='lowp' & targetrun$lengths<(prm.ps$rowFutureSpan))) {
dt$presentTargetAgglomerated[targetrun$from[i]:targetrun$to[i]] <- 'highp'
}
dt$presentTargetAgglomerated <- as.factor(dt$presentTargetAgglomerated)
# re-run the run-length encoding on the agglomerated labels
targetrunA <- data.frame(values=rle(as.vector(dt$presentTargetAgglomerated))$values,
lengths=rle(as.vector(dt$presentTargetAgglomerated))$lengths)
targetrunA$from <- 0
targetrunA$to <- 0
targetrunA$to <- cumsum(targetrunA$lengths)
targetrunA$from <- targetrunA$to - targetrunA$lengths + 1
cat('\nCount of igbt_temp observations below and above alarm cutoff')
print(table(dt$presentTarget)[c('lowp','highp')])
cat('\nCount of igbt_temp observation runs below and above alarm cutoff')
print(table(targetrun$values)[c('lowp','highp')])
cat('\nCount of igbt_temp agglomerated observation runs below and above alarm cutoff')
print(table(targetrunA$values)[c('lowp','highp')])
# Gather some highp statistics for each agglomerated high event:
#   highp             - count of truly-high rows inside the event
#   AdvancedAlarmTime - minutes of warning before the event began (-5 = none)
#   AlarmDensity      - fraction of alarm rows from first warning to event start
targetrunA$highp <- 0
targetrunA$highpDensity <- 0
targetrunA$AdvancedAlarmTime <- 0
targetrunA$AlarmDensity <- 0
for (i in which(targetrunA$values=='highp')) {
# Number of 'highp' (present value GT cutoff) records in current 'highp' interval
targetrunA$highp[i] <- sum(dt$presentTarget[targetrunA$from[i]:targetrunA$to[i]]=='highp')
# look back rowFutureSpan rows before the event for the first rfAlarm row
i1 <- targetrunA$from[i] - prm.ps$rowFutureSpan
i2 <- targetrunA$from[i] - 1
if (i1>0) {
i3 <- if (length(which(dt$rfAlarm[i1:i2]==TRUE))>0) i1 + min(which(dt$rfAlarm[i1:i2]==TRUE)) - 1 else -1
} else {
i3 <- -1
}
if (i3>0) {
# rows are converted to minutes via /6 (apparently 6 rows per minute --
# consistent with the round(prm.ps$rowFutureSpan/6) plot labels elsewhere)
targetrunA$AdvancedAlarmTime[i] <- round((i2 - i3 + 1)/6,digits=1)
targetrunA$AlarmDensity[i] <- round(length(which(dt$rfAlarm[i3:i2]==TRUE))/(i2-i3+1),digits=3)
} else {
targetrunA$AdvancedAlarmTime[i] <- -5
}
}
targetrunA$highpDensity <- round(targetrunA$highp/targetrunA$lengths,digits=3)
# false-alarm density inside low runs, excluding a 2*rowFutureSpan buffer at
# both ends (alarms close to a real event are not counted as false)
targetrunA$FalseAlarmDensity <- 0
for (i in which(targetrunA$values=='lowp')) {
i1 <- targetrunA$from[i] + 2*prm.ps$rowFutureSpan
i2 <- targetrunA$to[i] - 2*prm.ps$rowFutureSpan
if (i2>i1) targetrunA$FalseAlarmDensity[i] <- round(length(which(dt$rfAlarm[i1:i2]==TRUE))/(i2-i1+1),digits=3) else 0
}
# histogram of warning times, restricted to events with at least
# highCountForHist truly-high rows
if (length(targetrunA$AdvancedAlarmTime[targetrunA$values=='highp' &
targetrunA$highp>=prm.ps$highCountForHist])>0) {
hist(targetrunA$AdvancedAlarmTime[targetrunA$values=='highp' & targetrunA$highp>=prm.ps$highCountForHist],
breaks=(5+prm.ps$rowFutureSpan/6),
xlab='Alarm Time in Advance of High Alarm Event (minutes)',
ylab='High Alarm Event Count',
main=sprintf('High %s Alarm Events from %s to %s','igbt_temp',substr(dt$time[1],1,10),substr(dt$time[nrow(dt)],1,10)))
}
# Fourth plot and beyond strip chart plots of selected prediction data,
# one chart per `interval` rows (interval <= 0 disables them)
if (prm.ps$interval>0) {
for (from in seq(prm.ps$from,nrow(dt),prm.ps$interval)) {
to <- from + prm.ps$interval - 1
predictStripchart(dt,prm.ps$title, prm.ps$targetAlarmCutoff, from, to)
}
}
# close the PDF device if one was opened at the top of the plotting section
if(!(prm.ps$plotFilename=='')) dev.off()
|
b894bdd5fa26e49949049b50cc1d04182c16368d | c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d | /snpStats/doc/imputation-vignette.R | 22a76401d9e600132e1073c9a05afefc8adc40d1 | [
"MIT"
] | permissive | solgenomics/R_libs | bcf34e00bf2edef54894f6295c4f38f1e480b3fc | e8cdf30fd5f32babf39c76a01df5f5544062224e | refs/heads/master | 2023-07-08T10:06:04.304775 | 2022-05-09T15:41:26 | 2022-05-09T15:41:26 | 186,859,606 | 0 | 2 | MIT | 2023-03-07T08:59:16 | 2019-05-15T15:57:13 | C++ | UTF-8 | R | false | false | 6,411 | r | imputation-vignette.R | ### R code from vignette source 'imputation-vignette.Rnw'
###################################################
### code chunk number 1: init
###################################################
# Attach snpStats (SNP genotype classes and association tests) and hexbin
# (hexagonal-binned scatterplots), then load the packaged example data set;
# the objects used below (snps.10, snp.support, subject.support with its
# cc/stratum columns) come from this data set.
library(snpStats)
library(hexbin)
data(for.exercise)
###################################################
### code chunk number 2: select
###################################################
# Pick 200 random subjects as the "training" panel and mask every second SNP
# column: `missing` = SNPs to be imputed, `present` = SNPs used as predictors.
training <- sample(1000, 200)
select <- seq(1, ncol(snps.10),2)
missing <- snps.10[training, select]
present <- snps.10[training, -select]
missing
present
###################################################
### code chunk number 3: target
###################################################
# The remaining (non-training) subjects, typed only at the `present` SNPs --
# the data the imputation rules will later be applied to.
target <- snps.10[-training, -select]
target
###################################################
### code chunk number 4: imputation-vignette.Rnw:91-93
###################################################
# Ground truth: the non-training subjects' actual genotypes at the masked
# SNPs, kept aside so imputed results can be validated later.
lost <- snps.10[-training, select]
lost
###################################################
### code chunk number 5: positions
###################################################
# Chromosome positions of the masked and retained SNPs.
pos.miss <- snp.support$position[select]
pos.pres <- snp.support$position[-select]
###################################################
### code chunk number 6: rules
###################################################
# Derive, from the training panel, one prediction rule per masked SNP.
rules <- snp.imputation(present, missing, pos.pres, pos.miss)
###################################################
### code chunk number 7: rule1
###################################################
# Inspect the first ten rules.
rules[1:10]
###################################################
### code chunk number 8: rule2
###################################################
# Inspect two specific rules by SNP name.
rules[c('rs11253563', 'rs2379080')]
###################################################
### code chunk number 9: summary
###################################################
summary(rules)
###################################################
### code chunk number 10: ruleplot
###################################################
plot(rules)
###################################################
### code chunk number 11: imptest
###################################################
# Single-SNP association tests of case/control status (cc) stratified by
# `stratum`, with genotypes imputed on the fly via `rules`.
imp <- single.snp.tests(cc, stratum, data=subject.support,
snp.data=target, rules=rules)
###################################################
### code chunk number 12: realtest
###################################################
# The same tests on the actually observed genotypes at the masked SNPs.
obs <- single.snp.tests(cc, stratum, data=subject.support, snp.data=lost)
###################################################
### code chunk number 13: compare
###################################################
# Compare -log10 p-values (1 df) from imputed vs observed genotypes;
# hexbin scatterplot with the y = x reference line.
logP.imp <- -log10(p.value(imp, df=1))
logP.obs <- -log10(p.value(obs, df=1))
hb <- hexbin(logP.obs, logP.imp, xbin=50)
sp <- plot(hb)
hexVP.abline(sp$plot.vp, 0, 1, col="black")
###################################################
### code chunk number 14: best
###################################################
# Repeat the comparison for well-imputed SNPs only (imputation R^2 > 0.9).
use <- imputation.r2(rules)>0.9
hb <- hexbin(logP.obs[use], logP.imp[use], xbin=50)
sp <- plot(hb)
hexVP.abline(sp$plot.vp, 0, 1, col="black")
###################################################
### code chunk number 15: rsqmaf
###################################################
# Imputation R^2 against minor allele frequency for each rule.
hb <- hexbin(imputation.maf(rules), imputation.r2(rules), xbin=50)
sp <- plot(hb)
###################################################
### code chunk number 16: imptest-rhs
###################################################
# Same imputed tests via the regression interface (snp.rhs.tests) with a
# stratified binomial model, then compared against the observed -log10 p's.
imp2 <- snp.rhs.tests(cc~strata(stratum), family="binomial",
data=subject.support, snp.data=target, rules=rules)
logP.imp2 <- -log10(p.value(imp2))
hb <- hexbin(logP.obs, logP.imp2, xbin=50)
sp <- plot(hb)
hexVP.abline(sp$plot.vp, 0, 1, col="black")
###################################################
### code chunk number 17: impstore
###################################################
# Materialise the imputed genotypes (as uncertain genotype calls rather than
# numeric dosages) so they can be reused directly.
imputed <- impute.snps(rules, target, as.numeric=FALSE)
###################################################
### code chunk number 18: uncert1
###################################################
# Visualise the per-call uncertainty for one imputed SNP.
plotUncertainty(imputed[, "rs4880568"])
###################################################
### code chunk number 19: uncert2
###################################################
plotUncertainty(imputed[, "rs2050968"])
###################################################
### code chunk number 20: imptest2
###################################################
# Association tests on the stored imputed genotypes, retaining uncertain calls.
imp3 <- single.snp.tests(cc, stratum, data=subject.support,
snp.data=imputed, uncertain=TRUE)
###################################################
### code chunk number 21: imp3
###################################################
# Compare the first five tests with the on-the-fly imputed results above.
imp3[1:5]
imp[1:5]
###################################################
### code chunk number 22: mach
###################################################
# Read imputed genotype probabilities produced by the MACH program (example
# file shipped with snpStats) and plot the uncertainty of one column.
path <- system.file("extdata/mach1.out.mlprob.gz", package="snpStats")
mach <- read.mach(path)
plotUncertainty(mach[,50])
###################################################
### code chunk number 23: class-imp-obs
###################################################
class(imp)
###################################################
### code chunk number 24: save-scores
###################################################
obs <- single.snp.tests(cc, stratum, data=subject.support, snp.data=missing,
score=TRUE)
imp <- single.snp.tests(cc, stratum, data=subject.support,
snp.data=target, rules=rules, score=TRUE)
###################################################
### code chunk number 25: imputation-vignette.Rnw:345-347
###################################################
class(obs)
class(imp)
###################################################
### code chunk number 26: pool
###################################################
both <- pool(obs, imp)
class(both)
both[1:5]
###################################################
### code chunk number 27: pool-score
###################################################
both <- pool(obs, imp, score=TRUE)
class(both)
###################################################
### code chunk number 28: sign
###################################################
table(effect.sign(obs))
###################################################
### code chunk number 29: switch
###################################################
effect.sign(obs)[1:6]
sw.obs <- switch.alleles(obs, 1:3)
class(sw.obs)
effect.sign(sw.obs)[1:6]
|
63dc6191762e6ce94cb31f1df146a02f10764430 | 9283b2a903149cc4cf33d4ece839a3aab9a73065 | /man/file_exts.Rd | bf4ff1ec6793a7fad0ea19b207dd0703187e17a2 | [
"MIT"
] | permissive | quanrd/gggenomes | 4193542dc3a8cc416f9dcd2774718ac0c918b49b | 1bfbdfe2a6f5b4c48b59cb3d2298be178fb3e2d1 | refs/heads/master | 2023-03-01T09:31:22.231667 | 2021-02-01T23:00:53 | 2021-02-01T23:00:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 525 | rd | file_exts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{file_exts}
\alias{file_exts}
\title{Defined file extensions and associated formats}
\usage{
file_exts(context)
}
\arguments{
\item{context}{a file format context defined in \code{gggenomes_global$file_formats}}
}
\value{
vector of file extensions with formats as names
}
\description{
Defined file extensions and associated formats
}
\examples{
# vector of zip-context file extensions and format names
gggenomes:::file_exts("zips")
}
|
b67baadb78357a6f7f9fda5c795b1c3212a4c85b | 682fdb45d76bd462593d07113a0f642665ff44a3 | /man/track.length.Rd | 49c89c0a0aec352f552162db70312e1baf5a79c8 | [
"MIT"
] | permissive | dfsp-spirit/fsbrain | dd782c91f95c52b8039e4ec6642345d994a6ed84 | 09f506dbf5467356ab26a65246f31051da58f198 | refs/heads/master | 2023-07-06T10:11:18.468284 | 2023-06-26T16:42:45 | 2023-06-26T16:42:45 | 209,085,379 | 44 | 12 | NOASSERTION | 2023-01-15T19:49:54 | 2019-09-17T15:05:51 | R | UTF-8 | R | false | true | 544 | rd | track.length.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_dti_tracks.R
\name{track.length}
\alias{track.length}
\title{Compute the total length of a path given by the coordinates of its points.}
\usage{
track.length(point_coords)
}
\arguments{
\item{point_coords}{n x 3 numerical matrix of 3D point coordinates, in the order of traversal.}
}
\value{
float, the sum of the length of all segments of the path.
}
\description{
Compute the total length of a path given by the coordinates of its points.
}
\keyword{internal}
|
b312deb5a57801a5ee67e3b8ca35603d5aba42de | be4814bb289e1a3adf0cbdca2ef189592578e891 | /cours1.R | de0fd16acba46dd8a335d5797d3b779b8d97759d | [] | no_license | thomas-marquis/oc-stats-inf | c8e025197787dc039cb473c2f4935e6a026e1e1e | 991b0bc33625420b165cba5b1475760cce80e7f0 | refs/heads/master | 2020-03-26T09:16:52.510055 | 2018-08-14T16:06:16 | 2018-08-14T16:06:16 | 144,743,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 51 | r | cours1.R | essence <- read.table("essence.txt", header = TRUE) |
759ed5ef66a892229ca434776c4c8ce695c9f8b0 | 651db456b041d0b30466499907ea36128e2ac7a8 | /euler/7.R | 0529722f0f591a9e1d9f1043bafaec09b7146924 | [] | no_license | willcuratolo/projectEuler | 0ddf62b324c89bc9d977d404e90ec362691b895b | 6953bfeb4e8d7f89ad057f6c8978a3f171305feb | refs/heads/master | 2020-03-24T18:28:26.575280 | 2018-07-30T15:19:44 | 2018-07-30T15:19:44 | 142,893,087 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,088 | r | 7.R | # 10001st prime
# Problem 7
# By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
# that the 6th prime is 13.
# What is the 10,001st prime number?
# My solution to this is not very elegant, but I could see an easy solution
# that reuses some code I've just written.
# Sieve of Eratosthenes: return all prime numbers less than or equal to n,
# as an increasing integer vector (empty when n < 2).
prime <- function(n) {
  n <- as.integer(n)
  # guard degenerate inputs explicitly instead of relying on vector growth
  if (n < 2L) {
    return(integer(0))
  }
  primes <- rep(TRUE, n)
  primes[1] <- FALSE
  last.prime <- 2L
  fsqr <- floor(sqrt(n))
  while (last.prime <= fsqr) {
    # cross out every multiple of the current prime (2p, 3p, ...)
    primes[seq.int(2 * last.prime, n, last.prime)] <- FALSE
    # advance to the next surviving candidate at or below sqrt(n);
    # an explicit length check replaces the fragile any() on an index vector
    sel <- which(primes[(last.prime + 1):(fsqr + 1)])
    if (length(sel) > 0) {
      last.prime <- last.prime + min(sel)
    } else {
      last.prime <- fsqr + 1
    }
  }
  which(primes)
}
# General solver: return the N-th prime number, enlarging the sieve bound
# tenfold until at least N primes have been generated.
get_nth_prime <- function(N) {
  bound <- 100000
  found <- prime(bound)
  while (length(found) < N) {
    bound <- bound * 10
    found <- prime(bound)
  }
  found[N]
}
# specific solution: print the 10,001st prime number (the Project Euler answer)
print(get_nth_prime(10001))
|
7dd3145b3e9b97c94ae68640c2bd46f378f0f6a6 | ccc766699a9eaa3d3834ccc7fcfa8c069cd0030d | /man/edp.long.Rd | 9d08fb0c071a443b3b46ac1197c65c7b63978162 | [] | no_license | jasonroy0/EDPlong | c2f12a1fc7581675bafdb5891783292cf08fe7e2 | 92084e884cbc0329266c593a1931e9763abfed0d | refs/heads/master | 2020-03-21T20:42:04.459975 | 2017-05-11T13:25:13 | 2017-05-11T13:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,733 | rd | edp.long.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edp_long.R
\name{edp.long}
\alias{edp.long}
\title{Longitudinal mixed model with an enriched Dirichlet process prior}
\usage{
edp.long(y, trt, newtrt, x, newx, id, timepoints, prior, mcmc, spline,
verbose = FALSE, printevery = 100)
}
\description{
Fits a longitudinal mixed model with an enriched Dirichlet process prior and
returns posterior draws plus outcome predictions for hold-out subjects
(see Details).
}
\details{
@param y vector of outcomes. may have multiple (longitudinal outcomes) for each subject.
@param trt vector of treatments (may be NULL). only one treatment is allowed per subject.
@param newtrt vector of treatments for hold out subjects whose outcome is predicted.
@param x matrix of covariates. accepts binary and continuous covariates. covariates must be ordered with binary variables first. each row must correspond to one subject so nrow(x) = length(unique(id))
@param newx matrix of covariates for hold out subjects whose outcome is predicted
@param id vector of ids corresponding to y. must have same length as y.
@param timepoints vector of timepoints on which outcome y is drawn.
@param prior list of prior parameters
@param mcmc list of mcmc parameters
@param spline list of spline rules for timepoints. may be NULL if none are desired.
@param verbose logical indicating if the user wants to see printed output in the console. default is FALSE.
@param printevery numeric indicating how often to print updates during MCMC algorithm. used only if verbose = TRUE. default is 100.
@return Returns a list containing posterior draws and predictions for parameters. Size of output depends on value of mcmc$nsave.
@export
@useDynLib EDPlong
@importFrom splines bs
@importFrom Rcpp evalCpp
@import dplyr
@import MCMCpack
@import mvnfast
}
|
cdb6ab6f7787d08c0b50849fa242524f43d73f8f | 72211cfee1b52125a8cb565d931eb26453495a4b | /cachematrix.R | 4a8863c1f599449a0f7878fc251593ba51525b5b | [] | no_license | ParvathySunil/ProgrammingAssignment2 | b42b001934dc97c4b9bad4e0c97fe4b9cb094027 | 7d1b03afa19d223399efdd7f1657be3367ec31fa | refs/heads/master | 2021-08-08T01:29:53.738942 | 2017-11-09T09:02:53 | 2017-11-09T09:02:53 | 110,072,993 | 0 | 0 | null | 2017-11-09T05:47:43 | 2017-11-09T05:47:42 | null | UTF-8 | R | false | false | 1,655 | r | cachematrix.R | ## Creating a matrix, inversing the matrix and caching the inverse to avoid repeated computation.
## If the inversing is done first time, caching is done. Everytime aftereards if inverse is attempted,
##first checks if already cached. If yes, retrieve the value from cache.
## Build a cache-aware matrix wrapper. The returned object is a list of four
## closures that share the stored matrix `x` and its lazily computed inverse:
##   set(y)              -- store a new matrix and drop any stale cached inverse
##   get()               -- return the stored matrix
##   setInverse(inverse) -- cache a freshly computed inverse
##   getInverse()        -- return the cached inverse, or NULL when unset
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## Return the inverse of a cache-aware matrix created by makeCacheMatrix().
## The inverse is computed with solve() only on the first call; subsequent
## calls announce a cache hit and reuse the stored value. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
  } else {
    message("retreiving cached data")
  }
  inverse
}
|
f2f6f8c07e40ebffd6d8a4107707124f63f1bfcd | 3c5d52b699e6645cbb086b4f45519f898a4b2f64 | /R/powt.series.R | d38e755c12b5bce7ffc8c069d296bd1e33e2eccd | [] | no_license | AndyBunn/dplR | 249427c0be6c68c80c267ad4392a03a331bb056d | 8bb524ae6a024affea2c9ec982b0972fc5175280 | refs/heads/master | 2023-07-06T04:02:45.073034 | 2023-04-12T21:41:31 | 2023-04-12T21:41:31 | 195,873,071 | 31 | 12 | null | 2023-06-23T14:33:40 | 2019-07-08T19:21:39 | R | UTF-8 | R | false | false | 1,675 | r | powt.series.R | powt.series <- function (series, rescale = FALSE)
{
  # The power transformation is only defined for positive values.
  if(any(series <0,na.rm = TRUE)) {
    stop("'series' values must be greater than zero")
  }
  # `rescale` must be a logical flag.
  if (!is.logical(rescale))
    stop("'rescale' must be either FALSE (the default) or TRUE")
  # Helper: smallest measurement precision of the series; used below as a
  # substitute for zeros so that log() stays finite.
  getprec <- function(series) {
    series.num <- as.numeric(series)
    series.num <- series.num[!is.na(series.num) & series.num != 0]
    if (length(series.num) == 0) {
      NA_real_
    }
    else {
      series.char <- format(series.num, scientific = FALSE)
      if (grepl(".", series.char[1], fixed = TRUE)) {
        # decimal values: precision = 10^-(number of decimal digits)
        maxdig <- nchar(sub("^[^.]*\\.", "", series.char[1]))
      }
      else {
        # integer values: precision = 10^(smallest count of trailing zeros)
        rm.trail.zeros <- sub("0*$", "", series.char)
        n.trail.zeros <- nchar(series.char) - nchar(rm.trail.zeros)
        maxdig <- -min(n.trail.zeros)
      }
      10^-maxdig
    }
  }
  # Helper: estimate the power p = 1 - b, where b is the slope of the
  # regression of log(running spread) on log(running mean) of adjacent
  # observations.
  fit.lm <- function(series) {
    n <- length(series)
    drop.1 <- series[-1]
    drop.n <- series[-n]
    runn.M <- (drop.1 + drop.n)/2
    runn.S <- abs(drop.1 - drop.n)
    # replace zeros by the measurement precision so log() is defined
    runn.S[runn.S == 0] <- prec
    runn.M[runn.M == 0] <- prec
    mod <- lm.fit(cbind(1, log(runn.M)), log(runn.S))
    b <- mod[["coefficients"]][2]
    1 - b
  }
  # Helper: apply the power transformation x^p to the non-NA entries,
  # leaving NA positions untouched.
  transf <- function(x) {
    Xt <- x
    X.nna <- which(!is.na(x))
    X <- na.omit(x)
    p <- abs(fit.lm(X))
    X2 <- X^p
    Xt[X.nna] <- X2
    Xt
  }
  prec <- getprec(series)
  xt <- transf(series)
  # Optionally rescale the transformed series back to the original mean and
  # standard deviation; element names are preserved.
  if(rescale){
    xtNames <- names(xt)
    xt <- c(scale(xt) * sd(series,na.rm = TRUE) + mean(series,na.rm = TRUE))
    names(xt) <- xtNames
  }
  xt
}
|
ed9bf52ebfb6b692f04c83e538f5a458046049d9 | 22fbeb95068332116e8c7739751a34b2649243d4 | /man/wigle_region_stats.Rd | 0e3bd564e715e3ac7c6ee04fd2a36c94c4e22be8 | [
"MIT"
] | permissive | sindhuselvam/wiglr | c3205ef25ae4ad9e39eb5c3b08d2438e202934b2 | 16f13fe03a7119fb9a3e7248c674dd6775aa2a01 | refs/heads/master | 2020-04-24T02:51:25.814951 | 2019-02-18T16:20:45 | 2019-02-18T16:20:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 718 | rd | wigle_region_stats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regions.R
\name{wigle_region_stats}
\alias{wigle_region_stats}
\title{Get WiGLE statistics for a specified country, organized by region}
\usage{
wigle_region_stats(country = "US", api_key = wigle_api_key())
}
\arguments{
\item{country}{iso2c two-letter country code; defaults to \code{US}}
\item{api_key}{WiGLE API Key. See \code{\link[=wigle_api_key]{wigle_api_key()}}}
}
\value{
\code{list} of three data frames (tibbles) containing \code{region}, \code{encryption} and
\code{postal_code} stats, or an invisible JSON API response when the request
returns a 2xx status but is otherwise unsuccessful.
}
\description{
Get WiGLE statistics for a specified country, organized by region
}
|
36081b042a84d4db9cc24e61be64d3df5d080f85 | 71ee93e10a94fbfe6375352dc6da5ca2e41e802c | /stat355/proj2/src/proj2.r | 185e029ed46dbce47410e52877b70c54770d8417 | [] | no_license | dangbert/college | ef25b08bd16bfb48d514d0668354a01342c9e104 | b137658af27f86297ca70665ff5c487be06076ac | refs/heads/master | 2022-12-15T19:37:46.603666 | 2021-11-23T19:30:44 | 2021-11-23T19:30:44 | 191,094,083 | 1 | 0 | null | 2022-11-22T02:43:47 | 2019-06-10T04:01:26 | C | UTF-8 | R | false | false | 1,965 | r | proj2.r | # Daniel Engbert
# proj2
# STAT 355 Spring 2017
# Experiment 1: sampling distribution of the mean for a normal population.
#
# Draws 1000 samples of size n from Normal(m, s); records each sample's mean
# and standard deviation, prints a summary of the first sample, saves a
# histogram of the 1000 sample means to histogram1.png, then prints the mean
# of the means and the standard deviation of the sample standard deviations.
#
# parameters:
#   n: sample size,  m: mean,  s: standard deviation
experiment1 <- function(n, m, s) {
  cat("---------- Experiment1 ----------\n")
  # preallocate result vectors; growing them inside the loop is O(n^2)
  means <- numeric(1000)
  sdevs <- numeric(1000)
  for (i in 1:1000) { # r starts indices at 1 (not 0)
    res <- rnorm(n, m, s) # n, mean, sd
    means[i] <- mean(res)
    sdevs[i] <- sd(res)
    if (i == 1) {
      cat("first sample:\n")
      cat(sprintf("\tmean = %f\n", mean(res)))
      cat(sprintf("\tstandard deviation = %f\n\n", sd(res)))
    }
  }
  png("histogram1.png")
  hist(means, main="Histogram of Means (Part 1)")
  # close the png device so the file is actually finalized on disk
  # (the original left the device open, leaking one device per call)
  dev.off()
  cat("Experiment Distribution Results:\n")
  cat(sprintf("\tmean = %f\n", mean(means)))
  cat(sprintf("\tstandard deviation = %f\n\n\n\n", sd(sdevs)))
}
# Experiments 2 and 3: sampling distribution of the mean for a binomial
# population. Draws 1000 samples, prints the first sample, saves a histogram
# of the sample means to histogram<ex>.png, then prints summary statistics.
#
# parameters:
#   n: number of observations, s: size (number of trials),
#   p: prob of success on each trial, ex: experiment number (used in labels)
experiment2 <- function(n, s, p, ex) {
  cat(sprintf("---------- Experiment%d ----------\n",ex))
  # preallocate result vectors; growing them inside the loop is O(n^2)
  means <- numeric(1000)
  sdevs <- numeric(1000)
  for (i in 1:1000) {
    res <- rbinom(n, s, p)  # num observations, size, prob of success
    means[i] <- mean(res)
    sdevs[i] <- sd(res)
    if (i == 1) {
      cat("first sample:\n")
      print(res)
      cat(sprintf("\tmean = %f\n", mean(res)))
      cat(sprintf("\tstandard deviation = %f\n\n", sd(res)))
    }
  }
  png(sprintf("histogram%d.png", ex))
  hist(means, main=sprintf("Histogram of Means (Part %d)", ex))
  # close the png device so the file is actually finalized on disk
  # (the original left the device open, leaking one device per call)
  dev.off()
  cat("Experiment Distribution Results:\n")
  cat(sprintf("\tmean = %f\n", mean(means)))
  cat(sprintf("\tstandard deviation = %f\n\n\n\n", sd(sdevs)))
}
# Run the three required experiments: one normal, then two binomial settings.
experiment1(40, 3, 2)
experiment2(10, 15, 0.15, ex=2)
experiment2(10, 120, 0.15, ex=3)
|
574ee561d2ab05966f74c8c56db5b9213e4eff7a | 82ea05843ae51c2a3a920d2de95b0dfac534ee82 | /tests.R | 1693bd2645c9f0f07ebb6c802073fea0258684e6 | [
"MIT"
] | permissive | TestingEquivalence/PowerLawR | b15b797b57c6f473f90abf79a59980025f822aea | a92022f072f2f2787d9bfd029d86d7ba5911accb | refs/heads/master | 2021-11-08T19:17:39.225981 | 2021-11-05T14:36:55 | 2021-11-05T14:36:55 | 199,118,799 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,989 | r | tests.R | asymptotic="asymptotic"
bootstrap="bootstrap"
# Gradient helper: for each index i, return 2 * sum over j >= i of
# (F1[j] - F2[j]); i.e. the reversed cumulative sum of 2 * (F1 - F2).
derivative <- function(F1, F2) {
  gap <- 2 * (F1 - F2)
  rev(cumsum(rev(gap)))
}
# Asymptotic standard deviation of the minimum-distance test statistic.
#
# p          vector of cell probabilities
# derivative gradient vector v (output of derivative())
#
# Computes sqrt( sum_i p_i v_i^2 - sum_i sum_j p_i p_j v_i v_j ),
# i.e. the standard deviation of v under the distribution p.
asympt_stdev <- function(p, derivative) {
  vec <- derivative
  # first term: E[v^2] under p
  vnsq_1 <- sum(p * vec * vec)
  # one row (index j) of the double sum p_i p_j v_i v_j
  cross <- function(j) {
    sum(vec[j] * vec * p[j] * p)
  }
  # seq_len() (not 1:k) so an empty p yields an empty, zero-valued sum, and
  # vapply() (not sapply) guarantees a numeric result of known shape
  vnsq_2 <- sum(vapply(seq_len(length(p)), cross, numeric(1)))
  sqrt(vnsq_1 - vnsq_2)
}
#' The asymptotic test is based on the asymptotic distribution of the test statistic.
#' The test statistic is the minimum Euclidean distance between the empirical CDF
#' and the family of power laws with given lower and upper bounds.
#' The test should be used carefully because it is approximate
#' and may be anti-conservative at some points.
#' In order to obtain a conservative test, reducing alpha (usually halving) or
#' slight shrinkage of the tolerance parameter may be appropriate.
#' \code{asymptotic_test} asymptotic equivalence test for power law
#' @param alpha significance level
#' @param frequency vector of observed counting frequencies
#' @param kmin lower bound of the power law
#' @param tol optional tolerance parameter for the numerical optimization
#' to find the closest power law
#' @return named vector containing:
#' min_eps - the minimum tolerance parameter for which H0 can be rejected
#' distance - Euclidean distance between empirical CDF and CDF of the closest power law
#' beta - minimum distance estimate of power law exponent
#' sample_size - sample size
asymptotic_test<-function(alpha, frequency, kmin, tol=NA)
{
  # normalize counts to relative frequencies and build the empirical CDF
  n=sum(frequency)
  frequency=frequency/n
  cdf=cumsum(frequency)
  # the upper bound is implied by the length of the frequency vector
  kmax=length(frequency)+kmin-1
  # minimum-distance fit: search beta in [1, 3] minimizing the Euclidean
  # distance between the empirical CDF and the power-law CDF
  res = nearestPowerLaw(cdf,kmin,kmax,1,3, tol)
  beta=res$minimum
  distance=res$objective
  pLawCDF=powerLawCDF(beta,kmin,kmax)
  # asymptotic standard deviation of the squared-distance statistic,
  # scaled by sqrt(sample size)
  drv=derivative(cdf,pLawCDF)
  vol=asympt_stdev(frequency,drv)
  vol=vol/ sqrt(n)
  # one-sided standard-normal quantile for the (1 - alpha) bound
  qt=qnorm(1-alpha,0,1)
  min_eps = distance*distance + qt*vol
  min_eps=sqrt(min_eps)
  vec=c(min_eps,distance,beta,n)
  names(vec)=c("min_eps","distance","beta","sample_size")
  return(vec)
}
# Worker for multiple_test(): run one equivalence test for a single
# (kmin, kmax) cell of the bounds grid.
#
# row       pair of indices (i, j) into parameter$kmins / parameter$kmaxs
# parameter parameter list as described in multiple_test()
# Returns c(i, j, <test result vector>).
fmultiple<-function(row,parameter){
  kmin=parameter$kmins[row[1]]
  kmax=parameter$kmaxs[row[2]]
  # convert the raw counting data to frequencies on [kmin, kmax]
  frequency=list2freq(parameter$counting,kmin,kmax,parameter$scale)
  if (parameter$test=="asymptotic"){
    res=asymptotic_test(alpha = parameter$alpha,frequency,
                        kmin=kmin/parameter$scale)
  }
  if (parameter$test=="bootstrap"){
    # fixed seed so bootstrap results are reproducible across workers
    set.seed(30062020)
    res= bootstrap_test(alpha = parameter$alpha, frequency,
                        kmin=kmin/parameter$scale,
                        nSimulation = parameter$nSimulation)
  }
  return(c(row[1],row[2],res))
}
#' 
#' \code{multiple_test} Convenient function to perform multiple equivalence tests on the same data.
#' It also transforms the usual counting data to frequency data.
#' Usually we observe the counting data only and also do not know
#' the upper and lower bound of the power law.
#' In this case we need to transform the counting data to frequencies.
#' We also may perform multiple equivalence tests for different values of upper and lower bounds.
#' The convenient function "multiple_test" performs all these tasks efficiently
#' using multiple cores for computing.
#' @param parameter The parameter should be a list (s3 object)
#' containing following fields (see example.R):
#' scale - scaling, which may be necessary to make computations feasible
#' alpha - significance level
#' nSimulation - number of bootstrap replications
#' test - string, should be asymptotic or bootstrap
#' kmins - vector of possible lower bounds of power law
#' kmaxs - vector of possible upper bounds of power law
#' counting - counting data where the upper and lower bounds are unknown
#' @return test returns list of four tables:
#' beta - table of the estimated beta's (minimum distance estimate of power law exponent)
#' distance - table of Euclidean distances between empirical CDF and CDF of the closest power law
#' sample_size - table of sample sizes
#' min_eps - table of the minimum tolerance parameters for which H0 can be rejected
multiple_test <- function(parameter) {
  nrow=length(parameter$kmins)
  ncol = length(parameter$kmaxs)
  # one result matrix per statistic, indexed by kmin (rows) x kmax (columns)
  min_eps=matrix(data=NA,nrow,ncol)
  beta=matrix(data=NA,nrow,ncol)
  distance=matrix(data=NA,nrow,ncol)
  sample_size=matrix(data=NA,nrow,ncol)
  rownames(min_eps)=parameter$kmins
  rownames(beta)=parameter$kmins
  rownames(distance)=parameter$kmins
  rownames(sample_size)=parameter$kmins
  colnames(min_eps)=parameter$kmaxs
  colnames(beta)=parameter$kmaxs
  colnames(distance)=parameter$kmaxs
  colnames(sample_size)=parameter$kmaxs
  # every (i, j) grid cell becomes one row of `grd`, processed in parallel
  i=c(1:nrow)
  j=c(1:ncol)
  grd=expand.grid(i,j)
  colnames(grd)=c("i","j")
  cl=getCluster()
  # the worker function must be exported explicitly to the cluster nodes
  clusterExport(cl,c("fmultiple"))
  ls=parApply(cl,grd, 1, fmultiple,parameter)
  stopCluster(cl)
  # ls=apply(grd, 1, fmultiple, parameter)   # serial fallback, for debugging
  # each column of `ls` is c(i, j, min_eps, distance, beta, sample_size);
  # scatter those values back into the result matrices
  for (rn in c(1:ncol(ls))){
    r=ls[,rn]
    i=r[1]
    j=r[2]
    min_eps[i,j]=r[3]
    distance[i,j]=r[4]
    beta[i,j]=r[5]
    sample_size[i,j]=r[6]
  }
  ls=list(min_eps=min_eps,distance=distance,beta=beta, sample_size=sample_size)
  return(ls)
}
# Maximum-likelihood estimation of the power-law exponent over a grid of
# candidate lower (kmins) and upper (kmaxs) bounds.
#
# parameter: list with fields kmins, kmaxs, counting and scale
#            (same structure as in multiple_test()).
# Returns a list of two matrices indexed by kmin (rows) x kmax (columns):
#   beta        - MLE of the power-law exponent for each (kmin, kmax) pair
#   sample_size - number of observations used for each pair
multiple_MLE <- function(parameter) {
  nrow <- length(parameter$kmins)
  ncol <- length(parameter$kmaxs)
  beta <- matrix(data = NA, nrow, ncol)
  sample_size <- matrix(data = NA, nrow, ncol)
  rownames(beta) <- parameter$kmins
  rownames(sample_size) <- parameter$kmins
  colnames(beta) <- parameter$kmaxs
  colnames(sample_size) <- parameter$kmaxs
  # seq_len() keeps the loops empty when either bound vector is empty
  # (the original `c(1:n)` pattern would run with indices c(1, 0))
  for (i in seq_len(nrow)) {
    for (j in seq_len(ncol)) {
      kmin <- parameter$kmins[i]
      kmax <- parameter$kmaxs[j]
      # convert the raw counting data to frequencies on [kmin, kmax]
      frq <- list2freq(parameter$counting, kmin, kmax, parameter$scale)
      sample_size[i, j] <- sum(frq)
      # MLE search for the exponent on the interval [1, 3]
      res <- powerLawMLE(frq, kmin / parameter$scale, kmax / parameter$scale, 1, 3)
      beta[i, j] <- res$minimum
    }
  }
  list(beta = beta, sample_size = sample_size)
}
|
e603ac27c6a0986762950ad55220e2386187a3a3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/lawn/examples/lawn_nearest.Rd.R | baf87a4a4634907202a8ae5ff5eebfb715feeb9f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 902 | r | lawn_nearest.Rd.R | library(lawn)
### Name: lawn_nearest
### Title: Get nearest point
### Aliases: lawn_nearest
### ** Examples
# A single GeoJSON point feature to search from.
point <- '{
 "type": "Feature",
 "properties": {
   "marker-color": "#0f0"
 },
 "geometry": {
   "type": "Point",
   "coordinates": [28.965797, 41.010086]
 }
}'
# A GeoJSON FeatureCollection of candidate points to search among.
against <- '{
 "type": "FeatureCollection",
 "features": [
   {
     "type": "Feature",
     "properties": {},
     "geometry": {
       "type": "Point",
       "coordinates": [28.973865, 41.011122]
     }
   }, {
     "type": "Feature",
     "properties": {},
     "geometry": {
       "type": "Point",
       "coordinates": [28.948459, 41.024204]
     }
   }, {
     "type": "Feature",
     "properties": {},
     "geometry": {
       "type": "Point",
       "coordinates": [28.938674, 41.013324]
     }
   }
 ]
}'
# Return the feature in `against` nearest to `point`.
lawn_nearest(point, against)
## Not run:
##D lawn_nearest(point, against) %>% view
## End(Not run)
|
06137123ce4ebf71849a85065b4545ac31f81a90 | 41615de6999b278c76ff3a60db4af0d4dd7c7861 | /scripts/initDOM_NFI.R | e202bb3649d233a7b4ac40b110498caffdccc55e | [] | no_license | dcyr/Montmorency-Hereford | b33b744e2b9017d4c085bbb546ae9061eb2c1257 | 003298b165dee1775753f821c95ad548835f6a49 | refs/heads/master | 2022-07-17T18:11:25.144495 | 2022-06-22T15:08:02 | 2022-06-22T15:08:02 | 206,645,965 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,497 | r | initDOM_NFI.R | ################################################################################
################################################################################
### Some code to produce a area extent within which we fetch NFI plots for
### study area calibration
### Dominic Cyr
#############
# --- Session setup -----------------------------------------------------------
# NOTE(review): rm(list = ls()) and setwd() are side-effectful in a script;
# kept as-is because the relative paths below depend on this layout.
rm(list = ls())
home <- path.expand("~")
home <- gsub("/Documents", "", home) # necessary on my Windows machine
setwd(paste(home, "Sync/Travail/ECCC/Landis-II/Montmorency-Hereford", sep ="/"))
# create (and move into) a dated working directory for this run's outputs
wwd <- paste(getwd(), Sys.Date(), sep = "/")
dir.create(wwd)
setwd(wwd)
rm(wwd)
require(raster)
require(rgeos)
require(dplyr)
# study areas to process and the buffer radius (metres) around each one
areas <- c("Hereford", "Maskinonge", "ForMont")
bufferMeters <- 200000
### input paths (LANDIS / GIS / NFI)
inputPathGIS <- paste(home, "Sync/Travail/ECCC/GIS", sep = "/")
inputPathLandis <- "../inputsLandis"
inputPathNFI <- paste(home, "Sync/Travail/ECCC/NFI", sep = "/")
source("../scripts/gdal_polygonizeR.R")
# NFI ground-plot site table (approximate plot locations) and soil table
NFI_sites <- read.csv(paste(inputPathNFI, "NFI_all_gp_site_info_approx_loc.csv", sep = "/"))
NFI_soils <- read.csv(paste(inputPathNFI, "NFI_all_gp_soil_site_info.csv", sep = "/"))
### convert UTM to lat long
# 1 "Very Fast Aboveground"
# 2 "Very Fast Belowground"
# 3 "Fast Aboveground"
# 4 "Fast Belowground"
# 5 "Medium"
# 6 "Slow Aboveground"
# 7 "Slow Belowground"
# 8 "Stem Snag"
# 9 "Other Snag"
# 10 "Extra pool"
################################################################################
################################################################################
#### A function to extract CBM dead organic matter (DOM) pools from the NFI
#### plot table. For each plot (row of `df`) it sums the source columns of
#### each pool and applies the pool's conversion factor. Returns a matrix
#### with one row per plot and columns:
####   Fast_A, MED, Slow_A, Slow_B, Sng_Stem, Sng_Oth
extract_CBM_DOMpools <- function(df) {
  # Mapping from CBM DOM pool to its NFI source columns, CBM pool id, and
  # the multiplicative conversion factor applied to the summed values.
  domPools <- list("Fast_A" = list(names = c("plotbio_fwd",
                                             "plotbio_swd"),
                                   id = "3",
                                   convFact = 50),
                   "MED" = list(names = "plotbio_wd",
                                id = "5",
                                convFact = 50),
                   "Slow_A" = list(names = "cc_for_floor_8mm",
                                   id = "6",
                                   convFact = 100),
                   "Slow_B" = list(names = c("cc_min0_15",
                                             "cc_min15_35",
                                             "cc_min35_55"),
                                   id = "7",
                                   convFact = 100),
                   "Sng_Stem" = list(names = "plotbio_lgtr_dead",
                                     id = "8",
                                     convFact = 50),
                   "Sng_Oth" = list(names = c("plotbio_stump",
                                              "plotbio_smtr_dead"),
                                    id = "9",
                                    convFact = 50))

  ## Values of -7 are treated as missing: replace them by NA in every column.
  ## seq_len() keeps this loop safe for a zero-column data frame.
  for (j in seq_len(ncol(df))) {
    y <- df[, j]
    y[which(y == -7)] <- NA
    df[, j] <- y
  }

  domSummary <- list()
  for (d in seq_along(domPools)) {
    domID <- names(domPools)[d]
    domCol <- domPools[[d]]$names
    convFactor <- domPools[[d]]$convFact
    ## Snag and medium woody-debris pools (Sng_Oth, Sng_Stem, MED) stay NA
    ## when any source column is missing; the other pools treat NA as zero.
    keep_na <- domID %in% c("Sng_Oth", "Sng_Stem", "MED")
    x <- apply(as.data.frame(df[, domCol]), 1, function(v)
      sum(v, na.rm = !keep_na) * convFactor)
    ## For these pools a zero total means "not measured", not a true zero.
    if (domID %in% c("Fast_A", "Slow_B", "Slow_A")) {
      x[x == 0] <- NA
    }
    domSummary[[domID]] <- x
  }
  # one column per pool, one row per plot
  do.call("cbind", domSummary)
}
################################################################################
################################################################################
#### creating a SpatialPointsDataFrame from the NFI plots
# Plot coordinates are UTM easting/northing plus a zone number, so plots
# must be projected zone by zone before they can be combined.
utmZone <- NFI_sites$utm_zone
u <- unique(utmZone)
u <- u[order(u)]
nfi <- list()
for(i in seq_along(u)) {
  z <- u[i]
  # site records for this UTM zone, joined with their soil attributes
  df <- NFI_sites %>%
    filter(utm_zone == z) %>%
    merge(NFI_soils, all.x = T, all.y = F)
  # append the CBM dead-organic-matter pool columns
  df <- data.frame(df, extract_CBM_DOMpools(df))
  xy <- SpatialPoints(data.frame(x = df$utm_e,
                                 y = df$utm_n),
                      proj4string=CRS(paste0("+proj=utm +zone=", z, "+datum=NAD83")))
  xy <- SpatialPointsDataFrame(xy, df)
  # reproject every zone to a common geographic CRS (EPSG:4269)
  xy <- spTransform(xy, CRS("+init=epsg:4269"))
  nfi[[i]] <- xy
}
# single point layer combining all plots with their DOM pool values
nfi_DOM <- do.call("rbind", nfi)
################################################################################
################################################################################
#### computing mean DOM pool values by ecoregion, within each study area's buffer
domSummary <- list()
for(a in areas) {
  # buffer polygon around the study area (one R object per .RData file)
  ecoBuffer <- get(load(paste0("../gis/ecoregion_Buffer200km_", a, ".RData")))
  nfi_DOM_buffer <- spTransform(nfi_DOM, CRSobj = crs(ecoBuffer))
  # point-in-polygon overlay: ecoregion of each NFI plot (NA = outside buffer)
  ecoDF <- over(nfi_DOM_buffer, ecoBuffer)
  eco <- unique(ecoDF$REGION_NAM)
  eco <- eco[!is.na(eco)]
  domSummary[[a]] <- list()
  for (e in eco) {
    domSummary[[a]][[e]] <- list()
    index <- which(ecoDF$REGION_NAM == e)
    df <- as.data.frame(nfi_DOM_buffer[index,])
    cNames <- c("Fast_A", "MED", "Slow_A", "Slow_B", "Sng_Stem", "Sng_Oth")
    for (d in seq_along(cNames)) {
      # NOTE(review): the dangling `convFact <-` just captures the if()
      # value and looks like a leftover; the real result is written into
      # domSummary inside the if() body.
      convFact <- 
      if(cNames[d] %in% colnames(df)) {
        domVal <- df[,cNames[d]]
        # mean DOM stock for this pool over all plots in ecoregion e
        x <- mean(domVal, na.rm = T)
        domSummary[[a]][[e]][[cNames[d]]] <- x
      }
    }
  }
}
#
# foo <- numeric()
# for(a in areas) {
# foo <- append(foo, sum(unlist(domSummary[[a]])))
# }
################################################################################
################################################################################
#### Assign initial DOM pool values based on the ForCS spinup proportions,
#### rescaled so that each landtype's totals follow the NFI reference stocks.
for(a in areas) {
  DOMinitPools <- read.csv(paste0("../inputsLandis/DOM-initPools_", a, ".csv"))
  # per-landtype and per-species shares of the spinup DOM amounts
  DOMinitProp <- DOMinitPools %>%
    group_by(landtype) %>%
    summarize(DOMtotalLt = sum(amountAtT0)) %>%
    ungroup() %>%
    merge(DOMinitPools) %>%
    group_by(landtype, DOMtotalLt, spp) %>%
    summarize(DOMtotalSpp = sum(amountAtT0)) %>%
    ungroup() %>%
    merge(DOMinitPools) %>%
    mutate(propSpp = DOMtotalSpp/DOMtotalLt,
           propPool = amountAtT0/DOMtotalSpp)
  ### assign an ecoregion to every landtype (majority rule on raster cells)
  landtypes <- raster(paste0("../inputsLandis/landtypes_", a, ".tif"))
  ecoBuffer <- get(load(paste0("../gis/ecoregion_Buffer200km_", a, ".RData")))
  ecoBuffer <- spTransform(ecoBuffer, CRSobj = crs(landtypes))
  # (a stray `ecoRAT <- levels(ecoBufferR)` stood here before ecoBufferR
  # existed, which errors on a fresh session; it was overwritten below
  # anyway and has been removed)
  ecoBufferR <- rasterize(ecoBuffer, landtypes)
  ecoRAT <- levels(ecoBufferR)[[1]]
  # cross-tabulate ecoregion id vs landtype, then keep the modal ecoregion
  ecoFreqTable <- table(values(ecoBufferR), values(landtypes))
  ecoMajID <- as.numeric(rownames(ecoFreqTable))[apply(ecoFreqTable, 2, which.max)]
  ecoMajID <- data.frame(landtype = colnames(ecoFreqTable),
                         ecoregion = ecoRAT[match(ecoMajID, ecoRAT$ID), "REGION_NAM"])
  DOMinitPoolsNew <- list()
  for(i in unique(DOMinitProp$landtype)) {
    e <- as.character(ecoMajID[which(ecoMajID$landtype == i), "ecoregion"])
    # NFI reference stocks for this landtype's ecoregion
    DOMnfi <- domSummary[[a]][[e]]
    DOMnfi <- data.frame(nfi_ref = as.numeric(DOMnfi),
                         DOMName = names(DOMnfi))
    index <- which(DOMinitProp$landtype == i)
    # distribute the NFI reference amount across species by spinup share
    DOMinitPoolsNew[[as.character(i)]] <- DOMinitProp[index,] %>%
      merge(DOMnfi, all.x = T) %>%
      mutate(amountAtT0 = propSpp * nfi_ref,
             amountAtT0 = round(ifelse(is.na(amountAtT0), 0, amountAtT0))) %>%
      select(landtype, spp, poolID, DOMName, amountAtT0) %>%
      arrange(landtype, spp, poolID)
  }
  DOMinitPoolsNew <- do.call("rbind", DOMinitPoolsNew)
  write.csv(DOMinitPoolsNew, file = paste0("DOM-initPools_", a, ".csv"), row.names = F)
}
|
2f1cee16a5f8e534a55deda6a16d10e1d3d8bbc8 | 2c5640811d4ab4a58f04229fd4b2d3c7c4a5a525 | /man/RcmdrPlugin.SLC-package.Rd | 94987607120ea511d46897bde43426c72d91a77a | [] | no_license | cran/RcmdrPlugin.SLC | c4ccf7abc9af9a3e364b1b8474b49ef19ec586e2 | 547896e0a380163ace909ce377655c1425131b48 | refs/heads/master | 2021-01-10T23:33:02.114780 | 2010-01-14T00:00:00 | 2010-01-14T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 861 | rd | RcmdrPlugin.SLC-package.Rd | \name{RcmdrPlugin.SLC-package}
\title{RcmdrPlugin.SLC: A GUI for SLC R package}
\alias{RcmdrPlugin.SLC-package}
\alias{RcmdrPlugin.SLC}
\docType{package}
\encoding{UTF-8}
\description{
This package provides a GUI (Graphical User Interface) for the
SLC package. It is a Plug-in for Rcmdr (R Commander).
Functions of \code{SLC} package are available through the
GUI.
}
\details{
\tabular{ll}{
Package: \tab RcmdrPlugin.SLC\cr
Type: \tab Package\cr
Version: \tab 0.1\cr
Date: \tab 2010-14-01\cr
License: \tab GPL version 2 or newer\cr
}
}
\author{
Antonio Solanas <antonio.solanas@ub.edu>, Rumen Manolov <rrumenov13@ub.edu> & Patrick Onghena <patrick.onghena@ped.kuleuven.be>.
Maintainer: Rumen Manolov <rrumenov13@ub.edu>
}
\keyword{package}
\seealso{
For more information see \code{\link[Rcmdr]{Rcmdr-package}} and \code{\link[SLC]{SLC-package}}.
}
|
d2a1286b18ebc55506003dabc82d7edccb9d2e6f | 12a1295e3bf8150f0ab3f2f026114ec1f4ce8e2b | /version_to_upload/function.R | dd479425c9373c55c45e45320d452e38623db4fa | [] | no_license | jojoh2943/MiBiOmics | b48cfeaec8817d1cc94c6e4a61d73d4929c73bf0 | a807463ef4f21126577c3d2eba8636a4c9cc2aed | refs/heads/master | 2020-04-22T11:22:22.172443 | 2019-11-27T08:48:31 | 2019-11-27T08:48:31 | 170,337,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 71,354 | r | function.R | #-------------------------#
#### GENERAL FUNCTIONS ####
#-------------------------#
'%!in%' <- function(x,y)!('%in%'(x,y))
rename_axis_drivers <- function(axis_drivers, colnames_df1, colnames_df2, colnames_df3, n_df =3){
right_names <- c()
if (n_df == 3){
for (row in 1:nrow(axis_drivers)){
if (substr(rownames(axis_drivers)[row], nchar(rownames(axis_drivers)[row])- 3, nchar(rownames(axis_drivers)[row])) == ".df1"){
if (substr(rownames(axis_drivers)[row], 0, 1)== "X"){
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df1){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}else{
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %in% colnames_df1){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}
}else{
if (substr(rownames(axis_drivers)[row], nchar(rownames(axis_drivers)[row])- 3, nchar(rownames(axis_drivers)[row])) == ".df2"){
if (substr(rownames(axis_drivers)[row], 0, 1)== "X"){
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df2){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}else{
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df2){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}
}else{
if (substr(rownames(axis_drivers)[row], nchar(rownames(axis_drivers)[row])- 3, nchar(rownames(axis_drivers)[row])) == ".df3"){
if (substr(rownames(axis_drivers)[row], 0, 1)== "X"){
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df3){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}else{
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df3){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
} }
}
}
}
}
}else{
for (row in 1:nrow(axis_drivers)){
if (substr(rownames(axis_drivers)[row], nchar(rownames(axis_drivers)[row])- 3, nchar(rownames(axis_drivers)[row])) == ".df1"){
if (substr(rownames(axis_drivers)[row], 0, 1)== "X"){
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df1){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}else{
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df1){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}
}else{
if (substr(rownames(axis_drivers)[row], nchar(rownames(axis_drivers)[row])- 3, nchar(rownames(axis_drivers)[row])) == ".df2"){
if (substr(rownames(axis_drivers)[row], 0, 1)== "X"){
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df2){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}else{
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4) %!in% colnames_df1){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])- 4))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])- 4))
}
}
}else{
if (substr(rownames(axis_drivers)[row], 0, 1)== "X"){
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])) %!in% colnames_df2){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])))
}
}else{
if (substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])) %!in% colnames_df1){
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 2, nchar(rownames(axis_drivers)[row])))
}else{
right_names <- c(right_names, substr(rownames(axis_drivers)[row], 0, nchar(rownames(axis_drivers)[row])))
}
}
}
}
}
}
return(right_names)
}
# Total-sum scaling (TSS): rescale a numeric vector so its elements sum to 1,
# converting raw counts into relative proportions.
TSS.divide <- function(x) {
  total <- sum(x)
  x / total
}
# Negated %in% operator (re-declared here; an identical definition appears
# earlier in this file). TRUE where an element of x is absent from y.
'%!in%' <- function(x, y) !(x %in% y)
# Euclidean distance between two numeric vectors of equal length.
euc.dist <- function(x1, x2) {
  diffs <- x1 - x2
  sqrt(sum(diffs * diffs))
}
# Min-max normalization: linearly rescale x onto the [0, 1] interval.
# NOTE(review): returns NaN when all elements are equal (max == min) and
# propagates NA values — callers should guard against those cases.
normalize <- function(x) {
  lo <- min(x)
  hi <- max(x)
  (x - lo) / (hi - lo)
}
## Specific functions: PLS and VIP Scores
# VIP (Variable Importance in Projection) score of predictor j over the first
# h components of a PLS model fitted with the orthogonal-scores algorithm.
#
# object : a pls model object (method "oscorespls", single response) exposing
#          $Yloadings, $scores and $loading.weights.
# j      : index of the predictor variable.
# h      : number of components to accumulate.
# Returns the scalar VIP_jh value.
VIPjh <- function(object, j, h) {
  if (object$method != "oscorespls")
    stop("Only implemented for orthogonal scores algorithm")
  if (nrow(object$Yloadings) > 1)
    stop("Only implemented for single-response models")
  yload <- c(object$Yloadings)[1:h]
  comp_scores <- object$scores[, 1:h, drop = FALSE]
  # Sum of squares explained by each component.
  ess <- yload^2 * colSums(comp_scores^2)
  weights <- object$loading.weights[, 1:h, drop = FALSE]
  wnorm2 <- colSums(weights^2)
  sqrt(nrow(weights) * sum(ess * weights[j, ]^2 / wnorm2) / sum(ess))
}
#Function for hive plot
# Build the start/end coordinates of the curves linking nodes across the two
# axes of a hive-style plot.
#
# Each edge (fromNode -> toNode) starts at the fromNode's position on the
# horizontal axis (x = df_ggplot$x1, y = 0) and ends at the toNode's position
# on the vertical axis (x = 0, y = df_ggplot$y2).
#
# edge_df   : data.frame with columns fromNode and toNode (node labels).
# df_ggplot : data.frame with columns label, x1 and y2 (node coordinates);
#             labels are assumed unique.
# Returns a data.frame with one row per edge and columns label_fromNode,
# vector_x, vector_y, label_toNode, vector_xend, vector_yend.
vector_geom_curve <- function(edge_df, df_ggplot){
  # Vectorized lookup replaces the original per-row which()/c() loop, which
  # rescanned the label column for every edge and grew six vectors
  # quadratically.
  from_idx <- match(as.character(edge_df$fromNode), as.character(df_ggplot$label))
  to_idx <- match(as.character(edge_df$toNode), as.character(df_ggplot$label))
  n <- nrow(edge_df)
  df_geom_curve <- data.frame(
    label_fromNode = edge_df$fromNode,
    vector_x = df_ggplot$x1[from_idx],
    vector_y = rep(0, n),
    label_toNode = edge_df$toNode,
    vector_xend = rep(0, n),
    vector_yend = df_ggplot$y2[to_idx]
  )
  return(df_geom_curve)
}
#### HIVE FUNCTIONS ####
# Assemble a hive-plot data object linking the WGCNA modules of three omics
# tables, one table per axis.
#
# l_WGCNA_D1/D2/D3 : WGCNA result lists; element [[5]] is used throughout and
#                    appears to be the module-eigengene data.frame with
#                    "ME<color>" column names — TODO confirm.
# myCorrs     : list of three cross-table correlation results, each a list of
#               [[1]] correlation matrix and [[2]] p-value matrix whose
#               dimnames match the "<ME...>_DFx" node labels built below.
# myAnnots    : list of three sample-annotation data.frames (one per table).
# correlation : method forwarded to cor() (default "spearman").
# trait       : annotation column used to size node radii.
# exportPDF, exportSVG, sizePlot, nameFile : accepted but not used in this
#               function body.
# Returns a list(nodes, edges, type, desc, axis.cols) for hive-plot drawing.
hive_myLayers <- function(l_WGCNA_D1, l_WGCNA_D2, l_WGCNA_D3, myCorrs, myAnnots, correlation= "spearman", trait, exportPDF = FALSE, exportSVG = FALSE, sizePlot = 10, nameFile = "hive_D1_D2_D3"){
  # NOTE(review): require() inside a function is discouraged; leaflet is only
  # needed for colorNumeric() below.
  require(leaflet)
  annot_D1 <- myAnnots[[1]]
  annot_D2 <- myAnnots[[2]]
  annot_D3 <- myAnnots[[3]]
  WGCNA_list <- list()
  WGCNA_list[[1]] <- l_WGCNA_D1
  WGCNA_list[[2]] <- l_WGCNA_D2
  WGCNA_list[[3]] <- l_WGCNA_D3
  # Diverging palette mapping correlations over [1, -1] to edge colors.
  pal <- colorNumeric(palette = "RdBu", 1:-1)
  # Create nodes dataframe: one node per module on each of the three axes,
  # plus one zero-size "extreme" anchor node per axis.
  id <- c(seq(from = 1, to = (ncol(l_WGCNA_D1[[5]])+ ncol(l_WGCNA_D2[[5]]) + ncol(l_WGCNA_D3[[5]]) +3 )))
  label <- c( as.vector(paste(colnames(l_WGCNA_D1[[5]]), "DF1", sep = "_")), as.vector(paste( colnames(l_WGCNA_D2[[5]]), "DF2", sep = "_")), as.vector(paste( colnames(l_WGCNA_D3[[5]]), "DF3", sep = "_")), "extreme_DF1", "extreme_DF2", "extreme_DF3" )
  # Node color: characters 3..30 of the column name, i.e. the module color
  # with the leading "ME" stripped.
  color <- c( as.vector(substr(colnames(l_WGCNA_D1[[5]]), 3, 30)), as.vector(substr( colnames(l_WGCNA_D2[[5]]), 3, 30)), as.vector(substr(colnames(l_WGCNA_D3[[5]]), 3, 30)), "white", "white", "white" )
  axis <- c( as.vector(rep(1, ncol(l_WGCNA_D1[[5]]))), as.vector(rep(2, ncol(l_WGCNA_D2[[5]]))), as.vector(rep(3, ncol(l_WGCNA_D3[[5]]))), 1, 2, 3 )
  size <- c( as.vector(rep(1, sum(ncol(l_WGCNA_D1[[5]]), ncol(l_WGCNA_D2[[5]]), ncol(l_WGCNA_D3[[5]])))), 0, 0, 0)
  # Node radius = |module-trait correlation| * 100 per table; categorical
  # traits are converted factor -> numeric codes before correlating.
  radius <- c()
  for (i in 1:3){
    annot <- myAnnots[[i]]
    myWGCNA <- WGCNA_list[[i]]
    if (is.numeric(annot[,trait])){
      moduleTraitCor = cor(myWGCNA[[5]], annot[,trait], use = "p", method = correlation)
    }else{
      annot2<- annot
      annot2[,trait] <- as.factor(annot2[, trait])
      annot2[,trait] <- as.numeric(annot2[,trait])
      moduleTraitCor = cor(myWGCNA[[5]], annot2[,trait], use = "p", method = correlation)
    }
    radius <- c(radius, abs(moduleTraitCor)*100)
  }
  # Anchor nodes sit at the maximum radius of each axis.
  radius <- c(radius, 100, 100, 100)
  nodes <- data.frame(id = id, lab= as.character(label), axis = as.integer(axis), radius= radius, size= size, color= as.character(color))
  # Do we set the size of the node to zero if the p-value is not significative ?
  # create edge dataframe: keep pairs with p < 0.05 and |cor| > 0.4.
  id1 <- c()
  id2 <- c()
  weight <- c()
  color <- c()
  for (k in 1:3){
    myCorr <- myCorrs[[k]]
    for (i in 1:nrow(myCorr[[2]])){
      for (j in 1:ncol(myCorr[[2]])){
        if (myCorr[[2]][i, j] < 0.05 && abs(myCorr[[1]][i, j]) > 0.4){
          # NOTE(review): leftover debug output — consider removing.
          print(rownames(myCorr[[2]])[i])
          print(colnames(myCorr[[2]])[j])
          myNode1 <-nodes[which(nodes$lab==rownames(myCorr[[2]])[i]),]
          myNode2 <-nodes[which(nodes$lab==colnames(myCorr[[2]])[j]),]
          id1 <- c(id1, myNode1$id)
          id2 <- c(id2, myNode2$id)
          # Edge width grows super-linearly with |correlation|.
          weight <- c(weight, round(exp(abs(myCorr[[1]][i, j]))^3))
          color <- c(color, pal(c(myCorr[[1]][i, j])))
        }
      }
    }
  }
  edges <- data.frame(id1 = id1, id2 = id2, weight = weight, color = as.character(color))
  type <- "2D"
  desc <- "Hive Plot 3 Layers"
  axis.cols <- c("#636363", "#636363", "#636363")
  myHive <- list(nodes = nodes, edges = edges, type = type, desc = desc, axis.cols= axis.cols)
  return(myHive)
}
# Two-axis variant of hive_myLayers(): assemble a hive-plot data object
# linking the WGCNA modules of two omics tables.
#
# l_WGCNA_D1/D2 : WGCNA result lists; element [[5]] is used throughout and
#                 appears to be the module-eigengene data.frame with
#                 "ME<color>" column names — TODO confirm.
# myCorr      : a single cross-table correlation result: list of [[1]]
#               correlation matrix and [[2]] p-value matrix whose dimnames
#               match the "<ME...>_DFx" node labels built below.
# myAnnots    : list of two sample-annotation data.frames.
# correlation : method forwarded to cor() (default "spearman").
# trait       : annotation column used to size node radii.
# exportPDF, exportSVG, sizePlot, nameFile : accepted but not used in this
#               function body.
# Returns a list(nodes, edges, type, desc, axis.cols) for hive-plot drawing.
hive_my2Layers <- function(l_WGCNA_D1, l_WGCNA_D2, myCorr, myAnnots, correlation= "spearman", trait, exportPDF = FALSE, exportSVG = FALSE, sizePlot = 10, nameFile = "hive_D1_D2_D3"){
  # NOTE(review): require() inside a function is discouraged; leaflet is only
  # needed for colorNumeric() below.
  require(leaflet)
  annot_D1 <- myAnnots[[1]]
  annot_D2 <- myAnnots[[2]]
  WGCNA_list <- list()
  WGCNA_list[[1]] <- l_WGCNA_D1
  WGCNA_list[[2]] <- l_WGCNA_D2
  # Diverging palette mapping correlations over [1, -1] to edge colors.
  pal <- colorNumeric(palette = "RdBu", 1:-1)
  # Create nodes dataframe: one node per module on each axis, plus one
  # zero-size "extreme" anchor node per axis.
  id <- c(seq(from = 1, to = (ncol(l_WGCNA_D1[[5]])+ ncol(l_WGCNA_D2[[5]]) + 2 )))
  label <- c( as.vector(paste(colnames(l_WGCNA_D1[[5]]), "DF1", sep = "_")), as.vector(paste( colnames(l_WGCNA_D2[[5]]), "DF2", sep = "_")), "extreme_DF1", "extreme_DF2")
  # Node color: column name with the leading "ME" stripped (chars 3..30).
  color <- c( as.vector(substr(colnames(l_WGCNA_D1[[5]]), 3, 30)), as.vector(substr( colnames(l_WGCNA_D2[[5]]), 3, 30)), "white", "white")
  axis <- c( as.vector(rep(1, ncol(l_WGCNA_D1[[5]]))), as.vector(rep(2, ncol(l_WGCNA_D2[[5]]))), 1, 2 )
  size <- c( as.vector(rep(1, sum(ncol(l_WGCNA_D1[[5]]), ncol(l_WGCNA_D2[[5]])))), 0, 0)
  # Node radius = |module-trait correlation| * 100 per table; categorical
  # traits are converted factor -> numeric codes before correlating.
  radius <- c()
  for (i in 1:2){
    annot <- myAnnots[[i]]
    myWGCNA <- WGCNA_list[[i]]
    if (is.numeric(annot[,trait])){
      moduleTraitCor = cor(myWGCNA[[5]], annot[,trait], use = "p", method = correlation)
    }else{
      annot2<- annot
      annot2[,trait] <- as.factor(annot2[, trait])
      annot2[,trait] <- as.numeric(annot2[,trait])
      moduleTraitCor = cor(myWGCNA[[5]], annot2[,trait], use = "p", method = correlation)
    }
    radius <- c(radius, abs(moduleTraitCor)*100)
  }
  # Anchor nodes sit at the maximum radius of each axis.
  radius <- c(radius, 100, 100)
  nodes <- data.frame(id = id, lab= as.character(label), axis = as.integer(axis), radius= radius, size= size, color= as.character(color))
  # Do we set the size of the node to zero if the p-value is not significative ?
  # create edge dataframe: keep pairs with p < 0.05 and |cor| > 0.4.
  id1 <- c()
  id2 <- c()
  weight <- c()
  color <- c()
  for (i in 1:nrow(myCorr[[2]])){
    for (j in 1:ncol(myCorr[[2]])){
      if (myCorr[[2]][i, j] < 0.05 && abs(myCorr[[1]][i, j]) > 0.4){
        myNode1 <-nodes[which(nodes$lab==rownames(myCorr[[2]])[i]),]
        myNode2 <-nodes[which(nodes$lab==colnames(myCorr[[2]])[j]),]
        id1 <- c(id1, myNode1$id)
        id2 <- c(id2, myNode2$id)
        # Edge width grows super-linearly with |correlation|.
        weight <- c(weight, round(exp(abs(myCorr[[1]][i, j]))^3))
        color <- c(color, pal(c(myCorr[[1]][i, j])))
      }
    }
  }
  edges <- data.frame(id1 = id1, id2 = id2, weight = weight, color = as.character(color))
  type <- "2D"
  desc <- "Hive Plot 2 Layers"
  axis.cols <- c("#636363", "#636363")
  myHive <- list(nodes = nodes, edges = edges, type = type, desc = desc, axis.cols= axis.cols)
  return(myHive)
}
plotMyHive <- function(HPD, ch = 1, method = "abs",
dr.nodes = TRUE, bkgnd = "black",
axLabs = NULL, axLab.pos = NULL, axLab.gpar = NULL,
anNodes = NULL, anNode.gpar = NULL, grInfo = NULL,
arrow = NULL, np = TRUE, anCoord = "local", ...) {
# Function to plot hive plots using grid graphics
# Inspired by the work of Martin Kryzwinski
# Bryan Hanson, DePauw Univ, Feb 2011 onward
# This function is intended to draw in 2D for nx from 2 to 6
# The results will be similar to the original hive plot concept
##### Set up some common parameters
if (!HPD$type == "2D") stop("This is not a 2D hive data set: use plot3dHive instead")
# chkHPD(HPD)
nx <- length(unique(HPD$nodes$axis))
if (nx == 1) stop("Something is wrong: only one axis seems to be present")
# Send out for ranking/norming/pruning/inverting if requested
if (!method == "abs") HPD <- manipAxis(HPD, method, ...)
nodes <- HPD$nodes
edges <- HPD$edges
axis.cols <- HPD$axis.cols
# Fix up center hole
nodes$radius <- nodes$radius + ch
HPD$nodes$radius <- nodes$radius
##### Some convenience functions, only defined in this function environ.
##### The two long functions need to stay here for simplicity, since
##### all of the radius checking etc is here and if moved elsewhere,
##### these calculations would have to be redone or results passed.
p2cX <- function(r, theta) { x <- r*cos(theta*2*pi/360) }
p2cY <- function(r, theta) { y <- r*sin(theta*2*pi/360) }
addArrow <- function(arrow, nx) {
if (!length(arrow) >= 5) stop("Too few arrow components")
if (is.null(axLab.gpar)) {
if (bkgnd == "black") axLab.gpar <- gpar(fontsize = 12, col = "white", lwd = 2)
if (!bkgnd == "black") axLab.gpar <- gpar(fontsize = 12, col = "black", lwd = 2)
}
a <- as.numeric(arrow[2])
rs <- as.numeric(arrow[3])
re <- as.numeric(arrow[4])
b <- as.numeric(arrow[5]) # label offset from end of arrow
x.st <- p2cX(rs, a)
y.st <- p2cY(rs, a)
x.end <- p2cX(re, a)
y.end <- p2cY(re, a)
x.lab <- p2cX(re + b, a) # figure arrow label position
y.lab <- p2cY(re + b, a)
al <- 0.2*(re-rs) # arrow head length
# for nx = 2 only, offset the arrow
# in the y direction to save space overall
if (nx == 2) {
if (is.na(arrow[6])) {
arrow[6] <- 0
cat("\tThe arrow can be offset vertically; see ?plotHive\n")
}
y.st <- y.st + as.numeric(arrow[6])
y.end <- y.end + as.numeric(arrow[6])
y.lab <- y.lab + as.numeric(arrow[6])
}
grid.lines(x = c(x.st, x.end), y = c(y.st, y.end),
arrow = arrow(length = unit(al, "native")),
default.units = "native", gp = axLab.gpar)
grid.text(arrow[1], x.lab, y.lab, default.units = "native", gp = axLab.gpar)
}
annotateNodes <- function(anNodes, nodes, nx, anCoord) {
if (is.null(anNode.gpar)) {
if (bkgnd == "black") anNode.gpar <- gpar(fontsize = 10, col = "white", lwd = 0.5)
if (!bkgnd == "black") anNode.gpar <- gpar(fontsize = 10, col = "black", lwd = 0.5)
}
ann <- utils::read.csv(anNodes, header = TRUE, colClasses = c(rep("character", 2), rep("numeric", 5)))
cds <- getCoords(anNodes, anCoord, nodes)
grid.segments(x0 = cds$x.st, x1 = cds$x.end, y0 = cds$y.st, y1 = cds$y.end,
default.units = "native", gp = anNode.gpar)
grid.text(ann$node.text, cds$x.lab, cds$y.lab, hjust = ann$hjust, vjust = ann$vjust,
default.units = "native", gp = anNode.gpar, ...)
}
addGraphic <- function(grInfo, nodes, nx, anCoord) {
gr <- utils::read.csv(grInfo, header = TRUE, stringsAsFactors = FALSE)
cds <- getCoords(grInfo, anCoord, nodes)
grid.segments(x0 = cds$x.st, x1 = cds$x.end, y0 = cds$y.st, y1 = cds$y.end,
default.units = "native", gp = anNode.gpar)
# readJPEG and readPNG are not vectorized, grab each graphic in turn
# Figure out if we are using jpg or png files
ext <- substr(gr$path[1], nchar(gr$path[1])-2, nchar(gr$path[1]))
if ((ext == "png") | (ext == "PNG")) ext <- "png"
if ((ext == "jpg") | (ext == "JPG") | (ext == "peg") | (ext =="PEG")) ext <- "jpg"
# Now draw the images
if (ext == "jpg") {
for (n in 1:nrow(gr)) {
grid.raster(readJPEG(gr$path[n]),
x = cds$x.lab[n], y = cds$y.lab[n], default.units = "native", width = gr$width[n])
}
}
if (ext == "png") {
for (n in 1:nrow(gr)) {
grid.raster(readPNG(gr$path[n]),
x = cds$x.lab[n], y = cds$y.lab[n], default.units = "native", width = gr$width[n])
}
}
}
getCoords <- function(file, anCoord, nodes) {
# Figure out the coordinates of the line segments and labels/graphics
# anNodes and grInfo both contains certain columns which are used here
df <- utils::read.csv(file, header = TRUE)
id <- rep(NA, nrow(df))
for (n in 1:nrow(df)) {
pat <- paste("\\b", df$node.lab[n], "\\b", sep = "")
id[n] <- grep(pat, nodes$lab)
}
N <- matrix(data = c(
0, 180, NA, NA, NA, NA,
90, 210, 330, NA, NA, NA,
90, 180, 270, 0, NA, NA,
90, 162, 234, 306, 18, NA,
90, 150, 210, 270, 330, 390),
byrow = TRUE, nrow = 5)
ax <- nodes$axis[id] # axis number
for (n in 1:length(ax)) {
ax[n] <- N[nx-1,ax[n]]
}
# Figure coords in requested reference frame
x.st <- p2cX(nodes$radius[id], ax)
y.st <- p2cY(nodes$radius[id], ax)
if (anCoord == "local") {
x.end <- x.st + p2cX(df$radius, df$angle)
y.end <- y.st + p2cY(df$radius, df$angle)
x.lab <- x.st + p2cX(df$radius + df$offset, df$angle)
y.lab <- y.st + p2cY(df$radius + df$offset, df$angle)
}
if (anCoord == "global") {
x.end <- p2cX(df$radius, df$angle)
y.end <- p2cY(df$radius, df$angle)
x.lab <- p2cX(df$radius + df$offset, df$angle)
y.lab <- p2cY(df$radius + df$offset, df$angle)
}
retval <- data.frame(x.st, y.st, x.end, y.end, x.lab, y.lab)
retval
}
###############
# Figure out which nodes to draw for each edge
# Since they are in random order
# Do this once/early to save time
id1 <- id2 <- c()
for (n in 1:nrow(edges)) {
pat1 <- paste("\\b", edges$id1[n], "\\b", sep = "")
pat2 <- paste("\\b", edges$id2[n], "\\b", sep = "")
id1 <- c(id1, grep(pat1, nodes$id))
id2 <- c(id2, grep(pat2, nodes$id))
}
##### Two dimensional case (using grid graphics)
# Prep axes first
if (nx == 2) {
# n1 <- subset(nodes, axis == 1)
# n2 <- subset(nodes, axis == 2)
n1 <- nodes[nodes[,"axis"] == 1,]
n2 <- nodes[nodes[,"axis"] == 2,]
max1 <- max(n1$radius)
max2 <- max(n2$radius)
min1 <- min(n1$radius)
min2 <- min(n2$radius)
r.st <- c(min1, min2) # in polar coordinates
axst <- c(0, 180)
x0a = p2cX(r.st, axst)
y0a = p2cY(r.st, axst)
r.end <- c(max1, max2)
axend <- c(0, 180)
x1a = p2cX(r.end, axend)
y1a = p2cY(r.end, axend)
# Set up grid graphics viewport
md <- max(abs(c(x0a, y0a, x1a, y1a)))*1.5 # max dimension
# 1.5 is used in case of labels
if (np) grid.newpage()
grid.rect(gp = gpar(col = NA, fill = bkgnd))
vp <- viewport(x = 0.5, y = 0.5, width = 1, height = 1,
xscale = c(-md, md), yscale = c(-md, md),
name = "3DHivePlot")
pushViewport(vp)
# Now draw edges
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if (nodes$axis[id1[n]] == 1) { # set up edge start params 1st
th.st <- c(th.st, 0)
r.st <- c(r.st, nodes$radius[id1[n]])
}
if (nodes$axis[id1[n]] == 2) {
th.st <- c(th.st, 180)
r.st <- c(r.st, nodes$radius[id1[n]])
}
if (nodes$axis[id2[n]] == 1) { # now edge end params
th.end <- c(th.end, 0)
r.end <- c(r.end, nodes$radius[id2[n]])
}
if (nodes$axis[id2[n]] == 2) {
th.end <- c(th.end, 180)
r.end <- c(r.end, nodes$radius[id2[n]])
}
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = 0.5)
}
# Draw axes
grid.segments(x0a, y0a, x1a, y1a,
gp = gpar(col = HPD$axis.cols, lwd = 8),
default.units = "native")
# Now add nodes
if (dr.nodes) {
r <- c(n1$radius, n2$radius)
theta <- c(rep(0, length(n1$radius)),
rep(180, length(n2$radius)))
x = p2cX(r, theta)
y = p2cY(r, theta)
grid.points(x, y, pch = 20, gp = gpar(cex = c(n1$size, n2$size), col = c(n1$color, n2$color)))
}
# Now label axes
if (!is.null(axLabs)) {
if (!length(axLabs) == nx) stop("Incorrect number of axis labels")
if (is.null(axLab.gpar)) axLab.gpar <- gpar(fontsize = 12, col = "white")
r <- c(max1, max2)
if (is.null(axLab.pos)) axLab.pos <- r*0.1
r <- r + axLab.pos
th <- c(0, 180)
x <- p2cX(r, th)
y <- p2cY(r, th)
grid.text(axLabs, x, y, gp = axLab.gpar, default.units = "native", ...)
}
# Add a legend arrow & any annotations
if (!is.null(arrow)) addArrow(arrow, nx)
if (!is.null(anNodes)) annotateNodes(anNodes, nodes, nx, anCoord)
if (!is.null(grInfo)) addGraphic(grInfo, nodes, nx, anCoord)
} # end of 2D
##### Three dimensional case (using grid graphics)
# Prep axes first
if (nx == 3) {
# n1 <- subset(nodes, axis == 1)
# n2 <- subset(nodes, axis == 2)
# n3 <- subset(nodes, axis == 3)
n1 <- nodes[nodes[,"axis"] == 1,]
n2 <- nodes[nodes[,"axis"] == 2,]
n3 <- nodes[nodes[,"axis"] == 3,]
max1 <- max(n1$radius)
max2 <- max(n2$radius)
max3 <- max(n3$radius)
min1 <- min(n1$radius)
min2 <- min(n2$radius)
min3 <- min(n3$radius)
r.st <- c(min1, min2, min3) # in polar coordinates
axst <- c(90, 210, 330)
x0a = p2cX(r.st, axst)
y0a = p2cY(r.st, axst)
r.end <- c(max1, max2, max3)
axend <- c(90, 210, 330)
x1a = p2cX(r.end, axend)
y1a = p2cY(r.end, axend)
# Set up grid graphics viewport
md <- max(abs(c(x0a, y0a, x1a, y1a)))*1.3 # max dimension
if (np) grid.newpage()
grid.rect(gp = gpar(col = NA, fill = bkgnd))
vp <- viewport(x = 0.5, y = 0.5, width = 1, height = 1,
xscale = c(-md, md), yscale = c(-md, md), name = "3DHivePlot")
pushViewport(vp)
# Now draw edges (must do in sets as curvature is not vectorized)
# Axis 1 -> 2
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 1) & (nodes$axis[id2[n]] == 2)) {
th.st <- c(th.st, 90)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 210)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = 0.5)
}
# Axis 2 -> 3
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 2) & (nodes$axis[id2[n]] == 3)) {
th.st <- c(th.st, 210)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 330)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = 0.5)
}
# Axis 3 -> 1
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 3) & (nodes$axis[id2[n]] == 1)) {
th.st <- c(th.st, 330)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 90)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = 0.5)
}
# Axis 1 -> 3
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 1) & (nodes$axis[id2[n]] == 3)) {
th.st <- c(th.st, 90)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 330)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = -0.5)
}
# Axis 3 -> 2
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 3) & (nodes$axis[id2[n]] == 2)) {
th.st <- c(th.st, 330)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 210)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = -0.5)
}
# Axis 2 -> 1
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 2) & (nodes$axis[id2[n]] == 1)) {
th.st <- c(th.st, 210)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 90)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = -0.5)
}
# Axis 1 -> 1, 2 -> 2 etc (can be done as a group since curvature can be fixed)
r.st <- r.end <- th.st <- th.end <- ecol <- ewt <- c()
for (n in 1:nrow(edges)) {
if ((nodes$axis[id1[n]] == 1) & (nodes$axis[id2[n]] == 1)) {
th.st <- c(th.st, 90)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 90)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
if ((nodes$axis[id1[n]] == 2) & (nodes$axis[id2[n]] == 2)) {
th.st <- c(th.st, 210)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 210)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
if ((nodes$axis[id1[n]] == 3) & (nodes$axis[id2[n]] == 3)) {
th.st <- c(th.st, 330)
r.st <- c(r.st, nodes$radius[id1[n]])
th.end <- c(th.end, 330)
r.end <- c(r.end, nodes$radius[id2[n]])
ecol <- c(ecol, edges$color[n])
ewt <- c(ewt, edges$weight[n])
}
}
x0 = p2cX(r.st, th.st)
y0 = p2cY(r.st, th.st)
x1 = p2cX(r.end, th.end)
y1 = p2cY(r.end, th.end)
if (!length(x0) == 0) {
grid.curve(x0, y0, x1, y1,
default.units = "native", ncp = 5, square = FALSE,
gp = gpar(col = ecol, lwd = ewt), curvature = 0.5)
}
# Draw axes
grid.segments(x0a, y0a, x1a, y1a,
gp = gpar(col = HPD$axis.cols, lwd = 3),
default.units = "native")
# Now add nodes
if (dr.nodes) {
r <- c(n1$radius, n2$radius, n3$radius)
theta <- c(rep(90, length(n1$radius)),
rep(210, length(n2$radius)),
rep(330, length(n3$radius)))
x = p2cX(r, theta)
y = p2cY(r, theta)
grid.points(x, y, pch = 20, gp = gpar(cex = c(n1$size, n2$size, n3$size),
col = c(n1$color, n2$color, n3$color)))
}
# Now label axes
if (!is.null(axLabs)) {
if (!length(axLabs) == nx) stop("Incorrect number of axis labels")
if (is.null(axLab.gpar)) axLab.gpar <- gpar(fontsize = 12, col = "white")
r <- c(max1, max2, max3)
if (is.null(axLab.pos)) axLab.pos <- r*0.1
r <- r + axLab.pos
th <- c(90, 210, 330)
x <- p2cX(r, th)
y <- p2cY(r, th)
grid.text(axLabs, x, y, gp = axLab.gpar, default.units = "native", ...)
}
# Add a legend arrow & any annotations
if (!is.null(arrow)) addArrow(arrow, nx)
if (!is.null(anNodes)) annotateNodes(anNodes, nodes, nx, anCoord)
if (!is.null(grInfo)) addGraphic(grInfo, nodes, nx, anCoord)
} # end of 3D
##### Four dimensional case (using grid graphics)
# Prep axes first
if (nx == 4) {
# n1 <- subset(nodes, axis == 1)
# n2 <- subset(nodes, axis == 2)
# n3 <- subset(nodes, axis == 3)
# n4 <- subset(nodes, axis == 4)
n1 <- nodes[nodes[,"axis"] == 1,]
n2 <- nodes[nodes[,"axis"] == 2,]
n3 <- nodes[nodes[,"axis"] == 3,]
n4 <- nodes[nodes[,"axis"] == 4,]
max1 <- max(n1$radius)
max2 <- max(n2$radius)
max3 <- max(n3$radius)
max4 <- max(n4$radius)
min1 <- min(n1$radius)
min2 <- min(n2$radius)
min3 <- min(n3$radius)
min4 <- min(n4$radius)
r.st <- c(min1, min2, min3, min4) # in polar coordinates
axst <- c(90, 180, 270, 0)
x0a = p2cX(r.st, axst)
y0a = p2cY(r.st, axst)
r.end <- c(max1, max2, max3, max4)
axend <- c(90, 180, 270, 0)
# Cartesian end points of the 4 axes (r.end / axend computed just above).
x1a <- p2cX(r.end, axend)
y1a <- p2cY(r.end, axend)
# Set up grid graphics viewport
md <- max(abs(c(x0a, y0a, x1a, y1a))) * 1.5 # max dimension
if (np) grid.newpage()
grid.rect(gp = gpar(col = NA, fill = bkgnd))
vp <- viewport(x = 0.5, y = 0.5, width = 1, height = 1,
  xscale = c(-md, md), yscale = c(-md, md), name = "3DHivePlot")
pushViewport(vp)
# Angular position (degrees) of each of the 4 axes.
axAng <- c(90, 180, 270, 0)
# Draw all edges running from axis a1 to axis a2 as one batch.
# grid.curve() does not vectorize over curvature, so edges bending in
# opposite directions must be drawn in separate calls; this helper
# replaces the former copy-pasted per-pair accumulation loops (which
# also grew vectors with c() in O(n^2) fashion).
drawAxisPair <- function(a1, a2, curv) {
  sel <- which(nodes$axis[id1] == a1 & nodes$axis[id2] == a2)
  if (length(sel) == 0L) return(invisible(NULL))
  rs <- nodes$radius[id1[sel]] # start radii
  re <- nodes$radius[id2[sel]] # end radii
  ths <- rep(axAng[a1], length(sel))
  the <- rep(axAng[a2], length(sel))
  grid.curve(p2cX(rs, ths), p2cY(rs, ths),
    p2cX(re, the), p2cY(re, the),
    default.units = "native", ncp = 5, square = FALSE,
    gp = gpar(col = edges$color[sel], lwd = edges$weight[sel]),
    curvature = curv)
  invisible(NULL)
}
# "Forward" edges (toward the next axis) bow one way ...
drawAxisPair(1, 2, 0.5)
drawAxisPair(2, 3, 0.5)
drawAxisPair(3, 4, 0.5)
drawAxisPair(4, 1, 0.5)
# ... "reverse" edges bow the other way so the two sets do not overlap ...
drawAxisPair(1, 4, -0.5)
drawAxisPair(4, 3, -0.5)
drawAxisPair(3, 2, -0.5)
drawAxisPair(2, 1, -0.5)
# ... and edges within a single axis always bow the same way.
for (a in 1:4) drawAxisPair(a, a, 0.5)
# Draw axes
grid.segments(x0a, y0a, x1a, y1a,
  gp = gpar(col = HPD$axis.cols, lwd = 3),
  default.units = "native")
# Now add nodes (n1..n4 were subset by axis earlier in this branch)
if (dr.nodes) {
  r <- c(n1$radius, n2$radius, n3$radius, n4$radius)
  theta <- rep(axAng, times = c(length(n1$radius), length(n2$radius),
    length(n3$radius), length(n4$radius)))
  grid.points(p2cX(r, theta), p2cY(r, theta), pch = 20,
    gp = gpar(cex = c(n1$size, n2$size, n3$size, n4$size),
      col = c(n1$color, n2$color, n3$color, n4$color)))
}
# Now label axes, slightly beyond each axis end
if (!is.null(axLabs)) {
  if (length(axLabs) != nx) stop("Incorrect number of axis labels")
  if (is.null(axLab.gpar)) axLab.gpar <- gpar(fontsize = 12, col = "white")
  r <- c(max1, max2, max3, max4)
  if (is.null(axLab.pos)) axLab.pos <- r * 0.1
  r <- r + axLab.pos
  grid.text(axLabs, p2cX(r, axAng), p2cY(r, axAng),
    gp = axLab.gpar, default.units = "native", ...)
}
# Add a legend arrow & any annotations
if (!is.null(arrow)) addArrow(arrow, nx)
if (!is.null(anNodes)) annotateNodes(anNodes, nodes, nx, anCoord)
if (!is.null(grInfo)) addGraphic(grInfo, nodes, nx, anCoord)
} # end of 4D
##### Five dimensional case (using grid graphics)
# Prep axes first
if (nx == 5) {
  # Split the node list by axis; each axis spans min..max node radius.
  n1 <- nodes[nodes[, "axis"] == 1, ]
  n2 <- nodes[nodes[, "axis"] == 2, ]
  n3 <- nodes[nodes[, "axis"] == 3, ]
  n4 <- nodes[nodes[, "axis"] == 4, ]
  n5 <- nodes[nodes[, "axis"] == 5, ]
  mins <- c(min(n1$radius), min(n2$radius), min(n3$radius),
    min(n4$radius), min(n5$radius))
  maxs <- c(max(n1$radius), max(n2$radius), max(n3$radius),
    max(n4$radius), max(n5$radius))
  # Angular position (degrees) of the 5 axes, evenly spaced (360/5 = 72).
  axAng <- c(90, 162, 234, 306, 18)
  # Axis start/end points in Cartesian coordinates.
  x0a <- p2cX(mins, axAng)
  y0a <- p2cY(mins, axAng)
  x1a <- p2cX(maxs, axAng)
  y1a <- p2cY(maxs, axAng)
  # Set up grid graphics viewport
  md <- max(abs(c(x0a, y0a, x1a, y1a))) * 1.3 # max dimension
  if (np) grid.newpage()
  grid.rect(gp = gpar(col = NA, fill = bkgnd))
  vp <- viewport(x = 0.5, y = 0.5, width = 1, height = 1,
    xscale = c(-md, md), yscale = c(-md, md), name = "3DHivePlot")
  pushViewport(vp)
  # Draw all edges running from axis a1 to axis a2 as one batch.
  # grid.curve() does not vectorize over curvature, so edges bending in
  # opposite directions must be drawn in separate calls; this helper
  # replaces the former copy-pasted per-pair accumulation loops.
  drawAxisPair <- function(a1, a2, curv) {
    sel <- which(nodes$axis[id1] == a1 & nodes$axis[id2] == a2)
    if (length(sel) == 0L) return(invisible(NULL))
    rs <- nodes$radius[id1[sel]] # start radii
    re <- nodes$radius[id2[sel]] # end radii
    ths <- rep(axAng[a1], length(sel))
    the <- rep(axAng[a2], length(sel))
    grid.curve(p2cX(rs, ths), p2cY(rs, ths),
      p2cX(re, the), p2cY(re, the),
      default.units = "native", ncp = 5, square = FALSE,
      gp = gpar(col = edges$color[sel], lwd = edges$weight[sel]),
      curvature = curv)
    invisible(NULL)
  }
  # "Forward" edges (toward the next axis) bow one way ...
  drawAxisPair(1, 2, 0.5)
  drawAxisPair(2, 3, 0.5)
  drawAxisPair(3, 4, 0.5)
  drawAxisPair(4, 5, 0.5)
  drawAxisPair(5, 1, 0.5)
  # ... "reverse" edges bow the other way so the two sets do not overlap ...
  drawAxisPair(1, 5, -0.5)
  drawAxisPair(5, 4, -0.5)
  drawAxisPair(4, 3, -0.5)
  drawAxisPair(3, 2, -0.5)
  drawAxisPair(2, 1, -0.5)
  # ... and edges within a single axis always bow the same way.
  for (a in 1:5) drawAxisPair(a, a, 0.5)
  # Draw axes
  grid.segments(x0a, y0a, x1a, y1a,
    gp = gpar(col = HPD$axis.cols, lwd = 3),
    default.units = "native")
  # Now add nodes
  if (dr.nodes) {
    r <- c(n1$radius, n2$radius, n3$radius, n4$radius, n5$radius)
    theta <- rep(axAng, times = c(length(n1$radius), length(n2$radius),
      length(n3$radius), length(n4$radius), length(n5$radius)))
    grid.points(p2cX(r, theta), p2cY(r, theta), pch = 20,
      gp = gpar(cex = c(n1$size, n2$size, n3$size, n4$size, n5$size),
        col = c(n1$color, n2$color, n3$color, n4$color, n5$color)))
  }
  # Now label axes, slightly beyond each axis end
  if (!is.null(axLabs)) {
    if (length(axLabs) != nx) stop("Incorrect number of axis labels")
    if (is.null(axLab.gpar)) axLab.gpar <- gpar(fontsize = 12, col = "white")
    r <- maxs
    if (is.null(axLab.pos)) axLab.pos <- r * 0.1
    r <- r + axLab.pos
    grid.text(axLabs, p2cX(r, axAng), p2cY(r, axAng),
      gp = axLab.gpar, default.units = "native", ...)
  }
  # Add a legend arrow & any annotations
  if (!is.null(arrow)) addArrow(arrow, nx)
  if (!is.null(anNodes)) annotateNodes(anNodes, nodes, nx, anCoord)
  if (!is.null(grInfo)) addGraphic(grInfo, nodes, nx, anCoord)
} # end of 5D
##### Six dimensional case (using grid graphics)
# Prep axes first
if (nx == 6) {
  # Split the node list by axis; each axis spans min..max node radius.
  n1 <- nodes[nodes[, "axis"] == 1, ]
  n2 <- nodes[nodes[, "axis"] == 2, ]
  n3 <- nodes[nodes[, "axis"] == 3, ]
  n4 <- nodes[nodes[, "axis"] == 4, ]
  n5 <- nodes[nodes[, "axis"] == 5, ]
  n6 <- nodes[nodes[, "axis"] == 6, ]
  mins <- c(min(n1$radius), min(n2$radius), min(n3$radius),
    min(n4$radius), min(n5$radius), min(n6$radius))
  maxs <- c(max(n1$radius), max(n2$radius), max(n3$radius),
    max(n4$radius), max(n5$radius), max(n6$radius))
  # Angular position (degrees) of the 6 axes, evenly spaced (360/6 = 60).
  # 390 is equivalent to 30 degrees; kept as-is to match the original layout.
  axAng <- c(90, 150, 210, 270, 330, 390)
  # Axis start/end points in Cartesian coordinates.
  x0a <- p2cX(mins, axAng)
  y0a <- p2cY(mins, axAng)
  x1a <- p2cX(maxs, axAng)
  y1a <- p2cY(maxs, axAng)
  # Set up grid graphics viewport
  md <- max(abs(c(x0a, y0a, x1a, y1a))) * 1.3 # max dimension
  if (np) grid.newpage()
  grid.rect(gp = gpar(col = NA, fill = bkgnd))
  vp <- viewport(x = 0.5, y = 0.5, width = 1, height = 1,
    xscale = c(-md, md), yscale = c(-md, md), name = "3DHivePlot")
  pushViewport(vp)
  # Draw all edges running from axis a1 to axis a2 as one batch.
  # grid.curve() does not vectorize over curvature, so edges bending in
  # opposite directions must be drawn in separate calls; this helper
  # replaces the former copy-pasted per-pair accumulation loops.
  drawAxisPair <- function(a1, a2, curv) {
    sel <- which(nodes$axis[id1] == a1 & nodes$axis[id2] == a2)
    if (length(sel) == 0L) return(invisible(NULL))
    rs <- nodes$radius[id1[sel]] # start radii
    re <- nodes$radius[id2[sel]] # end radii
    ths <- rep(axAng[a1], length(sel))
    the <- rep(axAng[a2], length(sel))
    grid.curve(p2cX(rs, ths), p2cY(rs, ths),
      p2cX(re, the), p2cY(re, the),
      default.units = "native", ncp = 5, square = FALSE,
      gp = gpar(col = edges$color[sel], lwd = edges$weight[sel]),
      curvature = curv)
    invisible(NULL)
  }
  # "Forward" edges (toward the next axis) bow one way ...
  drawAxisPair(1, 2, 0.5)
  drawAxisPair(2, 3, 0.5)
  drawAxisPair(3, 4, 0.5)
  drawAxisPair(4, 5, 0.5)
  drawAxisPair(5, 6, 0.5)
  drawAxisPair(6, 1, 0.5)
  # ... "reverse" edges bow the other way so the two sets do not overlap ...
  drawAxisPair(1, 6, -0.5)
  drawAxisPair(6, 5, -0.5)
  drawAxisPair(5, 4, -0.5)
  drawAxisPair(4, 3, -0.5)
  drawAxisPair(3, 2, -0.5)
  drawAxisPair(2, 1, -0.5)
  # ... and edges within a single axis always bow the same way.
  for (a in 1:6) drawAxisPair(a, a, 0.5)
  # Draw axes
  grid.segments(x0a, y0a, x1a, y1a,
    gp = gpar(col = HPD$axis.cols, lwd = 3),
    default.units = "native")
  # Now add nodes
  if (dr.nodes) {
    r <- c(n1$radius, n2$radius, n3$radius, n4$radius, n5$radius, n6$radius)
    theta <- rep(axAng, times = c(length(n1$radius), length(n2$radius),
      length(n3$radius), length(n4$radius), length(n5$radius),
      length(n6$radius)))
    grid.points(p2cX(r, theta), p2cY(r, theta), pch = 20,
      gp = gpar(cex = c(n1$size, n2$size, n3$size, n4$size, n5$size, n6$size),
        col = c(n1$color, n2$color, n3$color, n4$color, n5$color, n6$color)))
  }
  # Now label axes, slightly beyond each axis end
  if (!is.null(axLabs)) {
    if (length(axLabs) != nx) stop("Incorrect number of axis labels")
    if (is.null(axLab.gpar)) axLab.gpar <- gpar(fontsize = 12, col = "white")
    r <- maxs
    if (is.null(axLab.pos)) axLab.pos <- r * 0.1
    r <- r + axLab.pos
    grid.text(axLabs, p2cX(r, axAng), p2cY(r, axAng),
      gp = axLab.gpar, default.units = "native", ...)
  }
  # Add a legend arrow & any annotations
  if (!is.null(arrow)) addArrow(arrow, nx)
  if (!is.null(anNodes)) annotateNodes(anNodes, nodes, nx, anCoord)
  if (!is.null(grInfo)) addGraphic(grInfo, nodes, nx, anCoord)
} # end of 6D
} # closing brace, this is the end
|
1cfd73660e278a8f7fe21a120020cee1e15b3698 | afe39a330e68856413be87018519f7119dde6508 | /man/add_label.Rd | 84015ad9b28760ee0ae7bacefa2ca0538bfbbff4 | [] | no_license | jchrom/trelloR | 4a142222c34d480b25b7f3fd75614c6af1cf66eb | cca04eb70bf5060a7c7f858fa9911bd8c68e2089 | refs/heads/master | 2023-08-31T06:33:57.940993 | 2023-08-27T18:09:07 | 2023-08-27T18:09:07 | 34,352,839 | 40 | 12 | null | 2017-01-21T19:47:02 | 2015-04-21T21:20:56 | R | UTF-8 | R | false | true | 864 | rd | add_label.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_wrappers.R
\name{add_label}
\alias{add_label}
\title{Add label}
\usage{
add_label(card, color, name = NULL, ...)
}
\arguments{
\item{card}{Card id.}
\item{color}{Label color.}
\item{name}{Label name; choosing different non-existing name will create new
label. Defaults to \code{NULL}.}
\item{...}{Additional arguments passed to \code{\link[=create_resource]{create_resource()}}.}
}
\description{
Add label to a card.
}
\seealso{
Other functions to create resources:
\code{\link{add_board}()},
\code{\link{add_card_attachment}()},
\code{\link{add_card}()},
\code{\link{add_checkitem}()},
\code{\link{add_checklist}()},
\code{\link{add_comment}()},
\code{\link{add_list}()},
\code{\link{add_member}()},
\code{\link{create_resource}()}
}
\concept{functions to create resources}
|
12c0b172d0fbbbc859fa776e563d80688c27a7ab | ef572bd2b0515892d1f59a073b8bf99f81d6a734 | /R/data.R | f0618ce2bb4b87be3dbf984358cfb07b6cc56aa6 | [
"CC0-1.0"
] | permissive | pepfar-datim/datapackr | 5bc604caa1ae001b6c04e1d934c0c613c59df1e6 | 9275632673e45948db6846513a53c1436cfc0e47 | refs/heads/master | 2023-08-30T23:26:48.454382 | 2023-08-11T13:01:57 | 2023-08-11T13:01:57 | 170,350,211 | 9 | 7 | CC0-1.0 | 2023-09-11T21:53:24 | 2019-02-12T16:19:47 | R | UTF-8 | R | false | false | 30,232 | r | data.R | #' @docType data
#' @title Mapping of DATIM Prioritization numerals to strings.
#'
#' @description Maps DATIM Prioritizations from their coded numerals (1-8) to
#' their more descriptive names.
#'
#' @format
#' \describe{
#' \item{value}{Numeral associated with DATIM Prioritization.}
#' \item{Prioritization}{Name associated with DATIM Prioritization}
#' }
"prioritizations"
#' @docType data
#' @title Library of Openxlsx style objects to apply to both Data Pack and Site
#' Tool files.
#'
#' @description
#' A list object containing styles pertaining to different portions of the Data
#' Pack. These styles control things such as font type,font size, alignment,
#' border thickness, and color
#'
#' @format
#' \describe{
#' \item{home}{Styles for title, Data Pack Name, & PEPFAR banner.}
#' \item{siteList}{Styles for site names, broken out by Community, Facility,
#' National, Military, or Inactive.}
#' \item{data}{Styles for title, headers, labels, and Unique ID row.}
#' \item{cop21_opu}{Styles specific to cop21_opu's}
#' }
"styleGuide"
#' @docType data
#' @title Map of COP21 indicators from Data Pack indicator codes to
#' DATIM dataelements and categoryoptioncombos
#'
#' @description
#' Dataset that maps COP21 Data Pack indicators to dataelements and
#' categoryoptioncombos in DATIM, used for mapping datasets
#' extracted from Data Packs to DATIM, with the necessary import file structure.
#'
#' @format
#' \describe{
#' \item{indicator_code}{Code used in the Data Pack to uniquely identify each
#' distinct programmatic area of target setting.}
#' \item{col_type}{Values can be "target", "result" or NA}
#' \item{value_type}{Describes what type of measure the indicator code is
#' represented by. Values can be "integer", "percentage", or NA}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages.name}{Age disaggregate}
#' \item{valid_ages.id}{Age disaggregate UID}
#' \item{valid_sexes.name}{Sex disaggregate}
#' \item{valid_sexes.id}{Sex disaggregate UID}
#' \item{valid_kps.name}{KP disaggregate}
#' \item{valid_kps.id}{KP disaggregate UID}
#' \item{FY}{Fiscal Year}
#' \item{period}{DHIS2 period for example "2021Oct"}
#' \item{categoryOptions.ids}{Categoryoption UID}
#' \item{dataelementuid}{DATIM UID for dataElements.}
#' \item{hts_modality}{HIV Testing service type}
#' \item{period_dataset}{Fiscal year dataset results}
#' \item{dataelementname}{The name of the data element being described}
#' \item{categoryoptioncomboname}{The name of the various combinations of
#' categories and options}
#' \item{categoryoptioncombouid}{DATIM uid for categoryOptionCombos.}
#' \item{targets_results}{Category variable denoting "targets" or "results"}
#' \item{dataset}{Category variable denoting where the dataset stems from:
#' "impatt","subnat", "mer"}
#' \item{resultstatus}{Category variable denoting the status of the results}
#' \item{resultststaus_inclusive}{Category variable denoting
#' "Positive", "Negative", "Unknown"}
#' \item{disagg_type}{Category variable denoting the disaggregate}
#' \item{technical_area}{Category variable denoting the technical area}
#' \item{top_level}{Denotes if the top level is a numerator or denominator}
#' \item{support_type}{Category variable denoting "Sub-National", "DSD", "TA",
#' or "No Support Type". The crossing of these with \code{indicatorCode}
#' roughly corresponds to DATIM dataelements.}
#' \item{numerator_denominator}{Category variable denoting numerator or
#' denominator}
#' }
#'
"cop21_map_DataPack_DATIM_DEs_COCs"
#' @docType data
#' @title Map of COP22 indicators from Data Pack indicator codes to
#' DATIM dataelements and categoryoptioncombos
#'
#' @description
#' Dataset that maps COP22 Data Pack indicators to dataelements and
#' categoryoptioncombos in DATIM, used for mapping datasets
#' extracted from Data Packs to DATIM, with the necessary import file structure.
#'
#' @format
#' \describe{
#' \item{indicator_code}{Code used in the Data Pack to uniquely identify each
#' distinct programmatic area of target setting.}
#' \item{col_type}{Values can be "target", "result" or NA}
#' \item{value_type}{Describes what type of measure the indicator code is
#' represented by. Values can be "integer", "percentage", or NA}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages.name}{Age disaggregate}
#' \item{valid_ages.id}{Age disaggregate UID}
#' \item{valid_sexes.name}{Sex disaggregate}
#' \item{valid_sexes.id}{Sex disaggregate UID}
#' \item{valid_kps.name}{KP disaggregate}
#' \item{valid_kps.id}{KP disaggregate UID}
#' \item{FY}{Fiscal Year}
#' \item{period}{DHIS2 period for example "2021Oct"}
#' \item{categoryOptions.ids}{Categoryoption UID}
#' \item{dataelementuid}{DATIM UID for dataElements.}
#' \item{hts_modality}{HIV Testing service type}
#' \item{period_dataset}{Fiscal year dataset results}
#' \item{dataelementname}{The name of the data element being described}
#' \item{categoryoptioncomboname}{The name of the various combinations of
#' categories and options}
#' \item{categoryoptioncombouid}{DATIM uid for categoryOptionCombos.}
#' \item{targets_results}{Category variable denoting "targets" or "results"}
#' \item{dataset}{Category variable denoting where the dataset stems from:
#' "impatt","subnat", "mer"}
#' \item{resultstatus}{Category variable denoting the status of the results}
#' \item{resultststaus_inclusive}{Category variable denoting
#' "Positive", "Negative", "Unknown"}
#' \item{disagg_type}{Category variable denoting the disaggregate}
#' \item{technical_area}{Category variable denoting the technical area}
#' \item{top_level}{Denotes if the top level is a numerator or denominator}
#' \item{support_type}{Category variable denoting "Sub-National", "DSD", "TA",
#' or "No Support Type". The crossing of these with \code{indicatorCode}
#' roughly corresponds to DATIM dataelements.}
#' \item{numerator_denominator}{Category variable denoting numerator or
#' denominator}
#' }
#'
"cop22_map_DataPack_DATIM_DEs_COCs"
#' @docType data
#' @title Map of indicators from Data Pack indicator codes to DATIM dataelements
#' and categoryoptioncombos
#'
#' @description
#' Dataset that maps Data Pack indicators to dataelements and
#' categoryoptioncombos in DATIM, used for mapping datasets
#' extracted from Data Packs to DATIM, with the necessary import file structure.
#'
#' @format
#' \describe{
#' \item{indicator_code}{Code used in the Data Pack to uniquely identify each
#' distinct programmatic area of target setting.}
#' \item{col_type}{Values can be "target", "result" or NA}
#' \item{value_type}{Describes what type of measure the indicator code is
#' represented by. Values can be "integer", "percentage", or NA}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages.name}{Age disaggregate}
#' \item{valid_ages.id}{Age disaggregate UID}
#' \item{valid_sexes.name}{Sex disaggregate}
#' \item{valid_sexes.id}{Sex disaggregate UID}
#' \item{valid_kps.name}{KP disaggregate}
#' \item{valid_kps.id}{KP disaggregate UID}
#' \item{FY}{Fiscal Year}
#' \item{period}{DHIS2 period for example "2021Oct"}
#' \item{categoryOptions.ids}{Categoryoption UID}
#' \item{dataelementuid}{DATIM UID for dataElements.}
#' \item{hts_modality}{HIV Testing service type}
#' \item{period_dataset}{Fiscal year dataset results}
#' \item{dataelementname}{The name of the data element being described}
#' \item{categoryoptioncomboname}{The name of the various combinations of
#' categories and options}
#' \item{categoryoptioncombouid}{DATIM uid for categoryOptionCombos.}
#' \item{targets_results}{Category variable denoting "targets" or "results"}
#' \item{dataset}{Category variable denoting where the dataset stems from:
#' "impatt","subnat", "mer"}
#' \item{resultstatus}{Category variable denoting the status of the results}
#' \item{resultststaus_inclusive}{Category variable denoting
#' "Positive", "Negative", "Unknown"}
#' \item{disagg_type}{Category variable denoting the disaggregate}
#' \item{technical_area}{Category variable denoting the technical area}
#' \item{top_level}{Denotes if the top level is a numerator or denominator}
#' \item{support_type}{Category variable denoting "Sub-National", "DSD", "TA",
#' or "No Support Type". The crossing of these with \code{indicatorCode}
#' roughly corresponds to DATIM dataelements.}
#' \item{numerator_denominator}{Category variable denoting numerator or
#' denominator}
#' }
#'
"map_DataPack_DATIM_DEs_COCs"
#' @docType data
#' @title Map from Data Pack to DATIM for the adorning import files
#'
#' @description
#' Dataset that is a full map between Data Packs and DATIM for
#' the purpose of generating import and analytics tables.
#'
#' @format
#' \describe{
#' \item{indicator_code}{Code used in the Data Pack to uniquely identify each
#' distinct programmatic area of target setting.}
#' \item{col_type}{Values can be "target", "result" or NA}
#' \item{value_type}{Describes what type of measure the indicator code is
#' represented by. Values can be "integer", "percentage", or NA}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages.name}{Age disaggregate}
#' \item{valid_ages.id}{Age disaggregate UID}
#' \item{valid_sexes.name}{Sex disaggregate}
#' \item{valid_sexes.id}{Sex disaggregate UID}
#' \item{valid_kps.name}{KP disaggregate}
#' \item{valid_kps.id}{KP disaggregate UID}
#' \item{FY}{Fiscal Year}
#' \item{period}{DHIS2 period for example "2021Oct"}
#' \item{categoryOptions.ids}{Categoryoption UID}
#' \item{dataelementuid}{DATIM UID for dataElements.}
#' \item{hts_modality}{HIV Testing service type}
#' \item{period_dataset}{Fiscal year dataset results}
#' \item{dataelementname}{The name of the data element being described}
#' \item{categoryoptioncomboname}{The name of the various combinations of
#' categories and options}
#' \item{categoryoptioncombouid}{DATIM uid for categoryOptionCombos.}
#' \item{targets_results}{Category variable denoting "targets" or "results"}
#' \item{dataset}{Category variable denoting where the dataset stems from:
#' "impatt","subnat", "mer"}
#' \item{resultstatus}{Category variable denoting the status of the results}
#' \item{resultststaus_inclusive}{Category variable denoting
#' "Positive", "Negative", "Unknown"}
#' \item{disagg_type}{Category variable denoting the disaggregate}
#' \item{technical_area}{Category variable denoting the technical area}
#' \item{top_level}{Denotes if the top level is a numerator or denominator}
#' \item{support_type}{Category variable denoting "Sub-National", "DSD", "TA",
#' or "No Support Type". The crossing of these with \code{indicatorCode}
#' roughly corresponds to DATIM dataelements.}
#' \item{numerator_denominator}{Category variable denoting numerator or
#' denominator}
#' }
#'
"cop22_map_adorn_import_file"
#' @docType data
#' @title List of valid PSNUs used for generating Data Packs.
#'
#' @description List of valid PSNUs used for generating Data Packs. Must be
#' synced and saved manually!
#'
#' @format
#' \describe{
#' \item{ou}{Operating Unit name associated with the Organisation Unit}
#' \item{ou_id}{Operating Unit UID}
#' \item{country_name}{Country name associated with the Organisation Unit}
#' \item{country_uid}{Country name UID}
#' \item{snu1}{Subnational Unit associated with the Organisation Unit}
#' \item{snu1_uid}{Subnational Unit UID}
#' \item{psnu}{Priority Sub-National Unit associated with the Organisation
#' Unit}
#' \item{psnu_uid}{Priority Sub-National Unit UID}
#' \item{psnu_type}{The type of Priority Sub-National Unit}
#' \item{lastUpdated}{The last time the Organisation Unit was updated}
#' \item{ancestors}{A nested eleven column data frame that contains the
#' list of parent organisation units that contain the PSNU,
#' including the names, ids, and which organisationUnitGroups that those
#' parent organisation units belong to}
#' \item{organisationUnitGroups}{A nested two column data frame that
#' contains the name and id of the groups the organisation unit is associated
#' with. For example "Community" and "PvuaP6YALSA"}
#' \item{DREAMS}{Determined, Resilient, Empowered, AIDS-free, Mentored, and
#' Safe Partnership. Binary column "Y" or NA.}
#' }
"valid_PSNUs"
#' @docType data
#' @title List of valid PSNUs used for generating Data Packs.
#'
#' @description List of valid PSNUs used for generating Data Packs. Must be
#' synced and saved manually!
#'
#' @format
#' \describe{
#' \item{ou}{Operating Unit name associated with the Organisation Unit}
#' \item{ou_uid}{Operating Unit UID}
#' \item{country_name}{Country name associated with the Organisation Unit}
#' \item{country_uid}{Country name UID}
#' \item{snu1}{Subnational Unit associated with the Organisation Unit}
#' \item{snu1_uid}{Subnational Unit UID}
#' \item{name}{Priority Sub-National Unit associated with the Organisation
#' Unit}
#' \item{uid}{Priority Sub-National Unit UID}
#' \item{orgtype}{The type of Priority Sub-National Unit}
#' \item{lastUpdated}{The last time the Organisation Unit was updated}
#' \item{ancestors}{A nested eleven column data frame that contains the
#' list of parent organisation units that contain the PSNU,
#' including the names, ids, and which organisationUnitGroups that those
#' parent organisation units belong to}
#' \item{organisationUnitGroups}{A nested two column data frame that
#' contains the name and id of the groups the organisation unit is associated
#' with. For example "Community" and "PvuaP6YALSA"}
#' \item{DREAMS}{Determined, Resilient, Empowered, AIDS-free, Mentored, and
#' Safe Partnership. Binary column "Y" or NA.}
#' }
"valid_OrgUnits"
#' @docType data
#' @title List of valid COP22 PSNUs used for generating Data Packs.
#'
#' @description List of valid COP22 PSNUs used for generating Data Packs.
#' Must be synced and saved manually!
#'
#' @format
#' \describe{
#' \item{ou}{Operating Unit name associated with the Organisation Unit}
#' \item{ou_id}{Operating Unit UID}
#' \item{country_name}{Country name associated with the Organisation Unit}
#' \item{country_uid}{Country name UID}
#' \item{snu1}{Subnational Unit associated with the Organisation Unit}
#' \item{snu1_uid}{Subnational Unit UID}
#' \item{psnu}{Priority Sub-National Unit associated with the Organisation
#' Unit}
#' \item{psnu_uid}{Priority Sub-National Unit UID}
#' \item{psnu_type}{The type of Priority Sub-National Unit}
#' \item{lastUpdated}{The last time the Organisation Unit was updated}
#' \item{ancestors}{A nested eleven column data frame that contains the
#' list of parent organisation units that contain the PSNU,
#' including the names, ids, and which organisationUnitGroups that those
#' parent organisation units belong to}
#' \item{organisationUnitGroups}{A nested two column data frame that
#' contains the name and id of the groups the organisation unit is associated
#' with. For example "Community" and "PvuaP6YALSA"}
#' \item{DREAMS}{Determined, Resilient, Empowered, AIDS-free, Mentored, and
#' Safe Partnership. Binary column "Y" or NA.}
#' }
"cop22_valid_PSNUs"
#' @docType data
#' @title Datapack country groupings
#'
#' @description Tibble of data pack country names and their UIDs.
#'
#' @format
#' \describe{
#' \item{datapack_name}{Country name on home tab of datapack}
#' \item{country_uids}{Country's UIDs listed on the home tab}
#' }
"COP21_datapacks_countries"
#' @docType data
#' @title Schema describing the correct structure of the COP21 OPU Data Pack
#' template.
#'
#' @description This schema describes the correct structure of a COP21 OPU
#' Data Pack file, generated from the template used to produce Data Packs and
#' useful in validating Data Packs passed through datapackr.
#'
#' @format
#' \describe{
#' \item{sheet_num}{Lists the index value associated with the sheet name
#' listed in \code{sheet_name}.}
#' \item{sheet_name}{Lists the sheet/tab name as used in both the Data Pack.}
#' \item{data_structure}{Binary column describing the structure of the data
#' These values consist of "skip" or "normal"}
#' \item{col}{Value describing the column position of each
#' \code{indicator_code}.}
#' \item{indicator_code}{Code used in the Data Pack to uniquely
#' identify each distinct programmatic area of target setting.}
#' \item{dataset}{For \code{indicator_codes} listed as "\code{Targets}"
#' in the \code{col_type} field, documents the dataset, either \code{MER},
#' \code{IMPATT},\code{datapack}, or \code{SUBNAT}.}
#' \item{col_type}{Flags whether an \code{indicator_code} is a Target
#' (\code{target}), historic data (\code{past}), reference figure
#' (\code{reference}), row header (\code{row_header}) or not (\code{NA}).}
#' \item{value_type}{Category column describing the type of measure for the
#' \code{indicator_code}. The values consist of "string", "integer",
#' "percentage", or NA}
#' \item{dataelement_dsd}{Denotes whether this element has a
#' "Direct Service Delivery" support type}
#' \item{dataelement_ta}{Denotes whether this element has a
#' "Technical Assistance" support type}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages}{Comprised of Age disaggregate and the associated UID}
#' \item{valid_sexes}{Comprised of Sex disaggregate and the associated UID}
#' \item{valid_kps}{Comprised of KP disaggregate and the associated UID}
#' \item{formula}{Excel formula defined for \code{indicator_code}.}
#' }
"cop21OPU_data_pack_schema"
#' @docType data
#' @title Datapack Category option groups
#'
#' @description Data frame of category option groups (id and name)
#' along with their individual category options (id and name) as a
#' nested data frame.
#'
#' @format
#' \describe{
#' \item{name}{Name of the Category Option Group for example "01-04 Only"}
#' \item{id}{Category Option Group UID}
#' }
"datapack_cogs"
#' @docType data
#' @title Schema describing the correct structure of the COP21 Data Pack template.
#'
#' @description This schema describes the correct structure of a COP21 Data Pack
#' file, generated from the template used to produce Data Packs and useful in
#' validating Data Packs passed through datapackr.
#'
#' @format
#' \describe{
#' \item{sheet_num}{Lists the index value associated with the sheet name
#' listed in \code{sheet_name}.}
#' \item{sheet_name}{Lists the sheet/tab name as used in both the Data Pack.}
#' \item{data_structure}{Binary column describing the structure of the data
#' These values consist of "skip" or "normal"}
#' \item{col}{Value describing the column position of each
#' \code{indicator_code}.}
#' \item{indicator_code}{Code used in the Data Pack to uniquely
#' identify each distinct programmatic area of target setting.}
#' \item{dataset}{For \code{indicator_codes} listed as "\code{Targets}"
#' in the \code{col_type} field, documents the dataset, either \code{MER},
#' \code{IMPATT},\code{datapack}, or \code{SUBNAT}.}
#' \item{col_type}{Flags whether an \code{indicator_code} is a Target
#' (\code{target}), historic data (\code{past}), reference figure
#' (\code{reference}), row header (\code{row_header}) or not (\code{NA}).}
#' \item{value_type}{Category column describing the type of measure for the
#' \code{indicator_code}. The values consist of "string", "integer",
#' "percentage", or NA}
#' \item{dataelement_dsd}{Denotes whether this element has a
#' "Direct Service Delivery" support type}
#' \item{dataelement_ta}{Denotes whether this element has a
#' "Technical Assistance" support type}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages}{Comprised of Age disaggregate and the associated UID}
#' \item{valid_sexes}{Comprised of Sex disaggregate and the associated UID}
#' \item{valid_kps}{Comprised of KP disaggregate and the associated UID}
#' \item{formula}{Excel formula defined for \code{indicator_code}.}
#' \item{FY}{Fiscal Year}
#' \item{period}{DHIS2 period for example "2021Oct"}
#' }
"cop21_data_pack_schema"
#' @docType data
#' @title Schema describing the correct structure of the COP22 Data Pack
#' template.
#'
#' @description This schema describes the correct structure of a COP22 Data Pack
#' file, generated from the template used to produce Data Packs and useful in
#' validating Data Packs passed through datapackr.
#'
#' @format
#' \describe{
#' \item{sheet_num}{Lists the index value associated with the sheet name
#' listed in \code{sheet_name}.}
#' \item{sheet_name}{Lists the sheet/tab name as used in both the Data Pack.}
#' \item{data_structure}{Binary column describing the structure of the data
#' These values consist of "skip" or "normal"}
#' \item{col}{Value describing the column position of each
#' \code{indicator_code}.}
#' \item{indicator_code}{Code used in the Data Pack to uniquely
#' identify each distinct programmatic area of target setting.}
#' \item{dataset}{For \code{indicator_codes} listed as "\code{Targets}"
#' in the \code{col_type} field, documents the dataset, either \code{MER},
#' \code{IMPATT},\code{datapack}, or \code{SUBNAT}.}
#' \item{col_type}{Flags whether an \code{indicator_code} is a Target
#' (\code{target}), historic data (\code{past}), reference figure
#' (\code{reference}), row header (\code{row_header}) or not (\code{NA}).}
#' \item{value_type}{Category column describing the type of measure for the
#' \code{indicator_code}. The values consist of "string", "integer",
#' "percentage", or NA}
#' \item{dataelement_dsd}{Denotes whether this element has a
#' "Direct Service Delivery" support type}
#' \item{dataelement_ta}{Denotes whether this element has a
#' "Technical Assistance" support type}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages}{Comprised of Age disaggregate and the associated UID}
#' \item{valid_sexes}{Comprised of Sex disaggregate and the associated UID}
#' \item{valid_kps}{Comprised of KP disaggregate and the associated UID}
#' \item{formula}{Excel formula defined for \code{indicator_code}.}
#' \item{FY}{Fiscal Year}
#' \item{period}{DHIS2 period for example "2021Oct"}
#' }
"cop22_data_pack_schema"
#' @docType data
#' @title Schema describing the correct structure of the COP22 OPU Data Pack
#' template.
#'
#' @description This schema describes the correct structure of a COP22 OPU
#' Data Pack file, generated from the template used to produce Data Packs and
#' useful in validating Data Packs passed through datapackr.
#'
#' @format
#' \describe{
#' \item{sheet_num}{Lists the index value associated with the sheet name
#' listed in \code{sheet_name}.}
#' \item{sheet_name}{Lists the sheet/tab name as used in both the Data Pack.}
#' \item{data_structure}{Binary column describing the structure of the data
#' These values consist of "skip" or "normal"}
#' \item{col}{Value describing the column position of each
#' \code{indicator_code}.}
#' \item{indicator_code}{Code used in the Data Pack to uniquely
#' identify each distinct programmatic area of target setting.}
#' \item{dataset}{For \code{indicator_codes} listed as "\code{Targets}"
#' in the \code{col_type} field, documents the dataset, either \code{MER},
#' \code{IMPATT},\code{datapack}, or \code{SUBNAT}.}
#' \item{col_type}{Flags whether an \code{indicator_code} is a Target
#' (\code{target}), historic data (\code{past}), reference figure
#' (\code{reference}), row header (\code{row_header}) or not (\code{NA}).}
#' \item{value_type}{Category column describing the type of measure for the
#' \code{indicator_code}. The values consist of "string", "integer",
#' "percentage", or NA}
#' \item{dataelement_dsd}{Denotes whether this element has a
#' "Direct Service Delivery" support type}
#' \item{dataelement_ta}{Denotes whether this element has a
#' "Technical Assistance" support type}
#' \item{categoryoption_specified}{Categoryoption disaggregate of the data
#' element}
#' \item{valid_ages}{Comprised of Age disaggregate and the associated UID}
#' \item{valid_sexes}{Comprised of Sex disaggregate and the associated UID}
#' \item{valid_kps}{Comprised of KP disaggregate and the associated UID}
#' \item{formula}{Excel formula defined for \code{indicator_code}.}
#' }
"cop22OPU_data_pack_schema"
#' @docType data
#' @title COP Validation Rules
#' @description A nested list of validation rules for both current and past
#' COP years.
#' @md
#'
#' @format The following COP years are included in this dataset:
#' \describe{
#' \item{2021}{A list object containing the validation rules for COP21/FY22.}
#' \item{2022}{A list object containing the validation rules for COP22/FY23.}
#' }
#'
#' @section Structure for COP21 data set:
#' The data set for 2021 conforms to the following structure:
#'
#' * `description`: A description of the DATIM validation rule, showing the
#' relationship required between two indicators. Synonymous to
#' the `name` and `instruction` columns.
#' * `id`: The DATIM UID for the rule.
#' * `importance`: Category showing the relative importance of the validation
#' rule. For COP20 and COP21, this is always listed as `MEDIUM`.
#' * `instruction`: A description of the DATIM validation rule, showing the
#' relationship required between two indicators. Synonymous to the `description`
#' and `name` columns.
#' * `name`: A description of the DATIM validation rule, showing the
#' relationship required between two indicators. Synonymous to the `description`
#' and `instruction` columns.
#' * `operator`: The operator used in the validation rule. This must be either
#' `<=`, `>=`, or `|`.
#' * `periodType`: A string indicating whether the indicator is reported
#' quarterly or annually. The value is either `Quarterly` or `FinancialOct`.
#' * `ruletype`: The type of rule being applied. This value is
#' always `VALIDATION`.
#' * `leftSide.dataElements`: A nested list containing a single DATIM data
#' element defining the indicator on the left-hand side of the equation.
#' * `leftSide.description`: A description of the indicator on the left-hand
#' side of the validation rule equation.
#' * `leftSide.expression`: An expression defining how to calculate the value
#' of the left-hand side of the validation rule equation.
#' * `leftSide.missingValueStrategy`: A string that states whether this rule
#' should be skipped if the value of the left-hand side of the equation is
#' missing. Value is either `NEVER_SKIP` or `SKIP_IF_ALL_VALUES_MISSING`.
#' * `rightSide.dataElements`: A nested list containing a single DATIM data
#' element defining the indicator on the right-hand side of the equation.
#' * `rightSide.description`: A description of the indicator on the right-hand
#' side of the validation rule equation.
#' * `rightSide.expression`: An expression defining how to calculate the value
#' of the right-hand side of the validation rule equation.
#' * `rightSide.missingValueStrategy`: A string that states whether this rule
#' should be skipped if the value of the right-hand side of the equation is
#' missing. Value is either `NEVER_SKIP` or `SKIP_IF_ALL_VALUES_MISSING`.
#' * `rightSide.ops`:
#' * `leftSide.ops`:
#'
#' @section Structure for COP22 data set:
#' The data set for COP22 conforms to the following structure:
#'
#' * `name`: A descriptive name of the DATIM validation rule, showing the
#' relationship required between two indicators. Synonymous to
#' the `description` column.
#' * `id`: The DATIM UID for the rule.
#' * `periodType`: A string indicating whether the indicator is reported
#' quarterly or annually. The value is either `Quarterly` or `FinancialOct`.
#' * `description`: A description of the DATIM validation rule, showing the
#' relationship required between two indicators. Synonymous to
#' the `name` column.
#' * `operator`: The operator used in the validation rule. This must be either
#' `<=`, `>=`, or `|`.
#' * `leftSide.expression`: An expression defining how to calculate the value
#' of the left-hand side of the validation rule equation.
#' * `leftSide.missingValueStrategy`: A string that states whether this rule
#' should be skipped if the value of the left-hand side of the equation is
#' missing. Value is either `NEVER_SKIP` or `SKIP_IF_ALL_VALUES_MISSING`.
#' * `rightSide.expression`: An expression defining how to calculate the value
#' of the right-hand side of the validation rule equation.
#' * `rightSide.missingValueStrategy`: A string that states whether this rule
#' should be skipped if the value of the right-hand side of the equation is
#' missing. Value is either `NEVER_SKIP` or `SKIP_IF_ALL_VALUES_MISSING`.
#' * `rightSide.ops`:
#' * `leftSide.ops`:
#'
#' @source \url{https://www.datim.org/}
"cop_validation_rules"
#' Structure for DATIM data set levels.
#'
#' @format ## `dataset_levels`
#' \describe{
#' \item{iso3, iso4}{ISO codes of the Operating unit and country}
#' \item{ou}{Name of the operating unit}
#' \item{country_name}{Name of the country}
#' \item{country_level}{Level of the country in the DATIM hierarchy}
#' \item{facility_level}{Level of facilities in the DATIM hierarchy}
#' \item{community_level}{Community data set level in the DATIM hierarchy}
#' \item{prioritization}{Prioritization level / target setting level in the DATIM hierarchy}
#' \item{cop_year}{The COP Year for which the other values are valid}
#' \item{ou_uid}{UID of the countries operating unit for a given COP year}
#' \item{country_uid}{UID of the country.}
#' }
#' @source \url{https://www.datim.org/}
"dataset_levels"
|
5f90e00245c17b34b70ae2472f7b279b922a3130 | e9a61662d35c57c52add9eb02710bda5c3f4c9b6 | /plot2.R | 1c78280f7df63340e79e1bd546e7d91540856409 | [] | no_license | parveen-sharma/exploratory-data-analysis | ce287ae9fcec66288234ce91c83d608be187f104 | ed5327701683ded8fb42db71927d6f2d9f05f1a0 | refs/heads/master | 2021-01-10T19:43:30.115981 | 2015-06-03T17:55:54 | 2015-06-03T17:55:54 | 36,706,789 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 890 | r | plot2.R | ###########################
## Author: Parveen Sharma
## Date: 2015-06-02
## Exploratory Data Analysis
## Assignment: Course Project 1 | Task: plot2
##########################

## The working directory holds the downloaded data file; the PNG is written
## there as well.
setwd("C:/Users/Parveen/Desktop/r-prog/assign-ExploratoryDataAnalysis/study2/")

## Load only the observations for the two dates of interest (1-2 Feb 2007),
## filtering at read time via an SQL query instead of reading the whole file.
library(sqldf)
power_use <- read.csv.sql(
  "household_power_consumption.txt",
  sql = "select * from file where Date in ('1/2/2007','2/2/2007') ",
  header = TRUE,
  sep = ";",
  eol = "\n"
)

## Merge the separate Date and Time columns into one POSIXct timestamp.
library(lubridate)
power_use$date_time <- dmy_hms(paste(power_use$Date, power_use$Time, sep = " "))

## plot2: Global Active Power over time, saved as a 480x480 PNG.
png(file = "plot2.png", width = 480, height = 480, units = "px", bg = "white")
plot(power_use$date_time, power_use$Global_active_power,
     type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
0f5ced561a7daf2b423ce89ecec20f282f69c56e | d812db15a12cfce3666d69812fbbb0da4b070c14 | /code/main.R | 78e72d78df9e700cb8a9468b3a5655031082eea2 | [
"MIT"
] | permissive | jvpoulos/patt-c | 97fba2cec409113747f246cec1cc36ee6cf21f5d | d471872f710210516c540f313437f8fa69a91e21 | refs/heads/master | 2021-07-07T15:02:19.941944 | 2020-07-31T03:27:53 | 2020-07-31T03:27:53 | 156,440,652 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,304 | r | main.R | repo.directory <- "~/patt-c/"
# Simulation
# Driver script: runs each stage of the PATT-C analysis by sourcing the stage
# scripts in order. All stages share the global environment; `repo.directory`
# (set above) must point at the repository root. Trailing "-->" comments note
# the .RData files each stage produces.
# Flag: set TRUE to re-run the patient simulation (output noted below);
# leave FALSE to skip straight to plotting, which presumably reads the
# previously saved simulation_res.Rdata -- TODO confirm.
patient.simulation <- FALSE
if(patient.simulation){
source(paste0(repo.directory,"code/simulation.R")) # --> simulation_res.Rdata
}
source(paste0(repo.directory,"code/simulation-plots.R"))
# Empirical application
source(paste0(repo.directory,"code/prepare-ohie.R")) # --> data/prepare-ohie.Rdata
# Flag: set TRUE to download and extract the raw NHIS microdata (2008-17)
# before merging; leave FALSE if data/NHIS/ is already populated.
download.NHIS <-FALSE
if(download.NHIS){
source(paste0(repo.directory, "data/NHIS/download-all-nhis-microdata.R")) # download NHIS data 2008-17
source(paste0(repo.directory, "data/NHIS/2015/extract_nhis_2015.R"))
source(paste0(repo.directory, "data/NHIS/merge-nhis.R"))
}
source(paste0(repo.directory,"code/prepare-nhis.R")) # script merges person, sample adult, and imputed income files --> data/prepare-nhis.RData
source(paste0(repo.directory,"code/prepare-analysis.R")) # loads data/prepare-ohie.Rdata and data/prepare-nhis.RData
# --> data/prepare-analysis.RData
# Main analysis and comparison plots of the treatment-effect estimators.
source(paste0(repo.directory,"code/analysis.R"))
source(paste0(repo.directory,"code/estimator-compare-plots.R")) # plot treatment effect estimates
## Appendix figures and tables
source(paste0(repo.directory,"code/rct-nrt-compare.R")) # Tables A1 and A2
source(paste0(repo.directory,"code/placebo-test.R")) # Table A3
a8314f0ad2bf5c9a8a761292bdbd991ee78f01e4 | 7853c37eebe37fa6a0307e0dd9e197830ee6ac71 | /tests/funcAttributes.R | fa2f819af85517bda1d89d3c1915118efe8be289 | [] | no_license | chen0031/RCUDA | ab34ffe4f7e7036a8d39060639f09617943afbdf | 2479a3b43c6d51321b0383a88e7a205b5cb64992 | refs/heads/master | 2020-12-22T22:47:37.213822 | 2016-07-27T03:50:44 | 2016-07-27T03:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 387 | r | funcAttributes.R | library(RCUDA)
# Smoke test for RCUDA function-attribute queries: load the bundled dnorm
# sample kernel and read its function attributes both one at a time and in
# bulk. Skips (with a message) when the compiled kernel is not installed.
cuGetContext()
# system.file() returns "" when the file is absent, and file.exists("")
# is FALSE, so the else branch also covers a missing sample kernel.
f <- system.file("sampleKernels", "dnorm.cubin", package = "RCUDA")
if (file.exists(f)) {
  m <- loadModule(f)
  vals <- RCUDA:::CUfunction_attributeValues
  # Query each attribute individually; drop the trailing MAX sentinel entry.
  sapply(unclass(vals)[-length(vals)], cuFuncGetAttribute, m$dnorm_kernel)
  # Bulk query of all attributes for comparison.
  cuFuncGetAttributes(m$dnorm_kernel)
} else {
  cat("Cannot find or load dnorm.cubin. No test!\n")
}
|
2876b7c14549151f17de948067706e523b958438 | e17c16432c4d6192566ed3c19d70b885b0683405 | /R/getContent.R | 40e076af7c62c5d301e90e1a79c0ba07a9ceb5df | [
"Apache-2.0"
] | permissive | xy21hb/ReactomeContentService4R | bb8fb202bfefab66a1ef660cd3831e83d2cd582e | f55d6afe90e778677fb7f295512a56f4590bb61d | refs/heads/master | 2023-05-06T22:04:38.800860 | 2021-05-26T14:40:21 | 2021-05-26T14:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,582 | r | getContent.R | ## Retrieve data from Content Service
#' Search engines discovery schema
#' @param event.id stable id or db id of an Event
#' @return a list of the event schema
#' @examples
#' discover("R-HSA-73893")
#' @rdname discover
#' @export
discover <- function(event.id) {
  # Compose the /data/discover/{id} endpoint under the configured base
  # address and fetch the event's search-engine schema as parsed text.
  endpoint <- file.path(getOption("base.address"), "data/discover", event.id)
  .retrieveData(endpoint, as = "text")
}
#' PhysicalEntity queries
#'
#' This method retrieves instances related to PhysicalEntity.
#'
#' Details on options of `retrieval` arg:
#' - \strong{subunits}: retrieves subunits that constitute any given Complex
#' - \strong{complexes}: retrieves Complexes that contain the given `id` and `resource`
#' - \strong{componentOf}: retrieves structures (Complexes and Sets) that include the given Entity as their component
#' - \strong{otherForms}: retrieves Entities containing all other forms of the given PhysicalEntity
#'
#' @param id stable or db id of a Reactome PhysicalEntity, or id from resources other than Reactome
#' @param retrieval entities to be retrieved, including "subunits", "complexes", "componentOf", "otherForms"
#' @param resource resource other than Reactome, e.g. UniProt, Ensembl (required for "complexes")
#' @param subunitsExcludeStructures whether contained Complexes and EntitySets are excluded when retrieving "subunits"
#' @return a dataframe containing requested information
#' @examples
#' getEntities("R-HSA-5674003", retrieval="subunits")
#' getEntities("P00533", retrieval="complexes", resource="UniProt")
#' @rdname getEntities
#' @family getContent
#' @export
getEntities <- function(id, retrieval=c("subunits", "complexes", "componentOf", "otherForms"),
                        resource="Reactome", subunitsExcludeStructures=FALSE) {
    # Default to "subunits" when the caller gave no explicit choice
    if (missing(retrieval)) {
        message("Retrieval argument not specified, retrieving 'subunits'... For 'complexes', 'componentOf', 'otherForms', specify 'retrieval'")
    }
    retrieval <- match.arg(retrieval, several.ok=FALSE)
    # "complexes" requires a non-Reactome id; every other retrieval requires a Reactome id
    reactome.resource <- resource == "Reactome"
    if (retrieval == "complexes" && reactome.resource) {
        stop("Please use an id from other resources (e.g. UniProt, Ensembl)
         to retrieve complexes and specify the resource that the id comes from")
    }
    if (retrieval != "complexes" && !reactome.resource) {
        stop("Please use Reactome as resource and specify a Reactome stable or db id")
    }
    # Assemble the endpoint for the requested relationship
    base <- getOption("base.address")
    endpoint <- switch(retrieval,
        subunits  = file.path(base, "data/complex", id,
                              paste0("subunits?excludeStructures=", tolower(subunitsExcludeStructures))),
        complexes = file.path(base, "data/complexes", resource, id),
        file.path(base, "data/entity", id, retrieval))
    .retrieveData(endpoint, as="text")
}
#' EventsHierarchy queries
#'
#' Events (Pathways and Reactions) in Reactome are organized in a hierarchical
#' structure for every species. By following all `hasEvent` relationships,
#' this method retrieves the full event hierarchy for any given \strong{main species}.
#'
#' @param main.species name or taxon/db id or abbreviation of \strong{main species} in Reactome
#' @return a nested dataframe containing full event hierarchy for a given main species
#' @examples
#' \dontrun{
#' getEventsHierarchy("chicken")
#' getEventsHierarchy("XTR")
#' }
#' @rdname getEventsHierarchy
#' @seealso \code{\link{getSpecies}} to get the main species list
#' @family getContent
#' @export
getEventsHierarchy <- function(main.species) {
    # The endpoint is keyed by NCBI taxonomy id, so resolve the species first
    taxon <- .matchSpecies(main.species, "taxId")
    hierarchy.url <- file.path(getOption("base.address"), "data/eventsHierarchy", taxon)
    .retrieveData(hierarchy.url, as="text")
}
#' Orthology related queries
#'
#' This function retrieves the orthology for any given Event or Entity in the specified species.
#' More information on inferred events see \href{here}{https://www.reactome.org/pages/documentation/electronically-inferred-events/}.
#'
#' @param id a stable or db id of an Event or Entity
#' @param species name or taxon id or dbId or abbreviation of species
#' @return a list containing the orthology for given Event or Entity in the specified species
#' @examples
#' getOrthology("R-HSA-5674003", "pig")
#' @rdname getOrthology
#' @family getContent
#' @export
getOrthology <- function(id, species) {
    # Resolve the species twice: display name for user messaging, dbId for the URL
    display.name <- .matchSpecies(species, "displayName")
    db.id <- .matchSpecies(species, "dbId")
    endpoint <- file.path(getOption("base.address"), "data/orthology", id, "species", db.id)
    message("Returning inferred instances of ", sQuote(id), " in species ", sQuote(display.name), "...")
    reminder <- "Note that only orthologous Events or Entities in a different species can be retrieved"
    .retrieveData(endpoint, customMsg=reminder, as="text")
}
#' Participants related queries
#'
#' Data in Reactome are organized in a hierarchical manner - Pathways contain Reactions,
#' Reactions contain PhysicalEntities. This function is to get the participants
#' of a given Event.
#'
#' Details on options of `retrieval` arg:
#' - \strong{AllInstances}: retrieves all participants (PhysicalEntities) from a given Event and their ReferenceEntities
#' - \strong{PhysicalEntities}: retrieves all the PhysicalEntities that take part in a given Event
#' - \strong{ReferenceEntities}: retrieves the ReferenceEntities for all PhysicalEntities in every constituent Pathway/Reaction
#' - \strong{EventsInPathways}: recursively retrieves all the Events contained in any given Event
#'
#' @param event.id a stable or db id of an Event (pathways and reactions)
#' @param retrieval participants to be retrieved, including "AllInstances", "PhysicalEntities", "ReferenceEntities", "EventsInPathways"
#' @return a dataframe containing requested participants; for "AllInstances" on a
#' Reaction, extra columns `type` (role: input/output/catalyst/regulator) and
#' `numOfEntries` are added
#' @examples
#' getParticipants("R-HSA-6804741", "AllInstances")
#' getParticipants("R-HSA-69306", "EventsInPathways")
#' @rdname getParticipants
#' @family getContent
#' @export
getParticipants <- function(event.id, retrieval=c("AllInstances", "PhysicalEntities",
                                                  "ReferenceEntities", "EventsInPathways")) {
    path <- "data/participants"
    # write url; the bare endpoint returns all participants ("AllInstances")
    url <- file.path(getOption("base.address"), path, event.id) #all participants
    if (missing(retrieval)) {
        message("Retrieval argument not spcified, retrieving 'AllInstances'... For others, specify 'retrieval'")
    }
    retrieval <- match.arg(retrieval, several.ok = FALSE)
    msg <- NULL
    if (retrieval == "PhysicalEntities") {
        url <- file.path(url, "participatingPhysicalEntities")
    } else if (retrieval == "ReferenceEntities") {
        url <- file.path(url, "referenceEntities")
    } else if (retrieval == "EventsInPathways") {
        # in a different path/method - /data/pathway/{id}/containedEvents
        url <- file.path(getOption("base.address"), "data/pathway", event.id, "containedEvents")
        msg <- "'Events' are found in the 'hasEvent' attribute of Pathways"
    }
    # retrieve the participant table from the Content Service
    participants <- .retrieveData(url, customMsg=msg, as="text")
    # annotate instances in ReactionLikeEvents if retrieving AllInstances:
    # pull the full Reaction record and label each participant with its role(s)
    if (retrieval == "AllInstances") {
        all.info <- query(event.id)
        if (all.info[["className"]] == "Reaction") {
            # Empty columns (both initialized to 0 via the chained assignment)
            participants$type <- rep(0, nrow(participants)) -> participants$numOfEntries
            # input/output/catalysts/regulations: scan each role slot of the Reaction
            for (component in c("input", "output", "catalystActivity", "regulatedBy")) {
                sub.info <- all.info[[component]]
                # If it's a df, entries are all unique in the component;
                # if it's list, multiple entries exist
                # turn dataframe into a list so both cases iterate uniformly
                if (is.data.frame(sub.info)) sub.info <- list(sub.info)
                for (list in sub.info) {
                    # NOTE(review): `list` shadows base::list here; also, if this
                    # element is an integer vector of length > 1, `&&` below errors
                    # in R >= 4.3 -- confirm the API always yields scalars here
                    if (is.integer(list) && list %in% participants$peDbId) {
                        # only an id, no other info: bump the entry count and set
                        # the role if none was assigned yet
                        idx <- participants$peDbId == list
                        participants[idx, ]$numOfEntries <- participants[idx, ]$numOfEntries + 1
                        if (participants[idx, ]$type == 0) {
                            participants[idx, ]$type <- component
                        }
                    } else if (is.list(list)) {
                        # get the id; catalysts/regulators nest the entity one level deeper
                        if (component == "catalystActivity") {
                            id <- list$physicalEntity$dbId
                        } else if (component == "regulatedBy") {
                            id <- list$regulator$dbId
                        } else {
                            id <- list$dbId
                        }
                        pe.id <- id[id %in% participants$peDbId]
                        for (i in pe.id) {
                            idx <- participants$peDbId == i
                            tmp.type <- participants[idx, ]$type
                            if (tmp.type != 0) {
                                # already has role(s): append this one, comma-separated
                                participants[idx, ]$type <- paste0(tmp.type, ",", component)
                            } else {
                                participants[idx, ]$type <- component
                                participants[idx, ]$numOfEntries <- participants[idx, ]$numOfEntries + 1
                            }
                        }
                    }
                }
            }
            # rename the internal slot names to user-facing role labels
            participants$type <- gsub("catalystActivity", "catalyst", participants$type)
            participants$type <- gsub("regulatedBy", "regulator", participants$type)
            # rearrange the columns into a stable, readable order
            participants <- participants[ ,c("peDbId", "displayName", "schemaClass", "type", "numOfEntries", "refEntities")]
        }
    }
    participants
}
#' Pathway related queries
#'
#' To get the Events that contain the given PhysicalEntity or Event (i.e. subpathway).
#'
#' @param id a stable or db id of a PhysicalEntity or Event present in the pathways
#' @param species name or taxon id or dbId or abbreviation of species
#' @param allForms if set to \code{TRUE}, all low level pathways that contain the given PhysicalEntity (not Event) in all forms returned
#' @param top.level if set to \code{TRUE}, only top-level pathways returned
#' @return a dataframe containing requested pathways
#' @examples
#' getPathways("R-HSA-199420", "Homo sapiens")
#' @rdname getPathways
#' @family getContent
#' @importFrom data.table rbindlist
#' @importFrom foreach foreach %dopar%
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @export
getPathways <- function(id, species=NULL, allForms=FALSE, top.level=FALSE) {
    path <- "data/pathways/low/entity"
    # write the full url: /data/pathways/low/entity/{id}[/allForms][?species={taxId}]
    url <- file.path(getOption("base.address"), path, id)
    if (allForms) url <- file.path(url, "allForms")
    if (!is.null(species)) url <- paste0(url, "?species=", .matchSpecies(species, "taxId"))
    # retrieve the low-level pathways containing the entity/event
    pathways <- .retrieveData(url, as="text")
    # map to top level pathways by walking each returned pathway's ancestor chain
    if (top.level) {
        # NOTE(review): a 1-worker cluster gives no parallel speedup while still
        # paying the cluster setup cost -- confirm whether >1 workers were intended
        cl <- makeCluster(1)
        registerDoParallel(cl)
        # combiner: row-bind partial results, filling columns absent in some responses
        dfcomb <- function(...) {
            rbindlist(list(...), fill = TRUE)
        }
        top.pathways <- foreach(id=pathways$dbId, .export=c(".retrieveData", ".checkStatus"), .combine=dfcomb) %dopar% {
            # /data/event/{id}/ancestors returns the ancestor chains of this event
            ancestors.url <- file.path(getOption("base.address"), "data/event", id, "ancestors")
            ancestors <- .retrieveData(ancestors.url, as="text")
            ancestors <- ancestors[[1]]
            # keep only TopLevelPathway ancestors
            ancestors[ancestors$schemaClass == "TopLevelPathway",]
        }
        stopCluster(cl)
        # NOTE(review): this errors if top.pathways has 0 rows (seq(1, 0) has
        # length 2); duplicate top-level pathways are also not de-duplicated
        rownames(top.pathways) <- seq(1, nrow(top.pathways))
        return(top.pathways)
    } else {
        return(pathways)
    }
}
#' Person queries
#'
#' Retrieves a specific personβs property by his/her name or OrcidId or dbId.
#' When only a name is given, it is matched against the curator/author list and,
#' if the match is ambiguous, the user is prompted interactively to pick a dbId.
#'
#' @param name Personβs first or/and last name
#' @param id Person's Orcid Id or DbId
#' @param attributes Property for a person. Return all available attributes if it is not specified.
#' @return a list of requested information
#' @examples
#' getPerson(name="Robin Haw", attributes=c("displayName", "affiliation"))
#' @rdname getPerson
#' @family getContent
#' @export
getPerson <- function(name=NULL, id=NULL, attributes=NULL) {
    # ensure the input: need at least one identifier
    # (scalar condition, so use short-circuit && rather than vectorized &)
    if (is.null(name) && is.null(id)) stop("Must specify either a name or an ID.")
    # choose an id if name input only
    if (is.null(id)) {
        message('Matching ', dQuote(name), ' with names in current data...\n')
        names.df <- .listPeople(name)
        if (nrow(names.df) == 1) {
            # unique match: take it directly
            id <- as.character(names.df$dbId)
        } else {
            # ambiguous: show candidates and ask the user to disambiguate
            print(names.df, row.names=FALSE)
            id <- readline(prompt="Enter the matched dbId: ")
            while (!id %in% names.df$dbId) {
                id <- readline(prompt=paste0(id, " is not in IDs above, re-enter: "))
            }
        }
    }
    # retrieve person's information
    path <- "data/person"
    if (is.null(attributes)) {
        # retrieve all info by default
        url <- file.path(getOption("base.address"), path, id)
        all.info <- .retrieveData(url, as="text")
        # add authored pathways if any
        ap.url <- file.path(url, "authoredPathways")
        authoredPathways <- .retrieveData(ap.url, as="text")
        if (length(authoredPathways) != 0) all.info[["authoredPathways"]] <- authoredPathways
    } else {
        # retrieve specified properties one by one
        all.info <- list(Id=id)
        for (attribute in attributes) {
            tmp.url <- file.path(getOption("base.address"), path, id, attribute)
            tmp <- .retrieveData(tmp.url, fromJSON=FALSE, as="parse")
            # Plain-text responses are stored as-is; structured responses are
            # re-fetched as parsed JSON. (Plain if/else replaces the previous
            # side-effecting ifelse(), which is meant for vectors.)
            if (is.character(tmp)) {
                all.info[[attribute]] <- tmp
            } else {
                all.info[[attribute]] <- .retrieveData(tmp.url, fromJSON=TRUE, as="text")
            }
        }
    }
    all.info
}
#' List filter items
#'
#' To list the available filtering options for `searchQuery()`, and their counts.
#'
#' @param items categories of query, including "species", "types", "compartments", "keywords", or "all"
#' @param facet return faceting information (entry counts) or not
#' @return available search items; a list of dataframes (plus the total count
#' when \code{facet=TRUE})
#' @examples
#' listSearchItems()
#' @rdname listSearchItems
#' @seealso \code{\link{searchQuery}} to search in Reactome
#' @export
listSearchItems <- function(items=c("all", "species", "types", "compartments", "keywords"), facet=FALSE) {
    path <- "search/facet"
    # ensure inputs
    if (missing(items)) message('Item argument not specified, returning all kinds of items...')
    items <- match.arg(items, several.ok = TRUE)
    # retrieve the faceting summary from the Content Service
    url <- file.path(getOption("base.address"), path)
    facet.data <- .retrieveData(url, as="text")
    # filter: keep the total-count element (first) plus the requested "*Facet"
    # elements. (Plain if/else replaces the previous side-effecting ifelse(),
    # which is meant for vectors; facet.data also no longer shadows base::list.)
    if ("all" %in% items) {
        select.name <- names(facet.data)
    } else {
        select.name <- c(names(facet.data)[1], names(facet.data)[gsub("Facet$", "", names(facet.data)) %in% items])
    }
    final.list <- facet.data[select.name]
    # each facet element stores its entries under "available"
    final.list <- lapply(final.list, function(x) if (inherits(x, "list")) {x[["available"]]} else {x})
    # modify final return
    if (!facet) {
        # remove the counts: keep only dataframe elements, drop count columns
        final.list <- final.list[vapply(final.list, function(x) inherits(x, "data.frame"), logical(1))]
        final.list <- lapply(final.list, function(x) data.frame(name=x$name))
        names(final.list) <- gsub("Facet$", "", names(final.list))
    }
    final.list
}
#' Common data retrieval
#'
#' This function retrieves a Reactome Database object that has all its properties
#' and direct relationships (relationships of depth 1) filled, while it also
#' includes any second level relationships regarding regulations and catalysts.
#'
#' @param id a stable or db id of \strong{any} Reactome entry
#' @return a list containing comprehensive information (all attributes) for a given id
#' @examples
#' query("R-HSA-60140")
#' @rdname query
#' @family getContent
#' @seealso \code{\link{searchQuery}} to search in Reactome
#' @export
query <- function(id) {
    # Endpoint: {base.address}/data/query/enhanced/{id}
    endpoint <- file.path(getOption("base.address"), "data/query/enhanced", id)
    .retrieveData(endpoint, as="text")
}
#' Schema class queries
#'
#' Fetch instances by Class. All Classes see
#' \href{https://reactome.org/content/schema/DatabaseObject}{Reactome data schema}.
#'
#' @param class schema class name
#' @param species name or taxon id or dbId or abbreviation of species. Only Event and PhysicalEntity classes can specify species
#' @param all to return ALL entries or not, default is \code{FALSE}
#' @param rows the number of entries retrieved, default is 1000
#' @param minimised to retrieve simplified entries (db id, stable id, displayName, type) or not, default is \code{FALSE}
#' @param reference to retrieve simplified reference objects (db id, external identifier,
#' external database name) or not, default is \code{FALSE}. Only for ReferenceEntity or ExternalOntology class
#' @return a sorted dataframe containing entries that belong to the specified schema class
#' @examples
#' \dontrun{
#' getSchemaClass(class="Drug", all=TRUE)
#' }
#' getSchemaClass(class="Regulation", rows=20, minimised=TRUE)
#' getSchemaClass(class="Complex", species="pig", rows=10)
#' @importFrom data.table rbindlist
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @importFrom foreach foreach %dopar%
#' @importFrom parallel makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @rdname getSchemaClass
#' @family getContent
#' @export
getSchemaClass <- function(class, species=NULL, all=FALSE, rows=1000,
                           minimised=FALSE, reference=FALSE) {
    # reminder: the /reference endpoint only exists for these two classes
    if (reference && !class %in% c("ReferenceEntity", "ExternalOntology")) {
        stop("Note that 'class' needs to be either ReferenceEntity or ExternalOntology, and no species filter")
    }
    path <- "data/schema"
    url <- file.path(getOption("base.address"), path, class)
    msg <- NULL
    # get the count first, so we know how many pages to request
    cnt.url <- file.path(url, "count")
    if (!is.null(species)) {
        species.id <- .matchSpecies(species, "taxId")
        cnt.url <- paste0(cnt.url, "?species=", species.id)
        msg <- 'Note that if "species" is specified, "class" needs to be an instance of Event or subclasses in PhysicalEntity'
    }
    all.cnt <- as.integer(.retrieveData(cnt.url, customMsg=msg, fromJSON=FALSE, as="text"))
    # set the range of entries: cap the request at what actually exists
    if ((all) || (!all && rows > all.cnt)) rows <- all.cnt
    # NOTE(review): ifelse() on a scalar works here but a plain if/else would be
    # more idiomatic
    species.name <- ifelse(!is.null(species), .matchSpecies(species, "displayName"), "ALL")
    message("Total ", all.cnt, " entries of ", class, " with species ",
            species.name, ", retrieving ", format(rows, scientific=FALSE),
            " of them...\n")
    # calculate the range of pages; page size depends on the endpoint:
    # full objects come 25/page, minimised/reference forms allow 20000/page
    max.class.offset <- 25
    max.other.offset <- 20000
    offset <- ifelse(!minimised && !reference, max.class.offset, max.other.offset)
    end.page <- ceiling(rows / offset) #round it up
    if ((rows / offset) %% 1 != 0) {
        # a partial last page: either shrink the page size (single page) or
        # remember the size of the final page (multiple pages)
        if (end.page == 1) offset <- rows
        if(!all && end.page != 1) end.offset <- rows %% offset
    }
    # retrieve data from the endpoint variant matching the requested detail level
    if (minimised) url <- file.path(url, "min")
    if (reference) url <-file.path(url, "reference")
    url <- paste0(url, "?offset=", offset)
    if (!is.null(species)) url <- paste0(url, "&species=", species.id)
    # use doParallel - parallelly GET the urls with different pages
    cl <- makeCluster(2, outfile="") # make clusters. 'outfile' for progress bar
    registerDoParallel(cl)
    pb <- txtProgressBar(min=0, max=end.page, style=3)
    # combiner: row-bind page results, filling columns missing on some pages
    dfcomb <- function(...) {
        rbindlist(list(...), fill = TRUE)
    }
    page <- 1 #to avoid note in R check
    final.df <- foreach(page=seq(1, end.page), .export=c(".retrieveData", ".checkStatus"), .combine=dfcomb) %dopar% {
        setTxtProgressBar(pb, page)
        # change the offset for the last page if it's different
        if (page == end.page && exists("end.offset")) {
            url <- gsub(paste0("offset=", offset), paste0("offset=", end.offset), url)
        }
        tmp.url <- paste0(url, "&page=", page)
        .retrieveData(tmp.url, fromJSON=TRUE, as="text")
    }
    stopCluster(cl)
    # sort by dbId and renumber rows for a deterministic result
    final.df <- final.df[order(final.df$dbId),]
    rownames(final.df) <- seq(1, nrow(final.df))
    final.df
}
#' Search query
#'
#' Search for Reactome objects by name or identifiers.
#'
#' @param query name or dbId or stId of a search term from any class
#' @param species name or taxon id or dbId or abbreviation of species
#' @param types type filter, such as "Protein", "Complex", "Reaction", etc
#' @param compartments compartment filter, such as "cytosol", "plasma membrane", "nucleoplasm", etc
#' @param keywords keyword filter, such as "binds", "phosphorylates", "transports", etc
#' @param cluster cluster returned data or not
#' @param range start row and the number of rows to include, e.g. `range = c(0, 2)`
#' @return a list of information about the search term
#' @examples
#' searchQuery(query="Biological oxidation", species="Mus musculus", types=c("Pathway", "Reaction"))
#' @seealso \code{\link{listSearchItems}} for available filters
#' @rdname searchQuery
#' @export
searchQuery <- function(query, species=NULL, types=NULL, compartments=NULL,
                        keywords=NULL, cluster=TRUE, range=NULL) {
    # Snapshot every supplied (non-NULL) argument; must run before locals exist
    supplied <- Filter(Negate(is.null), as.list(environment()))
    # Percent-encode whitespace for the URL
    encode <- function(s) gsub("\\s", "%20", s)
    search.url <- file.path(getOption("base.address"),
                            paste0("search/query", "?query=", encode(query)))
    ## everything except the term itself and the output controls acts as a filter
    query.filters <- supplied[!names(supplied) %in% c("query", "cluster", "range")]
    if ("species" %in% names(query.filters)) {
        query.filters[["species"]] <- .matchSpecies(query.filters[["species"]], "displayName")
    }
    status <- paste0("Searching for term '", query, "'... ")
    for (fname in names(query.filters)) {
        fvals <- query.filters[[fname]]
        status <- paste0(status, fname, ":'", paste(fvals, collapse = "' & '"), "' ")
        for (fval in fvals) {
            search.url <- paste0(search.url, "&", fname, "=", encode(fval))
        }
    }
    cat(paste0(status, "\n"))
    ## cluster the returned data or not
    search.url <- paste0(search.url, "&cluster=", tolower(cluster))
    ## restrict rows to include
    if (!is.null(range)) {
        search.url <- paste0(search.url, "&Start%20row=", range[1], "&rows=", range[2])
    }
    # run the spell check before the request so its message precedes the results
    check.msg <- spellCheck(query)
    .retrieveData(search.url, customMsg=check.msg, as="text")
}
#' Species queries
#'
#' This method retrieves the list of all or main species in Reactome knowledgebase.
#'
#' @param main determine whether return main species, which are those have
#' either manually curated or computationally inferred pathways
#' @return a dataframe of species information
#' @examples
#' # get a list of main species
#' getSpecies(main=TRUE)
#' @rdname getSpecies
#' @family getContent
#' @export
getSpecies <- function(main=FALSE) {
    path <- "data/species"
    # select the endpoint variant: /main (curated/inferred species only) or /all
    # (plain if/else replaces ifelse(), which is meant for vectors)
    variant <- if (main) "main" else "all"
    url <- file.path(getOption("base.address"), path, variant)
    .retrieveData(url, as="text")
}
|
0162f1219ac07b08bf3310aee9da96563eb58342 | 2621ac3d3a4bfce0d55f2a5c78aa80e4324ee766 | /HunderTroutIPM_ContinuousScenarios_mH&mO1.R | cc8e8ab746c22519f77c264f82a77ec74968660e | [] | no_license | ChloeRN/HunderTroutIPM | d045baa657550002744175c82ec47d8aa11a86e5 | d2b58e50ea1d8aba3e0b4671310010d7ea515490 | refs/heads/main | 2023-01-21T16:08:49.575614 | 2020-11-23T17:01:12 | 2020-11-23T17:01:12 | 315,370,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,981 | r | HunderTroutIPM_ContinuousScenarios_mH&mO1.R |
# This code builds the IPM for three different assumptions of below-dam penalty on early mortality
# (none, +50%, +100%) and a set of combinations of decreases in harvest mortality
# and dam passage mortality of above-dam spawners (= background mortality of smaller than
# average above-dam spawners).
# The relevant sections in the paper are:
# - Methods: 2.3.4
# - Results: 3.3
# - Figures: Figures 5 a) & S1.5
# NOTE:
# Due to the large number of high-dimensional matrices created and analysed here,
# this code takes a substantial amount of time to run.
#------
# SETUP
#------
## Execute code for IPM building blocks
## (expected to define build.IPM(), wvlambda(), test.year, Lx, Ux -- confirm)
source('HunderTroutIPM_IPM_Building.R')
## Set blackbox parameters
## NOTE(review): the -log() forms presumably convert survival probabilities
## (e.g. 0.082, 0.353, 0.75) into mortality hazard rates -- confirm against the
## IPM building code
m0 <- -log(0.082)
Mu.mj <- -log(0.353)
Mu.mdam <- -log(0.75)
Mu.mOs <- 0.62
beta2.mj <- -0.2
beta2.mdam <- 0.05
beta2.mOs <- -0.7
#-----------------------
# BASELINE IPMs - LAMBDA
#-----------------------
## ORIGINAL: baseline kernels with no mitigation (mH.factor = mO1size.factor = 1)
## under early-mortality penalties of 0% (dPenalty.0=1), +50% (=1.5), +100% (=2)
IPM1.orig <- build.IPM(t=test.year, sex=1, orig=1, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0=1, dPenalty.j=1, l.limit=Lx, u.limit=Ux, mH.factor=1, mO1size.factor=1)$IPM
IPM2.orig <- build.IPM(t=test.year, sex=1, orig=1, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0=1.5, dPenalty.j=1, l.limit=Lx, u.limit=Ux, mH.factor=1, mO1size.factor=1)$IPM
IPM3.orig <- build.IPM(t=test.year, sex=1, orig=1, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0=2, dPenalty.j=1, l.limit=Lx, u.limit=Ux, mH.factor=1, mO1size.factor=1)$IPM
#-----------------------------------------------
# CONTINUOUS MITIGATION SCENARIOS - CALCULATIONS
#-----------------------------------------------
## Build one IPM for a given mitigation-factor combination, run its eigen
## analysis, and return a one-row data frame with the factors and the
## resulting asymptotic population growth rate (lambda).
pop.analysis <- function(t, sex, orig, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0, dPenalty.j, l.limit, u.limit, mH.factor, mO1size.factor){
  # Construct the projection kernel for this parameter combination
  kernel <- build.IPM(t, sex, orig, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0, dPenalty.j, l.limit, u.limit, mH.factor, mO1size.factor)$IPM
  # Eigen analysis of the kernel yields the asymptotic growth rate
  eig <- wvlambda(kernel)
  # Collate the scenario coordinates with the resulting lambda
  data.frame(mH.factor = mH.factor, mO1size.factor = mO1size.factor, lambda = eig$lambda)
}
## Make different perturbation combinations: a 51 x 51 grid of proportional
## reductions (from 0% to 100% in 2% steps) in harvest mortality (mH) and in
## dam mortality of small above-dam spawners (mO1size)
mH.fac <- seq(1, 0, by = -1/50)
mO.fac <- seq(1, 0, by = -1/50)
pert.params <- expand.grid(mH.fac, mO.fac)
## Calculate lambda for all possible combinations, once per early-mortality
## penalty assumption (dPenalty.0 = 1, 1.5, 2), then reshape the lambda column
## into a mH x mO matrix for plotting
test1.X <- do.call("rbind", sapply(1:nrow(pert.params), FUN = function(z) pop.analysis(test.year, 1, 1, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0=1, dPenalty.j=1, l.limit=Lx, u.limit=Ux, mH.factor=pert.params[z,1], mO1size.factor=pert.params[z,2]), simplify = FALSE))
res1.mat <- matrix(test1.X$lambda, nrow = length(mH.fac), ncol = length(mO.fac), dimnames = list(mH.fac, mO.fac))
test2.X <- do.call("rbind", sapply(1:nrow(pert.params), FUN = function(z) pop.analysis(test.year, 1, 1, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0=1.5, dPenalty.j=1, l.limit=Lx, u.limit=Ux, mH.factor=pert.params[z,1], mO1size.factor=pert.params[z,2]), simplify = FALSE))
res2.mat <- matrix(test2.X$lambda, nrow = length(mH.fac), ncol = length(mO.fac), dimnames = list(mH.fac, mO.fac))
test3.X <- do.call("rbind", sapply(1:nrow(pert.params), FUN = function(z) pop.analysis(test.year, 1, 1, m0, Mu.mj, Mu.mdam, Mu.mOs, beta2.mj, beta2.mdam, beta2.mOs, dPenalty.0=2, dPenalty.j=1, l.limit=Lx, u.limit=Ux, mH.factor=pert.params[z,1], mO1size.factor=pert.params[z,2]), simplify = FALSE))
res3.mat <- matrix(test3.X$lambda, nrow = length(mH.fac), ncol = length(mO.fac), dimnames = list(mH.fac, mO.fac))
#################################
# UNCHANGED SMOLT DAM MORTALITY #
#################################
## Heatmaps of lambda over the mitigation grid (one panel per penalty level)
library(viridis)
# Prepare vectors for labelling: percentage labels at every 5th grid step
perc.dec <- c('0%', '10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%')
prop.ind <- rev(seq(1, 51, 5))
# Set plotting limits (common colour scale across the three panels)
llim <- 0.706
ulim <- 1.154
# Plot: no early-mortality penalty; white contours mark lambda = 0.8/0.9/1.0/1.1
par(mar=c(5.1,5.1,4.1,2.1))
image.plot(res1.mat, col = magma(100), zlim = c(llim, ulim), main = expression('Asymptotic growth rate'~lambda~'(no penalty)'), xlab = 'Decrease in harvest mortality', ylab = '', xaxt= "n", yaxt= "n", cex.lab = 1.3, cex.main = 1.5, axis.args = list(cex.axis = 1.2))
title (ylab = 'Decrease in dam mortality (small spawners)', line=3.8, cex.lab=1.3)
axis(1, at = mH.fac[prop.ind], labels = perc.dec, cex.axis = 1.2)
axis(2, at = mO.fac[prop.ind], labels = perc.dec, las = 1, cex.axis = 1.2)
contour(res1.mat, add = TRUE, levels = c(0.8, 0.9, 1.0, 1.1), col = 'white', labcex = 1, method = 'edge', lty = c(2,2,1,2), crt = 90, drawlabels = F)
# Plot: 50% penalty panel
# NOTE(review): unlike the other two panels, no contour() call follows this one
# -- confirm whether that omission is intentional
par(mar=c(5.1,5.1,4.1,2.1))
image.plot(res2.mat, col = magma(100), zlim = c(llim, ulim), main = expression('Asymptotic growth rate'~lambda~'(50% penalty)'), xlab = 'Decrease in harvest mortality', ylab = '', xaxt= "n", yaxt= "n", cex.lab = 1.3, cex.main = 1.5, axis.args = list(cex.axis = 1.2))
title (ylab = 'Decrease in dam mortality (small spawners)', line=3.8, cex.lab=1.3)
axis(1, at = mH.fac[prop.ind], labels = perc.dec, cex.axis = 1.2)
axis(2, at = mO.fac[prop.ind], labels = perc.dec, las = 1, cex.axis = 1.2)
# Plot: 100% penalty panel
par(mar=c(5.1,5.1,4.1,2.1))
image.plot(res3.mat, col = magma(100), zlim = c(llim, ulim), main = expression('Asymptotic growth rate'~lambda~'(100% penalty)'), xlab = 'Decrease in harvest mortality', ylab = '', xaxt= "n", yaxt= "n", cex.lab = 1.3, cex.main = 1.5, axis.args = list(cex.axis = 1.2))
title (ylab = 'Decrease in dam mortality (small spawners)', line=3.8, cex.lab=1.3)
axis(1, at = mH.fac[prop.ind], labels = perc.dec, cex.axis = 1.2)
axis(2, at = mO.fac[prop.ind], labels = perc.dec, las = 1, cex.axis = 1.2)
contour(res3.mat, add = TRUE, levels = c(0.8, 0.9, 1.0, 1.1), col = 'white', labcex = 1, method = 'edge', lty = c(2,2,1,2), crt = 90, drawlabels = F)
d2c9674998bfdf1156382e663f81e78fb2fac480 | 3a8d972ddbb072063766c9569cc67159f8c9ba02 | /plot3.R | 9a1553afe5f3a363a775b779aa1631a9962727b6 | [] | no_license | justincassidy/ExData_Plotting1 | 49ec9beea25d74d28d5b1450e100464ca30f4b5c | dddbf8ed4c2b2f7aa33b6c7f31f28f8ac8085a26 | refs/heads/master | 2021-01-17T23:36:16.939031 | 2015-11-08T01:50:11 | 2015-11-08T01:50:11 | 45,758,981 | 0 | 0 | null | 2015-11-07T23:22:53 | 2015-11-07T23:22:51 | null | UTF-8 | R | false | false | 1,626 | r | plot3.R | # Plot 3 Code for Coursera Exploratory Data Analysis Course Project 1
# Read in raw data as "plotdata" data frame
read.table("household_power_consumption.txt",sep=";",header = TRUE)->plotdata
# Convert Date and Time columns to a "datetime" column and convert to POSIXlt object
plotdata$datetime = paste(as.character(plotdata$Date),as.character(plotdata$Time))
plotdata$datetime=strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S")
# Subset data frame to only the relevant days to graph for this exercise
plotdataNEW <- subset(plotdata, datetime >= as.POSIXct('2007-02-01') & datetime < as.POSIXct('2007-02-03'))
# Convert relevant field to numeric
plotdataNEW$Global_active_power=as.numeric(as.character(plotdataNEW$Global_active_power))
plotdataNEW$Sub_metering_1=as.numeric(as.character(plotdataNEW$Sub_metering_1))
plotdataNEW$Sub_metering_2=as.numeric(as.character(plotdataNEW$Sub_metering_2))
plotdataNEW$Sub_metering_3=as.numeric(as.character(plotdataNEW$Sub_metering_3))
# Plots graph on screen device, then saves to appropriately labeled PNG file
png(width=480,height=480,file="plot3.png")
plot(plotdataNEW$datetime,plotdataNEW$Sub_metering_1,col="black",type="l",xlab="",ylab="Energy sub metering")
lines(plotdataNEW$datetime,plotdataNEW$Sub_metering_2,col="red",type="l",xlab="",ylab="Energy sub metering")
lines(plotdataNEW$datetime,plotdataNEW$Sub_metering_3,col="blue",type="l",xlab="",ylab="Energy sub metering")
legend("topright",lty=1,col = c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=1.2,pt.cex = 1)
#dev.copy(png,file="plot3.png",width=480,height=480)
dev.off()
|
6ad1ac66fea4c7d76f1cafaf9b63a3998e288761 | 5327f42ac4be4ca7238806e4c2a61ed7ad863d6a | /npoint | 2928532d4eb3fe378ace3ef048692bfdfc8c6d18 | [
"MIT"
] | permissive | IBIC/neuropointillist | 9cf13776653a8912a44d41e2fb4f9dd9e6bae6d3 | d6b481f44fa3114c7096d3c986a25552a4a73c23 | refs/heads/master | 2021-03-27T12:31:09.794084 | 2020-09-14T16:00:10 | 2020-09-14T16:00:10 | 56,612,725 | 29 | 10 | MIT | 2020-09-08T16:57:37 | 2016-04-19T16:16:51 | R | UTF-8 | R | false | false | 17,021 | npoint | #!/usr/bin/env Rscript
#### Make sure packages are installed and load them
#rniftilib is not in this location
#if (!suppressWarnings(suppressPackageStartupMessages(require(Rniftilib)))) install.packages("Rniftilib", repos="http://R-Forge.R-project.org")
# Bootstrap-install the CRAN dependencies on first use; require() is used here only
# to probe availability -- the definitive loading is done with library() below so a
# still-missing package errors loudly.
if (!suppressWarnings(suppressPackageStartupMessages(require(argparse)))) install.packages("argparse")
if (!suppressWarnings(suppressPackageStartupMessages(require(doParallel)))) install.packages("doParallel")
suppressWarnings(suppressPackageStartupMessages(library(RNifti)))
suppressWarnings(suppressPackageStartupMessages(library(argparse)))
suppressWarnings(suppressPackageStartupMessages(library(doParallel)))
suppressWarnings(suppressPackageStartupMessages(library(neuropointillist)))
#### Take in command line arguments
# CLI: a mandatory mask, up to five input file sets with matching covariate label
# files, the voxelwise model file, an output prefix, and one execution mode
# (-p shared-memory, or one of the --sgeN/--slurmN/--pbsN scheduler flags).
parser <- ArgumentParser(description="This program prepares your MRI data for group-level mixed effects modeling")
parser$add_argument("-m", "--mask", nargs=1, type="character", help="Mask limiting the voxels that will be analyzed", required=TRUE)
parser$add_argument("--set1", nargs=1, help="List of files at first occasion", required=TRUE)
parser$add_argument("--set2", nargs=1, help="List of files at second occasion")
parser$add_argument("--set3", nargs=1, help="List of files at third occasion")
parser$add_argument("--set4", nargs=1, help="List of files at fourth occasion")
parser$add_argument("--set5", nargs=1, help="List of files at fifth occasion")
parser$add_argument("--setlabels1", nargs=1, help="Covariates for files at first occasion", required=TRUE)
parser$add_argument("--setlabels2", nargs=1, help="Covariates for files at second occasion")
parser$add_argument("--setlabels3", nargs=1, help="Covariates for files at third occasion")
parser$add_argument("--setlabels4", nargs=1,help="Covariates for files at fourth occasion")
parser$add_argument("--setlabels5", nargs=1, help="Covariates for files at fifth occasion")
parser$add_argument("--model", nargs=1, help="R code that defines the voxelwise-model and any initialization", required=TRUE)
parser$add_argument("--covariates", nargs=1, type="character", help="Covariates that will be merged with the design matrix")
parser$add_argument("--output", nargs=1, type="character", help="Output prefix to prepend to output files", required=TRUE)
parser$add_argument("--debugfile", nargs=1, type="character", help="Save voxeldat and designmat objects to this file to develop, test and debug the processVoxel function")
parser$add_argument("-t", "--testvoxel", type="integer", help="Specify a voxel on which the model works to determine output files", default="-1")
parser$add_argument("-p", "--processors", type="integer", help="Run using shared memory with p processors")
parser$add_argument("--sgeN", type="integer", nargs=1, help="Run using SGE generating N jobs")
parser$add_argument("--slurmN", type="integer", nargs=1, help="Run using Slurm generating N jobs")
parser$add_argument("--pbsN", type="integer", nargs=1, help="Run using PBS generating N jobs")
parser$add_argument("--pbsPre", type="character", nargs=1, help="Name of PBS preamble file to use if you wish to override default settings")
parser$add_argument("--permute", type="integer", nargs=1, help="Enter permutation testing mode with N permutations. This will generate N jobs, ignoring the number of jobs passed to any scheduler parameters. Each permutation will create a single 3D output file.")
# Arguments may alternatively be supplied by a file named readargs.R in the working
# directory that defines a character vector `cmdargs`; otherwise (or if that file
# does not define cmdargs) the real command line is parsed.
if (file.exists("readargs.R")) {
    source("readargs.R")
    if (exists("cmdargs")) { # check that cmdargs is defined
        args <- parser$parse_args(cmdargs)
    } else {
        args <- parser$parse_args()
    }
} else {
    args <- parser$parse_args()
}
###############################################################################
#### Check for mask and read it. It is mandatory and must exist.
maskfile <- args$mask
tryCatch({
    mask <- readNifti(maskfile);
}, error=function(e) {
    cat("Could not read mask file: ", maskfile, "\n")
    stop(e)
})
# save mask dimensions
mask.dims <- dim(mask)
# reduce to vector and obtain list of nonzero vertices; vertex numbers are
# positions in the flattened (column-major) mask volume
mask.vector <- as.vector(mask)
mask.vertices <- which(mask.vector > 0)
# assemble the x,y,z indices to create a reverse lookup table
# (columns are named dim1/dim2/dim3 by which(); used by imagecoordtovertex)
# FIX(review): spell out arr.ind -- the original "arr.in" only worked through
# partial argument matching.
mask.arrayindices <- data.frame(which(mask > 0, arr.ind=TRUE))
mask.arrayindices$vertex <- mask.vertices
#### Save original arguments for writing out calling info
origargs <- args
#### Do argument checking (neuropointillist helper; may normalize/validate args)
args <- npointCheckArguments(args)
#### Are we running in parallel?
# "Parallel" here means any non-sequential mode: shared memory (-p) or any of the
# three scheduler flags.
if (!is.null(args$processors) || !is.null(args$sgeN) || !is.null(args$slurmN) || !is.null(args$pbsN)) {
    runningParallel =TRUE
} else {
    runningParallel= FALSE
}
#### Did someone specify some combination of sge, slurm and PBS? Exit if they did
multipleschedulerflags <- sum(!is.null(args$sgeN), !is.null(args$slurmN), !is.null(args$pbsN))
if (multipleschedulerflags > 1) {
    message("You tried to create driver files for multiple schedulers. You can only specify one at a time.")
    message("If you want Slurm, use only the --slurmN flag.")
    message("If you want PBS, use only the --pbsN flag.")
    message("If you want SGE, use only the --sgeN flag.")
    stop("Please try again specifying only one scheduler flag.")
}
#### Check to see that PBS preamble is only specified with PBS option
if (!is.null(args$pbsPre)) {
    if (is.null(args$pbsN)) {
        stop("You have specified a PBS preamble without the PBS scheduler flag. You cannot use a PBS preamble with other schedulers.")
    }
    # make sure the preamble file exists
    if (!file.exists(args$pbsPre)) {
        stop("PBS preamble file ", args$pbsPre, " does not exist!")
    }
}
#### If we are writing out files for a scheduler, identify the number of files
# -1 is a sentinel meaning "not set"; it is sanity-checked again before splitting.
nSchedulerJobs <- -1 # for sanity
if (runningParallel) {
    if (!is.null(args$sgeN)) {
        nSchedulerJobs <- args$sgeN
    } else if (!is.null(args$slurmN)) {
        nSchedulerJobs <- args$slurmN
    } else if (!is.null(args$pbsN)) {
        nSchedulerJobs <- args$pbsN
    }
}
#### If we are in permutation mode, reset the number of scheduler jobs
#### In this mode, we will not split the data at all. We will generate a makefile
#### that runs the processVoxel code across all the data to generate each permutation
# Permutation mode requires a scheduler flag (whose job count is ignored) and is
# incompatible with shared-memory mode.
permutationMode <- 0
if (!is.null(args$permute)) {
    permutationMode <- 1
    nSchedulerJobs <- args$permute
    if (!is.null(args$processors)) {
        stop("Cannot specify -p flag with permute mode. You must specify a scheduler with some number of jobs, which will be ignored, and one job will be created for each permuation.")
    }
    if (!runningParallel) {
        stop("If running in permute mode, you must specify a scheduler with some number of jobs. This number of jobs will be ignored, and one job will be created for each permuation.")
    }
}
#### Map a 0-based FSL voxel coordinate (x, y, z) to its row position in the
#### mask lookup table `mask.arrayindices` (used to pick a test voxel).
#### A coordinate outside the mask produces a warning and falls back to vertex 1.
imagecoordtovertex <- function(x, y, z) {
    # FSL coordinates are 0-based; R indexing is 1-based
    target <- c(x, y, z) + 1
    hit <- mask.arrayindices$dim1 == target[1] &
        mask.arrayindices$dim2 == target[2] &
        mask.arrayindices$dim3 == target[3]
    matched <- mask.arrayindices[which(hit), ]
    if (is.data.frame(matched) && nrow(matched) == 0) {
        warning("This coordinate is not in the mask; returning 1")
        return(1)
    }
    # row names of the lookup table are the (1-based) vertex positions
    as.integer(row.names(matched))
}
###############################################################################
#### Calculate the number of data sets (occasions) actually supplied (1-5)
numberdatasets <- sum(!is.null(args$set1),
                      !is.null(args$set2),
                      !is.null(args$set3),
                      !is.null(args$set4),
                      !is.null(args$set5))
###############################################################################
#### Read in all the data sets
# npointReadDataSets returns the voxel data matrix (rows = observations,
# columns = in-mask voxels, judging by the dim() usage below) and the design matrix.
cat("Reading", numberdatasets, "data sets.\n")
data <- npointReadDataSets(args,numberdatasets,mask.vertices);
voxeldat <- data$voxeldat
designmat <-data$designmat
# free the intermediate container; voxeldat can be very large
rm(data)
gc()
###############################################################################
### Create the output directory if it does not exist
dir <- dirname(args$output)
if (!dir.exists(dir)) {
    dir.create(dir, recursive=TRUE)
}
###############################################################################
#### Read in covariates if specified and merge with other covariates specified
#### on the command line
if (!is.null(args$covariates)) {
    designmat <- npointMergeDesignmatWithCovariates(designmat,args$covariates,dim(voxeldat)[1])
}
###############################################################################
### If debugging file is specified, save out design matrix and voxel matrix
### to this file (plus the coordinate lookup helpers) so the processVoxel
### function can be developed interactively.
if(!is.null(args$debugfile)) {
    dir <- dirname(args$output)
    # add the prefix to the debug file name
    debugfilename <- paste(dir, args$debugfile,sep="/")
    save(designmat,voxeldat, imagecoordtovertex, mask.arrayindices, file=debugfilename)
}
###############################################################################
#### read model code
# Sourcing the model file is expected to define processVoxel (and may do any
# one-time initialization at the top level).
if (!is.null(args$model)) {
    modelfile <- args$model
    if (!file.exists(modelfile)) {
        stop("model file ", modelfile, " does not exist!")
    }
    result <- tryCatch({
        source(modelfile)
    }, error=function(e) {
        cat("There were errors in the model file ", modelfile, "\n")
        stop(e)
    })
}
# check to see that processVoxel is defined
if(!exists('processVoxel')) {
    stop("The model file did not define the processVoxel function")
}
###############################################################################
#### Do the parallel processing
#### Four execution paths: (1) permutation mode -- write scheduler driver files,
#### one job per permutation, without splitting the data; (2) shared-memory
#### (-p) -- run processVoxel over all vertices here with a FORK cluster;
#### (3) scheduler mode -- split the data into chunks and write Makefile +
#### submit scripts to be run later; (4) sequential fallback.
nvertices <- length(mask.vertices)
if (permutationMode) { # we are using SGE or SLURM or PBS
    # save the design matrix
    designmatname <- paste(args$output, "designmat.rds", sep="")
    makefilename <- paste(dirname(args$output), "/Makefile", sep="")
    nextflowfilename <- paste(dirname(args$output), "/make.nf", sep="")
    masterscript.local <- paste(dirname(args$output), "/runme.local", sep="")
    saveRDS(designmat,designmatname)
    # attach so processVoxel can refer to design-matrix columns by bare name
    attach(designmat) # attach to the designmat
    # test one voxel to obtain names for return vals
    # (default -1 means "not specified"; fall back to the middle voxel)
    if (args$testvoxel < 0) {
        args$testvoxel <- trunc(dim(voxeldat)[2]/2)
    }
    # processVoxel in permute mode is expected to read this global
    permutationNumber <- 1 # set permutation number
    tryCatch({ out <- processVoxel(args$testvoxel)},error=function(e) {
        message("error testing the model on a random voxel to determine output filenames")
        message("try providing the -t option to give a specific voxel for testing")
        message(e)
        stop("Exiting before generating makefile.")})
    # names(out) determines the output file names the makefile will produce
    npointWriteMakefilePermute(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), makefilename, masterscript.local, nSchedulerJobs)
    npointWriteNextflowPermute(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), nextflowfilename, nSchedulerJobs)
    # "split" into a single chunk containing all voxels (size = total voxels)
    npointSplitDataSize(dim(voxeldat)[2],voxeldat,args$output,mask)
    # NOTE(review): the per-scheduler submit-script writers are commented out
    # below, so in permute mode only path names are computed for each scheduler.
    if (!is.null(args$sgeN)) {
        masterscript.scheduler <- paste(dirname(args$output), "/runme.sge", sep="")
        jobscript <- paste(dirname(args$output), "/sgejob.bash", sep="")
        # npointWriteSGEsubmitscript(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), masterscript.scheduler,jobscript, njobs)
    } else if (!is.null(args$slurmN)) {
        masterscript.scheduler <- paste(dirname(args$output), "/runme.slurm", sep="")
        jobscript <- paste(dirname(args$output), "/slurmjob.bash", sep="")
        # npointWriteSlurmsubmitscript(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), masterscript.scheduler,jobscript, njobs)
    } else if (!is.null(args$pbsN)) {
        masterscript.scheduler <- paste(dirname(args$output), "/runme.pbs", sep="")
        jobscript <- paste(dirname(args$output), "/pbsjob.bash", sep="")
        # npointWritePBSsubmitscript(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), masterscript.scheduler,jobscript, njobs, args$pbsPre)
    } else {
        # We better not get here
        stop("Cannot identify which scheduler to output")
    }
    npointWriteCallingInfo(origargs)
} else {
    if (runningParallel) {
        if (!is.null(args$processors)) {
            # Shared-memory path: FORK cluster (Unix only), run everything now.
            attach(designmat) # we attach to the designmat
            cl <- makeCluster(args$processors, type="FORK")
            cat("Exporting data to cluster.\n")
            clusterExport(cl,varlist=c("voxeldat"))
            cat("Starting parallel job using", args$processors, "cores.\n")
            cat("Use top to make sure that no threads are using more than 100% of the CPU.\n")
            system.time(results <-parSapply(cl,1:nvertices, processVoxel))
            stopCluster(cl)
            npointWriteOutputFiles(args$output,results,mask)
            npointWriteCallingInfo(origargs)
        } else { # we are using SGE or SLURM or PBS
            # Scheduler path: split voxel data into chunks and emit driver files;
            # nothing is computed now except one test voxel.
            # save the design matrix
            designmatname <- paste(args$output, "designmat.rds", sep="")
            makefilename <- paste(dirname(args$output), "/Makefile", sep="")
            masterscript.local <- paste(dirname(args$output), "/runme.local", sep="")
            saveRDS(designmat,designmatname)
            attach(designmat) # attach to the designmat
            # test one voxel to obtain names for return vals
            if (args$testvoxel < 0) {
                args$testvoxel <- trunc(dim(voxeldat)[2]/2)
            }
            tryCatch({ out <- processVoxel(args$testvoxel)},error=function(e) {
                message("error testing the model on a random voxel to determine output filenames")
                message("try providing the -t option to give a specific voxel for testing")
                message(e)
                stop("Exiting before generating makefile.")})
            npointWriteMakefile(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), makefilename, masterscript.local)
            # do sanity check to make sure we obtained the number of jobs
            if (nSchedulerJobs < 0) {
                stop("Number of scheduler jobs incorrectly specified.")
            }
            # split up the data into chunks and write out scripts to process
            if (nSchedulerJobs > nvertices) {
                stop("Number of scheduler jobs requested is greater than the number of vertices")
            } else {
                cat("no. of vertices", nvertices, "\n")
                #sgeN * size is now larger than the # of vertices
                size <- ceiling(nvertices/nSchedulerJobs)
                cat("size", trunc(nvertices/nSchedulerJobs), "\n")
                # actual job count may differ slightly from the request after rounding
                njobs <- npointSplitDataSize(size,voxeldat,args$output,mask)
                cat("no. of jobs", njobs ,"\n")
                if (!is.null(args$sgeN)) {
                    masterscript.scheduler <- paste(dirname(args$output), "/runme.sge", sep="")
                    jobscript <- paste(dirname(args$output), "/sgejob.bash", sep="")
                    npointWriteSGEsubmitscript(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), masterscript.scheduler,jobscript, njobs)
                } else if (!is.null(args$slurmN)) {
                    masterscript.scheduler <- paste(dirname(args$output), "/runme.slurm", sep="")
                    jobscript <- paste(dirname(args$output), "/slurmjob.bash", sep="")
                    npointWriteSlurmsubmitscript(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), masterscript.scheduler,jobscript, njobs)
                } else if (!is.null(args$pbsN)) {
                    masterscript.scheduler <- paste(dirname(args$output), "/runme.pbs", sep="")
                    jobscript <- paste(dirname(args$output), "/pbsjob.bash", sep="")
                    npointWritePBSsubmitscript(basename(args$output), names(out), paste(getwd(), "/",args$model,sep=""), basename(designmatname), masterscript.scheduler,jobscript, njobs, args$pbsPre)
                } else {
                    # We better not get here
                    stop("Cannot identify which scheduler to output")
                }
                npointWriteCallingInfo(origargs)
            }
        }
    } else {
        # Sequential fallback: run processVoxel over every vertex in this process.
        cat("Starting sequential job\n")
        cat("You might want to check whether your model is multithreaded\n")
        cat("because your code might run faster if you limit the number of threads\n")
        system.time(results <-sapply(1:nvertices, processVoxel))
        npointWriteOutputFiles(args$output,results,mask)
        npointWriteCallingInfo(origargs)
    }
}
| |
39ddd30a30bc19d8fb3d0b485b37c019aca0e945 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610054882-test.R | 88f806951e0c16f19aad582a5cbb13d57ed7acce | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 613 | r | 1610054882-test.R | testlist <- list(a = 0L, b = 0L, x = c(757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935615L, -1L, -1L, -53971L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 757935405L, 754974720L, 0L, 2960685L, 757935405L, 762981677L, 757935405L, 757935405L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Fuzz-test driver: call grattan's internal anyOutside on the canned input list
# (`testlist`, defined above) and print the structure of the result.
# FIX(review): removed a stray "|" character after str(result) -- an extraction
# artifact that made the script unparseable.
result <- do.call(grattan:::anyOutside,testlist)
str(result)
1daccbe806e009c8d7676ab5df26a217a6812751 | 1e28d717a28d32d2dd5fe6793de85857ae775200 | /WQD170068_Assignment2.R | 56d69b9dd6d0c82669ae8975545bcc6b43027405 | [] | no_license | quantFactory/rBackup | feb468d9a5291c142932deab06d6da4f5dd76962 | b046d300e86277dc4f2e0307a87fd37f78a88091 | refs/heads/master | 2020-03-31T19:56:58.140742 | 2018-10-10T07:52:04 | 2018-10-10T07:52:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,103 | r | WQD170068_Assignment2.R | #################################### WQD7004: ASSIGNMENT 2 #######################################
# Name : NG KANG WEI
# Student ID : WQD170068
# Load the 2008 airline on-time dataset. NOTE(review): assumes the CSV has the
# standard ASA DataExpo 2009 columns (Origin, Dest, DepDelay, DepTime, Month, ...)
# -- confirm against the data source.
path <- file.path("E:\\academic\\mds-sem2\\WQD7004-Rprogramming\\2008.csv")
myDF <- read.csv(path)
library(dplyr)
# Each question below is answered twice: once in base R, once with dplyr.
# 1) Sort in decreasing order the ten most popular airports according to the number of origins of flights
# # (hint : use decreasing=T and indexing )
flightOrigin <- table(myDF$Origin)
sort(flightOrigin, decreasing=TRUE)[1:10]
# dplyr
popularOrg <- myDF %>% group_by(Origin) %>% count(sort=TRUE)
popularOrg[1:10,]
# 2) Assign the names of the ten most popular airports according to the number of origins of flights to
## variable called mostPopularOrg
# # (hint : use names() and indexing)
mostPopularOrg <- names(sort(flightOrigin, decreasing=TRUE))[1:10]
mostPopularOrg
# dplyr
# namesOfPopOrg is a one-column tibble (use pull() later to get a plain vector)
namesOfPopOrg <- popularOrg[1:10,] %>% select(Origin)
namesOfPopOrg
# 3) Assign the names of the ten most popular airports according to the number of destinations of flights to
## variable called mostPopularDes
flightDestination <- table(myDF$Dest)
mostPopularDes <- names(sort(flightDestination, decreasing=TRUE))[1:10]
mostPopularDes
# dplyr
mostPopularDest <- myDF %>% group_by(Dest) %>% count(sort=T)
mostPopularDest <- mostPopularDest[1:10,] %>% select(Dest)
mostPopularDest
# 4) How many flights had their origin in one of these 10 most popular airports
## (hint : use %in%)
# FIX(review): the base-R answer originally tested Origin against mostPopularDes
# (the destination-popular set), while the question asks about origins and the
# dplyr answer below uses the origin-popular set; use mostPopularOrg to match.
sum(myDF$Origin %in% mostPopularOrg)
# dplyr
myDF %>% filter(Origin %in% pull(namesOfPopOrg)) %>% nrow
# 5) How many flights had their destination in one of these 10 most popular airports
## (hint : use %in%)
sum(myDF$Dest %in% mostPopularDes)
# dplyr
myDF %>% filter(Dest %in% pull(mostPopularDest)) %>% nrow
# 6) Find flights for which the origin and the destination
# were among the 10 most popular airports
## (hint : use %in%)
sum(myDF$Origin %in% mostPopularOrg & myDF$Dest %in% mostPopularDes)
# dplyr
myDF %>% filter(Origin %in% pull(namesOfPopOrg), Dest %in% pull(mostPopularDest)) %>% nrow
# 7) For the purposes of this question, treat the group
# of the 200 least popular airports according to the
# number of flights having these as the origins.
# How many flights had one of these 200 least popular
# airports as their origin?
# sort() ascending puts the smallest counts first, so [1:200] is the bottom 200.
leastPopularOrg <- sort(flightOrigin)[1:200]
sum(leastPopularOrg)
# dplyr
# count(sort=T) sorts descending, so tail(n=200) gives the bottom 200.
leastPopularAirport <- myDF %>% group_by(Origin) %>% count(sort=T) %>% tail(n=200) %>% select(Origin)
leastPopularAirport
myDF %>% filter(Origin %in% pull(leastPopularAirport)) %>% nrow
# 8) Index a vector according to the names of the elements in the vector
##8a) How many flights departed from "IND" ?
flightOrigin['IND']
# dplyr
myDF %>% filter(Origin=='IND') %>% nrow
##8b) How many flights departed from "IND","ORD","JFK","EWR","IAD" ?
flightOrigin[c('IND', 'ORD', 'JFK', 'EWR', 'IAD')]
# dplyr
myDF %>% filter(Origin %in% c('IND', 'ORD', 'JFK', 'EWR', 'IAD')) %>% group_by(Origin) %>% tally()
##8c) How many flights departed from each of the 10 most popular airports ?
flightOrigin[mostPopularOrg]
# dplyr
myDF %>% filter(Origin %in% mostPopularOrg) %>% group_by(Origin) %>% tally
##8d) How many flights departed from each of the 200 least popular airports ?
flightOrigin[names(leastPopularOrg)]
# dplyr
flightsFromLeastPop <- myDF %>% filter(Origin %in% pull(leastPopularAirport)) %>% group_by(Origin) %>% tally(sort=T)
glimpse(flightsFromLeastPop)
# 8e) How many flights landed at Ronald Reagan Washington
## National ("DCA") or Washington Dulles Airport ("IAD") in 2008?
flightDestination[c('DCA', 'IAD')]
# dplyr
myDF %>% filter(Dest %in% c('DCA', 'IAD')) %>% group_by(Dest) %>% tally
# 9) Check the first 20 flights and see which one departed on time or early
# "On time or early" is encoded throughout as DepDelay <= 0.
first20 <- head(myDF, 20)
first20DepOnTimeEarly <- subset(first20, first20$DepDelay <= 0); first20DepOnTimeEarly
nrow(first20DepOnTimeEarly)
# dplyr
myDF %>% slice(1:20) %>% filter(DepDelay <= 0) %>% nrow
##9a) Restrict attention to only the 10 most popular airports
##and see which one departed on time or early
# tapply sums the logical per airport (na.rm drops cancelled flights with NA DepDelay)
mostPopDepOnTimeEarly <- tapply(myDF$DepDelay <= 0, myDF$Origin, sum, na.rm=TRUE)[mostPopularOrg]
mostPopDepOnTimeEarly
# dplyr
depEarly <- myDF %>% filter(Origin %in% pull(namesOfPopOrg), DepDelay <= 0) %>% group_by(Origin) %>% tally()
##9b)Find the percentage of flights at each of the 10 most popular
# airports that departed on time or early
mostPopDepOnTimeEarly / flightOrigin[mostPopularOrg] * 100
# dplyr
flightFromMostPop <- myDF %>% filter(Origin %in% pull(namesOfPopOrg)) %>% group_by(Origin) %>% tally
# NOTE(review): this assumes depEarly and flightFromMostPop have the same airports
# in the same row order -- confirm no popular airport has zero early departures.
flightFromMostPop %>% mutate(EarlyFlightPct = depEarly$n / n * 100) %>% select(Origin, EarlyFlightPct)
# 9c) What percentage of flights departed from IND on time or early?
sum(myDF$Origin=='IND' & myDF$DepDelay <= 0, na.rm=TRUE) / flightOrigin['IND'] * 100
# dplyr
earlyFlightInd <- myDF %>% filter(Origin=='IND', DepDelay<=0) %>% tally
allFlightInd <- myDF %>% filter(Origin=='IND') %>% tally
earlyFlightInd / allFlightInd * 100
#10) Analyze Flights by Origin Airport and Month of Departure
##10a) Break the data in the DepDelay vector according to which city of origin
depOrg <- tapply(myDF$DepDelay, myDF$Origin, length)
head(depOrg)
# dplyr
myDF %>% select(DepDelay, Origin) %>% group_by(Origin) %>% tally
##10b) Break the data in the DepDelay vector according to month
depMonth <- tapply(myDF$DepDelay, myDF$Month, length)
head(depMonth)
# dplyr
myDF %>% select(DepDelay, Month) %>% group_by(Month) %>% tally
#11) How many flight delays occur from each airport in each month ?
# Result is an airport x month matrix of counts of DepDelay > 0.
tapply(myDF$DepDelay > 0, list(myDF$Origin, myDF$Month), sum, na.rm=T)
# dplyr
myDF %>% select(DepDelay, Month, Origin) %>% filter(DepDelay > 0) %>% group_by(Origin, Month) %>% tally
##11a) Extract the data from origin airport = "IND"
# and from the month of June
tapply(myDF$DepDelay > 0, list(myDF$Origin, myDF$Month), sum, na.rm=T)['IND', 6]
# dplyr
myDF %>% filter(DepDelay > 0, Origin=='IND', Month==6) %>% tally
##11b) Extract the data from origin airport = "ATL"
# and from the month of March
tapply(myDF$DepDelay > 0, list(myDF$Origin, myDF$Month), sum, na.rm=T)['ATL',3]
# dplyr
myDF %>% filter(DepDelay > 0, Origin=='ATL', Month==3) %>% tally
# 11c) The number of flight delays from 3 airports = "ATL","AUS","BDL"
# during the months of July through October
flightDelay3airport <- tapply(myDF$DepDelay > 0, list(myDF$Origin, myDF$Month), sum, na.rm=T)[c('ATL', 'AUS', 'BDL'), 7:10]
flightDelay3airport
# dplyr
delayedFlight3Airport <- myDF %>% filter(DepDelay > 0, Origin %in% c('ATL', 'AUS', 'BDL'), Month %in% c(7:10)) %>% group_by(Month, Origin) %>% tally
delayedFlight3Airport
# 11d) How many delayed departure flights altogether from ATL, AUS, and BDL during the months of
#July 2008 through October 2008?
sum(flightDelay3airport)
colSums(flightDelay3airport)
rowSums(flightDelay3airport)
# dplyr
sum(delayedFlight3Airport$n)
# 11e) All the flight delays, month by month, from the IND airport
tapply(myDF$DepDelay > 0, list(myDF$Origin, myDF$Month), sum, na.rm=T)['IND', ]
# dplyr
myDF %>% filter(Origin=='IND', DepDelay > 0) %>% group_by(Month) %>% tally
# 11f) All the flight delays, month by month, from both IND and ORD at once
# flightDelayIndOrd (2 x 12 matrix) is reused by question 12 below.
flightDelayIndOrd <- tapply(myDF$DepDelay > 0, list(myDF$Origin, myDF$Month), sum, na.rm=T)[c('IND', 'ORD'),]
flightDelayIndOrd
# dplyr
delayedFlightIndOrd <- myDF %>% filter(Origin %in% c('IND', 'ORD'), DepDelay >0) %>% group_by(Month, Origin) %>% tally
delayedFlightIndOrd
# 12) Calculating Percentages of Flights with delayed more than 30 minutes when departing
# "Long delay" = DepDelay > 30; percentages are relative to all delayed flights
# (DepDelay > 0) from the same airport/month (see flightDelayIndOrd from 11f).
moreThan30min <- subset(myDF, myDF$DepDelay > 30)
delayedFlight <- tapply(moreThan30min$DepDelay, list(moreThan30min$Origin, moreThan30min$Month), length)[c('IND', 'ORD'),]
pct <- delayedFlight / flightDelayIndOrd * 100
pct
# dplyr
lateDelayed <- myDF %>% filter(DepDelay > 30, Origin %in% c('IND', 'ORD')) %>% group_by(Origin, Month) %>% tally
lateDelayed <- as.data.frame(lateDelayed)
allDelayed <- myDF %>% filter(DepDelay > 0, Origin %in% c('IND', 'ORD')) %>% group_by(Origin, Month) %>% tally
allDelayed <- as.data.frame(allDelayed)
# NOTE(review): the element-wise division assumes lateDelayed and allDelayed have
# identical (Origin, Month) row order -- true only if every combination occurs.
allDelayed <- allDelayed %>% mutate(LatePercentages = lateDelayed$n / n * 100) %>% select(Origin, Month, LatePercentages)
# 12a) find the percentage of flights with long delays and plot with dotchart()
dotchart(pct)
# dplyr
dotchart(allDelayed$LatePercentages, allDelayed$Origin, allDelayed$Month,
         main = "Percentages of Late Flights in IND and ORD month by month in 2008",
         xlab="Percentages of late flights", ylab="Airport and Months")
# 12b) How many flights departed altogether from IND
# or ORD in 2008 with a delay of more than 30 minutes each?
sum(delayedFlight)
# dplyr
myDF %>% filter(DepDelay > 30, Origin %in% c('IND', 'ORD')) %>% nrow
#12c) In which month of 2008 was the percentage of long delays
#(i.e., flights with more than 30 minute delays) the highest?
delayPerMonth <- tapply(myDF$DepDelay > 30, myDF$Month, sum, na.rm=T)
delayPerMonth
allFlightPerMonth <- tapply(myDF$Month, myDF$Month, length)
allFlightPerMonth
delayPctPerMonth <- delayPerMonth / allFlightPerMonth * 100
delayPctPerMonth
names(which.max(delayPctPerMonth))
# dplyr
longDelay <- myDF %>% filter(DepDelay > 30) %>% group_by(Month) %>% tally
longDelay <- as.data.frame(longDelay); longDelay
allFlight <- myDF %>% group_by(Month) %>% tally
allFlight <- as.data.frame(allFlight); allFlight
longDelay <- longDelay %>% mutate(LatePercentages = n / allFlight$n * 100)
# which.max returns the row index; it equals the month number because the
# grouped tally is ordered by Month 1..12.
which.max(longDelay$LatePercentages)
# 13) Analyzing Flights by Time of Day for Departure
# Break the day into 4 parts:
# early morning (1) correspond to the times to 6 am
# late morning (2) correspond to the times to 6 am to 12 noon
# early evening (3) correspond to the times to 12 noon to 6 pm
# late evening (4) correspond to the times to 6 pm to 12 midnight
# DepTime is in HHMM form (e.g. 1830), so dividing by 600 and taking the ceiling
# buckets times into 1..4 six-hour bins. NOTE(review): a flight at exactly 6:00
# (600) falls in bin 1, and NA DepTime (cancelled flights) yields NA.
v<-ceiling(myDF$DepTime/600)
# dplyr
# build a vector called parts of the day, one entry per flight (NA by default)
partsofday <- rep(NA, times=dim(myDF)[1])
partsofday
partsofday[v==1]<-"early morning"
partsofday[v==2]<-"late morning"
partsofday[v==3]<-"early evening"
partsofday[v==4]<-"late evening"
table(partsofday)
# dplyr
# and we can create a new column in the myDF data frame called "timeofday"
# and we can store this information we just found into this column
myDF$timeofday <- partsofday
dim(myDF)
# dplyr
# (re-assigning the same column via mutate; the result is identical)
myDF <- myDF %>% mutate(timeofday = partsofday)
dim(myDF)
# just check to make sure that the first 6 flights were done properly
head(myDF$timeofday)
head(myDF$DepTime)
# dplyr
myDF %>% select(timeofday) %>% slice(1:6)
myDF %>% select(DepTime) %>% slice(1:6)
# 13a) How many flights departed from IND early in the morning?
sum(myDF$Origin=='IND' & myDF$timeofday=='early morning', na.rm = TRUE)
# dplyr
myDF %>% filter(Origin=='IND', timeofday=='early morning') %>% nrow
# 13b) Tabulate how many flights occur, by splitting the flights according to
# both the city of origin and also the time of the day when the flight departed
tapply(myDF$DepDelay, list(myDF$Origin, myDF$timeofday), length)
# dplyr
myDF %>% group_by(Origin, timeofday) %>% tally
89b960add534c96048782f029b70300038997694 | 98d801be299daf76f6ccf6f4dde104e0027c9633 | /man/rbeta.Rd | 2df22177782b77914f86dbf35dc2e81d2f13d8fe | [] | no_license | cran/rBeta2009 | 90e59d3df708b6cc5e737f412f8723f0dbc8a0a2 | 2756392a871703b0e963c911794f4536559b34d4 | refs/heads/master | 2020-12-30T09:38:05.759015 | 2012-02-25T00:00:00 | 2012-02-25T00:00:00 | 17,698,930 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,800 | rd | rbeta.Rd | \name{rbeta}
\alias{rbeta}
\title{
The Beta Random Number Generating Function
}
\description{
Random generation for the beta distribution with parameters \code{shape1} and
\code{shape2}.
}
\usage{
rbeta(n, shape1, shape2)
}
\arguments{
\item{n}{Number of beta random numbers to generate. If \code{length(n) > 1}, the length
is taken to be the number required.}
\item{shape1, shape2}{Positive shape parameters.}
}
\details{
The beta distribution with parameters \code{shape1}\eqn{ = a} and \code{shape2}\eqn{ = b}
has density
\deqn{\frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} x^{a-1} (1-x)^{b-1}}{%
\Gamma(a+b)/(\Gamma(a)\Gamma(b)) x^(a-1)(1-x)^(b-1)}
for \eqn{a > 0, b > 0} and \eqn{0 \le x \le 1}.
The mean is \eqn{\frac{a}{a+b}}{a/(a+b)} and the variance is
\eqn{\frac{ab}{(a+b)^2 (a+b+1)}}{ab/((a+b)^2 (a+b+1))}.
\code{rbeta} basically utilizes the following guideline primarily proposed by Hung
\emph{et al.} (2009) for generating beta random numbers.
\itemize{
\item{When \eqn{max(}\code{shape1}\eqn{, }\code{shape2}\eqn{) < 1}, }{the B00 algorithm
(Sakasegawa, 1983) is used;}
\item{When \code{shape1}\eqn{ < 1 < }\code{shape2} or \code{shape1}\eqn{ > 1 > }\code{shape2}, }
{the B01 algorithm (Sakasegawa, 1983) is used;}
    \item{When \eqn{min(}\code{shape1}\eqn{, }\code{shape2}\eqn{) > 1}, }{the B4PE algorithm
    (Schmeiser and Babu, 1980) is used if one papameter is close to 1 and the other is large
(say \eqn{> 4}); otherwise, the BPRS algorithm (Zechner and Stadlober, 1993) is used.}
}
}
\value{
\code{rbeta} generates beta random numbers.
}
\source{
\code{rbeta} uses a C translation of
Y. C. Hung and N. Balakrishnan and Y. T. Lin (2009),
Evaluation of beta generation algorithms,
\emph{Communications in Statistics - Simulation and Computation},
\bold{38}:750--770.
}
\references{
Y. C. Hung and N. Balakrishnan and Y. T. Lin (2009),
Evaluation of beta generation algorithms,
\emph{Communications in Statistics - Simulation and Computation}, \bold{38}, 750--770.
H. Sakasegawa (1983),
Stratified rejection and squeeze method for generating beta random numbers,
\emph{Annals of the Institute Statistical Mathematics}, \bold{35}, 291--302.
B.W. Schmeiser and A.J.G. Babu (1980),
Beta variate generation via exponential majorizing functions,
\emph{Operations Research}, \bold{28}, 917--926.
H. Zechner and E. Stadlober (1993),
Generating beta variates via patchwork rejection,
\emph{Computing}, \bold{50}, 1--18.
}
\author{
Ching-Wei Cheng <aks43725@gmail.com>,\cr
Ying-Chao Hung <hungy@nccu.edu.tw>,\cr
Narayanaswamy Balakrishnan <bala@univmail.cis.mcmaster.ca>
}
\seealso{
\code{\link[stats]{rbeta}} in package \pkg{stats}.
}
\examples{
library(rBeta2009)
rbeta(10, 0.7, 1.5)
}
\keyword{beta}
|
d70ec5e5ff04eb07c598ea8bc78aac336c73d81d | da51412f8bbc686f070cf9c468aa51bb302e36dc | /COAD_NormalCancer_Project/2.Molecular_dataset/2.Gene_set_analysis/4.7.COAD.MSigDB_c6_analysis.R | 7df5bf2a311f80088e010641fead0f8027805a48 | [] | no_license | haojiang9999/HCA_script | f7c7451e951abb2da83ada71688dd7833276dc4c | 46eaaecb016559982818f9e2c7c7f5c689b27622 | refs/heads/master | 2020-12-21T13:14:58.616545 | 2020-04-01T01:58:28 | 2020-04-01T01:58:28 | 236,439,415 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,477 | r | 4.7.COAD.MSigDB_c6_analysis.R | #### 4.7.COAD.MSigDB_c6_analysis.R
## MSigDB c6 (oncogenic signature) gene-set analysis for COAD.
## `fit.c6.2` (limma contrast fit) and `adjPvalueCutoff` are created upstream.
## For each contrast we pull the full BH-adjusted result table, collapse all
## contrasts into one matrix of signed -log10(q) values (sign = direction of
## the log fold-change), save it, and draw a small demonstration heatmap.
dt.c6 <- decideTests(fit.c6.2, p.value = adjPvalueCutoff )
## Full (number = Inf) BH-adjusted topTable for one contrast of the fit.
## Replaces six copy-pasted topTable() calls.
topTableAll <- function(coef) topTable(fit.c6.2, coef = coef, number = Inf, adjust = "BH")
DEgeneSets.blue.c6.all      <- topTableAll("blue - (brown + turquoise)/2")
DEgeneSets.brown.c6.all     <- topTableAll("brown - (blue + turquoise)/2")
DEgeneSets.turquoise.c6.all <- topTableAll("turquoise - (brown + blue)/2")
DEgeneSets.BlvBr.c6.all     <- topTableAll("blue - brown")
DEgeneSets.BlvTu.c6.all     <- topTableAll("blue - turquoise")
DEgeneSets.BrvTu.c6.all     <- topTableAll("brown - turquoise")
## Sanity checks: every table should cover the same gene sets.
dim(DEgeneSets.blue.c6.all)
dim(DEgeneSets.brown.c6.all)
dim(DEgeneSets.turquoise.c6.all)
dim(DEgeneSets.BlvBr.c6.all)
head(DEgeneSets.blue.c6.all)
## Fix one common row ordering for the combined matrix.
Index.c6 <- rownames(DEgeneSets.blue.c6.all)
## Signed significance for one result table, reordered to Index.c6:
## -log10(BH-adjusted p-value) carrying the sign of the log fold-change.
signedLog10Q <- function(tb) {
  tb <- tb[Index.c6, ]
  -log10(tb$adj.P.Val) * sign(tb$logFC)
}
c6.log10.q.blue      <- signedLog10Q(DEgeneSets.blue.c6.all)
c6.log10.q.brown     <- signedLog10Q(DEgeneSets.brown.c6.all)
c6.log10.q.turquoise <- signedLog10Q(DEgeneSets.turquoise.c6.all)
c6.log10.q.BlvBr     <- signedLog10Q(DEgeneSets.BlvBr.c6.all)
c6.log10.q.BlvTu     <- signedLog10Q(DEgeneSets.BlvTu.c6.all)
c6.log10.q.BrvTu     <- signedLog10Q(DEgeneSets.BrvTu.c6.all)
## cbind() keeps the variable names as column names, as before.
c6.log10.q.all <- cbind(c6.log10.q.blue, c6.log10.q.brown, c6.log10.q.turquoise,
                        c6.log10.q.BlvBr, c6.log10.q.BlvTu, c6.log10.q.BrvTu)
rownames(c6.log10.q.all) <- Index.c6
head(c6.log10.q.all)
saveRDS(c6.log10.q.all, file = "COAD.tb.c6.log10.q.all.rds")
### Heatmap plot example (first 10 gene sets only).
library(reshape2)
library(ggplot2)
c6.log10.q.all.m <- melt(c6.log10.q.all[1:10,])
ggplot(c6.log10.q.all.m, aes(Var2, Var1)) +
  geom_tile(aes(fill = value),colour = "black") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  scale_fill_gradient2(low = "mediumblue", high = "red2", mid = "white",midpoint = 0) + coord_fixed()
|
54bffeab6c2e137ebb1eb9f21bbb561689f006ca | 939e6dc1722fae8b7f617ae699e78e74e0b1a016 | /man/check_RMAPI_loaded.Rd | 138cd701b8e979abb37a939e997083d34d55dfd6 | [
"MIT"
] | permissive | mrc-ide/RMAPI | 972e1cd2b32e446ec8449f39119aefe27df0742e | 515b10c7f486e3700a51dbdd5bea5ecb4ea3a939 | refs/heads/master | 2021-05-12T02:24:37.831396 | 2020-02-05T16:17:59 | 2020-02-05T16:17:59 | 117,585,480 | 3 | 0 | null | 2019-11-02T16:46:48 | 2018-01-15T19:17:09 | R | UTF-8 | R | false | true | 356 | rd | check_RMAPI_loaded.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{check_RMAPI_loaded}
\alias{check_RMAPI_loaded}
\title{Check that RMAPI package has loaded successfully}
\usage{
check_RMAPI_loaded()
}
\description{
Simple function to check that RMAPI package has loaded
successfully. Prints "RMAPI loaded successfully!" if so.
}
|
22f1610ab9589b3020c6a971337d11974874c7cd | 9a7eca6bc81a8a5ac032645c626ad469832ba3e9 | /R/p_detect.R | 8451b5ec51b5c85e019b0b5a12e35894af8eaf1c | [] | no_license | fishsciences/artemis | c4608bbe38689c62053b4e9770d61cccc140bdd4 | 6c7b0a447f99e51e4d5bf709b2ea01b559c1f617 | refs/heads/main | 2023-03-15T23:35:39.867064 | 2023-03-07T21:53:09 | 2023-03-07T21:53:09 | 193,982,468 | 5 | 0 | null | 2022-04-26T22:28:04 | 2019-06-26T21:52:14 | R | UTF-8 | R | false | false | 7,901 | r | p_detect.R | # scripts to work with the probability of detection either to work
# backwards to find the variable level which corresponds to a certain
# p(detect) for a single sample or the joint p(detect) for a bunch of
# samples
##' Estimate the probability of detection
##'
##' This function estimates the probability of getting a positive
##' detection for an eDNA survey given a set of predictors. This can
##' be useful when trying to take the estimates from a preliminary
##' study and use those estimates to inform the deployment of future
##' sampling schemes. The function assumes that you have either an
##' idea of the effects of the various predictors, for example from a
##' previous study, or a fit model with estimates of the effect sizes.
##'
##' This function takes one circumstance at a time, and calculates the
##' range of outcomes given a number of repeated sampling
##' attempts. The probability calculated is the probability of getting
##' at least one positive detection. For details on the underlying
##' model and assumptions for this calculation, please refer to the
##' package vignette.
##'
##' @section Notes on random effects:
##'
##' This function deals with random effects in two different
##' ways. First, when we desire to see the probability of detection
##' for a specific instance of a random effect, users can specify the
##' random effect as just another effect by specifying the random
##' effect = 1 in the variable list, and then the size of the random
##' effect. However, when users wish to estimate the probability of
##' detection in cases where random effects are generated from a
##' distribution of random effects, this can be accomplished by adding
##' the standard deviation of the random effect to the
##' \code{Cq_sd}. This takes advantage of the fact that random effects
##' are just another source of variation, and that sum of random
##' normal distributions is itself a random normal distribution.
##'
##' @title Estimate the probability of detection
##' @param variable_levels numeric vector, with each element
##' corresponding to the condition to estimate the probability of
##' detection.
##' @param betas numeric vector, the effect sizes for each of the
##' variable level
##' @param ln_eDNA_sd the measurement error on ln[eDNA]. If a
##' model_fit is provided and this is missing, the estimated
##' sd(ln_eDNA) from the model will be used.
##' @param std_curve_alpha the alpha for the std. curve formula for
##' conversion between log(concentration) and CQ
##' @param std_curve_beta the beta for the std. curve formula for
##' conversion between log(concentration) and CQ
##' @param n_rep the number of replicate measurements at the levels
##' specified
##' @param prob_zero the probability of seeing a non-detection,
##' i.e. zero, from a zero-inflated process. Defaults to 8%, which
##' is the rate of inflated zeros in a large sampling experiment.
##' @param model_fit optional, a model fit from \code{eDNA_lm} or
##' \code{eDNA_lmer}. If this is provided, an estimate derived
##' from the posterior estimates of beta is calculated.
##' @param upper_Cq the upper limit on detection. Converted to the
##' lower_bound of detection internally
##' @return object of class "eDNA_p_detect" with the estimates of the
##' probability of detection for the variable levels provided.
##' @author Matt Espe
##'
##' @examples
##'
##' est_p_detect(variable_levels = c(Intercept = 1, Distance = 100, Volume = 20),
##' betas = c(Intercept = -10.5, Distance = -0.05, Volume = 0.001),
##' ln_eDNA_sd = 1, std_curve_alpha = 21.2, std_curve_beta = -1.5,
##' n_rep = 1:12)
##'
##' @export
## Estimate the probability of at least one positive eDNA detection for a
## single set of predictor levels, over 1..n replicate samples.  Effect sizes
## come either from `betas` (point-estimate path) or from the posterior draws
## stored in `model_fit` (one probability curve per draw).  See the roxygen
## block above for the full parameter documentation.
est_p_detect = function(variable_levels,
                        betas,
                        ln_eDNA_sd,
                        std_curve_alpha, std_curve_beta,
                        n_rep = 1:12,
                        prob_zero = 0.08,
                        model_fit = NULL,
                        upper_Cq = 40)
{
    ## Only a single vector of levels is supported (no matrix of scenarios).
    if(!is.null(dim(variable_levels)))
        stop("Sorry, only one set of variable levels at a time currently supported")
    ## The predictor vector must line up with the effect sizes, whether those
    ## come from `betas` or from the posterior draws in `model_fit@betas`.
    ## NOTE(review): when `betas` is missing AND `model_fit` is NULL, the
    ## length(betas) call here raises R's generic "argument is missing" error
    ## before the friendlier message two checks below is ever reached.
    if((is.null(model_fit) && length(variable_levels) != length(betas)) ||
       (!is.null(model_fit) && length(variable_levels) != ncol(model_fit@betas)) )
        stop("Variable levels and betas cannot be of different lengths")
    if(missing(betas) && is.null(model_fit))
        stop("Must provide either a set of beta values or a model_fit object")
    if(!missing(betas)) {
        ## Point-estimate path: a single ln[eDNA] prediction from the betas.
        if(!is.null(model_fit)) dup_arg_warn("beta")
        ln_conc_hat = variable_levels %*% betas
    } else {
        # model_fit provided
        ## Posterior path: fall back to the model's estimates for anything the
        ## caller did not supply explicitly.
        if(!missing(ln_eDNA_sd)){
            dup_arg_warn("ln_eDNA_sd")
        } else {
            ln_eDNA_sd = model_fit@sigma_ln_eDNA
        }
        if(missing(std_curve_alpha) && missing(std_curve_beta)){
            std_curve_alpha = unique(model_fit@std_curve_alpha)
            std_curve_beta = unique(model_fit@std_curve_beta)
        }
        if(length(std_curve_alpha) > 1 || length(std_curve_beta) > 1)
            stop("Model was fit with multiple curves - please provide a single set of standard curve parameters")
        ## Optional model intercept: a zero-length slot means "no intercept".
        inter = if(length(model_fit@intercept)) as.vector(model_fit@intercept) else 0
        ## One ln[eDNA] prediction per posterior draw of the betas.
        ln_conc_hat = apply(model_fit@betas, 1, function(y) variable_levels %*% y) + inter
    }
    ## Detection threshold on the ln[eDNA] scale, implied by upper_Cq and the
    ## standard curve Cq = alpha + beta * ln(concentration).
    lower_bound = (upper_Cq - std_curve_alpha) / std_curve_beta
    ## NOTE(review): Cq_hat is computed but never used below -- dead store?
    Cq_hat = ln_conc_hat * std_curve_beta + std_curve_alpha
    ans = prob_detect_ln(ln_conc_hat, ln_eDNA_sd, n_rep, prob_zero, lower_bound)
    ## Attach metadata and an S3 class so print/plot methods elsewhere can
    ## recognise the result.
    structure(ans,
              variable_levels = variable_levels,
              reps = n_rep,
              class = c("eDNA_p_detect", class(ans)))
}
prob_detect_ln = function(ln_conc_hat, ln_sd, n_rep, p_zero, lwb)
{
    ## Probability that one replicate is a non-detection: either the measured
    ## ln[eDNA] (with measurement error ln_sd) lands below the detection
    ## threshold `lwb`, or the sample is an inflated zero (e.g. filter
    ## failure) with probability `p_zero`.
    p_below = pnorm(lwb, mean = ln_conc_hat, sd = ln_sd)
    p_miss_one = p_zero + (1 - p_zero) * p_below
    ## P(at least one detection in i replicates) = 1 - P(all i replicates miss).
    vapply(n_rep, function(i) 1 - p_miss_one^i, numeric(length(p_miss_one)))
}
prob_detect = function(Cq_hat, Cq_sd, n_rep, p_zero, upper_Cq = 40)
{
    ## A replicate is detected only when its Cq lands below `upper_Cq`
    ## (probability pnorm(upper_Cq, ...)) AND it is not an inflated zero
    ## (probability 1 - p_zero); anything else counts as a non-detection.
    p_detect_one = pnorm(upper_Cq, mean = Cq_hat, sd = Cq_sd) * (1 - p_zero)
    p_miss_one = 1 - p_detect_one
    ## P(at least one detection in i replicates) = 1 - P(all i replicates miss).
    vapply(n_rep, function(i) 1 - p_miss_one^i, numeric(length(p_miss_one)))
}
dup_arg_warn = function(arg)
{
    ## Warn that both an explicit argument and a model_fit were supplied and
    ## that the explicit argument wins.
    msg = paste0("Both ", arg, " and model_fit provided. Using ", arg, " provided.")
    warning(msg)
}
if(FALSE){
    ## not ready yet
    ## Draft inverse calculation: given a target detection probability `p`,
    ## solve for the level of one predictor (`varying`) while the other
    ## predictors are held at `fixed_levels`.  Disabled behind if(FALSE);
    ## do not enable as-is (see the review notes near the end).
    var_level_given_detect = function(p, model_fit,
                                      varying = character(), fixed_levels,
                                      upper_Cq = model_fit@upper_Cq,
                                      Cq_sd = model_fit@Cq_sd,
                                      std_curve_alpha = model_fit@std_curve_alpha,
                                      std_curve_beta = model_fit@std_curve_beta)
    # replicated the analytic steps taken in Expt5Design.R
{
    ## ln[eDNA] threshold implied by upper_Cq and the standard curve.
    ln_conc_thresh = (upper_Cq - std_curve_alpha) / std_curve_beta
    if(is.null(names(fixed_levels)))
        stop("Please provide a named vector of fixed levels")
    fixed_order = match(names(fixed_levels), colnames(model_fit@x))
    if(any(is.na(fixed_order)))
        stop("Unable to match names of 'fixed_levels' to parameters")
    var = match(varying, colnames(model_fit@x))
    if(any(is.na(var)))
        stop("Could not match 'varying' with a parameter. Please check the spelling.")
    # Fix this
    ## NOTE(review): before enabling -- `model_fix` below is almost certainly
    ## a typo for `model_fit`, the target probability `p` is never used in
    ## this expression, and the function returns this assignment's value
    ## implicitly; the algebra looks unfinished.
    var_at_threshold = (ln_conc_thresh -
                        (fixed_levels[fixed_order] %*%
                         model_fit@x[,names(fixed_levels[fixed_order])]) /
                        model_fix@betas)
}
}
|
ff8e6ab850944a090555a449bf5af49b4adb720c | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Letombe/Abduction/aim-100-1_6-yes1-1-90/aim-100-1_6-yes1-1-90.R | e8c39ffccd7c41dafe329344b1823b099fe00324 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | aim-100-1_6-yes1-1-90.R | 5ed3b63ab14bfb3e78c56df188616baf aim-100-1_6-yes1-1-90.qdimacs 376 664 |
a1d028cc8cf6cca8f114e4ef4d0e30cc0536eb8b | 6eb5c77ea5a336864a30397a5b0dbea45ce7b785 | /final_project.R | 4868203b197ea6de8b9e3ee41eb230d72471a61a | [] | no_license | ampatil1996/Data-Wrangling-Project | b6be85e080582bf3011fc20fbc9bd37cc1f700b1 | 6637746b5bde2d4c7539f530d99721d9e5f7cc6c | refs/heads/master | 2022-06-13T14:31:44.591606 | 2020-05-05T03:11:35 | 2020-05-05T03:11:35 | 255,664,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,716 | r | final_project.R | install.packages("hrbrthemes")
library(tidyverse)
library(magrittr)
library(ggplot2)
library(stringr)
library(lubridate)
library(hrbrthemes)
library(ggplot2)
# Load the combined Citi Bike NJ trip data.
# NOTE(review): absolute local path -- this only runs on the author's
# machine; consider a relative path or a command-line argument.
data <- read.csv("C:/Users/Amit/Desktop/MS in Data Science/Sem4/Data Wrangling/Project/NJ Data/Combined_data.csv")
df1 <- data
dim(df1)
#sample of data
head(df1,30)
#changing column names
colnames(df1) <- c("trip_duration","start_time","stop_time","start_stn_id","start_stn_name","start_stn_lat",
                   "start_stn_long","end_stn_id","end_stn_name","end_stn_lat","end_stn_lang","bike_id",
                   "user_type","birth_year","gender")
# Drop stop_time; keep the remaining 14 columns.
df1 <- df1 %>% select("trip_duration","start_time","start_stn_id","start_stn_name","start_stn_lat",
                      "start_stn_long","end_stn_id","end_stn_name","end_stn_lat","end_stn_lang",
                      "bike_id","user_type","birth_year","gender")
# Station names come in as factors from read.csv; convert to character.
df1$start_stn_name = as.character(df1$start_stn_name)
df1$end_stn_name = as.character(df1$end_stn_name)
##Since start time is in format 2019-01-01 03:09:09.7110 we will seperate start_time into start_date and start_time
df1 <- df1 %>% separate(start_time,c("start_date","start_time"),sep = " ")
#1) user type vs count graph
df1 %>% group_by(user_type) %>% summarise(n = n())
df1 %>% group_by(user_type) %>% summarise(n = n()) %>% ggplot(aes(x=user_type, y=n)) +
  geom_bar(stat = "identity", fill="steelblue") +
  geom_text(aes(label=n), vjust=1.6, color="white", size=3.5) +
  ggtitle("User type vs Count") + xlab("User Type") + ylab("Count") + theme_minimal()
# Same distribution again, with an annotation and comma-formatted y axis.
plt <- df1 %>% ggplot(aes(x=user_type,fill=user_type)) + geom_bar(alpha=.7)+theme(legend.position="none")+ggtitle(expression(atop("UserType Distribution",atop("Customer = 24 hour/7 Day pass | Subscriber = Annual Pass")))) + scale_fill_brewer(palette="Set2")+annotate("text",x="Subscriber",y=350000,label="90% are Subscribers")+
  ylim(0,400000)
library(scales)
plt + scale_y_continuous(labels = comma)
# Parse start_date (character "YYYY-MM-DD" after separate()) into a Date.
class(df1$start_date)
df1$start_date <- ymd(df1$start_date)
#df1$start_date <- as.Date(df1$start_date,"%Y-%m-%d")
?as.Date
#############2) Age-Wise distribution of data
## Derive rider age from birth year (data snapshot year: 2020).
Age_df <- df1
typeof(Age_df$birth_year)
Age_df <- Age_df %>% mutate(Age = 2020 - Age_df$birth_year)
## Histogram of rider ages.
## Bug fix: the original continued the chain with a doubled "+"
## ("... +\n  + ggtitle(...)"), which applies unary "+" to a ggplot layer and
## errors at run time; the stray "+" is removed here.  (The labels still say
## "Trip Duration" -- copy/paste from section 9 -- and this first plot is
## immediately overwritten below anyway.)
Age_plot <- Age_df %>% ggplot(aes(x = Age)) + geom_histogram(binwidth = 0.8,color = "black",fill = "steelblue") +
  ggtitle("Distribution of Trip Times") + xlab("Trip Duration(minutes)") +
  ylab("Number of Trips")
Age_plot <- ggplot(Age_df, aes(x=Age)) +
  geom_histogram(color = "black",fill = "#ADD8E6")+
  scale_x_continuous(breaks=seq(0,30,1)) + ggtitle("Distribution of Trip Times") +
  xlab("Age") + ylab("Number of Trips") + theme_minimal()
## Histogram + kernel-density overlay of the same variable.
ggplot(Age_df, aes(x=Age)) +
  geom_histogram(aes(y=..density..), colour="black", fill="white")+
  geom_density(alpha=.2, fill="#FF6666")+ scale_x_continuous(breaks = seq(0, 120, by = 20))
Age_plot
############
######### 3) Age-group and Gender wise trip percentage wise Pie chart
## Bucket riders into five age bands.
## NOTE(review): the case_when() fall-through (TRUE ~ 5) also sends ages
## below 17 (and NA ages) into group 5, which the plot below labels
## "55 and up" -- confirm this is intended.
Age_df_cat <- Age_df %>% mutate(Age_group =
                                  case_when(
                                    (Age >= 17 & Age < 25) ~ 1,
                                    (Age >= 25 & Age < 35) ~ 2,
                                    (Age >= 35 & Age < 45) ~ 3,
                                    (Age >= 45 & Age < 55) ~ 4,
                                    TRUE ~ 5
                                  ))
class(Age_df_cat$Age_group)
Age_df_cat$Age_group <- as.factor(Age_df_cat$Age_group)
#total_count <- nrow(Age_df_cat)
#group_count <- Age_df_cat %>% group_by(Age_group) %>% summarise(n = n())
#group_count <- group_count %>% mutate(Percentage = 100*n/total_count)
## Share of trips by gender (code 1 recoded to "Male", 2 to "Female";
## other codes excluded) within each age group, as a percentage of all
## gendered trips.
total_count <- Age_df_cat %>% filter(gender == 1 | gender == 2) %>% nrow()
group_data <-Age_df_cat %>% filter(gender == 1 | gender == 2) %>% group_by(gender,Age_group) %>% summarise(n = n())
group_data <- group_data %>% mutate(percent = n/total_count*100)
group_data$gender[group_data$gender == 1] <- "Male"
group_data$gender[group_data$gender == 2] <- "Female"
Age_df_cat$gender <- as.factor(Age_df_cat$gender)
## Stacked bar: percentage by age group, filled by gender.
basic <- ggplot(group_data, aes(fill=gender, y=percent, x=Age_group)) +
  geom_bar(position="stack", stat="identity") + xlab("Age group") + ylab("Percentage")
basic + scale_x_discrete(labels=c("1" = "17-24", "2" = "25-34", "3" = "35-44", "4" = "45-54", "5" = "55 and up"))
########
##########4) Distribution of bikes over the year
## Aggregate trips per calendar month and plot the monthly trend.
new_data <- df1
head(new_data,5)
class(new_data$Month)
new_data$Month <- format(new_data$start_date, "%Y-%m")
temp_new_data <- new_data %>% group_by(Month) %>% summarise(n= n())
## Turn "YYYY-MM" back into a Date (first of month) so ggplot gets a date axis.
temp_new_data$Month <- as.Date(paste(temp_new_data$Month,"-01",sep=""))
temp_new_data$Month <- as.Date(temp_new_data$Month,"%Y-%m-%d")
temp_new_data %>% ggplot(aes(x = Month,y = n)) +
  geom_line() + geom_point()+ ggtitle("Overall usage") +
  xlab("Time") + ylab("Total Trips") + geom_smooth()
## Smoothed daily-trip trend over the whole period.
plot2 <- df1 %>% group_by(start_date) %>% summarise(total_trips = n()) %>% ggplot(aes(x = start_date,y = total_trips)) +
  geom_smooth(color = "#8968CD") + ggtitle("Total trips by month usage") +
  xlab("Time") + ylab("Total Trips")
plot2
#########
######5)Top 5 stations with with most starts
## Five most frequent start stations, bar chart in descending order.
top_starts <- df1 %>% group_by(start_stn_name) %>% summarise(total = n()) %>% top_n(5,total)
top_starts <- top_starts[order(-top_starts$total),]
plot1 <- ggplot(data = top_starts,aes(x = start_stn_name,y=total)) +
  geom_bar(stat = "identity",fill="#8470FF") +
  geom_text(aes(label=total), vjust=1.6, color="white", size=3.5)+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))+
  ggtitle("Top 5 stations with with most starts") + xlab("Station names") + ylab("Total Trips")+
  theme(plot.title = element_text(color="#696969", size=12, face="bold.italic"),
        axis.title.x = element_text(color="#1C1C1C", size=12, face="bold"),
        axis.title.y = element_text(color="#1C1C1C", size=12, face="bold"),
        axis.text.x = element_text(size=10),
        axis.text.y = element_text(size=9))
## NOTE(review): the x-axis order below is a hard-coded station list; it has
## to be kept in sync with the data if the top-5 set ever changes.
plot1 + scale_x_discrete(limits=c('Grove St PATH','Hamilton Park','Sip Ave','Harborside','Newport PATH'))
#######
#6) Most Popular trips
## Count trips per (start, end) station pair and plot the ten most common.
popular_trips <- df1 %>% group_by(start_stn_name,end_stn_name) %>% summarise(total = n())
popular_trips <- popular_trips[order(-popular_trips$total),]
popular_trips$trips <- paste(popular_trips$start_stn_name,popular_trips$end_stn_name,sep = " to ")
plot4 <- popular_trips[1:10,] %>% ggplot(aes(x = trips, y = total)) + geom_bar(stat = "identity") +
  ggtitle("Most Popular Trips") + xlab("Trips") + ylab("Total") +
  theme(plot.title = element_text(color="steelblue", size=14, face="bold.italic"),
        axis.title.x = element_text(color="#993333", size=14, face="bold"),
        axis.title.y = element_text(color="#993333", size=14, face="bold"),
        axis.text.x = element_text(size=10),
        axis.text.y = element_text(face="bold", size=9))+
  coord_flip()
## NOTE(review): hard-coded ordering of the ten trip labels (least to most
## frequent, so coord_flip() puts the biggest bar on top).
plot4 + scale_x_discrete(limits=c("McGinley Square to Sip Ave","Dixon Mills to Grove St PATH","Monmouth and 6th to Grove St PATH",
                                  "Brunswick St to Grove St PATH","Grove St PATH to Marin Light Rail",
                                  "Jersey & 6th St to Grove St PATH","Marin Light Rail to Grove St PATH",
                                  "Brunswick & 6th to Grove St PATH","Grove St PATH to Hamilton Park",
                                  "Hamilton Park to Grove St PATH"))
##########
## 7) Average Trips by Day of Week
## Average trips per weekday = total trips on that weekday / number of
## distinct calendar dates with that weekday in the data.
df1$day_of_week <- weekdays(df1$start_date)
temp <- df1 %>% group_by(day_of_week) %>% summarise(count = n())
avg_day_of_week <- df1 %>% group_by(start_date,day_of_week) %>%
  summarise(n = n()) %>% group_by(day_of_week) %>% summarise(days_count = n())
avg_day_of_week_final <- inner_join(temp,avg_day_of_week) %>% mutate(avg_count = count/days_count)
## Order the weekday factor Monday..Sunday so the x axis is chronological.
avg_day_of_week_final$day_of_week <- factor(avg_day_of_week_final$day_of_week, levels=c("Monday", "Tuesday", "Wednesday", "Thursday",
                                                                                        "Friday", "Saturday", "Sunday"))
avg_day_of_week_final[order(avg_day_of_week_final$day_of_week),]
#avg_day_of_week_final$day_of_week <- as.character(avg_day_of_week_final$day_of_week)
avg_day_of_week_final %>% ggplot(aes(x = day_of_week,y = avg_count,group = 1)) +
  geom_line(color = "#6495ED",size=0.72) +
  ylim(600,1500) +
  ggtitle("Average Trips by Day of Week") + xlab("Day of week") + ylab("Avg. trips")
##########
######## 8) Average Trips by time of a day
head(df1,30)
#we will retrieve hour element from time
## separate() keeps only the first piece, so start_hr becomes the hour "HH".
df2 <- df1 %>% separate(start_time,c("start_hr"), sep = ":")
df2$start_hr <- as.numeric(df2$start_hr)
typeof(df2$day_of_week)
## Split into work days vs weekends and average hourly counts.
## NOTE(review): `weekdays` here masks the base function weekdays() used in
## section 7; also, the divisors 261 and 104 are hard-coded counts of work
## days / weekend days in the covered year -- confirm against the data.
weekdays <- df2%>% filter(day_of_week == "Monday" | day_of_week == "Tuesday" | day_of_week == "Wednesday" | day_of_week == "Thursday" | day_of_week == "Friday")
weekdays <- weekdays %>% group_by(start_hr) %>% summarise(hourly_count = n())
weekdays <- weekdays %>% mutate(avg_count = hourly_count/261)
weekdays$var <- "work days"
weekends <- df2%>% filter(day_of_week == "Saturday" | day_of_week == "Sunday")
weekends <- weekends %>% group_by(start_hr) %>% summarise(hourly_count = n())
weekends <- weekends %>% mutate(avg_count = hourly_count/104)
weekends$var <- "weekends"
combined <- rbind(weekdays,weekends)
## One line per group (work days vs weekends) over the 24 hours.
plot3 <- combined %>% ggplot(aes(x = start_hr,y = avg_count,col = var)) +
  geom_line(size =0.72) + scale_x_continuous(breaks=seq(0,23,2),labels=c("00:00", "02:00","04:00","06:00",
                                                                         "08:00","10:00","12:00","14:00","16:00","18:00","20:00","22:00")) +
  ggtitle("Average Trips by time of day 2019")+
  xlab("Time") + ylab("Average Trips")+
  scale_color_brewer(palette="Dark2") + theme(legend.position="right")
plot3
##########
######## 9) Distribution of tripduration
## Trip duration in minutes; histogram restricted to trips under 40 minutes.
trip_time_min <- df1 %>% select(trip_duration) %>% mutate(trip_in_mins = trip_duration/60)
(trip_time_min[trip_time_min$trip_in_mins < 40,]) %>% ggplot(aes(x = trip_in_mins)) + geom_histogram(binwidth = 0.8,color = "black",fill = "steelblue") +
  scale_x_continuous(breaks=seq(0,30,2)) + ggtitle("Distribution of Trip Times") + xlab("Trip Duration(minutes)") +
  ylab("Number of Trips")
nrow(trip_time_min[trip_time_min$trip_in_mins < 40,])
##only 10000 trips and more than 60 mins so we are not considering it for plotting the graph
# Scratch arithmetic: count of trips excluded by the < 40 min filter.
404947 - 394600
######
######### 10) Effect of weather on temprature
## Join daily average temperature onto trips and look at the density of
## trips over temperature.  `final_combined_weather_data` must already be
## loaded in the session (it is not created in this script).
weather <- final_combined_weather_data
weather <- weather %>% select(data.weather.date,data.weather.avgtempF)
weather$data.weather.date <- ymd(weather$data.weather.date)
df1_with_weather <- inner_join(df1,weather, by= c("start_date"="data.weather.date"))
df1_with_weather$data.weather.avgtempF <- as.numeric(df1_with_weather$data.weather.avgtempF)
df1_with_weather %>% ggplot(aes(x = data.weather.avgtempF)) +
  geom_density(fill="#69b3a2", color="#e9ecef", alpha=0.8) +
  ggtitle("Effect of weather on bike trips") + xlab("Avg. Temprature") +
  theme_ipsum()
######
########Another effect of weather on temprature
## Correlation between daily trip count and daily average temperature.
df3 <- df1_with_weather %>% group_by(start_date) %>% summarise(n = n())
df3 <- inner_join(df3,weather,by= c("start_date"="data.weather.date"))
#############
###########
cor(df3$n,as.numeric(df3$data.weather.avgtempF))
##########
##### Precipitation vs date
## Pull daily max/avg temperature, snowfall and (hourly) precipitation out of
## the scraped weather object and join them onto the trip data.
## `weather_data_created_by_function` must already exist in the session.
weather_data <- weather_data_created_by_function
weather_data_select <- weather_data %>% select(data.weather.date,data.weather.maxtempF,data.weather.avgtempF,data.weather.totalSnow_cm)
## Bug fix: `li` was used without being initialised (li[[i]] <- ... on an
## undefined object errors on the first iteration); pre-allocate one slot
## per day of the year.
li <- vector("list", 365)
i = 1
while(i <= 365)
{
  li[[i]] <- weather_data['data.weather.hourly'][[1]][[i]][['precipInches']]
  i <- i + 1
}
weather_data_select['precip_in_inches'] = li
colnames(weather_data_select) <- c('date','maxtemp','avgtemp','snow_in_cm','precipinches')
weather_data_select$date <- as.Date(weather_data_select$date)
df1 <-inner_join(df1,weather_data_select, by= c("start_date"="date"))
df11 <- df1
df1$avgtemp <- as.numeric(df1$avgtemp)
df1$maxtemp <- as.numeric(df1$maxtemp)
df1$snow_in_cm <- as.numeric(df1$snow_in_cm)
df1$precipinches <- as.numeric(df1$precipinches)
df1 %>% group_by(precipinches) %>% summarise(n = n()) %>% ggplot(aes(x = precipinches,y = n)) +
  xlim(0,1) + geom_smooth()
df11 <- df1
df11
## Rebuild the per-day precipitation values as a flat vector.
## Bug fix: `i` was left at 366 by the loop above, so this second loop never
## executed; reset it to 2 (element 1 is seeded just above).
arr <- weather_data['data.weather.hourly'][[1]][[1]][['precipInches']]
i <- 2
while(i <= 365)
{
  arr[[i]] <- weather_data['data.weather.hourly'][[1]][[i]][['precipInches']]
  i <- i + 1
}
arr
li
## Daily trip dates joined with precipitation, then trips vs precipitation.
temp_df <- df11 %>% group_by(start_date) %>% summarise(n = n()) %>% select(start_date)
temp_df["precipitation"] <- li
merged <- temp_df %>% inner_join(df11)
merged['precipitation'] = as.numeric(merged$precipitation)
merged %>% group_by(precipitation) %>% summarise(n = n()) %>% ggplot(aes(x = precipitation,y = n)) +
  xlim(0,1) + geom_smooth()
df11 %>% group_by(precipinches) %>% summarise(n = n()) %>% ggplot(aes(x = precipinches,y = n)) +
  xlim(0,1) + geom_smooth()
##############
df1
############
## Bucket trips by daily precipitation (inches, left-closed intervals) and
## summarise trip counts per bucket.
trips_by_precipitation = df1 %>%
  mutate(precip_bucket = cut(precipinches, c(0, 0.001, 0.1, 0.2, 0.4, 0.6, 0.8, 1, 2,4), right = FALSE)) %>%
  group_by(precip_bucket) %>%
  summarize(
    avg_precip = mean(precipinches),
    count = n())
trips_by_precipitation %>% ggplot(aes(x = avg_precip,y = count)) +
  geom_line(color = "steelblue",size=0.72) + ggtitle("Precipitation vs no. of trips") +xlab("Precipitation in Inches") +
  ylab("Total trips") + theme_bw()
trips_by_precipitation
############
##################
## Duplicate of the precipitation merge above, kept for the final report:
## rebuild the daily precipitation vector (element 1 seeds `arr`, the loop
## fills 2..365), attach it to daily trip counts, and plot trips vs rain.
weather_data <- weather_data_created_by_function
arr <- weather_data['data.weather.hourly'][[1]][[1]][['precipInches']]
i = 2
while(i <= 365)
{
  arr[[i]] <- weather_data['data.weather.hourly'][[1]][[i]][['precipInches']]
  i <- i + 1
}
#arr
temp <- df1 %>% group_by(start_date) %>% summarise(n = n())
#temp
temp["precipitation"] <- arr
#temp
merged <- inner_join(df1,temp)
#merged
merged['precipitation'] = as.numeric(merged$precipitation)
merged %>% group_by(precipitation) %>% summarise(n = n()) %>% ggplot(aes(x = precipitation,y = n)) +
  xlim(0,1) + geom_smooth()
#################
################
## Trips vs average temperature on dry, snow-free days.
## NOTE(review): floor(avgtemp / 10) * 5 makes bucket labels advance by 5 for
## every 10 degrees; floor(avgtemp / 10) * 10 was probably intended, but the
## plot uses avg_max_temperature on the x axis so the output is unaffected.
trips_by_temperature = df1 %>%
  filter(precipinches == 0 & snow_in_cm == 0) %>%
  mutate(temperature_bucket = floor(avgtemp / 10) * 5) %>%
  group_by(temperature_bucket) %>%
  summarize(avg_max_temperature = mean(avgtemp),
            count = n())
min(df1$avgtemp)
trips_by_temperature %>% ggplot(aes(x = avg_max_temperature,y = count)) + geom_line(color = "steelblue",size = 0.72) +
  ggtitle("Avg Temp vs Total trips") + xlab("temperature") + ylab('Total trips')+
  theme_bw()
###############
#################
## Trips by snow-depth bucket.
## Bug fix: the original grouped by raw snow_in_cm, leaving the freshly
## created snow_bucket unused and making avg_snow_depth trivially equal to
## the grouping value; group by the bucket instead.
trips_by_snow_depth = df1 %>%
  mutate(snow_bucket = cut(snow_in_cm, c(0, 0.001, 3, 6, 9, 12, 60), right = FALSE)) %>%
  group_by(snow_bucket) %>%
  summarize(avg_snow_depth = mean(snow_in_cm),
            count = n())
df1
################# |
724f35213843435ffe32e6c609997a4ef05cf50a | 2a82c473bccb19ba59bbdd0696f35043b9cfcfa2 | /man/quick_facet.Rd | d52bb3927cc83906b80f189d4dc36e9f85de100a | [
"MIT"
] | permissive | wkostelecki/ezplot | daab0c404af67977f4bc6daa02798c6737dd3dc0 | 3422e7cd110f960d63da1fd9fc3203423fbe2b3d | refs/heads/master | 2023-06-23T06:56:16.773219 | 2023-06-17T05:23:51 | 2023-06-17T05:23:51 | 39,913,514 | 6 | 0 | NOASSERTION | 2020-11-15T14:05:06 | 2015-07-29T20:01:32 | R | UTF-8 | R | false | true | 459 | rd | quick_facet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{quick_facet}
\alias{quick_facet}
\title{Quick facet}
\usage{
quick_facet(g, ncol = NULL, ...)
}
\arguments{
\item{g}{A ggplot object.}
\item{ncol}{Number of facet columns.}
\item{...}{Arguments to pass to \code{facet_grid} or \code{facet_wrap}.}
}
\description{
Applies faceting to ggplot objects when g[["data"]] has a
\code{facet_x} or \code{facet_y} column.
}
|
80dba9de0b003cdf847d31fac7aec328e6a934a9 | f1df80ec987a517546a34c8589691206e079fc8b | /R/lags.r | 9374598fda6d29a681ec09566ef382f06b3e8674 | [] | no_license | skranz/sktools | 3c38b49d990a2f6e18bb92b614f8b47a79a7fc42 | 8e629e09f0b72b1471b4a4eb89f3ada0a43e4aaf | refs/heads/master | 2021-07-11T23:03:31.263729 | 2021-04-06T05:34:30 | 2021-04-06T05:34:30 | 9,919,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 892 | r | lags.r | examples.add_lags = function() {
T = 5; N = 4
df = data_frame(i = rep(1:N, times=T),t=rep(1:T,each=N),x=runif(T*N),y=runif(T*N))
add_lags(df,var="x",lags=1:3, leads=1:2, groupvar="i",timevar="t")
add_lags(df,var=c("x","y"),lags=1:3,leads=1, groupvar="i", timevar="t")
}
# Add lagged and/or lead versions of one or more variables to a data frame.
#
# data:      a data frame (or tbl) to augment
# var:       character vector of column names to lag/lead
# lags:      integer vector of lag offsets; new columns are named <var>_lag<k>
# leads:     integer vector of lead offsets; new columns are named <var>_lead<k>
# groupvar:  optional column name(s) to group by before lagging (e.g. panel id)
# timevar:   optional column name used to sort rows within groups
# is_sorted: if TRUE, skip sorting; defaults to TRUE exactly when no timevar given
#
# NOTE(review): relies on s_group_by / s_arrange / s_mutate from the dplyrExtras
# package (string-based dplyr verbs) and on dplyr's lag()/lead(); the mutate
# expressions are assembled as text and evaluated by s_mutate.
add_lags = function(data,var,lags=1,leads=NULL, groupvar=NULL, timevar=NULL, is_sorted=is.null(timevar)) {
  library(dplyrExtras)
  if (!is.null(groupvar))
    data = s_group_by(data, groupvar)
  if (!is_sorted)
    data = s_arrange(data, timevar)
  code = NULL
  # Build "x_lag1 = lag(x,1), ..." expressions for every var/lag combination
  if (length(lags)>0) {
    gr = expand.grid(var,lags)
    code = c(code,paste0(gr[,1],"_lag",gr[,2]," = lag(",gr[,1],",",gr[,2],")", collapse=", "))
  }
  # Build "x_lead1 = lead(x,1), ..." expressions for every var/lead combination
  if (length(leads)>0) {
    gr = expand.grid(var,leads)
    code = c(code,paste0(gr[,1],"_lead",gr[,2]," = lead(",gr[,1],",",gr[,2],")", collapse=", "))
  }
  # Evaluate the generated expressions, then drop the grouping again
  ungroup(s_mutate(data,code))
}
|
64daf921e795124b2438ebb0a49fb6ea9996bb1a | 2d8ba76402f8b76391a76eb6fb92401e12d03cd3 | /Chr3_GSE103091_Cibersort.R | 67dca94ce3796e5f96cc9b6de9223d4776dae6ee | [] | no_license | HopeStar2018/Macrophages-Related-Gene-Signature-to-Predict-Overall-Survival-in-Patients-with-TNBC | 79d88ca58bb805739ce6291ea972eec724919709 | 19c21ba4fa7738cd2cb2d3e5303b9f41acdd0b73 | refs/heads/main | 2023-04-14T21:06:04.951928 | 2021-04-27T05:15:51 | 2021-04-27T05:15:51 | 361,979,635 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,441 | r | Chr3_GSE103091_Cibersort.R | rm(list = ls())
setwd('H:\\PHD_database\\Paper_writing\\A_Paper_XVIII_BreastCancer_LXN_ver2\\Step7_OtherGSE\\Chr3_GSE103091_Cibersort')
load("H:/PHD_database/Paper_writing/A_Paper_XVIII_BreastCancer_LXN_ver2/Step7_OtherGSE/Chr1_GSE103091_reanno/GSE103091_exprdf_uniq.RData")
library(parallel)
library(doParallel)
source("CIBERSORT.R")
out=rbind(ID=colnames(GSE103091_exprdf_uniq),GSE103091_exprdf_uniq)
write.table(out,file="uniq.symbol.txt",sep="\t",quote=F,col.names=F)
no_cores <- detectCores() - 1
cl <- makeCluster(no_cores)
registerDoParallel(cl)
results=CIBERSORT("ref.txt", "uniq.symbol.txt", perm=1000, QN=TRUE)
stopCluster(cl)
#### Method II ####
library(affy)
library(annotate)
library(hgu133plus2hsentrezgcdf)
library(org.Hs.eg.db)
setwd('G:\\GEO_database\\TNBC\\GSE103091_RAW')
Data<-ReadAffy(cdfname = "hgu133plus2hsentrezgcdf")
eset<-rma(Data)
# eset<-mas5(Data)
ID<-featureNames(eset)
ID2<-sub("_at","",ID)
GS <- as.matrix(getSYMBOL(ID2, 'org.Hs.eg'))
ematrix<-exprs(eset)
rows <- GS
cols = c("GeneSymbol",colnames(ematrix))
ematrix <- cbind(rows,ematrix)
ematrix <- ematrix[which(ematrix[,1] != "NA"),] #remove NAs
ematrix <- ematrix[order(ematrix[,1]),] #sort by gene name
ematrix <- rbind(cols, ematrix)
write.table(ematrix,file="NormalizedExpressionArray.customCDF.txt",sep="\t", col.names=F, row.names=F,quote=FALSE)
|
43f43348acadb7e5e60af302f6cb6d7f7f4b1243 | cadbe42d3a9397c38d81ddc2be6c807034c03d36 | /module5_DimensionalityReduction.R | a3539b458602090265518a12cef3d58fea5e80b1 | [] | no_license | airsafe/edx-principles-of-machine-learning | c55cbaa5fc53f8548f6e3a4f5ac2d96c5b0d84e3 | 732439053528edc5fb79843213b428ffbcd69f6d | refs/heads/master | 2020-06-12T23:50:34.624132 | 2019-07-11T16:31:03 | 2019-07-11T16:31:03 | 194,465,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,662 | r | module5_DimensionalityReduction.R | # Processing start time
# Redirect console output to a log file for the whole run; closed by sink() at the end.
sink('dim_reduction_output.txt')
# Record wall-clock start time for the elapsed-time report at the bottom of the script.
timeStart = Sys.time()
# ADMINISTRATIVE NOTES
# Note: To describe database at any point, use str(*name*)
# Note: To clear R workspace, use rm(list = ls())
# Note: Searching R help files - RSiteSearch("character string")
# Note: To clear console, use CTRL + L
# INSTALLING SPECIAL PACKAGES
# if("<package_name>" %in% rownames(installed.packages()) == FALSE)
# {install.packages("<package_name>")}# Added due to error
# library(<package_name>)
# ====================
# Dimensionality reduction with principal components
# Principal component analysis, or PCA, is an alternative to regularization and straight-forward feature elimination.
# PCA is particularly useful for problems with very large numbers of features compared to the number of training cases.
# For example, when faced with a problem with many thousands of features and perhaps a few thousand cases, PCA can be a good choice to reduce the dimensionality of the feature space.
#
# PCA is one of a family of transformation methods that reduce dimensionality.
# PCA is the focus here, since it is the most widely used of these methods.
#
# The basic idea of PCA is rather simple: Find a linear transformation of the feature space which projects the majority of the variance onto a few orthogonal dimensions in the transformed space.
# The PCA transformation maps the data values to a new coordinate system defined by the principal components.
# Assuming the highest variance directions, or components, are the most informative, low variance components can be eliminated from the space with little loss of information.
#
# The projection along which the greatest variance occurs is called the first principal component.
# The next projection, orthogonal to the first, with the greatest variance is called the second principal component.
# Subsequent components are all mutually orthogonal with decreasing variance along the projected direction.
#
# Widely used PCA algorithms compute the components sequentially, starting with the first principal component.
# This means that it is computationally efficient to compute the first several components from a very large number of features.
# Thus, PCA can make problems with very large numbers of features computationally tractable.
#
# Note: It may help your understanding to realize that principal components are a scaled version of the eigenvectors of the feature matrix.
# The scale for each dimensions is given by the eigenvalues.
# The eigenvalues are the fraction of the variance explained by the components.
#
# A simple example
# To cement the concepts of PCA you will now work through a simple example.
# This example is restricted to 2-d data so that the results are easy to visualize.
#
# As a first step, execute the code in cell below to load the packages required for the rest of this notebook.
#
# Note: If you are running in Azure Notebooks, make sure that you run the code in the setup.ipynb notebook at the start of you session to ensure your environment is correctly configured.
# Dimensionality reduction with principal components
# Principal component analysis, or PCA, is an alternative to regularization and straight-forward feature elimination.
# PCA is particularly useful for problems with very large numbers of features compared to the number of training cases.
# For example, when faced with a problem with many thousands of features and perhaps a few thousand cases, PCA can be a good choice to reduce the dimensionality of the feature space.
#
# PCA is one of a family of transformation methods that reduce dimensionality.
# PCA is the focus here, since it is the most widely used of these methods.
#
# The basic idea of PCA is rather simple: Find a linear transformation of the feature space which projects the majority of the variance onto a few orthogonal dimensions in the transformed space.
# The PCA transformation maps the data values to a new coordinate system defined by the principal components.
# Assuming the highest variance directions, or components, are the most informative, low variance components can be eliminated from the space with little loss of information.
#
# The projection along which the greatest variance occurs is called the first principal component.
# The next projection, orthogonal to the first, with the greatest variance is called the second principal component.
# Subsequent components are all mutually orthogonal with decreasing variance along the projected direction.
#
# Widely used PCA algorithms compute the components sequentially, starting with the first principal component.
# This means that it is computationally efficient to compute the first several components from a very large number of features.
# Thus, PCA can make problems with very large numbers of features computationally tractable.
#
# Note: It may help your understanding to realize that principal components are a scaled version of the eigenvectors of the feature matrix.
# The scale for each dimensions is given by the eigenvalues.
# The eigenvalues are the fraction of the variance explained by the components.
#
# A simple example
# To cement the concepts of PCA you will now work through a simple example.
# This example is restricted to 2-d data so that the results are easy to visualize.
#
# As a first step, execute the code in cell below to load the packages required for the rest of this notebook.
#
# Note: If you are running in Azure Notebooks, make sure that you run the code in the setup.ipynb notebook at the start of you session to ensure your environment is correctly configured.
## Import packages
if("ggplot2" %in% rownames(installed.packages()) == FALSE)
{install.packages("ggplot2")}
library(ggplot2)
if("repr" %in% rownames(installed.packages()) == FALSE)
{install.packages("repr")}
library(repr)
if("dplyr" %in% rownames(installed.packages()) == FALSE)
{install.packages("dplyr")}
library(dplyr)
if("caret" %in% rownames(installed.packages()) == FALSE)
{install.packages("caret")}
library(caret)
if("MASS" %in% rownames(installed.packages()) == FALSE)
{install.packages("MASS")}
library(MASS)
if("ROCR" %in% rownames(installed.packages()) == FALSE)
{install.packages("ROCR")}
library(ROCR)
if("pROC" %in% rownames(installed.packages()) == FALSE)
{install.packages("pROC")}
library(pROC)
# options(repr.plot.width=4, repr.plot.height=4) # Set the initial plot area dimensions
# The code in the cell below simulates data from a bivariate Normal distribution.
# The distribution is deliberately centered on {0,0} and with unit variance on each dimension. There is considerable covariance between the two dimensions leading to a covariance matrix:
# cov(X) = [1.0 0.6]
# [0.6 1.0]
#
# Given the covariance matrix 100 draws from this distribution are computed using the mvrnorm function from the R MASS package.
# Execute this code:
# Simulate 100 draws from a bivariate Normal with unit variances and 0.6 covariance.
# NOTE(review): the names `cov`, `mean`, and `sample` shadow base R functions of
# the same names; harmless here but worth renaming if this script grows.
set.seed(124)
cov = matrix(c(1.0, 0.6, 0.6, 1.0), nrow =2, ncol = 2)
mean = c(0.0, 0.0)
sample = data.frame(mvrnorm(n = 100, mu = mean, Sigma = cov))
names(sample) = c('x','y')
print(dim(sample))
head(sample)
# Scatter plot of the simulated sample: roughly elliptical, visibly correlated.
ggplot(sample, aes(x,y)) + geom_point()
# You can see that the data have a roughly elliptical pattern.
# The correlation between the two dimensions is also visible.
#
# With the simulated data set created, it is time to compute the PCA model.
# The code in the cell below computes the principle component model using the R prcomp function.
# This function contains a list with multiple elements including the eigenvalues.
# The eigenvalues can be scaled to compute the variance explained:
#
# VE(X) = Var_X-component(X)/Var_X-total(X)
#
# Notice that by construction:
#
# ππΈ(π)=βπ=1πππΈπ(π)=1.0
#
# In other words, the sum of the variance explained for each component must add to the total variance or 1.0 for standardized data.
#
# Execute this code and examine the result.
# Fit PCA to the 2-d sample; prcomp returns the rotations (eigenvectors) and
# the standard deviations along each component (square roots of the eigenvalues).
pca_mod = prcomp(sample)
pca_mod
# The first component's standard deviation is several times the second's:
# most of the sample variance lies along the first principal component.
# Variance explained by each component; these fractions must sum to 1.0.
sdev_scaled = pca_mod$sdev**2/sum(pca_mod$sdev**2)
sdev_scaled
# Scale each rotation (row-wise) by the variance-explained fractions.
scaled_pca = data.frame(matrix(c(0,0,0,0), nrow = 2, ncol = 2))
for(i in 1:2){
scaled_pca[i,] = pca_mod$rotation[i,] * sdev_scaled
}
names(scaled_pca) = c('PC1','PC2')
str(scaled_pca)
# The two component vectors have their origins at [0,0}, and are quite different magnitude, and are pointing in different directions.
# To better understand how the projections of the components relate to the data, execute the code to plot the data along with the principal components.
# Execute this code:
## Find the slopes
# Each principal component is drawn as a segment from the origin to its scaled
# rotation vector; segment length reflects variance along that direction.
s1 = data.frame(x = c(0.0, scaled_pca$PC1[1]), y = c(0.0, scaled_pca$PC1[2]))
s2 = data.frame(x = c(0.0, scaled_pca$PC2[1]), y = c(0.0, scaled_pca$PC2[2]))
## Plot the data with the PCs
ggplot(sample, aes(x,y)) + geom_point() +
geom_line(data = s1, aes(x,y), color = 'red', size = 1) +
geom_line(data = s2, aes(x,y), color = 'red', size = 1)
# Notice the the first principal component (the long red line) is along the direction of greatest variance of the data. This is as expected.
# The short red line is along the direction of the second principal component.
# The lengths of these lines are the variance in the directions of the projection.
#
# The ultimate goal of PCA is to transform data to a coordinate system with the highest variance directions along the axes.
# The transform function in the cell below computes the projections of the data onto the new coordinate frames using matrix multiplication.
# Execute this code to apply the transform and plot the result:
# Project each row of `df` onto the first `ncomps` principal components.
# df:     data frame (or matrix-coercible) of features, one case per row
# pca:    rotation matrix (components in columns), matrix-coercible
# ncomps: number of leading components to keep
# Returns a data frame of the projected coordinates.
pca_transform = function(df, pca, ncomps){
feature_mat <- as.matrix(df)
rotation <- as.matrix(pca)[, 1:ncomps]
data.frame(feature_mat %*% rotation)
}
# Rotate the sample into the principal-component coordinate frame.
trans_sample = pca_transform(sample, scaled_pca, 2)
names(trans_sample) = c('x', 'y')
# After the rotation most variance lies along the horizontal (first-component) axis.
ggplot(trans_sample, aes(x,y)) + geom_point()
# Notice that the scale along these two coordinates are quite different.
# The first principal component is along the horizontal axis. The range of values on this direction is in the range of about {β2.5,2.5}.
# The range of values on the vertical axis or second principal component are only about {β0.2,0.3}.
# It is clear that most of the variance is along the direction of the fist principal component.
#
# Load Features and Labels
# Keeping the foregoing simple example in mind, it is time to apply PCA to some real data.
#
# The code in the cell below loads the dataset which has the following preprocessing:
#
# 1. Cleaning missing values.
# 2. Aggregating categories of certain categorical variables.
#
# Execute the code in the cell below to load the dataset:
# Load the prepared German credit data; drop the identifier column and recode
# the label as a factor with 'bad' as the first (positive) level.
credit = read.csv('German_Credit_Preped.csv', header = TRUE)
credit[,'Customer_ID'] = NULL
credit$bad_credit <- ifelse(credit$bad_credit == 1, 'bad', 'good')
credit$bad_credit <- factor(credit$bad_credit, levels = c('bad', 'good'))
dim(credit)
str(credit)
#
# There are 20 features in this data set.
#
# The prcomp function can only work with numeric matrices.
# Therefore, the categorical features are dummy variable encoded.
# Executed the code in the cell below to compute the encoding for the dummy variables.
# prcomp needs a numeric matrix, so categorical features are dummy-encoded.
dummies = dummyVars(bad_credit ~ ., data = credit)
# Split into train/test (70/30, stratified on the label), then dummy-encode each part.
set.seed(1955)
## Randomly sample cases to create independent training and test data
partition = createDataPartition(credit[,'bad_credit'], times = 1, p = 0.7, list = FALSE)
training = credit[partition,] # Create the training feature sample
training_label = credit[partition, 'bad_credit'] # Subset training labels
training = predict(dummies, newdata = training) # transform categorical to dummy vars
dim(training)
test = credit[-partition,] # Create the test sample
test_label = credit[-partition, 'bad_credit'] # Subset training labels
test = predict(dummies, newdata = test) # transform categorical to dummy vars
dim(test)
head(training)
# Zero-mean/unit-variance scaling is required before PCA; the scaling is fit on
# the training data only and then applied unchanged to the test data.
num_cols = c('loan_duration_mo', 'loan_amount', 'payment_pcnt_income', 'age_yrs')
preProcValues <- preProcess(training[,num_cols], method = c("center", "scale"))
training[,num_cols] = predict(preProcValues, training[,num_cols])
test[,num_cols] = predict(preProcValues, test[,num_cols])
head(training[,num_cols])
# Compute principal components
# Fit PCA on the scaled, dummy-encoded training features.
pca_credit = prcomp(training)
# Variance explained per component (kept for reference; currently commented out):
# var_exp = pca_credit$sdev**2/sum(pca_credit$sdev**2)
# var_exp
# sum(var_exp)
# These numbers are a bit abstract.
# However, you can see that the variance ratios are in descending order and that the sum is 1.0.
#
# Execute the code in the cell below to create a plot of the explained variance vs. the component:
# Scree plot: variance explained by each principal component of a prcomp fit.
# Fix: the original read `var_exp` from the global environment, but its only
# definition above is commented out, so calling this function raised an error;
# the variance-explained vector is now derived from the `pca_mod` argument.
# pca_mod: an object returned by prcomp() (uses its $sdev element).
# Returns a ggplot object.
plot_scree = function(pca_mod){
## Fraction of total variance carried by each component (eigenvalue scaling)
var_exp = pca_mod$sdev**2/sum(pca_mod$sdev**2)
## Plot as variance explained
df = data.frame(x = 1:length(var_exp), y = var_exp)
ggplot(df, aes(x,y)) + geom_line(size = 1, color = 'blue') +
xlab('Component number') + ylab('Variance explained') +
ggtitle('Scree plot of variance explained vs. \n Principal Component')
}
plot_scree(pca_credit)
# This curve is often referred to as a scree plot.
# Notice that the explained variance decreases rapidly until the 10th component and then slowly, thereafter.
# The first few components explain a large fraction of the variance and therefore contain much of the explanatory information in the data.
# The components with small explained variance are unlikely to contain much explanatory information.
# Often the inflection point or 'knee' in the scree curve is used to choose the number of components selected.
#
# Now it is time to create a PCA model with a reduced number of components.
# The code in the cell below trains and fits a PCA model with 10 components, and then transforms the features using that model.
# Execute this code.
## Compute the first 10 PCA components
pca_credit_10 = prcomp(training, rank = 10)
## Scale the eigenvalues to variance-explained fractions
var_exp_10 = pca_credit_10$sdev**2/sum(pca_credit_10$sdev**2)
Nrow = nrow(pca_credit_10$rotation)
Ncol = ncol(pca_credit_10$rotation)
scaled_pca_10 = data.frame(matrix(rep(0, Nrow * Ncol), nrow = Nrow, ncol = Ncol))
## Scale the rotations row-wise by the leading variance fractions
# NOTE(review): each rotation row is multiplied element-wise by var_exp_10[1:Ncol],
# mirroring the 2-d example above — confirm this is the intended scaling.
for(i in 1:Nrow){
scaled_pca_10[i,] = pca_credit_10$rotation[i,] * var_exp_10[1:Ncol]
}
## Print the dimensions of the scaled rotations and the first component
dim(scaled_pca_10)
pca_credit_10$rotation[1:10,1]
# The scaled rotation matrix has dimensions of 61 rows and 10 columns.
# You can see the first 10 elements of the first rotation.
# Multiplying these numbers by the features rotate each row (case) to the new coordinate system.
#
# Compute and evaluate a logistic regression model
# Next, you will compute and evaluate a logistic regression model using the features transformed by the first 10 principal components.
# The code in the cell below performs the matrix multiplication between the features and on the
# Project the training features onto the 10 scaled components.
training_10 = training %*% as.matrix(scaled_pca_10)
dim(training_10)
# Ten transformed features remain (down from 61 dummy-encoded columns).
## Construct a data frame with the transformed features and label
training_10 = data.frame(training_10)
training_10[,'bad_credit'] = training_label
## Create a weight vector for the training cases (up-weight the 'bad' class).
weights = ifelse(training_10$bad_credit == 'bad', 0.66, 0.34)
## Define and fit the logistic regression model
set.seed(5566)
logistic_mod_10 = glm(bad_credit ~ ., data = training_10,
weights = weights, family = quasibinomial)
logistic_mod_10$coefficients
# Notice that there are now 10 regression coefficients, one for each component plus an intercept.
# This number is in contrast to the 61 features in the dummy variable array.
#
# In order to test the model, the test feature array must also be transformed.
# Execute the code in the cell below to apply the PCA transformation to the test features:
# Apply the same 10-component projection to the test features.
test_10 = test %*% as.matrix(scaled_pca_10)
test_10 = data.frame(test_10)
test_10[,'bad_credit'] = test_label
dim(test_10)
# Execute the code in the cell below to score the model using the test data:
# Attach a class-label column to a scored data frame.
# df:        data frame with a `probs` column of predicted probabilities
# threshold: probabilities strictly below this are labeled 'bad', else 'good'
# Returns df with an added `score` column.
score_model = function(df, threshold){
is_bad <- df$probs < threshold
df$score <- ifelse(is_bad, 'bad', 'good')
df
}
# Score the test set: predicted probabilities plus a 0.5-threshold class label.
test_10$probs = predict(logistic_mod_10, newdata = test_10, type = 'response')
test_10 = score_model(test_10, 0.5)
test_10[1:10, c('bad_credit','probs', 'score')]# Peek at the first ten scored cases.
#
# Then, answer Question 1 on the course page.
# Print a confusion matrix and standard classification metrics for a scored
# data frame. Expects columns: bad_credit (actual, 'bad'/'good'), score
# (predicted, 'bad'/'good'), and probs (predicted probability); 'bad' is
# treated as the positive class. Uses pROC::roc/auc for the AUC line.
# Output is printed to the console; nothing is returned.
logistic.eval <- function(df){
# First step is to find the TP, FP, TN, FN cases
df$conf = ifelse(df$bad_credit == 'bad' & df$score == 'bad', 'TP',
ifelse(df$bad_credit == 'bad' & df$score == 'good', 'FN',
ifelse(df$bad_credit == 'good' & df$score == 'good', 'TN', 'FP')))
# Elements of the confusion matrix
TP = length(df[df$conf == 'TP', 'conf'])
FP = length(df[df$conf == 'FP', 'conf'])
TN = length(df[df$conf == 'TN', 'conf'])
FN = length(df[df$conf == 'FN', 'conf'])
## Confusion matrix as data frame
out = data.frame(Negative = c(TN, FN), Positive = c(FP, TP))
row.names(out) = c('Actual Negative', 'Actual Positive')
print(out)
# Compute and print accuracy, precision, recall and F1 (positive class = 'bad')
P = TP/(TP + FP)
R = TP/(TP + FN)
F1 = 2*P*R/(P+R)
cat('\n')
cat(paste('accuracy =', as.character(round((TP + TN)/(TP + TN + FP + FN), 3)), '\n'))
cat(paste('precision =', as.character(round(P, 3)), '\n'))
cat(paste('recall =', as.character(round(R, 3)), '\n'))
cat(paste('F1 =', as.character(round(F1,3)),'\n'))
roc_obj <- roc(df$bad_credit, df$probs)
cat(paste('AUC =', as.character(round(auc(roc_obj),3)),'\n'))
}
# Plot the ROC curve for a scored data frame and annotate it with the AUC.
# df: data frame with `probs` (predicted probabilities) and `bad_credit` (labels).
# Uses ROCR's prediction/performance objects; draws the chance diagonal in red.
ROC_AUC = function(df){
options(repr.plot.width=5, repr.plot.height=5)
preds <- prediction(df$probs, df$bad_credit)
roc_curve <- performance(preds, measure = "tpr", x.measure = "fpr")
# The AUC lives in the y.values slot of the S4 performance object
auc_value <- performance(preds, "auc")@y.values[[1]]
plot(roc_curve)
abline(a = 0, b = 1, col = 'red')
text(0.8, 0.2, paste('AUC = ', as.character(round(auc_value, 3))))
}
# Evaluate the 10-component model: metrics table plus ROC curve with AUC.
logistic.eval(test_10)
ROC_AUC(test_10)
# These results are reasonably good.
# Recall, accuracy and AUC have reasonable values, however precision and F1 are low.
# Is it possible that more PCA components are required to achieve a good model?
# Add more components to the model
# Now you will compute and evaluate a logistic regression model using the first 20 principal components.
# You will compare this model to the one created with 10 principal components.
#
# Execute the code below to transform the training features using the first 20 principal components.
## Compute the first 20 PCA components
pca_credit_20 = prcomp(training, rank = 20)
## Scale the eigenvalues to variance-explained fractions
var_exp_20 = pca_credit_20$sdev**2/sum(pca_credit_20$sdev**2)
Nrow = nrow(pca_credit_20$rotation)
Ncol = ncol(pca_credit_20$rotation)
scaled_pca_20 = data.frame(matrix(rep(0, Nrow * Ncol), nrow = Nrow, ncol = Ncol))
## Scale the rotations row-wise, as for the 10-component model
for(i in 1:Nrow){
scaled_pca_20[i,] = pca_credit_20$rotation[i,] * var_exp_20[1:Ncol]
}
## Print the dimensions of the scaled rotations
dim(scaled_pca_20)
# There are now 20 components in the PCA model.
# The code in the cell below computes the transformed feature set and creates a logistic regression model from this feature set.
# Execute this code.
## Construct a data frame with the 20 transformed features and the label
training_20 = training %*% as.matrix(scaled_pca_20)
training_20 = data.frame(training_20)
training_20[,'bad_credit'] = training_label
# Same class weights as the 10-component model
weights = ifelse(training_20$bad_credit == 'bad', 0.66, 0.34)
## Define and fit the logistic regression model
set.seed(5566)
logistic_mod_20 = glm(bad_credit ~ ., data = training_20,
weights = weights, family = quasibinomial)
logistic_mod_20$coefficients
# The code in the cell below scores the logistic regression model and displays performance metrics, the ROC curve, and the AUC.
# Execute this code and examine the result.
## Create the transformed test dataset (20-component projection)
test_20 = test %*% as.matrix(scaled_pca_20)
test_20 = data.frame(test_20)
test_20[,'bad_credit'] = test_label
## Score the model (probabilities plus 0.5-threshold labels)
test_20$probs = predict(logistic_mod_20, newdata = test_20, type = 'response')
test_20 = score_model(test_20, 0.5)
## Evaluate the model: metrics table plus ROC curve with AUC
logistic.eval(test_20)
ROC_AUC(test_20)
# The metrics for the 20 component model are nearly the same as for the 10 component model.
# It appears that 10 components is enough to represent the information in the feature set.
#
# Summary
# In this lab you have applied principal component analysis to dimensionality reduction for supervised machine learning.
# The first components computed contain most of the available information.
# When faced with large number of features, PCA is an effective way to make supervised machine learning models tractable.
#
# Specifically in this lab you have:
#
# 1. Computed PCA models with different numbers of components.
# 2. Compared logistic regression models with different numbers of components.
# In this case, using 10 components produced a good model.
# Extending this to 20 components gained little if anything.
# In summary the dimensionality of the original 61 dummy variable array to just 10 components.
# ====================
# Processing end time
timeEnd = Sys.time()
# Processing date and total processing time
# NOTE(review): difftime() picks its units automatically, so the hard-coded
# "seconds" label may be wrong for long runs — consider units = "secs".
cat(paste("","Processing end date and time",date(),"","",sep="\n"))
paste("Total processing time =",round(difftime(timeEnd,timeStart), digits=2),"seconds",sep=" ")
# Stop writing to an output file
sink()
################ |
cfa19594ad31a1b3c80d52b51ee697b8de6e0aee | 1e912c54cb17be1e24fe47072498f3e7bbc37a1e | /R/blomCOP.R | f43bb464ee0820f7480e3ac1d51ce1497f6cf071 | [] | no_license | cran/copBasic | a3149fffc343130a77e693dae608dde9ca50cd05 | 84adb528160d3cd5abb83e06da276a6df9af382f | refs/heads/master | 2023-06-23T14:32:12.974136 | 2023-06-19T15:50:02 | 2023-06-19T15:50:02 | 17,695,240 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,093 | r | blomCOP.R | "blomCOP" <-
function(cop=NULL, para=NULL, as.sample=FALSE,
ctype=c("joe", "weibull", "hazen", "1/n",
"bernstein", "checkerboard"), ...) {
if(as.sample) {
ctype <- match.arg(ctype)
if(is.null(para)) {
warning("Sample Blomqvist's Beta desired but para is NULL, returning NULL")
return(NULL)
}
if(length(names(para)) != 2) {
warning("para argument must be data.frame having only two columns, returning NULL")
return(NULL)
}
if(ctype == "joe") {
n <- nrow(para); A <- (1+n)/2
return((2/ n)*(sum(as.numeric((rank(para[,1]) - A) *
(rank(para[,2]) - A) >= 0))) - 1)
} else {
A <- 1/4 # P(1/2, 1/2) = (1/2) * (1/2)
return(EMPIRcop(0.5, 0.5, para=para, ctype=ctype, ...)/A - 1)
}
} else {
if(is.null(cop)) {
warning("must have copula argument specified, returning NULL")
return(NULL)
}
blom <- 4*cop(0.5,0.5, para=para, ...) - 1
return(blom)
}
}
|
02850e74f13fef21b2c7983c5c75a4a172acb7b3 | 1e820fe644a039a60bfbee354e50c775af675f6b | /R_easysteps/Ch04_ElseIf.R | afd0d045febe8d39b146728c1f92859ecdf38683 | [] | no_license | PyRPy/stats_r | a334a58fca0e335b9b8b30720f91919b7b43d7bc | 26a3f47977773044d39f6d8ad0ac8dafb01cce3f | refs/heads/master | 2023-08-17T00:07:38.819861 | 2023-08-16T14:27:16 | 2023-08-16T14:27:16 | 171,056,838 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 279 | r | Ch04_ElseIf.R | # chaining branches
# Cascaded branching demo: the first condition that evaluates TRUE wins.
# In R, `if` is an expression, so the chain can directly yield the greeting.
hour <- 21
greeting <- if( hour < 13 ) {
  "Good Morning:"
} else if( hour < 18 ) {
  "Good Afternoon:"
} else {
  "Good Evening:"
}
print( paste( greeting, hour ) )
# only one branch of a cascaded chain ever runs -- remember!
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.