blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0ceac77721fe9bb94296f7ebc03c7bc8103b3c09
|
6b9a398030a320ca38a3ff8c11adbb235deddcdf
|
/Analysis/Bootstrap/bootstrapSPE18Streams.R
|
6d27281f82f645fa7c253c88872644b239a97217
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
alexholcombe/nStream
|
ffb9dc89eaec1222b957d44c634aad68282b8f51
|
fddf0ad89a5a2353f0f76fef70923500d6cad628
|
refs/heads/master
| 2020-05-23T08:09:21.402053
| 2019-12-12T04:35:40
| 2019-12-12T04:35:40
| 80,474,968
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,019
|
r
|
bootstrapSPE18Streams.R
|
#Bootstrap analysis of serial position errors (SPEs) for the 18-streams RSVP
#experiment: for each participant x condition x SPE, estimate how likely the
#observed count of responses at that SPE is under guessing alone.
library(ggplot2)
library(dplyr)
library(magrittr)
library(mixRSVP)
library(reshape2)
setwd('~/gitCode/nStream/')
#One row per trial; columns used below: ID, condition, SPE, targetSP, fixationReject
allErrors <- read.csv('Analysis/allErrors18Streams.txt', header = T, stringsAsFactors = F)
runAnyway <- FALSE #If TRUE, recompute the bootstrap even when saved p-value files exist
plotExample <- FALSE #Generates the components of the bootstrap explainer plot
xDomain <- -4:4 #Range of SPEs tested against the guessing distribution
#Count the number of trials at each SPE value.
#
#theseData: data.frame with an SPE column (one serial position error per trial)
#xDomain:   integer vector of SPE values to tabulate (default -4:4, the window
#           used throughout this script; previously hard-coded)
#
#Returns a data.frame with columns xDomain and counts.
speNs <- function(theseData, xDomain = -4:4){
  #Vectorised count; sum() over a logical mask replaces the original
  #grow-and-replace loop. na.rm keeps NA SPEs out of every bin, matching
  #the original length(which(...)) behaviour.
  counts <- vapply(xDomain,
                   function(thisX) sum(theseData$SPE == thisX, na.rm = TRUE),
                   integer(1))
  data.frame(xDomain = xDomain, counts = counts)
}
#Bootstrap p-value for the count of responses at a given SPE under guessing.
#
#theseData:        trials for one participant x condition; needs columns SPE
#                  (serial position error) and targetSP (target serial position)
#numItemsInStream: number of items presented in each stream
#whichSPE:         the SPE value whose observed count is tested
#nReps:            number of bootstrap resamples
#
#Returns the proportion of resamples from the pseudo-uniform guessing
#distribution whose count at whichSPE is >= the observed count (a p-value).
bootstrapPValue <- function(theseData, numItemsInStream, whichSPE, nReps){
nTrials <- nrow(theseData)
#############################
###Generate pseudo-uniform###
#############################
#Accuracy is the proportion of exactly-correct (SPE == 0) responses; the
#remainder is modelled as guesses drawn from the pseudo-uniform.
thisAccuracy <- sum(theseData$SPE == 0, na.rm = TRUE)/nTrials
thisGuessingRate <- 1 - thisAccuracy
print(thisAccuracy)
print(thisGuessingRate)
#Possible SPE range given the target positions that occurred in these data
maxSPETheseData <- max(theseData$targetSP)
minSPETheseData <- min(theseData$targetSP)
minSPE <- 1 - maxSPETheseData
maxSPE <- numItemsInStream - minSPETheseData
guessingDist <- createGuessingDistribution(minSPE = minSPE,
maxSPE = maxSPE,
targetSP = theseData$targetSP,
numItemsInStream = numItemsInStream)
pseudoUniform <- data.frame(xDomain = minSPE:maxSPE, guessingDist = guessingDist, prob = guessingDist/sum(guessingDist)) #give guessing dist values labels
nWhichSPE <- sum(theseData$SPE == whichSPE, na.rm = TRUE) #How many observations at SPE = whichSPE?
#BUG FIX: sample() silently truncates a fractional size; round it explicitly
#so the number of simulated guess trials is well-defined.
nGuessTrials <- round(nTrials*thisGuessingRate)
#Counts of whichSPE in each resample from the pseudo-uniform. Preallocated via
#vapply instead of mutating a data frame once per rep (which rescanned the
#whole rep column on every iteration).
bootstrapCounts <- vapply(seq_len(nReps), function(i){
#sample nGuessTrials many guesses from the pseudouniform over the range of possible SPEs in this experiment
thisSample <- sample(pseudoUniform$xDomain, prob = pseudoUniform$prob, replace = TRUE, size = nGuessTrials)
sum(thisSample == whichSPE) #How many of the trials had an SPE == whichSPE
}, integer(1))
return(sum(bootstrapCounts>=nWhichSPE)/nReps) #a p.value. The proportion of bootstrapped samples that had a count of whichSPE at least as great as the observed count
}
numItemsInStream = 24 #Items per RSVP stream in this experiment
#Look for previously saved p-value files so the slow resampling can be skipped
pFiles <- list.files(pattern = 'bootstrapPValues18Streams.*\\.csv',
path = 'Analysis/Bootstrap',
full.names = T)
if(length(pFiles)>0 && !runAnyway){ #scalar condition, so short-circuit && is appropriate
#Pull the timestamp out of each filename and load the most recent file
splits <- pFiles %>% strsplit(x = .,
split = '18Streams|\\.csv')
dates <- lapply(splits,
FUN = function(x){
x[2] %>% as.POSIXct(., format = "%d-%m-%Y_%H-%M-%S")
}) %>% unlist
#which.max returns a single index even if two files share a timestamp
#(which(dates == max(dates)) could return several and break read.csv)
whichLatestDate <- which.max(dates)
ps <- read.csv(pFiles[whichLatestDate])
} else{
#Recompute: one bootstrap p-value per participant x condition x SPE cell.
#p = -1 marks cells not yet filled in.
ps <- expand.grid(xDomain = xDomain,
p = -1,
participant = unique(allErrors$ID),
condition = unique(allErrors$condition))
for(thisParticipant in unique(allErrors$ID)){
print(thisParticipant)
for(thisCondition in unique(allErrors$condition)){
print(thisCondition)
theseData <- allErrors %>% filter(ID == thisParticipant & condition == thisCondition)
for(whichSPE in xDomain){
print(whichSPE)
thisP <- bootstrapPValue(theseData,numItemsInStream,whichSPE,5000)
ps %<>% mutate(p = replace(p, xDomain == whichSPE & participant == thisParticipant & condition == thisCondition, thisP))
}
}
}
#Save with a timestamp so earlier runs are preserved
write.csv(ps, paste0('Analysis/bootstrapPValues18Streams',format(Sys.time(), "%d-%m-%Y_%H-%M-%S"),'.csv'),row.names = F)
}
#BUG FIX: the next two lines referenced `allData`, which is never defined in
#this script (the trial data are read into `allErrors` at the top), so the
#script stopped with an "object not found" error here.
#Attach the observed SPE counts and the per-cell trial counts to the p-values.
ps <- allErrors %>% group_by(ID, condition) %>% do(speNs(.)) %>% rename(participant = ID) %>% left_join(ps, ., by = c('participant', 'condition', 'xDomain'))
ps <- allErrors %>% group_by(ID, condition) %>% summarise(ntrials = n()) %>% rename(participant = ID) %>% left_join(ps, ., by = c('participant', 'condition'))
ps %<>% filter(participant != '18TR1') #drop the excluded participant
#p as a function of SPE, one panel per participant
ps %>% filter(p != -1) %>% ggplot(., aes(x = xDomain, y = p))+
geom_line(aes(colour = factor(condition)))+
facet_wrap(~participant,nrow = 3)+
geom_hline(yintercept = .05, linetype = 'dashed')+
geom_vline(xintercept = -1, linetype = 'dashed')+
labs(x = 'SPE', y = 'p', colour = 'nStream')
#Per condition and SPE: how many participants deviate from guessing at a
#Bonferroni-corrected alpha (.05/12); p > -1 excludes unfilled cells
table <- ps %>% group_by(condition, xDomain) %>% summarise(nSig = length(which(p<.05/12 & p >-1)))
table %<>% mutate(milliseconds = xDomain*(1000/12)) #convert SPE to ms at 12 items/s
ggplot(table, aes(x = xDomain, y = nSig))+
geom_line(aes(linetype = condition))
#Wide-format table of deviation counts, conditions as columns
table %>% dcast(.,
xDomain ~ condition,
value.var = 'nSig')
bootstrapPlot <- table %>% ggplot(., aes(x=xDomain, y = nSig))+
geom_line(aes(linetype = factor(condition)))+
scale_x_continuous(breaks = -9:9)+
scale_y_continuous(breaks = seq(0,12,3))+
labs(x = 'SPE', y = 'Deviations from Guessing',linetype = 'nStreams')+
scale_linetype_manual(values = c('2' = 'solid', '6' = 'dotted', '18' = 'twodash'))
bootstrapPlot
ggsave(filename = 'modelOutput/18Streams/bootstrapPlot.png',
plot = bootstrapPlot,
width=30,
height=30,
units='cm')
#######################
###Example Bootstrap###
#######################
#Builds the four panels of the bootstrap "explainer" figure for one randomly
#chosen participant: the empirical SPE histogram, the pseudo-uniform guessing
#distribution, twelve example bootstrap samples, and the distribution of
#bootstrapped counts at whichSPE.
#NOTE(review): theme_apa() is not provided by any package loaded at the top of
#this script (it comes from papaja or jtools) — confirm one of those is
#attached before running this branch.
if(plotExample){
randID <- sample(allErrors$ID, size = 1) #one randomly chosen participant
exampleCondition = 2
whichSPE = 2
nReps <- 5000
exampleData <- allErrors %>% filter(ID == randID, condition == exampleCondition, !fixationReject)
exampleNTrials <- nrow(exampleData)
#Panel 1: empirical SPE histogram, highlighting the bin at whichSPE
exampleHistogram <- ggplot(exampleData, aes(x = SPE))+
geom_histogram(binwidth = 1, aes(fill = SPE == whichSPE))+
geom_vline(xintercept = 0, linetype = 'dashed')+
scale_x_continuous(breaks = seq(min(exampleData$SPE),max(exampleData$SPE),2))+
scale_fill_manual(values = c('TRUE' = '#ffa951', 'FALSE' = '#628093'), guide = FALSE)+
labs(y = 'Count')+
theme_apa()
exampleHistogram
ggsave(exampleHistogram,
file = paste0('modelOutput/18Streams/bootstrapExampleEmpirical', randID,'.png'),
width = 10,
height = 10,
units = 'cm')
#Same accuracy/guessing-rate split as bootstrapPValue(), for this participant
exampleAccuracy <- length(which(exampleData$SPE == 0))/exampleNTrials
exampleGuessingRate <- 1 - exampleAccuracy
print(exampleAccuracy)
print(exampleGuessingRate)
maxSPETheseData <- exampleData %>% pull(targetSP) %>% max
minSPETheseData <- exampleData %>% pull(targetSP) %>% min
minSPE <- 1 - maxSPETheseData
maxSPE <- numItemsInStream - minSPETheseData
thisGuessingDist <- createGuessingDistribution(minSPE = minSPE,
maxSPE = maxSPE,
targetSP = exampleData$targetSP,
numItemsInStream = numItemsInStream)
pseudoUniform <- data.frame(xDomain = minSPE:maxSPE, guessingDist = thisGuessingDist, prob = (thisGuessingDist/sum(thisGuessingDist))) #give guessing dist values labels
#Panel 2: the pseudo-uniform guessing distribution, highlighting whichSPE
exampleGuessingDist <- ggplot(pseudoUniform, aes(x = xDomain, y = prob))+
geom_bar(stat= 'identity', aes(fill = xDomain == whichSPE))+
labs(x = 'SPE', y = 'Probability of Response')+
scale_fill_manual(values = c('TRUE' = '#ffa951', 'FALSE' = '#628093'), guide = FALSE)+
theme_apa()
exampleGuessingDist
ggsave(exampleGuessingDist,
file = paste0('modelOutput/18Streams/bootstrapExampleGuessing', randID,'.png'),
width = 10,
height = 10,
units = 'cm')
nWhichSPE <- exampleData %>% filter(SPE == whichSPE) %>% nrow() #How many observations at SPE = whichSPE?
bootstraps <- data.frame(rep = 1:nReps, #store the counts of whichSPE sampled from the pseudouniform here.
count = -999)
#One row per (bootstrap sample, simulated guess trial); SPE filled in below.
#NOTE(review): exampleNTrials*exampleGuessingRate is generally fractional —
#numeric() and sample() both truncate it, so the two sizes agree, but an
#explicit round() would be clearer. Verify before changing.
theseSamples <- expand.grid(sample = 1:nReps, SPE = numeric(exampleNTrials*exampleGuessingRate))
for(i in 1:nReps){
thisSample <- sample(pseudoUniform$xDomain, prob = pseudoUniform$prob, replace = T, size = exampleNTrials*exampleGuessingRate) #sample nTrials many trials from the pseudouniform over the range of possible SPEs in this experiment
theseSamples %<>% mutate(SPE = replace(SPE, sample == i, thisSample))
nThisSPE<- which(thisSample == whichSPE) %>% length #How many of the trials had an SPE == whichSPE
bootstraps %<>% mutate(count = replace(count, rep == i, nThisSPE)) #Put the count nThisSPE in the dataframe
}
#Panel 3: twelve randomly chosen bootstrap samples as histograms
randomSamplesForPlotting <- sample(1:nReps, 12, replace = F)
randomSamples <- theseSamples %>% filter(sample %in% randomSamplesForPlotting) %>%
ggplot(., aes(x = SPE))+
geom_histogram(binwidth = 1, aes(fill = SPE == whichSPE))+
scale_fill_manual(values = c('TRUE' = '#ffa960', 'FALSE' = '#628093'), guide = FALSE)+
labs(y = 'Count')+
theme_apa()+
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)+
facet_wrap(~sample, nrow = 4)
randomSamples
ggsave(randomSamples,
file = paste0('modelOutput/18Streams/randomBootstrapHistograms', randID,'.png'),
width = 10,
height = 10,
units = 'cm')
#Panel 4: distribution of bootstrapped counts, highlighting counts >= observed
bootstrapCountDist <- ggplot(bootstraps, aes(x = count))+
geom_histogram(binwidth = 1, aes(fill = count >= nWhichSPE))+
scale_fill_manual(values = c('TRUE' = '#ffa951', 'FALSE' = '#628093'), guide = FALSE)+
labs(x = 'Number of SPEs = 2', y = 'Number of Samples')+
theme_apa()
ggsave(bootstrapCountDist,
file = paste0('modelOutput/18Streams/bootstrapCountDistribution', randID,'.png'),
width = 10,
height = 10,
units = 'cm')
thisP <- length(which(bootstraps$count>=nWhichSPE))/nReps #a p.value. The proportion of bootstrapped samples that had a count of whichSPE at least as great as the observed count
}
|
777339c4c08039c656f60ddbc3b2eee91cf7d2a5
|
d05d9c914636747e125b33e28279ec43971fa7ae
|
/Models/26jan17/R-utils/harmfill.r
|
fa365b0dce8e687d1f231e13d23d0f592218e05f
|
[] |
no_license
|
hamiddashti/ED_BSU
|
802b256362e83f0db63b8386edb45df3f13b1bdf
|
2ff3bf81c63d5b089321aabce6c74f7a3e261836
|
refs/heads/master
| 2022-02-25T21:38:00.385292
| 2019-10-17T21:17:44
| 2019-10-17T21:17:44
| 105,049,937
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,488
|
r
|
harmfill.r
|
#==========================================================================================#
#==========================================================================================#
# This function fills all gaps in a time series, by using an iterative Fourier #
# analysis of the time series. Keep in mind that geophysical time series have random #
# noise that are captured by the Fourier transform as weak waves, and those waves can make #
# very bad things in the middle of the gap if the gap is long. #
# #
# Input Variables #
# --------------- #
# #
# x: #
# The time series to be filled. #
# #
# detrend.method: #
# Which technique to use to detrend the time series before applying the Fourier trans- #
# form. Valid values are (case insensitive and the first few letters will do): #
# - "mean" : don't detrend the time series, just subtract the mean #
# - "linear" : (default) use a simple linear detrending #
# - "loess" : use a polynomial surface using local fitting #
# #
# trend.back (optional): #
# Should the routine add back the trend? (TRUE or FALSE). Default is TRUE. #
# #
# min.signal (optional): #
# The minimum accumulated spectrum to retain (the count goes from most powerful to #
# least powerful). It must be between 0 and signal.retain (see below). #
# #
# signal.retain (optional): #
# The total accumulated spectrum to retain (the count goes from most powerful to least #
# powerful). It must be between 0 and 1, and at least one mode will always be used. #
# #
# conv.threshold: #
# Tolerance for change in the response for each sub-step, beyond which we move to the #
# next mode. #
# #
# minmod: #
# The minimum number of modes to use. This avoids using too few modes when the strong- #
# est modes are too powerful. #
# #
# maxmod: #
# The maximum number of modes to use. This allows using all sought modes when the #
# spectrum is well-defined, and lower power in case the time series is too noisy. #
# #
# maxin: #
# Maximum number of inner iterations before giving up convergence and moving on. #
# #
# verbose: #
# Prints more information, which may be useful if you want to debug or just curious. #
# #
# rmse: #
# Flag that tells whether to estimate the root mean square error (TRUE | FALSE). If #
# true, a jackknife method will be run, otherwise a NA will be returned for error. #
# #
# del.frac: #
# In case jackknife is to be run, this tells the fraction of valid data to be removed #
# each realisation. #
# #
# n.jack: #
# Maximum number of iterations for the jackknife method. #
# #
# Output Variables #
# ----------------- #
# #
# The output is a list containing the following variables: #
# #
# xfill: #
# vector with the same length as y with the original time series where data were #
# available, and the gap filled value for the time series. #
# #
# error: #
# Estimate of the root mean square error. If input rmse is FALSE, this will be a NA. #
# #
#------------------------------------------------------------------------------------------#
harmfill <<- function(x,detrend.method="linear",trend.back=TRUE,min.signal=0.00
,signal.retain=0.80,conv.threshold=0.0001,minmod=1,maxmod=Inf
,verbose=0,maxin=50,rmse=FALSE,del.frac=1/3,n.jack=100
,jack.toler=0.01){
#---------------------------------------------------------------------------------------#
# jack.toler (not covered by the header above): convergence tolerance for the #
# jackknife RMSE estimate — relative change in the running RMSE below which the #
# jackknife loop stops. #
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Harmfill requires two packages now: RSEIS and zoo. Make sure that you have both #
# of them installed and loaded. #
#---------------------------------------------------------------------------------------#
zoo.check = "package:zoo" %in% search()
#----- RSEIS is only needed for linear detrending (detrend2 calls RSEIS::detrend), -----#
#----- so the check passes automatically for any other detrend.method. -----------------#
RSEIS.check = "package:RSEIS" %in% search() | detrend.method != "linear"
if ( (! zoo.check) | (! RSEIS.check) ){
cat (" ---> ZOO: ",zoo.check ,"\n")
cat (" ---> RSEIS: ",RSEIS.check,"\n")
stop(" Harmfill requires ZOO and RSEIS!!!")
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Copy the time series to a safe place, and never point to x again unless there is #
# no need for gap filling. #
#---------------------------------------------------------------------------------------#
x.act = x
nx.act = length(x.act)
miss.act = is.na(x.act)
nx.inf = sum(is.infinite(x.act),na.rm=TRUE)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Stop if there is any infinity... #
#---------------------------------------------------------------------------------------#
if (nx.inf != 0){
cat (" ---> Matt, there are infinite numbers in your time series, fix it!!!","\n")
cat (" ---> Number of points: ",nx.act,"\n")
cat (" ---> Number of points that are infinity: ",nx.inf,"\n")
stop(" Time series must contain only finite numbers and NAs")
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Double the size of the time series by adding the second half before and the first #
# half after the time series. Harmonic filling doesn't work very well at the beginning #
# and end of the time series, so we buffer the period. #
#---------------------------------------------------------------------------------------#
n.brk = floor(nx.act / 2)
before = sequence(n.brk)
after = seq(from=n.brk+1,to=nx.act,by=1)
#----- rxa:rxz are the indices of the original series inside the extended one. ---------#
rxa = length(after) + 1
rxz = rxa + nx.act - 1
x.ext = c(x.act[after],x.act,x.act[before])
miss.ext = is.na(x.ext)
nx.ext = length(x.ext)
#---------------------------------------------------------------------------------------#
#----- Find the time series size and the Nyquist frequency. ----------------------------#
nnyq = 1 + ceiling((nx.ext-1)/2)
#---------------------------------------------------------------------------------------#
#----- Find the indices to use in the FFT. ---------------------------------------------#
fuse = sequence(nnyq)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Check and count the number of missing data. If the time series is full, we don't #
# need to do anything. #
#---------------------------------------------------------------------------------------#
okdata = is.finite(x.ext)
nodata = ! is.finite(x.ext)
nok = sum(okdata)
nmiss = sum(nodata)
if (nmiss == 0){
if (verbose > 0) cat (" * Time series is complete, no need to gap fill... \n")
ans = list(xfit=x.act,error=rep(0,times=nx.act))
return(ans)
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Fill in the gaps with something so it doesn't go completely off during the #
# de-trending. To avoid spurious trends, we keep only the true period. #
#---------------------------------------------------------------------------------------#
x.trend = na.fill(na.approx(x.ext,na.rm=FALSE),fill="extend")
x.trend = x.trend[rxa:rxz]
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# De-trend the time series. Notice that even though we #
# NOTE(review): if detrend.method does not start with "li", "lo" or "me", x0 is #
# never assigned and the function fails below — consider a stop() in a final else. #
#---------------------------------------------------------------------------------------#
if (verbose > 0) cat (" * Detrending time series... \n")
detmet = substring(tolower(detrend.method),1,2)
if (detmet == "li"){
#----- Linear trend. ----------------------------------------------------------------#
x0 = detrend2(x.trend)$y0
#------------------------------------------------------------------------------------#
#----- Make x0 the same size as x.ext -----------------------------------------------#
x0 = c(x0[after],x0,x0[before])
#------------------------------------------------------------------------------------#
}else if(detmet == "lo"){
#----- When is just a dimensionless time. -------------------------------------------#
when = sequence(nx.act)
#------------------------------------------------------------------------------------#
#----- Local fitting. ---------------------------------------------------------------#
guess = loess(formula= x ~ when, data=data.frame(when=when,x=x.trend)
,na.action="na.omit")
x0 = predict(object=guess,when)
#------------------------------------------------------------------------------------#
#----- Make x0 the same size as x.ext -----------------------------------------------#
x0 = c(x0[after],x0,x0[before])
#------------------------------------------------------------------------------------#
}else if (detmet == "me"){
#----- No trend, subtract the mean and that's enough. -------------------------------#
x0 = rep(mean(x.act,na.rm=TRUE),times=nx.ext)
#------------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------------#
#----- Find the deviation from the detrended time series. ------------------------------#
xprime = x.ext - x0
xpbar = mean(xprime,na.rm=TRUE)
#---------------------------------------------------------------------------------------#
#----- Initial conditions. Assume no wave present in missing data. --------------------#
xnext = xprime
xnext[nodata] = 0.
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Find the first Fourier transform, and determine which modes to use. #
# Modes are ranked by spectral power; cumpow is the cumulative fraction of total #
# power captured by the strongest modes. #
#---------------------------------------------------------------------------------------#
xfftall = fft(xnext)
xfft = xfftall[fuse]
pow = abs(xfft)^2
npow = order(pow,decreasing=TRUE)
cumpow = cumsum(pow[npow])/sum(pow)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Noutit is the number of "outer" iterations needed to capture the meaningful #
# power, clamped between minmod and min(nok-minmod, maxmod). #
#---------------------------------------------------------------------------------------#
noutit = max(minmod,max(sum(cumpow <= min.signal)
,min(nok-minmod,sum(cumpow <= signal.retain),maxmod)))
powuse = npow[1:noutit]
if (verbose > 0){
cat(" * Using ",noutit," modes out of ",nnyq,"... \n")
cat(" ( Retained signal = ",sprintf("%.2f",cumpow[noutit]*100),"%...) \n")
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Find the list of iterations to print. #
#---------------------------------------------------------------------------------------#
print.iter = unique(sort(c(1,pretty(sequence(noutit),n=10),noutit)))
print.iter = print.iter[print.iter %in% sequence(noutit)]
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Loop over the outer iterations. Each outer iteration admits one more mode, #
# strongest first. #
#---------------------------------------------------------------------------------------#
if (verbose > 0) cat (" * Starting the outer loop... \n")
for (outit in 1:noutit){
if (verbose > 0 & outit %in% print.iter){
cat(" # Outer iteration: ",outit
,". Signal = ",sprintf("%.3f",cumpow[outit]*100),"... \n")
}#end if
#----- Reset the values for the inner iterations. -----------------------------------#
initerate = TRUE
#errest1st = NULL
#errestbest = NULL
r2best = 0
r2prev = 0
init = 0
#------------------------------------------------------------------------------------#
# Conditional loop over the inner iterations: refine the gap values with the #
# current set of modes until the fit stops improving (or maxin is reached). #
#------------------------------------------------------------------------------------#
while(initerate){
init = init + 1
#----- Update guess. -------------------------------------------------------------#
xnow = xnext
#---------------------------------------------------------------------------------#
#----- Find the Fast Fourier analysis for this guess. ----------------------------#
xfftall = fft(xnow)
xfft = xfftall[fuse]
#---------------------------------------------------------------------------------#
#----- Keep only the powers that we should use. Negative indices: zero every -----#
#----- mode EXCEPT the outit strongest ones. -------------------------------------#
del = - powuse[1:outit]
xfft[del] = 0+0i
#---------------------------------------------------------------------------------#
#----- Reconstruct the Fourier transform without the weaker powers. --------------#
#----- The upper half is the complex conjugate of the lower half (mirrored), -----#
#----- with the even/odd length cases handled separately. ------------------------#
if (nx.ext %% 2 == 0){
dseq = seq(from=2,to=nnyq-1,by=1)
xfft = c(xfft[1]
,xfft[dseq]
,xfft[nnyq]
,rev(Re(xfft[dseq])) + (0-1i)*rev(Im(xfft[dseq])) )
}else{
dseq = seq(from=2,to=nnyq,by=1)
xfft = c(xfft[1]
,xfft[dseq]
,rev(Re(xfft[dseq])) + (0-1i)*rev(Im(xfft[dseq])) )
}#end if
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Find the guess by finding the inverse FFT without the trailing components. #
#---------------------------------------------------------------------------------#
xfill = Re(fft(xfft,inverse=TRUE)/nx.ext)
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Find the potential next guess, which is the actual time series, with the #
# missing values replaced by the guess. #
#---------------------------------------------------------------------------------#
xtry = xprime
xtry[nodata] = xfill[nodata]
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Simple error guess, this will tell whether the model is approaching a #
# place. Although this may mean slow convergence, we assume that this is because #
# the result is close to the best guess. (This is an adjusted-R2-style statistic #
# over the valid points, penalised by the number of modes in use.) #
#---------------------------------------------------------------------------------#
r2 = ( 1. - ((nok - 1 ) * sum((xprime[okdata] - xfill[okdata])^2))
/ ((nok - outit - 1) * sum((xprime[okdata] - xpbar )^2)) )
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Decide whether to accept or reject the step. We iterate in the inner loop #
# only if guesses are getting better by a significant amount. #
#---------------------------------------------------------------------------------#
gain = 2.0 * (r2 - r2prev) / (r2 + r2prev)
r2prev = r2
initerate = init < maxin && gain > conv.threshold
xnext = xtry
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Print information if needed. #
#---------------------------------------------------------------------------------#
if (verbose > 1){
cat(" > Inner iteration: ",init
,"; R2 = ",signif(r2,5),"; Gain = ",signif(gain,5),"...","\n")
}#end if
#---------------------------------------------------------------------------------#
}#end while (initerate)
#------------------------------------------------------------------------------------#
}#end for outit in 1:noutit
#---------------------------------------------------------------------------------------#
#----- Add back the trend, and chop the time series back to the original size. ---------#
if (trend.back){
xfit = x0[rxa:rxz] + xnext[rxa:rxz]
}else{
xfit = xnext[rxa:rxz]
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# This part is the error estimate. #
#---------------------------------------------------------------------------------------#
if (rmse){
if (verbose > 0) cat(" * Estimating RMSE...","\n")
#------------------------------------------------------------------------------------#
# Because this doesn't use any other data (either from other stations or other #
# variables), we must estimate the error using a Jackknife approach. Each iteration #
# will delete a fraction of valid data and run the harmonic analysis, and the root #
# mean square error between the predicted value and the actual value will be the #
# error of this realisation. We run this n.jack times so the error estimate is more #
# robust. #
# NOTE(review): the loop stops on the jack.toler convergence test only — n.jack is #
# passed through to the recursive call but never caps this loop. Confirm intent. #
#------------------------------------------------------------------------------------#
nj = 0
err.jack = 0
#----- List indices with available data. --------------------------------------------#
idx.avail = which(! miss.act)
navail = length(idx.avail)
iterate = TRUE
while (iterate){
nj = nj + 1
#----- Copy the time series to a scratch vector. ---------------------------------#
ndel = floor(del.frac * navail)
del = sample(x=idx.avail,size=ndel,replace=FALSE)
jackknife = x.act
jackknife[del] = NA
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Find the gap-filled time series. This call is slightly modified so we #
# preserve some of the properties of the original fitting. We impose the number #
# of outer iterations to be exactly the same as the result with the full time #
# series. Also, we must call the function forcing the error estimate to be #
# FALSE, otherwise we will enter in an infinite loop. For the error we must also #
# add the trend back because we compare the results with the original dataset. #
#---------------------------------------------------------------------------------#
realisation = harmfill( x = jackknife
, detrend.method = detrend.method
, trend.back = TRUE
, signal.retain = 1.00
, minmod = noutit
, maxmod = noutit
, verbose = 0
, rmse = FALSE
, del.frac = del.frac
, n.jack = n.jack )
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Find the mean square error of this realisation (2*noutit degrees of freedom #
# used by the retained modes). #
#---------------------------------------------------------------------------------#
err.jack.now = sqrt( sum((x.act[del] - realisation$xfit[del])^2)
/ (ndel - 2*noutit) )
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Fold this realisation into the running mean of the error estimate. #
#---------------------------------------------------------------------------------#
err.jack.next = (err.jack * (nj-1) + err.jack.now) / nj
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Stop once the running estimate has stabilised (relative change < jack.toler). #
#---------------------------------------------------------------------------------#
gain = 2.0 * abs(err.jack.next - err.jack) / abs(err.jack.next + err.jack)
iterate = gain > jack.toler
err.jack = err.jack.next
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
# Print a banner with the information on how the method is going. #
#---------------------------------------------------------------------------------#
if (verbose > 0){
cat (" # Iteration: ",nj,"; RMSE = ",signif(err.jack,4)
,"; GAIN = ",sprintf("%4.4f",gain),"; CONVERGE = ",! iterate,"\n")
}#end if
#---------------------------------------------------------------------------------#
}#end while
#------------------------------------------------------------------------------------#
}else{
#------------------------------------------------------------------------------------#
# Skip the error estimate and leave it missing. #
#------------------------------------------------------------------------------------#
err.jack = NA
#------------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Make the vector with the error estimate. The error is 0 for full points, and the #
# estimate for the gap-filled points. #
#---------------------------------------------------------------------------------------#
error = rep(0,times=nx.act)
error[miss.act] = err.jack
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Make the list with the output then quit. #
#---------------------------------------------------------------------------------------#
ans = list(xfit=xfit,error=error)
return(ans)
#---------------------------------------------------------------------------------------#
}#end function
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# This function detrends a time series by applying a linear regression. In case there #
# are missing data, it fills in with a linear interpolation before detrending, to make #
# sure the data is detrended properly (i.e., the gaps contains some linear interpolation). #
#------------------------------------------------------------------------------------------#
detrend2 <<- function(y){
   #----- Gap-fill first so the linear trend is fitted on a complete series. --------------#
   filled  = gaplin(y)
   #----- Remove the linear trend; the trend itself is the filled series minus anomaly. ---#
   anomaly = detrend(filled)
   trend   = filled - anomaly
   #----- Discard anomalies at the originally missing points. -----------------------------#
   anomaly[! is.finite(y)] = NA
   return(list(y0=trend,yprime=anomaly,yfill=filled))
}#end function
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# This function completes the gaps with a linear interpolation between the previous #
# and the next available points. #
#------------------------------------------------------------------------------------------#
gaplin <<- function(x){
   #----- Indices of the missing (non-finite) entries. ------------------------------------#
   miss = which(! is.finite(x))
   xout = x
   #----- Fill each gap with a linear interpolation of its valid neighbours. --------------#
   if (length(miss) > 0){
      xout[miss] = sapply(X=miss,FUN=lin.filler,dat=x)
   }#end if
   return(xout)
}#end function
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# Auxiliary function that will fill in the gap by looking at the previous and future #
# available data by applying a linear interpolation between the previous and next avail- #
# able data. #
#------------------------------------------------------------------------------------------#
lin.filler <<- function(gapind,dat){
   #----- Nothing to do when the requested point is already valid. ------------------------#
   if (is.finite(dat[gapind])) return(dat[gapind])
   #---------------------------------------------------------------------------------------#
   #     Locate the valid points before and after the gap.                                 #
   #---------------------------------------------------------------------------------------#
   ndat   = length(dat)
   before = which(is.finite(dat[1:gapind]))
   after  = (gapind - 1) + which(is.finite(dat[gapind:ndat]))
   nbef   = length(before)
   naft   = length(after)
   #---------------------------------------------------------------------------------------#
   #     Pick the two anchor points: interpolate when both sides have data, otherwise      #
   # extrapolate from the two nearest valid points on the populated side.                   #
   #---------------------------------------------------------------------------------------#
   if (nbef + naft <= 1){
      stop("Time series has either 0 or 1 valid point! No interpolation possible!")
   }else if (nbef == 0){
      inda = after [1]
      indz = after [2]
   }else if (naft == 0){
      inda = before[nbef-1]
      indz = before[nbef  ]
   }else{
      inda = before[nbef]
      indz = after [1]
   }#end if
   #----- Linear interpolation/extrapolation through the two anchors. ---------------------#
   return(dat[inda] + (gapind - inda) * (dat[indz]-dat[inda]) / (indz - inda))
}#end function
#==========================================================================================#
#==========================================================================================#
|
fcdd3933dc5670abfe225a2acfcfd3f8170a3066
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/crimelinkage/examples/clusterPath.Rd.R
|
16daf48499bd58e0f6b56c113fb3e193c329261b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
clusterPath.Rd.R
|
library(crimelinkage)
### Name: clusterPath
### Title: Follows path of one crime up a dendrogram
### Aliases: clusterPath
### ** Examples
# See vignette: "Crime Series Identification and Clustering" for usage.
|
040ea59beabdcd9ed00c26f1633cf0fc996b93c9
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612736198-test.R
|
e8aa4e1ab642d0835ae8d2cd30d928dacc2f7752
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
1612736198-test.R
|
testlist <- list(doy = c(-5.48612930076931e+303, NaN, NaN, 6.4757678266058e-319, 1.49552342554576e-305, -5.90834448102331e+303, 1.51170009278853e+164 ), latitude = numeric(0), temp = c(-5.48612930076931e+303, -5.51157045666091e+303, NaN, NaN, NaN, 2.12199579047121e-314, 0, -5.48612406879369e+303, 6.88345878187749e-315, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
e95db1f2c1f041eb894ded139cd0770204e88e74
|
95855078928f5e12530f5b7f8956ec06c795b746
|
/01_merge-data-sets.R
|
fdf527fea68a990e3b01e67c93c8a3cd016dbb24
|
[] |
no_license
|
gaborstats/roma-discrimination
|
76155234d810d234a96b4440449ef16314e29380
|
f5a38d1573471789c9c908722575c08b6a49ebc5
|
refs/heads/master
| 2023-01-08T02:55:22.227688
| 2020-11-07T20:54:05
| 2020-11-07T20:54:05
| 296,667,398
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,070
|
r
|
01_merge-data-sets.R
|
###############################
# adatbazisok osszkapcsolasa #
###############################
# TARTALOM
# 1. az adatbazist kiegeszitettem az ocr utjabn pdf-bol kinyert valaszokkal
# 2. elmentettem a valaszokat nehany egyeb valtozoval egyutt
getwd()
setwd("C:/Users/Gabor/Documents/01_ELTE/00_szakdoga/03_Adatok/06_Eredmenyek")
df_oreg =read.csv("2020-03-19_AB-adatbazis_jav.csv", sep = ";", stringsAsFactors = F)
library("readxl")
df_masik = read_excel("2020-08-01_AB-adatbazis_ocr-updated.xlsx")
#install.packages("tidyverse")
library("tidyverse")
df_teljes = full_join(x = df_oreg, y = df_masik[,4:5], by = "telepules")
rm(df_oreg, df_masik)
# valaszuk ki a karakter valaszt es a featuroket
df = select(df_teljes, reply, roma_felado, magas_statusz, telepules, onk.hiv_szekhely)
rm(df_teljes)
setwd("C:/Users/Gabor/Documents/01_ELTE/00_szakdoga/03_Adatok/05_Adatfeldolgozas/05_NLP-proba")
write.table(df, '2020-08-01_AB-text.csv', sep = ";", dec = ",", na = "NA",
quote = T, qmethod = c("double"), row.names = F)
|
d70598ecfe16486426977ff4a3e7510f25880b42
|
cd875a9d426f4697267113768f6d02721ab611c3
|
/man/deal_stages_history.Rd
|
a9a838c93de345a7a5c6a91dbdbe3b78e06371a0
|
[
"MIT"
] |
permissive
|
entr0py43/hubspot
|
e36b629f46f41dc4013a280821fd9ed69c575da5
|
dfa51817500971efc947d2bfcb81687489be6125
|
refs/heads/master
| 2020-08-06T06:20:20.308770
| 2019-07-28T12:30:47
| 2019-07-28T12:30:47
| 212,869,283
| 0
| 1
|
NOASSERTION
| 2019-10-04T17:31:24
| 2019-10-04T17:31:23
| null |
UTF-8
|
R
| false
| true
| 596
|
rd
|
deal_stages_history.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deal_stages_history.R
\name{deal_stages_history}
\alias{deal_stages_history}
\title{Retrieve the version history of deal stages}
\usage{
deal_stages_history(deals = get_deals(properties = "dealstage",
property_history = "true", associations = "false"))
}
\arguments{
\item{deals}{A deals list object, typically produced by running \code{get_deals}}
}
\value{
A tbl containing all stages a deal has gone through
}
\description{
Retrieve the version history of deal stages
}
\examples{
hist <- deal_stages_history()
}
|
60c2ed8f61cca086428cd530944f756b96467bdc
|
91bb175a2ddc9534b52035e93b67670cd8e068e1
|
/R/summarize.R
|
bd806b2e564bfdffe89ce84638e94331686c3760
|
[] |
no_license
|
ekstroem/dataMaid
|
5db604991c52e957b14edac146c20f206f1bc2ff
|
d7571007db8f6f530ce9dd6cd4978c526eb2cb8f
|
refs/heads/master
| 2022-02-06T20:29:39.941918
| 2022-01-25T10:21:01
| 2022-01-25T10:21:01
| 69,242,550
| 154
| 30
| null | 2019-10-19T15:48:22
| 2016-09-26T11:15:17
|
HTML
|
UTF-8
|
R
| false
| false
| 18,545
|
r
|
summarize.R
|
#' @title Summarize a variable/dataset
#'
#' @description Generic shell function that produces a summary of a variable (or for each
#' variable in an entire dataset), given a number of summary functions and
#' depending on its data class.
#'
#' @param v The variable (vector) or dataset (data.frame) to be summarized.
#'
#' @param reportstyleOutput Logical indicating whether the output should
#' be formatted for inclusion in the report (escaped matrix) or not. Defaults to not.
#'
#' @param summaries A list of summaries to use on each supported variable type. We recommend
#' using \code{\link{setSummaries}} for creating this list and refer to the documentation
#' of this function for more details.
#'
#' @param ... Additional argument passed to data class specific methods.
#'
#' @details Summary functions are supplied using their
#' names (in character strings) in the class-specific argument, e.g.
#' \code{characterSummaries = c("countMissing", "uniqueValues")} for character variables and
#' similarly for the remaining 7 data classes (factor, Date, labelled, haven_labelled, numeric, integer, logical).
#' Note that an overview of all available \code{summaryFunction}s can be obtained by calling
#' \code{\link{allSummaryFunctions}}.
#'
#' The default choices of \code{summaryFunctions} are available in data class specific functions, e.g.
#' \code{defaultCharacterSummaries()} and \code{defaultNumericSummaries()}.
#' A complete overview of all default options can be obtained by calling setSummaries()
#'
#' A user defined summary function can be supplied using its function name. Note
#' however that it should take a vector as argument and return a list on the form
#' \code{list(feature="Feature name", result="The result")}. More details on how to construct
#' valid summary functions are found in \code{\link{summaryFunction}}.
#'
#' @return The return value depends on the value of \code{reportstyleOutput}.
#'
#' If \code{reportstyleOutput = FALSE} (the default): If \code{v} is a varibale,
#' a list of \code{summaryResult} objects, one \code{summaryResult} for each summary
#' function called on \code{v}. If \code{v} is a dataset, then \code{summarize()} returns
#' a list of lists of \code{summaryResult} objects instead; one list for each variable
#' in \code{v}.
#'
#' If \code{reportstyleOutput = TRUE}:
#' If \code{v} is a single variable: A matrix with two columns, \code{feature} and
#' \code{result} and one row for each summary function that was called. Character
#' strings in this matrix are escaped such that they are ready for Rmarkdown rendering.
#'
#' If \code{v} is a full dataset: A list of matrices as described above, one for each
#' variable in the dataset.
#'
#' @references Petersen AH, Ekstrøm CT (2019). “dataMaid: Your Assistant for Documenting Supervised Data Quality Screening in R.” _Journal of Statistical Software_, *90*(6), 1-38. doi: 10.18637/jss.v090.i06 (\url{https://doi.org/10.18637/jss.v090.i06}).
#'
#' @seealso \code{\link{setSummaries}},
#' \code{\link{summaryFunction}}, \code{\link{allSummaryFunctions}},
#' \code{\link{summaryResult}},
#' \code{\link{defaultCharacterSummaries}}, \code{\link{defaultFactorSummaries}},
#' \code{\link{defaultLabelledSummaries}}, \code{\link{defaultHavenlabelledSummaries}},
#' \code{\link{defaultNumericSummaries}}, \code{\link{defaultIntegerSummaries}},
#' \code{\link{defaultLogicalSummaries}}
#'
#' @examples
#' #Default summary for a character vector:
#' charV <- c("a", "b", "c", "a", "a", NA, "b", "0")
#' summarize(charV)
#'
#' #Inspect default character summary functions:
#' defaultCharacterSummaries()
#'
#' #Define a new summary function and add it to the summary for character vectors:
#' countZeros <- function(v, ...) {
#' res <- length(which(v == 0))
#' summaryResult(list(feature="No. zeros", result = res, value = res))
#' }
#' summarize(charV,
#' summaries = setSummaries(character = defaultCharacterSummaries(add = "countZeros")))
#'
#' #Does nothing, as intV is not affected by characterSummaries
#' intV <- c(0:10)
#' summarize(intV,
#' summaries = setSummaries(character = defaultCharacterSummaries(add = "countZeros")))
#'
#' #But supplying the argument for integer variables changes the summary:
#' summarize(intV, summaries = setSummaries(integer = "countZeros"))
#'
#' #Summarize a full dataset:
#' data(cars)
#' summarize(cars)
#'
#' #Summarize a variable and obtain report-style output (formatted for markdown)
#' summarize(charV, reportstyleOutput = TRUE)
#'
#' @export
# S3 generic entry point: dispatches to the summarize.<class> method matching v.
summarize <- function(v, reportstyleOutput = FALSE, summaries = setSummaries(),
                      ...) UseMethod("summarize")
#' Default summary functions for character variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' character type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}}, \code{\link{countMissing}}, \code{\link{uniqueValues}},
#' \code{\link{centralValue}}
#'
#' @examples
#' #remove "variableType" from the summaries:
#' defaultCharacterSummaries(remove = "variableType")
#'
#' @export
defaultCharacterSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for character variables: drop `remove`, append `add`,
  # de-duplicate while keeping first-occurrence order.
  baseline <- c(
    "variableType", "countMissing", "uniqueValues", "centralValue"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#' Default summary functions for factor variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' factor type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso code{\link{variableType}}, \code{\link{countMissing}}, \code{\link{uniqueValues}},
#' \code{\link{centralValue}}
#'
#' @examples
#' #remove "countMissing" for the summaries:
#' defaultFactorSummaries(remove = "countMissing")
#'
#' @export
defaultFactorSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for factors; includes the reference-category summary.
  baseline <- c(
    "variableType", "countMissing", "uniqueValues", "centralValue", "refCat"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#' Default summary functions for labelled variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' labelled type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}},
#' \code{\link{countMissing}}, \code{\link{uniqueValues}}, \code{\link{centralValue}}
#'
#' @examples
#' #remove "centralValue":
#' defaultLabelledSummaries(remove = "centralValue")
#'
#' @export
defaultLabelledSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for labelled variables: drop `remove`, append `add`.
  baseline <- c(
    "variableType", "countMissing", "uniqueValues", "centralValue"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#' Default summary functions for haven_labelled variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' haven_labelled type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}},
#' \code{\link{countMissing}}, \code{\link{uniqueValues}}, \code{\link{centralValue}}
#'
#' @examples
#' #remove "centralValue":
#' defaultHavenlabelledSummaries(remove = "centralValue")
#'
#' @export
defaultHavenlabelledSummaries <- function(remove = NULL, add = NULL) {
  # haven_labelled variables reuse the labelled defaults verbatim.
  defaultLabelledSummaries(remove = remove, add = add)
}
#' Default summary functions for numeric variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' numeric type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}},
#' \code{\link{countMissing}}, \code{\link{uniqueValues}},
#' \code{\link{centralValue}}, \code{\link{quartiles}}, \code{\link{minMax}}
#'
#' @examples
#' #remove "uniqueValues":
#' defaultNumericSummaries(remove = "uniqueValues")
#'
#' @export
defaultNumericSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for numeric variables: drop `remove`, append `add`.
  baseline <- c(
    "variableType", "countMissing", "uniqueValues",
    "centralValue", "quartiles", "minMax"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#' Default summary functions for integer variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' integer type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}},
#' \code{\link{countMissing}}, \code{\link{uniqueValues}},
#' \code{\link{centralValue}}, \code{\link{quartiles}}, \code{\link{minMax}}
#'
#' @examples
#' #remove "countMissing":
#' defaultIntegerSummaries(remove = "countMissing")
#'
#' @export
defaultIntegerSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for integer variables (same set as numeric).
  baseline <- c(
    "variableType", "countMissing", "uniqueValues",
    "centralValue", "quartiles", "minMax"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#' Default summary functions for logical variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' logical type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}},
#' \code{\link{countMissing}}, \code{\link{uniqueValues}}, \code{\link{centralValue}}
#'
#' @examples
#' #remove "uniqueValues":
#' defaultLogicalSummaries(remove = "uniqueValues")
#'
#' @export
defaultLogicalSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for logical variables: drop `remove`, append `add`.
  baseline <- c(
    "variableType", "countMissing", "uniqueValues", "centralValue"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#' Default summary functions for Date variables
#'
#' @param remove Character vector of function names. Checks to remove from the returned vector
#'
#' @param add Character vector of function names. Checks to add to the returned vector
#'
#' @description Default options for which summaries to apply on
#' Date type variables in \code{\link{check}} and \code{\link{makeDataReport}},
#' possibly user-modified by adding extra function names using \code{add} or
#' removing default function names with \code{remove}.
#'
#' @return A list of function names (as character strings).
#'
#' @seealso \code{\link{variableType}}, \code{\link{countMissing}}, \code{\link{uniqueValues}},
#' \code{\link{centralValue}}, \code{\link{minMax}}, \code{\link{quartiles}}
#'
#' @examples
#' defaultDateSummaries()
#'
#' @export
defaultDateSummaries <- function(remove = NULL, add = NULL) {
  # Default summaries for Date variables; note minMax precedes quartiles here.
  baseline <- c(
    "variableType", "countMissing", "uniqueValues",
    "centralValue", "minMax", "quartiles"
  )
  kept <- setdiff(baseline, remove)
  unique(c(kept, add))
}
#methods for each data type
#Catch non-supported classes, do nothing and throw a warning:
#' @export
summarize.default <- function(v, reportstyleOutput = FALSE, summaries = setSummaries(),
                              ...) {
  # Fallback for unsupported variable classes: warn and return a placeholder
  # result instead of failing.
  warning(paste("A variable of class", class(v)[1],
                "was supplied. This is not supported by dataMaid.",
                "No summaries were made."))
  placeholder <- list(summaryResult(list(feature = "No summaries available", result = "-")))
  if (reportstyleOutput) {
    return(sumMatGenerator(placeholder))
  }
  names(placeholder) <- "DataClassNotSupported"
  placeholder
}
#' @export
summarize.character <- function(v, reportstyleOutput = FALSE,
                                summaries = setSummaries(),
                                characterSummaries = NULL,
                                ...) {
  # Apply each character summary function (referenced by name) to v.
  if (is.null(characterSummaries)) {
    characterSummaries <- summaries$character
  }
  out <- lapply(characterSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out))
  }
  names(out) <- characterSummaries
  out
}
#' @export
summarize.factor <- function(v, reportstyleOutput = FALSE,
                             summaries = setSummaries(),
                             factorSummaries = NULL, ...) {
  # Apply each factor summary function (referenced by name) to v.
  if (is.null(factorSummaries)) {
    factorSummaries <- summaries$factor
  }
  out <- lapply(factorSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out))
  }
  names(out) <- factorSummaries
  out
}
#' @export
summarize.labelled <- function(v, reportstyleOutput = FALSE,
                               summaries = setSummaries(),
                               labelledSummaries = NULL,
                               ...) {
  # Apply each labelled summary function (referenced by name) to v.
  if (is.null(labelledSummaries)) {
    labelledSummaries <- summaries$labelled
  }
  out <- lapply(labelledSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out))
  }
  names(out) <- labelledSummaries
  out
}
#' @export
summarize.haven_labelled <- function(v, reportstyleOutput = FALSE,
                                     summaries = setSummaries(),
                                     havenlabelledSummaries = NULL,
                                     ...) {
  # Apply each haven_labelled summary function (referenced by name) to v.
  if (is.null(havenlabelledSummaries)) {
    havenlabelledSummaries <- summaries$haven_labelled
  }
  out <- lapply(havenlabelledSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out))
  }
  names(out) <- havenlabelledSummaries
  out
}
#' @export
summarize.numeric <- function(v, reportstyleOutput = FALSE,
                              summaries = setSummaries(),
                              numericSummaries = NULL,
                              maxDecimals = 2, ...) {
  # Apply each numeric summary function (referenced by name) to v;
  # maxDecimals is forwarded to sumMatGenerator for report-style output.
  if (is.null(numericSummaries)) {
    numericSummaries <- summaries$numeric
  }
  out <- lapply(numericSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out, maxDecimals = maxDecimals))
  }
  names(out) <- numericSummaries
  out
}
#' @export
summarize.integer <- function(v, reportstyleOutput = FALSE,
                              summaries = setSummaries(),
                              integerSummaries = NULL,
                              maxDecimals = 2, ...) {
  # Apply each integer summary function (referenced by name) to v.
  if (is.null(integerSummaries)) {
    integerSummaries <- summaries$integer
  }
  out <- lapply(integerSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out, maxDecimals = maxDecimals))
  }
  names(out) <- integerSummaries
  out
}
#' @export
summarize.logical <- function(v, reportstyleOutput = FALSE,
                              summaries = setSummaries(),
                              logicalSummaries = NULL, ...) {
  # Apply each logical summary function (referenced by name) to v.
  if (is.null(logicalSummaries)) {
    logicalSummaries <- summaries$logical
  }
  out <- lapply(logicalSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out))
  }
  names(out) <- logicalSummaries
  out
}
#' @export
summarize.Date <- function(v, reportstyleOutput = FALSE,
                           summaries = setSummaries(),
                           dateSummaries = NULL,
                           maxDecimals = 0, ...) {
  # Apply each Date summary function (referenced by name) to v.
  if (is.null(dateSummaries)) {
    dateSummaries <- summaries$Date
  }
  out <- lapply(dateSummaries, function(fName) do.call(fName, list(v = v)))
  if (reportstyleOutput) {
    return(sumMatGenerator(out, maxDecimals = maxDecimals))
  }
  names(out) <- dateSummaries
  out
}
#' @export
summarize.data.frame <- function(v, reportstyleOutput = FALSE,
                                 summaries = setSummaries(), ...) {
  # Summarize every column of the data frame individually.
  lapply(v, summarize, reportstyleOutput = reportstyleOutput,
         summaries = summaries, ...)
}
##########################################Not exported below#########################################
#produces the output matrix from a summarize call. Use internally only
#
# Converts a list of summaryResult-style lists (each carrying $feature and
# $result) into a two-column character matrix (Feature, Result), one row per
# summary function, for report-style output.
# NOTE(review): maxDecimals is accepted but unused in this body -- rounding
# presumably happens in the individual summary functions; confirm upstream.
sumMatGenerator <- function(resList, maxDecimals = NULL) {
  nFunctions <- length(resList)
  outMat <- matrix(NA, nFunctions, 2,
                   dimnames=list(NULL, c("Feature", "Result")))
  # seq_len() handles an empty resList safely (1:0 would iterate twice and
  # trigger a subscript-out-of-bounds error).
  for (i in seq_len(nFunctions)) {
    outMat[i, "Feature"] <- resList[[i]]$feature
    outMat[i, "Result"] <- resList[[i]]$result
  }
  outMat
}
|
592ee86ee5ff7864bc4f1c2b18c7054bd14ed7f8
|
4a352d4507772e6cffedd5de126a4b4dcaaa59e0
|
/hampl.R
|
7c27a527d141040ae1efcb43a2e616d66234ba8c
|
[] |
no_license
|
hpsprecher/PT
|
fee05d2dde833c0ec9d549efe11a073ce57cc293
|
cda42d0386bb256f7b410142f7a9d4ed7f34e426
|
refs/heads/master
| 2020-03-29T00:29:06.614170
| 2018-10-09T17:43:16
| 2018-10-09T17:43:16
| 149,341,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
hampl.R
|
hampl <- function(x, maxiter = 25, s_method = "MAD", na.rm = TRUE) {
  # Iteratively reweighted Hampel estimator of location.
  #
  # Args:
  #   x        : numeric vector of observations.
  #   maxiter  : maximum number of reweighting iterations.
  #   s_method : robust scale estimator; one of "MAD" (default), "nIQR"
  #              (normalized IQR), or "Qn" (requires the robustbase package).
  #   na.rm    : drop missing values before estimating?
  #
  # Returns:
  #   list(mu = location estimate, s = scale estimate, iterations = iterations used).
  #
  # match.arg() rejects unknown methods up front (the original silently left
  # s_star undefined and failed later with an obscure error).
  s_method <- match.arg(s_method, c("MAD", "nIQR", "Qn"))
  if (isTRUE(na.rm)) {
    x <- na.omit(x)
  }
  x_star <- median(x)
  s_star <- switch(s_method,
                   MAD  = mad(x),
                   nIQR = 0.7413 * IQR(x),
                   Qn   = {
                     # Optional dependency: only needed for the Qn estimator.
                     if (!requireNamespace("robustbase", quietly = TRUE)) {
                       stop("s_method = \"Qn\" requires the 'robustbase' package",
                            call. = FALSE)
                     }
                     robustbase::Qn(x)
                   })
  # Degenerate scale (e.g. > 50% ties making the MAD zero): the weights would
  # all be zero/NaN and the original code crashed mid-loop. Fall back to the
  # median with a warning.
  if (!is.finite(s_star) || s_star == 0) {
    warning("Scale estimate is zero or non-finite; returning the median",
            call. = FALSE)
    return(list(mu = x_star, s = s_star, iterations = 0))
  }
  tol  <- s_star * 0.01 / sqrt(length(x))
  dif  <- max(x) - min(x)
  iter <- 0
  while (dif >= tol && iter < maxiter) {
    iter <- iter + 1
    # Hampel's three-part redescending weight function with knots 1.5, 3, 4.5.
    q_i <- abs((x - x_star) / s_star)
    w_i <- numeric(length(q_i))
    w_i[q_i <= 1.5] <- 1
    sel <- q_i > 1.5 & q_i <= 3
    w_i[sel] <- 1.5 / q_i[sel]
    sel <- q_i > 3 & q_i <= 4.5
    w_i[sel] <- (4.5 - q_i[sel]) / q_i[sel]
    # Observations with q_i > 4.5 keep weight 0 (fully rejected).
    x_star_new <- sum(w_i * x) / sum(w_i)
    dif <- abs(x_star_new - x_star)
    x_star <- x_star_new
  }
  if (iter >= maxiter) {
    warning("Maximum iterations reached; Hampel estimator may not have converged")
  }
  return(list(mu = x_star, s = s_star, iterations = iter))
}
|
0029c89cac84d12e081ddf6daa3eba5fdec2c3ca
|
a938cac574809e32fe405378eebad102fe8cfeb2
|
/cachematrix.R
|
a838104815541c39c234fd94341b7cd4c750417b
|
[] |
no_license
|
Steven217/ProgrammingAssignment2
|
80497dbc70afe33c19a1e8082f30b358e63ef446
|
c5fba4d47ed6a855f7a54c8e5d301f45ed410099
|
refs/heads/master
| 2021-01-19T13:06:15.338351
| 2017-08-19T21:28:01
| 2017-08-19T21:28:01
| 100,811,039
| 0
| 0
| null | 2017-08-19T17:40:21
| 2017-08-19T17:40:20
| null |
UTF-8
|
R
| false
| false
| 1,762
|
r
|
cachematrix.R
|
## The functions below can be used to create a special "matrix" object that can
## cache the inverse of a matrix rather than compute it repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based "matrix" object that stores a matrix together with a cached
  # inverse. The inverse starts out empty and is invalidated whenever the
  # matrix is replaced.
  InverseMatrix <- NULL
  # Replace the stored matrix and drop any stale cached inverse.
  set <- function(y) {
    x <<- y
    InverseMatrix <<- NULL
  }
  # Accessors for the matrix and its cached inverse.
  get <- function() {
    x
  }
  setInverseMatrix <- function(solve) {
    InverseMatrix <<- solve
  }
  getInverseMatrix <- function() {
    InverseMatrix
  }
  # Expose the four closures as a named list.
  list(
    set = set,
    get = get,
    setInverseMatrix = setInverseMatrix,
    getInverseMatrix = getInverseMatrix
  )
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not changed),
## the cached inverse is returned instead of being recomputed.
cacheSolve <- function(x, ...) {
  # Fast path: return the memoized inverse when one is available.
  cached <- x$getInverseMatrix()
  if (!is.null(cached)) {
    message("getting cached matrix")
    return(cached)
  }
  # Cache miss: invert the stored matrix and memoize the result.
  freshInverse <- solve(x$get(), ...)
  x$setInverseMatrix(freshInverse)
  freshInverse
}
|
291f42e6232085725883ee5d8af3a224150a1ff1
|
1b4ec904d9267f9a06cc4bd576d6d6dda0767e4e
|
/man/get_article_location_data.Rd
|
33e08cd5fcff450f8d684413dec7d0d6e256e590
|
[] |
no_license
|
rOpenHealth/rpubmed
|
a455c8c5535c407e76214dec0bcafc9025efef6e
|
840315df5dbc3de7f3ed069906b5d767e13d3a1e
|
refs/heads/master
| 2021-01-23T12:48:30.056889
| 2017-05-08T13:49:01
| 2017-05-08T13:49:01
| 7,855,530
| 30
| 18
| null | 2022-01-04T15:28:18
| 2013-01-27T17:46:10
|
R
|
UTF-8
|
R
| false
| false
| 602
|
rd
|
get_article_location_data.Rd
|
\name{get_article_location_data}
\alias{get_article_location_data}
\title{Extracts addresses of affiliated departments from Pubmed metadata;
email addresses are cleaned out.}
\usage{
get_article_location_data(abstracts)
}
\arguments{
\item{abstracts}{A list of Pubmed records. e.g. from
fetch_in_chunks()}
}
\value{
character vector of addresses
}
\description{
Extracts addresses of affiliated departments from Pubmed
metadata; email addresses are cleaned out.
}
\examples{
\dontrun{
# Extract affiliated addresses from article metadata:
affil_addresses <- get_article_location_data(abstracts)
}
}
|
2f52a133a2cbcf63ac228db970b023ef24156d62
|
9d13550ab15bee71e95326a1513d4b62c62197b0
|
/man/trim.Rd
|
3f88e05aedbcb7dd90140e879606610d9be766fb
|
[] |
no_license
|
cran/genoPlotR
|
8612232d2992605511e71b74bced5378841f66d0
|
3887f91ed718b7df935c11d1a84e9276f4f6b01c
|
refs/heads/master
| 2021-07-21T15:01:58.200076
| 2021-01-07T14:00:02
| 2021-01-07T14:00:02
| 17,696,331
| 4
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,995
|
rd
|
trim.Rd
|
\name{trim}
\alias{trim}
\alias{trim.default}
\alias{trim.dna_seg}
\alias{trim.comparison}
\alias{trim.annotation}
\alias{trim.seg_plot}
\title{
Trimming data frames or more complex objects with >= 2 numeric columns
}
\description{
Trims data frames with 2 or more numeric columns using an
xlim. \code{xlim}(s) are used to filter rows whose numeric values are
included in this interval.
}
\usage{
trim(x, ...)
\method{trim}{default}(x, xlim = NULL, ...)
\method{trim}{dna_seg}(x, xlim = NULL, ...)
\method{trim}{comparison}(x, xlim1 = c(-Inf, Inf), xlim2 = c(-Inf, Inf), ...)
\method{trim}{annotation}(x, xlim = NULL, ...)
\method{trim}{seg_plot}(x, xlim = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
An object to trim, generally a data frame or a matrix, or a
\code{seg_plot} object.
}
\item{xlim}{
A numeric of length 2. In a general case, the rows whose values are
included in this interval are returned.
}
\item{\dots}{
Unused.
}
\item{xlim1}{
A numeric of length 2. In the case of comparison, where the
comparison can be filtered on two sides, the interval to filter the
first side.
}
\item{xlim2}{
A numeric of length 2. The interval to filter the second side.
}
}
\details{
In the case where \code{x} is a \code{seg_plot} object, the function
uses the \code{xargs} argument to define what are the vectors defining
the x position (they should be the same length). Then, all the
arguments (including those inside an eventual \code{gp} argument) that
are the same length as the x vectors are trimmed, so that only the
rows for which the x values are inside the \code{xlim} argument are kept.
}
\value{
Returns the same object as input, with the rows (or subset)
corresponding to the given interval.
}
\author{
Lionel Guy
}
\seealso{
\code{\link{dna_seg}}, \code{\link{comparison}}, \code{\link{seg_plot}}.
}
\examples{
## Load
data(barto)
xlim_ref <- c(10000, 45000)
## Seg 2 (ref)
barto$dna_segs[[2]] <- trim(barto$dna_segs[[2]], xlim=xlim_ref)
## Seg 1
barto$comparisons[[1]] <- trim(barto$comparisons[[1]], xlim2=xlim_ref)
xlim1 <- range(barto$comparisons[[1]], overall=FALSE)$xlim1
barto$dna_segs[[1]] <- trim(barto$dna_segs[[1]], xlim=xlim1)
## Seg 3
barto$comparisons[[2]] <- trim(barto$comparisons[[2]], xlim1=xlim_ref)
xlim3 <- range(barto$comparisons[[2]], overall=FALSE)$xlim2
barto$dna_segs[[3]] <- trim(barto$dna_segs[[3]], xlim=xlim3)
## Seg 4
barto$comparisons[[3]] <- trim(barto$comparisons[[3]], xlim1=xlim3)
xlim4 <- range(barto$comparisons[[3]], overall=FALSE)$xlim2
barto$dna_segs[[4]] <- trim(barto$dna_segs[[4]], xlim=xlim4)
## Plot
plot_gene_map(barto$dna_segs, barto$comparisons)
## With seg_plot
x <- 1:20
y <- rnorm(20)
sp <- seg_plot(func=pointsGrob, args=list(x=x, y=y,
gp=gpar(col=1:20, cex=1:3)))
## Trim
sp_trim <- trim(sp, c(3, 10))
str(sp_trim)
range(sp_trim$arg$x)
}
\keyword{ data }
|
3b85450044dd3df67731c54a9100ce1cd79f68c5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pbdNCDF4/examples/ncvar_def.Rd.R
|
70716e35f9d9db9f98459605c9132f6cc026ed72
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,288
|
r
|
ncvar_def.Rd.R
|
library(pbdNCDF4)
### Name: ncvar_def
### Title: Define a netCDF Variable
### Aliases: ncvar_def
### Keywords: utilities
### ** Examples
## Not run:
##D # Define an integer dimension
##D dimState <- ncdim_def( "StateNo", "count", 1:50 )
##D
##D # Make an integer variable. Note that an integer variable can have
##D # a double precision dimension, or vice versa; there is no fixed
##D # relationship between the precision of the dimension and that of the
##D # associated variable. We just make an integer variable here for
##D # illustration purposes.
##D varPop <- ncvar_def("Pop", "count", dimState, -1,
##D longname="Population", prec="integer")
##D
##D # Create a netCDF file with this variable
##D ncnew <- nc_create( "states_population.nc", varPop )
##D
##D # Write some values to this variable on disk.
##D popAlabama <- 4447100
##D ncvar_put( ncnew, varPop, popAlabama, start=1, count=1 )
##D
##D # Add source info metadata to file
##D ncatt_put( ncnew, 0, "source", "Census 2000 from census bureau web site")
##D
##D nc_close(ncnew)
##D
##D # Now illustrate some manipulations of the var.ncdf object
##D filename <- "states_population.nc"
##D nc <- nc_open(filename)
##D print(paste("File",nc$filename,"contains",nc$nvars,"variables"))
##D for( i in 1:nc$nvars ) {
##D v <- nc$var[[i]]
##D print(paste("Here is information on variable number",i))
##D print(paste(" Name: ",v$name))
##D print(paste(" Units:",v$units))
##D print(paste(" Missing value:",v$missval))
##D print(paste(" # dimensions :",v$ndims))
##D print(paste(" Variable size:",v$varsize))
##D }
##D
##D # Illustrate creating variables of various types. You will find
##D # that the type of the missing_value attribute automatically follows
##D # the type of the variable.
##D dimt <- ncdim_def( "Time", "days", 1:3 )
##D missval <- -1
##D varShort <- ncvar_def( "varShort", "meters", dimt, missval, prec="short")
##D varInt <- ncvar_def( "varInt", "meters", dimt, missval, prec="integer")
##D varFloat <- ncvar_def( "varFloat", "meters", dimt, missval, prec="single")
##D varDouble<- ncvar_def( "varDouble","meters", dimt, missval, prec="double")
##D nctypes <- nc_create("vartypes.nc", list(varShort,varInt,varFloat,varDouble) )
##D nc_close(nctypes)
## End(Not run)
|
86f3a203f8205b852a5fb7d1fe8cad56069c55b1
|
99226b385e059d258074aba150c62a30f3ec5040
|
/getNearestSegmentParallel.R
|
6b5aeacb105f6d60e0882c111b3112817dc7fc6c
|
[] |
no_license
|
jzwart/2wp-temp-observations
|
368e9599123f165b9187faa42d03b83c3cf47ff0
|
3c5ecc7e27b48d148243f0de32d8015ae91898cd
|
refs/heads/master
| 2020-07-09T19:17:10.141125
| 2019-10-15T18:47:13
| 2019-10-15T18:47:13
| 204,059,749
| 0
| 0
| null | 2019-08-23T19:39:18
| 2019-08-23T19:39:17
| null |
UTF-8
|
R
| false
| false
| 4,090
|
r
|
getNearestSegmentParallel.R
|
library(scipiper)
library(feather)
library(rgdal)
library(geosphere)
library(dplyr)
library(raster)
library(sf)
library(googledrive)
library(parallel)
#' @param sites the file path for an inventory file with MonitoringLocationIdentifier, latitude, and longitude at a minimum
#' @param reaches the file path to a shapefile or gdb with a reach identifier
#' @param layerName the name of the gdb layer
#' @param isGDB boolean TRUE if the reaches file is a gdb, FALSE if it is a shapefile
#' reaches <- "/home/megan/Documents/2wp-temp-observations/delaware_segments.shp"
#' sites <- "1_wqp_pull/inout/wqp_inventory.feather.ind"
getNearestSegment <- function(sites, reaches, layerName=NULL, isGDB=FALSE){
  ## Match each monitoring site to its nearest stream reach and return a
  ## two-column data frame (MonitoringLocationIdentifier, reachId).
  ## Read in site data (gd_get resolves the .ind file to a local feather file).
  featherPath <- gd_get(sites)
  sitesToMatch <- read_feather(featherPath, columns = NULL)
  ## Read in reach data: either a geodatabase layer or a plain shapefile.
  if(isGDB) {
    reachLayer <- readOGR(dsn=reaches,layer=layerName)
  } else {
    reachLayer <- readOGR(reaches)
  }
  ## Make points spatial so we can subset them by the reach layer.
  projection <- "+proj=longlat +datum=WGS84"
  siteCoords <- dplyr::select(sitesToMatch, longitude, latitude) %>%
    sp::SpatialPoints(proj4string = CRS("+proj=longlat +datum=WGS84")) %>%
    sp::spTransform(CRS(projection))
  ## BUG FIX: SpatialPointsDataFrame() requires a CRS object, not a bare
  ## proj4 character string, and `siteCoords` already carries the CRS set
  ## above, so the proj4string argument is dropped (passing the string
  ## errored at run time).
  siteCoordsSPDF <- SpatialPointsDataFrame(siteCoords, data = sitesToMatch)
  ## Project reach layer to WGS84.
  reachLayer <- spTransform(reachLayer, CRS("+proj=longlat +datum=WGS84"))
  ## Relabel each line's internal ID with the reach identifier (first data column).
  for (i in 1:nrow(reachLayer)) {
    reachLayer@lines[[i]]@ID <- as.character(reachLayer[i, ]@data[[1]])
  }
  ## Create a bounding-box polygon around the reaches.
  reachBbox <- st_bbox(reachLayer)
  reachPoly <- as(raster::extent(reachBbox[[1]],reachBbox[[3]],reachBbox[[2]],reachBbox[[4]]), "SpatialPolygons")
  ## Project poly layer to WGS84.
  crs(reachPoly) <- "+proj=longlat +datum=WGS84"
  ## Geospatially subset points by the reach bounding box.
  points_subset <- siteCoordsSPDF[reachPoly,]
  ## Keep only the columns needed for distance matching.
  drops <- c("OrganizationIdentifier","MonitoringLocationIdentifier", "ResolvedMonitoringLocationTypeName", "StateName", "CountyName", "HUCEightDigitCode", "resultCount")
  init_coords <- points_subset[,!(names(points_subset) %in% drops)]
  ## WARNING(review): testing-only random subsample left in place on purpose —
  ## remove before a production run; it also makes results non-deterministic.
  init_coords <- init_coords[sample(nrow(init_coords), 200), ]
  ## Parallel set-up: split points into n chunks processed on a FORK cluster.
  no_cores <- detectCores() - 1
  n <- 100
  parts <- split(1:nrow(init_coords), cut(1:nrow(init_coords), n))
  cl <- makeCluster(no_cores, type = "FORK")
  print(cl)
  system.time(distParts <- parLapply(cl = cl,
                                     X = 1:n,
                                     fun = function(x) {
                                       points.sp <- init_coords[parts[[x]],]
                                       ## NOTE(review): assumes init_coords carries a
                                       ## system.index column — confirm upstream.
                                       points.sp$system.index <- init_coords$system.index[parts[[x]]]
                                       dist <- geosphere::dist2Line(p = points.sp, line = reachLayer)
                                       # Convert dist to data.frame (cols: distance, lon, lat, ID)
                                       dist.df <- as.data.frame(dist)
                                       dist.df$site.lat <- init_coords@data$latitude[parts[[x]]]
                                       dist.df$site.lon <- init_coords@data$longitude[parts[[x]]]
                                       ## BUG FIX: dist2Line() returns, per point, the index (ID)
                                       ## of the nearest line in `reachLayer`; use that index to
                                       ## look up seg_id_nat. The original indexed by parts[[x]],
                                       ## pairing each point with an unrelated segment.
                                       dist.df$reachId <- reachLayer@data$seg_id_nat[dist.df$ID]
                                       colnames(dist.df) <- c("distance", "lon", "lat", "ID", "site.lat", "site.lon", "reachId")
                                       gc(verbose = FALSE) # free memory
                                       return(dist.df)
                                     }))
  stopCluster(cl)
  distBind <- do.call("rbind", distParts)
  ## Recover the MonitoringLocationIdentifier by joining back on coordinates.
  distBind <- dplyr::left_join(distBind, sitesToMatch, by = c("site.lat" = "latitude", "site.lon" = "longitude"))
  ## Return just the site-to-reach lookup.
  distBind <- dplyr::select(distBind, MonitoringLocationIdentifier, reachId)
  return(distBind)
}
|
cca4c8e9f476d9557c8ed7d39f59898e7d7ee009
|
ab1a3abc8788e25b4242d49b50cd6d954b99553f
|
/午後002.R
|
27c768ed6113f0b100041e42a4bc466a5995633b
|
[] |
no_license
|
tateishih/2021R-practice
|
cfaf0249e012d6364b3361613cae1388903ed6a2
|
7aea63df7eda6f9f0ad8705fe08d26203a251dfd
|
refs/heads/main
| 2023-06-11T14:17:04.156910
| 2021-06-29T08:52:55
| 2021-06-29T08:52:55
| 381,294,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,738
|
r
|
午後002.R
|
# 2021 R training workshop
# Reading data-logger files and making figures
# Hiroto Tateishi
# 2021 / 06 / 29
##################################################################
# Load packages
library(tidyverse)
library(lubridate) # date-time handling
library(gnnlab) # lab package (project-specific helpers, e.g. read_onset)
library(magick)
library(showtext)
library(readxl) # package for reading Excel files
library(patchwork)
# Plot settings (color palette and Google fonts)
color = viridis::viridis(6)
font_add_google("Noto Sans JP", "notosans-jp")
font_add_google("Noto Sans", "notosans")
showtext_auto()
# Read data file
# folder that holds the data
folder = "~/Lab_Data/kawatea/"
# file names and paths
mnames = str_glue("{folder}Microstation") |> dir(full = T)
onames = str_glue("{folder}Oxygen") |> dir(full = T)
# confirm that data for 2021/04 exist
str_subset(mnames, "_2104")
str_subset(onames, "_2104")
# read the Microstation file for April 2021
micro = str_subset(mnames, "_2104") |> read_onset()
# read the oxygen-logger files
oxygen =
  tibble(onames) |>
  filter(str_detect(onames,"2104")) |>
  mutate(data = map(onames, read_onset))
# apply read_onset to each file listed in onames
oxygen =
  oxygen |>
  mutate(onames = basename(onames)) |>
  # basename() removes the directory part of the path
  separate(onames,
           c("logger",
             "ID",
             "location",
             "position",
             "survey",
             "extension")) |>
  select(-logger, -survey, -extension)
oxygen =
  oxygen |>
  mutate(location = ifelse(str_detect(location,"amamo"),
                           "Zostera","Sargassum"))
oxygen = oxygen |> unnest(data)
# inspect the data
oxygen |> slice(1:3)
micro |> slice(1:3)
micro =
  micro |>
  mutate(datetime = floor_date(datetime, "min"))
dall = full_join(oxygen,
                 micro,
                 by = "datetime")
# (inner_join would keep only the rows that match in both tables)
s0 = as_date("2021/04/24")
s1 = as_date("2021/05/18")
interval(s0, s1)
dall = dall |> filter(datetime %within% interval(s0, s1))
dall |>
  ggplot() +
  geom_line(aes(x = datetime,
                y = temperature,
                color = position)) +
  facet_grid(rows = vars(location))
dall |>
  ggplot() +
  geom_line(aes(x = datetime,
                y = mgl,
                color = position)) +
  facet_grid(rows = vars(location))
dall |>
  ggplot() +
  geom_point(aes(x = ppfd,
                 y = temperature,
                 color = position)) +
  facet_grid(rows = vars(location))
dsum = dall |>
  mutate(date = as_date(datetime), # convert datetime to date
         .before = "location") |>
  # place the date column before the location column
  group_by(location, position, date) |>
  summarise(across(c(mgl, temperature,
                     ppfd, wind),
                   list(mean = mean,
                        sd = sd,
                        median = median))) # apply each listed statistic
# mean = mean : left side is the output column suffix, right side the function applied
ggplot(dsum) +
  geom_point(aes(x = ppfd_mean,
                 y = mgl_mean)) +
  facet_grid(rows = vars(location),
             cols = vars(position))
dall |>
  mutate(date = as_date(datetime),
         .before = "location") |>
  group_by(location, position, date) |>
  mutate(rate = mgl - lag(mgl), .before = "mgl") |>
  # lag() shifts values back by one position / lead() shifts them forward by one
  select(location, position, date,
         rate, ppfd) |>
  drop_na() |>
  summarise(across(c(rate, ppfd),
                   list(mean = mean,
                        sd = sd))) |>
  ggplot() +
  geom_point(aes(x = ppfd_mean, y = rate_mean)) +
  scale_x_continuous("PPFD") +
  scale_y_continuous("RATE") +
  facet_grid(rows = vars(location),
             cols = vars(position))
|
c4ed515330f5a11cae7edcc7ec20694eda5558fa
|
a037bb09139c03c9f74c97a4c99c79e5f8726b25
|
/QB Mixed Effect Model.R
|
de244bc38655bac8135bac02b2d24ea042686e01
|
[] |
no_license
|
CFBNumbers/College-QB-Mixed-Model
|
ab584fd4bc804112f1873c4704aaf239eff7df84
|
d9a4668a5b29d5c93fbb0cde3b2b36dd0e109c6c
|
refs/heads/main
| 2023-03-03T02:50:34.779298
| 2021-02-09T22:51:27
| 2021-02-09T22:51:27
| 337,552,952
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,347
|
r
|
QB Mixed Effect Model.R
|
library(tidyverse)
library(dplyr)
library(cfbscrapR)
library(lme4)
# Pull play-by-play data for the 2014-2020 seasons from the cfbscrapR data repo.
seasons <- 2014:2020
pbp <- purrr::map_df(seasons, function(x) {
  readRDS(
    url(
      glue::glue("https://raw.githubusercontent.com/saiemgilani/cfbscrapR-data/master/data/rds/pbp_players_pos_{x}.rds")
    )
  )
})
# Extract passer names from the play text.
# BUG FIX: both mutate steps previously started from `pbp_2020`, an object
# never created in this script; the second assignment also discarded the
# `passer` column built by the first. Both now chain off `pbp`.
pbp <- pbp %>%
  mutate(passer = ifelse(pass == 1,
                         ifelse(str_detect(play_text, "from"),
                                str_extract(play_text, "(?<= from )(.{0,30})(?= \\()"),
                                str_extract(play_text, "(.{0,30})(?= pass| sacked)")), NA))
# Extract rusher names from the play text.
pbp <- pbp %>%
  mutate(
    rush_player = ifelse(.data$rush == 1,
                         str_extract(.data$play_text, "(.{0,25} )run |(.{0,25} )\\d{0,2} Yd Run"), NA),
    rush_player = str_remove(.data$rush_player, " run | \\d+ Yd Run"))
# SP+ defensive ratings, one row per team-year.
# Built with map_dfr instead of growing a data frame inside a loop.
sp <- purrr::map_dfr(seasons, function(yr) {
  cfb_ratings_sp(year = yr) %>% mutate(year = yr)
})
sp$defense_rating <- as.numeric(sp$defense_rating)
sp <- sp %>% select(year, team, defense_rating)
# Attach the defensive rating of each play's opposing defense.
pbp <- pbp %>% left_join(sp, by = c("year" = "year",
                                    "defense_play" = "team"))
# Most of the mixed effect model code is from @adrian_cadem. Changes were made for college football.
# Build the modeling data set: keep real pass/rush plays and derive
# field-position flags.
dataraw <- pbp %>%
  filter(pass == 1 | rush == 1,
         penalty_flag == FALSE,
         !is.na(down),
         !is.na(yards_to_goal)) %>%
  mutate(in_red_zone = if_else(yards_to_goal <= 20, 1, 0),
         in_fg_range = if_else(yards_to_goal <= 35, 1, 0))
# Running totals of runs/passes per team-game, plus the previous play type.
# NOTE: this group_by is never ungrouped; downstream row-wise mutates are
# unaffected and later group_by calls override it.
prev_play <- dataraw %>%
  group_by(game_id, offense_play) %>%
  mutate(
    total_runs = if_else(rush == 1,
                         cumsum(rush) - 1, cumsum(rush)
    ),
    total_pass = if_else(pass == 1,
                         cumsum(pass) - 1, cumsum(pass)
    ),
    previous_play = if_else(offense_play == dplyr::lag(offense_play),
                            dplyr::lag(play_type), "First play of Drive"
    ),
    # lag() yields NA on the first row; label it as the start of a drive.
    # (The original wrapped the constant in a no-op replace_na() call.)
    previous_play = if_else(is.na(previous_play),
                            "First play of Drive", previous_play
    )
  )
# Combine passer/rusher into a single `name` column; drop plays with neither.
prev_play <- unite(prev_play, name, c(passer,rush_player), remove = FALSE, na.rm = TRUE)
prev_play <- prev_play %>% filter(name != "")
# BUG FIX: the original filtered with `is_na()`, which is not attached by the
# tidyverse and errors at run time; base `is.na()` is used instead.
data_filt <- prev_play %>%
  filter(!is.na(EPA),
         !is.na(down),
         !is.na(name))
# Select model columns and derive IDs / indicator variables.
epa_data <- data_filt %>%
  select(season, game_id, id_play, EPA, name, offense_play, defense_play,
         pass, yards_to_goal, yards_gained, down, distance, previous_play,
         TimeSecsRem, Under_two, scoring_opp, rz_play, wp_before, period, def_EPA,
         middle_8, defense_conference, defense_rating, home, away, score_diff) %>%
  mutate(
    down = as.factor(down),
    posid = paste(home, away, as.character(season), offense_play, sep = ''),
    defid = paste(home, away, as.character(season), defense_play, sep = ''),
    log_ydstogo = log(distance),
    # `t` was assigned three times in the original; only the last assignment
    # survived, so just that one is kept.
    t = paste(id_play, game_id, season),
    converted = if_else(yards_gained - distance > 0, 1, 0),
    prev_play_run = if_else(previous_play == 'run', 1, 0),
    first_play_drive = if_else(previous_play == 'First play of Drive', 1, 0)
  )
# Play counts per player, keyed as `level` to match broom.mixed's ran_vals output.
plays_qb <- epa_data %>%
  group_by(name) %>%
  summarise(
    num_plays = n()
  ) %>%
  rename(level = name)
# Fixes a couple Zach Wilson plays; flag whether the offense is the home team.
epa_data <- epa_data %>%
  mutate(name = case_when(
    name == "Zach. Wilson" ~ "Zach Wilson",
    TRUE ~ name),
    pos_home = ifelse(home == offense_play, 1, 0))
# Fit the mixed-effects model: fixed effects for game context and defense
# quality, plus a random intercept per player (`(1|name)`). The random
# intercepts are the QB-quality estimates extracted below.
mixed<-lmer(EPA ~
              yards_to_goal +
              distance +
              down +
              distance*down +
              TimeSecsRem +
              period +
              score_diff +
              defense_conference +
              defense_rating +
              pos_home +
              Under_two +
              scoring_opp +
              rz_play +
              wp_before +
              middle_8 +
              previous_play +
              (1|name),
            data=epa_data)
# Per-player random-intercept estimates, keeping only QBs with > 300 plays;
# t_stat is the estimate scaled by its standard error.
tt<-broom.mixed::tidy(mixed,effects="ran_vals") %>%
  merge(plays_qb,by='level',all.x = TRUE,no.dups=TRUE) %>%
  filter(num_plays >300) %>%
  mutate(
    QB = as.factor(level),
    t_stat = estimate/std.error
  )
#If you want to filter down to top 10 statistically significant QBs
tt<-tt[order(-tt$estimate),]
tt<-head(tt,n=10)
z <- 1.282 #This is the 90% Z Score
tt<-tt %>% filter(
  abs(t_stat) > z
)
#This section is entirely data viz for the 2016-2020 QB draft prospects
#Yes I know this is a bad way to do this but...... it worked lol
# NOTE(review): `tt` was already cut to the top-10 significant QBs above, so
# this name filter operates on at most 10 rows, and the order/head/z lines
# below repeat the earlier steps (redundant at this point).
tt <- tt %>% filter(level %in% c("Jared Goff", "Dak Prescott",
                              "Deshaun Watson", "Patrick Mahomes",
                              "Mitch Trubisky", "Baker Mayfield",
                              "Josh Rosen", "Lamar Jackson", "Sam Darnold",
                              "Kyler Murray", "Drew Lock", "Dwayne Haskins",
                              "Daniel Jones", "Jalen Hurts", "Tua Tagovailoa",
                              "Justin Herbert", "Joe Burrow",
                              "Justin Fields", "Trevor Lawrence", "Zach Wilson",
                              "Mac Jones", "Josh Allen"))
tt<-tt[order(-tt$estimate),]
tt<-head(tt,n=10)
z <- 1.282
# Team lookup for each prospect (used for logos/colors); Navy excluded.
teams <- epa_data %>% ungroup() %>% select(name, offense_play) %>%
  filter(name %in% c("Jared Goff", "Dak Prescott",
                     "Deshaun Watson", "Patrick Mahomes",
                     "Mitch Trubisky", "Baker Mayfield",
                     "Josh Rosen", "Lamar Jackson", "Sam Darnold",
                     "Kyler Murray", "Drew Lock", "Dwayne Haskins",
                     "Daniel Jones", "Jalen Hurts", "Tua Tagovailoa",
                     "Justin Herbert", "Joe Burrow",
                     "Justin Fields", "Trevor Lawrence", "Zach Wilson",
                     "Mac Jones", "Josh Allen"),
         offense_play != "Navy") %>% distinct() %>% arrange(name)
#This gets rid of previous transfer locations (Yes, Georgia went with Jake Fromm)
# NOTE(review): dropping rows by hard-coded position is fragile — it silently
# breaks if the arranged name/team list changes.
teams <- teams[-c(7,10, 14, 17), ]
tt <- tt %>% left_join(teams, by = c("QB" = "name"))
# Attach team colors/logos from an external CSV.
cfblogos <- read.csv("https://raw.githubusercontent.com/spfleming/CFB/master/logos.csv")
tt <- tt %>% left_join(cfblogos, by = c("offense_play" = "school"))
tt<-tt[order(tt$estimate),]
# Dot-and-interval plot of the random-effect estimates (90% interval via z).
tt %>%
  ggplot(aes(x=factor(QB, level = QB),estimate)) +
  geom_point()+
  geom_pointrange(aes(ymin=estimate - z*std.error,
                      ymax=estimate + z*std.error),
                  colour = tt$color)+
  coord_flip() +
  labs(y = "Random Effects Estimate | EPA/Play",
       x = "Quarterback",
       subtitle = "Data: 2014-2020, Passes & Rushes",
       caption = "Model Code: @adrian_cadem | Figure: @CFBNumbers | Data: @CFB_Data with #cfbscrapR",
       title = "QB Rankings Via Mixed Effects Model") +
  theme_minimal() +
  theme(
    axis.text = element_text(size = 10),
    axis.title.x = element_text(size = 12),
    axis.title.y = element_text(size = 12),
    strip.text = element_text(size = 10),
    plot.title = element_text(size = 12, hjust = 0.5, face = "bold"),
    plot.subtitle = element_text(size = 9, hjust = 0.5),
    plot.caption = element_text(size = 8, hjust = 0.5))
ggsave("MixedEffectsNew.png", dpi = 600)
|
e36422a0621caa1878e56dbc328a1f8f2998ac02
|
9de3b2b8b28f89cfb13723b6be99f157fc13a313
|
/3_Analysis/1_Social/1_Experimental_analyses/Synergies_tradeoffs/Scripts for reference/Gov_social_data.R
|
6cb2b9260da7bef156fd8b0bfb6377f32e53fd97
|
[] |
no_license
|
WWF-ConsEvidence/MPAMystery
|
0e730dd4d0e39e6c44b36d5f9244a0bfa0ba319b
|
6201c07950206a4eb92531ff5ebb9a30c4ec2de9
|
refs/heads/master
| 2023-06-22T04:39:12.209784
| 2021-07-20T17:53:51
| 2021-07-20T19:34:34
| 84,862,221
| 8
| 1
| null | 2019-07-24T08:21:16
| 2017-03-13T18:43:30
|
R
|
UTF-8
|
R
| false
| false
| 30,891
|
r
|
Gov_social_data.R
|
#install.packages('pacman')
# pacman::p_load installs (if missing) and attaches every listed package.
pacman::p_load(rio,skimr,cowplot,corrgram,corrr,psych,tidyverse)
# Today's date as "YYYYMMDD", for dated output file names.
today.date <- gsub("-","",Sys.Date())
# Import the most recent file in dir.nam whose name matches `nam`
# ("most recent" = last in alphabetical sort, which works for the dated names
# used below). NOTE(review): `value=T` uses the reassignable shortcut T;
# prefer TRUE.
last.file <- function(dir.nam,nam){
  import(paste0(dir.nam,last(sort(grep(nam,list.files(dir.nam), value=T)))))
}
#--Create path to folder that holds multiple .csv files
# NOTE(review): hard-codes one contributor's local path; any other machine
# falls through to the Dropbox-based path.
wd <- getwd()
dropboxdir <- ifelse(wd == "/Users/kendallmaryjefferys/Desktop/gov_analysis3", gsub("Desktop.*$","",wd),gsub("Dropbox.*$","",getwd()))
input.dir <- paste0(dropboxdir,"/Dropbox/Governance analysis/tables/")
list.files(input.dir)
#import data: governance, social, settlement, and impact tables
gov.data <- last.file(input.dir, "_WWF.Governance.Data.Analyzed.V4.csv")
soc.data <- last.file(input.dir, "sett_avg_hh_data.csv")
settl.data <- last.file(input.dir, "HH_tbl_SETTLEMENT_20200316.xls")
impact.data <- last.file(input.dir, "settlevel_impacts_1-3match_20201218.csv")
sett.imp.data <- last.file(input.dir, "Le_HH_data.csv")
soc.imp.data <- last.file(input.dir, "sett_avg_hh_data.csv")
#####################################################################
## gov and soc data
####################################################################
# Rename social-data keys to match the governance table, flag each survey year
# as baseline (earliest year per site) vs post, and flip the poverty index so
# larger values mean greater wealth.
soc.data <- soc.data %>%
  rename("SiteCode" = "MPAID",
         "Year" = "MonitoringYear"
  ) %>%
  group_by(SiteCode) %>%
  mutate(monitor.yr = ifelse(Year==min(Year),"baseline","post"),
         wealthIndex.pca=-PovertyIndex_pca)
# Attach the settlement-level Treatment flag and add the same baseline/post
# label to the governance data.
gov.data <- gov.data %>%
  left_join(select(settl.data,SettlementID,Treatment), by="SettlementID") %>%
  group_by(SiteCode) %>%
  mutate(monitor.yr = ifelse(Year==min(Year),"baseline","post"))
# full.data <- gov.data %>%
# full_join(soc.data, by = c("SettlementID", "SiteCode", "Year")) %>%
# group_by(SiteCode) %>%
# mutate(monitor.yr = ifelse(Year==min(Year),"baseline","post"))
#check for duplicates
#n_distinct(paste0(full.data$SettlementID,"_",full.data$SiteCode,"_",full.data$Year))==nrow(full.data)
#summary(full.data)
###################################################################
# MPA Post Data frames
##################################################################
## filter impact data
final.impact.data <- impact.data %>%
filter(term=="Impact") %>%
group_by(SettlementID, Response) %>%
summarise(estimate=mean(estimate, na.rm=T),
std.error=mean(std.error, na.rm=T),
statistic=mean(statistic, na.rm=T),
p.value=mean(p.value,na.rm=T))
summary(final.impact.data)
###So that MPA ID can be added into full data frame later
MPA.ID <- impact.data %>%
group_by(SettlementID) %>%
summarise(MPAID=median(MPAID))
## Using the full joined gov and soc data
#mpa.post.data <- filter(full.data, monitor.yr=="post", Treatment==1) %>%
# ungroup()
## Using just the governance data, post MPA
gov.mpa.post.data <- gov.data %>%
filter(monitor.yr=="post" & Treatment==1) %>%
ungroup()
## Using just social data baseline MPA
## filter to baseline social data in MPA
soc.mpa.data <- soc.data %>%
filter(monitor.yr=="baseline" & Treatment==1) %>%
ungroup() %>%
##Create community cohesion variable by taking max % participation
mutate(group.part = pmax(MarineGroup,OtherGroup,na.rm = T))
soc.mpa.data %>%
select(MarineGroup,OtherGroup,group.part) %>% head()
############################################################
# SKIM DATA
############################################################
### Skim the data to look at distributions of governance and social variables.
# NOTE(review): `full.data` is only created in the commented-out join above,
# so the two skim() calls on it will error unless that block is restored.
full.data %>%
  dplyr::group_by(SiteCode) %>%
  select(boundaries.defined.FGD_MPA) %>%
  skim()
full.data %>%
  dplyr::group_by(Year) %>%
  select(boundaries.defined.FGD_MPA) %>%
  skim()
##initial increase in mean number of ways boundaries are defined after the baseline year, but then begins to drop off.
gov.mpa.post.data %>%
  dplyr::group_by(SiteCode) %>%
  select(boundaries.defined.FGD_MPA) %>%
  skim()
## higher mean number of ways boundaries are defined for MPA sites after baseline year.
## a lot of variation among MPAs
gov.mpa.post.data %>%
  dplyr::group_by(Year) %>%
  select(boundaries.defined.FGD_MPA) %>%
  skim()
##decline in mean number of ways boundaries defined over time.
##############################################################
# CORRELATION TESTS
##############################################################
##correlation test for gov variables MPAs after baseline year
corr.gov.post <- select(gov.mpa.post.data,boundaries.defined.FGD_MPA:graduated.sanctions)
psych::alpha(corr.gov.post)
corr.bnd.post <- select(gov.mpa.post.data,boundaries.defined.FGD_MPA:internal.boundaries)
psych::alpha(corr.bnd.post)
##correlation test for soc variables MPAs
# corr.soc.post <- soc.mpa.data %>%
# select("RightsExclude", "RightsTransfer", "pri.farming", "pri.harv.forest",
# "pri.fishing","MAIndex", "EconStatusTrend", "FSIndex", "MTIndex",
# "RightsAccess", "RightsHarvest", "RightsManage", "MarineGroup", "OtherGroup",
# "FreqEatFish", "FreqSaleFish", "PercentIncFish", "PAIndex", "group.part","wealthIndex.pca",
# "ed.level", "boat.own", "PercentProteinFish")
#
# psych::alpha(corr.soc.post,check.keys=TRUE)
# corrgram(corr.soc.post, order=TRUE, lower.panel=panel.shade,
# upper.panel=panel.pie, text.panel=panel.txt,cor.method="pearson",
# main="Social variables")
# corr.group.post <- select(soc.mpa.data, MarineGroup:OtherGroup)
# psych::alpha(corr.group.post,check.keys=TRUE)
# Social indicators
marine.rel.col <- c("FreqEatFish","FreqSaleFish","PercentIncFish","pri.fishing", "FreqFish")
wellbeing.col <- c("FSIndex","wealthIndex.pca", "ed.level")
rights.manage.col <- c("RightsManage","RightsExclude","RightsTransfer")
rights.use.col <- c("RightsAccess","RightsHarvest")
psych::alpha(select(soc.mpa.data,marine.rel.col),check.keys=TRUE)
corrgram(select(soc.mpa.data,marine.rel.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Marine Reliance")
psych::alpha(select(soc.mpa.data,wellbeing.col),check.keys=TRUE)
corrgram(select(soc.mpa.data,wellbeing.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Social variables")
psych::alpha(select(soc.mpa.data,rights.manage.col),check.keys=TRUE)
corrgram(select(soc.mpa.data,rights.manage.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Social variables")
psych::alpha(select(soc.mpa.data,rights.use.col),check.keys=TRUE)
corrgram(select(soc.mpa.data,rights.use.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Social variables")
# Goverance indicators
corrgram(select(gov.mpa.post.data,boundaries.defined.FGD_MPA:graduated.sanctions), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Governance variables")
decision.part.col <- c("decision.making.participation","rule.making.participation")
acct.monitoring.col <- c("monitor.eco", "monitor.soc", "monitor.compliance", "accountable.enforcement", "enforcement.frequency",
"enforcement.freq", "eco.monitor.sanctions", "soc.monitor.sanctions", "comp.monitor.sanctions", "pen.monitor.sanctions")
congruence.col <- c("user.rule","DRuleEco","DRuleSoc")
psych::alpha(select(gov.mpa.post.data,decision.part.col),check.keys=TRUE)
corrgram(select(gov.mpa.post.data,decision.part.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Decision Making Participation")
psych::alpha(select(gov.mpa.post.data,acct.monitoring.col),check.keys=TRUE)
corrgram(select(gov.mpa.post.data,acct.monitoring.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Accountable Monitoring")
psych::alpha(select(gov.mpa.post.data,congruence.col),check.keys=TRUE)
corrgram(select(gov.mpa.post.data,congruence.col), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt,cor.method="spearman",
main="Congruence with Local Conditions")
####################################################################################
# Group Variables based on internal Consistency Using Z-score
####################################################################################
## Z-score (standardize) a numeric vector: (x - mean(x)) / sd(x).
## BUG FIX: the na.rm argument was accepted but ignored (mean/sd hard-coded
## na.rm = TRUE); it is now honoured. The default (TRUE) preserves the old
## behaviour for every existing caller.
##
## @param x numeric vector
## @param na.rm drop NAs before computing mean/sd?
## @return numeric vector the same length as x (NAs propagate elementwise)
scale2 <- function(x, na.rm = TRUE) {
  (x - mean(x, na.rm = na.rm)) / sd(x, na.rm = na.rm)
}
##Z-score for Governance Variables
gov.col <- c("decision.making.participation", "rule.making.participation",
"monitor.compliance", "internal.boundaries", "external.boundaries",
"monitor.eco", "monitor.soc", "accountable.enforcement", "enforcement.frequency",
"enforcement.freq", "eco.monitor.sanctions", "soc.monitor.sanctions",
"comp.monitor.sanctions", "pen.monitor.sanctions",
"user.rule", "user.rules.inc",
"DRuleEco","DRuleSoc", "boundaries.defined.FGD_MPA",
"nested.governance", "graduated.sanctions", "compliance.incentives",
"govt.support")
##Avg grouped variables by settlement without Z-score
final.gov.mpa.post.data <- gov.mpa.post.data %>%
ungroup() %>%
mutate(part.dec.making = rowMeans(select(.,decision.part.col),na.rm = T)) %>%
mutate(acct.monitoring = rowMeans(select(.,acct.monitoring.col),na.rm = T)) %>%
mutate(congruency = rowMeans(select(.,congruence.col),na.rm = T)) %>%
group_by(SettlementID) %>%
summarise(part.dec.making=mean(part.dec.making, na.rm=T),
acct.monitoring=mean(acct.monitoring, na.rm=T),
congruency=mean(congruency, na.rm=T),
compliance.incentives=mean(compliance.incentives, na.rm=T),
graduated.sanctions=mean(graduated.sanctions, na.rm=T),
nested.governance=mean(nested.governance, na.rm=T),
govt.support=mean(govt.support, na.rm=T),
boundaries.defined.FGD_MPA=mean(boundaries.defined.FGD_MPA, na.rm=T))
### Scale all variables by Z-score
gov.mpa.post.data.z <- gov.mpa.post.data %>%
group_by(SiteCode) %>%
mutate(across(gov.col, scale2))
##Avg grouped variables by settlement using Z-score
final.gov.mpa.post.data.z <- gov.mpa.post.data.z %>%
ungroup() %>%
mutate(part.dec.making = rowMeans(select(.,decision.part.col),na.rm = T)) %>%
mutate(acct.monitoring = rowMeans(select(.,acct.monitoring.col),na.rm = T)) %>%
mutate(congruency = rowMeans(select(.,congruence.col),na.rm = T)) %>%
group_by(SettlementID) %>%
summarise(part.dec.making=mean(part.dec.making, na.rm=T),
acct.monitoring=mean(acct.monitoring, na.rm=T),
congruency=mean(congruency, na.rm=T),
compliance.incentives=mean(compliance.incentives, na.rm=T),
graduated.sanctions=mean(graduated.sanctions, na.rm=T),
nested.governance=mean(nested.governance, na.rm=T),
govt.support=mean(govt.support, na.rm=T),
boundaries.defined.FGD_MPA=mean(boundaries.defined.FGD_MPA, na.rm=T))
## social variables
soc.col <- c("RightsExclude", "RightsTransfer", "pri.farming", "pri.harv.forest", "TimeMarket",
"pri.fishing", "EconStatusTrend", "FSIndex", "FreqFish","wealthIndex.pca", "ed.level",
"ed.level" , "RightsAccess", "RightsHarvest", "RightsManage", "MarineGroup", "OtherGroup",
"FreqEatFish", "FreqSaleFish", "PercentIncFish", "PAIndex", "group.part")
##Settlement Average for grouped variables (marine reliance, rights.use, rights.manage)
final.soc.mpa.data <- soc.mpa.data %>%
ungroup() %>%
mutate(reliance.marine = rowMeans(select(.,marine.rel.col),na.rm = T)) %>%
mutate(rights.use = rowMeans(select(.,rights.use.col),na.rm = T)) %>%
mutate(rights.manage = rowMeans(select(.,rights.manage.col),na.rm = T)) %>%
group_by(SettlementID) %>%
summarise(reliance.marine=mean(reliance.marine, na.rm=T),
rights.use=mean(rights.use , na.rm=T),
rights.manage=mean(rights.manage , na.rm=T),
wealthIndex.pca=mean(wealthIndex.pca, na.rm=T),
EconStatusTrend=mean(EconStatusTrend, na.rm=T),
ed.level=mean(ed.level, na.rm=T),
FSIndex=mean(FSIndex, na.rm=T),
PAIndex=mean(PAIndex, na.rm=T),
TimeMarket=mean(TimeMarket, na.rm=T),
group.part=mean(group.part, na.rm=T))
##Scale social variables by Z-score
soc.mpa.data.z <- soc.mpa.data %>%
group_by(SiteCode) %>%
mutate(across(soc.col, scale2))
##Settlement average Z-score for grouped social variables
final.soc.mpa.data.z <- soc.mpa.data.z %>%
ungroup() %>%
mutate(reliance.marine = rowMeans(select(.,marine.rel.col),na.rm = T)) %>%
mutate(rights.use = rowMeans(select(.,rights.use.col),na.rm = T)) %>%
mutate(rights.manage = rowMeans(select(.,rights.manage.col),na.rm = T)) %>%
group_by(SettlementID) %>%
summarise(reliance.marine=mean(reliance.marine, na.rm=T),
rights.use=mean(rights.use , na.rm=T),
rights.manage=mean(rights.manage , na.rm=T),
wealthIndex.pca=mean(wealthIndex.pca, na.rm=T),
EconStatusTrend=mean(EconStatusTrend, na.rm=T),
ed.level=mean(ed.level, na.rm=T),
FSIndex=mean(FSIndex, na.rm=T),
PAIndex=mean(PAIndex, na.rm=T),
TimeMarket=mean(TimeMarket, na.rm=T),
group.part=mean(group.part, na.rm=T))
## Now I have two data sets: MPA post governance data for governance variables,
## in units of standard deviation and averaged at the settlement level. And MPA baseline
## social variables, in units of standard deviation, averaged at the settlement level
##There is one with data scaled by Z-scores and one without
#########################################################################################
### Time to left join all three datasets plus MPAID
#########################################################################################
final.gov.soc.impact.data <- final.impact.data %>%
left_join(final.gov.mpa.post.data, by = "SettlementID") %>%
left_join(final.soc.mpa.data, by= "SettlementID") %>%
left_join(MPA.ID, by = "SettlementID")
##Z-score dataset
final.gov.soc.impact.data.z <- final.impact.data %>%
left_join(final.gov.mpa.post.data.z, by = "SettlementID") %>%
left_join(final.soc.mpa.data.z, by= "SettlementID") %>%
left_join(MPA.ID, by = "SettlementID")
#####################################################
##Correlation of Gov, Impact, and Soc Variables
#####################################################
gov.col <- c("part.dec.making", "acct.monitoring",
"boundaries.defined", "congruency","nested.governance",
"graduated.sanctions", "compliance.incentives",
"govt.support")
soc.col <- c("reli.marine",
"PAIndex","TimeMarket", "group.part", "wealth.index", "ed.level",
"econ.status", "FSIndex", "rights.use", "rights.manage")
gov.soc.col.grouped <- c( "estimate","part.dec.making", "acct.monitoring",
"boundaries.defined", "congruency",
"nested.governance", "graduated.sanctions", "compliance.incentives",
"govt.support","reli.marine",
"PAIndex","TimeMarket", "group.part", "wealth.index", "ed.level",
"econ.status", "FSIndex", "rights.use", "rights.manage")
FSImpact.gov.soc.impact.data <- final.gov.soc.impact.data %>%
group_by(SettlementID) %>%
filter(Response=="FSIndex_z") %>%
ungroup() %>%
rename("boundaries.defined"="boundaries.defined.FGD_MPA",
"econ.status" = "EconStatusTrend",
"reli.marine" = "reliance.marine",
"wealth.index" = "wealthIndex.pca")
FSImpact.gov.soc.impact.data.z <- final.gov.soc.impact.data.z %>%
group_by(SettlementID) %>%
filter(Response=="FSIndex_z") %>%
ungroup() %>%
rename("boundaries.defined"="boundaries.defined.FGD_MPA",
"econ.status" = "EconStatusTrend",
"reli.marine" = "reliance.marine",
"wealth.index" = "wealthIndex.pca" )
##Gov Indices Corrgam, Z-score
# Spearman correlograms of the governance and social variable sets (z-scored
# data), then of the combined grouped set on both scales.
corrgram(select(FSImpact.gov.soc.impact.data.z, all_of(gov.col)), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt, label.pos = c(0.5, 0.5), label.srt = 5, cor.method="spearman",
main="Governance Variables")
##Soc Indices Corrgram, Z-score
corrgram(select(FSImpact.gov.soc.impact.data.z, all_of(soc.col)), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt, label.pos = c(0.5, 0.5), label.srt = 5, cor.method="spearman",
main="Social Variables")
##Combined Corrgram
##Labels cut off, might be able to fix by renaming
corrgram(select(FSImpact.gov.soc.impact.data, all_of(gov.soc.col.grouped)), order=TRUE, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt, label.pos = c(0.5, 0.5), label.srt = 5, cor.method="spearman",
main="Governance, Impact and Social Variables")
##Combined corrgram with Z-score
##Labels cut off, might be able to fix by renaming
# NOTE(review): order=NULL here vs order=TRUE above — confirm the unordered
# layout is intended for the z-scored combined plot.
corrgram(select(FSImpact.gov.soc.impact.data.z, all_of(gov.soc.col.grouped)), order=NULL, lower.panel=panel.shade,
upper.panel=panel.cor, text.panel=panel.txt, label.pos = c(0.5, 0.5), label.srt = 5, cor.method="spearman",
main="Governance, Impact and Social Variables")
# scatter plots
# Pairwise scatter plots of the impact estimate against selected covariates.
pairs(select(FSImpact.gov.soc.impact.data, estimate,FSIndex, congruency,part.dec.making,reli.marine,rights.use,group.part ) %>% ungroup)
#linear relationship between baseline food security and food security impact
#scatter plots Z
pairs(select(FSImpact.gov.soc.impact.data.z, estimate,FSIndex, congruency,part.dec.making,reli.marine,rights.use,group.part ) %>% ungroup)
# Reshape to long format so each covariate becomes a facet, then draw loess
# scatter plots of the FS impact estimate against every covariate — first on
# the original scale, then on z-scores — and stack the two panels.
plot.dat <- FSImpact.gov.soc.impact.data %>%
ungroup() %>%
select(estimate,FSIndex, congruency,part.dec.making,reli.marine,rights.use,group.part) %>%
gather(key=var,val=val, FSIndex:group.part)
head(plot.dat)
p.plot.dat <- ggplot(plot.dat, aes(x=val, y=estimate )) +
geom_point() +
facet_grid(.~var, scale = "free") +
ggtitle("Scatter plot") +
scale_color_brewer(palette = "Dark2") +
# geom_smooth(method=lm, se=FALSE) +
geom_smooth(method=loess, se=FALSE) +
labs(x=NULL,y="FS Impact", title="Original scale")
##scatterplots of FSimpact and gov/soc variables
plot.dat.z <- FSImpact.gov.soc.impact.data.z %>%
ungroup() %>%
select(estimate,FSIndex, congruency,part.dec.making,reli.marine,rights.use,group.part) %>%
gather(key=var,val=val, FSIndex:group.part)
head(plot.dat.z)
p.plot.dat.z <- ggplot(plot.dat.z, aes(x=val, y=estimate )) +
geom_point() +
facet_grid(.~var, scale = "free") +
ggtitle("Scatter plot") +
scale_color_brewer(palette = "Dark2") +
# geom_smooth(method=lm, se=FALSE) +
geom_smooth(method=loess, se=FALSE) +
labs(x=NULL,y="FS Impact", title="Z scores")
# Combined figure: z-score panel on top, original scale below.
plot_grid(p.plot.dat.z,p.plot.dat, nrow=2)
##Participation Decision Making is bimodal; divide into "low" and "high"
# Dichotomize at z = 0 and drop rows missing either the group or the estimate.
part.dat.z <- FSImpact.gov.soc.impact.data.z %>%
mutate(particip=ifelse(part.dec.making<0,"low","high")) %>%
ungroup() %>%
filter(!is.na(particip) & !is.na(estimate)) %>%
select(estimate,particip,part.dec.making)
##Standard Error Bars
# Group means with +/- 2*SE bars (approximate 95% interval).
part.plot.dat.z <- part.dat.z %>%
group_by(particip) %>%
summarise(fs.estimate=mean(estimate), fs.sd=sd(estimate), num=n()) %>%
mutate(se=fs.sd/sqrt(num),se.lower=fs.estimate-(2*se),se.upper=fs.estimate+(2*se))
p.particip <- ggplot(part.plot.dat.z,aes(x=particip, y=fs.estimate)) +
geom_errorbar(aes(ymin=se.lower, ymax=se.upper), width=0) +
geom_point(shape = 16) +
geom_hline(yintercept = 0)
# --- Single-predictor models of the FS impact estimate -----------------------
# Each lm() below regresses the impact estimate on one baseline covariate;
# the ## comments record the authors' interpretation of one run's output.
# linear model FSIndex and estimate
summary(lm(estimate~FSIndex, data=FSImpact.gov.soc.impact.data))
##The linear relationship between baseline food security and the food security impact estimate
## is statistically significant with a p-value of 3.945e-06. The adjusted R-squared value is .2502,
##Meaning 25% of variation in the FS impact estimate is due to baseline food security index (Food security measured before the MPA was established)
#linear model FSIndex and estimate z-score
summary(lm(estimate~FSIndex, data=FSImpact.gov.soc.impact.data.z))
##The linear relationship between baseline food security and the food security impact estimate
## is statistically significant with a p-value of 7.855e-05. The adjusted R-squared value is .1871,
##Meaning 18% of variation in the FS impact estimate is due to baseline food security index (Food security measured before the MPA was established)
# t-test, participation in decision making, Z-score
# NOTE(review): filter(part.dat.z) applies no condition, so it is a no-op;
# data=part.dat.z would be equivalent.
t.test(estimate~particip,data=filter(part.dat.z))
##p-value is .07, the difference in mean FSImpact between high and low participation groups is not statistically significant
#linear model, congruency
summary(lm(estimate~congruency, data=FSImpact.gov.soc.impact.data))
##The correlation between congruency and FSImpact is not statistically significant (p-value: 0.2184)
##linear model, congruency Z-score
summary(lm(estimate~congruency, data=FSImpact.gov.soc.impact.data.z))
##The correlation between congruency and FSImpact is not statistically significant (p-value: 0.2084)
##linear model, group.part
summary(lm(estimate~group.part, data=FSImpact.gov.soc.impact.data))
##p-value: 0.06936
##linear model, group.part Z-score
summary(lm(estimate~group.part, data=FSImpact.gov.soc.impact.data.z))
## The negative linear relationship between group participation (proxy for community cohesion) and
## FSImpact is statistically significant with a p-value p-value: 0.04059. Adjusted R-squared: 0.04447
##linear model, reli.marine
summary(lm(estimate~reli.marine, data=FSImpact.gov.soc.impact.data))
##p-value: 0.866
##linear model, reli.marine Z-score
summary(lm(estimate~reli.marine, data=FSImpact.gov.soc.impact.data.z))
##p-value: 0.1388
##linear model, rights.use
summary(lm(estimate~rights.use, data=FSImpact.gov.soc.impact.data))
##p-value: 0.1764
##linear model, rights.use Z-score
summary(lm(estimate~rights.use, data=FSImpact.gov.soc.impact.data.z))
## The positive linear relationship between food security impacts and rights use is statistically
## significant with a p-value of 0.002428. Adjusted R-squared: 0.1099
##### FSImpact Conclusions ########
## FSIndex and rights use significant positive correlation
##group.part significant negative correlation
##### Tests, Relationship between Governance Variables #######
##Scatterplots
pairs(select(FSImpact.gov.soc.impact.data, nested.governance, govt.support, acct.monitoring,graduated.sanctions, compliance.incentives) %>% ungroup)
##linear relationship: acct.monitoring, graduated.sanctions, compliance.incentives
##Scatterplots Z
pairs(select(FSImpact.gov.soc.impact.data.z, nested.governance, govt.support, acct.monitoring,graduated.sanctions, compliance.incentives) %>% ungroup)
##linear relationship: acct.monitoring, graduated.sanctions, compliance.incentives, gov.support; gov.support and nested.governance
##########################################################################################
# Food Security Outcomes and Governance
##########################################################################################
# Every plot in the two sections below follows the same recipe: keep the
# food-security impact rows (Response == "FSIndex_z") and draw the impact
# estimate against one governance or social covariate with a fitted
# least-squares line.  The repeated pipeline is factored into one helper,
# which also fixes the recurring "Basline" typo in the x-axis labels.
#
#   xvar  : name (string) of the covariate column plotted on the x axis
#   title : plot title
#   xlab  : x-axis label
#   df    : data frame holding the impact estimates and covariates
#
# Returns the ggplot object, so each top-level call below auto-prints when
# run interactively, exactly as the original inline pipelines did.
plot_fs_outcome <- function(xvar, title, xlab, df = final.gov.soc.impact.data) {
  df %>%
    filter(Response == "FSIndex_z") %>%
    ggplot(aes(x = .data[[xvar]], y = estimate)) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE) +
    labs(title = title, x = xlab, y = "Food Security Index") +
    theme(panel.background = element_rect(color = "black", fill = "white"))
}
## Governance covariates ----
plot_fs_outcome("part.dec.making",
                "MPA Food Security Outcomes and Participation in Decision Making",
                "Participation Decision Making")
plot_fs_outcome("acct.monitoring",
                "MPA Food Security Outcomes and Accountable Monitoring",
                "Accountable Monitoring")
plot_fs_outcome("congruency",
                "MPA Food Security Outcomes and Congruency",
                "Congruency")
plot_fs_outcome("compliance.incentives",
                "MPA Food Security Outcomes and Compliance Incentives",
                "Compliance Incentives")
plot_fs_outcome("graduated.sanctions",
                "MPA Food Security Outcomes and Graduated Sanctions",
                "Graduated Sanctions")
plot_fs_outcome("nested.governance",
                "MPA Food Security Outcomes and Nested Governance",
                "Nested Governance")
plot_fs_outcome("govt.support",
                "MPA Food Security Outcomes and Government Support",
                "Government Support")
plot_fs_outcome("boundaries.defined.FGD_MPA",
                "MPA Food Security Outcomes and Boundaries Defined",
                "Boundaries Defined")
###########################################################################
##Outcomes Social
###########################################################################
plot_fs_outcome("reliance.marine",
                "MPA Food Security Outcomes and Baseline Marine Reliance",
                "Marine Reliance")
#####Negative Relationship between Baseline Food Security and Food Security Impact
plot_fs_outcome("FSIndex",
                "MPA Food Security Outcomes and Baseline Food Security Index",
                "Baseline Food Security Index")
plot_fs_outcome("EconStatusTrend",
                "MPA Food Security Outcomes Baseline Economic Status",
                "Baseline Econ Status Trend")
plot_fs_outcome("wealthIndex.pca",
                "MPA Food Security Outcomes Baseline Wealth Index",
                "Baseline Wealth Index")
plot_fs_outcome("rights.use",
                "MPA Food Security Outcomes Baseline Marine Rights Use Tenure Index",
                "Baseline Marine Rights Use Tenure Index")
plot_fs_outcome("rights.manage",
                "MPA Food Security Outcomes Baseline Marine Rights Manage Tenure Index",
                "Baseline Marine Rights Manage Tenure Index")
# x label now matches the plotted variable and the title (was "Basline
# Personal Assets Index" while the plot shows PAIndex, place attachment).
plot_fs_outcome("PAIndex",
                "MPA Food Security Outcomes Baseline Place Attachment Index",
                "Baseline Place Attachment Index")
plot_fs_outcome("group.part",
                "MPA Food Security Outcomes Baseline Group Participation",
                "Baseline Group Participation Index")
plot_fs_outcome("ed.level",
                "MPA Food Security Outcomes Baseline Education Level",
                "Baseline Education Level")
plot_fs_outcome("TimeMarket",
                "MPA Food Security Outcomes Baseline Time to Market",
                "Baseline Time to Market")
|
8ac95f93542ceb96960a98854df63f140272a44e
|
2d17676e1fb7dd3cc012d46d7f553e9b63e10424
|
/Pooling_covid_functions.R
|
02b2a0125b1fa25c0d8fc63cca51a15548e3e2cb
|
[] |
no_license
|
ericxbcheng/pooling_new
|
6000768b6b0feba6fcbcf51b222c0bc177240e0a
|
461928ba6fce259f3c9e4f330cfa7629470a3a3a
|
refs/heads/master
| 2022-11-26T10:54:22.420062
| 2020-08-05T23:42:46
| 2020-08-05T23:42:46
| 285,337,142
| 0
| 1
| null | 2020-08-05T18:55:20
| 2020-08-05T15:52:32
|
R
|
UTF-8
|
R
| false
| false
| 7,319
|
r
|
Pooling_covid_functions.R
|
# Build a 0/1 infection-status vector for n individuals, n_pos of them
# positive, in uniformly random order.
make_samples <- function(n, n_pos){
  stopifnot(n >= n_pos)
  statuses <- c(rep.int(x = 1, times = n_pos), rep.int(x = 0, times = n - n_pos))
  sample(x = statuses, size = n, replace = FALSE)
}
# Assign each of n samples to a pool of (at most) pool_size members.
# Complete pools are numbered 1..(n %/% pool_size); any leftover samples each
# get their own singleton pool id after that.
# for2D = FALSE interleaves ids (1,2,...,q,1,2,...); for2D = TRUE lays them
# out in contiguous runs (1,1,...,2,2,...) as needed for 2-D pooling.
# NOTE(review): when n %% pool_size == 0 the for2D layout is NOT applied and
# the interleaved form is returned regardless — confirm that is intended.
make_pool_idx <- function(n, pool_size, for2D = FALSE){
  n_full <- n %/% pool_size
  n_left <- n %% pool_size
  if (n_left == 0) {
    return(rep.int(1:n_full, times = pool_size))
  }
  full_ids <- if (for2D) rep(x = 1:n_full, each = pool_size) else rep.int(1:n_full, times = pool_size)
  singleton_ids <- seq.int(from = n_full + 1, to = n_full + n_left, by = 1)
  c(full_ids, singleton_ids)
}
# Mean concentration within each pool; a pool tests positive when its mean
# exceeds thresh.  Returns a named logical vector, one element per pool id.
find_pool_pos_covid <- function(conc, pool_idx, thresh){
  pool_means <- vapply(split(conc, pool_idx), mean, numeric(1))
  pool_means > thresh
}
# NOTE(review): this looks like an unfinished stub for the 2-D pooling test:
# `conc` and `thresh` are accepted but never used, and no pool status is
# returned (the function yields NULL unless the remainder warning prints).
# Confirm intent before relying on it.
find_pool_pos_2d_covid = function(conc, n, pool_size, thresh, message = FALSE){
  plate_cap <- pool_size^2      # samples in one complete pool_size x pool_size plate
  n_sets <- n %/% plate_cap     # number of complete plates
  n_indiv <- n %% plate_cap     # leftover samples that do not fill a plate
  if (message == TRUE && n_indiv != 0) {
    print("need to add results in for remainder")
  }
}
# NOTE(review): this function appears broken as written and should not be
# relied on until fixed:
#  * `row_pools` and `col_pools` are neither arguments nor defined locally,
#    so the body only works if they happen to exist in the calling
#    environment — confirm where they are meant to come from.
#  * `for( i in 1:unique(setID) )` iterates over 1:<first unique value of
#    setID>, not over the set ids; something like seq_along(unique(setID))
#    was presumably intended.
#  * the multiplicative indexing `row_pools[i*(1:pool_size)]` and
#    `status[(1:(pool_size)^2)*i]` scales indices rather than offsetting by
#    set, so sets overlap for i > 1 — verify against the intended layout.
#  * `status <- n` seeds status with a scalar; later assignments grow it.
compare_2D_pools_ec = function(n, pool_size){
#get number of sets to compare
n_sets <- n %/% pool_size^2
if(n %% pool_size^2 != 0) print("need to add results in for remainder")
#compare by sets
status<-n
setID = rep(x = 1:n_sets, each = pool_size)
for( i in 1:unique(setID) ) {
# Outer product of the i-th set's row-pool and column-pool results.
d <- row_pools[i*(1:pool_size)] %o% col_pools[i*(1:pool_size)]
status[(1:(pool_size)^2)*i] <- as.vector(d)
}
return(status)
}
# Per-sample classification metrics for 1-D pooling.
# Truth: a sample is positive when conc == 1.  Prediction: a sample is called
# positive when the pool it belongs to tested positive.  Both are coded as
# 0/1 factors so the 2x2 table always carries both levels.
# (`n` is unused but retained for interface compatibility.)
# Returns c(sensitivity, specificity).
calc_1d_metrics_covid <- function(n, conc, pool_pos, pool_idx){
  truth <- factor(as.numeric(conc == 1), levels = c(0, 1))
  in_pos_pool <- pool_idx %in% which(pool_pos)
  predicted <- factor(as.numeric(in_pos_pool), levels = c(0, 1))
  confusion <- table(truth, predicted)
  sensitivity <- confusion[2, 2] / (confusion[2, 2] + confusion[2, 1])
  specificity <- confusion[1, 1] / (confusion[1, 1] + confusion[1, 2])
  c(sensitivity, specificity)
}
# Simulate one round of 1-D pooled testing: draw a random 0/1 status vector,
# assign samples to pools, test pools against `thresh`, and score the implied
# per-sample calls against the truth.
#
#   n         : number of samples (> 0)
#   n_pos     : number of true positives among them (0 <= n_pos <= n)
#   pool_size : target number of samples per pool
#   thresh    : pooled-mean threshold above which a pool tests positive
#
# Returns c(sensitivity, specificity).
sim_1d_outcome_covid = function(n, n_pos, pool_size, thresh = 0){
  # BUG FIX: the original guard was `stopifnot(n > n_pos | n > 0 | n_pos >= 0)`;
  # with `|` a single true clause let invalid input through (e.g. n_pos > n
  # passed whenever n > 0).  All three conditions must hold, and n == n_pos is
  # legal (make_samples itself only requires n >= n_pos).
  stopifnot(n >= n_pos, n > 0, n_pos >= 0)
  conc = make_samples(n = n, n_pos = n_pos)
  pool_idx = make_pool_idx(n = n, pool_size = pool_size)
  pool_pos = find_pool_pos_covid(conc = conc, pool_idx = pool_idx, thresh = thresh)
  # Calculate sensitivity and specificity
  out = calc_1d_metrics_covid(n = n, conc = conc, pool_pos = pool_pos, pool_idx = pool_idx)
  return(out)
}
# Factory: freeze the simulation parameters into a one-argument function
# (its argument is ignored) so it can be handed to map()/lapply() for
# repeated iteration.
gen_sim_1d_outcome_covid <- function(n, n_pos, thresh, pool_size){
  function(...) {
    sim_1d_outcome_covid(n = n, n_pos = n_pos, thresh = thresh, pool_size = pool_size)
  }
}
# Run the 1-D pooling simulation n_iter times with the argument list Args
# (names matching gen_sim_1d_outcome_covid's parameters).
# Returns a list of n_iter c(sensitivity, specificity) vectors.
sim_iterate_covid <- function(n_iter, Args){
  # Guard: at least one iteration requested.
  stopifnot(n_iter >= 1)
  # Manufacture a zero-state runner with the parameters baked in,
  # then invoke it once per iteration.
  runner <- do.call(what = gen_sim_1d_outcome_covid, args = Args)
  lapply(seq_len(n_iter), runner)
}
# Flatten a list of (sensitivity, specificity) pairs into two parallel
# vectors.  unlist() interleaves the pairs, so odd positions hold the
# sensitivities and even positions the specificities.
# (The parameter name `list` shadows base::list as a value but calls to
# list() still resolve to the function; kept for interface compatibility.)
clean_covid <- function(list){
  flat <- unlist(list)
  sens_at <- seq(from = 1, by = 2, length.out = length(flat) / 2)
  list("sensi" = flat[sens_at], "speci" = flat[sens_at + 1])
}
# Return a copy of Args with element `param` replaced by `val`.
# `param` must already be one of the names in Args.
update_arg_covid <- function(Args, param, val){
  stopifnot(param %in% names(Args))
  Args[[param]] <- val
  Args
}
# One tuning step: set Args[[param]] <- val, run n_iter simulations, and
# return the cleaned sensitivity/specificity lists with a `param` vector
# recording the tuning value used for every iteration.
tune_param_covid <- function(Args, n_iter, param, val, ...){
  tuned_args <- update_arg_covid(Args = Args, param = param, val = val)
  raw_runs <- sim_iterate_covid(n_iter = n_iter, Args = tuned_args)
  out <- clean_covid(list = raw_runs)
  # Tag every iteration with the tuning value so runs can be stacked later.
  out$param <- rep.int(x = val, times = n_iter)
  out
}
# Sweep the primary tuning parameter `var_prim` over every value in `vals`,
# returning one cleaned result list per value.
tune_param_n_covid <- function(vals, Args, n_iter, var_prim){
  lapply(vals, tune_param_covid, Args = Args, n_iter = n_iter, param = var_prim)
}
# Crossed sweep over a primary and a secondary tuning parameter.
# For each secondary value, fix it into the argument list and run the full
# primary sweep; returns a list (one element per secondary value) of lists
# (one element per primary value).
tune_param_sec_covid <- function(Args, var_prim, vals_prim, var_sec, n_iter, vals_sec){
  # One argument list per secondary tuning value.
  args_per_sec <- lapply(vals_sec, function(v) {
    update_arg_covid(Args = Args, param = var_sec, val = v)
  })
  # Preallocated container; one primary sweep per secondary value.
  sim_data <- vector("list", length(vals_sec))
  for (i in seq_along(vals_sec)) {
    sim_data[[i]] <- lapply(vals_prim, tune_param_covid,
                            Args = args_per_sec[[i]], n_iter = n_iter, param = var_prim)
  }
  sim_data
}
# Flatten the nested output of tune_param_sec_covid into one data frame and
# append a `param2` column recording the secondary tuning value for each
# block of length(vals_prim) * n_iter rows.
metrics_sec_covid <- function(data, vals_prim, vals_sec, n_iter){
  per_sec <- lapply(data, bind_rows)
  combined <- bind_rows(per_sec)
  param2 <- rep(x = vals_sec, each = length(vals_prim) * n_iter)
  data.frame(combined, param2)
}
# # Calculate the cost of reagents and pipetting
# calc_1d_cost = function(data, n, by){
#
# stopifnot(by %in% c("row", "column"))
# stopifnot(n %in% c(48, 96))
#
# if(n == 48){
# n_row = 6
# n_col = 8
# } else if(n == 96){
# n_row = 8
# n_col = 12
# } else {
# stop("Undefined n. Choose 48 or 96.")
# }
#
# if(by == "row"){
# a = do.call(what = rbind.data.frame, args = data) %>%
# mutate(putative_pos = sensi*n_pos + (1-speci)*(n-n_pos),
# n_test_total = n_row + putative_pos,
# n_pipette = n + n_row + putative_pos)
# return(a)
#
# } else if(by == "column"){
# b = do.call(what = rbind.data.frame, args = data) %>%
# mutate(putative_pos = sensi*n_pos + (1-speci)*(n-n_pos),
# n_test_total = n_col + putative_pos,
# n_pipette = n + n_col + putative_pos)
# return(b)
#
# }
# }
# Ribbon plot of the tuning results: for each (param2, param, metric) cell,
# draw the median with a 2.5th-97.5th percentile ribbon, colored by metric
# and faceted by the secondary tuning parameter.
#   data       : data frame from metrics_sec_covid (columns param, param2,
#                plus one column per metric, e.g. sensi/speci)
#   xlab       : x-axis label
#   legend_lab : legend title for the metric colors/fills
#   xtick      : labels for the x-axis breaks (one per unique param value)
# Returns the ggplot object.
plot_tune2_ribbon_covid = function(data, xlab, legend_lab, xtick){
# Summarise the data
# Long format (one row per metric value), then percentile summaries per cell.
a = data %>%
gather(data = ., key = "metric", value = "value", -c(param, param2)) %>%
group_by(param2, param, metric) %>%
summarise(lb = quantile(x = value, probs = 0.025),
med = median(x = value),
ub = quantile(x = value, probs = 0.975))
# Visualize
b = ggplot(data = a) +
geom_ribbon(aes(x = param, ymin = lb, ymax = ub, group = as.factor(metric), fill = as.factor(metric)), alpha = 0.3) +
geom_line(aes(x = param, y = med, color = as.factor(metric))) +
geom_point(aes(x = param, y = med, color = as.factor(metric))) +
scale_y_continuous(breaks = seq(from = 0, to = 1, by = 0.1)) +
scale_fill_discrete(name = legend_lab) +
scale_color_discrete(name = legend_lab) +
scale_x_continuous(breaks = unique(a$param), labels = xtick) +
coord_cartesian(ylim = c(0,1)) +
labs(x = xlab, y = "Metrics (2.5th - 97.5th percentile)") +
theme_bw() +
theme(legend.position = "top")+
facet_wrap(~param2)
return(b)
}
|
2083b48b4e89e1167e3df1fdc6c21ddcdbcb4a21
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/seewave/examples/wav2flac.Rd.R
|
7918104b904377e1a9111582bad2cc9e4579106c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 534
|
r
|
wav2flac.Rd.R
|
# Extracted example code for seewave::wav2flac (wav <-> FLAC conversion).
# The whole example is wrapped in "Not run" (##D lines) because it writes
# files and requires the external flac encoder.
library(seewave)
### Name: wav2flac
### Title: wav-flac file conversion
### Aliases: wav2flac
### Keywords: IO
### ** Examples
## Not run:
##D # synthesis of a 1kHz sound
##D a<-synth(d=10,f=8000,cf=1000)
##D # save it as a .wav file in the default working directory
##D savewav(a,f=8000)
##D # compress it to FLAC format and overwrite on the file a.wav
##D wav2flac("a.wav", overwrite=TRUE)
##D # back to .wav format
##D wav2flac("a.flac", reverse=TRUE)
##D # remove the files
##D unlink(c("a.wav","a.flac"))
## End(Not run)
|
4c01c9a431ecbc8c9f67941767f485b7e28a8a3a
|
adde50f644a2ed67c261fb9bc2a0e67821189978
|
/man/b_rename.Rd
|
b5a65c93d0cee3cf8322914a475afad91a904585
|
[] |
no_license
|
peterwsetter/bplyr
|
de8dc87b7c14c9e9f8dd70f2f1f14b40da05195d
|
553f658e10cf50f32bdaeaed0b0117df8c5f0952
|
refs/heads/master
| 2022-04-11T20:09:54.957640
| 2020-02-18T02:10:13
| 2020-02-18T02:10:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
b_rename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rename.R
\name{b_rename}
\alias{b_rename}
\title{rename variables by name}
\usage{
b_rename(.data, ...)
}
\arguments{
\item{.data}{data.frame}
\item{...}{variables to rename}
}
\value{
data.frame
}
\description{
Mimics dplyr::rename using base R and rlang
}
\examples{
airquality\%>\%
b_rename(tmp = Temp,day = Day)\%>\%
head()
}
\seealso{
\code{\link[rlang]{quotation}}
}
\author{
Jonathan Sidi
}
|
bcfe10a1894b6dbd47c89267f5119d94ef937c24
|
8204f0abe97c1b5162c41fccf7574c8c9936b585
|
/plot2.R
|
dd531341156aa0ad45e99d89d9b36dd9b1ac4a32
|
[] |
no_license
|
flyingrat42/ExData_Plotting1
|
ad56bd9c5656e5c6c8be0c107d3cdbe678142e05
|
a929b146b033b0396a9c0589977bbcb57f62347f
|
refs/heads/master
| 2020-12-27T15:20:30.243326
| 2015-06-03T02:07:31
| 2015-06-03T02:07:31
| 36,665,375
| 0
| 0
| null | 2015-06-01T14:17:29
| 2015-06-01T14:17:29
| null |
UTF-8
|
R
| false
| false
| 304
|
r
|
plot2.R
|
# Render the Global Active Power time series to plot2.png (480x480, PNG with
# transparent background).  Requires loadData() from loadData.R in the
# working directory.
plot2 <- function () {
  source("loadData.R")
  #Get data
  powerdata <- loadData()
  png(filename = "plot2.png", width = 480, height = 480, bg = "transparent")
  # Close the device even if plotting fails; the original called dev.off()
  # unconditionally at the end and leaked an open device on error.
  on.exit(dev.off(), add = TRUE)
  with(powerdata, plot(x = DateTime, y = Global_active_power, type = "l",
                       xlab = "", ylab = "Global active power (kilowatts)"))
}
|
a09ca411bf6af3fec3f0d7aac39b2bf79e9f9aa8
|
4573415414b0c12d8be0930ab4ee3a0434dcdbe8
|
/man/MakePriorityPrunerInputFile.Rd
|
cb432f367e4e232b9700848b8b5b6d2a40a7624b
|
[] |
no_license
|
bbanbury/bb_gecco
|
fb3b277ce1f34c24599d0c07f535ef0de5bf5488
|
81740295d4908ae94500cbf7dcc97e6e23843e2f
|
refs/heads/master
| 2020-05-20T15:40:39.095515
| 2015-05-26T15:08:32
| 2015-05-26T15:08:32
| 33,628,901
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,889
|
rd
|
MakePriorityPrunerInputFile.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/SearchFunctions.R
\name{MakePriorityPrunerInputFile}
\alias{MakePriorityPrunerInputFile}
\title{Make PriorityPruner Input File}
\usage{
MakePriorityPrunerInputFile(snplist, pvals = "gigsv2", forceSelect = NULL,
designScore = NULL, chatty = TRUE, save.file = TRUE)
}
\arguments{
\item{snplist}{A SNP name, either rs number or chr:pos (ex, "rs1234" or "1:1234")}
\item{pvals}{Where to draw pvalues (or other values between 0 and 1). PriorityPruner will use these as a prioritization metric. This is used for prioritizing the selection of SNPs, where lower numbers are prioritized.}
\item{forceSelect}{For now, this doesn't really work other than to assign all values the same 0 status. Flag indicating if the SNP should be selected (kept) regardless of its LD with other selected SNPs or other filtering criteria specified, such as MAF or design score (1=true, 0=false).}
\item{designScore}{For now, this doesn't really work other than to assign all values the same 1 status. Numeric design score assigned to the SNP; PriorityPruner uses this to filter out SNPs falling below its minimum design score threshold.}
\item{chatty}{Option to print progress to screen}
\item{save.file}{Option to save results to files. This will save a separate file for each chromosome in the snplist.}
}
\description{
Auto create an input file for the program PriorityPruner from a vector of snp names
}
\details{
This function returns a table with the required columns for the program PriorityPruner. These include: snp_name, chr, pos, a1, a2, P, forceSelect, and designScore.
}
\examples{
MakePriorityPrunerInputFile(c("rs3181096", "rs3863057", "rs6666554"), save.file=FALSE)
}
\seealso{
\link{MakeSNPDetailsTable_GIGS} \link{GetMarginal} \link{Run_PriorityPruner}
}
|
2595baf05cb280203797f4361defc01ccc41676d
|
0e6446212f2bcf34c2512cbb241e84149bde4152
|
/kNN - 19042020.R
|
45b45e8323db60492e709148faa5c97fdfa78633
|
[] |
no_license
|
shishir247/ML-algorithm-with-R
|
de70ac96ae41fc967d09a3497eecf107a658fb87
|
e8ca2789e107c1d8720bb87c63c4ddee8ad40516
|
refs/heads/master
| 2022-12-09T23:59:12.400394
| 2020-09-18T14:14:01
| 2020-09-18T14:14:01
| 295,944,075
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,314
|
r
|
kNN - 19042020.R
|
# kNN classification demo on the Pima Indians Diabetes data (mlbench).
# NOTE(review): rm(list = ls()) at the top of a script wipes the user's
# workspace; generally better removed.
rm(list=ls())
library(mlbench)
library(class)
# Loading dataset
?PimaIndiansDiabetes
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes
head(data)
summary(data)
str(data)
# Keep the class labels aside before normalising the predictors.
diabetes <- data$diabetes
# Min-max rescaling of a numeric vector onto [0, 1].
normalize <- function(x) {
num <- x - min(x)
denom <- max(x) - min(x)
return(num/denom)
}
dim(data)
mode(data)
# NOTE(review): the next line's result is discarded (it only auto-prints when
# run interactively); the assignment below repeats the same work.
lapply(data[,1:8], normalize)
data <- as.data.frame((lapply(data[,1:8], normalize)))
head(data)
data$diabetes <- diabetes
head(data)
summary(data)
# Splitting dataset: first 600 rows train, remaining 168 rows test.
# NOTE(review): normalisation used min/max of the full data before the split,
# so the test set influences the scaling — confirm acceptable for this demo.
train <- data[1:600,]
test <- data[601:768,]
head(train)
head(test)
########################################################################################
## Build a kNN model (with k = 10) on the training dataset in R to predict the diabetes
## (pos or neg). So here we will consider "diabetes" as Class variable. Then test the
## model on the testing dataset. Calculate accuracy and error rate.
cl <- train$diabetes # defining class - predictor variable
model <- knn(train[-9], test[-9], cl, k = 10)
# model # predicted value on test data set
Accuracy <- mean(model == test$diabetes)
Accuracy# 0.7678571
# Even number k can give different accuracy at each iteration.
# NOTE(review): with an even k, ties between classes are broken at random, so
# accuracy can vary between runs unless a seed is set.
model <- knn(train[-9], test[-9], cl, k = 11)
Accuracy <- mean(model == test$diabetes)
Accuracy
############################################################################
## Perform k-fold validation (with k = 10) on PimaIndiansDiabetes data.
# K fold with k = 10
library(caret)
# classProbs + twoClassSummary let train() report ROC-based metrics.
control <- trainControl(method = "cv", number = 10, classProbs=TRUE,
summaryFunction = twoClassSummary)
# will compute the sensitivity, specificity and area under the ROC curve
# (caret::train is found despite the `train` data frame above, because R
# skips non-function objects when resolving a call.)
fit_knn <- train(diabetes ~ ., data=PimaIndiansDiabetes, method="knn",
metric="ROC", trControl=control)
fit_knn
# Sample console output recorded from one run of the cross-validation above:
# k-Nearest Neighbors
#
# 768 samples
# 8 predictors
# 2 classes: 'neg', 'pos'
#
# No pre-processing
# Resampling: Cross-Validated (10 fold)
# Summary of sample sizes: 691, 691, 691, 691, 691, 691, ...
# Resampling results across tuning parameters:
#
# k ROC Sens Spec
# 5 0.7405755 0.816 0.5146724
# 7 0.7639858 0.830 0.5484330
# 9 0.7805883 0.838 0.5330484
#
# ROC was used to select the optimal model using the largest value.
# The final value used for the model was k = 9.
|
5f1d5e922c98c57b0c9cef2bb16aa53ee8b742c4
|
df9fb162c8a135c1336f7209c6fd0978d5d9062a
|
/man/AppUI.Rd
|
0b2a0e39d75f6765caaf489cef198091fa491fe6
|
[] |
no_license
|
alexthie/FCBapp5
|
1593b928e34fd3851d7cec5dc871091dc887e39f
|
831ca46f4a7bf59d20bf8dbe3b2f339616342218
|
refs/heads/master
| 2022-11-02T07:56:30.871275
| 2020-06-17T04:45:09
| 2020-06-17T04:45:09
| 272,878,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 293
|
rd
|
AppUI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AppUI.R
\docType{data}
\name{AppUI}
\alias{AppUI}
\title{Shiny app server object}
\format{An object of class \code{shiny.tag} of length 3.}
\usage{
AppUI
}
\description{
Shiny app server object
}
\keyword{datasets}
|
5f7012fc4a87188984129eaa9c5b11bd54cd5a03
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NCSampling/examples/Check.pop.Rd.R
|
b12edc9d152beb623bcd5d6f0b721518d242146a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 151
|
r
|
Check.pop.Rd.R
|
# Extracted example stub for NCSampling::Check.pop; the single example call
# is left commented out because it needs a population file and design
# variables to run.
library(NCSampling)
### Name: Check.pop
### Title: Check population file
### Aliases: Check.pop
### ** Examples
## Check.pop(popfile, desvars)
|
f6c58f7f7ed45029b50bf04bdfba7df68bfa4d19
|
c4cb920902a96270eabe14349daada9269dad185
|
/R/build.R
|
cdc672f22e3933d528e351b81e1bd23be3f32a7a
|
[] |
no_license
|
ashbaldry/vstsr
|
d224187bde2f3694cb63ce8dd1bf5a3b535a54dd
|
76111d35ddb7650eb6d602b6acc60f95925cbb03
|
refs/heads/main
| 2023-08-30T21:38:13.389355
| 2023-08-20T10:18:21
| 2023-08-20T10:18:21
| 117,109,966
| 4
| 2
| null | 2023-08-20T10:15:49
| 2018-01-11T14:19:14
|
R
|
UTF-8
|
R
| false
| false
| 1,392
|
r
|
build.R
|
#' Azure DevOps Project Build Definition Information
#'
#' @description
#' These functions will allow you to scrape build definition information from Azure DevOps.
#'
#' @details
#' For more information about the build definition API calls check
#' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/build/definitions/list}.
#'
#' @param domain The name of the Azure DevOps organization.
#' @param project the name of the project in \code{domain} to look at
#' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}}
#' @param query a list of extra parameters that can be sent to the API call. Check details for access to list
#' of options.
#'
#' @examples
#' \dontrun{
#' # Add in own details to get a non-NULL output
#' auth_key <- vsts_auth_key("<username>", "<password>")
#' vsts_get_build_defs("domain", "project", auth_key)
#' }
#'
#' @rdname vsts_build_def
#' @export
vsts_get_build_defs <- function(domain, project, auth_key, query = NULL) {
  # Build the list-definitions endpoint URL and issue the authenticated GET.
  endpoint <- file.path(AZURE_HOME_URL, domain, project, "_apis/build/definitions?api-version=5.0")
  resp <- httr::GET(endpoint, httr::add_headers(Authorization = auth_key), query = query)
  # On HTTP 200, return the parsed `value` field (a data frame of build
  # definitions); otherwise report the failure and yield an invisible NULL.
  if (httr::status_code(resp) == 200) {
    httr::content(resp, encoding = "UTF-8", simplifyDataFrame = TRUE)$value
  } else {
    send_failure_message(resp, "get build definition list")
    invisible(NULL)
  }
}
|
7aea8f717c7c198f97e243f714b57522ca716c22
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/yhat/examples/plotCI.yhat.rd.R
|
a36d643eef55ae499e6fdc2ed6570044dd5d3c7c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,020
|
r
|
plotCI.yhat.rd.R
|
# Extracted example code for yhat::plotCI.yhat: bootstrap a regression
# predicting paragraph comprehension from three verbal tests, then
# (commented out at the end) plot confidence intervals for the predictor
# metrics.  Requires the MBESS and boot packages at run time.
library(yhat)
### Name: plotCI.yhat
### Title: Plot CIs from yhat
### Aliases: plotCI.yhat
### Keywords: models regression
### ** Examples
## Bootstrap regression results predicting paragraph
## comprehension based on three verbal tests: general info,
## sentence comprehension, & word classification
## Use HS dataset in MBESS
require ("MBESS")
data(HS.data)
## Regression
lm.out<-lm(paragrap~general+sentence+wordc,data=HS.data)
## Calculate regression metrics
regrOut<-calc.yhat(lm.out)
## Bootstrap results
require ("boot")
boot.out<-boot(HS.data,boot.yhat,100,lmOut=lm.out,regrout0=regrOut)
## Evaluate bootstrap results
result<-booteval.yhat(regrOut,boot.out,bty="perc")
## Plot results
## plotCI.yhat(regrOut$PredictorMetrics[-nrow(regrOut$PredictorMetrics),],
## result$upperCIpm,result$lowerCIpm, pid=which(colnames(regrOut$PredictorMetrics)
## %in% c("Beta","rs","CD:0","CD:1","CD:2","GenDom","Pratt","RLW") == TRUE),nr=3,nc=3)
|
14b5fc907da81e25c4f2df944488d81c1e952831
|
6fe8911fff7c35b25a27b67bfd152429e695de54
|
/bak_bioinformatics3_perl_scripts/perl_scripts/R_code/meth_eadge_3par.R
|
c0c09e50109aa4e8e2ec8a28070eb8dd1169ee20
|
[] |
no_license
|
KaoruTSai/CAU_PhD_scripts
|
01a93fc5dce6f18c30eb3afd5e7d2391bf24be97
|
4868d3e10bf03badc4cb051a3e09d2694da94dce
|
refs/heads/master
| 2022-01-07T23:47:47.318915
| 2019-04-17T02:53:37
| 2019-04-17T02:53:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 858
|
r
|
meth_eadge_3par.R
|
# Plot methylation-level profiles for the three sequence contexts (CpG, CHG,
# CHH) as three stacked panels.
#   cpg, chg, chh : paths to whitespace-delimited tables whose 3rd column
#                   holds the methylation level; rows 1:201 and 202:402 are
#                   plotted as two segments, with dashed vertical lines at
#                   x = 101 and x = 313 marking the region boundaries.
# NOTE(review): only the CpG panel is lowess-smoothed (f = 0.05); CHG and CHH
# are drawn raw — confirm the asymmetry is intended.
# NOTE(review): par(mfrow = c(3, 1)) is set without saving/restoring the
# previous graphics settings, so the caller's par() state is clobbered.
meth_eadge_3par<-function(cpg,chg,chh){
# pdf(OUT,width=6,height=8)
#exp
cpg<-read.table(cpg)
chg<-read.table(chg)
chh<-read.table(chh)
par(mfrow=c(3,1))
plot(lowess(cpg[1:201,3],f=0.05),type="l",xlim=c(0,450),col="red",xlab="CpG",ylab="Methylation Level")
abline(v=101,lty="dashed")
lines(212:412, lowess(cpg[202:402,3], f=0.05),col="red")
abline(v=313,lty="dashed")
plot(chg[1:201,3],type="l",xlim=c(0,450),col="RoyalBlue",xlab="CHG",ylab="Methylation Level")
abline(v=101,lty="dashed")
lines(212:412,chg[202:402,3],col="RoyalBlue")
abline(v=313,lty="dashed")
plot(chh[1:201,3],type="l",xlim=c(0,450),col="PaleGreen4",xlab="CHH",ylab="Methylation Level")
abline(v=101,lty="dashed")
lines(212:412,chh[202:402,3],col="PaleGreen4")
abline(v=313,lty="dashed")
# dev.off()
}
|
f554e724250dfb6a6ecbe35724ffeaa803b9953a
|
09a628049984b5c939aef8f1cfd8bc71cdb4735b
|
/R/setEmptyCellsValue.R
|
56f60aafce41e3b9c4b58335932e34fcebd3daa5
|
[] |
no_license
|
mathphysmx/cleanTable
|
471d5e7db64e2b2b128fee3f5483d1b4728d478f
|
17bb71242a7c07bc5760ed6210eef037c4eb6906
|
refs/heads/master
| 2021-09-23T18:04:49.268573
| 2018-09-26T08:43:43
| 2018-09-26T08:43:43
| 115,658,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,304
|
r
|
setEmptyCellsValue.R
|
#' @title Changes multiple invalid values for a unique invalid value
#' @param ef Empty Flag number,character, symbol ("", "(N/A)", NA, ...), etc. What is meant to be "empty". \code{flag} could be a vector but all of itS elements must be different of the posible valid values of \code{x}.
#' @param x Vector with multiple empty symbols.
#' @param fw The unique number,character, symbol (NA, Inf), etc. which represents empty cells. What is the symbol meaning empty cell?. Default to \code{NA}
#' @export
#' @examples
#' xe <- c(-999, 1, 0, NA)
#' print(xe)
#' setEmptyCellsValue(xe)
#' setEmptyCellsValue(xe, fw = 1000)
#' # example 2
#' xe <- data.frame(a = 3:6, b = 0, c = 0, d = 0, e = 0)
#' xe[2, 1] <- 0
#' xe[1, 4] <- 3
#' xe[2, 3] <- 3
#' xe[3, 3] <- 3
#' xe[3, 5] <- 3
#' xe[4, 2] <- 3
#' print(xe)
#' setEmptyCellsValue(x = xe, ef = 0)
# IMPROVEMENTS make characters, NA, numerics, and other \code{typeof} being possible as \code{ef}
# IMPROVEMENTS:
# accept different object classes as values of \code{flag}.
# For example flag <- list(a = 0, b = NA, c = "")
setEmptyCellsValue <- function(x, ef = c(NA, -999, 0), fw = NULL){
xf <- x
for(i in 1:length(ef)){
xf[xf == ef[i]] <- NA
}
if(!is.null(fw))
xf[is.na(xf)] <- fw
return(xf)
}
|
42db382622153298028d0af69dce668b86af0e68
|
883a4a0c1eae84485e1d38e1635fcae6ecca1772
|
/nCompiler/man/nCompile_nClass.Rd
|
a3d7da8f605186fd8bd4b312af2dc62823edd100
|
[
"BSD-3-Clause"
] |
permissive
|
nimble-dev/nCompiler
|
6d3a64d55d1ee3df07775e156064bb9b3d2e7df2
|
392aabaf28806827c7aa7b0b47f535456878bd69
|
refs/heads/master
| 2022-10-28T13:58:45.873095
| 2022-10-05T20:14:58
| 2022-10-05T20:14:58
| 174,240,931
| 56
| 7
|
BSD-3-Clause
| 2022-05-07T00:25:21
| 2019-03-07T00:15:35
|
R
|
UTF-8
|
R
| false
| true
| 1,070
|
rd
|
nCompile_nClass.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NC_Compile.R
\name{nCompile_nClass}
\alias{nCompile_nClass}
\title{Compile a nClass.}
\usage{
nCompile_nClass(
NC,
dir = file.path(tempdir(), "nCompiler_generatedCode"),
cacheDir = file.path(tempdir(), "nCompiler_RcppCache"),
env = parent.frame(),
control = list(),
interface = c("full", "generic", "both"),
...
)
}
\arguments{
\item{NC}{A nClass generator (returned from a call to \link{nClass}).}
\item{dir}{Directory where generated C++ will be written.}
\item{cacheDir}{Directory to be used for Rcpp cache.}
\item{env}{Environment to be used for loading results of compilation.}
\item{control}{List of control settings for compilation. See...}
}
\value{
Generator of objects of the compiled version of class
\code{NC}. These will use C++ objects internally for compiled
data and methods.
}
\description{
Generates C++ for the compilable data and methods of a nClass,
manages C++ compilation of them and returns a generator for obejcts
of the compiled class.
}
|
ea8652f4d41d8aa3739fabd32478d75ca53d4611
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/gdimap/R/gqi.odfpeaks.R
|
399637286778d5dba2b65e2ac870807c057fa806
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,456
|
r
|
gqi.odfpeaks.R
|
## GQI volume processing
## fslview-compatible gfa-map and V1 volumes
gqi.odfpeaks <-
function(gdi="gqi", fbase=NULL, rg=NULL, swap=FALSE, lambda=NULL, depth=3, btoption=2, threshold=0.4, showglyph=FALSE, bview="coronal", savedir=tempdir(), aniso=NULL)
{
gdimethods <- c("gqi", "gqi2")
gdimethod <- match(gdi, gdimethods)
bviews <- c("sagittal", "coronal", "axial")
kv <- match(bview, bviews)
stopifnot(is.na(kv) != TRUE)
##---------
## generate S2 grid
s2 <- s2tessel.zorder(depth=depth, viewgrid=FALSE)
odfvertices <- s2$pc
tcsurf <- s2$tcsurf
##-----------
## Read data
testfilexist(fbase=fbase, btoption=btoption)
if(btoption == 1) { ## Option 1: S2-shell (DSI 203-point 3mm)
btable <- as.matrix(readtable(fbase=fbase, filename="btable.txt"))
}
else {
if(btoption == 2) {
## Option 2: using a 3D-dsi grid
bval <- scantable(fbase=fbase, filename="data.bval")
# bvec <- readtable(fbase=fbase, filename="data.bvec")
bvec <- scantable(fbase=fbase, filename="data.bvec")
bvec <- matrix(bvec, ncol=3)
btable <- cbind(bval,bvec)
rm(bval, bvec)
}
else stop()
}
##--------------------
gc()
cat("Reading data ...\n")
ptm <- proc.time()
img.nifti <- readniidata(fbase=fbase, filename="data.nii.gz")
volimg <- img.nifti@.Data
mask.nifti <- readniidata(fbase=fbase, filename="data_brain_mask.nii.gz")
volmask <- mask.nifti@.Data
print(proc.time() - ptm)
rm(img.nifti, mask.nifti)
gc()
##--------------------
d <- dim(volmask)
volgfa <- array(0, dim=d) ## gfas map
V1 <- array(0, dim=c(d, 3)) ## V1 direction
if(is.null(rg)) {
switch(kv,
{ nslices <- d[1]}, # sagittal,
{ nslices <- d[2]}, # coronal
{ nslices <- d[3]}) # axial
first <- 1; last <- nslices
}
else { first <- rg[1]; last <- rg[2] }
cat("\n")
##-----------------------------
## "gdimethod" process
cat("Estimating slice odfs ...\n")
switch(gdimethod,
q2odf <- gqifn(odfvert=odfvertices, btable=btable,
lambda=lambda),
q2odf <- gqifn2(odfvert=odfvertices, btable=btable,
lambda=lambda) )
##-----------------------------
## store 1st vector directions for each non-thresholded voxel
## v1list: vector of lists
nv1 <- length(first:last)
v1list <- vector(mode="list", nv1)
v1count <- 0
for (sl in (first:last)) {
cat(sl,"")
## slicedata <- read.slice(img=volimg, mask=volmask, slice=sl, swap=swap)
slicedata <- read.slice(img=volimg, mask=volmask, slice=sl,
swap=swap, bview=bview)
ymaskdata <- premask(slicedata)
if(ymaskdata$empty) next # empty mask
##------------------
## odfs
odfs <- q2odf %*% (ymaskdata$yn)
# odfs <- apply(odfs, 2, norm01) ## normalize
odfs <- apply(odfs, 2, anisofn, aniso=aniso)
##------------------
## gfas
gfas <- apply(odfs, 2, genfa)
gfas <- norm01(gfas) ##??
z2d <- ymaskdata$kin
zx <- which(gfas <= threshold)
if(length(zx)) {
z2d <- z2d[-zx,]
gfas <- gfas[-zx]
odfs <- odfs[,-zx]
}
if(is.null(dim(z2d))) next
# if(length(gfas) < 2) next # 2 elements as minimum number
lix <- dim(z2d)[1]
v1perslice <- matrix(0, nrow=lix,ncol=3) # store v1 directions
nullvectors <- NULL
for(m in 1:lix) {
odf <- odfs[,m]
##-------------------
## find peaks
odf <- odf[1:(length(odf)/2)] # use half sized odf in findpeak
pk <- findpeak(odf, t(odfvertices), tcsurf)
## don't store eigenvector for cross-fiber voxels
if(length(pk$peaks) < 1 | (length(pk$peaks) > 2)) {
nullvectors <- c(nullvectors, m)
next
}
v1perslice[m,] <- pk$pcoords[,1]
## optional glyph visualization
if(showglyph) {
if(rgl.cur() == 0) rglinit()
else rgl.clear()
if(pk$np < 2) { # show 1st direction only
plotglyph(odfs[,m], odfvertices, pk, kdir=2, vmfglyph=FALSE)
pp <- readline(
"\nmore glyphs ? ('n' to exit) ")
if(pp == "n" ) { rgl.close(); showglyph <- FALSE; }
else { rgl.clear( type = "shapes" ) }
}
}
}
# remove null pk vectors
nvl <- lix
nnv <- length(nullvectors)
if(nnv > 0) {
nvl <- nvl-nnv
v1perslice <- v1perslice[-nullvectors,]
z2d <- z2d[-nullvectors,]
gfas <- gfas[-nullvectors]
}
## V1 volume
if(is.null(dim(z2d))) next
for(k in 1:3) {
switch(kv,
{ mx <- matrix(0, d[2],d[3])
mx[z2d] <- v1perslice[,k]
V1[sl,,,k] <- mx }, # sagittal
{ mx <- matrix(0, d[1],d[3])
mx[z2d] <- v1perslice[,k]
V1[,sl,,k] <- mx }, # coronal
{ mx <- matrix(0, d[1],d[2])
mx[z2d] <- v1perslice[,k]
V1[,,sl,k] <- mx } ) # axial
}
## gfas volume
switch(kv,
{ mx <- matrix(0, d[2],d[3])
mx[z2d] <- gfas
volgfa[sl,,] <- mx }, # sagittal
{ mx <- matrix(0, d[1],d[3])
mx[z2d] <- gfas
volgfa[,sl,] <- mx }, # coronal
{ mx <- matrix(0, d[1],d[2])
mx[z2d] <- gfas
volgfa[,,sl] <- mx } ) # axial
}
print(proc.time() - ptm)
cat("\n")
##-----------------------------
f <- paste(savedir,"/data_gfa",sep="")
writeNIfTI(volgfa, filename=f, verbose=TRUE)
cat("wrote",f,"\n")
f <- paste(savedir,"/data_V1",sep="")
writeNIfTI(V1, filename=f, verbose=TRUE)
cat("wrote",f,"\n")
}
|
da3ba617a4a454518f18c58456f157c65194c5f7
|
0f5847a777e01692b0fa2580f0e39aa048c32acb
|
/R/KScollect.R
|
076ad114e120b67481cd325607be88ef92b28fd1
|
[] |
no_license
|
geanes/KScollect
|
4eb0a6fee4639fdfd8cf3f45bf5c543f6c9ecb82
|
d8a5e5b0aa046e437c791b396d5ffe1a16e2a1ed
|
refs/heads/master
| 2021-01-17T23:27:31.669432
| 2018-10-10T18:20:33
| 2018-10-10T18:20:33
| 46,076,351
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
KScollect.R
|
#' KScollect: Shiny application to collect data for future inclusion in the
#' KidStats package.
#'
#' \code{KScollect} Purpose built Shiny application to collect data.
#'
#' @docType package
#' @name KScollect
NULL
#' @import shiny shinyFiles
NULL
#' KScollect shiny app call
#' @examples
#' KScollect()
#' @export
KScollect <- function(new = NULL){
# library(shinyFiles)
if (length(new) > 0 && !file.exists(new)) {
file.create(new)
shinyOptions(newfile = TRUE)
file_path <- new
} else {
shinyOptions(newfile = FALSE)
file_path <- file.choose()
}
shinyOptions(file_path = file_path)
shiny::runApp(system.file('app', package = 'KScollect'), launch.browser = TRUE)
}
|
dc6d00e8faf9e478beb8854a09ff7ec8df1460c0
|
a36ee60ef3673d4af1980f9a5151a68b63ce3cea
|
/inst/doc/ex3_RetrievingRegressionCoefficients.R
|
2181f963507b815323b37a4265aa625a601e8b68
|
[] |
no_license
|
cran/lsasim
|
36c042b64d96e3ae78745874a962b3a9b6c0ab34
|
86d89294245c04db04ccf9ce15a50c70b229bade
|
refs/heads/master
| 2023-03-31T18:30:22.208960
| 2023-03-28T10:10:02
| 2023-03-28T10:10:02
| 82,914,843
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
ex3_RetrievingRegressionCoefficients.R
|
## ----setup, include = FALSE, warning = FALSE--------------------------------------------
library(knitr)
options(width = 90, tidy = TRUE, warning = FALSE, message = FALSE)
opts_chunk$set(comment = "", warning = FALSE, message = FALSE,
echo = TRUE, tidy = TRUE)
## ----load-------------------------------------------------------------------------------
library(lsasim)
## ----packageVersion---------------------------------------------------------------------
packageVersion("lsasim")
## ---------------------------------------------------------------------------------------
set.seed(1234)
bg <- questionnaire_gen(n_obs = 1000, n_X = 2, n_W = list(2, 3), theta = TRUE,
family = "gaussian", full_output = TRUE)
## ---------------------------------------------------------------------------------------
str(bg$bg)
## ---------------------------------------------------------------------------------------
bg$linear_regression
## ---------------------------------------------------------------------------------------
beta_gen(bg)
## ---------------------------------------------------------------------------------------
beta_gen(bg, MC = TRUE, MC_replications = 100, rename_to_q = TRUE)
## ---------------------------------------------------------------------------------------
beta_gen(bg, MC = TRUE, MC_replications = 100, rename_to_q = TRUE)
|
7642003bc86de2f931c38dc0f2f1c489f9d9f546
|
a920451b755817c085ecf925aba7e5f7b8137bb4
|
/res/bg/mail2/messy-mergingv2.R
|
af26815a4612b88cd1b0bcbe31965e2accc3a0b7
|
[] |
no_license
|
lucienbaumgartner/ddj18
|
5c6c0216cfcd48c5bc9bda27ed941a4f5bde89de
|
889f353571355320a7c6d84782549a15673e275b
|
refs/heads/master
| 2020-03-29T20:18:48.294509
| 2019-01-14T11:53:31
| 2019-01-14T11:53:31
| 150,297,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
messy-mergingv2.R
|
library(dplyr)
library(reshape2)
rm(list = ls())
setwd('~/ddj18/res/bg/mail2/csv')
dfl <- lapply(grep('Table\\s1', list.files(), value = T), function(x){
tmp <- read.csv(x, stringsAsFactors = F, header = T, skip = 4) %>%
as_tibble %>%
.[,sapply(.[1:ncol(.)], function(x) any(grepl('[0-9]|[A-z]', x)))] %>%
setNames(., make.names(names=gsub('[^a-zA-Z0-9äÄöÖüÜ]|^c', '', (paste0(.[1:3,]))) %>% .[!.==""], unique=TRUE, allow_ = TRUE)) %>%
.[-c(1:4),] %>%
mutate(Jahr=as.integer(gsub('[^0-9]', '', x)) %/% 10)
if(any(grepl('HauptgebieteBezirkeRegionen', colnames(tmp)))) tmp <- rename(tmp, Gemeinden=HauptgebieteBezirkeRegionen)
tmp <- mutate_at(tmp, vars(-Gemeinden), as.numeric)
})
taxrates <- dfl %>%
lapply(., function(x){setNames(x, tolower(colnames(dfl[[1]])))}) %>%
do.call(rbind, .) %>%
filter(!(is.na(gemeinden)|gemeinden=="")) %>%
print(n=100)
save(taxrates, file='taxrates-yearly.RDS')
tx.sub <- taxrates %>%
select(bfs, polent=gemeinden, taxrate=ohnekirchen, year=jahr)
save(tx.sub, file='taxrates-yearly-subset.RDS')
|
9b7ba798aa8d70be5d70d790a62066b9cae11b34
|
47225532fed76248f3a4a6a9d4073a8a6d4c3e91
|
/plot4.R
|
01bb5bd2d39d9d80533e22f0330106e45fb28bfb
|
[] |
no_license
|
sndofconfusion/ExData_Plotting1
|
28c4650296d98a31872c250022699c94ffdf5e68
|
d240d1fd57d14d3b1518eece9efda32b43ef435c
|
refs/heads/master
| 2021-01-15T11:25:08.585972
| 2015-12-13T10:54:44
| 2015-12-13T10:54:44
| 47,915,268
| 0
| 0
| null | 2015-12-13T10:51:00
| 2015-12-13T10:50:59
| null |
UTF-8
|
R
| false
| false
| 1,403
|
r
|
plot4.R
|
#load libraries
library(data.table)
library(dplyr)
#load file
power <- fread("./household_power_consumption.txt", na.strings = "?")
#add datetime column
power <- mutate(
power,
datetime = as.POSIXct(
paste(power$Date,power$Time),
format="%d/%m/%Y %H:%M:%S"
)
)
#filter for data between "2007-02-01" and "2007-02-03"
power <- filter(power,
datetime >= "2007-02-01",
datetime < "2007-02-03"
)
#open png device
png(file="plot4.png", width=480, height=480)
par(mfrow=c(2,2), mar=c(4,4,1,1))
#draw plot 1
plot(
power$datetime,
power$Global_active_power,
xlab="",
ylab="Global Active Power",
type="l"
)
#draw plot 2
plot(
power$datetime,
power$Voltage,
xlab="datetime",
ylab="Voltage",
type="l"
)
#draw plot 3
plot(
power$datetime,
power$Sub_metering_1,
type="n",
xlab="",
ylab="Energy sub metering"
)
points(power$datetime, power$Sub_metering_1, col="black", type="l")
points(power$datetime, power$Sub_metering_2, col="red", type="l")
points(power$datetime, power$Sub_metering_3, col="blue", type="l")
legend(
"topright",
lty=1,
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
)
#draw plot 4
plot(
power$datetime,
power$Global_reactive_power,
xlab="datetime",
ylab="Global_reactive_power",
type="l"
)
#close png device
dev.off()
|
a98d38d606939a54dd88646b463332241c645f3c
|
fd0ecf7fc0940ff086025602749062f20b68f751
|
/.Rprofile
|
e761ccf83ff3da1b9dcae397d18000c9fd66b8fa
|
[] |
no_license
|
maurogm/proyectoAB
|
e1540aa9f8710cfa5c7104bbf347847de0457d29
|
0381ef020beaeb7aaaecd6bde29dc12cc667cf2d
|
refs/heads/master
| 2022-08-24T00:47:02.777559
| 2020-05-27T15:01:22
| 2020-05-27T15:01:22
| 241,650,136
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,924
|
rprofile
|
.Rprofile
|
exports_code_in_Rmd_as_script <- function(path, input_file_name, output_file_name = NA, documentation = 1) {
if (is.na(output_file_name)) output_file_name <- paste0(input_file_name, '_rscript')
knitr::purl(paste0(path, '/', input_file_name, '.Rmd'),
paste0(path, '/', output_file_name, '.R'),
documentation = documentation)
}
append_archivo_secundario_a_archivo_principal <- function(path_archivo_principal, nombre_archivo_principal,
path_archivo_secundario, nombre_archivo_secundario,
sobreescribir_archivo_principal = TRUE) {
#Si sobreescribir_archivo_principal es FALSE, escribe la nueva versi?n en el path principal con el nombre nombre_archivo_principal_bis
archivo_principal <- fread(paste(path_archivo_principal, nombre_archivo_principal, sep = '/'), na.string="NULL", encoding="UTF-8")
archivo_secundario <- fread(paste(path_archivo_secundario, nombre_archivo_secundario, sep = '/'), na.string="NULL", encoding="UTF-8")
if (sobreescribir_archivo_principal) {
write.table(rbind(archivo_principal, archivo_secundario),
paste(path_archivo_principal, nombre_archivo_principal, sep = '/'),
row.names = FALSE, sep='\t')
} else {
write.table(rbind(archivo_principal,archivo_secundario),
paste(path_archivo_principal, paste('appended', nombre_archivo_principal,sep='_'), sep = '/'),
row.names = FALSE, sep='\t') }
}
object_sizes_all <- function() {
size_Mb <- sapply(ls(globalenv()),
function(x) { object.size(get(x)) / 1024^2 })
as.data.frame(size_Mb) %>%
tibble::rownames_to_column('object')
}
source_all_files <- function(path_from_wd = "source") {
for (file in list.files(path_from_wd)) {
source(paste0(path_from_wd, "/", file), encoding = 'UTF-8')
}
}
source_all_files()
|
1e17296fa9d4d54854710e0ce604a0e1b8f4944a
|
5ae9dc9e4052d7e6117c76fbf08041b3871b2e5e
|
/Analysis/sce_clustering_umap_spatial.R
|
eb59c7563b979726fe65c519de1da1cd8ecaf692
|
[] |
no_license
|
LieberInstitute/HumanPilot
|
110074304e850b5110d70067d8022c0be31cb352
|
eca2070bcd09282a8adbdc3de310084834b7cd9e
|
refs/heads/master
| 2023-04-13T13:14:41.101756
| 2023-02-10T16:25:59
| 2023-02-10T16:25:59
| 225,910,046
| 50
| 25
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,501
|
r
|
sce_clustering_umap_spatial.R
|
# Script to calculate clustering
# using small number of UMAP dimensions plus 2 spatial dimensions
# Lukas Weber, Dec 2019
library(SingleCellExperiment)
library(uwot)
library(scran)
library(scater)
library(ggplot2)
library(RColorBrewer)
# ---------
# load data
# ---------
# load scran output file (containing top 50 molecular PCs and 2 spatial coordinates)
load("../../data/Human_DLPFC_Visium_processedData_sce_scran.Rdata")
sce
# select cells from one sample
ix <- colData(sce)$sample_name == 151673
sce <- sce[, ix]
sce
# ----------------
# extract features
# ----------------
# extract PCs
dims_pcs <- reducedDim(sce, type = "PCA")
stopifnot(nrow(dims_pcs) == ncol(sce))
# extract spatial dimensions
dims_spatial <- colData(sce)[, c("imagecol", "imagerow")]
stopifnot(nrow(dims_spatial) == ncol(sce))
# -------------------------
# calculate UMAP dimensions
# -------------------------
# will aim to use top few (e.g. 5-10) UMAP dimensions (question: how much of the
# overall heterogeneity do these really capture?)
# note: calculate UMAP on top 50 PCs for faster runtime (could also calculate on
# all 1942 highly variable genes instead for more accuracy)
# note: calculating UMAP on one sample only (could also calculate UMAP on all
# samples combined, then subset for clustering)
# note: could also use top few PCs for clustering (instead of top few UMAP
# components)
# keep top 50 UMAP components
set.seed(123)
dims_umap <- umap(dims_pcs, n_components = 50)
stopifnot(nrow(dims_umap) == ncol(sce))
# ----------------
# scale dimensions
# ----------------
# need all dimensions (UMAP and spatial) to be on approximately comparable
# scales
# UMAP dimensions are already on a sensible scale, so can leave as is (note:
# don't do z-score scaling since this will scale up the less meaningful UMAP
# compenents)
summary(dims_umap)
mean(dims_umap[, 1])
sd(dims_umap[, 1])
max(abs(dims_umap))
range(dims_umap[, 1])
range(dims_umap[, 2])
range(dims_umap[, 3])
colnames(dims_umap) <- paste0("UMAP_", seq_len(ncol(dims_umap)))
# spatial dimensions: scale to e.g. min -5 and max 5, so they are on roughly
# similar scale as top few UMAP dimensions (note: z-score scaling doesn't really
# make sense for spatial coordinates, which are on a uniform physical scale)
# note: choice of these max and min values is very important! results will be
# highly sensitive to this
summary(as.data.frame(dims_spatial))
range(dims_spatial[, 1])
range(dims_spatial[, 2])
dims_spatial <- apply(as.matrix(dims_spatial), 2, function(col) {
(col - min(col)) / (max(col) - min(col)) * 10 - 5
})
colnames(dims_spatial) <- c("spatial_x", "spatial_y")
summary(dims_spatial)
stopifnot(nrow(dims_spatial) == ncol(sce))
# ----------------------
# graph-based clustering
# ----------------------
# now can run standard Bioconductor graph-based clustering on subset of UMAP
# dimensions and scaled spatial dimensions
# note: graph-based clustering seems better suited than k-means for this
# dataset, since the "layers" in brain data do not have an ellipsoidal shape in
# the spatial feature space. However, for other datasets, e.g. cancer data,
# k-means may also work.
# number of UMAP dimensions to use
n_umap <- 10
dims_clus <- cbind(dims_umap[, seq_len(n_umap), drop = FALSE], dims_spatial)
head(dims_clus)
# clustering: see OSCA book
# note: number of clusters k
# note: use transpose
g <- buildSNNGraph(t(dims_clus), k = 10, d = ncol(dims_clus))
g_walk <- igraph::cluster_walktrap(g)
# default number of clusters (not using this for final results)
#clus <- g_walk$membership
#table(clus)
#stopifnot(length(clus) == ncol(sce))
# choose number of clusters
n_clus <- 8
clus <- igraph::cut_at(g_walk, n = n_clus)
table(clus)
stopifnot(length(clus) == ncol(sce))
# ------------
# plot results
# ------------
# display plot on original spatial coordinates
d_plot <- data.frame(
# get original spatial coordinates (non-scaled) from this sample
# note: y coordinate is reversed
x_coord = colData(sce)[, "imagecol"],
y_coord = -colData(sce)[, "imagerow"],
cluster = as.factor(clus)
)
ggplot(d_plot, aes(x = x_coord, y = y_coord, color = cluster)) +
geom_point(size = 2, alpha = 0.5) +
coord_fixed() +
scale_color_brewer(palette = "Paired") +
theme_bw() +
ggtitle("Clustering on top few UMAP dims plus 2 spatial dims (scaled)")
ggsave("../plots/clustering_UMAP_spatial/plot_clustering_UMAP_spatial.png", width = 7, height = 7)
|
a63c2543f9f080918e84f0c8b65291380be69246
|
860fbdf856ee98ae047700df64f41cd2f6811cc4
|
/R/parser.R
|
fb7f42a3f11ee30e1aea69339acd7a5c0cf88ef8
|
[] |
no_license
|
cran/knitr
|
9409a87da280b3845545ce67840bf0725250d4fa
|
d33e12f849224506d0a33fd10d0eb4225e693ab5
|
refs/heads/master
| 2023-06-07T18:02:02.216980
| 2023-05-25T08:20:08
| 2023-05-25T08:20:08
| 17,696,927
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,208
|
r
|
parser.R
|
## adapted from Hadley's decumar: https://github.com/hadley/decumar
# split input document into groups containing chunks and other texts
# (may contain inline R code)
split_file = function(lines, set.preamble = TRUE, patterns = knit_patterns$get()) {
n = length(lines)
chunk.begin = patterns$chunk.begin; chunk.end = patterns$chunk.end
if (is.null(chunk.begin) || is.null(chunk.end))
return(list(parse_inline(lines, patterns)))
if (!child_mode() && set.preamble) {
set_preamble(lines, patterns) # prepare for tikz option 'standAlone'
}
markdown_mode = identical(patterns, all_patterns$md)
i = group_indices(grepl(chunk.begin, lines), grepl(chunk.end, lines), lines, markdown_mode)
groups = unname(split(lines, i))
if (set.preamble)
knit_concord$set(inlines = sapply(groups, length)) # input line numbers for concordance
# parse 'em all
lapply(groups, function(g) {
block = grepl(chunk.begin, g[1])
if (!set.preamble && !parent_mode()) {
return(if (block) '' else g) # only need to remove chunks to get pure preamble
}
if (block) {
n = length(g)
# remove the optional chunk footer
if (n >= 2 && grepl(chunk.end, g[n])) g = g[-n]
# remove the optional prefix % in code in Rtex mode
g = strip_block(g, patterns$chunk.code)
params.src = if (group_pattern(chunk.begin)) {
extract_params_src(chunk.begin, g[1])
} else ''
parse_block(g[-1], g[1], params.src, markdown_mode)
} else parse_inline(g, patterns)
})
}
extract_params_src = function(chunk.begin, line) {
trimws(gsub(chunk.begin, '\\1', line))
}
#' The code manager to manage code in all chunks
#'
#' This object provides methods to manage code (as character vectors) in all
#' chunks in \pkg{knitr} source documents. For example,
#' \code{knitr::knit_code$get()} returns a named list of all code chunks (the
#' names are chunk labels), and \code{knitr::knit_code$get('foo')} returns the
#' character vector of the code in the chunk with the label \code{foo}.
#' @note The methods on this object include the \code{set()} method (i.e., you
#' could do something like \code{knitr::knit_code$set(foo = "'my precious new
#' code'")}), but we recommend that you do not use this method to modify the
#' content of code chunks, unless you are
#' \href{https://emitanaka.rbind.io/post/knitr-knitr-code/}{as creative as Emi
#' Tanaka} and know what you are doing.
#' @export
knit_code = new_defaults()
# strip the pattern in code
strip_block = function(x, prefix = NULL) {
if (!is.null(prefix) && (length(x) > 1)) {
x[-1L] = sub(prefix, '', x[-1L])
spaces = min(attr(regexpr("^ *", x[-1L]), "match.length"))
if (spaces > 0) x[-1L] = substring(x[-1L], spaces + 1)
}
x
}
# an object to store chunk dependencies; dep_list$get() is of the form list(foo
# = c('chunk', 'labels', 'that', 'depend', 'on', 'chunk', 'foo'))
dep_list = new_defaults()
# separate params and R code in code chunks
parse_block = function(code, header, params.src, markdown_mode = out_format('markdown')) {
params = params.src
engine = 'r'
# consider the syntax ```{engine, opt=val} for chunk headers
if (markdown_mode) {
engine = get_chunk_engine(params)
params = get_chunk_params(params)
}
params = clean_empty_params(params) # rm empty options
# turn ```{engine} into ```{r, engine="engine"}
if (tolower(engine) != 'r') {
params = sprintf('%s, engine="%s"', params, engine)
params = gsub('^\\s*,\\s*', '', params)
}
# for quarto, preserve the actual original params.src and do not remove the engine
if (!is_quarto()) params.src = params
params = parse_params(params)
# remove indent (and possibly markdown blockquote >) from code
if (nzchar(spaces <- get_chunk_indent(header))) {
params$indent = spaces
code = gsub(sprintf('^%s', spaces), '', code)
# in case the trailing spaces of the indent string are trimmed on certain
# lines (e.g. in blockquotes https://github.com/yihui/knitr/issues/1446)
code = gsub(sprintf('^%s', gsub('\\s+$', '', spaces)), '', code)
}
# merge with possible chunk options written as (YAML or CSV) metadata in
# chunk, and remove metadata from code body
parts = partition_chunk(engine, code)
params = merge_list(params, parts$options)
code = parts$code
label = params$label; .knitEnv$labels = c(.knitEnv$labels, label)
if (length(code) || length(params$file) || length(params$code)) {
if (label %in% names(knit_code$get())) {
if (identical(getOption('knitr.duplicate.label'), 'allow')) {
params$label = label = unnamed_chunk(label)
} else stop(
"Duplicate chunk label '", label, "', which has been used for the chunk:\n",
one_string(knit_code$get(label))
)
}
code = as.character(code)
knit_code$set(setNames(list(structure(code, chunk_opts = params)), label))
}
# store dependencies
if (!is.null(deps <- params$dependson)) {
deps = sc_split(deps)
if (is.numeric(deps)) {
deps[deps < 0] = length(.knitEnv$labels) + deps[deps < 0]
deps = .knitEnv$labels[deps[deps > 0]]
}
for (i in deps)
dep_list$set(setNames(list(c(dep_list$get(i), label)), i))
}
# for quarto only
if (is_quarto()) {
params$original.params.src = params.src
params$chunk.echo = isTRUE(params[['echo']])
params$yaml.code = parts$src
attr(params, 'quarto_options') = c('original.params.src', 'chunk.echo', 'yaml.code')
# alias 'warning' explicitly set in chunk metadata to the 'message' option
if (!is.null(parts$options[['warning']])) {
params$message = parts$options[['warning']]
}
}
structure(list(params = params, params.src = params.src), class = 'block')
}
get_chunk_indent = function(header) {
gsub('^([\t >]*).*', '\\1', header)
}
get_chunk_engine = function(params) {
sub('^([a-zA-Z0-9_]+).*$', '\\1', params)
}
get_chunk_params = function(params) {
sub('^([a-zA-Z0-9_]+)', '', params)
}
clean_empty_params = function(params) {
gsub('^\\s*,*|,*\\s*$', '', params) # rm empty options
}
# autoname for unnamed chunk
unnamed_chunk = function(prefix = NULL, i = chunk_counter()) {
if (is.null(prefix)) prefix = opts_knit$get('unnamed.chunk.label')
paste(prefix, i, sep = '-')
}
# parse params from chunk header
parse_params = function(params, label = TRUE) {
if (params == '') return(if (label) list(label = unnamed_chunk()))
res = withCallingHandlers(
eval(parse_only(paste('alist(', quote_label(params), ')'))),
error = function(e) {
message('(*) NOTE: I saw chunk options "', params,
'"\n please go to https://yihui.org/knitr/options',
'\n (it is likely that you forgot to quote "character" options)')
})
# good, now you seem to be using valid R code
idx = which(names(res) == '') # which option is not named?
# remove empty options
for (i in idx) if (identical(res[[i]], alist(,)[[1]])) res[[i]] = NULL
idx = if (is.null(names(res)) && length(res) == 1L) 1L else which(names(res) == '')
if ((n <- length(idx)) > 1L || (length(res) > 1L && is.null(names(res))))
stop('invalid chunk options: ', params,
"\n(all options must be of the form 'tag=value' except the chunk label)")
if (is.null(res$label)) {
if (n == 0L) res$label = '' else names(res)[idx] = 'label'
}
if (!is.character(res$label))
res$label = gsub(' ', '', as.character(as.expression(res$label)))
if (identical(res$label, '')) res$label = if (label) unnamed_chunk()
res
}
# quote the chunk label if necessary
quote_label = function(x) {
x = gsub('^\\s*,?', '', x)
if (grepl('^\\s*[^\'"](,|\\s*$)', x)) {
# <<a,b=1>>= ---> <<'a',b=1>>=
x = gsub('^\\s*([^\'"])(,|\\s*$)', "'\\1'\\2", x)
} else if (grepl('^\\s*[^\'"](,|[^=]*(,|\\s*$))', x)) {
# <<abc,b=1>>= ---> <<'abc',b=1>>=
x = gsub('^\\s*([^\'"][^=]*)(,|\\s*$)', "'\\1'\\2", x)
}
x
}
# comment characters for various languages
comment_chars = list(
`#` = c('awk', 'bash', 'coffee', 'gawk', 'julia', 'octave', 'perl', 'powershell', 'python', 'r', 'ruby', 'sed', 'stan'),
'//' = c('asy', 'cc', 'csharp', 'd3', 'dot', 'fsharp', 'go', 'groovy', 'java', 'js', 'node', 'Rcpp', 'sass', 'scss', 'scala'),
`%` = c('matlab', 'tikz'),
`/* */` = c('c', 'css'),
`* ;` = c('sas'),
`--` = c('haskell', 'lua', 'mysql', 'psql', 'sql'),
`!` = c('fortran', 'fortran95'),
`*` = c('stata')
)
# reshape it using the language name as the index, i.e., from list(char = lang)
# to list(lang = char)
comment_chars = local({
res = list(apl = '\u235D')
for (i in names(comment_chars)) {
chars = comment_chars[[i]]
res = c(res, setNames(rep(list(strsplit(i, ' ')[[1]]), length(chars)), chars))
}
res[order(names(res))]
})
#' Partition chunk options from the code chunk body
#'
#' Chunk options can be written in special comments (e.g., after \verb{#|} for R
#' code chunks) inside a code chunk. This function partitions these options from
#' the chunk body.
#' @param engine The name of the language engine (to determine the appropriate
#' comment character).
#' @param code A character vector (lines of code).
#' @return A list with the following items: \describe{\item{\code{options}}{The
#' parsed options (if any) as a list.} \item{\code{src}}{The part of the input
#' that contains the options.} \item{\code{code}}{The part of the input that
#' contains the code.}}
#' @note Chunk options must be written on \emph{continuous} lines (i.e., all
#' lines must start with the special comment prefix such as \verb{#|}) at the
#' beginning of the chunk body.
#' @export
#' @examples
#' # parse yaml-like items
#' yaml_like = c("#| label: mine", "#| echo: true", "#| fig.width: 8", "#| foo: bar", "1 + 1")
#' writeLines(yaml_like)
#' knitr::partition_chunk("r", yaml_like)
#'
#' # parse CSV syntax
#' csv_like = c("#| mine, echo = TRUE, fig.width = 8, foo = 'bar'", "1 + 1")
#' writeLines(csv_like)
#' knitr::partition_chunk("r", csv_like)
partition_chunk = function(engine, code) {
res = list(yaml = NULL, src = NULL, code = code)
# mask out empty blocks
if (length(code) == 0) return(res)
opt_comment = get_option_comment(engine)
s1 = opt_comment$start
s2 = opt_comment$end
# check for option comments
i1 = startsWith(code, s1)
i2 = endsWith(trimws(code, 'right'), s2)
# if "commentChar| " is not found, try "#| " instead
if (!i1[1] && !identical(s1, '#|')) {
s1 = '#| '; s2 = ''
i1 = startsWith(code, s1); i2 = TRUE
}
m = i1 & i2
# has to have at least one matched line at the beginning
if (!m[[1]]) return(res)
# divide into yaml and code
if (all(m)) {
src = code
code = NULL
} else {
src = head(code, which.min(m) - 1)
code = tail(code, -length(src))
}
# trim right
if (any(i2)) src = trimws(src, 'right')
# extract meta from comments, then parse it
meta = substr(src, nchar(s1) + 1, nchar(src) - nchar(s2))
# see if the metadata looks like YAML or CSV
if (grepl('^[^ :]+:($|\\s)', meta[1])) {
meta = yaml::yaml.load(meta, handlers = list(expr = parse_only))
if (!is.list(meta) || length(names(meta)) == 0) {
warning('Invalid YAML option format in chunk: \n', one_string(meta), '\n')
meta = list()
}
} else {
meta = parse_params(paste(meta, collapse = ''), label = FALSE)
}
# normalize field name 'id' to 'label' if provided
meta$label = unlist(meta[c('label', 'id')])[1]
meta$id = NULL
# convert any option with fig- into fig. and out- to out.
names(meta) = sub('^(fig|out)-', '\\1.', names(meta))
# extract code
if (length(code) > 0 && is_blank(code[[1]])) {
code = code[-1]
src = c(src, '')
}
list(options = meta, src = src, code = code)
}
# Return the option-comment delimiters for a language engine as a list with
# elements `start` (e.g. "#| ") and `end` (closing delimiter, or "" when the
# engine's comment syntax has no closing part, as for most line comments).
get_option_comment = function(engine) {
  # fall back to '#' when the engine has no registered comment chars
  char = comment_chars[[engine]] %n% '#'
  s1 = paste0(char[[1]], '| ')
  # FIX: use if/else instead of scalar ifelse(); ifelse() is meant for
  # vectors and can strip attributes / surprise on non-scalar branches
  s2 = if (length(char) > 1) char[[2]] else ''
  list(start = s1, end = s2)
}
# Print method for parsed chunk ('block') objects: in verbose mode, echo the
# stored chunk code with a ' | ' prefix; otherwise print nothing.
print.block = function(x, ...) {
  if (!opts_knit$get('verbose')) return(invisible(NULL))
  src = knit_code$get(x$params$label)
  # skip chunks that are empty or contain only blank lines
  if (length(src) == 0 || is_blank(src)) return(invisible(NULL))
  cat('\n')
  cat(one_string(' | ', src), '\n')
}
# Extract inline R code fragments (as well as global options) from a
# document. `input` is the vector of text lines; `patterns` supplies the
# regexes for inline code (`inline.code`) and, optionally, for commented-out
# inline code (`inline.comment`). Returns an object of class 'inline'
# recording the merged input, character locations of each fragment, the
# extracted code, and the raw matched source.
parse_inline = function(input, patterns) {
  inline.code = patterns$inline.code; inline.comment = patterns$inline.comment
  if (!is.null(inline.comment)) {
    # lines that contain commented-out inline code
    idx = grepl(inline.comment, input)
    # strip off inline code: replace each inline marker on those lines with
    # its first capture group so the code will not be picked up below
    input[idx] = gsub(inline.code, '\\1', input[idx])
  }
  input = one_string(input) # merge into one line
  # start/end character positions of each inline fragment (0-row if none)
  loc = cbind(start = numeric(0), end = numeric(0))
  if (group_pattern(inline.code)) loc = str_locate(input, inline.code)[[1]]
  code1 = code2 = character()
  if (nrow(loc)) {
    code = t(str_match(input, inline.code))
    if (NCOL(code) >= 2L) {
      # column 1 = full match (raw source); remaining columns = capture
      # groups, concatenated per match to form the code to evaluate
      code1 = code[, 1L]
      code2 = apply(code[, -1L, drop = FALSE], 1, paste, collapse = '')
    }
  }
  structure(
    list(input = input, location = loc, code = code2, code.src = code1),
    class = 'inline'
  )
}
# Print method for 'inline' objects: in verbose mode, list each extracted
# fragment together with its character range in the merged input.
print.inline = function(x, ...) {
  if (!opts_knit$get('verbose')) return(invisible(NULL))
  cat('\n')
  if (nrow(x$location) == 0) return(invisible(NULL))
  info = sprintf(' | %s #%s:%s', x$code, x$location[, 1], x$location[, 2])
  cat(info, sep = '\n')
}
#' Read chunks from an external script
#'
#' Chunks can be put in an external script, and this function reads chunks into
#' the current \pkg{knitr} session; \code{read_demo()} is a convenience function
#' to read a demo script from a package.
#'
#' There are two approaches to read external code into the current session: (1)
#' Use a special separator of the form \code{## ---- chunk-label} (at least four
#' Use a special separator of the from \code{## ---- chunk-label} (at least four
#' dashes before the chunk label) in the script; (2) Manually specify the
#' labels, starting and ending positions of code chunks in the script.
#'
#' The second approach will be used only when \code{labels} is not \code{NULL}.
#' For this approach, if \code{from} is \code{NULL}, the starting position is 1;
#' if \code{to} is \code{NULL}, each of its element takes the next element of
#' \code{from} minus 1, and the last element of \code{to} will be the length of
#' \code{lines} (e.g. when \code{from = c(1, 3, 8)} and the script has 10 lines
#' in total, \code{to} will be \code{c(2, 7, 10)}). Alternatively, \code{from}
#' and \code{to} can be character vectors as regular expressions to specify the
#' positions; when their length is 1, the single regular expression will be
#' matched against the \code{lines} vector, otherwise each element of
#' \code{from}/\code{to} is matched against \code{lines} and the match is
#' supposed to be unique so that the numeric positions returned from
#' \code{grep()} will be of the same length of \code{from}/\code{to}. Note
#' \code{labels} always has to match the length of \code{from} and \code{to}.
#' @param path Path to the R script.
#' @param lines Character vector of lines of code. By default, this is read from
#' \code{path}.
#' @param labels Character vector of chunk labels (default \code{NULL}).
#' @param from,to Numeric vector specifying the starting/ending line numbers of
#' code chunks, or a character vector; see Details.
#' @param from.offset,to.offset Offsets to be added to \code{from}/\code{to}.
#' @param roxygen_comments Logical dictating whether to keep trailing
#' roxygen-style comments from code chunks in addition to whitespace
#' @return As a side effect, code chunks are read into the current session so
#' that future chunks can (re)use the code by chunk label references. If an
#' external chunk has the same label as a chunk in the current session, chunk
#' label references by future chunks will refer to the external chunk.
#' @references \url{https://yihui.org/knitr/demo/externalization/}
#' @note This function can only be used in a chunk which is \emph{not} cached
#' (chunk option \code{cache = FALSE}), and the code is read and stored in the
#' current session \emph{without} being executed (to actually run the code,
#' you have to use a chunk with a corresponding label).
#' @author Yihui Xie; the idea of the second approach came from Peter
#' Ruckdeschel (author of the \pkg{SweaveListingUtils} package)
#' @export
#' @examples ## put this in foo.R and read_chunk('foo.R')
#'
#' ## ---- my-label ----
#' 1+1
#' lm(y~x, data=data.frame(x=1:10,y=rnorm(10)))
#'
#' ## later you can use <<my-label>>= to reference this chunk
#'
#' ## the 2nd approach
#' code = c("#@@a", '1+1', "#@@b", "#@@a", 'rnorm(10)', "#@@b")
#' read_chunk(lines = code, labels = 'foo') # put all code into one chunk named foo
#' read_chunk(lines = code, labels = 'foo', from = 2, to = 2) # line 2 into chunk foo
#' read_chunk(lines = code, labels = c('foo', 'bar'), from = c(1, 4), to = c(3, 6))
#' # automatically figure out 'to'
#' read_chunk(lines = code, labels = c('foo', 'bar'), from = c(1, 4))
#' read_chunk(lines = code, labels = c('foo', 'bar'), from = "^#@@a", to = "^#@@b")
#' read_chunk(lines = code, labels = c('foo', 'bar'), from = "^#@@a", to = "^#@@b", from.offset = 1, to.offset = -1)
#'
#' ## later you can use, e.g., <<foo>>=
#' knitr::knit_code$get() # use this to check chunks in the current session
#' knitr::knit_code$restore() # clean up the session
read_chunk = function(
  path, lines = read_utf8(path), labels = NULL, from = NULL, to = NULL,
  from.offset = 0L, to.offset = 0L, roxygen_comments = TRUE
) {
  if (length(lines) == 0) {
    warning('code is empty')
    return(invisible())
  }
  lab = .sep.label
  # Approach 2: labels (and possibly from/to positions) given explicitly.
  if (!is.null(labels)) {
    if (is.null(from)) from = 1L
    if (!is.numeric(from)) from = pattern_index(from, lines)
    if (is.null(to)) to = c(from[-1L] - 1L, length(lines))
    if (!is.numeric(to)) to = pattern_index(to, lines)
    stopifnot(length(labels) == length(from), length(from) == length(to))
    from = from + from.offset
    to = to + to.offset
    chunks = list()
    for (k in seq_along(labels)) {
      chunks[[labels[k]]] = strip_white(lines[from[k]:to[k]])
    }
    knit_code$set(chunks)
    return(invisible())
  }
  # Approach 1: split the script on '## ---- label' separator lines.
  if (!group_pattern(lab)) return(invisible())
  group.id = cumsum(grepl(lab, lines))
  if (group.id[1] == 0) {
    # no chunk header at the top: prepend a dummy header line
    group.id = c(0, group.id)
    lines = c('', lines)
  }
  groups = unname(split(lines, group.id))
  headers = vapply(groups, function(g) g[[1]], character(1))
  labels = trimws(gsub(lab, '\\3', headers))
  labels = gsub(',.*', '', labels) # strip off possible chunk options
  chunks = lapply(groups, strip_chunk, roxygen_comments)
  for (i in which(!nzchar(labels))) labels[i] = unnamed_chunk()
  knit_code$set(setNames(chunks, labels))
}
#' @rdname read_chunk
#' @param topic,package Name of the demo and the package. See
#' \code{utils::\link{demo}}.
#' @param ... Arguments passed to \code{\link{read_chunk}}.
#' @export
read_demo = function(topic, package = NULL, ...) {
  # locate all demo scripts shipped with the package, then read the one
  # whose base name (minus extension) matches `topic`
  demo_files = list.files(file.path(find.package(package), 'demo'), full.names = TRUE)
  matched = demo_files[sans_ext(basename(demo_files)) == topic]
  read_chunk(matched, ...)
}
# Convert regex patterns to numeric indices in a character vector.
# A single pattern may match multiple lines (all indices are returned);
# each pattern in a multi-pattern vector must match exactly one line.
pattern_index = function(pattern, text) {
  if (length(pattern) == 1L) {
    idx = grep(pattern, text)
    if (length(idx) == 0L) stop('pattern ', pattern, ' not found')
    return(idx)
  }
  # vapply instead of sapply: guarantees an integer vector even for
  # degenerate inputs (sapply's return type depends on its input)
  vapply(pattern, function(p) {
    idx = grep(p, text)
    if (length(idx) != 1L) stop('non-unique matches of ', p)
    idx
  }, integer(1L))
}
# Drop a group's separator/header line, then strip surrounding whitespace
# lines; when roxygen_comments = FALSE, trailing/leading roxygen-style
# comment lines (#') are stripped as well.
strip_chunk = function(x, roxygen_comments = TRUE) {
  body = x[-1]
  removable = if (roxygen_comments) is_blank else function(line) {
    is_blank(line) || grepl("^#+'[ ]?", line)
  }
  strip_white(body, removable)
}
# Remove leading and trailing lines of `x` that satisfy `test_strip`
# (by default, blank lines). Interior matching lines are kept.
strip_white = function(x, test_strip = is_blank) {
  while (length(x) > 0 && test_strip(x[1])) x = x[-1]
  while (length(x) > 0 && test_strip(x[length(x)])) x = x[-length(x)]
  x
}
# (Recursively) parse chunk references inside a chunk: lines matching the
# ref.chunk pattern (e.g. <<label>>) are replaced by the code stored under
# those labels, re-indented to the indentation of the reference line.
parse_chunk = function(x, rc = knit_patterns$get('ref.chunk')) {
  if (length(x) == 0L) return(x)
  x = c(x) # drop attributes of code (e.g. chunk_opts)
  # nothing to do when no reference pattern is set or no line matches it
  if (!group_pattern(rc) || !any(idx <- grepl(rc, x))) return(x)
  labels = sub(rc, '\\1', x[idx])
  code = knit_code$get(labels)
  # leading whitespace of each reference line, re-applied to inserted code
  indent = gsub('^(\\s*).*', '\\1', x[idx])
  # knit_code$get() returns the code directly for a single label, a list
  # for multiple labels; normalize to a list either way
  if (length(labels) <= 1L) code = list(code)
  code = mapply(indent_block, code, indent, SIMPLIFY = FALSE, USE.NAMES = FALSE)
  x = as.list(x)
  # inserted code may itself contain references, hence the recursion
  x[idx] = lapply(code, function(z) parse_chunk(z, rc))
  unlist(x, use.names = FALSE)
}
# split text lines into groups of code and text chunks
# Returns one group index per line: odd indices mark text, even indices mark
# code chunks. `chunk.begin`/`chunk.end` are per-line logicals saying whether
# the line matches the chunk-begin/-end patterns; `lines` (raw text) and
# `is.md` matter only for Markdown, where opening and closing fences must
# agree in backtick count and indentation.
group_indices = function(chunk.begin, chunk.end, lines = NA, is.md = FALSE) {
  in.chunk = FALSE # whether inside a chunk now
  pattern.end = NA # the expected chunk end pattern (derived from header)
  b = NA # the last found chunk header
  # TODO: for now we only disallow unmatched delimiters during R CMD check
  # that's not running on CRAN; we will fully disallow it in the future (#2057)
  signal = if (is_R_CMD_check() && !(is_cran() || is_bioc())) stop2 else warning2
  g = NA # group index: odd - text; even - chunk
  # stateful per-line classifier: mutates in.chunk/pattern.end/b/g via <<-
  fun = function(is.begin, is.end, line, i) {
    # the first line decides whether we start in a chunk (0) or in text (1)
    if (i == 1) {
      g <<- if (is.begin) {
        in.chunk <<- TRUE
        b <<- i
        0
      } else 1
      return(g)
    }
    # begin of another chunk is found while the previous chunk is not complete yet
    if (in.chunk && is.begin) {
      if (!is.md || match_chunk_begin(pattern.end, line)) {
        g <<- g + 2 # same amount of ` as previous chunk, so should be a new chunk
        if (is.md) b <<- i
      } # otherwise ignore the chunk header
      return(g)
    }
    if (in.chunk && is.end && match_chunk_end(pattern.end, line, i, b, lines, signal)) {
      in.chunk <<- FALSE
      g <<- g + 1
      return(g - 1) # don't use incremented g yet; use it in the next step
    }
    if (!in.chunk && is.begin) {
      in.chunk <<- TRUE
      if (is.md) {
        # remember this fence so the closing fence must match it exactly
        pattern.end <<- sub('(^[\t >]*```+).*', '^\\1\\\\s*$', line)
        b <<- i
      }
      g <<- g + 2 - g%%2 # make sure g is even
    }
    g
  }
  mapply(fun, chunk.begin, chunk.end, lines, seq_along(chunk.begin))
}
# Test whether line(s) `x` open a chunk that matches the fence recorded in
# `pattern.end`: the leading backtick run of the fence pattern is turned into
# a chunk-begin regex (fence followed by '{' by default) and matched to `x`.
match_chunk_begin = function(pattern.end, x, pattern = '^\\1\\\\{') {
  begin.pattern = gsub('^([^`]*`+).*', pattern, pattern.end)
  grepl(begin.pattern, x)
}
# Decide whether `line` (line number `i`) closes the chunk opened on line
# `b`. `pattern` is the exact fence pattern derived from the chunk header
# (NA when no exact match is required). When the fence does not match
# exactly and no better closing fence follows, the line is still accepted,
# but `signal` (warning or error, chosen by the caller) is raised.
match_chunk_end = function(pattern, line, i, b, lines, signal = stop) {
  # exact match (or no expected pattern): this is a proper chunk end
  if (is.na(pattern) || grepl(pattern, line)) return(TRUE)
  n = length(lines)
  # if the exact match was not found, look ahead to see if there is another
  # chunk end that is an exact match before the next chunk begin
  if (i < n && length(k <- grep(pattern, lines[(i + 1):n]))) {
    k = k[1]
    if (k == 1) return(FALSE) # the next line is real chunk end
    # no other chunk headers before the new next exact chunk end
    if (!any(match_chunk_begin(pattern, lines[i + 1:(k - 1)], '^\\1`*\\\\{')))
      return(FALSE)
  }
  # mismatched but accepted: tell the user to fix the delimiters
  signal(
    'The closing backticks on line ', i, ' ("', line, '") in ', current_input(),
    ' do not match the opening backticks "',
    gsub('\\^(\\s*`+).*', '\\1', pattern), '" on line ', b, '. You are recommended to ',
    'fix either the opening or closing delimiter of the code chunk to use exactly ',
    'the same numbers of backticks and same level of indentation (or blockquote).'
  )
  TRUE
}
#' Get all chunk labels in a document
#'
#' The function \code{all_labels()} returns all chunk labels as a character
#' vector. Optionally, you can specify a series of conditions to filter the
#' labels. The function `all_rcpp_labels()` is a wrapper function for
#' \code{all_labels(engine == 'Rcpp')}.
#'
#' For example, suppose the condition expression is \code{engine == 'Rcpp'}, the
#' object \code{engine} is the local chunk option \code{engine}. If an
#' expression fails to be evaluated (e.g. when a certain object does not exist),
#' \code{FALSE} is returned and the label for this chunk will be filtered out.
#' @param ... A vector of R expressions, each of which should return \code{TRUE}
#' or \code{FALSE}. The expressions are evaluated using the \emph{local} chunk
#' options of each code chunk as the environment, which means global chunk
#' options are not considered when evaluating these expressions. For example,
#' if you set the global chunk option \code{opts_chunk$set(purl = TRUE)},
#' \code{all_labels(purl == TRUE)} will \emph{not} return the labels of all
#' code chunks, but will only return the labels of those code chunks that have
#' local chunk options \code{purl = TRUE}.
#' @note Empty code chunks are always ignored, including those chunks that are
#' empty in the original document but filled with code using chunk options
#' such as \code{ref.label} or \code{code}.
#' @return A character vector.
#' @export
#' @examples # the examples below are meaningless unless you put them in a knitr document
#' all_labels()
#' all_labels(engine == 'Rcpp')
#' all_labels(echo == FALSE && results != 'hide')
#' # or separate the two conditions
#' all_labels(echo == FALSE, results != 'hide')
all_labels = function(...) {
  # capture the filter expressions unevaluated (non-standard evaluation)
  cond = as.list(match.call())[-1]
  code = knit_code$get()
  labels = names(code)
  # no conditions: return all labels
  if (length(cond) == 0) return(labels)
  # local chunk options are stored as the 'chunk_opts' attribute of each
  # chunk's code; each one serves as the evaluation environment below
  params = lapply(code, attr, 'chunk_opts')
  idx = rep_len(TRUE, length(labels))
  for (i in seq_along(cond)) {
    for (j in seq_along(params)) {
      # need tryCatch() because the expression cond[[i]] may trigger an error
      # when any variable is not found, e.g. not all chunks have the engine
      # option when the condition is engine == 'Rcpp'
      try_eval = function(expr) tryCatch(
        eval(expr, envir = params[[j]], enclos = knit_global()),
        error = function(e) FALSE
      )
      # a chunk stays selected only if it satisfies every condition so far
      if (idx[j]) {
        res = try_eval(cond[[i]])
        # the condition could be evaluated to an expression; see all_rcpp_labels()
        if (is.expression(res)) res = try_eval(res)
        idx[j] = res
      }
    }
  }
  labels[idx]
}
#' @rdname all_labels
#' @export
# Convenience wrapper: labels of all chunks whose engine is 'Rcpp'. The
# condition is wrapped in expression() so that all_labels() evaluates it
# lazily against each chunk's local options (see its is.expression() branch).
all_rcpp_labels = function(...) all_labels(expression(engine == 'Rcpp'), ...)
#' Wrap code using the inline R expression syntax
#'
#' This is a convenience function to write the "source code" of inline R
#' expressions. For example, if you want to write \samp{`r 1+1`} literally in an
#' R Markdown document, you may write \samp{`` `r knitr::inline_expr('1+1')`
#' ``}; for Rnw documents, this may be
#' \samp{\verb|\Sexpr{knitr::inline_expr('1+1')}|}.
#' @param code Character string of the inline R source code.
#' @param syntax A character string to specify the syntax, e.g. \code{rnw},
#' \code{html}, or \code{md}. If not specified, this will be guessed from
#' the knitting context.
#' @return A character string marked up using the inline R code syntax.
#' @export
#' @examples library(knitr)
#' inline_expr('1+1', 'rnw'); inline_expr('1+1', 'html'); inline_expr('1+1', 'md')
inline_expr = function(code, syntax) {
  # the code must be exactly one character string
  if (!is.character(code) || length(code) != 1)
    stop('The inline code must be a character string')
  if (missing(syntax)) {
    # infer the document format from the inline pattern currently in use
    inline = knit_patterns$get('inline.code')
    if (is.null(inline)) stop('inline_expr() must be called in a knitting process')
    pat = NULL
    for (fmt in names(all_patterns)) {
      if (inline == all_patterns[[fmt]][['inline.code']]) {
        pat = fmt
        break
      }
    }
  } else {
    pat = syntax
  }
  if (is.null(pat)) stop('Unknown document format')
  # pick the inline-code template for the format and fill in the code
  template = switch(
    pat, rnw = '\\Sexpr{%s}', tex = '\\rinline{%s}', html = '<!--rinline %s -->',
    md = '`r %s`', rst = ':r:`%s`', asciidoc = '`r %s`', textile = '@r %s@',
    stop('Unknown syntax ', pat)
  )
  sprintf(template, code)
}
#' Convert the in-header chunk option syntax to the in-body syntax
#'
#' This is a helper function for moving chunk options from the chunk header to
#' the chunk body using the new syntax.
#' @param input File path to the document with code chunks to convert.
#' @param output The default \code{NULL} will output to console. Other values
#' can be a file path to write the converted content into or a function which
#' takes \code{input} as argument and returns a file path to write into (e.g.,
#' \code{output = identity} to overwrite the input file).
#' @param type This determines how the in-body options will be formatted.
#' \code{"mutiline"} (the default, except for \file{qmd} documents, for which
#' the default is \code{"yaml"}) will write each chunk option on a separate
#' line. Long chunk option values will be wrapped onto several lines, and you
#' can use \code{width = 0} to keep one line per option only. \code{"wrap"}
#' will wrap all chunk options together using
#' \code{\link[base:strwrap]{base::strwrap}()}. \code{"yaml"} will convert
#' chunk options to YAML.
#' @param width An integer passed to \code{base::strwrap()} for \code{type =
#' "wrap"} and \code{type = "multiline"}. If set to \code{0}, deactivate the
#' wrapping (for \code{type = "multiline"} only).
#' @return A character vector of converted \code{input} when \code{output =
#' NULL}. The output file path with converted content otherwise.
#' @note Learn more about the new chunk option syntax in
#' \url{https://yihui.org/en/2022/01/knitr-news/}
#' @section About \pkg{knitr} option syntax:
#'
#' The historical chunk option syntax puts chunk options in the chunk header using
#' valid R syntax. This is an example for \verb{.Rmd} document
#' \preformatted{
#' ```\{r, echo = FALSE, fig.width = 10\}
#' ```
#' }
#'
#' New syntax allows to pass option inside the chunk using several variants
#' \itemize{
#' \item Passing options one per line using valid R syntax. This corresponds to \code{convert_chunk_header(type = "multiline")}.
#' \preformatted{
#' ```\{r\}
#' #| echo = FALSE,
#' #| fig.width = 10
#' ```
#' }
#'
#' \item Passing option part from header in-chunk with several line if wrapping is
#' needed. This corresponds to \code{convert_chunk_header(type = "wrap")}
#' \preformatted{
#' ```\{r\}
#' #| echo = FALSE, fig.width = 10
#' ```
#' }
#' \item Passing options key value pairs in-chunk using YAML syntax. Values are no
#' more R expression but valid YAML syntax. This corresponds to
#' \code{convert_chunk_header(type = "yaml")} (not implement yet).
#' \preformatted{```\{r\}
#' #| echo: false,
#' #| fig.width: 10
#' ```
#' }
#' }
#' @examples
#' knitr_example = function(...) system.file('examples', ..., package = 'knitr')
#' # Convert a document for multiline type
#' convert_chunk_header(knitr_example('knitr-minimal.Rmd'))
#' # Convert a document for wrap type
#' convert_chunk_header(knitr_example('knitr-minimal.Rmd'), type = "wrap")
#' # Reduce default wrapping width
#' convert_chunk_header(knitr_example('knitr-minimal.Rmd'), type = "wrap", width = 0.6 * getOption('width'))
#' \dontrun{
#' # Explicitly name the output
#' convert_chunk_header('test.Rmd', output = 'test2.Rmd')
#' # Overwrite the input
#' convert_chunk_header('test.Rmd', output = identity)
#' # Use a custom function to name the output
#' convert_chunk_header('test.Rmd', output = \(f) sprintf('%s-new.%s', xfun::sans_ext(f), xfun::file_ext(f)))
#' }
#' @export
convert_chunk_header = function(
  input, output = NULL, type = c('multiline', 'wrap', 'yaml'),
  width = 0.9 * getOption('width')
) {
  # extract fenced header information
  text = xfun::read_utf8(input)
  ext = xfun::file_ext(input)
  if (missing(type) && ext == 'qmd') type = 'yaml' # default to yaml for Quarto
  type = match.arg(type)
  # which syntax family the document uses (md, rnw, ...)
  pattern = detect_pattern(text, ext)
  # no code chunk in brew file
  if (pattern == 'brew') return()
  markdown_mode = pattern == 'md'
  chunk_begin = all_patterns[[pattern]]$chunk.begin
  # counter for inserted lines: indices into new_text drift by this amount
  # as option lines are inserted after each converted chunk header
  nb_added = 0L
  new_text = text
  for (i in grep(chunk_begin, text)) {
    # transform each chunk one by one
    indent = get_chunk_indent(text[i])
    header = extract_params_src(chunk_begin, text[i])
    # outside Markdown the whole header is the option string and engine is R
    engine = if (markdown_mode) get_chunk_engine(header) else 'r'
    params = if (markdown_mode) get_chunk_params(header) else header
    # if no params nothing to format
    if (params == '') next
    params2 = clean_empty_params(params)
    params2 = trimws(clean_empty_params(params2))
    # select the correct prefix char (e.g `#|`)
    opt_chars = get_option_comment(engine)
    prefix = paste0(indent, opt_chars$start)
    # clean old chunk keeping only engine
    new_text[i + nb_added] = gsub(params, '', text[i], fixed = TRUE)
    # format new chunk
    if (type == 'wrap') {
      # simple line wrapping of R code
      params3 = strwrap(params2, width, prefix = prefix)
    } else if (type == 'multiline') {
      # one option per line of the form `key = value,`
      res = parse_params(params2, label = FALSE)
      params3 = sprintf('%s = %s,', names(res), deparsed_string(res))
      # remove trailing comma from the last element
      last = length(params3)
      params3[last] = gsub(',$', '', params3[last])
      # wrap long single line and add prefix (width <= 0 disables wrapping)
      params3 = if (width <= 0) paste0(prefix, params3) else {
        strwrap(params3, width, prefix = prefix)
      }
    } else {
      params3 = parse_params(params2, label = FALSE)
      # fix un-evaluated options for yaml by transforming to !expr val
      params3 = lapply(params3, function(x) {
        if (is.symbol(x) || is.language(x)) {
          x = deparse(x, 500L)
          attr(x, 'tag') = '!expr'
        }
        x
      })
      # convert to yaml and add prefix
      params3 = strsplit(yaml::as.yaml(
        params3, handlers = list(
          # true / false instead of no
          logical = function(x) {
            x = tolower(x)
            class(x) = 'verbatim'
            x
          }), line.sep = '\n'), '\n')[[1]]
      params3 = paste0(prefix, params3)
    }
    if (nzchar(opt_chars$end)) params3 = paste0(params3, opt_chars$end)
    # insert new chunk header
    new_text = append(new_text, params3, after = i + nb_added)
    nb_added = nb_added + length(params3)
  }
  if (is.null(output)) return(new_text)
  # otherwise write to file
  if (is.function(output)) output = output(input)
  xfun::write_utf8(new_text, output)
  invisible(output)
}
# TODO: when R 4.0.0 is minimal version, switch to deparse1()
# Deparse each expression in `exprs` to a single-line string (long
# expressions that deparse() wraps are re-joined with spaces). Returns a
# named character vector; vapply guarantees character(0) for empty input
# (unlist(lapply(...)) used to return NULL there).
deparsed_string = function(exprs) {
  vapply(exprs, function(x) paste(deparse(x, 500L), collapse = ' '), character(1L))
}
|
b147cf20b52f91a74185a588871972496bd33bfe
|
8c64577dff8fadca97574f5919a26b4e6b50bbbd
|
/JASA_Supplement_Burkina_Faso/Simulation_Study_RunOnce.R
|
1272da43d73da977ad3aaa67ebcff656a6762f14
|
[] |
no_license
|
VanFerreira/Bayesian-Reconstruction
|
fb9958239bf5d621da5f6f3d2b2004076d0b07fa
|
d52d187ca1ef846462b9d66f29d2821fc9b283b6
|
refs/heads/master
| 2020-09-25T06:30:08.813105
| 2019-01-26T20:40:27
| 2019-01-26T20:40:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,790
|
r
|
Simulation_Study_RunOnce.R
|
################################################################################
###
### TITLE: Simulation_Study_RunOnce.R
###
### DATE: 23 May 2012
###
### AUTHOR: Mark C. Wheldon
###
### DESC: Run one replicate of the simulation study in
### "Reconstructing Past Populations with Uncertainty from
### Fragmentary Data" submitted to Journal of the American
### Statistical Association" to produce output in
### Figure 2.
###
### REFERENCE: Wheldon, M. C., Raftery, A. E., Clark, S. J.,
### & Gerland, P. (2013). Reconstructing Past
### Populations with Uncertainty from Fragmentary
### Data. Journal of the American Statistical
### Association, 108(501),
### 96–110. http://doi.org/10.1080/01621459.2012.737729
###
### LICENCE: Released under the Creative Commons BY-NC-SA Licence
### (https://creativecommons.org).
###
### DISCLAIMER: The views and opinions expressed in this work
### are those of the authors and do not
### necessarily represent those of the United
### Nations. This work has not been formally
### edited and cleared by the United Nations.
###
###-----------------------------------------------------------------------------
###
### SYNOPSIS:
###
### This script runs the simulation study in Section 4 of the paper once to
### produce the plots in Figure 2. The full simulation study is in the file
### 'Simulation_Study_Run_Full.R'.
###
###-----------------------------------------------------------------------------
###
### INPUT FILES:
###
### This script expects to find the following files:
### - pop_reconstruction_functions.R
###
###-----------------------------------------------------------------------------
###
### OUTPUTS:
###
### Figures and tables that appear in the paper are placed in the directories
### outputs/plots/ and outputs/tables/. These directories also contain .Rdata and
### .csv files containing the data values used in the plots.
###
################################################################################
################################################################################
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### * !!! THINGS YOU MUST SET !!!
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
################################################################################
## Change the working directory if needed (i.e., the directory this script is
## going to be run from).
# Pick the base path per platform: "~" on unix-alikes; on the authors'
# Windows setup, home is mapped to the T:/ drive.
if(identical(.Platform$OS.type, "unix")) {
  home.path <- "~"
} else if(identical(.Platform$OS.type, "windows")) {
  home.path <- "T:/"
}
# NOTE(review): setwd() makes the script machine-specific; every relative
# path below (outputs/, perform_parallel_logs/) resolves against this
# directory, so adjust it before running elsewhere.
setwd(file.path(home.path
                ,"Documents", "PPGp_Working", "TEST_JASA_Scripts"
                ))
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
################################################################################
###
### * SET UP
###
################################################################################
## Libraries
library(coda)
library(reshape)
library(gdata)
library(lattice)
## Create directories for output. Use showWarnings = FALSE and
## recursive = TRUE so re-running the script does not emit a warning for
## every directory that already exists, and parent directories are created
## automatically.
output.dirs <- c(
    "outputs",
    "outputs/plots",
    "outputs/additional_plots",
    "outputs/tables",
    "perform_parallel_logs",
    "perform_parallel_logs/DumpFiles",
    "perform_parallel_logs/RprofFiles",
    "perform_parallel_logs/SinkFiles"
)
for (d in output.dirs) dir.create(d, showWarnings = FALSE, recursive = TRUE)
###
### Source files
###
## Population reconstruction functions
source("pop_reconstruction_functions.R")
###
### * CREATE NECESSARY OBJECTS
###
################################################################################
###
### ** 'True' vital rates (Table 1 in the paper)
###
################################################################################
## Age-specific fertility rates: rows = 5-year age groups (labels are the
## lower bounds), columns = 5-year periods; matrix() fills column-wise, so
## each run of four values is one period. Only the "5" and "10" rows are
## non-zero (matching fert.rows = c(2, 3) in the estimation call below).
asFertTRUE.mat <-
    matrix(c(0, 0.4, 0.3, 0, 0, 0.4, 0.3, 0, 0, 0.4, 0.3, 0, 0,
             0.4, 0.3, 0)
           ,nrow = 4
           ,dimnames = list(c("0", "5", "10", "15")
            ,c("[1960, 1965)", "[1965, 1970)", "[1970, 1975)", "[1975, 1980)"))
           )
## Age-specific survival proportions; the extra "20+" row is the open-ended
## age group.
asSurvTRUE.mat <-
    matrix(c(0.9, 0.95, 0.85, 0.8, 0.1, 0.9, 0.95, 0.85, 0.8,
             0.1, 0.9, 0.95, 0.85, 0.8, 0.1, 0.9, 0.95, 0.85, 0.8, 0.1)
           ,nrow = 5
           ,dimnames = list(c("0", "5", "10", "15", "20+"), c("[1960, 1965)",
            "[1965, 1970)", "[1970, 1975)", "[1975, 1980)"))
           )
## Age-specific net migration proportions (negative = net out-migration).
asMigTRUE.mat <-
    matrix(c(-0.025, -0.05, -0.055, -0.005, -0.05, -0.1, -0.11,
             -0.01, 0.025, 0.05, 0.055, 0.005, 0.05, 0.1, 0.11, 0.01)
           ,nrow = 4
           ,dimnames = list(c("0", "5", "10", "15"), c("[1960, 1965)",
            "[1965, 1970)", "[1970, 1975)", "[1975, 1980)"))
           )
## Baseline population counts by age in 1960.
baselineTRUE.mat <-
    matrix(c(7500, 6000, 4000, 3000), nrow = 4
           ,dimnames = list(c("0", "5", "10", "15"), "1960")
           )
## 'True' census counts by age at each subsequent census year.
censusTRUE.mat <-
    matrix(c(8482, 6886, 4862, 3404, 9453, 7512, 5293, 3998, 11436
             ,9280, 6690, 4762, 14504, 11600, 8651, 6149)
           ,nrow = 4
           ,dimnames = list(c("0", "5", "10", "15")
            ,c("1965", "1970", "1975", "1980"))
           )
## Combine all in a list for input into simulation study function
## (passed below as true.values = tvr).
tvr <- list(asFertTRUE.mat = asFertTRUE.mat
            ,asSurvTRUE.mat = asSurvTRUE.mat
            ,asMigTRUE.mat = asMigTRUE.mat
            ,baselineTRUE.mat = baselineTRUE.mat
            ,censusTRUE.mat = censusTRUE.mat
            )
###
### ** Level 4 hyperparameters (alpha and beta in Table 3 of the paper)
###
################################################################################
## (al, be) = (alpha, beta) pairs from Table 3 of the paper. The suffixes
## presumably denote fertility (f), survival (s), migration (g) and
## population counts (n), matching the tvr components above — confirm
## against Table 3 / popRecon.sampler().
hyper.params <-
    list(al.f = 1
         ,be.f = 0.0109
         ,al.s = 1
         ,be.s = 0.0109
         ,al.g = 1
         ,be.g = 0.0436
         ,al.n = 1
         ,be.n = 0.0109
         )
###
### ** Control parameters for simulation and reconstruction
###
################################################################################
###
### Number of iterations to use for the reconstruction
###
## Don't set this to less than 5000 otherwise raftery.diag might fail.
## Chain is re-run if Metropolis acceptance proportions are outside [0.1,0.5] or
## Raftery-Lewis diagnostic suggests a longer chain.
n.iter <- 9000   # total MCMC iterations (keep >= 5000; see note above)
burn.in <- 100   # iterations discarded as burn-in
# NOTE(review): n.iter/burn.in are not passed to the estimation call below,
# which uses start.iter = 5E3 and start.burn.in = 600 instead — confirm
# whether these two settings are still used anywhere.
###
### * RUN THE SIMULATION
###
################################################################################
###
### ** Run Once (generates output for Figure 2)
###
################################################################################
## Seed (fixed for reproducibility of the single replicate)
set.seed(1)
## Run one replicate of the simulation study. simStudy.estimation.once()
## and sim.study.prop.vars are presumably defined in
## pop_reconstruction_functions.R sourced above — confirm there. The chain
## is re-run if acceptance proportions fall outside [ar.lower, ar.upper]
## or the Raftery-Lewis diagnostic asks for a longer chain (see comments
## above n.iter).
simulationStudy.output.run.once <-
    simStudy.estimation.once(#.. the array determining number of times to run
        rep.ar.count =1
        #.. sim stats
        ,runs.per.node=1, desired.overall.reps=1, overall.reps=1
        ,cluster.size=1
        #.. coverage level
        ,alpha = 0.05
        #.. algorithm parameters
        ,start.iter = 5E3
        ,start.burn.in = 600
        ,prop.vars = sim.study.prop.vars
        ,max.tune.reruns = 5
        ,max.iter = 5E4, max.burn.in = 2E3
        ,min.iter = 5E3, min.burn.in = 600
        ,runRaftLew = TRUE
        ,checkAR = TRUE # check acceptance proportions
        ,ar.lower = 0.1, ar.upper = 0.5
        #.. est model arguments
        ,ccmp.f = "popRecon.ccmp.female"
        , age.size=5, census.columns =2:5, fert.rows=c(2, 3)
        ,s.tol = 10^(-10)
        ,verb = TRUE
        #.. hyper parameters
        ,hyper.params = hyper.params
        #.. source functions
        ,estMod.f.source = "pop_reconstruction_functions.R"
        #.. function names
        ,estMod.f = "popRecon.sampler"
        #.. true values
        ,true.values = tvr
        #.. profile?
        ,Rprof.file.path = "perform_parallel_logs/RprofFiles/"
        #.. sink?
        ,sink.file.path = "perform_parallel_logs/SinkFiles/"
        #.. dump?
        ,dump.file.path = "perform_parallel_logs/DumpFiles/"
        #.. save output?
        ,save.file = "outputs/Simulation_Study_run_once_FULL_RESULTS.Rdata"
        )[[1]] # untangle list
## Save the coverage results separately from the full MCMC output written
## via save.file above.
save(simulationStudy.output.run.once
     ,file = "outputs/Simulation_Study_run_once_COVERAGE_RESULTS.Rdata"
     )
###
### ** Figure 2
###
################################################################################
## Reload the full MCMC results written by the run above; this is expected
## to provide simulationStudy.mcmc.samples used below.
load(file = "outputs/Simulation_Study_run_once_FULL_RESULTS.Rdata")
## Plotting parameters. Prefixes (per the legends below): t. = truth,
## m. = initial estimates, qMed. = posterior median, qLim. = posterior
## interval limits. Colors come from the current lattice theme.
t.col <- trellis.par.get("superpose.line")$col[1]
m.col <- trellis.par.get("superpose.line")$col[2]
qMed.col <- trellis.par.get("superpose.line")$col[3]
qLim.col <- qMed.col
t.pch <- 1
m.pch <- 3
qMed.pch <- 4
qLim.pch <- NA   # interval limits are drawn as lines only (no symbol)
t.lty <- 5
m.lty <- 2
qMed.lty <- 1
qLim.lty <- 4
m.cex <- 0.8
qMed.cex <- 0.8
qLim.cex <- 0.8
t.lwd <- 2
m.lwd <- 2
qMed.lwd <- 2
qLim.lwd <- 1
# close any graphics devices left open from a previous run
graphics.off()
###
### Figure 2 (a). Age Specific Fertility Rate
###
## Quantiles of posterior sample: 2.5%, 50% and 97.5% quantiles of each
## column (one column per year.age combination) of the fertility sample.
emp.quantiles <-
    apply(simulationStudy.mcmc.samples$fert.rate.mcmc, 2
          ,function(z) quantile(z, probs = c(0.025, 0.5, 0.975))
          )
dimnames(emp.quantiles) <-
    list(as.character(c(0.025, 0.5, 0.975))
         ,colnames(simulationStudy.mcmc.samples$fert.rate.mcmc)
         )
## Reshape to long format; column names are "<year>.<age>", so split on "."
## to recover the two variables.
emp.quantiles.melt <- melt(emp.quantiles)
X2.split <-
    strsplit(as.character(levels(emp.quantiles.melt$X2)[emp.quantiles.melt$X2])
             ,"\\."
             )
emp.quantiles.melt$years <- sapply(X2.split, "[[", 1)
emp.quantiles.melt$ages <- as.numeric(sapply(X2.split, "[[", 2))
emp.quantiles.df <-
    rename.vars(emp.quantiles.melt[,c("X1", "value", "years", "ages")]
                ,from = c("X1", "value"), to = c("quant", "fert.rate")
                )
## Initial estimates generated in this run; rows 2:3 are the fertile age
## groups (cf. fert.rows above).
init.est.df <-
    rename.vars(melt(simulationStudy.mcmc.samples$fixed.params$mean.fert.rate[2:3,])
                ,from = c("X1", "X2", "value")
                ,to = c("ages", "years", "fert.rate")
                )
## quant = 77/99 are sentinel codes so initial estimates and truth sort
## after the numeric quantile labels when used as plot groups below.
init.est.df$quant <- 77
## True values
fert.rate.true.df <-
    rename.vars(melt(asFertTRUE.mat[2:3,])
                ,from = c("X1", "X2", "value")
                ,to = c("ages", "years", "fert.rate")
                )
fert.rate.true.df$quant <- 99
## Bind all data
fert.rate.final.df <-
    rbind(fert.rate.true.df, init.est.df, emp.quantiles.df)
## Plot: posterior quantiles, initial estimates and truth of the
## age-specific fertility rates, one panel per period. The style vectors
## (col/pch/lty/lwd) follow the sorted group levels of `quant`:
## "0.025", "0.5", "0.975", 77 (initial est.), 99 (truth).
## FIX: wrap the lattice call in print(); lattice objects are not
## auto-printed inside source()/functions, so without print() nothing is
## drawn into the PDF when this script is source()d.
pdf(file = "outputs/plots/simulation_study_Figure_2_a.pdf", width = 7, height = 7)
print(xyplot(fert.rate ~ ages | ordered(years)
       ,data = fert.rate.final.df
       ,groups = quant
       ##,subset = quant < 1
       ,panel = function(x, y, ...) {
           panel.refline(h = c(1.5, 2))
           panel.xyplot(x, y, ...)
       }
       ,type = "b"
       ,xlab = "age"
       ,ylab = "age-specific fertility rate"
       ,ylim = c(0, 0.8)
       ,col = c(qLim.col, qMed.col, qLim.col, m.col, t.col)
       ,pch = c(qLim.pch, qMed.pch, qLim.pch, m.pch, t.pch)
       ,lty = c(qLim.lty, qMed.lty, qLim.lty, m.lty, t.lty)
       ,lwd = c(qLim.lwd, qMed.lwd, qLim.lwd, m.lwd, t.lwd)
       ,key = list(text = list(c("Post. median", "95% Post. Int."
                   ,"initial est.", "truth"))
                   ,lines = list(lty = c(qMed.lty, qLim.lty, m.lty, t.lty))
                   ,col = c(qMed.col, qLim.col, m.col, t.col)
                   ,pch = c(qMed.pch, qLim.pch, m.pch, t.pch)
                   ,type = c("b", "b", "b", "b")
                   ,columns = 3)
       ,as.table = TRUE
       ,las = 1
       ,par.settings = list(superpose.symbol = list(cex = 1))
       ))
dev.off()
###
### Figure 2 (b). Total Fertility Rate
###
## Sum sample of age-specific fertility rates to get TFR: each draw's TFR
## per period is 5 * sum of its age-specific rates (5-year age groups).
tfr.mcmc.mat <-
    matrix(0, nrow = nrow(simulationStudy.mcmc.samples$fert.rate.mcmc)
           ,ncol = 4
           ,dimnames = list(NULL,
            unique(sapply(strsplit(colnames(simulationStudy.mcmc.samples$fert.rate.mcmc)
                                   ,"\\."), FUN = function(z) z[[1]])
                   )
            )
           )
## year part of each "<year>.<age>" column name
fert.rate.mcmc.colYrs <-
    sapply(strsplit(colnames(simulationStudy.mcmc.samples$fert.rate.mcmc)
                    ,"\\."), FUN = function(z) z[[1]])
## accumulate per-period TFR over the columns belonging to each year
for(i in 1:ncol(tfr.mcmc.mat)) {
    colYrs.index <- fert.rate.mcmc.colYrs == colnames(tfr.mcmc.mat)[i]
    tfr.mcmc.mat[,i] <-
        apply(simulationStudy.mcmc.samples$fert.rate.mcmc[,colYrs.index]
              ,1
              ,FUN = function(z) 5 * sum(z)
              )
}
## posterior 2.5% / 50% / 97.5% quantiles of TFR per period
tfr.quantiles.mat <-
    apply(tfr.mcmc.mat, 2, FUN = function(z)
      {
          quantile(z, probs = c(0.025, 0.5, 0.975))
      })
## Initial estimates generated in this run
tfr.init.est.mat <-
    apply(simulationStudy.mcmc.samples$fixed.params$mean.fert.rate[2:3,], 2
          ,FUN = function(z) 5 * sum(z)
          )
## True TFR
tfr.true.mat <-
    apply(asFertTRUE.mat[2:3,], 2
          ,FUN = function(z) 5 * sum(z)
          )
## Plot
pdf(file = "outputs/plots/simulation_study_Figure_2_b.pdf", width = 7, height = 7)
plot(seq(from = 1960, to = 1975, by = 5), tfr.quantiles.mat[3,]
,type = "l", lty = qLim.lty, col = qLim.col, pch = qLim.pch
,lwd = qLim.lwd
,ylim = 5 * c(0.4, 1.1)
,ylab = "Total Fertility Rate"
,xlab = "year"
,las = 1)
lines(seq(from = 1960, to = 1975, by = 5), tfr.quantiles.mat[1,]
,type = "l", lty = qLim.lty, col = qLim.col, pch = qLim.pch
,lwd = qLim.lwd)
lines(seq(from = 1960, to = 1975, by = 5), tfr.quantiles.mat[2,]
,type = "b", lty = qMed.lty, col = qMed.col, pch = qMed.pch
,lwd = qMed.lwd)
lines(seq(from = 1960, to = 1975, by = 5)
,tfr.init.est.mat
,type = "b"
,col = m.col, pch = m.pch, lty = m.lty, lwd = m.lwd)
lines(seq(from = 1960, to = 1975, by = 5)
,tfr.true.mat
,type = "b"
,col = t.col, pch = t.pch, lty = t.lty, lwd = t.lwd)
legend("topright", lty = c(qMed.lty, qLim.lty, m.lty, t.lty)
,lwd = c(qMed.lwd, qLim.lwd, m.lwd, t.lwd)
,col = c(qMed.col, qMed.col, t.col, m.col)
,pch = c(qMed.pch, qLim.pch, t.pch, m.pch)
,legend = c("median", "95% PI", "truth", "initial est." )
,cex = 0.85
)
dev.off()
###
### Figure 2 (c). Life expectancy at birth.
###
## Life expectancy at birth from a column of survival proportions: the last
## element s (open age interval) is replaced by s/(1-s), then e0 is
## approximated as 5 * sum of cumulative survivorship.
leb.mcmc.mat <-
matrix(0, nrow = nrow(simulationStudy.mcmc.samples$surv.prop.mcmc)
,ncol = 4
,dimnames = list(NULL,
unique(sapply(strsplit(colnames(simulationStudy.mcmc.samples$surv.prop.mcmc)
,"\\."), FUN = function(z) z[[1]])
)
)
)
## Year label per MCMC column; reused in the Figure 2 (d) section below.
surv.prop.mcmc.colYrs <-
sapply(strsplit(colnames(simulationStudy.mcmc.samples$surv.prop.mcmc)
,"\\."), FUN = function(z) z[[1]])
for(i in 1:ncol(leb.mcmc.mat)) {
colYrs.index <- surv.prop.mcmc.colYrs == colnames(leb.mcmc.mat)[i]
leb.mcmc.mat[,i] <-
apply(simulationStudy.mcmc.samples$surv.prop.mcmc[,colYrs.index]
,1
,FUN = function(z) {
x <- c(head(z, -1), tail(z,1) / (1-tail(z,1)))
5 * sum(cumprod(x))
}
)
}
## Posterior median and 95% interval per year.
leb.quantiles.mat <- apply(leb.mcmc.mat, 2, FUN = function(z)
{
quantile(z, probs = c(0.025, 0.5, 0.975))
})
## Initial estimates generated in this run
leb.init.est.mat <-
apply(simulationStudy.mcmc.samples$fixed.params$mean.surv.prop, 2
,FUN = function(z) {
x <- c(head(z, -1), tail(z,1) / (1-tail(z,1)))
5 * sum(cumprod(x))
}
)
## True leb
leb.true.mat <-
apply(asSurvTRUE.mat, 2
,FUN = function(z) {
x <- c(head(z, -1), tail(z,1) / (1-tail(z,1)))
5 * sum(cumprod(x))
}
)
## Plot
pdf(file = "outputs/plots/simulation_study_Figure_2_c.pdf", width = 7, height = 7)
plot(seq(1960, 1975, by = 5), leb.quantiles.mat[3,]
,type = "l", lty = qLim.lty, col = qLim.col, lwd = qLim.lwd
,ylim = c(14, 17.5)
,ylab = "Life expectancy at birth (years)"
,xlab = "year"
,las = 1)
lines(seq(1960, 1975, by = 5), leb.quantiles.mat[1,]
,type = "l", lty = qLim.lty, col = qLim.col, lwd = qLim.lwd)
lines(seq(1960, 1975, by = 5), leb.quantiles.mat[2,]
,type = "b", lty = qMed.lty, col = qMed.col, lwd = qMed.lwd)
lines(seq(from = 1960, to = 1975, by = 5)
,leb.init.est.mat
,type = "b"
,col = m.col, lty = m.lty, pch = m.pch, lwd = m.lwd)
lines(seq(from = 1960, to = 1975, by = 5)
,leb.true.mat
,type = "b"
,col = t.col, lty = t.lty, pch = t.pch, lwd = t.lwd)
## NOTE(review): same legend ordering concern as Figure 2 (b) -- lty is
## ordered (median, PI, initial, truth) but col/pch/labels are ordered
## (median, PI, truth, initial).  Confirm m.lty/t.lty values.
legend("topright", lty = c(qMed.lty, qLim.lty, m.lty, t.lty)
,lwd = c(qMed.lwd, qLim.lwd, m.lwd, t.lwd)
,col = c(qMed.col, qMed.col, t.col, m.col)
,pch = c(qMed.pch, qLim.pch, t.pch, m.pch)
,legend = c("median", "95% PI", "truth", "initial est.")
,cex = 0.85
)
dev.off()
###
### Figure 2 (d). Net number of migrants.
###
## NB: Can't simply sum migration proportions because they are based on
## different population totals. The two functions below are needed to
## convert migration proportions into counts collapsed over age.
## Functions to create the Leslie matrix (Section 3.2.1 of the paper)
make.leslie.matrix <-
    function(pop, surv, fert, srb = 1.05, age.int = 5, label.dims = FALSE)
{
  ## Build the Leslie matrix for the cohort component projection (CCMPP,
  ## Section 3.2.1 of the reference).
  ##
  ## pop        : population counts at baseline; used only for its length,
  ##              which fixes the number of age groups n.
  ## surv       : survivorship proportions.  The first element should be
  ##              nL0/(n*l0) and the last element the survival for age.int
  ##              years within the open age interval, so length(surv) is
  ##              expected to be n + 1.
  ## fert       : age-specific fertility rates, NOT yet multiplied by age.int.
  ## srb        : sex ratio at birth.
  ## age.int    : width of each age interval in years; needed for the
  ##              correct interpretation of survival and fertility rates.
  ## label.dims : if TRUE, label rows/columns with the ages 0, 5, 10, ...
  ##
  ## Returns the n x n Leslie matrix.
  ##
  ## CREATOR: Mark C. Wheldon
  ## REFERENCE: Wheldon, Raftery, Clark & Gerland (2013), "Reconstructing
  ## Past Populations with Uncertainty from Fragmentary Data", JASA
  ## 108(501), 96-110, doi:10.1080/01621459.2012.737729.
  ## LICENCE: Released under the Creative Commons BY-NC-SA Licence
  ## (https://creativecommons.org).
  n.ages <- length(pop)
  n.surv <- length(surv)
  les <- matrix(0, nrow = n.ages, ncol = n.ages)
  ## First row: net maternity contributions.  Female share of births
  ## (1/(1+srb)) surviving the first interval, averaging each cohort's
  ## fertility at the start and end of the projection step.
  female.birth.surv <- 0.5 * surv[1] / (1 + srb)
  net.maternity <- age.int * fert + c(age.int * fert[-1], 0) * surv[-1]
  les[1, ] <- female.birth.surv * net.maternity
  ## Sub-diagonal: survival from each age group into the next.
  les[2:n.ages, 1:(n.ages - 1)] <- diag(surv[-c(1, n.surv)])
  ## Bottom-right corner: survival within the open-ended age group.
  les[n.ages, n.ages] <- surv[n.surv]
  if (label.dims) {
    age.labs <- seq(from = 0, by = 5, length = n.ages)
    dimnames(les) <- list(age.labs, age.labs)
  }
  les
}
## Function to calculate total migration count
total.mig.count <- function(n1, n2, L)
{
  ## Net number of migrants implied by one CCMPP projection step.
  ##
  ## n1 : population count vector at time t
  ## n2 : population count vector at time t + delta
  ## L  : Leslie matrix used to project the population from t to t + delta
  ##
  ## Inverts n2 = L (n1 + 0.5 * mig) + 0.5 * mig for mig, i.e. half of the
  ## migrants arrive before the projection step and half after.  Returns a
  ## column vector of net migrant counts by age group; pre-multiply the
  ## output by solve(diag(n1)) to obtain proportions instead.
  ##
  ## CREATOR: Mark C. Wheldon
  ## REFERENCE: Wheldon, Raftery, Clark & Gerland (2013), "Reconstructing
  ## Past Populations with Uncertainty from Fragmentary Data", JASA
  ## 108(501), 96-110, doi:10.1080/01621459.2012.737729.
  ## LICENCE: Released under the Creative Commons BY-NC-SA Licence
  ## (https://creativecommons.org).
  pop.start <- as.numeric(n1)
  pop.end <- as.numeric(n2)
  proj <- as.matrix(L)
  2 * solve(proj + diag(nrow(proj))) %*% (pop.end - proj %*% pop.start)
}
## Function 'popRecon.ccmp.female' needed; sourced in from
## 'pop_reconstruction_functions.R' at top of this file.
## MCMC SAMPLE FOR TOTAL NET NUMBER OF MIGRANTS
## For each posterior draw and each 5-year period, rebuild the Leslie matrix
## from that draw's vital rates and invert the projection identity to get
## the net number of migrants, summed over age groups.
## Prepare output matrix
net.mig.mcmc.mat <-
matrix(0, nrow = nrow(simulationStudy.mcmc.samples$mig.prop.mcmc)
,ncol = 4
,dimnames = list(NULL,
unique(sapply(strsplit(colnames(simulationStudy.mcmc.samples$mig.prop.mcmc)
,"\\."), FUN = function(z) z[[1]])
)
)
)
mig.prop.mcmc.colYrs <-
sapply(strsplit(colnames(simulationStudy.mcmc.samples$mig.prop.mcmc)
,"\\."), FUN = function(z) z[[1]])
mig.prop.mcmc.colYrsUniq <- unique(mig.prop.mcmc.colYrs)
## Combine population counts at baseline and in subsequent years
pop.mat <- cbind(simulationStudy.mcmc.samples$baseline.count.mcmc
,simulationStudy.mcmc.samples$lx.mcmc)
## Index for years
pop.mat.colYrs <- sapply(strsplit(colnames(pop.mat)
,"\\."), FUN = function(z) z[[1]])
pop.mat.colYrsUniq <- unique(pop.mat.colYrs)
## cycle through mcmc sample
for(k in 1:nrow(simulationStudy.mcmc.samples$mig.prop.mcmc)) {
## cycle through years
for(i in 1:ncol(net.mig.mcmc.mat)) {
## 5-year sub-intervals for indexing columns
mig.colYrs.index <-
colnames(net.mig.mcmc.mat) == mig.prop.mcmc.colYrsUniq[i]
surv.colYrs.index <-
surv.prop.mcmc.colYrs == mig.prop.mcmc.colYrsUniq[i]
fert.colYrs.index <-
fert.rate.mcmc.colYrs == mig.prop.mcmc.colYrsUniq[i]
## substr(..., 2, 5) strips a leading character from the period label to
## recover the 4-digit year -- presumably labels look like "y1960";
## verify against the column names upstream.
pop.colYrs.index1 <-
pop.mat.colYrs == substr(mig.prop.mcmc.colYrsUniq[i]
,start = 2, stop = 5)
pop.colYrs.index2 <-
pop.mat.colYrs == as.numeric(substr(mig.prop.mcmc.colYrsUniq[i]
,start = 2, stop = 5)) + 5
## get vital rates and make leslie matrix
## (fertility is non-zero only for age groups 2:3, as in Figure 2 (a))
sk <- simulationStudy.mcmc.samples$surv.prop.mcmc[k,surv.colYrs.index]
fk <- rep(0, 4)
fk[2:3] <- simulationStudy.mcmc.samples$fert.rate.mcmc[k,fert.colYrs.index]
popk1 <- pop.mat[k,pop.colYrs.index1]
popk2 <- pop.mat[k,pop.colYrs.index2]
Lk <- make.leslie.matrix(pop = popk1, surv = sk, fert = fk, srb = 1.05
,age.int = 5)
## calculate net number of migrants
netMigk <- total.mig.count(n1 = popk1, n2 = popk2, L = Lk)
## store
net.mig.mcmc.mat[k, mig.colYrs.index] <- sum(netMigk)
}
}
## Posterior quantiles
net.mig.quantiles.mat <- apply(net.mig.mcmc.mat, 2, FUN = function(z)
{
quantile(z, probs = c(0.025, 0.5, 0.975))
})
## INITIAL ESTIMATE OF NET NUMBER OF MIGRANTS
## Same Leslie-matrix inversion as above, applied to the deterministic
## projection of the initial-estimate vital rates.
## Prepare output matrix
net.mig.init.est.mat <- rep(0, 4)
names(net.mig.init.est.mat) <-
colnames(simulationStudy.mcmc.samples$fixed.params$mean.mig.prop)
## Combine population counts at baseline and in subsequent years
pop.input.mat <-
popRecon.ccmp.female(pop=simulationStudy.mcmc.samples$fixed.params$mean.baseline.count
,surv=simulationStudy.mcmc.samples$fixed.params$mean.surv.prop
,fert=simulationStudy.mcmc.samples$fixed.params$mean.fert.rate
,mig=simulationStudy.mcmc.samples$fixed.params$mean.mig.prop
)
## Calculate input net migration
for(k in 1:(ncol(pop.input.mat)-1)) {
Lk <- make.leslie.matrix(pop = pop.input.mat[,k]
,surv = simulationStudy.mcmc.samples$fixed.params$mean.surv.prop[,k]
,fert = simulationStudy.mcmc.samples$fixed.params$mean.fert.rate[,k]
,srb = 1.05
,age.int = 5)
netMigk <- total.mig.count(n1 = pop.input.mat[,k]
,n2 = pop.input.mat[,k+1]
,L = Lk)
net.mig.init.est.mat[k] <- sum(netMigk)
}
## TRUE NET NUMBER OF MIGRANTS
## And once more for the known simulation truth.
## Prepare output matrix
net.mig.true.mat <- rep(0, 4)
names(net.mig.true.mat) <- colnames(asMigTRUE.mat)
## True population counts
pop.true.mat <-
popRecon.ccmp.female(pop=baselineTRUE.mat, surv=asSurvTRUE.mat
,fert=asFertTRUE.mat, mig=asMigTRUE.mat)
## Calculate true net migration
for(k in 1:(ncol(pop.true.mat)-1)) {
Lk <- make.leslie.matrix(pop = pop.true.mat[,k], surv = asSurvTRUE.mat[,k]
,fert = asFertTRUE.mat[,k], srb = 1.05
,age.int = 5)
netMigk <- total.mig.count(n1 = pop.true.mat[,k]
,n2 = pop.true.mat[,k+1]
,L = Lk)
net.mig.true.mat[k] <- sum(netMigk)
}
## PLOT
## Figure 2 (d): net migrants (thousands) per 5-year period -- posterior
## 95% interval and median, initial estimate, and simulation truth.
pdf(file = "outputs/plots/simulation_study_Figure_2_d.pdf", width = 7, height = 7)
plot(seq(from = 1960, to = 1975, by = 5), net.mig.quantiles.mat[3,]/1E3
,type = "l", lty = qLim.lty, col = qLim.col, lwd = qLim.lwd
,ylim = c(-10, 10)
,ylab = "Total Net Number of Migrations (000s)"
,xlab = "year"
,las = 1)
lines(seq(from = 1960, to = 1975, by = 5), net.mig.quantiles.mat[1,]/1E3
,type = "l", lty = qLim.lty, col = qLim.col, pch = qLim.pch
,lwd = qLim.lwd)
lines(seq(from = 1960, to = 1975, by = 5), net.mig.quantiles.mat[2,]/1E3
,type = "b", lty = qMed.lty, col = qMed.col, pch = qMed.pch
,lwd = qMed.lwd)
lines(seq(from = 1960, to = 1975, by = 5)
,net.mig.init.est.mat/1E3
,type = "b"
,col = m.col, lty = m.lty, pch = m.pch, lwd = m.lwd)
lines(seq(from = 1960, to = 1975, by = 5)
,net.mig.true.mat/1E3
,type = "b"
,col = t.col, lty = t.lty, pch = t.pch, lwd = t.lwd)
abline(h = 0, col = "grey")
## NOTE(review): the "95% PI" legend entry uses qMed.col/qMed.pch although
## the PI curves are drawn with qLim.col -- intentional only if the two
## colour constants coincide; confirm.
legend("topleft", lty = c(qMed.lty, qLim.lty, m.lty, t.lty)
,lwd = c(qMed.lwd, qLim.lwd, m.lwd, t.lwd)
,col = c(qMed.col, qMed.col, m.col, t.col)
,pch = c(qMed.pch, qMed.pch, m.pch, t.pch)
,legend = c("median", "95% PI", "initial est.", "true")
,cex = 0.85
)
dev.off()
|
ab4a6cf0b5fc479ff8e7082988e3d73ad4aefc63
|
f871ea0fdfc0ba93e65037f28090fb0a60513bec
|
/DIST_EffectSize/effectSize4.22.R
|
f629ab5427b9eab3ce373c2e5dd489bc07d38195
|
[] |
no_license
|
jcooperdevlin/RewildedMice
|
8b838ff2831ea5f3e8432859dbb8c1631ab142c5
|
fd95ba111a3c2127939c85e336091de78fa724c1
|
refs/heads/master
| 2023-03-07T08:45:19.141349
| 2020-04-06T14:16:53
| 2020-04-06T14:16:53
| 340,936,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,039
|
r
|
effectSize4.22.R
|
#### Making Principle Coordinate plots and analyzing effect sizes
#### 4.22
## Libraries: ape provides pcoa(), MDMR provides mdmr(), ggsignif the
## significance brackets, and reshape supplies the melt() used further down.
library(ggplot2)
library(circlize)
library(gridExtra)
library(reshape)
library(ape)
library(scales)
library(ggrepel)
library(MDMR)
library(ggsci)
library(ggsignif)
## NOTE(review): absolute, machine-specific working directory -- every
## relative path below depends on this mount being available.
setwd("/Volumes/lokep01lab/lokep01labspace/Rewilding_Data")
## Fixed colour per effector variable; read as the global 'type_cols' by the
## effect-size bar chart inside var_plotter().
type_cols <- c(Diarrhea="red2", Environment="dodgerblue2", FACS = "forestgreen",
Wedge_cage="navyblue",Flow.date="grey50",
Gender="hotpink",Genotype="darkorange1", Pregnant="purple", WeightGain="forestgreen")
#function for plotting PCoA with Effect Sizes
var_plotter <- function(input, effectors, col_var, col_var_name="Environment", col_var_col=NA,
                        shape_var=NA, shape_var_name=NA, Feature, dist_panel=F){
  ## Principal-coordinate (PCoA) overview of one assay, plus MDMR effect sizes.
  ##
  ## input        : numeric sample x feature table (already transformed,
  ##                e.g. log2), rows named by mouse id.
  ## effectors    : data.frame of candidate explanatory variables (one row
  ##                per sample) passed to mdmr().
  ## col_var      : factor used to colour points; when dist_panel = TRUE its
  ##                levels are assumed to be "lab"/"wild".
  ## col_var_name : legend title for col_var.
  ## col_var_col  : named colour vector for col_var; if left at the scalar
  ##                NA default, the lab/wild palette is used.
  ## shape_var    : factor used for point shapes; shape_var_name its title.
  ## Feature      : plot title.
  ## dist_panel   : if TRUE, add a boxplot panel of within/between-group
  ##                Euclidean distances (lab:lab / wild:wild / lab:wild).
  ##
  ## Returns an arranged grob: PCoA scatter with marginal densities, a
  ## variable-loading panel, the optional distance panel, and a bar chart
  ## of MDMR effect sizes (coloured via the global 'type_cols').
  D <- dist(input, method = "euclidean")
  res <- pcoa(D)
  plotter <- data.frame(res$vectors, col_var = col_var,
                        shape_var = shape_var)
  ## Fall back to the default lab/wild palette only when the caller left
  ## col_var_col at its scalar NA default.  (A bare `if (is.na(col_var_col))`
  ## errors in R >= 4.2 whenever a colour vector of length > 1 is supplied,
  ## as some callers in this file do.)
  if (length(col_var_col) == 1 && all(is.na(col_var_col))) {
    col_var_col <- c(lab = "mediumpurple1", wild = "red3")
  }
  ## Main PCoA scatter; axes annotated with broken-stick explained variation.
  g0 <- ggplot(plotter, aes(Axis.1, Axis.2, color = col_var, shape = shape_var)) +
    geom_point(size = 5) +
    guides(color = guide_legend(title = col_var_name),
           shape = guide_legend(title = shape_var_name)) +
    ylab(paste0("PCo2 ", round(res$values$Broken_stick[2]*100, 2), "% expl. variation")) +
    xlab(paste0("PCo1 ", round(res$values$Broken_stick[1]*100, 2), "% expl. variation")) +
    scale_color_manual(values = col_var_col) +
    ggtitle(Feature) +
    theme_bw() +
    theme(axis.text = element_text(size = 12, color = "black"),
          axis.title = element_text(size = 13, color = "black"),
          legend.text = element_text(size = 12, color = "black"),
          legend.title = element_text(size = 13, color = "black"),
          plot.title = element_text(size = 14, color = "black",
                                    face = 'bold', hjust = 0.5))
  ## Biplot-style variable loadings: project the original features onto the
  ## first two principal coordinates.
  pr.coo <- res$vectors
  plot.axes <- c(1, 2)
  n <- nrow(input)
  points.stand <- scale(pr.coo[, plot.axes])
  S <- cov(input, points.stand)
  U <- S %*% diag((res$values$Eigenvalues[plot.axes]/(n - 1))^(-0.5))
  colnames(U) <- colnames(pr.coo[, plot.axes])
  datapc <- data.frame(varnames = rownames(U), U*100)
  ## Keep the 12 features with the largest |PCo1 x PCo2| loading product.
  ## head() also copes with assays that have fewer than 12 features (the
  ## previous datapc[1:12, ] produced NA rows in that case).
  datapc$mult <- abs(datapc$Axis.1*datapc$Axis.2)
  datapc <- datapc[order(datapc$mult, decreasing = TRUE), ]
  datapc2 <- head(datapc, 12)
  ## Loading arrows + labels, drawn over an invisible copy of the point
  ## cloud so this panel shares the scatter's coordinate range.
  g_seg <- ggplot(plotter, aes(Axis.1, Axis.2)) +
    geom_point(size = 3, alpha = 0) +
    scale_color_manual(values = col_var_col) + coord_equal() +
    geom_segment(data = datapc2, aes(x = 0, y = 0, xend = Axis.1, yend = Axis.2),
                 arrow = arrow(length = unit(0.2, "cm")), alpha = 0.5) +
    geom_label_repel(data = datapc2, aes(x = Axis.1, y = Axis.2, label = varnames),
                     size = 4, force = 4, segment.alpha = 0.5) +
    theme_void()
  ## Marginal density panels for PCo1 (below) and PCo2 (left); pc_fake is an
  ## all-white spacer used only to keep the grid aligned.
  pc1_hist <- ggplot(plotter, aes(Axis.1, color = col_var, fill = col_var)) +
    geom_density(alpha = 0.6) +
    scale_y_reverse() +
    scale_color_manual(values = col_var_col) +
    scale_fill_manual(values = col_var_col) +
    theme_void()
  pc2_hist <- ggplot(plotter, aes(Axis.2, color = col_var, fill = col_var)) +
    geom_density(alpha = 0.6) +
    coord_flip() +
    scale_y_reverse() +
    scale_color_manual(values = col_var_col) +
    scale_fill_manual(values = col_var_col) +
    theme_void()
  pc_fake <- ggplot(plotter, aes(Axis.2, color = col_var, fill = col_var)) +
    geom_density(alpha = 0.6) +
    coord_flip() +
    scale_y_reverse() +
    scale_color_manual(values = c("white", "white", "white")) +
    scale_fill_manual(values = c("white", "white", "white")) +
    theme_void()
  top_g <- arrangeGrob(
    pc2_hist + theme(legend.position = 'none'),
    g0 + theme_void() +
      theme(legend.position = 'none',
            plot.title = element_text(size = 14, color = "black",
                                      face = 'bold', hjust = 0.5)), nrow = 1, widths = c(1, 3))
  g_seg_plot <- arrangeGrob(g_seg, pc_fake + theme(legend.position = 'none'), nrow = 2, heights = c(3, 1))
  bottom_g <- arrangeGrob(
    pc_fake + theme(legend.position = 'none'),
    pc1_hist + theme(legend.position = 'none'), nrow = 1, widths = c(1, 3))
  pcoa_plot <- arrangeGrob(top_g, bottom_g, heights = c(3, 1))
  ## Multivariate distance matrix regression: effect of each effector on the
  ## full distance matrix.
  mdmr.res <- mdmr(X = effectors, D = D)
  es_df <- mdmr.res$stat
  ## mdmr appends factor-level suffixes (e.g. "Environment1"); strip the "1"
  ## and tidy the weight-gain label.  NOTE(review): gsub("1", "") removes
  ## every "1" in a name, not just a trailing suffix -- fine for the current
  ## effectors, but verify if new variables containing "1" are added.
  es_df$Variable <- gsub("1", "", rownames(es_df))
  es_df$Variable <- gsub("Weight_gain\\.", "WeightGain", es_df$Variable)
  ## Drop the omnibus (first) row, keep per-variable statistics, and order
  ## the bars by absolute effect size (largest first).
  es_df <- es_df[-1, ]
  es_df <- subset(es_df, !is.na(stat))
  es_df <- es_df[order(abs(es_df$stat), decreasing = TRUE), ]
  es_df$Variable <- factor(es_df$Variable, levels = es_df$Variable)
  gg_combo <- ggplot(es_df, aes(Variable, abs(stat), fill = Variable)) +
    geom_col() +
    scale_x_discrete(limits = rev(levels(es_df$Variable))) +
    guides(fill = guide_legend(title = "Feature")) +
    ylab("EffectSize") + xlab("") +
    scale_fill_manual(values = type_cols) +
    theme_bw() +
    coord_flip() +
    theme(legend.position = 'none',
          axis.text = element_text(size = 12, color = "black"),
          axis.title = element_blank(),
          legend.text = element_text(size = 12, color = "black"),
          legend.title = element_text(size = 13, color = "black"),
          plot.title = element_text(size = 14, face = 'bold'))
  if (isTRUE(dist_panel)) {
    ## Pairwise-distance panel: long-format table of all sample pairs with a
    ## lab/wild comparison label (both directions collapsed to "lab:wild");
    ## value > 0 removes the zero self-distances on the diagonal.
    d_mat <- as.matrix(D)
    d_df <- data.frame(
      id1 = rep(colnames(d_mat), ncol(d_mat)),
      id2 = rep(rownames(d_mat), each = ncol(d_mat)),
      var1 = rep(col_var, ncol(d_mat)),
      var2 = rep(col_var, each = ncol(d_mat)),
      value = c(d_mat)
    )
    d_df$lab <- paste0(d_df$var1, ":", d_df$var2)
    d_df$lab <- gsub("wild:lab", "lab:wild", d_df$lab)
    d_df <- subset(d_df, value > 0)
    d_df$lab2 <- factor(d_df$lab, levels = c("lab:lab", "wild:wild", "lab:wild"))
    dist_plot <- ggplot(d_df, aes(lab2, value, color = lab, fill = lab)) +
      geom_boxplot(alpha = 0.2) +
      scale_fill_manual(values = c("mediumorchid3", "red3", "navy")) +
      scale_color_manual(values = c("mediumorchid3", "red3", "navy")) +
      xlab("Comparison") + ylab("Distance") +
      theme_bw() + geom_signif(test = "wilcox.test",
                               comparisons = combn(unique(d_df$lab), 2, simplify = FALSE),
                               y_position = c(max(d_df$value) + 0.5, max(d_df$value) + 1, max(d_df$value) + 1.5),
                               color = 'black', map_signif_level = TRUE) +
      theme(legend.position = 'none',
            axis.title = element_text(size = 15),
            axis.text = element_text(size = 12, color = 'black'))
    return(arrangeGrob(pcoa_plot, g_seg_plot, dist_plot, gg_combo, ncol = 4, widths = c(2, 2, 1.5, 1.5)))
  } else {
    return(arrangeGrob(pcoa_plot, g_seg_plot, gg_combo, ncol = 3, widths = c(2, 2, 1)))
  }
}
## FACS Blood
## Blood lymphoid panel: columns 13:27 hold the gated populations, renamed
## from a lookup table.  read.table's positional arguments are header = TRUE
## and sep = '\t'.
facs_blood <- read.table("int/data/FACS_data/BLOOD_lymph_FACS_metadata_names.txt", T, '\t')
name_change <- read.table("int/data/FACS_data/lymph_name_change.txt")
colnames(facs_blood)[13:27] <- as.character(name_change$V2)
rownames(facs_blood) <- as.character(facs_blood$mouse_id)
## log2(x + 1) transform, then drop rows/columns that are >= 50% zeros.
input=log2(facs_blood[,13:27]+1)
rownames(input) <- as.character(facs_blood$mouse_id)
rowZeros <- apply(input, 1, function(x) {sum(x == 0)})
colZeros <- apply(input, 2, function(x) {sum(x == 0)})
input = input[which(rowZeros<0.5*ncol(input)),which(colZeros<0.5*nrow(input)) ]
facs_blood <- facs_blood[rownames(input),]
## Candidate explanatory variables selected by column position -- confirm
## the indices still match the metadata layout if the file format changes.
effectors=facs_blood[,c(4,5,6,8,10)]
col_var=facs_blood$Environment
shape_var=facs_blood$Genotype
gg_blood_lymph <- var_plotter(input, effectors, col_var, "Environment", col_var_col = NA,
shape_var, "Genotype", "Blood Lymph Panel", dist_panel=T)
#grid.arrange(gg_blood_lymph)
### fig 1d?
## Write the figure both as PNG and as PDF.
png("int/DIST_EffectSize/Fig_1d.png",
height = 5, width = 20, units = 'in', res=300)
grid.arrange(gg_blood_lymph)
dev.off()
pdf("int/DIST_EffectSize/Fig_1d.pdf",
height = 5, width = 20)
grid.arrange(gg_blood_lymph)
dev.off()
## FACS MLN (lymphoid panel) ----------------------------------------------
## Columns 13:27 are the gated populations, renamed from the lookup table.
facs_mln <- read.table("int/data/FACS_data/MLN_lymph_FACS_metadata_names.txt", header = TRUE, sep = '\t')
name_change <- read.table("int/data/FACS_data/lymph_name_change.txt")
colnames(facs_mln)[13:27] <- as.character(name_change$V2)
rownames(facs_mln) <- as.character(facs_mln$mouse_id)
## Keep only mice with complete weight-gain and FACS measurements.
facs_mln <- facs_mln[!is.na(facs_mln$Weight_gain.),]
facs_mln <- facs_mln[!is.na(facs_mln$Total_T_cells),]
## log2(x + 1) transform, then drop rows/columns that are >= 50% zeros.
input <- log2(facs_mln[,13:27]+1)
rownames(input) <- as.character(facs_mln$mouse_id)
rowZeros <- apply(input, 1, function(x) {sum(x == 0)})
colZeros <- apply(input, 2, function(x) {sum(x == 0)})
input <- input[which(rowZeros<0.5*ncol(input)), which(colZeros<0.5*nrow(input))]
facs_mln <- facs_mln[rownames(input),]
effectors <- facs_mln[,c(4,5,6,8,10)]
col_var <- facs_mln$Environment
shape_var <- facs_mln$Genotype
gg_mln_lymph <- var_plotter(input, effectors, col_var, "Environment", col_var_col=NA,
                            shape_var, "Genotype", "MLN Lymph Panel")
## Same ordination coloured by flow-cytometry date (batch check).
col_var <- facs_mln$Flow.date
gg_mln_lymph_date <- var_plotter(input, effectors, col_var, "Week",
                                 col_var_col=c("dodgerblue2", "green4", "orange"),
                                 shape_var, "Genotype", "MLN Lymph Panel")
pdf("int/DIST_EffectSize/MLN_lymph_pcoa_ES_combo+date.pdf", height = 10, width = 15)
grid.arrange(gg_mln_lymph, gg_mln_lymph_date, nrow=2)
dev.off()
### mln lymph lab only
lab_mice <- as.character(subset(facs_mln, Environment == "lab")$mouse_id)
input2 <- input[lab_mice,]
facs_mln2 <- facs_mln[lab_mice,]
## Column 7 is the (misspelled) "Wege_cage" field; coerce to factor and fix
## the display name used in the effect-size chart.
effectors <- facs_mln2[,c(7,6)]
effectors$Wege_cage <- factor(effectors$Wege_cage)
colnames(effectors)[1] <- "Wedge_cage"
col_var <- facs_mln2$Environment
shape_var <- facs_mln2$Genotype
## col_var_col must be passed by name here: omitting it shifted the
## remaining positional arguments so that 'Feature' was missing, which
## errors at run time inside var_plotter().
gg_mln_lymph_lab <- var_plotter(input2, effectors, col_var, "Environment", col_var_col = NA,
                                shape_var, "Genotype", "MLN Lymph Panel")
grid.arrange(gg_mln_lymph_lab)
#### mln lymph wild only
wild_mice <- as.character(subset(facs_mln, Environment == "wild")$mouse_id)
input2 <- input[wild_mice,]
facs_mln2 <- facs_mln[wild_mice,]
## Binary flow-date indicator (2 = the "10-Aug" batch); sized from the data
## instead of the previously hard-coded 101 wild mice.
effectors <- data.frame(Flow.date = rep(1, length(wild_mice)))
effectors$Flow.date[facs_mln2$Flow.date=="10-Aug"] <- 2
effectors <- cbind(effectors, facs_mln2[,c(4,6,7,8,9,10)])
colnames(effectors)[4] <- "Wedge_cage"
col_var <- facs_mln2$Environment
shape_var <- facs_mln2$Genotype
## Same named-argument fix as in the lab-only call above.
gg_mln_lymph_wild <- var_plotter(input2, effectors, col_var, "Environment", col_var_col = NA,
                                 shape_var, "Genotype", "MLN Lymph Panel")
grid.arrange(gg_mln_lymph_wild)
pdf("int/DIST_EffectSize/MLN_lymph_pcoa_ES_combo+wild.pdf", height = 10, width = 15)
grid.arrange(gg_mln_lymph, gg_mln_lymph_wild, nrow=2)
dev.off()
#
#
#
#### supp figure (investigate separation)
## FACS MLN My
## MLN myeloid panel: same preprocessing as the lymphoid panels, applied to
## the six myeloid populations in columns 13:18.
facs_mln <- read.table("int/data/FACS_data/MLN_myeloid_FACS_metadata_names.txt", header = TRUE, sep = '\t')
rownames(facs_mln) <- as.character(facs_mln$mouse_id)
facs_mln <- facs_mln[!is.na(facs_mln$Weight_gain.),]
facs_mln <- facs_mln[!is.na(facs_mln$Neutrophils),]
## log2(x + 1) transform, then drop rows/columns that are >= 50% zeros.
input <- log2(facs_mln[,13:18]+1)
rownames(input) <- as.character(facs_mln$mouse_id)
rowZeros <- apply(input, 1, function(x) {sum(x == 0)})
colZeros <- apply(input, 2, function(x) {sum(x == 0)})
input <- input[which(rowZeros<0.5*ncol(input)), which(colZeros<0.5*nrow(input))]
facs_mln <- facs_mln[rownames(input),]
effectors <- facs_mln[,c(4,5,6,8,9,10)]
col_var <- facs_mln$Environment
shape_var <- facs_mln$Genotype
## col_var_col must be passed by name: omitting it shifted the remaining
## positional arguments so that 'Feature' was missing (runtime error).
gg_mln_myeloid <- var_plotter(input, effectors, col_var, "Environment", col_var_col = NA,
                              shape_var, "Genotype", "MLN Myeloid Panel")
#pdf('figures/effectSizes.pdf', height = 18, width = 20)
#grid.arrange(gg_blood_lymph,gg_mln_lymph,gg_mln_myeloid,nrow=3)
#dev.off()
#
#
## combo -------------------------------------------------------------------
## Merge the three FACS panels (blood lymphoid, MLN lymphoid, MLN myeloid)
## into one sample x feature table and run the combined PCoA/effect-size
## figure.  NOTE(review): cbind() assumes the three tables contain the same
## mice in the same order after sorting by mouse_id -- confirm upstream.
blood_lymph <- read.table("int/data/FACS_data/BLOOD_lymph_FACS_metadata_names.txt", header=T, sep='\t')
name_change <- read.table("int/data/FACS_data/lymph_name_change.txt")
colnames(blood_lymph)[13:27] <- as.character(name_change$V2)
colnames(blood_lymph)[13:27] <- paste0("BLOOD_lymph_", colnames(blood_lymph[,13:27]))
blood_lymph <- blood_lymph[order(blood_lymph$mouse_id),]
mln_lymph <- read.table("int/data/FACS_data/MLN_lymph_FACS_metadata_names.txt", header=T, sep='\t')
name_change <- read.table("int/data/FACS_data/lymph_name_change.txt")
colnames(mln_lymph)[13:27] <- as.character(name_change$V2)
colnames(mln_lymph)[13:27] <- paste0("MLN_lymph_", colnames(mln_lymph[,13:27]))
mln_lymph <- mln_lymph[order(mln_lymph$mouse_id),]
mln_myeloid <- read.table("int/data/FACS_data/MLN_myeloid_FACS_metadata_names.txt", header=T, sep='\t')
colnames(mln_myeloid)[13:18] <- paste0("MLN_myeloid_", colnames(mln_myeloid[,13:18]))
mln_myeloid <- mln_myeloid[order(mln_myeloid$mouse_id),]
full_facs <- cbind(blood_lymph, mln_lymph[,13:27], mln_myeloid[,13:18])
## Drop mice with any missing value across the 36 combined measurements.
## The empty case must be guarded: full_facs[-integer(0), ] selects ZERO
## rows, silently discarding the entire table.
num_check <- which(is.na(rowSums(full_facs[,13:48])))
if (length(num_check) > 0) {
  full_facs <- full_facs[-num_check,]
}
rownames(full_facs) <- as.character(full_facs$mouse_id)
## log2(x + 1) transform, then drop rows/columns that are >= 50% zeros.
input <- log2(full_facs[,13:48]+1)
rownames(input) <- as.character(full_facs$mouse_id)
rowZeros <- apply(input, 1, function(x) {sum(x == 0)})
colZeros <- apply(input, 2, function(x) {sum(x == 0)})
input <- input[which(rowZeros<0.5*ncol(input)), which(colZeros<0.5*nrow(input))]
full_facs <- full_facs[rownames(input),]
effectors <- full_facs[,c(4,5,6,8,9,10)]
col_var <- full_facs$Environment
shape_var <- full_facs$Genotype
## col_var_col must be passed by name: omitting it shifted the remaining
## positional arguments so that 'Feature' was missing (runtime error).
gg_facs_all <- var_plotter(input, effectors, col_var, "Environment", col_var_col = NA,
                           shape_var, "Genotype", "All FACS")
pdf('figures/effectSizes.pdf', height = 24, width = 20)
grid.arrange(gg_blood_lymph,gg_mln_lymph,gg_mln_myeloid,gg_facs_all,nrow=4)
dev.off()
#
#
#
#
#
#
#
## MLN stimulation Cytokines
## Cytokine concentrations from stimulated MLN cells; first column of the
## flat table is the mouse id, the rest are cytokine_stimulus measurements.
stim_cast <- read.table("int/data/MLN_stimulations/MLN_stimulation_flat.txt", T, '\t')
meta <- read.table("int/data/metadata/mice_metadata.11.19_mouse_id.txt", T, '\t')
nums <- stim_cast[,2:ncol(stim_cast)]
cc <- colnames(nums)
rr <- as.character(stim_cast$mouse_id)
#nums <- scale(nums)
nums <- data.frame(data.matrix(nums))
colnames(nums) <- cc
rownames(nums) <- as.character(stim_cast$mouse_id)
rownames(meta) <- as.character(meta$mouse_id)
## Keep only mice present in both the assay table and the metadata.
cyt_keep = intersect(meta$mouse_id, stim_cast$mouse_id)
cyt_meta = meta[cyt_keep,]
nums <- nums[cyt_keep,]
## log2(x + 1) transform, then drop rows/columns that are >= 50% zeros.
input=log2(nums+1)
rowZeros <- apply(input, 1, function(x) {sum(x == 0)})
colZeros <- apply(input, 2, function(x) {sum(x == 0)})
input = input[which(rowZeros<0.5*ncol(input)),which(colZeros<0.5*nrow(input)) ]
cyt_meta<-cyt_meta[rownames(input),]
effectors=cyt_meta[,c(2,3,4,6,7,8)]
col_var=cyt_meta$Environment
shape_var=cyt_meta$Genotype
gg_cytokines <- var_plotter(input, effectors, col_var, "Environment", col_var_col=NA,
shape_var, "Genotype", "MLN Cytokines")
#pdf("mln_cytokine_pcoa_ES.pdf", height = 5, width = 15)
#grid.arrange(gg_ser_cyt)
#dev.off()
## quick check
## Ad-hoc boxplots of the top cytokine/stimulus combinations by genotype,
## pregnancy status, and environment:genotype.
## NOTE(review): "IFN.y_StaphA" appears twice in measure.vars -- probably
## one of the duplicates was meant to be a different stimulus; confirm.
checker = input
checker$mouse_id <- rownames(checker)
checker <- data.frame(checker, cyt_meta)
gg_melt <- melt(checker, measure.vars = c("IL.10_ClostridiumP", "IL.10_CD3.CD28",
"IL.10_StaphA", "IL.10_CandidaA", "IFN.y_StaphA",
"IFN.y_PseudomonasA", "IFN.y_StaphA"),
id.vars = c("mouse_id", "Genotype", "Environment", "Pregnant"))
g1=ggplot(gg_melt, aes(Genotype, value, color=Genotype)) +
geom_boxplot() + geom_jitter(width = 0.1)
## NOTE(review): these bare `g1+facet_wrap(...)` expressions only display a
## plot in an interactive session; when source()d they are not printed.
g1+facet_wrap(~variable, nrow=3)
g2=ggplot(gg_melt, aes(Pregnant, value, color=Pregnant)) +
geom_boxplot() + geom_jitter(width = 0.1)
g2+facet_wrap(~variable, nrow=3)
colors_clusters = c(pal_d3("category10")(10), pal_d3("category20b")(20), pal_igv("default")(51))
g3=ggplot(gg_melt, aes(Environment:Genotype, value, color=Environment:Genotype)) +
geom_boxplot() + geom_jitter(width = 0.1) +
scale_color_manual(values=colors_clusters)
g3+facet_wrap(~variable, nrow=3)
pdf("figures/top_comp.pdf", height=15,width=22)
grid.arrange(
arrangeGrob(g1+facet_wrap(~variable, nrow=3),
g2+facet_wrap(~variable, nrow=3),
ncol=2),
g3+facet_wrap(~variable, nrow=2), nrow=2)
dev.off()
#
#
#
#
#
#
#
#
#
#
#
#
### serum cytokines --------------------------------------------------------
## Serum cytokine panel; first column of the table is the sample id.
ser_cyt <- read.table("int/data/Serum_stimulations/Serum_names_final.txt", header = TRUE, sep = '\t')
meta <- read.table("int/data/metadata/mice_metadata.11.19_mouse_id.txt", header = TRUE, sep = '\t')
## Keep only mice present in both the assay table and the metadata.
cyt_keep <- intersect(ser_cyt$sample, meta$mouse_id)
rownames(ser_cyt) <- ser_cyt$sample
rownames(meta) <- meta$mouse_id
ser_cyt_keep <- ser_cyt[cyt_keep,]
meta_keep <- meta[cyt_keep,]
## log2(x + 1) transform (dropping the id column), then remove rows/columns
## that are >= 50% zeros.
input <- log2(ser_cyt_keep[,-1]+1)
rowZeros <- apply(input, 1, function(x) {sum(x == 0)})
colZeros <- apply(input, 2, function(x) {sum(x == 0)})
input <- input[which(rowZeros<0.5*ncol(input)), which(colZeros<0.5*nrow(input))]
meta_keep <- meta_keep[rownames(input),]
effectors <- meta_keep[,c(2,3,4,6,7,8)]
col_var <- meta_keep$Environment
shape_var <- meta_keep$Genotype
## col_var_col must be passed by name: omitting it shifted the remaining
## positional arguments so that 'Feature' was missing (runtime error).
gg_ser_cyt <- var_plotter(input, effectors, col_var, "Environment", col_var_col = NA,
                          shape_var, "Genotype", "Serum Cytokines")
grid.arrange(gg_ser_cyt)
## Combined six-panel effect-size figure (uses the gg_* objects produced by
## the sections above).
pdf('figures/effectSizes2.pdf', height = 18, width = 35)
grid.arrange(gg_blood_lymph,gg_mln_lymph,
             gg_mln_myeloid,gg_facs_all,
             gg_cytokines, gg_ser_cyt,
             nrow=3, ncol=2)
dev.off()
|
af631943ef83f2d288522304c12ba10cb17515fa
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/12377_0/rinput.R
|
816977e94fdd0a4afe4711eddd0a71016d28369a
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read the phylogenetic tree in 12377_0.txt, remove its root, and write the
# unrooted tree to 12377_0_unrooted.txt (Newick format, via ape).
library(ape)
rooted_tree <- read.tree("12377_0.txt")
write.tree(unroot(rooted_tree), file = "12377_0_unrooted.txt")
|
3690ba063d4762449dceab007acf329a1231eb9f
|
87b115897644454429c5d5bdf34266131a5dc8b9
|
/ui.R
|
67de906a700264fee445d46a645c41aac50aaaa0
|
[] |
no_license
|
mtganalyze/curve_evaluation
|
1f4a5f6e5fcb1da589bdd18300f547839d85a9db
|
d8a2f80246fd6bc304455e9a16c9cf2e36bf4036
|
refs/heads/master
| 2020-03-24T19:23:08.564636
| 2018-07-30T20:12:38
| 2018-07-30T20:12:38
| 142,923,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,806
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
library(shiny)
library(ggplot2)
library(plotly)
library(dplyr)
library(magrittr)
# UI for the "Curve Evaluator": the user enters a mana curve (counts of
# 1- through 8-cost cards) and, on submit, the server renders the curve,
# its evaluation, and the mean executed curve.  The output/input ids here
# (manacurve, landcount, d1..d8, submit, evaluation, execurve) must match
# the ids used in the server definition.
shinyUI(fluidPage(
# Application title
titlePanel("Curve Evaluator"),
# Set the mana curve
h2("Mana Curve:"),
plotOutput("manacurve", width = "70%"),
textOutput("landcount"),
# Inline CSS styling the land-count text widget.
tags$head(tags$style("#landcount{color: navy;
font-size: 20px;
font-style: bold;
}")),
h3(" "),
mainPanel(
# Card counts per mana cost, split over two input rows (1-4 and 5-8).
inputPanel(
numericInput("d1", label = h4("1-drops:"), value = 2, width = "80px"),
numericInput("d2", label = h4("2-drops:"), value = 4, width = "80px"),
numericInput("d3", label = h4("3-drops:"), value = 5, width = "80px"),
numericInput("d4", label = h4("4-drops:"), value = 5, width = "80px")
),
inputPanel(
numericInput("d5", label = h4("5-drops:"), value = 4, width = "80px"),
numericInput("d6", label = h4("6-drops:"), value = 2, width = "80px"),
numericInput("d7", label = h4("7-drops:"), value = 0, width = "80px"),
numericInput("d8", label = h4("8-drops:"), value = 0, width = "80px")
),
# Evaluate the curve
actionButton("submit", label = "Evaluate Curve"),
# Show evaluation of curve
h3("Curve Evaluation:"),
plotOutput("evaluation", width = "110%"),
# Show execute curve
h3("Mean Executed Curve:"),
plotOutput("execurve"),
# documentation
h2("Links and documentation:"),
a("MTGANALYZE BLOG: on curve evaluation", href= "https://mtganalyze.github.io/post/curve_considerations/" , target = "_blank")
))
)
|
86333e1e425539e39ad3bcdd04746dd52045daea
|
ddce5d8a142724f15d1ed302b2dc15bc640adcd2
|
/sheet06.R
|
47d95e655d86ae5c61d150b0c3680721d24d1a58
|
[] |
no_license
|
MoonRiyadh/Stat-with-R
|
e9c8ee1e8fe38eb5f72d89cb6e92da63caf12d7f
|
90a0ef3becebc347b842305852dcef3437cefe38
|
refs/heads/master
| 2022-04-08T20:43:38.767200
| 2020-03-25T21:23:20
| 2020-03-25T21:23:20
| 250,092,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,389
|
r
|
sheet06.R
|
### Stats with R Exercise sheet 6
##########################
#Week 7: Correlation and Regression
##########################
## This exercise sheet contains the exercises that you will need to complete and
## submit by 23:55 on Monday, December 9. Write the code below the questions.
## If you need to provide a written answer, comment this out using a hashtag (#).
## Submit your homework via moodle.
## You are required to work together in groups of three students, but everybody
## needs to submit the group version of the homework via moodle individually.
## Please write below your (and your teammates') name, matriculation number.
## Name: 1. H T M A Riyadh, 2. Abdallah Bashir, 3. Maria Francis
## Matriculation number: 1. 2577735, 2. 2577831, 3. 2573627
## Change the name of the file by adding your matriculation numbers
## (sheet06_firstID_secondID_thirdID.R)
###########################################################################################
###########################################################################################
library(reshape)
library(languageR)
library(ggplot2)
library(cowplot)
#######################
### Exercise 1: Correlation
#######################
# a) Get some data - access the ratings data set in languageR and name it "data".
# The data set contains subjective frequency ratings and their length averaged over
# subjects, for 81 concrete English nouns.
data <- ratings
# b) Take a look at the data frame.
head(data)
# c) Let's say you're interested in whether there is a linear relationship between
# the word frequency of the 81 nouns and their length.
# Take a look at the relationship between the frequency and word length data by
# means of a scatterplot (use the ggplot library for this).
ggplot(data, aes(x = Length, y = Frequency)) +
geom_point()
# d) Judging from the graphs, do you think that word frequency and word length are
# in any way correlated with one another?
# Looking at the graph, it does not look like they are correlated to each other.
# e) Compute the Pearson correlation coefficient for the two variables by means
# of cor().
# Tell R to only include complete pairs of observations.
# As a reminder: Pearson coefficient denotes the covariance of the two variables
# divided by the product of their respective variance.
# It is scaled between 1 (for a perfect positive correlation) to -1 (for a perfect
# negative correlation).
#help("cor")
cor(x = data$Length, y = data$Frequency, use = "complete.obs", method = "pearson")
# f) Does the correlation coefficient suggest a small, medium or large effect?
# What about the direction of the effect?
#[1] -0.4281462
# The coefficient is close to -0.5, which suggests a medium effect.
# The direction of the effect is negative.
# g) Note that we have a large number of tied ranks in word length data
# (since there are multiple words with the length of e.g. 5).
# Thus, we might draw more accurate conclusions by setting the method to
# Kendall's tau instead of the Pearson correlation coefficient (which is the default).
# How do you interpret the difference between these 2 correlation coefficients?
cor(data$Length, data$Frequency,use="complete.obs", method = "kendall")
#[1] -0.316297
# Pearson's coefficient is a measure of the linear correlation between two variables
# and Kendall's method is a measure of rank correlation.
# h) What about significance? Use the more user-friendly cor.test()!
# Take a look at the output and describe what's in there.
# What do you conclude?
# Significance:- correlation coefficient value changed using kendall's method.
cor.test(data$Length, data$Frequency, method = "kendall")
#data: data$Length and data$Frequency
#z = -3.9186, p-value = 8.907e-05
#alternative hypothesis: true tau is not equal to 0
#sample estimates:
# tau
#-0.316297
# Conclusion: the p-value (8.907e-05) is smaller than 0.05, so we REJECT the null
# hypothesis of no association; the negative rank correlation is statistically
# significant.
# i) Finally, also calculate Spearman's rank correlation for the same data.
cor(data$Length, data$Frequency, use="complete.obs", method = "spearman")
#######################
### Exercise 2: Regression
#######################
# a) Fit a linear regression model to the data frame "data" from exercise 1
# for the variables Frequency (outcome variable) and Length (predictor variable).
# General form:
# "modelname <- lm(outcome ~ predictor, data = dataFrame, na.action = an action)"
linear_rm<-lm(Frequency ~ Length, data = data)
# b) How do you interpret the output? Is the relationship between the two variables
# positive or negative?
# Plot the data points and the regression line.
#Output:-
#(Intercept) Length
#6.5015 -0.2943
# Negative relationship between the two variables (slope is negative).
# Note: intercept/slope below are hard-coded from coef(linear_rm) above.
ggplot(data, aes(x = Length, y = Frequency))+
geom_point()+
geom_abline(intercept = 6.5015, slope=-0.2943)
# c) Run the plotting command again and have R display the actual words that belong
# to each point.
# (Don't worry about readability of overlapping words.)
ggplot(data, aes(x = Length, y = Frequency, label = rownames(data) ))+
geom_abline(intercept = 6.5015, slope=-0.2943)+
geom_text()
#######################
### Exercise 3: Regression
#######################
# a) Try this again for another example:
# Let's go back to our digsym data set.
# Set your wd and load the data frame digsym_clean.csv and store it in a variable.
# You can download this data frame from the material of week 6: t-test and friends.
dat <- read.csv("digsym_clean.csv")
str(dat)
#summary(dat)
# b) Suppose you want to predict reaction times in the digit symbol task by
# people's age.
# Fit a linear regression model to the data frame for the variables
# correct_RT_2.5sd (outcome variable) and Age (predictor variable).
# General form:
# "modelname <- lm(outcome ~ predictor, data = dataFrame, na.action = an action)"
# But first we need to cast the data to compute an RT mean (use correct_RT_2.5sd)
# for each subject, so that we have only one Age observation per Subject.
# Store the result in a new dataframe called "cast".
# In case you're wondering why we still have to do this - like the t-test,
# linear regression assumes independence of observations.
# In other words, one row should correspond to one subject or item only.
linear_rm_dat<-lm(correct_RT_2.5sd ~ Age, data = dat)
# Aggregate to one mean RT per Subject/Age combination (reshape::cast);
# the aggregated column comes back named "(all)" and is renamed to RTavg.
cast <- cast(dat, Subject + Age ~., fun.aggregate = mean, value = "correct_RT_2.5sd", na.rm = TRUE)
colnames(cast)[colnames(cast)=="(all)"] <- "RTavg"
head(cast)
# c) Now fit the regression model.
linear_rm_cast <- lm(RTavg ~ Age, data = cast)
# d) Let's go over the output - what's in there?
# How do you interpret the output?
linear_rm_cast
#output:
#Coefficients:
# (Intercept) Age
# 637.93 21.22
# Here the slope is positive. It means a positive relation between the input and
# the output.
# e) Plot the data points and the regression line.
ggplot(cast, aes(x = Age, y = RTavg))+
geom_point() +
geom_abline(intercept = 637.93, slope=21.22)
# f) Plot a histogram and qq-plot of the residuals.
# Does their distribution look like a normal distribution?
ggplot(data=cast, aes(x = residuals(linear_rm_cast))) +
geom_histogram()
ggplot(data = cast, aes(sample = residuals(linear_rm_cast))) +
stat_qq()
# The histogram looks roughly normally distributed, but the qq-plot suggests the
# residuals deviate from normality.
# g) Plot Cook's distance for the regression model from c) which estimates the
# residuals (i.e. distance between the actual values and the predicted value on
# the regression line) for individual data points in the model.
cooks_dist <- cooks.distance(linear_rm_cast)
ggplot(cast, aes(x = Age, y = cooks_dist )) +
geom_point()
# h) Judging from the plot in g) it actually looks like we have 1 influential
# observation in there that has potential to distort (and pull up) our regression
# line.
# The last observation (row 37) in cast has a very high Cook's distance
# (greater than 0.6).
# In other words, the entire regression function would change by more than
# 0.6 when this particular case would be deleted.
# What is the problem with observation 37?
# Run the plotting command again and have R display the subjects that belong to
# each point.
# Observation 37 (subject 40) seems to be an outlier.
ggplot(cast, aes(x = Age, y = cooks_dist, label = Subject)) +
geom_point() +
geom_text()
# i) Make a subset of "cast" by excluding the influential subject and name it cast2.
#dim(cast)
#37 3
cast2 <- subset(cast, Subject!= 40)
# j) Fit the model from c) again, using cast2, and take a good look at the output.
linear_rm_cast2<-lm(RTavg ~ Age, data = cast2)
linear_rm_cast2
# k) What's different about the output?
# How does that change your interpretation of whether age is predictive of RTs?
#Coefficients:
# (Intercept) Age
#862.05 11.98
# As we removed the outlier from the data, our model is no longer affected by it.
# As a result, we got a new slope value (11.98) compared to the previous
# result (21.22).
# l) Plot the regression line again - notice the difference in slope in
# comparison to our earlier model fit?
ggplot(cast2, aes(x = Age, y = RTavg))+
geom_point() +
geom_abline(intercept = 862.05, slope = 11.98)
# m) Display the two plots side by side to better see what's going on.
plot1 <- ggplot(cast, aes(x = Age, y = RTavg))+
geom_point() +
geom_abline(intercept = 637.93, slope=21.22)+
ggtitle("With outlier")
plot2 <- ggplot(cast2, aes(x = Age, y = RTavg))+
geom_point() +
geom_abline(intercept = 862.05, slope = 11.98)+
ggtitle("Without outlier")
plot_grid(plot1, plot2)
# n) Compute the proportion of variance in RT that can be accounted for by Age.
# In other words: Compute R Squared.
# Take a look at the Navarro book (Chapter on regression) if you have trouble
# doing this.
summary(linear_rm_cast2)$r.squared
# o) How do you interpret R Squared?
#0.03493231
# R squared of 0.0349 means that Age accounts for only about 3.5% of the variance
# in mean RT. R squared measures variance explained (not error rate): the closer
# it is to 1, the more of the outcome's variance the model explains.
|
fdee71da2e55fdf2d8c9e0291c66c4379df4db10
|
d426aa790bdbeac9a62fc3ad5cbb58d3384bf593
|
/man/pslScore.Rd
|
08e7e9e6586730aef3ec24c27089e56e3c172994
|
[] |
no_license
|
drmjc/blat
|
0977e4ae82ed2751c2f8922b6d79b6ed4607e792
|
84400169501b6f7e4e1e6ec9d88b27a53410f776
|
refs/heads/master
| 2020-05-31T05:03:53.663159
| 2013-08-29T00:17:06
| 2013-08-29T00:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
rd
|
pslScore.Rd
|
\name{pslScore}
\alias{pslScore}
\title{calculate the score from a psl alignment}
\usage{
pslScore(psl, isMrna = !pslIsProtein(psl[1, ]))
}
\arguments{
\item{psl}{a PSL object}
\item{isMrna}{logical: protein scores are 3x size of
mrna/nucleotide scores.}
}
\value{
vector psl alignment score
}
\description{
This calculates the score for a PSL alignment, based on C
code from Jim Kent, see comment in pslScore.R. This has
been optimised, breaking the problem into smaller chunks.
}
\examples{
f <- file.path(system.file(package="blat"), "examples", "test.psl")
psl <- import.psl(f, score=FALSE)
psl$score <- pslScore(psl, FALSE)
head(psl)
# or the simpler approach
f <- file.path(system.file(package="blat"), "examples", "test.psl")
psl <- import.psl(f, score=TRUE)
head(psl)
}
\author{
Mark Cowley, 12 April 2006
}
|
b6136f861e64d45d802a100330d96aeffcecb9bf
|
075a98b418bed4056b0a2b234044582152bee08b
|
/man/ExpTwoPlots.Rd
|
0cae2dea7cb5aa9f45f6a3e2147d398e97ea3513
|
[] |
no_license
|
daya6489/SmartEDA
|
f6493f9707c28c5bf5d6a079a992761e10305955
|
c72fa59fc9fdff7190c8bbe06dce3914ac840ba9
|
refs/heads/master
| 2022-12-11T19:29:18.814913
| 2022-12-03T09:44:54
| 2022-12-03T09:44:54
| 184,899,407
| 35
| 12
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,973
|
rd
|
ExpTwoPlots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_exp_ExpCompViz.R
\name{ExpTwoPlots}
\alias{ExpTwoPlots}
\title{Function to create two independent plots side by side for the same variable}
\usage{
ExpTwoPlots(
data,
plot_type = "numeric",
iv_variables = NULL,
target = NULL,
lp_geom_type = "boxplot",
lp_arg_list = list(),
rp_geom_type = "boxplot",
rp_arg_list = list(),
fname = NULL,
page = NULL,
theme = "Default"
)
}
\arguments{
\item{data}{dataframe}
\item{plot_type}{the plot type ("numeric", "categorical").}
\item{iv_variables}{list of independent variables. this input will be based off plot_type. List of numeric variables / List of categorical variables}
\item{target}{binary or multi-class dependent variable}
\item{lp_geom_type}{left side geom plot. this option is for univariate data. Options for numeric are "boxplot", "histogram", "density", "violin", "qqplot" and for categorical "bar", "pie", "donut"}
\item{lp_arg_list}{arguments to be passed to lp_geom_type. Default is list()}
\item{rp_geom_type}{right side geom plot. Options for numeric are "boxplot", "histogram", "density", "violin" "qqplot" and for categorical "bar", "pie", "donut"}
\item{rp_arg_list}{arguments to be passed to rp_geom_type. Default is list()}
\item{fname}{output file name. Output will be generated in PDF format}
\item{page}{output pattern. if Page=c(3,2), It will generate 6 plots with 3 rows and 2 columns}
\item{theme}{adding extra themes, geoms, and scales for 'ggplot2' (eg: themes options from ggthemes package)}
}
\value{
This function returns same variable in two different views of ggplot in one graph. And there is a option to save the graph into PDF or JPEG format.
}
\description{
To plot graph from same variable when Target=NULL vs. when Target = categorical
variable (binary or multi-class variable)
}
\examples{
## Bar graph for specified variable
# Let's consider mtcars data set, it has several numerical and binary columns
target = "gear"
categorical_features <- c("vs", "am", "carb")
numeircal_features <- c("mpg", "cyl", "disp", "hp", "drat", "wt", "qsec")
# plot numerical data two independent plots:
# Left side histogram chart wihtout target and Right side boxplot chart with target
num_1 <- ExpTwoPlots(mtcars, plot_type = "numeric",
iv_variables = numeircal_features, target = "gear",
lp_arg_list = list(alpha=0.5, color = "red", fill= "white",
binwidth=1),lp_geom_type = 'histogram',
rp_arg_list = list(fill = c("red", "green", "blue")),
rp_geom_type = 'boxplot', page = c(2,1),theme = "Default")
# plot categorical data with two independent plots:
# Left side Donut chart wihtout target and Right side Stacked bar chart with target
cat_1 <- ExpTwoPlots(mtcars,plot_type = "categorical",
iv_variables = categorical_features,
target = "gear",lp_arg_list = list(),lp_geom_type = 'donut',
rp_arg_list = list(stat = 'identity'),
rp_geom_type = 'bar',page = c(2,1),theme = "Default")
}
|
79006eb663a38347f78a9e573a9bda6d2fadf030
|
cc1a7d384cf35f5e2c5710deffd3095606cee52e
|
/man/Kfactor.Rd
|
bc1ca551a0662f66e250999653768909589af972
|
[] |
no_license
|
cran/tolerance
|
dd98abd74329c9c1fb9f3a13a433e7cbca88aaab
|
057076b0cb32882c2874503c65b68dbb13102402
|
refs/heads/master
| 2021-01-22T08:59:48.693139
| 2020-02-05T12:10:05
| 2020-02-05T12:10:05
| 17,700,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,708
|
rd
|
Kfactor.Rd
|
\name{K.factor}
\title{Estimating K-factors for Tolerance Intervals Based on Normality}
\alias{K.factor}
\usage{
K.factor(n, f = NULL, alpha = 0.05, P = 0.99, side = 1,
method = c("HE", "HE2", "WBE", "ELL", "KM", "EXACT",
"OCT"), m = 50)
}
\description{
Estimates k-factors for tolerance intervals based on normality.
}
\arguments{
\item{n}{The (effective) sample size.}
\item{f}{The number of degrees of freedom associated with calculating the estimate of the population standard deviation.
If \code{NULL}, then \code{f} is taken to be \code{n-1}.}
\item{alpha}{The level chosen such that \code{1-alpha} is the confidence level.}
\item{P}{The proportion of the population to be covered by the tolerance interval.}
\item{side}{Whether a 1-sided or 2-sided tolerance interval is required (determined by \code{side = 1} or \code{side = 2},
respectively).}
\item{method}{The method for calculating the k-factors. The k-factor for the 1-sided tolerance intervals
is performed exactly and thus is the same for the chosen method. \code{"HE"} is the
Howe method and is often viewed as being extremely accurate, even for small sample sizes. \code{"HE2"} is a second method due to Howe, which performs similarly to the Weissberg-Beatty method, but is computationally simpler. \code{"WBE"} is the
Weissberg-Beatty method (also called the Wald-Wolfowitz method), which performs similarly to the first Howe method for larger sample sizes. \code{"ELL"} is
the Ellison correction to the Weissberg-Beatty method when \code{f} is appreciably larger than \code{n^2}. A warning
message is displayed if \code{f} is not larger than \code{n^2}. \code{"KM"} is the Krishnamoorthy-Mathew approximation to the exact solution, which works well for larger sample sizes. \code{"EXACT"} computes the
k-factor exactly by finding the integral solution to the problem via the \code{integrate} function. Note the computation time of this method is largely determined by \code{m}. \code{"OCT"} is the Owen approach
to compute the k-factor when controlling the tails so that there is not more than (1-P)/2 of the data in each tail of the distribution.}
\item{m}{The maximum number of subintervals to be used in the \code{integrate} function. This is necessary only for \code{method = "EXACT"} and \code{method = "OCT"}. The larger
the number, the more accurate the solution. Too low of a value can result in an error. A large value can also cause the function to be slow for \code{method = "EXACT"}.}
}
\value{
\code{K.factor} returns the k-factor for tolerance intervals based on normality with the arguments specified above.
} \seealso{
\code{\link{integrate}}, \code{\link{K.table}}, \code{\link{normtol.int}}, \code{\link{TDist}}
}
\note{
For larger sample sizes, there may be some accuracy issues with the 1-sided calculation since it depends on the noncentral t-distribution.
The code is primarily intended to be used for moderate values of the noncentrality parameter. It will not be highly accurate, especially in the tails, for large values.
See \code{\link{TDist}} for further details.
}
\references{
Ellison, B. E. (1964), On Two-Sided Tolerance Intervals for a Normal Distribution, \emph{Annals of Mathematical
Statistics}, \bold{35}, 762--772.
Howe, W. G. (1969), Two-Sided Tolerance Limits for Normal Populations - Some Improvements, \emph{Journal of the
American Statistical Association}, \bold{64}, 610--620.
Krishnamoorthy, K. and Mathew, T. (2009), \emph{Statistical Tolerance Regions: Theory, Applications, and Computation}, Wiley.
Odeh, R. E. and Owen, D. B. (1980), \emph{Tables for Normal Tolerance Limits, Sampling Plans, and Screening}, Marcel-Dekker.
Owen, D. B. (1964), Controls of Percentages in Both Tails of the Normal Distribution, \emph{Technometrics}, \bold{6}, 377-387.
Wald, A. and Wolfowitz, J. (1946), Tolerance Limits for a Normal Distribution, \emph{Annals of the Mathematical Statistics},
\bold{17}, 208--215.
Weissberg, A. and Beatty, G. (1969), Tables of Tolerance Limit Factors for Normal Distributions, \emph{Technometrics},
\bold{2}, 483--500.
}
\examples{
## Showing the effect of the Howe, Weissberg-Beatty,
## and exact estimation methods as the sample size increases.
K.factor(10, P = 0.95, side = 2, method = "HE")
K.factor(10, P = 0.95, side = 2, method = "WBE")
K.factor(10, P = 0.95, side = 2, method = "EXACT", m = 50)
K.factor(100, P = 0.95, side = 2, method = "HE")
K.factor(100, P = 0.95, side = 2, method = "WBE")
K.factor(100, P = 0.95, side = 2, method = "EXACT", m = 50)
}
\keyword{file}
|
1036094cfd8939d96fcfc9ec4c0e232bb2594f90
|
68c3ba9a769f6ab9be05bbee19c550a972f5ddc3
|
/2020.06.10.case_when_tutorial.R
|
4a58142e852977fc61c5989f1d8c6fbb7a8bae35
|
[] |
no_license
|
KROVINSKI/2020.06.10_case_when.Tutorials
|
ba5166721c129366f7a7729a880477a0012df959
|
908c6af4bc3a94ffc9067140dd71f4c78988ec8f
|
refs/heads/master
| 2022-10-10T10:05:58.874856
| 2020-06-12T00:15:33
| 2020-06-12T00:15:33
| 271,384,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,904
|
r
|
2020.06.10.case_when_tutorial.R
|
# Hello World
# Below is a case when tutorial
#********************************
# Libraries
#********************************
library(dplyr)
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _|
#********************************
# Table of Contents
#********************************
# 1.0 Introduction
# 2.0 Bringing in Data
# 3.0 Creating a new expression
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _|
#********************************
# 1.0 Introduction
#********************************
# Case when in R can be executed with case_when()
# function in dplyr package. Dplyr package is provided with case_when()
# function which is similar to case when statement in SQL.
# We will be using iris data to depict the example of case_when()
# function.
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _|
#********************************
# 2.0 My data
#********************************
mydata2 <-iris
head(mydata2)
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _|
#********************************
# 3.0 New Conditional Statement
#********************************
# Create new/additional variable
# species_new using the mutate function and case when statement.
# NOTE(review): the result below is printed but not assigned, so mydata2
# itself is unchanged afterwards (confirmed by the head() call further down).
mydata2 %>% mutate(species_new = case_when(is.na(Species) ~ "missing",
Species=="setosa" ~ "setosa_new",
Species=="versicolor" ~ "versicolor_new",
Species=="virginica" ~ "virginica_new",
TRUE ~ "others"))
# you can use variables directly within the case_when()wrapper
# TRUE equivalent to ELSE statement
mydata2
head(mydata2)
#********************************
# End of Document | End of Script
#********************************
|
d197d6b88003baa6b1507279c2b900f881d55f11
|
3a4fc4a6c3c8e641de7218827e55d604103e3610
|
/Shiny/calculate_duration_code.R
|
d40f23a88e0ea531a667497f8a2849ab2756bc31
|
[] |
no_license
|
IUResearchApplications/GruberProject
|
5cbc0b7bcd456776f7d926df720e6280fbbdc6dc
|
5e2d0d593681c7bf1995dbbd65e808da127e5e0b
|
refs/heads/master
| 2021-06-14T16:52:42.880865
| 2017-04-06T17:56:51
| 2017-04-06T17:56:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,931
|
r
|
calculate_duration_code.R
|
# Reads minute-level Fitbit intensity data from a MySQL table, finds runs of
# consecutive readings with intensity >= 2 (treated as activity bouts), and
# writes each bout's start/end time, duration, and mean intensity to a CSV.
library(shiny)
library(shinyTime)
library(ggplot2)
library(plyr)
library(data.table)
library(lubridate)
library(scales)
library(RMySQL)
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# consider passing the path in instead.
setwd("~/Fitbit_project/Code")
# DB credentials are read from a config file (user/password/dbname/host/port
# in rows 1-5, column 2).
config <- read.table('rshiny.cnf.txt')
#
mydb <- dbConnect(MySQL(), user=as.character(config[1,2]), password=as.character(config[2,2]),
dbname=as.character(config[3,2]), host=as.character(config[4,2]),
port = as.integer(as.character(config[5,2])))
alldata <- dbGetQuery(conn = mydb, statement = "SELECT * FROM intens;")
alldata$in_time <-as.POSIXct(alldata$in_time)
# If Intensity greater than or equal to 2, then Intensity2 = 1, else Intensity2 = 0
alldata$intensity2 <- ifelse(alldata$intensity >= 2, 1, 0)
# Apply rle() to find runs where Intensity2 >= 1
selectRow <- rle(alldata$intensity2 >= 1)
# Find indices of the runs with value TRUE and length of at least 1
index <-which(selectRow$values == TRUE & selectRow$lengths >= 1)
# Check if selectRow has any value in it
any(index)
# Do a cumulative sum of the run lengths and extract the end positions of the
# runs with length of at least 1 using the above found indices
selectRow_lengths_cumsum <- cumsum(selectRow$lengths)
ends <- selectRow_lengths_cumsum[index]
# Find the start position of these runs (end of the previous run + 1;
# a run starting at row 1 is handled by the special case below)
newindex <- ifelse(index > 1, index - 1, 0)
starts <- selectRow_lengths_cumsum[newindex] + 1
if (0 %in% newindex) starts = c(1,starts)
# NOTE(review): difftime() without units= picks units automatically; the /60
# assumes seconds were chosen -- confirm readings span less than ~a few minutes.
duration <- as.numeric(difftime(alldata$in_time[ends], alldata$in_time[starts])) / 60
# Calculate average intensity per activity
avg_intens <- rep(0, length(starts))
for (i in 1:length(starts)){
avg_intens[i] <- sum(alldata$intensity[starts[i]:ends[i]])/(ends[i]-starts[i]+1)
}
# Extract data with the number of starts
StartTime <- alldata[starts, ]
EndTime <- alldata$in_time[ends]
DurationData <- cbind(StartTime, EndTime, duration, avg_intens)
write.csv(DurationData, file="DurationData.csv", row.names=FALSE)
|
379aacc6552a803aac6f52c8b70b19ee4558c47d
|
71f59cdc233491fb589630293c2379dcabd62c20
|
/cachematrix.R
|
e4f218c949a4281eed8aea2a57439408736e4f90
|
[] |
no_license
|
jynkim/ProgrammingAssignment2
|
69ebb4e925f4761659be432d662db21f307446af
|
ac5bcc7aa169633f96c1076f50d5811c831304a7
|
refs/heads/master
| 2021-01-18T11:13:04.389534
| 2016-02-22T06:08:09
| 2016-02-22T06:08:09
| 52,246,190
| 0
| 0
| null | 2016-02-22T04:07:19
| 2016-02-22T04:07:18
| null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
cachematrix.R
|
## As shown in the example with vectors, makeCacheMatrix function
## will be list of 4 functions that set the value of the matrix, get the value of the matrix
## and do the same for the inverse - using solve() function
## this function will take premade invertible matrix to cache the inversion
## Build a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures over the matrix `x` and the
## cached inverse:
##   set(y)        -- replace the stored matrix and drop any stale cached inverse
##   get()         -- return the stored matrix
##   setInv(solve) -- store a computed inverse in the cache
##   getInv()      -- return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # the old inverse no longer matches the new matrix
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setInv = function(solve) {
      cached_inverse <<- solve
    },
    getInv = function() {
      cached_inverse
    }
  )
}
## CacheSolve function utilizes list of functions defined earlier to
## cache the matrix inverse
## Return the inverse of the special "matrix" object produced by
## makeCacheMatrix. On a cache hit the stored inverse is returned (with a
## "getting cached data" message); on a miss the inverse is computed via
## solve(), stored in the object's cache, and returned.
##
## x   : a cache-matrix object (list with get/set/setInv/getInv closures)
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  if (is.null(inv)) {
    # cache miss: compute the inverse and remember it for next time
    inv <- solve(x$get(), ...)
    x$setInv(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
ad3d49b1953e0781c0119d09b03e249bdb35e649
|
d3f20ed2a13eb9ca153094f15c2e351e2642cb19
|
/analytics/vidooly_analytics/vidooly_stats_timeseries.R
|
90bc5281466831cfc874668997c61ae98a21d1dc
|
[] |
no_license
|
apoorvakarn/R-in-Action
|
b7d0003d9d0be0755b7710903fb56984a59dda9b
|
923dfe1f12cecfdd1511d2d55e4a4796499f9c82
|
refs/heads/master
| 2021-09-11T19:49:45.127777
| 2018-04-11T18:01:48
| 2018-04-11T18:01:48
| 100,679,882
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,225
|
r
|
vidooly_stats_timeseries.R
|
#Assignment - 2
#Forecasting of channel stats of a Youtube channel
data=read.csv("C:/rWork/rProjects/R-in-Action/analytics/vidooly_analytics/oct_march.csv",stringsAsFactors = FALSE)
str(data)
#Converting date column into date format
data$date=as.Date(data$date,format="%d-%m-%Y")
data$date
# --- Section 1: forecasting daily views ---
#Forecasting views
omn=as.data.frame(data[,c(5,2)])
colSums(is.na(data)) #Checking presence of NA values
#We use 'xts' and 'zoo' library for this assignment
library(xts)
library(zoo)
#Converting dataframe into a time-series object for further analysis
channel1 <- xts(omn[,2], order.by=as.Date(omn[,1], "%Y-%m-%d"))
channel1
#Plotting views against time
plot(channel1,ylab='Views',xlab='Date',main="Views between Oct 2016 - Mar 2017",col="grey")
#Differentiating views of order 1 to remove trend
plot(diff(channel1),ylab='Views',xlab='Date',main="Views between Oct 2016 - Mar 2017",col="grey")
library(tseries)
adf.test(channel1,alternative="stationary")
#p-value less than 0.05, null hypothesis rejected
#data is stationary
# NOTE(review): the ADF test already says the series is stationary, yet the
# model below still differences once (d=1) -- confirm d=1 is intended.
#Detrending the data
ch2=diff(channel1)
par(mfrow=c(1,2))
# diff() leaves an NA in the first position; replace it with 0
ch2 = as.data.frame(ch2)
ch2$V1[is.na(ch2$V1)]=0
ch2=as.xts(ch2)
class(ch2)
channel1
ch2
#To obtain values of p,d,q(d=1 already obtained)
acf(ch2,main="ACF plot")#ACF PLOT -- Moving Average or q
pacf(ch2,main="PACF plot")#PACF PLOT -- Auto Regressive or p
#Fitting ARIMA(0,1,1) model on the original series (arima differences internally)
ARIMAFit=arima(channel1,c(0,1,1))
summary(ARIMAFit)
#Forecasting values of next 3 months(91 days)
pred=predict(ARIMAFit, n.ahead=91)
str(pred)
# NOTE(review): pred + 2*se is roughly the upper ~95% confidence bound,
# not the point forecast -- confirm this is the intended "forecast".
pred2 = pred$pred+2*pred$se
pred2=round(pred2,0)
pred2 #Final forecast of views for Apr 01-Jun 30 2017
# --- Section 2: forecasting daily subscribers (same recipe as views) ---
#Forecasting subscribers
omn2=as.data.frame(data[,c(5,3)])
omn2
colSums(is.na(data)) #Checking NA values
library(xts)
library(zoo)
#Converting dataframe to time-series object
channel2 <- xts(omn2[,2], order.by=as.Date(omn2[,1], "%Y-%m-%d"))
channel2
#Plotting subscribers against time
plot(channel2,ylab='Subscribers',xlab='Date',main="Views between Oct 2016 - Mar 2017",col="grey")
#Removing trend
plot(diff(channel2,differences = 1),ylab='Views',xlab='Date',main="Views between Oct 2016 - Mar 2017",col="grey")
library(tseries)
adf.test(channel2,alternative="stationary")
#p-value greater than 0.05, hence we fail to reject null hypothesis
#Data is non-stationary
#Detrending data
ch3=diff(channel2)
ch3
# diff() leaves an NA in the first position; replace it with 0
ch3 = as.data.frame(ch3)
ch3$V1[is.na(ch3$V1)]=0
ch3=as.xts(ch3)
ch3
adf.test(ch3,alternative="stationary")
#p-value < 0.05, null hypothesis rejected
#Data is stationary
#To obtain p and q(d=1 already obtained)
acf(ch3,main="ACF plot")#ACF PLOT -- Moving Average or q
pacf(ch3,main="PACF plot")#PACF PLOT -- Auto Regressive or p
#Fitting ARIMA model on channel2
ARIMAFit2=arima(channel2,c(0,1,1))
summary(ARIMAFit2)
#Forecasting values for Apr-Jun 2017
pred3=predict(ARIMAFit2, n.ahead=91)
str(pred3)
# NOTE(review): as above, pred + 2*se is an upper confidence bound
pred4 = pred3$pred+2*pred3$se
pred4=round(pred4,0)
pred4 #Final forecast of subscribers for Apr 01-Jun 30 2017
# --- Section 3: forecasting videos count (same recipe as views/subscribers) ---
#Forecasting videoscount
omn3=as.data.frame(data[,c(5,4)])
omn3
nrow(omn3)
library(xts)
library(zoo)
#Converting dataframe into time-series object for further analysis
channel3 <- xts(omn3[,2], order.by=as.Date(omn3[,1], "%Y-%m-%d"))
channel3
#Plotting videoscount against time and checking for trend
plot(channel3,ylab='Videocount',xlab='Date',main="Videocount between Oct 2016 - Mar 2017",col="grey")
plot(diff((channel3),differences = 1),col="grey")
library(tseries)
adf.test(channel3,alternative="stationary")
#p-value greater than 0.05, hence we fail to reject null hypothesis
#Data is non-stationary
#Detrending data
ch4=diff(channel3)
ch4
# diff() leaves an NA in the first position; replace it with 0
ch4 = as.data.frame(ch4)
ch4$V1[is.na(ch4$V1)]=0
ch4=as.xts(ch4)
ch4
adf.test(ch4,alternative="stationary")
#p-value < 0.05, null hypothesis is rejected
#Data is stationary
#Obtaining p and q(d=1 already obtained)
acf(ch4,main="ACF plot")#ACF PLOT -- Moving Average or q
pacf(ch4,main="PACF plot")#PACF PLOT -- Auto Regressive or p
#Fitting ARIMA model
ARIMAFit3=arima((channel3),c(0,1,1))
summary(ARIMAFit3)
#Forecasting videoscount for Apr-Jun 2017
pred5=predict(ARIMAFit3, n.ahead=91)
str(pred5)
# NOTE(review): as above, pred + 2*se is an upper confidence bound
pred6 = (pred5$pred+2*pred5$se)
pred6=round(pred6,0)
pred6#Final forecast of videoscount for Apr 01 - Jun 30 2017
|
93fc29e9f98ce38cf9bb72a28ee35d646bb94494
|
5f1ad26538589accf8eac161f6584c2ddc300f00
|
/GARP.Rcheck/00_pkg_src/GARP/man/prepare4ColList.Rd
|
1a453c7072fefd0ef2fe9d7d9304b102ee9f44f7
|
[] |
no_license
|
nunngm/StatAnalysis
|
a9ea2518531b67a724d376dd199fdb7d2d800e1d
|
66e133e69544851c93e527cb1e333c501e8d1fc5
|
refs/heads/master
| 2021-07-11T04:30:53.987054
| 2020-06-23T17:39:51
| 2020-06-23T17:39:51
| 158,037,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 571
|
rd
|
prepare4ColList.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aheatmap.R
\name{prepare4ColList}
\alias{prepare4ColList}
\title{given a list of barColors and phenoData, prepare a list of colors that can be passed to aheatmap}
\usage{
prepare4ColList(barColList = NULL, phenoDat)
}
\arguments{
\item{barColList}{a named list of colors}
\item{phenoDat}{a data frame for pheno data}
}
\value{
a list of vectors each with length nrow(phenoDat)
}
\description{
given a list of barColors and phenoData, prepare a list of colors that can be passed to aheatmap
}
|
52d9560503e87c9e81b0a720460fa5e8832b02cc
|
37ce38ba0eff95451aebea810a1e2ab119f89a85
|
/man/decimal.WY.Rd
|
2267101e7c93158951afd386493002b92f95c7e6
|
[
"MIT"
] |
permissive
|
SwampThingPaul/AnalystHelper
|
39fdd58dc4c7300b6e72ff2713316809793236ce
|
eb570b69d7ea798facaf146d80bc40269a3d5028
|
refs/heads/master
| 2023-07-21T00:19:21.162374
| 2023-07-11T17:24:36
| 2023-07-11T17:24:36
| 179,672,539
| 1
| 0
|
MIT
| 2020-03-21T20:05:31
| 2019-04-05T11:53:19
|
R
|
UTF-8
|
R
| false
| true
| 590
|
rd
|
decimal.WY.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decimal.WY.R
\name{decimal.WY}
\alias{decimal.WY}
\title{Decimal water year from a date field}
\usage{
decimal.WY(date, WY.type = "FL")
}
\arguments{
\item{date}{Date in as.POSIXct or as.Date format}
\item{WY.type}{Specifies Federal ("Fed") or Florida ("FL") water years}
}
\value{
Returns decimal water year value based on Federal (Starts Oct 1) or Florida (Starts May 1),
}
\description{
Decimal water year from a date field
}
\examples{
# Decimal Water Year
decimal.WY(as.Date("2001-05-01"));
}
\keyword{WY}
|
d810e1975a230689c2259b054a070143c6f5461b
|
2ad3de8c74bf18501daf2793db46cf45c6ed9b03
|
/plot3.R
|
05b31e1dfc057ba4063acda84777d43749c7dc4e
|
[] |
no_license
|
jincera/ExData_Plotting1
|
b62fca03060c459c2037e0fa466a5f5adeacb8b3
|
aa621d7ff41d0942a3472f163ef9ca4d65c8df2f
|
refs/heads/master
| 2020-04-07T16:53:54.601366
| 2015-03-07T19:43:31
| 2015-03-07T19:43:34
| 31,577,854
| 0
| 0
| null | 2015-03-03T03:41:01
| 2015-03-03T03:41:01
| null |
UTF-8
|
R
| false
| false
| 1,609
|
r
|
plot3.R
|
# Exploratory Data Analysis
# Course project 1
# Code for Plot 3: energy sub-metering over 2007-02-01/02.
# The code assumes that the file "household_power_consumption.txt" is
# already in the working directory. As stated in the instructions, this
# data file can be downloaded from:
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Step 1: Read the data file. Fields are separated with ";", missing values are labeled as "?".
homePowerC<- read.table("household_power_consumption.txt", sep=";", header=TRUE, stringsAsFactors=FALSE, na.strings="?")
# Step 2: Keep only the readings from Feb 1st and Feb 2nd 2007.
# (String comparison works because Date is stored verbatim as d/m/YYYY.)
homePowerC_sub<-homePowerC[(homePowerC$Date=="1/2/2007" | homePowerC$Date=="2/2/2007" ),]
# Step 3: Add a column in POSIX format combining date and time.
homePowerC_sub$DateTime<-strptime(paste(homePowerC_sub$Date,homePowerC_sub$Time),"%d/%m/%Y %H:%M:%S")
# Step 4: Open the png graphics device with the required dimensions.
png(file = "./Plot3.png",width = 480, height = 480, bg = "transparent")
# Step 5: Start the plot with the first variable and label the axis.
with(homePowerC_sub,plot(DateTime,Sub_metering_1,type="l",bg="white",ylab="Energy sub metering",xlab=""))
# Step 6: Annotate (add) the other two sub-metering series.
lines(homePowerC_sub$DateTime,homePowerC_sub$Sub_metering_2,col="red")
lines(homePowerC_sub$DateTime,homePowerC_sub$Sub_metering_3,col="blue")
# Step 7: Add the legend (solid lines for all three series).
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lty=c(1,1))
# Step 8: Shut down the graphics device.
dev.off()
|
d44af21eb74a3142264fb48d13fa89b56cd2620c
|
432d68b44e60d0fa1c23efb71ffa69933aa89f8a
|
/R/Chapter 8/exercise-7.R
|
9b92649c81a1804578e6b97d3ee40212b76e0682
|
[] |
no_license
|
ShilpaGopal/ISLR-exercise
|
5a5951118a33ef7dd8a9b3a6f7493bfd7b89b5ff
|
9705905d36bd74086b133e4a5f937e7a4ab18e5e
|
refs/heads/master
| 2020-12-04T11:12:01.024951
| 2020-03-21T10:23:12
| 2020-03-21T10:23:12
| 231,741,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
r
|
exercise-7.R
|
# ISLR Chapter 8, Exercise 7: compare random-forest test MSE on the Boston
# housing data (response = medv, column 14) for three choices of mtry.
library(MASS)
library(randomForest)
set.seed(1101)
names(Boston)
# 50/50 train/test split of the rows.
train=sample(dim(Boston)[1],dim(Boston)[1]/2)
X.train=Boston[train, -14]
X.test= Boston[-train, -14]
Y.train=Boston[train, 14]
Y.test=Boston[-train, 14]
# Candidate numbers of variables tried at each split: p, p/2, sqrt(p).
p=dim(Boston)[2]-1
p.2=p/2
p.sq=sqrt(p)
# Passing xtest/ytest makes randomForest record the test-set MSE per tree.
rf.boston.p= randomForest(X.train, Y.train, xtest=X.test, ytest = Y.test, mtry = p, ntree = 500)
rf.boston.p.2 = randomForest(X.train, Y.train, xtest = X.test, ytest = Y.test,
                             mtry = p.2, ntree = 500)
rf.boston.p.sq = randomForest(X.train, Y.train, xtest = X.test, ytest = Y.test,
                              mtry = p.sq, ntree = 500)
# Test MSE as a function of the number of trees, one curve per mtry choice.
plot(1:500, rf.boston.p$test$mse, col="green", type = "l",
     xlab="Number of trees", ylab = "Test mse", ylim = c(10,30))
lines(1:500, rf.boston.p.2$test$mse, col = "red", type = "l")
lines(1:500, rf.boston.p.sq$test$mse, col = "blue", type = "l")
legend("topright", c("m=p", "m=p/2", "m=sqrt(p)"), col = c("green", "red", "blue"), cex = 1, lty = 1)
|
58f48640c4d442b9cfe24355e1f59d9cffa763c3
|
8bd4a967307b4185ce6e6fcdd8732cb258171b2c
|
/real/zeisel/pics/make_plots.R
|
f89ae16bb9b90f85f51e811d93fadf30cf1995c6
|
[] |
no_license
|
donkang75/MatrixEval2017
|
620ee9447d873b3a0e2a3e426458d1e127b054d8
|
a4c79e49d1a8ad62bfe7c9b3d0273aa199716c71
|
refs/heads/master
| 2020-04-26T11:47:21.594624
| 2018-08-08T11:37:58
| 2018-08-08T11:37:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,301
|
r
|
make_plots.R
|
# This makes bar plots of row/column access times.
# Colours are keyed by the "Type" labels in the timing files.
colors <- c(`ordinary`="darkblue",
            `sparse`="blue",
            `HDF5 (column)`="dodgerblue",
            `HDF5 (rectangle)`="lightblue")
pdf("zeisel_col.pdf")
par(mar=c(7.1, 5.1, 2.1, 2.1))
col.times <- read.table("../timings_col.txt", header=TRUE, sep="\t", stringsAsFactors=FALSE)
# Times are recorded in ms; divide by 1000 to plot seconds.
barplot(setNames(col.times$Time/1000, col.times$Type), ylab="Time (s)", cex.axis=1, cex.lab=1.4, cex.names=1.4,
        names.arg=sub(" ", "\n", col.times$Type), las=2, col=colors[col.times$Type],
        cex.main=1.4, main="Column access")
dev.off()
# Reuse the same palette for row access; only the label text changes.
names(colors) <- sub("column", "row", names(colors))
pdf("zeisel_row.pdf")
par(mar=c(7.1, 5.1, 2.1, 2.1))
row.times <- read.table("../timings_row.txt", header=TRUE, sep="\t", stringsAsFactors=FALSE)
barplot(setNames(row.times$Time/1000, row.times$Type), ylab="Time (s)", cex.axis=1, cex.lab=1.4, cex.names=1.4,
        names.arg=sub(" ", "\n", row.times$Type), las=2, col=colors[row.times$Type],
        cex.main=1.4, main="Row access")
dev.off()
# This makes bar plots of timing comparisons to R.
colors <- c(`ordinary (beachmat)`="darkblue",
            `ordinary (R)`="darkblue",
            `sparse (beachmat)`="blue",
            `sparse (R)`="blue",
            `HDF5 (beachmat)`="dodgerblue",
            `HDF5 (R)`="dodgerblue")
pchs <- c(16, 4, 2)   # one plotting symbol per benchmark mode below
pdf("zeisel_detection.pdf", width=10, height=6)
layout(cbind(1,2), width=c(5, 1.5))   # main panel + narrow legend panel
par(mar=c(5.1, 4.1, 2.1, 0.1))
plot(1,1,type='n', xlim=c(1, 6.5), xlab="", xaxt="n", ylim=c(10, 2000), log="y", ylab="Time (ms)")
# Overlay lollipop-style points for each of the three benchmark modes,
# nudging x by counter/5 so the modes do not overplot.
counter <- 0
for (mode in c("library_sizes", "detect_cells", "detect_genes")) {
    incoming <- read.table(paste0("../timings_", mode, ".txt"), header=TRUE, sep="\t", stringsAsFactors=FALSE)
    vals <- setNames(incoming$Time, incoming$Type)
    vals <- vals[names(colors)]   # enforce a fixed type order
    X <- seq_along(vals) + counter/5
    segments(X, vals, X, 1, lty=3)
    points(X, vals, col=colors, pch=pchs[counter+1], cex=1.5)
    counter <- counter + 1
}
axis(side=1, at=seq_along(colors)+1/5, labels=sub(" ", "\n", names(colors)), line=1, tick=FALSE)
par(mar=c(5.1, 0.1, 2.1, 0.1))
plot.new()
legend("left", col="black", pch=pchs, c("Library size per cell", "Number of cells per gene", "Number of genes per cell"), pt.cex=1.5)
dev.off()
|
048b22a41e87c090a654b70bdc3f072ffb6f276b
|
7ed4f875a449f41e89f2032e161c9554fc63c9ac
|
/count_reads.R
|
dd90b8996247cc64f00ee83f9f184beb29ec4191
|
[] |
no_license
|
elpsakkas/R_code_bioionformatics
|
31c62191e5bfbb219f4d3a8af6ac9ff3f881605f
|
a0893d3495315d6d6641b49c8cd3a34ddb8fc5dd
|
refs/heads/master
| 2021-01-20T06:05:33.070692
| 2017-04-30T10:40:53
| 2017-04-30T10:40:53
| 89,843,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
count_reads.R
|
# Count reads per gene for the Treutlein E18 single-cell BAM files and
# write the resulting gene x cell count matrix to CSV (Bioconductor pipeline).
library("Rsamtools")
library("GenomicFeatures")
library("GenomicAlignments")
library("BiocParallel")
# NOTE(review): machine-specific external-drive path.
dir <- "/Volumes/My Passport for Mac/Treutlein_E18/"
# file.choose() opens an interactive picker -- this script cannot run
# non-interactively as written.
sampleTable <- read.csv(file.choose(),row.names=1)
# One BAM per cell, named <cell>.merge.sort.bam under `dir`.
filenames <- file.path(dir, paste0(sampleTable$cell, ".merge.sort.bam"))
file.exists(filenames)   # printed as a sanity check only; result is not enforced
bamfiles <- BamFileList(filenames, yieldSize=2000000)
# Build a transcript DB from the custom GTF (reference + GFP construct + ERCC spikes).
gtffile <- file.path(dir,"ref-transcripts-pAcGFP1-N1-ERCC_spikes.gtf")
txdb <- makeTxDbFromGFF(gtffile, format="gtf", circ_seqs=character())
ebg <- exonsBy(txdb, by="gene")
# Union counting mode, single-end reads, unstranded.
# NOTE(review): prefer TRUE over the reassignable shorthand T.
se <- summarizeOverlaps(features=ebg, reads=bamfiles,
                        mode="Union",
                        singleEnd=T,
                        ignore.strand=TRUE)
counts <- assay(se)
write.csv(counts, file="counts_Treutlein_E18.csv")
|
e806f04beb0ef9e77bbb8f3455733f8cb2761fe8
|
a760871d2655f42d4c4e9fe8b198f1fd7ca3751b
|
/Data_Analytics-Assignment2/Retention.R
|
28520811dc0850dbdd0715fb3cdf3e36e4e39501
|
[] |
no_license
|
LTille/Data-Analytics
|
2237ff4bbaeb45e6ee8612a58b36a5956255dc6c
|
d860d98d3440fe7ddaa2d54d74a453953be3416f
|
refs/heads/master
| 2020-12-05T14:53:41.279798
| 2016-09-01T14:38:48
| 2016-09-01T14:38:48
| 67,138,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
Retention.R
|
# Exploratory analysis and linear regressions on the college retention data.
# NOTE(review): the absolute path is machine-specific; prefer a relative path.
retention <- read.csv("/Users/Tillie/Desktop/retention.csv", header = TRUE)
retention[1:3, ]          # peek at the first three rows
summary(retention)
# BUG FIX: the original line `rm=(list=ls())` was a typo for `rm(list=ls())`;
# as written it silently created variables named `list` and `rm` that shadowed
# the base functions. Clearing the workspace here would also have deleted the
# `retention` data just loaded, so the line is removed entirely.

# Summary-statistics table: one row per variable, one column per statistic.
Rowname <- c("spend","apret","top10","rejr","tstsc","pacc","strat","salar")
Columname <- c("Min","1stQuartile","Median","Mean","3rdQuartile","Max","SD","Variance","Count")
# Helper: the nine statistics for one numeric column (the six values from
# summary() -- min/quartiles/mean/max -- then SD, variance, and count).
stat_row <- function(v) c(as.vector(summary(v)), sd(v), var(v), length(v))
# vapply over the named columns yields a 9 x 8 matrix; transpose so each
# variable is a row, as in the original hand-built matrix.
Summary <- t(vapply(retention[, Rowname], stat_row, numeric(9)))
colnames(Summary) <- Columname
Summary

# Histograms for apret, tstsc, and salar.
library('ggplot2')
theme_set(theme_bw())
ggplot(retention, aes(x = apret)) + geom_histogram(binwidth = 10)
ggplot(retention, aes(x = tstsc)) + geom_histogram(binwidth = 0.5)
ggplot(retention, aes(x = salar)) + geom_histogram(binwidth = 0.5)

# Linear regression of apret on tstsc.
fit1 <- lm(apret ~ tstsc, data = retention)
summary(fit1)
ggplot(retention, aes(x = tstsc, y = apret)) + geom_point() + geom_smooth(method = lm, se = FALSE)

# Linear regression of apret on salar.
fit2 <- lm(apret ~ salar, data = retention)
summary(fit2)
ggplot(retention, aes(x = salar, y = apret)) + geom_point() + geom_smooth(method = lm, se = FALSE)

# Linear regression of apret on both tstsc and salar.
fit3 <- lm(apret ~ tstsc + salar, data = retention)
summary(fit3)

## Compute RMSE for the multiple-regression fit.
model.mse <- mean(residuals(fit3)^2)
rmse <- sqrt(model.mse)
rmse

cor(retention)

library('car')
suppressWarnings(  ## avoid printing the warnings
  scatterplotMatrix(retention, spread = FALSE, lty.smooth = 2,
                    main = "Scatter Plot Matrix")
)
|
aafb370dfca4b1892645cf903cc59202994fa1b0
|
21910d6b591aaafe801138753a038bd73e3eccac
|
/plot3.R
|
e32908267b47dc54c103b9f4ed4bfe7343011d3d
|
[] |
no_license
|
bgyarno/ExData_Plotting1
|
164e0a42b9bfaadcc4a47e8552bc27a728edb512
|
f5df834577667a60b38463548ba72deb6e9ae3f9
|
refs/heads/master
| 2021-01-21T16:53:29.299229
| 2017-05-22T02:39:37
| 2017-05-22T02:39:37
| 91,914,870
| 0
| 0
| null | 2017-05-20T20:33:40
| 2017-05-20T20:33:39
| null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
plot3.R
|
##Assignment 1
data <- read.csv('household_power_consumption.txt', sep = ";", header = TRUE, stringsAsFactors = FALSE)
##Convert Date string to POSIXlt
data$Date <- strptime(data$Date, "%e/%m/%Y")
##subset data for 2007-02-01 and 2007-02-02
data <- subset(data, data$Date == strptime("2007-02-01", "%Y-%m-%d") | data$Date == strptime("2007-02-02", "%Y-%m-%d"))
## create new field combining date and time
data$Date_Time <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
##convert remaining fields to numeric
data[,3:8] <- apply(data[3:8], 2, as.numeric)
## print plot to png
png(filename = "plot3.png")
plot(as.POSIXct(data$Date_Time),data$Sub_metering_1, type = "l", col = "black",ylab = "Energy sub metering", xlab = "")
lines(as.POSIXct(data$Date_Time), data$Sub_metering_2, col = "red")
lines(as.POSIXct(data$Date_Time), data$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1)
dev.off()
|
0abe6d2b5f221408f6d58343db42518414dbeb11
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/segclust2d/examples/segmentation-class.Rd.R
|
a1414a0fa40e5da8f044000195e502afa5c793d0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,327
|
r
|
segmentation-class.Rd.R
|
# Extracted examples for the segclust2d 'segmentation' class. Every example
# is wrapped in "Not run" guards, so sourcing this file only loads the
# package; the commented calls document the class's plotting/extraction API.
library(segclust2d)
### Name: segmentation-class
### Title: segmentation class description
### Aliases: segmentation-class print.segmentation plot.segmentation
###   likelihood.segmentation plot_likelihood get_likelihood
###   logLik.segmentation plot_BIC BIC.segmentation stateplot states
###   segment augment.segmentation segmap
### ** Examples
## Not run:
##D plot(res.segclust)
##D plot(res.segclust, nseg = 10, ncluster = 3)
## End(Not run)
## Not run:
##D plot_likelihood(res.seg)
## End(Not run)
## Not run:
##D logLik(res.seg)
## End(Not run)
## Not run:
##D plot_BIC(res.segclust)
## End(Not run)
## Not run:
##D plot_BIC(res.segclust)
## End(Not run)
## Not run:
##D stateplot(res.segclust)
##D stateplot(res.seg)
## End(Not run)
## Not run:
##D states(res.segclust)
##D states(res.seg)
## End(Not run)
## Not run:
##D segment(res.segclust)
##D segment(res.segclust, ncluster = 3, nseg = 30)
##D segment(res.seg)
##D segment(res.seg, nseg = 4)
## End(Not run)
## Not run:
##D augment(res.segclust)
##D augment(res.segclust, ncluster = 3, nseg = 30)
##D augment(res.seg)
##D augment(res.seg, nseg = 4)
## End(Not run)
## Not run:
##D segmap(res.segclust, coord.names = c("x","y"))
##D segmap(res.segclust, ncluster = 3, nseg = 30)
##D segmap(res.seg)
##D segmap(res.seg, nseg = 4)
## End(Not run)
|
0120c254dba8cfea3080fcb9d3dcf00150517c5d
|
d5cef95958632a73ea141236aea6a487ed6a3ee0
|
/Lab08Marker.R
|
6d2a10b3ff88972741dedb3e6cd4ceb351ca8d43
|
[] |
no_license
|
joyhumin/Marking-tool-for-stats220
|
1275b054bf4afb2930f645a0bd70b5ab72c1e7c2
|
a9318ee15a66a166844a1e38bbb4a7cfcecf27e5
|
refs/heads/master
| 2021-04-23T19:15:45.639728
| 2020-06-20T09:16:47
| 2020-06-20T09:16:47
| 249,978,497
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,352
|
r
|
Lab08Marker.R
|
# Interactive marking helper for Lab 08: opens each student's submission and
# prompts the marker for comments. Most of the mark-entry/diff code is
# currently commented out.
# Read student file.
student.df <- read.csv("lab08-marks-c-1.csv")
student.num <- nrow(student.df)
# mark.df <- data.frame(matrix(NA, nrow = student.num, ncol = ncol(student.df)))
# colnames(mark.df) <- colnames(student.df)
# NOTE(review): machine-specific absolute path.
mark.df <- read.csv("/Users/Joy/Documents/UoA/lab08-marks-c-1.csv")
# student.num = 2
# redo = c(10,30)
# require("diffobj")
# NOTE(review): 1:student.num misbehaves if the CSV has zero rows;
# seq_len(student.num) would be safer.
for (i in 1:student.num){
  student <- student.df[i,]
  student.name <- student[1]   # first column (currently unused)
  student.id <- student[2]     # second column: id used to locate the submission
  # Build and run a shell command that opens the submission
  # (the "open" command is macOS-only).
  open.r.cmd <- paste("open ","submissions/*",student.id,"*", sep = "")
  system(open.r.cmd)
  # open.r.cmd <- paste("find submissions/*", student.id, "*.R", sep = "")
  # filename <- system(open.r.cmd, intern = TRUE)
  # diffr("lab08-solution.R",filename)
  # dif <- diffFile(filename,"lab08-solution.R")
  # mark01 <- as.numeric(readline(prompt = "Question 01 generate file names(2 marks) -->"))
  # mark02 <- as.numeric(readline(prompt = "Question 02 print the max values(4 marks) -->"))
  # mark03 <- as.numeric(readline(prompt = "Question 03 calculate proportion(6 marks) -->"))
  # total <- sum(mark01,mark02,mark03)
  # NOTE(review): `comment` is overwritten on every iteration and never saved
  # while the mark-recording lines below remain commented out.
  comment <- readline(prompt = "Any comments for this student? --> ")
  # student[3:7] <- c(mark01,mark02,mark03,total,comment)
  # mark.df[i,] <- student
}
# write.table(mark.df,file = "/Users/Joy/Documents/UoA/lab08-marks-c-1.csv", row.names = FALSE, sep = ",")
|
6a43774f5ad98c728a86788c678965a17e16503b
|
dc3f1a4ba2311b757ba72072f4c54af961ffcc86
|
/RF_check/get_models.R
|
cbce180724aba3743a6cf0adfd6a7f2ba5b2884f
|
[] |
no_license
|
sebastianduchene/virus_analyses
|
2d002ee383af98d92eca126d15603bf9343ae41f
|
b35b13f8e2634badb9a7100c4dedea9c045896fc
|
refs/heads/master
| 2020-04-04T12:29:35.336776
| 2015-08-04T08:56:17
| 2015-08-04T08:56:17
| 19,427,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
get_models.R
|
# write function to run model generator and get BIC model
# run loop over complete and pruned data sets
# read trees and get mean rates with and without pruning
|
6b496daf5adbd5687375d3f1504dcc9caa922307
|
e8ea229abea92cf55b29e1c6efe36924a9b0d2e8
|
/stock.r
|
958d8d3cab5f99e3f8e042ac968b2c22b1c7de84
|
[] |
no_license
|
Roboer/stockpredict
|
097f220eab928652ee92106426c6eda1e5e80bae
|
38ad88c99c4a863be46f0ab6619838d86b49a7fa
|
refs/heads/master
| 2021-01-23T11:04:10.191214
| 2015-01-28T07:52:45
| 2015-01-28T07:52:45
| 29,715,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,674
|
r
|
stock.r
|
library(xts)

# Load the raw company price records ("|"-separated) and the label file.
company <- read.table("company_data.csv",sep='|')
updown <- read.table("God_File.csv",sep='')

# Parse the timestamp column (V4) once and keep it alongside the data.
# (Renamed from `t`, which shadowed base::t used further below.)
time_idx <- with(company, as.POSIXct(V4))
company$pTime <- time_idx
# Keep only company id (V1), timestamp (pTime) and price (V7).
companydata <- company[,c(1,8,7)]

#c1.q1 <- companydata[(companydata$V1 == 'c1') & (companydata$pTime <= '2011-04-15'),]
#c1q5.xts <- as.xts(c1.q5$V7,order.by=c1.q5$pTime)
#plot(c1q5.xts)

stock <- c('c1','c2','c3','c4','c5','c6','c7','c8','c9','c10','c11','c12','c13','c14','c15','c16','c17','c18')
# Quarter boundaries, stored as consecutive (start, end) pairs.
Q1.time <- c('2011-01-01','2011-04-15','2011-04-01','2011-07-15',
             '2011-07-01','2011-10-15','2011-10-01','2012-1-15',
             '2012-01-01','2012-4-15','2012-04-01','2012-07-15')

# Subset one company's rows to a [start, end] time window.
# @param companyid  company code, e.g. 'c3'
# @param timeindex  length-2 character vector c(start_date, end_date)
extract <- function(companyid,timeindex){
  companydata[(companydata$V1 == companyid) & (companydata$pTime >= timeindex[1]) & (companydata$pTime <= timeindex[2]),]
}

#for (i in stock )
c3.q1 <- extract('c3',c('2011-01-01','2011-04-15'))
c3.q2 <- extract('c3',c('2011-04-01','2011-07-15'))
# BUG FIX: q3 previously reused q2's window ('2011-04-01'..'2011-07-15');
# per the Q1.time table the third quarter is 2011-07-01 .. 2011-10-15.
c3.q3 <- extract('c3',c('2011-07-01','2011-10-15'))
c3.q4 <- extract('c3',c('2011-10-01','2012-01-15'))
c3.q5 <- extract('c3',c('2012-01-01','2012-04-15'))
c3.q6 <- extract('c3',c('2012-04-01','2012-07-15'))

c3q1.xts <- as.xts(c3.q1$V7,order.by=c3.q1$pTime)
c3q2.xts <- as.xts(c3.q2$V7,order.by=c3.q2$pTime)
c3q3.xts <- as.xts(c3.q3$V7,order.by=c3.q3$pTime)
c3q4.xts <- as.xts(c3.q4$V7,order.by=c3.q4$pTime)
c3q5.xts <- as.xts(c3.q5$V7,order.by=c3.q5$pTime)
c3q6.xts <- as.xts(c3.q6$V7,order.by=c3.q6$pTime)

########## Transpose each of the six quarters and stack them into matrix x ##########
x <- rbind(t(c3.q1$V7),t(c3.q2$V7[1:105]),t(c3.q3$V7[1:105]),t(c3.q4$V7[1:105]),t(c3.q5$V7[1:105]),t(c3.q6$V7[1:105]))
# NOTE(review): x has 6 rows but y carries 7 labels -- confirm which quarter
# each label corresponds to before fitting any model.
y <- c(0,-2,-2,-1,-1,1,0)
|
34c575ef332717c489a92887700fc913dfc9c950
|
170893806806dc601c61a5f95dd26490e164cc02
|
/tests/testthat/data_helpers.R
|
d06027de3da371f92a375092c6812260e7f285ab
|
[
"MIT"
] |
permissive
|
jtannen/svdcov
|
e9b28f5c1263f24f546373d96e42900203d78f41
|
24bae25ecb2cbfff7af69e5691635bc61b5106a1
|
refs/heads/master
| 2023-03-26T11:00:54.444289
| 2021-03-08T13:24:59
| 2021-03-08T13:24:59
| 345,179,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
data_helpers.R
|
# Simulate a (length(row_means) x length(col_means)) data matrix whose rows
# have the given means/covariance and whose columns are shifted by col_means.
#
# @param row_means numeric vector of per-row means (mu for mvrnorm).
# @param col_means numeric vector of per-column offsets; its length is also
#   the number of multivariate-normal samples drawn.
# @param cov_mat   covariance matrix passed to MASS::mvrnorm as Sigma.
# @return numeric matrix with rows named 1..length(row_means) and columns
#   named 1..length(col_means).
generate_cov_data <- function(row_means, col_means, cov_mat) {
  n_row <- length(row_means)
  n_col <- length(col_means)
  # One sample per column; mvrnorm returns samples in rows (n_col x n_row).
  draws <- MASS::mvrnorm(n = n_col, mu = row_means, Sigma = cov_mat)
  # col_means recycles down the sample axis, offsetting each sample/column.
  shifted <- draws + col_means
  out <- t(shifted)
  dimnames(out) <- list(seq_len(n_row), seq_len(n_col))
  out
}
# Logical mask marking the main diagonal of a matrix.
#
# @param mat a matrix of any type; only its dimensions are used.
# @return logical matrix of the same shape, TRUE exactly on the diagonal.
get_mat_diag <- function(mat) {
  idx_row <- row(mat)
  idx_col <- col(mat)
  idx_row == idx_col
}
# Logical mask for off-diagonal entries that lie inside a diagonal block.
# Entries (i, j) are TRUE when i and j fall in the same blocksize-wide
# block along the diagonal, excluding the main diagonal itself.
#
# @param mat       a matrix; only its dimensions are used.
# @param blocksize width/height of each diagonal block.
# @return logical matrix of the same shape as `mat`.
get_mat_block <- function(mat, blocksize) {
  block_of_row <- (row(mat) - 1) %/% blocksize
  block_of_col <- (col(mat) - 1) %/% blocksize
  on_main_diag <- row(mat) == col(mat)   # inlined get_mat_diag()
  block_of_row == block_of_col & !on_main_diag
}
# Block-diagonal covariance matrix: identity diagonal, with `offdiag` filling
# the off-diagonal cells inside each blocksize x blocksize diagonal block.
#
# @param nrow      matrix dimension (nrow x nrow).
# @param blocksize width of each diagonal block.
# @param offdiag   value for within-block off-diagonal entries.
# @return numeric nrow x nrow matrix.
generate_block_cov_mat <- function(nrow = 100, blocksize = 25, offdiag = 0.9) {
  cov_mat <- diag(nrow)
  # Inlined get_mat_block(): same block index, excluding the main diagonal.
  blk_r <- (row(cov_mat) - 1) %/% blocksize
  blk_c <- (col(cov_mat) - 1) %/% blocksize
  within_block <- blk_r == blk_c & row(cov_mat) != col(cov_mat)
  cov_mat[within_block] <- offdiag
  cov_mat
}
# Build the standard test fixture: fixed row/column means, a block-diagonal
# covariance matrix, and one simulated data matrix drawn from it.
#
# @param blocksize width of each diagonal covariance block.
# @param offdiag   within-block off-diagonal covariance value.
# @return list with row_means, col_means, cov_mat, and the simulated matrix x.
gen_block_diag_data <- function(blocksize, offdiag) {
  mu_rows <- 1:100
  mu_cols <- 1:200
  sigma <- generate_block_cov_mat(length(mu_rows), blocksize = blocksize, offdiag = offdiag)
  list(
    row_means = mu_rows,
    col_means = mu_cols,
    cov_mat = sigma,
    x = generate_cov_data(mu_rows, mu_cols, sigma)
  )
}
|
147fb838686a5101cc605cc74f212e6acccecce5
|
cbf2d312bbe49c0aaf4a95ee3765103eab6e260f
|
/man/sf_DKOS_Electoral_College_Map_v1.Rd
|
a6023b074db5819c39bdd7a135e095b225f5b2f7
|
[
"MIT"
] |
permissive
|
bhaskarvk/tilegramsR
|
4e8d0433e5bd84ca665085896afb38a07e0ff700
|
09b86496cca55880c0f2d8a844f2f0b1359b5dea
|
refs/heads/master
| 2021-10-12T06:26:12.338679
| 2021-09-30T14:42:12
| 2021-09-30T14:42:12
| 70,617,531
| 55
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 851
|
rd
|
sf_DKOS_Electoral_College_Map_v1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tilegrams.R
\docType{data}
\name{sf_DKOS_Electoral_College_Map_v1}
\alias{sf_DKOS_Electoral_College_Map_v1}
\alias{sf_DKOS_Electoral_College_Map_v1.states}
\alias{sf_DKOS_Electoral_College_Map_v1.centers}
\title{sf_DKOS_Electoral_College_Map_v1}
\format{sf}
\usage{
sf_DKOS_Electoral_College_Map_v1
sf_DKOS_Electoral_College_Map_v1.states
sf_DKOS_Electoral_College_Map_v1.centers
}
\description{
A `sf` object where each polygon equals one electoral college
A `sf` object where each polygon represents a state
A `sf` object of centroids of each state
}
\examples{
\dontrun{
library(leaflet)
library(tilegramsR)
data <- sf_DKOS_Electoral_College_Map_v1
leaflet(data,
options=leafletOptions(crs=leafletCRS("L.CRS.Simple"))) \%>\%
addPolygons()
}
}
\keyword{datasets}
|
9bf31737f4ea8435c038abd0d49720f26451b17d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MASS/examples/gamma.shape.glm.Rd.R
|
02687785d5b59b4f48252e758157a32349f6e1ad
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
gamma.shape.glm.Rd.R
|
# Extracted example for MASS::gamma.shape -- estimating the Gamma shape
# parameter (and dispersion) from fitted GLMs.
library(MASS)
### Name: gamma.shape
### Title: Estimate the Shape Parameter of the Gamma Distribution in a GLM
###   Fit
### Aliases: gamma.shape gamma.shape.glm print.gamma.shape
### Keywords: models
### ** Examples
# Classic clotting-time data (two lots) used in the Gamma-GLM literature.
clotting <- data.frame(
    u = c(5,10,15,20,30,40,60,80,100),
    lot1 = c(118,58,42,35,27,25,21,19,18),
    lot2 = c(69,35,26,21,18,16,13,12,12))
clot1 <- glm(lot1 ~ log(u), data = clotting, family = Gamma)
gamma.shape(clot1)
# Quasi-likelihood fit to the quine school-absence data (shipped with MASS);
# +0.1 avoids zeros under the log link.
gm <- glm(Days + 0.1 ~ Age*Eth*Sex*Lrn,
          quasi(link=log, variance="mu^2"), quine,
          start = c(3, rep(0,31)))
gamma.shape(gm, verbose = TRUE)
summary(gm, dispersion = gamma.dispersion(gm)) # better summary
|
4b453faad370721d1c728ff76aa1172191e9d349
|
f3631dc4bfba98dc99578185684b6737fc7a84c5
|
/osd2014_16S_asv/osd2014_fuzzyforests.R
|
78805a8f2418e0aa8a6a686b41835b05b6e81435
|
[] |
no_license
|
genomewalker/osd2014_analysis
|
a458f51dfee671a2cbc83e0d0d1f3e0cd33d53f5
|
5709ff15621a18696a34bdbf0d3332b2a73ce88e
|
refs/heads/master
| 2021-08-16T13:28:37.426404
| 2020-04-02T13:21:58
| 2020-04-02T13:21:58
| 145,523,004
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,290
|
r
|
osd2014_fuzzyforests.R
|
library(fuzzyforest)
library(tidyverse)
library(randomForest)
library(caret)
library(phyloseq)
library(igraph)
library(tidygraph)
library(ggraph)
source("osd2014_16S_asv/lib/fuzzyforest_lib.R")
# BEGIN: WARNING!!!! -------------------------------------------------------------
# You can access to the data used in this analysis in several ways:
# 1. You have a copy of the PostgreSQL DB
# 2. You downloaded the .Rdata files from http://osd2014.metagenomics.eu/ and placed them
# in the data folder
# 3. You can load the files remotely, it might take a while when the file is very large
# END: WARNING!!!! -------------------------------------------------------------
# BEGIN: WARNING!!: This will load all the data and results for the analysis --------
# Uncomment if you want to use it. Some of the analysis step might require long
# computational times and you might want to use a computer with many cores/CPUs
# load("osd2014_16S_asv/data/osd2014_fuzzyforest.Rdata", verbose = TRUE)
# load(url("http://osd2014.metagenomics.eu/osd2014_16S_asv/data/osd2014_fuzzyforest.Rdata"), verbose = TRUE)
# END: WARNING!! --------------------------------------------------------------------
# BEGIN: SKIP THIS IF YOU ALREADY LOADED ALL RESULTS AND DATA --------------------
# Load necessary data -----------------------------------------------------
# Three mutually exclusive ways to obtain the data; run only ONE of them.
# Option 1: query the local PostgreSQL database.
my_db <- src_postgres(host = "localhost", port = 5432, dbname = "osd_analysis", options = "-c search_path=osd_analysis")
osd2014_amp_mg_intersect <- tbl(my_db, "osd2014_amp_mg_intersect_2018") %>%
  collect(n = Inf)
osd2014_cdata <- tbl(my_db, "osd2014_cdata") %>%
  collect(n = Inf)
osd2014_meow_regions <- tbl(my_db, "osd2014_meow_regions") %>%
  collect(n = Inf)
# Keep only samples present in both amplicon/metagenome sets and in a MEOW province.
osd2014_cdata <- osd2014_cdata %>%
  filter(label %in% osd2014_amp_mg_intersect$label, meow_province %in% osd2014_meow_regions$meow_province)
osd2014_asv_connectedness <- tbl(my_db, "osd2014_asv_connectedness") %>%
  collect(n = Inf)
# NOTE(review): named st_100_order_terrestrial here, but the rest of the
# script uses osd2014_order_terrestrial (loaded from the .Rdata files below)
# -- confirm which one is intended.
st_100_order_terrestrial <- tbl(my_db, "osd2014_st_order_coastal") %>%
  collect(n = Inf)
# Option 2: load the .Rdata files downloaded to osd2014_16S_asv/data/.
load("osd2014_16S_asv/data/osd2014_16S_asv_networks_results.Rdata", verbose = TRUE)
load("osd2014_16S_asv/data/osd2014_ff_fit.Rdata", verbose = TRUE)
# Basic contextual data
load("osd2014_16S_asv/data/osd2014_basic_cdata.Rdata", verbose = TRUE)
# Option 3: load the same files remotely (may be slow for large files).
load(url("http://osd2014.metagenomics.eu/osd2014_16S_asv/data/osd2014_16S_asv_networks_results.Rdata"), verbose = TRUE)
load(url("http://osd2014.metagenomics.eu/osd2014_16S_asv/data/osd2014_ff_fit.Rdata"), verbose = TRUE)
# Basic contextual data
load(url("http://osd2014.metagenomics.eu/osd2014_16S_asv/data/osd2014_basic_cdata.Rdata"), verbose = TRUE)
# Load necessary data -----------------------------------------------------
# END: SKIP THIS IF YOU ALREADY LOADED ALL RESULTS AND DATA --------------------
# Set seed so that results are reproducible.
set.seed(1)
# Relative abundances: divide each sample's counts by its total.
osd2014_dada2_phyloseq_alpha_prop <- transform_sample_counts(osd2014_dada2_phyloseq_alpha, function(x) x/sum(x))
osd2014_order_terrestrial <- osd2014_order_terrestrial %>%
  filter(label %in% osd2014_cdata$label)
# Keep only the selected samples, drop all-zero taxa, then restrict the
# alpha table to the taxa present in the beta (network) object.
osd2014_dada2_phyloseq_alpha_filt <- subset_samples(osd2014_dada2_phyloseq_alpha_prop, label %in% osd2014_cdata$label)
osd2014_dada2_phyloseq_alpha_filt <- prune_taxa(taxa_sums(osd2014_dada2_phyloseq_alpha_filt) > 0, osd2014_dada2_phyloseq_alpha_filt)
osd2014_dada2_phyloseq_beta_filt <- prune_taxa(taxa_names(osd2014_dada2_phyloseq_beta), osd2014_dada2_phyloseq_alpha_filt)
#osd2014_dada2_phyloseq_beta_filt <- subset_samples(osd2014_dada2_phyloseq_beta_vst, label %in% osd2014_cdata$label)
#osd2014_dada2_phyloseq_beta_filt <- prune_taxa(taxa_sums(osd2014_dada2_phyloseq_beta_filt) > 0, osd2014_dada2_phyloseq_beta_filt)
# ASV table as a data frame (samples x ASVs) with the MEOW province appended
# as the LAST column; the loop below relies on that column position.
osd2014_dada2_phyloseq_beta_df <- (as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix")) %>% as_tibble(rownames = "label") %>%
  inner_join(osd2014_cdata %>%
               select(label, meow_province)) %>%
  as.data.frame() %>%
  column_to_rownames("label")
osd2014_dada2_phyloseq_beta_df$meow_province <- as.factor(osd2014_dada2_phyloseq_beta_df$meow_province)
# Run fuzzyforest with 5 repetitions of 10-fold cross-validation -----------
# Results accumulate in these lists, keyed "iter_<i>_fold_<o>".
ff_training <- vector(mode = "list")
ff_fit_r <- vector(mode = "list")
ff_predict <- vector(mode = "list")
ff_accuracy <- vector(mode = "list")
for (i in 1:5){
  # Stratified folds: createFolds() balances meow_province across folds and
  # (with returnTrain = TRUE) returns the TRAINING indices of each fold.
  training.samples <- osd2014_dada2_phyloseq_beta_df$meow_province %>%
    createFolds(returnTrain = TRUE)
  #createDataPartition(p = 0.8, list = FALSE)
  ff_training[[i]] <- training.samples
  for (o in 1:10){
    cat(paste("Rep:", i, "Fold:", o, "\n"))
    train.data <- osd2014_dada2_phyloseq_beta_df[training.samples[[o]], ]
    test.data <- osd2014_dada2_phyloseq_beta_df[-training.samples[[o]], ]
    # Network-module membership for every ASV, aligned to column order;
    # this is the grouping that fuzzy forests screens within.
    module_membership <- df_nodes %>%
      filter(asv %in% colnames(osd2014_dada2_phyloseq_beta_df)) %>%
      dplyr::slice(match(colnames(train.data), asv)) %>% .$com
    # Tuning parameters shared by the screening and selection steps.
    mtry_factor <- 1; min_ntree <- 500;  drop_fraction <- .5; ntree_factor <- 1
    nodesize <- 1; final_ntree <- 500
    screen_params <- screen_control(drop_fraction = drop_fraction,
                                    keep_fraction = .25, min_ntree = min_ntree,
                                    ntree_factor = ntree_factor,
                                    mtry_factor = mtry_factor)
    select_params <- select_control(drop_fraction = drop_fraction,
                                    number_selected = 500,
                                    min_ntree = min_ntree,
                                    ntree_factor = ntree_factor,
                                    mtry_factor = mtry_factor)
    train.data$meow_province <- as.factor(train.data$meow_province)
    # NOTE(review): `1:ncol(train.data) - 1` parses as (1:ncol) - 1, i.e.
    # 0:(ncol-1); the 0 index is silently dropped, so this DOES select all
    # predictor columns (everything but the last), but
    # `1:(ncol(train.data) - 1)` would state the intent unambiguously.
    ff_fit <- ff(train.data[,1:ncol(train.data) - 1], train.data[,ncol(train.data)], module_membership = module_membership,
                 screen_params = screen_params, select_params=select_params,
                 final_ntree = 500, num_processors = 4)
    ff_fit_r[[paste0("iter_",i,"_fold_",o)]] <- ff_fit
    # Accuracy of the final random forest on the held-out fold.
    pred_asv <- predict(ff_fit$final_rf, newdata = test.data[,1:ncol(test.data) - 1])
    ff_predict[[paste0("iter_",i,"_fold_",o)]] <- pred_asv
    test.data$rightPred <- pred_asv == test.data$meow_province
    accuracy <- sum(test.data$rightPred)/nrow(test.data)
    ff_accuracy[[paste0("iter_",i,"_fold_",o)]] <- accuracy
  }
}
# Check how accurate the models are ---------------------------------------
# One accuracy value per rep x fold (50 total), shown as a box/jitter plot.
bind_rows(map(1:50, get_accuracy)) %>% ggplot(aes(y = value, x = "accuracy")) +
  ggpol::geom_boxjitter(jitter.shape = 21, jitter.color = "black", jitter.alpha = 1,
                        color = "black", alpha = 1, errorbar.draw = TRUE, jitter.height = 0.05, jitter.width = 0.075, width = 0.4, errorbar.length = 0.2) +
  geom_dotplot(binaxis = "y", dotsize = 0.2, stackdir = "down", binwidth = 0.1,
               position = position_nudge(-0.025)) +
  theme_bw() +
  ylab("Accuracy") +
  xlab("")+
  theme(axis.text.x = element_blank(),
        axis.ticks.x = element_blank())
# Get top features --------------------------------------------------------
# The broken-stick model selects the most important features per run; we
# then keep the ASVs that are selected in more than 40% of the 50
# rep-by-fold runs, recording their mean/median importance.
top_features <- map_df(1:50, get_ftable_bs) %>%
  left_join(df_nodes %>% dplyr::rename(feature_name = asv)) %>%
  dplyr::group_by(run) %>%
  dplyr::top_n(n = 500, wt = variable_importance) %>%
  ungroup() %>% select(name, com, feature_name, variable_importance) %>%
  group_by(name, com, feature_name) %>% dplyr::summarise(n = n(), mean_imp = mean(variable_importance), median_imp = median(variable_importance)) %>%
  mutate(prop = n/50) %>%
  filter(prop > 0.4)
# Define some colors for phylum -------------------------------------------
# p_colors <- tibble(Phylum = c("Proteobacteria", "Bacteroidetes", "Cyanobacteria", "Actinobacteria",
# "Verrucomicrobia", "Planctomycetes", "Euryarchaeota", "Marinimicrobia_(SAR406_clade)",
# "Firmicutes", "Lentisphaerae", "Tenericutes", "Chlamydiae", "Epsilonbacteraeota",
# "Patescibacteria", "Other"),
# colour = c("#cf4149", "#8f62ca", "#66b14a", "#6b8bcd", "#c2ad4b", "#ce7531",
# "#4baf90", "#c85d9d", "#542437", "#c26f65", "#A5C990", "#767834",
# "#559279", "#D3BBC3", "#7f8c8d"))
#
#
# Get all components ------------------------------------------------------
# Per-component ASV abundance table: for each retained network component
# (com_3/com_5/com_6 are deliberately excluded), join the component's node
# taxonomy with the relative-abundance matrix, in long form, with samples
# ordered along the terrestrial gradient. Orders contributing <= 1% of total
# abundance are lumped into "Other".
all_comps <- bind_rows(
  get_g("com_1") %>%
    activate(nodes) %>%
    as_tibble() %>%
    inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
                 as_tibble(rownames = "label") %>%
                 gather(asv, prop, -label)) %>%
    mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  get_g("com_2") %>%
    activate(nodes) %>%
    as_tibble() %>%
    inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
                 as_tibble(rownames = "label") %>%
                 gather(asv, prop, -label)) %>%
    mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  # get_g("com_3") %>%
  #   activate(nodes) %>%
  #   as_tibble() %>%
  #   inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
  #                as_tibble(rownames = "label") %>%
  #                gather(asv, prop, -label)) %>%
  #   mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  get_g("com_4") %>%
    activate(nodes) %>%
    as_tibble() %>%
    inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
                 as_tibble(rownames = "label") %>%
                 gather(asv, prop, -label)) %>%
    mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  # get_g("com_5") %>%
  #   activate(nodes) %>%
  #   as_tibble() %>%
  #   inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
  #                as_tibble(rownames = "label") %>%
  #                gather(asv, prop, -label)) %>%
  #   mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  get_g("com_7") %>%
    activate(nodes) %>%
    as_tibble() %>%
    inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
                 as_tibble(rownames = "label") %>%
                 gather(asv, prop, -label)) %>%
    mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  get_g("com_8") %>%
    activate(nodes) %>%
    as_tibble() %>%
    inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
                 as_tibble(rownames = "label") %>%
                 gather(asv, prop, -label)) %>%
    mutate(label = fct_relevel(label, osd2014_order_terrestrial$label)),
  get_g("com_9") %>%
    activate(nodes) %>%
    as_tibble() %>%
    inner_join(as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix") %>%
                 as_tibble(rownames = "label") %>%
                 gather(asv, prop, -label)) %>%
    mutate(label = fct_relevel(label, osd2014_order_terrestrial$label))
) %>%
  # agg_prop: total abundance of each Order across all samples/components.
  group_by(Order) %>%
  mutate(agg_prop = sum(prop)) %>% ungroup() %>% mutate(order_mod = ifelse(agg_prop > 0.01, Order, "Other"))
# all_comps_order <- all_comps %>% select(order_mod) %>% unique() %>%
#   mutate(colour = o_colors)
# Manual colour palette: one colour per taxonomic order shown in the figures.
# BUG FIX: "Cytophagales " carried a trailing space, so it could never match
# any Order value downstream (the ifelse(Order %in% ...) lumping) and that
# taxon silently fell into "Other"; the space is removed.
order_mod <- c("SAR11_clade", "Synechococcales", "Flavobacteriales", "Rhodobacterales",
               "Verrucomicrobiales", "Betaproteobacteriales", "Oceanospirillales",
               "Cellvibrionales", "Micrococcales", "Rhodospirillales", "KI89A_clade",
               "Cytophagales", "Arenicellales", "Chitinophagales", "Tenderiales", "Other")
o_colors <- c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c",
              "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00",
              "#cab2d6", "#6a3d9a", "#D0D0D0", "#2E5158", "#6A6D51",
              "#F0D999", "#43233A", "#666666")
stopifnot(length(order_mod) == length(o_colors))  # keep palette in sync
all_comps_order <- tibble(order_mod = order_mod, colour = o_colors)
# Get the graph with all components and save file for gephi ---------------
# Assemble one graph holding the selected network components and export it
# for visualisation in Gephi (GraphML format).
#
# Every component is prepared identically, so the five copy-pasted pipelines
# are factored into a single helper: taxonomic orders outside the colour
# lookup table are collapsed into "Other", the colour is attached, and the
# fuzzy-forest importance scores of the member ASVs are joined in.
prep_component <- function(com) {
  get_g(com) %>%
    activate(nodes) %>%
    mutate(order_mod = ifelse(Order %in% all_comps_order$order_mod, Order, "Other")) %>%
    inner_join(all_comps_order) %>%
    inner_join(top_features %>% select(name, mean_imp, median_imp))
}
# Components 3, 5, 6 and 8 are intentionally excluded, as in the original
# hand-written list (they were commented out there).
do.call(bind_graphs,
        lapply(c("com_1", "com_2", "com_4", "com_7", "com_9"), prep_component)) %>%
  write.graph(file = "osd2014_16S_asv/data/osd2014_fuzzyforest.graphml", format = "graphml")
# Plot the abundance of each ASV in each LC and MEOW province -------------
# Plot the per-component ASV abundances for each MEOW province of interest
# and stack the per-province panels into one A4-landscape PDF.
meow_provinces <- c(
  "Tropical Northwestern Atlantic", "Warm Temperate Northwest Atlantic",
  "Cold Temperate Northwest Atlantic", "Lusitanian", "Mediterranean Sea",
  "Northern European Seas"
)
plots <- lapply(meow_provinces, plot_com)
ggpubr::ggarrange(plotlist = plots, common.legend = TRUE,
                  ncol = 1, nrow = length(plots))
ggsave(plot = last_plot(),
       filename = "osd2014_16S_asv/figures/osd2014_fuzzyforests_bplots.pdf",
       width = 11.69, height = 8.27)
# Test with only the selected ASVs ----------------------------------------
# Re-run the classification using ONLY the ASVs selected by the fuzzy forest.
top_features$feature_name
# Convert counts to per-sample relative abundances.
osd2014_dada2_phyloseq_alpha_prop <- transform_sample_counts(osd2014_dada2_phyloseq_alpha, function(x) x/sum(x))
# Keep only samples present in the contextual-data table.
# NOTE(review): this overwrites the global osd2014_order_terrestrial in place.
osd2014_order_terrestrial <- osd2014_order_terrestrial %>%
  filter(label %in% osd2014_cdata$label)
osd2014_dada2_phyloseq_alpha_filt <- subset_samples(osd2014_dada2_phyloseq_alpha_prop, label %in% osd2014_cdata$label)
# Restrict to the selected ASVs, then drop any ASV with zero total abundance
# after the sample filtering above.
osd2014_dada2_phyloseq_beta_filt <- prune_taxa(top_features$feature_name, osd2014_dada2_phyloseq_alpha_filt)
osd2014_dada2_phyloseq_beta_filt <- prune_taxa(taxa_sums(osd2014_dada2_phyloseq_beta_filt) > 0, osd2014_dada2_phyloseq_beta_filt)
#osd2014_dada2_phyloseq_beta_filt <- subset_samples(osd2014_dada2_phyloseq_beta_vst, label %in% osd2014_cdata$label)
# Build a classic data.frame (samples x ASVs) with the MEOW province joined
# in as the last column; row names are the sample labels.
osd2014_dada2_phyloseq_beta_df <- (as(otu_table(osd2014_dada2_phyloseq_beta_filt), "matrix")) %>% as_tibble(rownames = "label") %>%
  inner_join(osd2014_cdata %>%
               select(label, meow_province)) %>%
  as.data.frame() %>%
  column_to_rownames("label")
# Response variable must be a factor for the random-forest classifier.
osd2014_dada2_phyloseq_beta_df$meow_province <- as.factor(osd2014_dada2_phyloseq_beta_df$meow_province)
# Repeated cross-validation of the fuzzy-forest classifier on the selected
# ASVs: 5 repetitions of a 10-fold CV.  For each repetition, createFolds()
# splits the samples into k = 10 folds stratified by MEOW province; each
# fold is held out in turn while a fuzzy forest is trained on the rest.
ff_training_test <- vector(mode = "list")  # fold assignments, one entry per repetition
ff_fit_r_test <- vector(mode = "list")     # fitted fuzzy-forest objects, per iter/fold
ff_predict_test <- vector(mode = "list")   # held-out predictions, per iter/fold
ff_accuracy_test <- vector(mode = "list")  # held-out accuracy, per iter/fold
for (i in seq_len(5)) {
  # returnTrain = TRUE: each list element holds the TRAINING row indices of
  # one fold.  k = 10 is the createFolds() default, made explicit because
  # the inner loop relies on the fold count.
  training.samples <- osd2014_dada2_phyloseq_beta_df$meow_province %>%
    createFolds(returnTrain = TRUE, k = 10)
  #createDataPartition(p = 0.8, list = FALSE)
  ff_training_test[[i]] <- training.samples
  for (o in seq_along(training.samples)) {
    train.data <- osd2014_dada2_phyloseq_beta_df[training.samples[[o]], ]
    test.data <- osd2014_dada2_phyloseq_beta_df[-training.samples[[o]], ]
    # Network component ("module") of each ASV, aligned with the column
    # order of train.data; ff() groups features by this vector.
    module_membership <- df_nodes %>%
      filter(asv %in% colnames(osd2014_dada2_phyloseq_beta_df)) %>%
      dplyr::slice(match(colnames(train.data), asv)) %>% .$com
    # Fuzzy-forest tuning parameters (nodesize and final_ntree are kept for
    # reference; the ff() call below passes final_ntree literally).
    mtry_factor <- 1; min_ntree <- 500; drop_fraction <- .5; ntree_factor <- 1
    nodesize <- 1; final_ntree <- 500
    screen_params <- screen_control(drop_fraction = drop_fraction,
                                    keep_fraction = .25, min_ntree = min_ntree,
                                    ntree_factor = ntree_factor,
                                    mtry_factor = mtry_factor)
    select_params <- select_control(drop_fraction = drop_fraction,
                                    number_selected = 500,
                                    min_ntree = min_ntree,
                                    ntree_factor = ntree_factor,
                                    mtry_factor = mtry_factor)
    train.data$meow_province <- as.factor(train.data$meow_province)
    # All columns except the last are ASV abundances; the last column is the
    # response.  seq_len(ncol(.) - 1) replaces the original `1:ncol(.) - 1`,
    # which evaluated to 0:(n - 1) and only selected the right columns
    # because `[` silently drops a 0 index.
    ff_fit <- ff(train.data[, seq_len(ncol(train.data) - 1)],
                 train.data[, ncol(train.data)],
                 module_membership = module_membership,
                 screen_params = screen_params, select_params = select_params,
                 final_ntree = 500, num_processors = 4)
    ff_fit_r_test[[paste0("iter_", i, "_fold_", o)]] <- ff_fit
    # (A fully commented-out exploratory variable-importance plot was
    # removed here; recover it from version history if needed.)
    # Evaluate on the held-out fold.
    pred_asv <- predict(ff_fit$final_rf,
                        newdata = test.data[, seq_len(ncol(test.data) - 1)])
    ff_predict_test[[paste0("iter_", i, "_fold_", o)]] <- pred_asv
    test.data$rightPred <- pred_asv == test.data$meow_province
    accuracy <- sum(test.data$rightPred) / nrow(test.data)
    ff_accuracy_test[[paste0("iter_", i, "_fold_", o)]] <- accuracy
  }
}
# Fetch the X-th cross-validation accuracy as a one-row tibble (column
# `value`), tagged with the name of the corresponding iter/fold entry.
get_accuracy_test <- function(X) {
  acc <- as_tibble(ff_accuracy_test[[X]])
  mutate(acc, run = names(ff_accuracy_test)[X])
}
# Box/jitter plot of the held-out accuracies over all repetitions x folds.
# Fixes: the original called `get_accuracy` while the accuracies collected
# above are read by `get_accuracy_test` (defined just before this block),
# and `seq_along(ff_accuracy_test)` replaces the hard-coded `1:50`
# (= 5 repetitions x 10 folds).
bind_rows(map(seq_along(ff_accuracy_test), get_accuracy_test)) %>%
  ggplot(aes(y = value, x = "accuracy")) +
  ggpol::geom_boxjitter(jitter.shape = 21, jitter.color = "black", jitter.alpha = 1,
                        color = "black", alpha = 1, errorbar.draw = TRUE,
                        jitter.height = 0.05, jitter.width = 0.075,
                        width = 0.4, errorbar.length = 0.2) +
  theme_bw() +
  ylab("Accuracy") +
  xlab("") +
  theme(axis.text.x = element_blank(),
        axis.ticks.x = element_blank())
# BEGIN: Save objects ------------------------------------------------------------
# WARNING!!! You might not want to run this code --------------------------
# Persists the ENTIRE global environment (data, fitted forests, plots) so
# the analysis can be reloaded later without re-running the models.
save.image("osd2014_16S_asv/data/osd2014_fuzzyforest.Rda")
# END: Save objects ------------------------------------------------------------
|
afd83e230a18e24a788a9639f7bb2cb61ef45323
|
179211fef98e5123a3c27f11ae5385420be9adcf
|
/bootstrap/ICES_ecoregions.R
|
99472539ac768b21b24052c522569f67bc6930c9
|
[
"MIT"
] |
permissive
|
ices-taf/2020_ETC_fish-distribution-indicator
|
c5ef25824be96b4a30c7d0caa31d8a7b94c33d7a
|
017110e99467ef5bad9a3e903339590fe773b47b
|
refs/heads/main
| 2023-01-22T10:03:35.730327
| 2020-12-06T09:51:17
| 2020-12-06T09:51:17
| 309,326,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
ICES_ecoregions.R
|
# Download the "Greater North Sea" ICES ecoregion polygon and export it as a
# CSV with the geometry serialised as WKT.
library(icesTAF)
taf.library(icesFO)  # load icesFO from the project-local TAF library
ecoregion <- icesFO::load_ecoregion("Greater North Sea")
# GEOMETRY=AS_WKT stores the polygon as a WKT text column;
# delete_layer = TRUE overwrites any previous export.
sf::st_write(ecoregion, "ecoregion.csv",
             layer_options = "GEOMETRY=AS_WKT",
             delete_layer = TRUE)
|
8b3f611465f0148a18d45f0674418a56d4c00653
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/netCoin/examples/timeCoin.Rd.R
|
72fb1f0b85920137ab8e3569c1ebb165de1eb6d5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
timeCoin.Rd.R
|
# Extracted example code for netCoin::timeCoin (from its Rd documentation).
library(netCoin)
### Name: timeCoin
### Title: Networked coincidences.
### Aliases: timeCoin
### ** Examples
# Database of 19th century sociologists
data(sociologists)
# Build an interactive lifespan timeline (birth-death per person, coloured
# by birth country) and write it to ./timeline without opening a browser.
timeCoin(sociologists,"name","birth","death","birthcountry",
  dir = "./timeline", show = FALSE) # See ./timeline/index.html file
|
7e1c3941364ea5401ed2d9980614e9b9b2a5e2e2
|
bffd306bf0053643aa812dac62303ad9be4e56f0
|
/tests/testthat/test-multi-umap.R
|
88490f1f396a18e697b4595e421150e8e3f11902
|
[] |
no_license
|
LTLA/mumosa
|
3734bdd80abee135894fb1070588d35ed2853c6e
|
6d61b526172271f4bc6cca573863247e0fbff903
|
refs/heads/master
| 2023-03-17T03:06:00.062638
| 2023-03-14T04:55:38
| 2023-03-14T04:55:38
| 310,972,661
| 0
| 0
| null | 2020-12-09T05:49:19
| 2020-11-08T02:47:32
|
R
|
UTF-8
|
R
| false
| false
| 1,226
|
r
|
test-multi-umap.R
|
# This tests the multi-UMAP code.
# library(testthat); library(mumosa); source("test-multi-umap.R")
# Three "modalities" derived from one 200 x 50 random matrix: the full
# matrix plus its first 5 and first 20 columns.
stuff <- matrix(rnorm(10000), ncol=50)
things <- list(stuff, stuff[,1:5], stuff[,1:20])
test_that("metrics assembly works as expected", {
  # One metric entry per modality, all Euclidean, whose lengths match the
  # modality widths and whose concatenated indices cover all columns.
  metrics <- mumosa:::.compute_multi_modal_metrics(things)
  expect_equal(names(metrics), rep("euclidean", length(things)))
  expect_identical(unname(lengths(metrics)), vapply(things, ncol, 0L))
  expect_identical(unname(unlist(metrics)), seq_len(sum(vapply(things, ncol, 0L))))
})
test_that("multi-modal UMAP works as expected", {
  # Default embedding is 2-D with one row per observation.
  output <- calculateMultiUMAP(things)
  expect_identical(nrow(output), nrow(stuff))
  expect_identical(ncol(output), 2L)
  # n_components controls the embedding dimensionality.
  set.seed(9999)
  output <- calculateMultiUMAP(things, n_components=10)
  expect_identical(nrow(output), nrow(stuff))
  expect_identical(ncol(output), 10L)
  # Same result for SCEs.
  # The same three modalities supplied as assay / reducedDim / altExp of a
  # SingleCellExperiment must give an identical embedding under the same seed.
  sce <- SingleCellExperiment(list(X=t(stuff)), reducedDims=list(Y=stuff[,1:5]), altExps=list(Z=SummarizedExperiment(t(stuff[,1:20]))))
  set.seed(9999)
  output2 <- runMultiUMAP(sce, assays=1, dimreds=1, altexps=1, altexp.assay=1, n_components=10)
  expect_identical(output, reducedDim(output2, "MultiUMAP"))
})
|
83fae3826cfc3ccc8d14839b1168d6fff33d0cef
|
5858bac2996c3aed34487f76655d4e1cafe72287
|
/R/leaf_spot.R
|
b7692b4b0361a632292973f5bb80ca3acdb7ae3c
|
[] |
no_license
|
walmes/RDASC
|
33f5070216bd4c92c29ad592ba9c26db00d845c7
|
a4d98d803996b54eb51857eb11abd48e98fee0d9
|
refs/heads/master
| 2021-01-21T19:05:40.701103
| 2021-01-08T14:24:13
| 2021-01-08T14:24:13
| 92,110,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,760
|
r
|
leaf_spot.R
|
#' @name leaf_spot
#' @title Progress of Glomerella Leaf Spot on Apple Trees in the State of
#'     Parana, Brazil
#' @description Assessment of the severity (\% of leaf-blade area with
#'     lesions) of Glomerella leaf spot in two commercial orchard areas.
#'     In each area 30 plants were chosen at random and, on the middle
#'     third of each plant, one branch segment bearing 10 leaves was
#'     tagged.  Disease severity was scored weekly with the help of a
#'     diagrammatic scale, on all 10 leaves of every branch, totalling
#'     300 assessed leaves per experimental area.
#' @format A \code{data.frame} with 6600 rows and 5 columns, where
#'
#' \describe{
#'
#' \item{\code{pomar}}{Numeric variable identifying the orchard.}
#'
#' \item{\code{dia}}{Assessment day, from day 0 to day 82, at intervals of
#'     roughly 7 days (shifted around weekends and holidays).  Both
#'     orchards were assessed on the same dates, since they belonged to
#'     the same property about 300 metres apart.}
#'
#' \item{\code{ramo}}{Variable identifying the 30 branches of 10 leaves
#'     each, tagged at random on 30 different trees (one branch per tree)
#'     within the same orchard.  Branch identifiers are unique within each
#'     orchard.}
#'
#' \item{\code{folha}}{Variable identifying the 10 tagged leaves on each
#'     branch, totalling 300 assessed leaves over the 30 branches of each
#'     orchard.  Leaf identifiers are unique within each orchard.  The
#'     first leaf of each branch is the one closest to the stem.}
#'
#' \item{\code{sever}}{Severity recorded over time on each leaf, as the
#'     percentage of leaf area with lesions according to the adopted
#'     diagrammatic scale.  Once a leaf has fallen from the branch its
#'     severity can no longer be determined, so values for dates after
#'     leaf fall are recorded as \code{NA}.}
#'
#' }
#'
#' @source Moreira,
#'     R. R. (\url{http://lattes.cnpq.br/8144030677308566}), May De Mio,
#'     L. L. (\url{http://lattes.cnpq.br/5306520242222948}).
#'     Universidade Federal do Parana, Agricultural Sciences Sector,
#'     Laboratory of Epidemiology for Integrated Management of Plant
#'     Diseases (LEMID).
#'
#' @examples
#'
#' data(leaf_spot)
#' str(leaf_spot)
#'
#' ftable(xtabs(~pomar + dia, data = leaf_spot))
#' ftable(xtabs(~pomar + ramo + dia, data = leaf_spot))
#'
#' library(lattice)
#'
#' # Leaf-level severity profiles for 5 branches in each orchard.
#' xyplot(sever ~ dia | ramo + pomar,
#'        groups = folha,
#'        data = subset(leaf_spot, ramo <= 5),
#'        type = "o")
#'
NULL
|
b4ee985436b6384d26c8f62eaac8a40a88213813
|
613abf9af1b78007993b3138c9e678aacf4aa09c
|
/script_backup/16S/Tax4Fun/SILVA123/InfoFiles/KEGGBacArchTaxInformation.Rd
|
b59399f27825ccdec47c82a2ead44e2ac59c659d
|
[] |
no_license
|
kent5438/sharing-github
|
f02124f94d71c910bca2e5d4fa86376c1817cab5
|
2195a2cf033d6ac59099b22cea21051357b0876c
|
refs/heads/master
| 2021-06-25T22:13:33.257507
| 2020-12-31T08:20:14
| 2020-12-31T08:20:14
| 168,316,249
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224,366
|
rd
|
KEGGBacArchTaxInformation.Rd
|
\name{KEGGBacArchTaxInformation}
\alias{KEGGBacArchTaxInformation}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data(KEGGBacArchTaxInformation)}
\format{
A data frame with 1943 observations on the following 20 variables.
\describe{
\item{\code{Definition}}{a factor with levels \code{Acaryochloris marina MBIC11017} \code{Acetobacterium woodii DSM 1030} \code{Acetobacter pasteurianus IFO 3283-01-42C} \code{Acetohalobium arabaticum DSM 5501} \code{Acholeplasma laidlawii PG-8A} \code{Achromobacter xylosoxidans A8} \code{Acidaminococcus fermentans DSM 20731} \code{Acidaminococcus intestini RyC-MR95} \code{Acidianus hospitalis W1} \code{Acidilobus saccharovorans 345-15} \code{Acidimicrobium ferrooxidans DSM 10331} \code{Acidiphilium cryptum JF-5} \code{Acidiphilium multivorum AIU301} \code{Acidithiobacillus caldus SM-1} \code{Acidithiobacillus ferrivorans SS3} \code{Acidithiobacillus ferrooxidans ATCC 23270} \code{Acidithiobacillus ferrooxidans ATCC 53993} \code{Acidobacterium capsulatum ATCC 51196} \code{Acidothermus cellulolyticus 11B} \code{Acidovorax avenae subsp. avenae ATCC 19860} \code{Acidovorax avenae subsp. citrulli AAC00-1} \code{Acidovorax ebreus TPSY} \code{Acidovorax sp. JS42} \code{Aciduliprofundum boonei T469} \code{Acinetobacter baumannii 1656-2} \code{Acinetobacter baumannii AB0057} \code{Acinetobacter baumannii AB307-0294} \code{Acinetobacter baumannii ACICU} \code{Acinetobacter baumannii ATCC 17978} \code{Acinetobacter baumannii MDR-TJ} \code{Acinetobacter baumannii MDR-ZJ06} \code{Acinetobacter baumannii TCDC-AB0715} \code{Acinetobacter calcoaceticus PHEA-2} \code{Acinetobacter oleivorans DR1} \code{Actinobacillus pleuropneumoniae AP76 (serotype 7)} \code{Actinobacillus pleuropneumoniae JL03 (serotype 3)} \code{Actinobacillus pleuropneumoniae L20 (serotype 5b)} \code{Actinobacillus succinogenes 130Z} \code{Actinoplanes missouriensis 431} \code{Actinoplanes sp. SE50/110} \code{Actinosynnema mirum DSM 43827} \code{Advenella kashmirensis WT001} \code{Aequorivita sublithincola DSM 14238} \code{Aerococcus urinae ACS-120-V-Col10a} \code{Aeromonas hydrophila subsp. hydrophila ATCC 7966} \code{Aeromonas salmonicida subsp. 
salmonicida A449} \code{Aeromonas veronii B565} \code{Aeropyrum pernix K1} \code{Aggregatibacter actinomycetemcomitans ANH9381} \code{Aggregatibacter actinomycetemcomitans D11S-1} \code{Aggregatibacter actinomycetemcomitans D7S-1} \code{Aggregatibacter aphrophilus NJ8700} \code{Agrobacterium sp. H13-3} \code{Agrobacterium tumefaciens C58} \code{Agrobacterium vitis S4} \code{Akkermansia muciniphila ATCC BAA-835} \code{Alcanivorax borkumensis SK2} \code{Alicycliphilus denitrificans BC} \code{Alicycliphilus denitrificans K601} \code{Alicyclobacillus acidocaldarius subsp. acidocaldarius DSM 446} \code{Alicyclobacillus acidocaldarius subsp. acidocaldarius Tc-4-1} \code{Aliivibrio salmonicida LFI1238} \code{Alistipes finegoldii DSM 17242} \code{Alkalilimnicola ehrlichei MLHE-1} \code{Alkaliphilus metalliredigens QYMF} \code{Alkaliphilus oremlandii OhILAs} \code{Allochromatium vinosum DSM 180} \code{Alteromonas macleodii Deep ecotype} \code{Alteromonas sp. SN2} \code{Aminobacterium colombiense DSM 12261} \code{Ammonifex degensii KC4} \code{Amycolatopsis mediterranei S699} \code{Amycolatopsis mediterranei U32} \code{Amycolicicoccus subflavus DQS3-9A1} \code{Anabaena azollae 0708 ('Nostoc azollae' 0708), cyanobacterial} \code{Anabaena sp. PCC 7120 (Nostoc sp. PCC 7120)} \code{Anabaena variabilis ATCC 29413} \code{Anaerobaculum mobile DSM 13181} \code{Anaerococcus prevotii DSM 20548} \code{Anaerolinea thermophila UNI-1} \code{Anaeromyxobacter dehalogenans 2CP-1} \code{Anaeromyxobacter dehalogenans 2CP-C} \code{Anaeromyxobacter sp. Fw109-5} \code{Anaeromyxobacter sp. K} \code{Anaplasma centrale Israel} \code{Anaplasma marginale Florida} \code{Anaplasma marginale St. 
Maries} \code{Anaplasma phagocytophilum HZ} \code{Anoxybacillus flavithermus WK1} \code{Aquifex aeolicus VF5} \code{Arcanobacterium haemolyticum DSM 20595} \code{Archaeoglobus fulgidus DSM 4304 (VC-16)} \code{Archaeoglobus profundus DSM 5631} \code{Archaeoglobus veneficus SNP6} \code{Arcobacter butzleri ED-1} \code{Arcobacter butzleri RM4018} \code{Arcobacter nitrofigilis DSM 7299} \code{Arcobacter sp. L} \code{Aromatoleum aromaticum EbN1} \code{Arthrobacter arilaitensis Re117} \code{Arthrobacter aurescens TC1} \code{Arthrobacter chlorophenolicus A6} \code{Arthrobacter phenanthrenivorans Sphe3} \code{Arthrobacter sp. FB24} \code{Arthrobacter sp. Rue61a} \code{Aster yellows witches'-broom phytoplasma AYWB} \code{Asticcacaulis excentricus CB 48} \code{Atopobium parvulum DSM 20469} \code{Azoarcus sp. BH72} \code{Azorhizobium caulinodans ORS 571} \code{Azospirillum lipoferum 4B} \code{Azospirillum sp. B510} \code{Azotobacter vinelandii DJ} \code{Bacillus amyloliquefaciens CAU-B946} \code{Bacillus amyloliquefaciens DSM 7} \code{Bacillus amyloliquefaciens FZB42} \code{Bacillus amyloliquefaciens LL3} \code{Bacillus amyloliquefaciens subsp. 
plantarum YAU B9601-Y2} \code{Bacillus amyloliquefaciens TA208} \code{Bacillus amyloliquefaciens XH7} \code{Bacillus amyloliquefaciens Y2} \code{Bacillus anthracis A0248} \code{Bacillus anthracis Ames} \code{Bacillus anthracis Ames 0581 (Ames Ancestor)} \code{Bacillus anthracis CDC 684} \code{Bacillus anthracis H9401} \code{Bacillus anthracis Sterne} \code{Bacillus atrophaeus 1942} \code{Bacillus cellulosilyticus DSM 2522} \code{Bacillus cereus 03BB102} \code{Bacillus cereus AH187} \code{Bacillus cereus AH820} \code{Bacillus cereus ATCC 10987} \code{Bacillus cereus ATCC 14579} \code{Bacillus cereus B4264} \code{Bacillus cereus biovar anthracis CI} \code{Bacillus cereus E33L (zebra killer)} \code{Bacillus cereus G9842} \code{Bacillus cereus NC7401} \code{Bacillus cereus Q1} \code{Bacillus coagulans 2-6} \code{Bacillus coagulans 36D1} \code{Bacillus cytotoxicus NVH 391-98} \code{Bacillus halodurans C-125} \code{Bacillus licheniformis ATCC 14580} \code{Bacillus licheniformis DSM 13 = ATCC 14580} \code{Bacillus megaterium DSM 319} \code{Bacillus megaterium QM B1551} \code{Bacillus megaterium WSH-002} \code{Bacillus pseudofirmus OF4} \code{Bacillus pumilus SAFR-032} \code{Bacillus selenitireducens MLS10} \code{Bacillus sp. JS} \code{Bacillus subtilis BSn5} \code{Bacillus subtilis subsp. spizizenii TU-B-10} \code{Bacillus subtilis subsp. spizizenii W23} \code{Bacillus subtilis subsp. 
subtilis RO-NN-1} \code{Bacillus thuringiensis 97-27 (serovar konkukian)} \code{Bacillus thuringiensis Al Hakam} \code{Bacillus thuringiensis BMB171} \code{Bacillus thuringiensis serovar chinensis CT-43} \code{Bacillus thuringiensis serovar finitimus YBT-020} \code{Bacillus weihenstephanensis KBAB4} \code{Bacteriovorax marinus SJ} \code{Bacteroides fragilis 638R} \code{Bacteroides fragilis NCTC 9343} \code{Bacteroides fragilis YCH46} \code{Bacteroides helcogenes P 36-108} \code{Bacteroides salanitronis DSM 18170} \code{Bacteroides thetaiotaomicron VPI-5482} \code{Bacteroides vulgatus ATCC 8482} \code{Bartonella bacilliformis KC583} \code{Bartonella grahamii as4aup} \code{Bartonella henselae Houston-1} \code{Bartonella quintana Toulouse} \code{Bartonella tribocorum CIP 105476} \code{Baumannia cicadellinicola Hc, symbioint of Homalodisca coagulata} \code{Bdellovibrio bacteriovorus HD100} \code{Beijerinckia indica subsp. indica ATCC 9039} \code{Belliella baltica DSM 15883} \code{Beutenbergia cavernae DSM 12333} \code{Bifidobacterium adolescentis ATCC 15703} \code{Bifidobacterium animalis subsp. animalis ATCC 25527} \code{Bifidobacterium animalis subsp. lactis AD011} \code{Bifidobacterium animalis subsp. lactis BB-12} \code{Bifidobacterium animalis subsp. lactis Bl-04} \code{Bifidobacterium animalis subsp. lactis BLC1} \code{Bifidobacterium animalis subsp. lactis CNCM I-2494} \code{Bifidobacterium animalis subsp. lactis DSM 10140} \code{Bifidobacterium animalis subsp. lactis V9} \code{Bifidobacterium bifidum BGN4} \code{Bifidobacterium bifidum PRL2010} \code{Bifidobacterium bifidum S17} \code{Bifidobacterium breve ACS-071-V-Sch8b} \code{Bifidobacterium dentium Bd1} \code{Bifidobacterium longum DJO10A} \code{Bifidobacterium longum NCC2705} \code{Bifidobacterium longum subsp. infantis 157F} \code{Bifidobacterium longum subsp. infantis ATCC 15697 (JGI)} \code{Bifidobacterium longum subsp. infantis ATCC 15697 (Tokyo)} \code{Bifidobacterium longum subsp. 
longum BBMN68} \code{Bifidobacterium longum subsp. longum JCM 1217} \code{Bifidobacterium longum subsp. longum JDM301} \code{Bifidobacterium longum subsp. longum KACC 91563} \code{Blattabacterium sp. (Blaberus giganteus)} \code{Blattabacterium sp. (Blattella germanica) Bge} \code{Blattabacterium sp. (Cryptocercus punctulatus) Cpu} \code{Blattabacterium sp. (Mastotermes darwiniensis) MADAR} \code{Blattabacterium sp. (Periplaneta americana) BPLAN} \code{Blochmannia floridanus, endosymbiont of Camponotus floridanus} \code{Bordetella avium 197N} \code{Bordetella bronchiseptica RB50} \code{Bordetella parapertussis 12822} \code{Bordetella pertussis CS} \code{Bordetella pertussis Tohama I} \code{Bordetella petrii DSM 12804} \code{Borrelia afzelii PKo (FLI)} \code{Borrelia afzelii PKo (Maryland)} \code{Borrelia bissettii DN127} \code{Borrelia burgdorferi B31} \code{Borrelia burgdorferi ZS7} \code{Borrelia crocidurae Achema} \code{Borrelia duttonii Ly} \code{Borrelia garinii BgVir} \code{Borrelia garinii PBi} \code{Borrelia hermsii DAH} \code{Borrelia recurrentis A1} \code{Borrelia turicatae 91E135} \code{Brachybacterium faecium DSM 4810} \code{Brachyspira hyodysenteriae WA1} \code{Brachyspira intermedia PWS/A} \code{Brachyspira murdochii DSM 12563} \code{Brachyspira pilosicoli 95/1000} \code{Brachyspira pilosicoli B2904} \code{Bradyrhizobium japonicum USDA110} \code{Bradyrhizobium japonicum USDA 6} \code{Bradyrhizobium sp. BTAi1} \code{Bradyrhizobium sp. S23321} \code{Brevibacillus brevis NBRC 100599} \code{Brevundimonas subvibrioides ATCC 15264} \code{Brucella abortus 9-941 (biovar 1)} \code{Brucella abortus A13334} \code{Brucella abortus S19} \code{Brucella canis ATCC 23365} \code{Brucella canis HSK A52141} \code{Brucella melitensis ATCC 23457} \code{Brucella melitensis biovar Abortus 2308} \code{Brucella melitensis bv. 
1 16M} \code{Brucella melitensis M28} \code{Brucella melitensis M5-90} \code{Brucella melitensis NI} \code{Brucella microti CCM 4915} \code{Brucella ovis ATCC 25840} \code{Brucella pinnipedialis B2/94} \code{Brucella suis 1330} \code{Brucella suis ATCC 23445} \code{Brucella suis VBI22} \code{Buchnera aphidicola 5A, endosymbiont of Acyrthosiphon pisum} \code{Buchnera aphidicola Ak, endosymbiont of Acyrthosiphon kondoi (blue} \code{Buchnera aphidicola APS, endosymbiont of Acyrthosiphon pisum (pea} \code{Buchnera aphidicola Bp, endosymbiont of Baizongia pistaciae} \code{Buchnera aphidicola Cc, endosymbiont of Cinara cedri} \code{Buchnera aphidicola (Cinara tujafilina)} \code{Buchnera aphidicola JF98, endosymbiont of Acyrthosiphon pisum} \code{Buchnera aphidicola LL01, endosymbiont of Acyrthosiphon pisum} \code{Buchnera aphidicola Sg, endosymbiont of Schizaphis graminum} \code{Buchnera aphidicola TLW03, endosymbiont of Acyrthosiphon pisum} \code{Buchnera aphidicola Tuc7, endosymbiont of Acyrthosiphon pisum} \code{Buchnera aphidicola Ua, endosymbiont of Uroleucon ambrosiae} \code{Burkholderia ambifaria AMMD} \code{Burkholderia ambifaria MC40-6} \code{Burkholderia cenocepacia AU 1054} \code{Burkholderia cenocepacia HI2424} \code{Burkholderia cenocepacia J2315} \code{Burkholderia cenocepacia MC0-3} \code{Burkholderia cepacia GG4} \code{Burkholderia gladioli BSR3} \code{Burkholderia glumae BGR1} \code{Burkholderia mallei ATCC 23344} \code{Burkholderia mallei NCTC 10229} \code{Burkholderia mallei NCTC 10247} \code{Burkholderia mallei SAVP1} \code{Burkholderia multivorans ATCC 17616 (JGI)} \code{Burkholderia multivorans ATCC 17616 (Tohoku)} \code{Burkholderia phymatum STM815} \code{Burkholderia phytofirmans PsJN} \code{Burkholderia pseudomallei 1026b} \code{Burkholderia pseudomallei 1106a} \code{Burkholderia pseudomallei 1710b} \code{Burkholderia pseudomallei 668} \code{Burkholderia pseudomallei K96243} \code{Burkholderia pseudomallei MSHR346} \code{Burkholderia rhizoxinica 
HKI 454} \code{Burkholderia sp. 383} \code{Burkholderia sp. CCGE1001} \code{Burkholderia sp. CCGE1002} \code{Burkholderia sp. CCGE1003} \code{Burkholderia sp. KJ006} \code{Burkholderia sp. YI23} \code{Burkholderia thailandensis E264} \code{Burkholderia vietnamiensis G4} \code{Burkholderia xenovorans LB400} \code{Butyrivibrio proteoclasticus B316} \code{Caldicellulosiruptor bescii DSM 6725} \code{Caldicellulosiruptor hydrothermalis 108} \code{Caldicellulosiruptor kristjanssonii 177R1B} \code{Caldicellulosiruptor kronotskyensis 2002} \code{Caldicellulosiruptor lactoaceticus 6A} \code{Caldicellulosiruptor obsidiansis OB47} \code{Caldicellulosiruptor owensensis OL} \code{Caldicellulosiruptor saccharolyticus DSM 8903} \code{Caldilinea aerophila DSM 14535 = NBRC 104270} \code{Caldisericum exile AZM16c01} \code{Calditerrivibrio nitroreducens DSM 19672} \code{Caldivirga maquilingensis IC-167} \code{Campylobacter concisus 13826} \code{Campylobacter curvus 525.92} \code{Campylobacter fetus subsp. fetus 82-40} \code{Campylobacter hominis ATCC BAA-381} \code{Campylobacter jejuni RM1221} \code{Campylobacter jejuni subsp. doylei 269.97} \code{Campylobacter jejuni subsp. jejuni 81116} \code{Campylobacter jejuni subsp. jejuni 81-176} \code{Campylobacter jejuni subsp. jejuni IA3902} \code{Campylobacter jejuni subsp. jejuni ICDCCJ07001} \code{Campylobacter jejuni subsp. jejuni M1} \code{Campylobacter jejuni subsp. jejuni NCTC 11168 = ATCC 700819} \code{Campylobacter jejuni subsp. jejuni S3} \code{Campylobacter lari RM2100} \code{Candidatus Accumulibacter phosphatis clade IIA UW-1} \code{Candidatus Amoebophilus asiaticus 5a2} \code{Candidatus Arthromitus sp. SFB-mouse-Japan} \code{Candidatus Arthromitus sp. SFB-mouse-Yit} \code{Candidatus Arthromitus sp. SFB-rat-Yit} \code{Candidatus Azobacteroides pseudotrichonymphae genomovar. 
CFP2,} \code{Candidatus Blochmannia pennsylvanicus BPEN, endosymbiont of} \code{Candidatus Blochmannia vafer BVAF} \code{Candidatus Carsonella ruddii CE isolate Thao2000} \code{Candidatus Carsonella ruddii CS isolate Thao2000} \code{Candidatus Carsonella ruddii HC isolate Thao2000} \code{Candidatus Carsonella ruddii HT isolate Thao2000} \code{Candidatus Carsonella ruddii PC isolate NHV} \code{Candidatus Carsonella ruddii PV, endosymbiont of Pachypsylla} \code{Candidatus Chloracidobacterium thermophilum B} \code{Candidatus Desulfococcus oleovorans Hxd3} \code{Candidatus Desulforudis audaxviator MP104C} \code{Candidatus Hamiltonella defensa 5AT, endosymbiont of Acyrthosiphon} \code{Candidatus Hodgkinia cicadicola Dsem} \code{Candidatus Korarchaeum cryptofilum OPF8} \code{Candidatus Koribacter versatilis Ellin345} \code{Candidatus Liberibacter asiaticus psy62} \code{Candidatus Liberibacter solanacearum CLso-ZC1} \code{Candidatus Methanosphaerula palustris E1-9c} \code{Candidatus Methylomirabilis oxyfera} \code{Candidatus Midichloria mitochondrii IricVA} \code{Candidatus Moranella endobia PCIT} \code{Candidatus Nitrospira defluvii} \code{Candidatus Pelagibacter sp. 
IMCC9063} \code{Candidatus Pelagibacter ubique HTCC1062} \code{Candidatus Phytoplasma australiense} \code{Candidatus Phytoplasma mali} \code{Candidatus Portiera aleyrodidarum BT-B} \code{Candidatus Protochlamydia amoebophila UWE25 (Parachlamydia sp.} \code{Candidatus Puniceispirillum marinum IMCC1322} \code{Candidatus Rickettsia amblyommii GAT-30V} \code{Candidatus Riesia pediculicola USDA} \code{Candidatus Ruthia magnifica Cm, endosymbiont of Calyptogena} \code{Candidatus Solibacter usitatus Ellin6076} \code{Candidatus Sulcia muelleri DMIN} \code{Candidatus Tremblaya princeps PCIT} \code{Candidatus Tremblaya princeps PCVAL} \code{Candidatus Vesicomyosocius okutanii HA, endosymbiont of Calyptogena} \code{Candidatus Zinderia insecticola CARI} \code{Capnocytophaga ochracea DSM 7271} \code{Carboxydothermus hydrogenoformans Z-2901} \code{Carnobacterium sp. 17-4} \code{Catenulispora acidiphila DSM 44928} \code{Caulobacter crescentus CB15} \code{Caulobacter crescentus NA1000} \code{Caulobacter segnis ATCC 21756} \code{Caulobacter sp. 
K31} \code{Cellulomonas fimi ATCC 484} \code{Cellulomonas flavigena DSM 20109} \code{Cellulophaga algicola DSM 14237} \code{Cellulophaga lytica DSM 7489} \code{Cellvibrio gilvus ATCC 13127} \code{Cellvibrio japonicus Ueda107} \code{Cenarchaeum symbiosum A} \code{Chitinophaga pinensis DSM 2588} \code{Chlamydia muridarum Nigg (Chlamydia trachomatis MoPn)} \code{Chlamydia trachomatis} \code{Chlamydia trachomatis A2497} \code{Chlamydia trachomatis A/HAR-13} \code{Chlamydia trachomatis B/TZ1A828/OT} \code{Chlamydia trachomatis D-EC} \code{Chlamydia trachomatis D-LC} \code{Chlamydia trachomatis D/UW-3/CX} \code{Chlamydia trachomatis E/11023} \code{Chlamydia trachomatis E/150} \code{Chlamydia trachomatis E/SW3} \code{Chlamydia trachomatis F/SW4} \code{Chlamydia trachomatis F/SW5} \code{Chlamydia trachomatis G/11074} \code{Chlamydia trachomatis G/11222} \code{Chlamydia trachomatis G/9301} \code{Chlamydia trachomatis G/9768} \code{Chlamydia trachomatis L2/434/Bu} \code{Chlamydia trachomatis L2b/UCH-1/proctitis} \code{Chlamydia trachomatis L2c} \code{Chlamydophila abortus S26/3} \code{Chlamydophila caviae GPIC} \code{Chlamydophila felis Fe/C-56} \code{Chlamydophila pecorum E58} \code{Chlamydophila pneumoniae AR39} \code{Chlamydophila pneumoniae CWL029} \code{Chlamydophila pneumoniae J138} \code{Chlamydophila pneumoniae LPCoLN} \code{Chlamydophila pneumoniae TW-183} \code{Chlamydophila psittaci 01DC11} \code{Chlamydophila psittaci 02DC15} \code{Chlamydophila psittaci 08DC60} \code{Chlamydophila psittaci 6BC} \code{Chlamydophila psittaci C19/98} \code{Chlorobaculum parvum NCIB 8327} \code{Chlorobaculum tepidum TLS} \code{Chlorobium chlorochromatii CaD3} \code{Chlorobium limicola DSM 245} \code{Chlorobium phaeobacteroides BS1} \code{Chlorobium phaeobacteroides DSM 266} \code{Chlorobium phaeovibrioides DSM 265} \code{Chloroflexus aggregans DSM 9485} \code{Chloroflexus aurantiacus J-10-fl} \code{Chloroflexus sp. 
Y-400-fl} \code{Chloroherpeton thalassium ATCC 35110} \code{Chromobacterium violaceum ATCC 12472} \code{Chromohalobacter salexigens DSM 3043} \code{Citrobacter koseri ATCC BAA-895} \code{Citrobacter rodentium ICC168} \code{Clavibacter michiganensis subsp. michiganensis NCPPB 382} \code{Clavibacter michiganensis subsp. sepedonicus ATCC 33113} \code{Clostridiales genomosp. BVAB3 UPII9-5} \code{Clostridium acetobutylicum EA 2018} \code{Clostridium beijerinckii NCIMB 8052} \code{Clostridium botulinum A2 Kyoto} \code{Clostridium botulinum A3 Loch Maree} \code{Clostridium botulinum A ATCC 19397} \code{Clostridium botulinum A ATCC 3502} \code{Clostridium botulinum A Hall} \code{Clostridium botulinum B1 Okra} \code{Clostridium botulinum Ba4 657} \code{Clostridium botulinum B Eklund 17B} \code{Clostridium botulinum BKT015925} \code{Clostridium botulinum E3 Alaska E43} \code{Clostridium botulinum F 230613} \code{Clostridium botulinum F Langeland} \code{Clostridium cellulolyticum H10} \code{Clostridium cellulovorans 743B} \code{Clostridium clariflavum DSM 19732} \code{Clostridium difficile 630 (PCR-ribotype 012)} \code{Clostridium difficile BI1} \code{Clostridium difficile CD196 (PCR-ribotype 027)} \code{Clostridium difficile R20291 (PCR-ribotype 027)} \code{Clostridium kluyveri DSM 555} \code{Clostridium kluyveri NBRC 12016} \code{Clostridium lentocellum DSM 5427} \code{Clostridium ljungdahlii DSM 13528} \code{Clostridium novyi NT} \code{Clostridium perfringens 13} \code{Clostridium perfringens ATCC 13124} \code{Clostridium perfringens SM101} \code{Clostridium phytofermentans ISDg} \code{Clostridium saccharolyticum WM1} \code{Clostridium sp. BNL1100} \code{Clostridium sp. 
SY8519} \code{Clostridium tetani E88} \code{Clostridium thermocellum ATCC 27405} \code{Clostridium thermocellum DSM 1313} \code{Collimonas fungivorans Ter331} \code{Colwellia psychrerythraea 34H} \code{Comamonas testosteroni CNB-2} \code{Conexibacter woesei DSM 14684} \code{Coprothermobacter proteolyticus DSM 5265} \code{Coraliomargarita akajimensis DSM 45221} \code{Corallococcus coralloides DSM 2259} \code{Coriobacterium glomerans PW2} \code{Corynebacterium aurimucosum ATCC 700975} \code{Corynebacterium diphtheriae 241} \code{Corynebacterium diphtheriae 31A} \code{Corynebacterium diphtheriae BH8} \code{Corynebacterium diphtheriae C7 (beta)} \code{Corynebacterium diphtheriae CDCE 8392} \code{Corynebacterium diphtheriae gravis NCTC13129} \code{Corynebacterium diphtheriae HC01} \code{Corynebacterium diphtheriae HC02} \code{Corynebacterium diphtheriae HC03} \code{Corynebacterium diphtheriae HC04} \code{Corynebacterium diphtheriae INCA 402} \code{Corynebacterium diphtheriae PW8} \code{Corynebacterium diphtheriae VA01} \code{Corynebacterium efficiens YS-314} \code{Corynebacterium glutamicum ATCC 13032 (Bielefeld)} \code{Corynebacterium glutamicum ATCC 13032 (Kyowa Hakko)} \code{Corynebacterium glutamicum R} \code{Corynebacterium jeikeium K411} \code{Corynebacterium kroppenstedtii DSM 44385} \code{Corynebacterium pseudotuberculosis 1002} \code{Corynebacterium pseudotuberculosis 1/06-A} \code{Corynebacterium pseudotuberculosis 258} \code{Corynebacterium pseudotuberculosis 267} \code{Corynebacterium pseudotuberculosis 31} \code{Corynebacterium pseudotuberculosis 316} \code{Corynebacterium pseudotuberculosis 3/99-5} \code{Corynebacterium pseudotuberculosis 42/02-A} \code{Corynebacterium pseudotuberculosis C231} \code{Corynebacterium pseudotuberculosis CIP 52.97} \code{Corynebacterium pseudotuberculosis Cp162} \code{Corynebacterium pseudotuberculosis FRC41} \code{Corynebacterium pseudotuberculosis I19} \code{Corynebacterium pseudotuberculosis P54B96} \code{Corynebacterium 
pseudotuberculosis PAT10} \code{Corynebacterium resistens DSM 45100} \code{Corynebacterium ulcerans 0102} \code{Corynebacterium ulcerans 809} \code{Corynebacterium ulcerans BR-AD22} \code{Corynebacterium urealyticum DSM 7109} \code{Corynebacterium variabile DSM 44702} \code{Coxiella burnetii CbuG_Q212} \code{Coxiella burnetii CbuK_Q154} \code{Coxiella burnetii Dugway 5J108-111} \code{Coxiella burnetii RSA 331} \code{Coxiella burnetii RSA 493} \code{Croceibacter atlanticus HTCC2559} \code{Cronobacter sakazakii ATCC BAA-894} \code{Cronobacter sakazakii ES15} \code{Cronobacter turicensis z3032} \code{Cryptobacterium curtum DSM 15641} \code{Cupriavidus metallidurans CH34} \code{Cupriavidus necator N-1} \code{Cyanobacterium UCYN-A} \code{Cyanothece sp. ATCC 51142} \code{Cyanothece sp. PCC 7424} \code{Cyanothece sp. PCC 7425} \code{Cyanothece sp. PCC 7822} \code{Cyanothece sp. PCC 8801} \code{Cyanothece sp. PCC 8802} \code{Cyclobacterium marinum DSM 745} \code{Cytophaga hutchinsonii ATCC 33406} \code{Dechloromonas aromatica RCB} \code{Dechlorosoma suillum PS} \code{Deferribacter desulfuricans SSM1} \code{Dehalococcoides ethenogenes 195} \code{Dehalococcoides sp. BAV1} \code{Dehalococcoides sp. CBDB1} \code{Dehalococcoides sp. GT} \code{Dehalococcoides sp. VS} \code{Dehalogenimonas lykanthroporepellens BL-DC-9} \code{Deinococcus deserti VCD115} \code{Deinococcus geothermalis DSM 11300} \code{Deinococcus gobiensis I-0} \code{Deinococcus maricopensis DSM 21211} \code{Deinococcus proteolyticus MRP} \code{Deinococcus radiodurans R1} \code{Delftia acidovorans SPH-1} \code{Delftia sp. 
Cs1-4} \code{Denitrovibrio acetiphilus DSM 12809} \code{Desulfarculus baarsii DSM 2075} \code{Desulfatibacillum alkenivorans AK-01} \code{Desulfitobacterium dehalogenans ATCC 51507} \code{Desulfitobacterium hafniense DCB-2} \code{Desulfitobacterium hafniense Y51} \code{Desulfobacca acetoxidans DSM 11109} \code{Desulfobacterium autotrophicum HRM2} \code{Desulfobulbus propionicus DSM 2032} \code{Desulfohalobium retbaense DSM 5692} \code{Desulfomicrobium baculatum DSM 4028} \code{Desulfomonile tiedjei DSM 6799} \code{Desulfosporosinus acidiphilus SJ4} \code{Desulfosporosinus meridiei DSM 13257} \code{Desulfosporosinus orientis DSM 765} \code{Desulfotalea psychrophila LSv54} \code{Desulfotomaculum acetoxidans DSM 771} \code{Desulfotomaculum carboxydivorans CO-1-SRB} \code{Desulfotomaculum kuznetsovii DSM 6115} \code{Desulfotomaculum reducens MI-1} \code{Desulfotomaculum ruminis DSM 2154} \code{Desulfovibrio aespoeensis Aspo-2} \code{Desulfovibrio africanus Walvis Bay} \code{Desulfovibrio alaskensis G20} \code{Desulfovibrio desulfuricans ND132} \code{Desulfovibrio desulfuricans subsp. desulfuricans ATCC 27774} \code{Desulfovibrio magneticus RS-1} \code{Desulfovibrio salexigens DSM 2638} \code{Desulfovibrio vulgaris Miyazaki F} \code{Desulfovibrio vulgaris RCH1} \code{Desulfovibrio vulgaris subsp. vulgaris DP4} \code{Desulfovibrio vulgaris subsp. 
vulgaris Hildenborough} \code{Desulfurispirillum indicum S5} \code{Desulfurivibrio alkaliphilus AHT2} \code{Desulfurobacterium thermolithotrophum DSM 11699} \code{Desulfurococcus fermentans DSM 16532} \code{Desulfurococcus kamchatkensis 1221n} \code{Desulfurococcus mucosus DSM 2162} \code{Dichelobacter nodosus VCS1703A} \code{Dickeya dadantii 3937} \code{Dickeya dadantii Ech586} \code{Dickeya dadantii Ech703} \code{Dickeya zeae Ech1591} \code{Dictyoglomus thermophilum H-6-12} \code{Dictyoglomus turgidum DSM 6724} \code{Dinoroseobacter shibae DFL 12} \code{Dyadobacter fermentans DSM 18053} \code{Edwardsiella ictaluri 93-146} \code{Edwardsiella tarda EIB202} \code{Edwardsiella tarda FL6-60} \code{Eggerthella lenta DSM 2243} \code{Eggerthella sp. YY7918} \code{Ehrlichia canis Jake} \code{Ehrlichia chaffeensis Arkansas} \code{Ehrlichia ruminantium Welgevonden (South Africa)} \code{Elusimicrobium minutum Pei191} \code{Enterobacter aerogenes KCTC 2190} \code{Enterobacter asburiae LF7a} \code{Enterobacter cloacae EcWSU1} \code{Enterobacter cloacae SCF1} \code{Enterobacter cloacae subsp. cloacae ATCC 13047} \code{Enterobacter cloacae subsp. dissolvens SDM} \code{Enterobacter sp. 638} \code{Enterococcus faecalis D32} \code{Enterococcus faecalis OG1RF} \code{Enterococcus faecalis V583, vancomycin-resistant clinical isolate} \code{Enterococcus faecium Aus0004} \code{Enterococcus faecium DO} \code{Enterococcus hirae ATCC 9790} \code{Erwinia amylovora ATCC 49946} \code{Erwinia amylovora CFBP1430} \code{Erwinia billingiae Eb661} \code{Erwinia pyrifoliae DSM 12163} \code{Erwinia pyrifoliae Ep1/96} \code{Erwinia sp. 
Ejp617} \code{Erwinia tasmaniensis Et1/99} \code{Erysipelothrix rhusiopathiae Fujisawa} \code{Erythrobacter litoralis HTCC2594} \code{Escherichia blattae DSM 4481} \code{Escherichia coli ABU 83972} \code{Escherichia coli BL21(DE3)} \code{Escherichia coli BL21-Gold(DE3)pLysS AG} \code{Escherichia coli B REL606} \code{Escherichia coli C ATCC 8739} \code{Escherichia coli clone D i14} \code{Escherichia coli clone D i2} \code{Escherichia coli DH1} \code{Escherichia coli IHE3034} \code{Escherichia coli K-12 DH10B} \code{Escherichia coli K-12 W3110} \code{Escherichia coli KO11FL} \code{Escherichia coli NA114} \code{Escherichia coli O103:H2 12009 (EHEC)} \code{Escherichia coli O111:H- 11128 (EHEC)} \code{Escherichia coli O127:H6 E2348/69 (EPEC)} \code{Escherichia coli O139:H28 E24377A (ETEC)} \code{Escherichia coli O152:H28 SE11 (commensal strain)} \code{Escherichia coli O157:H7 EC4115 (EHEC)} \code{Escherichia coli O157:H7 EDL933 (EHEC)} \code{Escherichia coli O157:H7 Sakai (EHEC)} \code{Escherichia coli O157:H7 TW14359 (EHEC)} \code{Escherichia coli O18:K1:H7 UTI89 (UPEC)} \code{Escherichia coli O1:K1:H7 (Avian pathogenic Escherichia coli)} \code{Escherichia coli O26:H11 11368 (EHEC)} \code{Escherichia coli O44:H18 042} \code{Escherichia coli O55:H7 CB9615 (atypical EPEC)} \code{Escherichia coli O55:H7 RM12579 (EPEC)} \code{Escherichia coli O6:K15:H31 536 (UPEC)} \code{Escherichia coli O6:K2:H1 CFT073 (UPEC)} \code{Escherichia coli O78:H11:K80 H10407 (ETEC)} \code{Escherichia coli O7:K1 CE10} \code{Escherichia coli O83:H1 NRG 857C} \code{Escherichia coli O9 HS (commensal strain)} \code{Escherichia coli P12b} \code{Escherichia coli SMS-3-5 (environmental isolate)} \code{Escherichia coli UM146} \code{Escherichia coli UMNK88} \code{Escherichia coli W} \code{Escherichia coli Xuzhou21} \code{Ethanoligenens harbinense YUAN-3} \code{Eubacterium eligens ATCC 27750} \code{Eubacterium limosum KIST612} \code{Eubacterium rectale ATCC 33656} \code{Exiguobacterium sibiricum 255-15} 
\code{Exiguobacterium sp. AT1b} \code{Ferrimonas balearica DSM 9799} \code{Ferroglobus placidus DSM 10642} \code{Fervidicoccus fontis Kam940} \code{Fervidobacterium nodosum Rt17-B1} \code{Fervidobacterium pennivorans DSM 9078} \code{Fibrobacter succinogenes subsp. succinogenes S85} \code{Filifactor alocis ATCC 35896} \code{Finegoldia magna ATCC 29328} \code{Flavobacteriaceae bacterium 3519-10} \code{Flavobacterium branchiophilum FL-15} \code{Flavobacterium columnare ATCC 49512} \code{Flavobacterium indicum GPTSA100-9} \code{Flavobacterium johnsoniae UW101} \code{Flavobacterium psychrophilum JIP02/86} \code{Flexibacter litoralis DSM 6794} \code{Flexistipes sinusarabici DSM 4947} \code{Fluviicola taffensis DSM 16823} \code{Francisella cf. novicida Fx1} \code{Francisella noatunensis subsp. orientalis Toba 04} \code{Francisella novicida U112} \code{Francisella philomiragia subsp. philomiragia ATCC 25017} \code{Francisella sp. TX077308} \code{Francisella tularensis subsp. holarctica FTNF002-00} \code{Francisella tularensis subsp. holarctica LVS (Live Vaccine Strain)} \code{Francisella tularensis subsp. holarctica OSU18} \code{Francisella tularensis subsp. mediasiatica FSC147} \code{Francisella tularensis subsp. tularensis FSC 198} \code{Francisella tularensis subsp. tularensis NE061598} \code{Francisella tularensis subsp. tularensis SCHU S4} \code{Francisella tularensis subsp. tularensis TI0902} \code{Francisella tularensis subsp. tularensis WY96-3418} \code{Francisella tularensis TIGB03} \code{Frankia sp. CcI3} \code{Frankia sp. EAN1pec} \code{Frankia sp. 
EuI1c} \code{Frankia symbiont of Datisca glomerata} \code{Frateuria aurantia DSM 6220} \code{Fusobacterium nucleatum ATCC 25586} \code{Gallibacterium anatis UMN179} \code{Gallionella capsiferriformans ES-2} \code{Gamma proteobacterium HdN1} \code{Gardnerella vaginalis 409-05} \code{Gardnerella vaginalis ATCC 14019} \code{Gardnerella vaginalis HMP9231} \code{Gemmatimonas aurantiaca T-27} \code{Geobacillus kaustophilus HTA426} \code{Geobacillus sp. C56-T3} \code{Geobacillus sp. WCH70} \code{Geobacillus sp. Y412MC52} \code{Geobacillus sp. Y412MC61} \code{Geobacillus sp. Y4.1MC1} \code{Geobacillus thermodenitrificans NG80-2} \code{Geobacillus thermoglucosidasius C56-YS93} \code{Geobacillus thermoleovorans CCB_US3_UF5} \code{Geobacter bemidjiensis Bem} \code{Geobacter lovleyi SZ} \code{Geobacter metallireducens GS-15} \code{Geobacter sp. FRC-32} \code{Geobacter sp. M18} \code{Geobacter sp. M21} \code{Geobacter sulfurreducens KN400} \code{Geobacter sulfurreducens PCA} \code{Geobacter uraniireducens Rf4} \code{Geodermatophilus obscurus DSM 43160} \code{Glaciecola nitratireducens FR1064} \code{Glaciecola sp. 4H-3-7+YE-5} \code{Gloeobacter violaceus PCC7421} \code{Gluconacetobacter diazotrophicus PAl 5 (Brazil)} \code{Gluconacetobacter diazotrophicus PAl 5 (JGI)} \code{Gluconacetobacter xylinus NBRC 3288} \code{Gluconobacter oxydans 621H} \code{Gordonia bronchialis DSM 43247} \code{Gordonia polyisoprenivorans VH2} \code{Gordonia sp. 
KTR9} \code{Gramella forsetii KT0803} \code{Granulibacter bethesdensis CGDNIH1} \code{Granulicella mallensis MP5ACTX8} \code{Granulicella tundricola} \code{Haemophilus ducreyi 35000HP} \code{Haemophilus influenzae 10810} \code{Haemophilus influenzae 86-028NP (nontypeable)} \code{Haemophilus influenzae F3031} \code{Haemophilus influenzae F3047} \code{Haemophilus influenzae PittEE} \code{Haemophilus influenzae PittGG} \code{Haemophilus influenzae R2866} \code{Haemophilus influenzae Rd KW20 (serotype d)} \code{Haemophilus parainfluenzae T3T1} \code{Haemophilus parasuis SH0165} \code{Haemophilus somnus 2336} \code{Haemophilus somnus (Histophilus somni) 129PT} \code{Hahella chejuensis KCTC 2396} \code{Halalkalicoccus jeotgali B3} \code{Halanaerobium hydrogeniformans} \code{Halanaerobium praevalens DSM 2228} \code{Haliangium ochraceum DSM 14365} \code{Haliscomenobacter hydrossis DSM 1100} \code{Haloarcula hispanica ATCC 33960} \code{Haloarcula marismortui ATCC 43049} \code{Halobacillus halophilus DSM 2266} \code{Halobacterium salinarum R1} \code{Halobacterium sp. 
NRC-1} \code{Haloferax mediterranei ATCC 33500} \code{Haloferax volcanii DS2} \code{Halogeometricum borinquense DSM 11551} \code{Halomicrobium mukohataei DSM 12286} \code{Halomonas elongata DSM 2581} \code{Halophilic archaeon DL31} \code{Halopiger xanaduensis SH-6} \code{Haloquadratum walsbyi C23} \code{Halorhabdus utahensis DSM 12940} \code{Halorhodospira halophila SL1} \code{Halorubrum lacusprofundi ATCC 49239} \code{Haloterrigena turkmenica DSM 5511} \code{Halothermothrix orenii H 168} \code{Halothiobacillus neapolitanus c2} \code{Helicobacter acinonychis Sheeba} \code{Helicobacter bizzozeronii CIII-1} \code{Helicobacter cetorum MIT 00-7128} \code{Helicobacter cetorum MIT 99-5656} \code{Helicobacter cinaedi PAGU611} \code{Helicobacter felis ATCC 49179} \code{Helicobacter hepaticus ATCC 51449} \code{Helicobacter mustelae 12198} \code{Helicobacter pylori 2017} \code{Helicobacter pylori 2018} \code{Helicobacter pylori 26695} \code{Helicobacter pylori 35A} \code{Helicobacter pylori 52} \code{Helicobacter pylori 83} \code{Helicobacter pylori 908} \code{Helicobacter pylori B8} \code{Helicobacter pylori Cuz20} \code{Helicobacter pylori ELS37} \code{Helicobacter pylori F16} \code{Helicobacter pylori F30} \code{Helicobacter pylori F32} \code{Helicobacter pylori F57} \code{Helicobacter pylori G27} \code{Helicobacter pylori Gambia94/24} \code{Helicobacter pylori HUP-B14} \code{Helicobacter pylori India7} \code{Helicobacter pylori J99} \code{Helicobacter pylori Lithuania75} \code{Helicobacter pylori P12} \code{Helicobacter pylori PeCan18} \code{Helicobacter pylori PeCan4} \code{Helicobacter pylori Puno120} \code{Helicobacter pylori Puno135} \code{Helicobacter pylori Sat464} \code{Helicobacter pylori Shi112} \code{Helicobacter pylori Shi169} \code{Helicobacter pylori Shi417} \code{Helicobacter pylori Shi470} \code{Helicobacter pylori SJM180} \code{Helicobacter pylori SNT49} \code{Helicobacter pylori SouthAfrica7} \code{Helicobacter pylori v225d} \code{Helicobacter pylori 
XZ274} \code{Heliobacterium modesticaldum Ice1} \code{Herbaspirillum seropedicae SmR1} \code{Herpetosiphon aurantiacus DSM 785} \code{Hippea maritima DSM 10411} \code{Hirschia baltica ATCC 49814} \code{Hydrogenobacter thermophilus TK-6} \code{Hydrogenobaculum sp. Y04AAS1} \code{Hyperthermus butylicus DSM 5456} \code{Hyphomicrobium denitrificans ATCC 51888} \code{Hyphomonas neptunium ATCC 15444} \code{Idiomarina loihiensis L2TR} \code{Ignavibacterium album JCM 16511} \code{Ignicoccus hospitalis KIN4/I} \code{Ignisphaera aggregans DSM 17230} \code{Ilyobacter polytropus DSM 2926} \code{Intrasporangium calvum DSM 43043} \code{Isoptericola variabilis 225} \code{Isosphaera pallida ATCC 43644} \code{Jannaschia sp. CCS1} \code{Jonesia denitrificans DSM 20603} \code{Kangiella koreensis DSM 16069} \code{Ketogulonicigenium vulgare Y25} \code{Ketogulonigenium vulgarum WSH-001} \code{Kineococcus radiotolerans SRS30216} \code{Kitasatospora setae KM-6054} \code{Klebsiella oxytoca KCTC 1686} \code{Klebsiella pneumoniae 342} \code{Klebsiella pneumoniae KCTC 2242} \code{Klebsiella pneumoniae NTUH-K2044} \code{Klebsiella pneumoniae subsp. pneumoniae HS11286} \code{Klebsiella variicola At-22} \code{Kocuria rhizophila DC2201} \code{Kosmotoga olearia TBF 19.5.1} \code{Kribbella flavida DSM 17836} \code{Krokinobacter sp. 4H-3-7-5} \code{Kyrpidia tusciae DSM 2912} \code{Kytococcus sedentarius DSM 20547} \code{Lacinutrix sp. 5H-3-7-4} \code{Lactobacillus acidophilus 30SC} \code{Lactobacillus acidophilus NCFM} \code{Lactobacillus amylovorus GRL 1112} \code{Lactobacillus amylovorus GRL1118} \code{Lactobacillus brevis ATCC 367} \code{Lactobacillus buchneri NRRL B-30929} \code{Lactobacillus casei ATCC 334} \code{Lactobacillus casei BD-II} \code{Lactobacillus casei BL23} \code{Lactobacillus casei LC2W} \code{Lactobacillus casei Zhang} \code{Lactobacillus crispatus ST1} \code{Lactobacillus delbrueckii subsp. bulgaricus 2038} \code{Lactobacillus delbrueckii subsp. 
bulgaricus ATCC 11842} \code{Lactobacillus delbrueckii subsp. bulgaricus ATCC BAA-365} \code{Lactobacillus fermentum CECT 5716} \code{Lactobacillus fermentum IFO 3956} \code{Lactobacillus gasseri ATCC 33323} \code{Lactobacillus helveticus DPC 4571} \code{Lactobacillus helveticus H10} \code{Lactobacillus helveticus R0052} \code{Lactobacillus johnsonii DPC 6026} \code{Lactobacillus johnsonii FI9785} \code{Lactobacillus johnsonii NCC 533} \code{Lactobacillus kefiranofaciens ZW3} \code{Lactobacillus plantarum JDM1} \code{Lactobacillus plantarum subsp. plantarum ST-III} \code{Lactobacillus plantarum WCFS1} \code{Lactobacillus reuteri DSM 20016} \code{Lactobacillus reuteri JCM 1112} \code{Lactobacillus reuteri SD2112} \code{Lactobacillus rhamnosus ATCC 8530} \code{Lactobacillus rhamnosus GG} \code{Lactobacillus rhamnosus Lc 705} \code{Lactobacillus ruminis ATCC 27782} \code{Lactobacillus sakei 23K} \code{Lactobacillus salivarius CECT 5713} \code{Lactobacillus salivarius UCC118} \code{Lactobacillus sanfranciscensis TMW 1.1304} \code{Lactococcus garvieae ATCC 49156} \code{Lactococcus garvieae Lg2} \code{Lactococcus lactis subsp. cremoris MG1363} \code{Lactococcus lactis subsp. cremoris NZ9000} \code{Lactococcus lactis subsp. cremoris SK11} \code{Lactococcus lactis subsp. lactis CV56} \code{Lactococcus lactis subsp. lactis IL1403} \code{Lactococcus lactis subsp. lactis KF147} \code{Laribacter hongkongensis HLHK9} \code{Lawsonia intracellularis PHE/MN1-00} \code{Leadbetterella byssophila DSM 17132} \code{Legionella pneumophila 2300/99 Alcoy} \code{Legionella pneumophila Corby} \code{Legionella pneumophila Lens} \code{Legionella pneumophila Paris} \code{Legionella pneumophila subsp. pneumophila ATCC 43290} \code{Legionella pneumophila subsp. pneumophila Philadelphia 1} \code{Leifsonia xyli subsp. 
xyli CTCB07} \code{Leptospira biflexa serovar Patoc Patoc 1 (Ames)} \code{Leptospira biflexa serovar Patoc Patoc 1 (Paris)} \code{Leptospira borgpetersenii serovar Hardjo-bovis JB197} \code{Leptospira borgpetersenii serovar Hardjo-bovis L550} \code{Leptospira interrogans serovar Copenhageni Fiocruz L1-130} \code{Leptospira interrogans serovar Lai 56601} \code{Leptospira interrogans serovar Lai IPAV} \code{Leptospirillum ferrooxidans C2-3} \code{Leptothrix cholodnii SP-6} \code{Leptotrichia buccalis C-1013-b} \code{Leuconostoc citreum KM20} \code{Leuconostoc gasicomitatum LMG 18811} \code{Leuconostoc kimchii IMSNU11154} \code{Leuconostoc mesenteroides subsp. mesenteroides ATCC 8293} \code{Leuconostoc mesenteroides subsp. mesenteroides J18} \code{Leuconostoc sp. C2} \code{Listeria innocua CLIP 11262 (serotype 6a)} \code{Listeria ivanovii subsp. ivanovii PAM 55} \code{Listeria monocytogenes 07PF0776} \code{Listeria monocytogenes 08-5578 (serotype 1/2a)} \code{Listeria monocytogenes 08-5923 (serotype 1/2a)} \code{Listeria monocytogenes 10403S} \code{Listeria monocytogenes ATCC 19117} \code{Listeria monocytogenes Clip81459 (serotype 4b)} \code{Listeria monocytogenes EGD-e (serotype 1/2a)} \code{Listeria monocytogenes F2365 (serotype 4b)} \code{Listeria monocytogenes Finland 1998} \code{Listeria monocytogenes FSL R2-561} \code{Listeria monocytogenes HCC23 (serotype 4a)} \code{Listeria monocytogenes J0161} \code{Listeria monocytogenes L99} \code{Listeria monocytogenes M7 (serotype 4a)} \code{Listeria monocytogenes serotype 7 SLCC2482} \code{Listeria monocytogenes SLCC2372} \code{Listeria monocytogenes SLCC2376} \code{Listeria monocytogenes SLCC2378} \code{Listeria monocytogenes SLCC2479} \code{Listeria monocytogenes SLCC2540} \code{Listeria monocytogenes SLCC2755} \code{Listeria monocytogenes SLCC5850} \code{Listeria monocytogenes SLCC7179} \code{Listeria seeligeri SLCC3954 (serovar 1/2b)} \code{Listeria welshimeri SLCC5334 (serotype 6b)} \code{Lysinibacillus sphaericus 
C3-41} \code{Macrococcus caseolyticus JCSC5402} \code{Magnetococcus marinus MC-1} \code{Magnetospirillum magneticum AMB-1} \code{Mahella australiensis 50-1 BON} \code{Mannheimia succiniciproducens MBEL55E} \code{Maribacter sp. HTCC2170} \code{Maricaulis maris MCS10} \code{Marinithermus hydrothermalis DSM 14884} \code{Marinitoga piezophila KA3} \code{Marinobacter adhaerens HP15} \code{Marinobacter aquaeolei VT8} \code{Marinomonas mediterranea MMB-1} \code{Marinomonas posidonica IVIA-Po-181} \code{Marinomonas sp. MWYL1} \code{Marivirga tractuosa DSM 4126} \code{Meiothermus ruber DSM 1279} \code{Meiothermus silvanus DSM 9946} \code{Melissococcus plutonius ATCC 35311} \code{Melissococcus plutonius DAT561} \code{Mesoplasma florum L1} \code{Mesorhizobium ciceri biovar biserrulae WSM1271} \code{Mesorhizobium loti MAFF303099} \code{Mesorhizobium opportunistum WSM2075} \code{Mesorhizobium sp. BNC1} \code{Mesotoga prima MesG1.Ag.4.2} \code{Metallosphaera cuprina Ar-4} \code{Metallosphaera sedula DSM 5348} \code{Methanobacterium sp. AL-21} \code{Methanobacterium sp. SWAN-1} \code{Methanobrevibacter ruminantium M1} \code{Methanobrevibacter smithii ATCC 35061} \code{Methanocaldococcus fervens AG86} \code{Methanocaldococcus infernus ME} \code{Methanocaldococcus jannaschii DSM 2661} \code{Methanocaldococcus sp. 
FS406-22} \code{Methanocaldococcus vulcanius M7} \code{Methanocella arvoryzae MRE50} \code{Methanocella conradii HZ254} \code{Methanocella paludicola SANAE} \code{Methanococcoides burtonii DSM 6242} \code{Methanococcus aeolicus Nankai-3} \code{Methanococcus maripaludis C5} \code{Methanococcus maripaludis C6} \code{Methanococcus maripaludis C7} \code{Methanococcus maripaludis S2} \code{Methanococcus maripaludis X1} \code{Methanococcus vannielii SB} \code{Methanococcus voltae A3} \code{Methanocorpusculum labreanum Z} \code{Methanoculleus bourgensis MS2} \code{Methanoculleus marisnigri JR1} \code{Methanohalobium evestigatum Z-7303} \code{Methanohalophilus mahii DSM 5219} \code{Methanoplanus petrolearius DSM 11571} \code{Methanopyrus kandleri AV19} \code{Methanoregula boonei 6A8} \code{Methanosaeta concilii GP6} \code{Methanosaeta harundinacea 6Ac} \code{Methanosaeta thermophila PT} \code{Methanosalsum zhilinae DSM 4017} \code{Methanosarcina acetivorans C2A} \code{Methanosarcina barkeri fusaro, chromosome 1} \code{Methanosarcina mazei Go1} \code{Methanosphaera stadtmanae DSM 3091} \code{Methanospirillum hungatei JF-1} \code{Methanothermobacter marburgensis Marburg} \code{Methanothermobacter thermautotrophicus deltaH} \code{Methanothermococcus okinawensis IH1} \code{Methanothermus fervidus DSM 2088} \code{Methanotorris igneus Kol 5} \code{Methylacidiphilum infernorum V4} \code{Methylibium petroleiphilum PM1} \code{Methylobacillus flagellatus KT} \code{Methylobacterium chloromethanicum CM4} \code{Methylobacterium extorquens PA1} \code{Methylobacterium nodulans ORS 2060} \code{Methylobacterium populi BJ001} \code{Methylobacterium radiotolerans JCM 2831} \code{Methylobacterium sp. 4-46} \code{Methylocella silvestris BL2} \code{Methylococcus capsulatus Bath} \code{Methylomonas methanica MC09} \code{Methylophaga sp. JAM1} \code{Methylophaga sp. 
JAM7} \code{Methylotenera mobilis JLW8} \code{Methylotenera versatilis 301} \code{Methylovorus glucosetrophus SIP3-4} \code{Methylovorus sp. MP688} \code{Micavibrio aeruginosavorus ARL-13} \code{Microbacterium testaceum StLB037} \code{Micrococcus luteus NCTC 2665} \code{Microcystis aeruginosa NIES-843} \code{Microlunatus phosphovorus NM-1} \code{Micromonospora aurantiaca ATCC 27029} \code{Micromonospora sp. L5} \code{Minibacterium massiliensis (Janthinobacterium sp. Marseille)} \code{Mobiluncus curtisii ATCC 43063} \code{Modestobacter marinus} \code{Moorella thermoacetica ATCC 39073} \code{Moraxella catarrhalis RH4} \code{Muricauda ruestringensis DSM 13258} \code{Mycobacterium abscessus ATCC 19977} \code{Mycobacterium africanum GM041182} \code{Mycobacterium avium 104} \code{Mycobacterium avium subsp. paratuberculosis K-10} \code{Mycobacterium bovis BCG Mexico} \code{Mycobacterium bovis BCG Pasteur 1173P2} \code{Mycobacterium bovis BCG Tokyo 172} \code{Mycobacterium canettii CIPT 140010059} \code{Mycobacterium chubuense NBB4} \code{Mycobacterium gilvum PYR-GCK (Mycobacterium flavescens PYR-GCK)} \code{Mycobacterium gilvum Spyr1} \code{Mycobacterium intracellulare ATCC 13950} \code{Mycobacterium intracellulare MOTT-02} \code{Mycobacterium intracellulare MOTT-64} \code{Mycobacterium leprae Br4923} \code{Mycobacterium leprae TN} \code{Mycobacterium marinum M} \code{Mycobacterium massiliense GO 06} \code{Mycobacterium smegmatis MC2 155} \code{Mycobacterium sp. JDM601} \code{Mycobacterium sp. JLS} \code{Mycobacterium sp. KMS} \code{Mycobacterium sp. MCS} \code{Mycobacterium sp. 
MOTT36Y} \code{Mycobacterium tuberculosis CCDC5079} \code{Mycobacterium tuberculosis CCDC5180} \code{Mycobacterium tuberculosis CDC1551, clinical strain} \code{Mycobacterium tuberculosis CTRI-2} \code{Mycobacterium tuberculosis F11} \code{Mycobacterium tuberculosis H37Ra} \code{Mycobacterium tuberculosis H37Rv} \code{Mycobacterium tuberculosis KZN 1435} \code{Mycobacterium tuberculosis KZN 4207} \code{Mycobacterium tuberculosis KZN 605} \code{Mycobacterium tuberculosis RGTB327} \code{Mycobacterium tuberculosis RGTB423} \code{Mycobacterium ulcerans Agy99} \code{Mycobacterium vanbaalenii PYR-1} \code{Mycoplasma agalactiae 5632} \code{Mycoplasma agalactiae PG2} \code{Mycoplasma arthritidis 158L3-1} \code{Mycoplasma bovis HB0801} \code{Mycoplasma bovis Hubei-1} \code{Mycoplasma bovis PG45} \code{Mycoplasma capricolum subsp. capricolum ATCC 27343} \code{Mycoplasma conjunctivae} \code{Mycoplasma crocodyli MP145} \code{Mycoplasma fermentans JER} \code{Mycoplasma fermentans M64} \code{Mycoplasma gallisepticum NC06_2006.080-5-2P} \code{Mycoplasma gallisepticum NC95_13295-2-2P} \code{Mycoplasma gallisepticum NY01_2001.047-5-1P} \code{Mycoplasma gallisepticum R(high)} \code{Mycoplasma gallisepticum R(low)} \code{Mycoplasma gallisepticum VA94_7994-1-7P} \code{Mycoplasma gallisepticum WI01_2001.043-13-2P} \code{Mycoplasma genitalium G37} \code{Mycoplasma genitalium M2288} \code{Mycoplasma genitalium M2321} \code{Mycoplasma genitalium M6282} \code{Mycoplasma genitalium M6320} \code{Mycoplasma haemocanis Illinois} \code{Mycoplasma haemofelis Langford 1} \code{Mycoplasma haemofelis Ohio2} \code{Mycoplasma hominis} \code{Mycoplasma hyopneumoniae 168} \code{Mycoplasma hyopneumoniae 232} \code{Mycoplasma hyopneumoniae 7448} \code{Mycoplasma hyopneumoniae J} \code{Mycoplasma hyorhinis GDL-1} \code{Mycoplasma hyorhinis HUB-1} \code{Mycoplasma hyorhinis MCLD} \code{Mycoplasma leachii 99/014/6} \code{Mycoplasma leachii PG50} \code{Mycoplasma mobile 163K} \code{Mycoplasma mycoides subsp. 
capri LC 95010} \code{Mycoplasma mycoides subsp. mycoides SC PG1} \code{Mycoplasma pneumoniae 309} \code{Mycoplasma pneumoniae FH} \code{Mycoplasma pneumoniae M129} \code{Mycoplasma pulmonis UAB CTIP} \code{Mycoplasma putrefaciens KS1} \code{Mycoplasma suis Illinois} \code{Mycoplasma suis KI3806} \code{Mycoplasma synoviae 53} \code{Mycoplasma wenyonii Massachusetts} \code{Myxococcus fulvus HW-1} \code{Myxococcus xanthus DK 1622} \code{Nakamurella multipartita DSM 44233} \code{Nanoarchaeum equitans Kin4-M} \code{Natranaerobius thermophilus JW/NM-WN-LF} \code{Natrialba magadii ATCC 43099} \code{Natrinema sp. J7-2} \code{Natronomonas pharaonis DSM 2160} \code{Nautilia profundicola AmH} \code{Neisseria gonorrhoeae FA 1090} \code{Neisseria gonorrhoeae NCCP11945} \code{Neisseria gonorrhoeae TCDC-NG08107} \code{Neisseria lactamica 020-06} \code{Neisseria meningitidis 053442} \code{Neisseria meningitidis alpha14 (contains capsule null locus)} \code{Neisseria meningitidis alpha710 (serogroup B)} \code{Neisseria meningitidis FAM18 (serogroup C)} \code{Neisseria meningitidis G2136 (serogroup B)} \code{Neisseria meningitidis H44/76 (serogroup B)} \code{Neisseria meningitidis M01-240149 (serogroup B)} \code{Neisseria meningitidis M01-240355 (serogroup B)} \code{Neisseria meningitidis M04-240196 (serogroup B)} \code{Neisseria meningitidis MC58 (serogroup B)} \code{Neisseria meningitidis NZ-05/33 (serogroup B)} \code{Neisseria meningitidis WUE 2594 (serogroup A)} \code{Neisseria meningitidis Z2491 (serogroup A)} \code{Neorickettsia risticii Illinois} \code{Neorickettsia sennetsu Miyayama} \code{Niastella koreensis GR20-10} \code{Nitratifractor salsuginis DSM 16511} \code{Nitratiruptor sp. SB155-2} \code{Nitrobacter hamburgensis X14} \code{Nitrobacter winogradskyi Nb-255} \code{Nitrosococcus halophilus Nc4} \code{Nitrosococcus oceani ATCC 19707} \code{Nitrosococcus watsonii C-113} \code{Nitrosomonas europaea ATCC 19718} \code{Nitrosomonas eutropha C91} \code{Nitrosomonas sp. 
AL212} \code{Nitrosomonas sp. Is79A3} \code{Nitrosopumilus maritimus SCM1} \code{Nitrosospira multiformis ATCC 25196} \code{Nocardia farcinica IFM 10152} \code{Nocardioides sp. JS614} \code{Nocardiopsis alba ATCC BAA-2165} \code{Nocardiopsis dassonvillei subsp. dassonvillei DSM 43111} \code{Nostoc punctiforme PCC 73102} \code{Novosphingobium aromaticivorans DSM 12444} \code{Novosphingobium sp. PP1Y} \code{Oceanimonas sp. GK1} \code{Oceanithermus profundus DSM 14977} \code{Oceanobacillus iheyensis HTE831} \code{Ochrobactrum anthropi ATCC 49188} \code{Odoribacter splanchnicus DSM 20712} \code{Oenococcus oeni PSU-1} \code{Oligotropha carboxidovorans OM4} \code{Oligotropha carboxidovorans OM5 (Goettingen)} \code{Oligotropha carboxidovorans OM5 (Mississippi)} \code{Olsenella uli DSM 7084} \code{Onion yellows phytoplasma OY-M} \code{Opitutus terrae PB90-1} \code{Orientia tsutsugamushi Boryong} \code{Orientia tsutsugamushi Ikeda} \code{Ornithobacterium rhinotracheale DSM 15997} \code{Oscillibacter valericigenes Sjm18-20} \code{Owenweeksia hongkongensis DSM 17368} \code{Paenibacillus mucilaginosus 3016} \code{Paenibacillus mucilaginosus K02} \code{Paenibacillus mucilaginosus KNP414} \code{Paenibacillus polymyxa E681} \code{Paenibacillus polymyxa M1} \code{Paenibacillus polymyxa SC2} \code{Paenibacillus sp. JDR-2} \code{Paenibacillus sp. Y412MC10} \code{Paenibacillus terrae HPL-003} \code{Paludibacter propionicigenes WB4} \code{Pantoea ananatis AJ13355} \code{Pantoea ananatis LMG 20103} \code{Pantoea ananatis LMG 5342} \code{Pantoea ananatis PA13} \code{Pantoea sp. At-9b} \code{Pantoea vagans C9-1} \code{Parabacteroides distasonis ATCC 8503} \code{Parachlamydia acanthamoebae UV-7} \code{Paracoccus denitrificans PD1222} \code{Parvibaculum lavamentivorans DS-1} \code{Parvularcula bermudensis HTCC2503} \code{Pasteurella multocida 36950} \code{Pasteurella multocida subsp. multocida 3480} \code{Pasteurella multocida subsp. multocida HN06} \code{Pasteurella multocida subsp. 
multocida Pm70} \code{Pectobacterium atrosepticum SCRI1043} \code{Pectobacterium carotovorum subsp. carotovorum PC1} \code{Pectobacterium wasabiae WPP163} \code{Pediococcus claussenii ATCC BAA-344} \code{Pediococcus pentosaceus ATCC 25745} \code{Pedobacter heparinus DSM 2366} \code{Pedobacter saltans DSM 12145} \code{Pelobacter carbinolicus DSM 2380} \code{Pelobacter propionicus DSM 2379} \code{Pelodictyon luteolum DSM 273} \code{Pelodictyon phaeoclathratiforme BU-1} \code{Pelotomaculum thermopropionicum SI} \code{Persephonella marina EX-H1} \code{Petrotoga mobilis SJ95} \code{Phaeobacter gallaeciensis 2.10} \code{Phaeobacter gallaeciensis DSM 17395} \code{Phenylobacterium zucineum HLK1} \code{Photobacterium profundum SS9} \code{Photorhabdus asymbiotica ATCC43949} \code{Photorhabdus luminescens subsp. laumondii TTO1} \code{Phycisphaera mikurensis NBRC 102666} \code{Picrophilus torridus DSM 9790} \code{Pirellula staleyi DSM 6068} \code{Planctomyces brasiliensis DSM 5305} \code{Planctomyces limnophilus DSM 3776} \code{Polaromonas naphthalenivorans CJ2} \code{Polaromonas sp. JS666} \code{Polymorphum gilvum SL003B-26A1} \code{Polynucleobacter necessarius subsp. asymbioticus QLW-P1DMWA-1} \code{Polynucleobacter necessarius subsp. necessarius STIR1} \code{Porphyromonas asaccharolytica DSM 20707} \code{Porphyromonas gingivalis ATCC 33277} \code{Porphyromonas gingivalis TDC60} \code{Porphyromonas gingivalis W83} \code{Prevotella denticola F0289} \code{Prevotella intermedia 17} \code{Prevotella melaninogenica ATCC 25845} \code{Prevotella ruminicola 23} \code{Prochlorococcus marinus AS9601} \code{Prochlorococcus marinus MED4 (subsp. pastoris str. 
CCMP1986)} \code{Prochlorococcus marinus MIT 9211} \code{Prochlorococcus marinus MIT 9215} \code{Prochlorococcus marinus MIT 9301} \code{Prochlorococcus marinus MIT 9303} \code{Prochlorococcus marinus MIT 9312} \code{Prochlorococcus marinus MIT 9313} \code{Prochlorococcus marinus MIT 9515} \code{Prochlorococcus marinus NATL1A} \code{Prochlorococcus marinus NATL2A} \code{Prochlorococcus marinus SS120 (subsp. marinus CCMP1375)} \code{Propionibacterium acnes 266} \code{Propionibacterium acnes 6609} \code{Propionibacterium acnes ATCC 11828} \code{Propionibacterium acnes SK137} \code{Propionibacterium acnes TypeIA2 P.acn17} \code{Propionibacterium acnes TypeIA2 P.acn31} \code{Propionibacterium acnes TypeIA2 P.acn33} \code{Propionibacterium freudenreichii subsp. shermanii CIRM-BIA1} \code{Propionibacterium propionicum F0230a} \code{Prosthecochloris aestuarii DSM 271} \code{Proteus mirabilis HI4320} \code{Providencia stuartii MRSN 2154} \code{Pseudoalteromonas atlantica T6c} \code{Pseudoalteromonas haloplanktis TAC125} \code{Pseudoalteromonas sp. SM9913} \code{Pseudogulbenkiania sp. NH8B} \code{Pseudomonas aeruginosa DK2} \code{Pseudomonas aeruginosa LESB58} \code{Pseudomonas aeruginosa M18} \code{Pseudomonas aeruginosa NCGM2.S1} \code{Pseudomonas aeruginosa PA7} \code{Pseudomonas aeruginosa PAO1} \code{Pseudomonas aeruginosa UCBPP-PA14} \code{Pseudomonas brassicacearum subsp. 
brassicacearum NFM421} \code{Pseudomonas fluorescens A506} \code{Pseudomonas fluorescens F113} \code{Pseudomonas fluorescens Pf0-1} \code{Pseudomonas fluorescens SBW25} \code{Pseudomonas fulva 12-X} \code{Pseudomonas mendocina NK-01} \code{Pseudomonas mendocina ymp} \code{Pseudomonas protegens Pf-5} \code{Pseudomonas putida BIRD-1} \code{Pseudomonas putida F1} \code{Pseudomonas putida GB-1} \code{Pseudomonas putida KT2440} \code{Pseudomonas putida ND6} \code{Pseudomonas putida S16} \code{Pseudomonas putida W619} \code{Pseudomonas stutzeri A1501} \code{Pseudomonas stutzeri ATCC 17588 = LMG 11199} \code{Pseudomonas stutzeri CCUG 29243} \code{Pseudomonas stutzeri DSM 10701} \code{Pseudomonas stutzeri DSM 4166} \code{Pseudomonas syringae pv. phaseolicola 1448A} \code{Pseudomonas syringae pv. syringae B728a} \code{Pseudomonas syringae pv. tomato DC3000} \code{Pseudonocardia dioxanivorans CB1190} \code{Pseudovibrio sp. FO-BEG1} \code{Pseudoxanthomonas spadix BD-a59} \code{Pseudoxanthomonas suwonensis 11-1} \code{Psychrobacter arcticus 273-4} \code{Psychrobacter cryohalolentis K5} \code{Psychrobacter sp. PRwf-1} \code{Psychromonas ingrahamii 37} \code{Pusillimonas sp. T7-7} \code{Pyrobaculum aerophilum IM2} \code{Pyrobaculum arsenaticum DSM 13514} \code{Pyrobaculum calidifontis JCM 11548} \code{Pyrobaculum islandicum DSM 4184} \code{Pyrobaculum sp. 1860} \code{Pyrococcus abyssi GE5} \code{Pyrococcus furiosus COM1} \code{Pyrococcus furiosus DSM 3638} \code{Pyrococcus horikoshii OT3} \code{Pyrococcus sp. NA2} \code{Pyrococcus sp. ST04} \code{Pyrococcus yayanosii CH1} \code{Pyrolobus fumarii 1A} \code{Rahnella aquatilis CIP 78.65 = ATCC 33071} \code{Rahnella aquatilis HX2} \code{Rahnella sp. 
Y9602} \code{Ralstonia eutropha H16} \code{Ralstonia eutropha JMP134} \code{Ralstonia pickettii 12D} \code{Ralstonia pickettii 12J} \code{Ralstonia solanacearum GMI1000} \code{Ralstonia solanacearum Po82} \code{Ramlibacter tataouinensis TTB310} \code{Renibacterium salmoninarum ATCC 33209} \code{Rhizobium leguminosarum bv. trifolii WSM1325} \code{Rhizobium leguminosarum bv. trifolii WSM2304} \code{Rhizobium leguminosarum bv. viciae 3841} \code{Rhodobacter capsulatus SB1003} \code{Rhodobacter sphaeroides 2.4.1} \code{Rhodobacter sphaeroides ATCC 17025} \code{Rhodobacter sphaeroides ATCC 17029} \code{Rhodococcus equi 103S} \code{Rhodococcus erythropolis PR4} \code{Rhodococcus jostii RHA1} \code{Rhodococcus opacus B4} \code{Rhodoferax ferrireducens T118 (DSM 15236)} \code{Rhodomicrobium vannielii ATCC 17100} \code{Rhodopirellula baltica SH 1 (Pirellula sp. strain 1)} \code{Rhodopseudomonas palustris BisA53} \code{Rhodopseudomonas palustris BisB18} \code{Rhodopseudomonas palustris BisB5} \code{Rhodopseudomonas palustris CGA009} \code{Rhodopseudomonas palustris DX-1} \code{Rhodopseudomonas palustris HaA2} \code{Rhodopseudomonas palustris TIE-1} \code{Rhodospirillum centenum SW} \code{Rhodospirillum photometricum DSM 122} \code{Rhodospirillum rubrum ATCC 11170} \code{Rhodospirillum rubrum F11} \code{Rhodothermus marinus DSM 4252} \code{Rhodothermus marinus SG0.5JP17-172} \code{Rickettsia africae ESF-5} \code{Rickettsia akari Hartford} \code{Rickettsia australis Cutlack} \code{Rickettsia bellii OSU 85-389} \code{Rickettsia bellii RML369-C} \code{Rickettsia canadensis CA410} \code{Rickettsia canadensis McKiel} \code{Rickettsia conorii Malish 7} \code{Rickettsia felis URRWXCal2} \code{Rickettsia heilongjiangensis 054} \code{Rickettsia japonica YH} \code{Rickettsia massiliae AZT80} \code{Rickettsia massiliae MTU5} \code{Rickettsia montanensis OSU 85-930} \code{Rickettsia parkeri Portsmouth} \code{Rickettsia peacockii Rustic} \code{Rickettsia philipii 364D} \code{Rickettsia 
prowazekii BuV67-CWPP} \code{Rickettsia prowazekii Chernikova} \code{Rickettsia prowazekii Dachau} \code{Rickettsia prowazekii GvV257} \code{Rickettsia prowazekii Katsinyian} \code{Rickettsia prowazekii Madrid E} \code{Rickettsia prowazekii Rp22} \code{Rickettsia prowazekii RpGvF24} \code{Rickettsia rhipicephali 3-7-female6-CWPP} \code{Rickettsia rickettsii Arizona} \code{Rickettsia rickettsii Brazil} \code{Rickettsia rickettsii Colombia} \code{Rickettsia rickettsii Hauke} \code{Rickettsia rickettsii Hino} \code{Rickettsia rickettsii Hlp#2} \code{Rickettsia rickettsii Iowa} \code{Rickettsia rickettsii Sheila Smith} \code{Rickettsia slovaca 13-B} \code{Rickettsia slovaca D-CWPP} \code{Rickettsia typhi B9991CWPP} \code{Rickettsia typhi TH1527} \code{Rickettsia typhi Wilmington (ATCC VR-144)} \code{Riemerella anatipestifer ATCC 11845 = DSM 15868} \code{Riemerella anatipestifer RA-GD} \code{Robiginitalea biformata HTCC2501} \code{Roseburia hominis A2-183} \code{Roseiflexus castenholzii DSM 13941} \code{Roseiflexus sp. RS-1} \code{Roseobacter denitrificans OCh 114} \code{Roseobacter litoralis Och 149} \code{Rothia dentocariosa ATCC 17931} \code{Rothia mucilaginosa DY-18} \code{Rubrivivax gelatinosus IL144} \code{Rubrobacter xylanophilus DSM 9941} \code{Ruegeria pomeroyi DSS-3} \code{Ruegeria sp. TM1040} \code{Ruminococcus albus 7} \code{Runella slithyformis DSM 19594} \code{Saccharomonospora viridis DSM 43017} \code{Saccharophagus degradans 2-40} \code{Saccharopolyspora erythraea NRRL 2338} \code{Salinibacter ruber DSM 13855} \code{Salinibacter ruber M8} \code{Salinispora arenicola CNS-205} \code{Salinispora tropica CNB-440} \code{Salmonella bongori NCTC 12419} \code{Salmonella enterica subsp. arizonae serovar 62:z4,z23:--} \code{Salmonella enterica subsp. enterica serovar Agona SL483} \code{Salmonella enterica subsp. enterica serovar Choleraesuis SC-B67} \code{Salmonella enterica subsp. enterica serovar Dublin CT_02021853} \code{Salmonella enterica subsp. 
enterica serovar Enteritidis P125109} \code{Salmonella enterica subsp. enterica serovar Gallinarum 287/91} \code{Salmonella enterica subsp. enterica serovar Heidelberg B182} \code{Salmonella enterica subsp. enterica serovar Heidelberg SL476} \code{Salmonella enterica subsp. enterica serovar Newport SL254} \code{Salmonella enterica subsp. enterica serovar Paratyphi A AKU_12601} \code{Salmonella enterica subsp. enterica serovar Paratyphi A ATCC 9150} \code{Salmonella enterica subsp. enterica serovar Paratyphi B} \code{Salmonella enterica subsp. enterica serovar Paratyphi C RKS4594} \code{Salmonella enterica subsp. enterica serovar Schwarzengrund CVM19633} \code{Salmonella enterica subsp. enterica serovar Typhimurium 14028S} \code{Salmonella enterica subsp. enterica serovar Typhimurium 798} \code{Salmonella enterica subsp. enterica serovar Typhimurium D23580} \code{Salmonella enterica subsp. enterica serovar Typhimurium LT2} \code{Salmonella enterica subsp. enterica serovar Typhimurium SL1344} \code{Salmonella enterica subsp. enterica serovar Typhimurium ST4/74} \code{Salmonella enterica subsp. enterica serovar Typhimurium T000240} \code{Salmonella enterica subsp. enterica serovar Typhimurium UK-1} \code{Salmonella enterica subsp. enterica serovar Typhi P-stx-12} \code{Salmonella enterica subsp. enterica serovar Typhi Ty2} \code{Sanguibacter keddieii DSM 10542} \code{Saprospira grandis Lewin} \code{Sebaldella termitidis ATCC 33386} \code{Secondary endosymbiont of Ctenarytaina eucalypti} \code{Segniliparus rotundus DSM 44985} \code{Selenomonas ruminantium subsp. lactilytica TAM6421} \code{Selenomonas sputigena ATCC 35185} \code{Serratia plymuthica AS9} \code{Serratia proteamaculans 568} \code{Serratia sp. AS12} \code{Serratia sp. 
AS13} \code{Serratia symbiotica Cinara cedri} \code{Shewanella amazonensis SB2B} \code{Shewanella baltica BA175} \code{Shewanella baltica OS117} \code{Shewanella baltica OS155} \code{Shewanella baltica OS185} \code{Shewanella baltica OS195} \code{Shewanella baltica OS223} \code{Shewanella baltica OS678} \code{Shewanella denitrificans OS217} \code{Shewanella frigidimarina NCIMB 400} \code{Shewanella halifaxensis HAW-EB4} \code{Shewanella loihica PV-4} \code{Shewanella oneidensis MR-1} \code{Shewanella pealeana ATCC 700345} \code{Shewanella piezotolerans WP3} \code{Shewanella putrefaciens 200} \code{Shewanella putrefaciens CN-32} \code{Shewanella sediminis HAW-EB3} \code{Shewanella sp. ANA-3} \code{Shewanella sp. MR-4} \code{Shewanella sp. MR-7} \code{Shewanella sp. W3-18-1} \code{Shewanella violacea DSS12} \code{Shewanella woodyi ATCC 51908} \code{Shigella boydii CDC 3083-94} \code{Shigella boydii Sb227} \code{Shigella dysenteriae Sd197} \code{Shigella flexneri 2002017 (serotype Fxv)} \code{Shigella flexneri 2457T (serotype 2a)} \code{Shigella flexneri 301 (serotype 2a)} \code{Shigella flexneri 8401 (serotype 5b)} \code{Shigella sonnei 53G} \code{Shigella sonnei Ss046} \code{Sideroxydans lithotrophicus ES-1} \code{Simkania negevensis Z} \code{Sinorhizobium fredii HH103} \code{Sinorhizobium fredii NGR234} \code{Sinorhizobium fredii USDA 257} \code{Sinorhizobium medicae WSM419} \code{Sinorhizobium meliloti AK83} \code{Sinorhizobium meliloti BL225C} \code{Sinorhizobium meliloti SM11} \code{Slackia heliotrinireducens DSM 20476} \code{Sodalis glossinidius morsitans, endosymbiont of Glossina spp.} \code{Solibacillus silvestris StLB046} \code{Sorangium cellulosum So ce 56} \code{Sphaerobacter thermophilus DSM 20745} \code{Sphaerochaeta pleomorpha Grapes} \code{Sphingobacterium sp. 21} \code{Sphingobium chlorophenolicum L-1} \code{Sphingobium japonicum UT26S} \code{Sphingobium sp. 
SYK-6} \code{Sphingomonas wittichii RW1} \code{Sphingopyxis alaskensis RB2256} \code{Spirochaeta africana DSM 8902} \code{Spirochaeta caldaria DSM 7334} \code{Spirochaeta coccoides DSM 17374} \code{Spirochaeta smaragdinae DSM 11293} \code{Spirochaeta sp. Buddy} \code{Spirochaeta thermophila DSM 6192} \code{Spirochaeta thermophila DSM 6578} \code{Spirosoma linguale DSM 74} \code{Stackebrandtia nassauensis DSM 44728} \code{Staphylococcus aureus RF122/ET3-1, bovine mastitis-causing} \code{Staphylococcus aureus subsp. aureus 11819-97} \code{Staphylococcus aureus subsp. aureus 71193} \code{Staphylococcus aureus subsp. aureus COL, methicillin-resistant} \code{Staphylococcus aureus subsp. aureus ECT-R 2} \code{Staphylococcus aureus subsp. aureus ED133} \code{Staphylococcus aureus subsp. aureus ED98} \code{Staphylococcus aureus subsp. aureus HO 5096 0412} \code{Staphylococcus aureus subsp. aureus JH9, meticillin-resistant,} \code{Staphylococcus aureus subsp. aureus JKD6008} \code{Staphylococcus aureus subsp. aureus JKD6159} \code{Staphylococcus aureus subsp. aureus M013} \code{Staphylococcus aureus subsp. aureus MRSA252, methicillin-resistant} \code{Staphylococcus aureus subsp. aureus MSHR1132} \code{Staphylococcus aureus subsp. aureus MSSA476, methicillin-} \code{Staphylococcus aureus subsp. aureus Mu3, MRSA strain with} \code{Staphylococcus aureus subsp. aureus Mu50, MRSA strain with} \code{Staphylococcus aureus subsp. aureus MW2, community-acquired MRSA} \code{Staphylococcus aureus subsp. aureus N315, hospital-acquired} \code{Staphylococcus aureus subsp. aureus Newman} \code{Staphylococcus aureus subsp. aureus T0131} \code{Staphylococcus aureus subsp. aureus TCH60} \code{Staphylococcus aureus subsp. aureus TW20} \code{Staphylococcus aureus subsp. aureus USA300_FPR3757, community-} \code{Staphylococcus aureus subsp. aureus USA300_TCH1516, community-} \code{Staphylococcus aureus subsp. aureus VC40} \code{Staphylococcus carnosus subsp. 
carnosus TM300} \code{Staphylococcus epidermidis ATCC 12228} \code{Staphylococcus epidermidis RP62A} \code{Staphylococcus lugdunensis HKU09-01} \code{Staphylococcus lugdunensis N920143} \code{Staphylococcus pseudintermedius ED99} \code{Staphylococcus pseudintermedius HKU10-03} \code{Staphylococcus saprophyticus subsp. saprophyticus ATCC 15305} \code{Staphylothermus hellenicus DSM 12710} \code{Staphylothermus marinus F1} \code{Starkeya novella DSM 506} \code{Stenotrophomonas maltophilia D457} \code{Stenotrophomonas maltophilia JV3} \code{Stenotrophomonas maltophilia K279a} \code{Stenotrophomonas maltophilia R551-3} \code{Stigmatella aurantiaca DW4/3-1} \code{Streptobacillus moniliformis DSM 12112} \code{Streptococcus agalactiae 2603 (serotype V)} \code{Streptococcus agalactiae A909 (serotype Ia)} \code{Streptococcus agalactiae NEM316 (serotype III)} \code{Streptococcus dysgalactiae subsp. equisimilis ATCC 12394} \code{Streptococcus dysgalactiae subsp. equisimilis GGS_124} \code{Streptococcus equi subsp. equi 4047} \code{Streptococcus equi subsp. zooepidemicus H70} \code{Streptococcus equi subsp. zooepidemicus MGCS10565} \code{Streptococcus gallolyticus subsp. gallolyticus ATCC 43143} \code{Streptococcus gallolyticus subsp. gallolyticus ATCC BAA-2069} \code{Streptococcus gallolyticus UCN34} \code{Streptococcus gordonii Challis} \code{Streptococcus infantarius subsp. 
infantarius CJ18} \code{Streptococcus intermedius JTH08} \code{Streptococcus macedonicus ACA-DC 198} \code{Streptococcus mitis B6} \code{Streptococcus mutans GS-5} \code{Streptococcus mutans NN2025 (serotype c)} \code{Streptococcus mutans UA159 (serotype C)} \code{Streptococcus oralis Uo5} \code{Streptococcus parasanguinis ATCC 15912} \code{Streptococcus parasanguinis FW213} \code{Streptococcus parauberis KCTC 11537} \code{Streptococcus pasteurianus ATCC 43144} \code{Streptococcus pneumoniae 670-6B} \code{Streptococcus pneumoniae 70585} \code{Streptococcus pneumoniae AP200} \code{Streptococcus pneumoniae ATCC 700669 (serotype 23F ST81 lineage)} \code{Streptococcus pneumoniae CGSP14 (serotype 14)} \code{Streptococcus pneumoniae D39 (virulent serotype 2)} \code{Streptococcus pneumoniae G54 (serotype 19F)} \code{Streptococcus pneumoniae Hungary19A-6} \code{Streptococcus pneumoniae INV104} \code{Streptococcus pneumoniae INV200} \code{Streptococcus pneumoniae JJA} \code{Streptococcus pneumoniae OXC141} \code{Streptococcus pneumoniae P1031} \code{Streptococcus pneumoniae R6 (avirulent, laboratory-adapted D39} \code{Streptococcus pneumoniae Taiwan19F-14} \code{Streptococcus pneumoniae TCH8431/19A} \code{Streptococcus pneumoniae TIGR4 (virulent serotype 4)} \code{Streptococcus pseudopneumoniae IS7493} \code{Streptococcus pyogenes Alab49 (serotype M53)} \code{Streptococcus pyogenes Manfredo (serotype M5)} \code{Streptococcus pyogenes MGAS10394 (serotype M6)} \code{Streptococcus pyogenes MGAS15252 (serotype M59)} \code{Streptococcus pyogenes MGAS1882} \code{Streptococcus pyogenes MGAS315 (serotype M3)} \code{Streptococcus pyogenes MGAS8232 (serotype M18)} \code{Streptococcus pyogenes NZ131 (serotype M49)} \code{Streptococcus pyogenes SF370 (serotype M1)} \code{Streptococcus pyogenes SSI-1 (serotype M3)} \code{Streptococcus salivarius CCHSS3} \code{Streptococcus suis 05ZYH33} \code{Streptococcus suis 98HAH33} \code{Streptococcus suis A7} \code{Streptococcus suis BM407} 
\code{Streptococcus suis D12} \code{Streptococcus suis D9} \code{Streptococcus suis GZ1} \code{Streptococcus suis JS14} \code{Streptococcus suis P1/7} \code{Streptococcus suis S735} \code{Streptococcus suis SC84} \code{Streptococcus suis SS12} \code{Streptococcus suis ST1} \code{Streptococcus suis ST3} \code{Streptococcus thermophilus CNRZ1066} \code{Streptococcus thermophilus JIM 8232} \code{Streptococcus thermophilus LMD-9} \code{Streptococcus thermophilus LMG18311} \code{Streptococcus thermophilus MN-ZLW-002} \code{Streptococcus thermophilus ND03} \code{Streptococcus uberis 0140J} \code{Streptomyces avermitilis MA-4680} \code{Streptomyces bingchenggensis BCW-1} \code{Streptomyces cattleya NRRL 8057 = DSM 46488} \code{Streptomyces coelicolor A3(2)} \code{Streptomyces flavogriseus ATCC 33331} \code{Streptomyces griseus subsp. griseus NBRC 13350} \code{Streptomyces hygroscopicus subsp. jinggangensis 5008} \code{Streptomyces scabiei 87.22} \code{Streptomyces sp. SirexAA-E} \code{Streptomyces violaceusniger Tu 4113} \code{Streptosporangium roseum DSM 43021} \code{Sulfobacillus acidophilus DSM 10332} \code{Sulfobacillus acidophilus TPY} \code{Sulfolobus acidocaldarius DSM 639} \code{Sulfolobus islandicus HVE10/4} \code{Sulfolobus islandicus L.S.2.15} \code{Sulfolobus islandicus M.14.25} \code{Sulfolobus islandicus M.16.27} \code{Sulfolobus islandicus M.16.4} \code{Sulfolobus islandicus REY15A} \code{Sulfolobus islandicus Y.G.57.14} \code{Sulfolobus islandicus Y.N.15.51} \code{Sulfolobus solfataricus 98/2} \code{Sulfolobus tokodaii strain7} \code{Sulfuricurvum kujiense DSM 16994} \code{Sulfurihydrogenibium azorense Az-Fu1} \code{Sulfurihydrogenibium sp. YO3AOP1} \code{Sulfurimonas autotrophica DSM 16294} \code{Sulfurimonas denitrificans DSM 1251} \code{Sulfurospirillum barnesii SES-3} \code{Sulfurospirillum deleyianum DSM 6946} \code{Sulfurovum sp. 
NBC37-1} \code{Symbiobacterium thermophilum IAM14863} \code{Synechococcus elongatus PCC6301} \code{Synechococcus elongatus PCC 7942} \code{Synechococcus sp. CC9311} \code{Synechococcus sp. CC9605} \code{Synechococcus sp. CC9902} \code{Synechococcus sp. JA-2-3B'a(2-13) (Cyanobacteria bacterium} \code{Synechococcus sp. JA-3-3Ab (Cyanobacteria bacterium Yellowstone A-} \code{Synechococcus sp. PCC 7002} \code{Synechococcus sp. RCC307} \code{Synechococcus sp. WH 7803} \code{Synechococcus sp. WH 8102} \code{Synechocystis sp. PCC 6803} \code{Synechocystis sp. PCC 6803 GT-I} \code{Synechocystis sp. PCC 6803 GT-S} \code{Synechocystis sp. PCC 6803 PCC-N} \code{Synechocystis sp. PCC 6803 PCC-P} \code{Syntrophobacter fumaroxidans MPOB} \code{Syntrophobotulus glycolicus DSM 8271} \code{Syntrophomonas wolfei subsp. wolfei str. Goettingen} \code{Syntrophothermus lipocalidus DSM 12680} \code{Syntrophus aciditrophicus SB} \code{Tannerella forsythia ATCC 43037} \code{Taylorella asinigenitalis MCE3} \code{Taylorella equigenitalis ATCC 35865} \code{Taylorella equigenitalis MCE9} \code{Tepidanaerobacter sp. Re1} \code{Teredinibacter turnerae T7901} \code{Terriglobus roseus DSM 18391} \code{Terriglobus saanensis SP1PR4} \code{Tetragenococcus halophilus NBRC 12172} \code{Thauera sp. MZ1T} \code{Thermaerobacter marianensis DSM 12885} \code{Thermanaerovibrio acidaminovorans DSM 6589} \code{Thermincola potens JR} \code{Thermoanaerobacter brockii subsp. finnii Ako-1} \code{Thermoanaerobacter italicus Ab9} \code{Thermoanaerobacterium saccharolyticum JW/SL-YS485} \code{Thermoanaerobacterium thermosaccharolyticum DSM 571} \code{Thermoanaerobacterium xylanolyticum LX-11} \code{Thermoanaerobacter mathranii subsp. mathranii A3} \code{Thermoanaerobacter pseudethanolicus ATCC 33223} \code{Thermoanaerobacter sp. X513} \code{Thermoanaerobacter sp. 
X514} \code{Thermoanaerobacter tengcongensis MB4(T)} \code{Thermoanaerobacter wiegelii Rt8.B1} \code{Thermobaculum terrenum ATCC BAA-798} \code{Thermobifida fusca YX} \code{Thermobispora bispora DSM 43833} \code{Thermococcus barophilus MP} \code{Thermococcus gammatolerans EJ3} \code{Thermococcus kodakaraensis KOD1} \code{Thermococcus onnurineus NA1} \code{Thermococcus sibiricus MM 739} \code{Thermococcus sp. 4557} \code{Thermococcus sp. CL1} \code{Thermocrinis albus DSM 14484} \code{Thermodesulfatator indicus DSM 15286} \code{Thermodesulfobacterium sp. OPB45} \code{Thermodesulfobium narugense DSM 14796} \code{Thermodesulfovibrio yellowstonii DSM 11347} \code{Thermofilum pendens Hrk 5} \code{Thermogladius cellulolyticus 1633} \code{Thermomicrobium roseum DSM 5159} \code{Thermomonospora curvata DSM 43183} \code{Thermoplasma acidophilum DSM 1728} \code{Thermoplasma volcanium GSS1} \code{Thermoproteus neutrophilus V24Sta} \code{Thermoproteus uzoniensis 768-20} \code{Thermosediminibacter oceani DSM 16646} \code{Thermosipho africanus TCF52B} \code{Thermosipho melanesiensis BI429} \code{Thermosphaera aggregans DSM 11486} \code{Thermosynechococcus elongatus BP-1} \code{Thermotoga lettingae TMO} \code{Thermotoga maritima MSB8} \code{Thermotoga naphthophila RKU-10} \code{Thermotoga petrophila RKU-1} \code{Thermotoga sp. RQ2} \code{Thermotoga thermarum DSM 5069} \code{Thermovibrio ammonificans HB-1} \code{Thermovirga lienii DSM 17291} \code{Thermus scotoductus SA-01} \code{Thermus sp. CCB_US3_UF1} \code{Thermus thermophilus HB8} \code{Thermus thermophilus JL-18} \code{Thermus thermophilus SG0.5JP17-16} \code{Thioalkalimicrobium cyclicum ALM1} \code{Thioalkalivibrio sp. HL-EbGR7} \code{Thioalkalivibrio sp. 
K90mix} \code{Thiobacillus denitrificans ATCC 25259} \code{Thiocystis violascens DSM 198} \code{Thiomicrospira crunogena XCL-2} \code{Thiomonas intermedia K12} \code{Tistrella mobilis KA081020-065} \code{Tolumonas auensis DSM 9187} \code{Treponema azotonutricium ZAS-9} \code{Treponema brennaborense DSM 12168} \code{Treponema denticola ATCC 35405} \code{Treponema pallidum subsp. pallidum Chicago} \code{Treponema pallidum subsp. pallidum DAL-1} \code{Treponema pallidum subsp. pallidum Nichols} \code{Treponema pallidum subsp. pallidum SS14} \code{Treponema pallidum subsp. pertenue CDC2} \code{Treponema pallidum subsp. pertenue Gauthier} \code{Treponema pallidum subsp. pertenue SamoaD} \code{Treponema paraluiscuniculi Cuniculi A} \code{Treponema primitia ZAS-2} \code{Treponema succinifaciens DSM 2489} \code{Trichodesmium erythraeum IMS101} \code{Tropheryma whipplei TW08/27} \code{Tropheryma whipplei Twist} \code{Truepera radiovictrix DSM 17093} \code{Tsukamurella paurometabola DSM 20162} \code{Turneriella parva DSM 21527} \code{Uncultured Termite group 1 bacterium phylotype Rs-D17} \code{Ureaplasma parvum serovar 3 ATCC 27815} \code{Ureaplasma parvum serovar 3 ATCC 700970} \code{Ureaplasma urealyticum serovar 10 ATCC 33699} \code{Variovorax paradoxus EPS} \code{Variovorax paradoxus S110} \code{Veillonella parvula DSM 2008} \code{Verminephrobacter eiseniae EF01-2} \code{Verrucosispora maris AB-18-032} \code{Vibrio anguillarum 775} \code{Vibrio cholerae El Tor N16961 (biovar eltor)} \code{Vibrio cholerae IEC224} \code{Vibrio cholerae LMA3984-4} \code{Vibrio cholerae M66-2} \code{Vibrio cholerae MJ-1236} \code{Vibrio cholerae O1 2010EL-1786} \code{Vibrio cholerae O395} \code{Vibrio fischeri ES114} \code{Vibrio fischeri MJ11} \code{Vibrio furnissii NCTC 11218} \code{Vibrio harveyi ATCC BAA-1116} \code{Vibrio parahaemolyticus RIMD 2210633} \code{Vibrio sp. EJY3} \code{Vibrio sp. 
Ex25} \code{Vibrio vulnificus CMCP6} \code{Vibrio vulnificus MO6-24/O} \code{Vibrio vulnificus YJ016 (biotype 1)} \code{Vulcanisaeta distributa DSM 14429} \code{Vulcanisaeta moutnovskia 768-28} \code{Waddlia chondrophila WSU 86-1044} \code{Weeksella virosa DSM 16922} \code{Weissella koreensis KACC 15510} \code{Wigglesworthia glossinidia endosymbiont of Glossina morsitans} \code{Wigglesworthia glossinidia (Wigglesworthia brevipalpis),} \code{Wolbachia pipientis, endosymbiont of Culex quinquefasciatus Pel} \code{Wolbachia sp. wMel, endosymbiont of Drosophila melanogaster (fruit} \code{Wolbachia sp. wRi, endosymbiont of Drosophila simulans} \code{Wolbachia wBm, endosymbiont of Brugia malayi TRS} \code{Wolinella succinogenes DSM 1740} \code{Xanthobacter autotrophicus Py2} \code{Xanthomonas axonopodis pv. citrumelo F1} \code{Xanthomonas campestris pv. campestris 8004 (Beijing)} \code{Xanthomonas campestris pv. campestris B100} \code{Xanthomonas campestris pv. raphani 756C} \code{Xanthomonas campestris pv. vesicatoria 85-10} \code{Xanthomonas oryzae pv. oryzae KACC10331} \code{Xanthomonas oryzae pv. oryzae MAFF 311018} \code{Xanthomonas oryzae pv. oryzae PXO99A} \code{Xanthomonas oryzae pv. oryzicola BLS256} \code{Xenorhabdus bovienii SS-2004} \code{Xenorhabdus nematophila ATCC 19061} \code{Xylanimonas cellulosilytica DSM 15894} \code{Xylella fastidiosa 9a5c} \code{Xylella fastidiosa M12} \code{Xylella fastidiosa M23} \code{Xylella fastidiosa subsp. fastidiosa GB514} \code{Xylella fastidiosa Temecula1} \code{Yersinia enterocolitica subsp. enterocolitica 8081} \code{Yersinia enterocolitica subsp. palearctica 105.5R(r)} \code{Yersinia enterocolitica subsp. 
palearctica Y11} \code{Yersinia pestis 91001 (biovar Microtus)} \code{Yersinia pestis A1122} \code{Yersinia pestis Angola (virulent Pestoides isolate)} \code{Yersinia pestis Antiqua (biovar Antiqua)} \code{Yersinia pestis biovar Medievalis Harbin 35} \code{Yersinia pestis CO92 (biovar Orientalis)} \code{Yersinia pestis D106004} \code{Yersinia pestis D182038} \code{Yersinia pestis KIM 10 (biovar Mediaevalis)} \code{Yersinia pestis Nepal516 (biovar Antiqua)} \code{Yersinia pestis Pestoides F} \code{Yersinia pestis Z176003} \code{Yersinia pseudotuberculosis IP31758 (serotype O:1b)} \code{Yersinia pseudotuberculosis IP32953 (serotype I)} \code{Yersinia pseudotuberculosis PB1/+} \code{Yersinia pseudotuberculosis YPIII} \code{Zobellia galactanivorans} \code{Zunongwangia profunda SM-A87} \code{Zymomonas mobilis subsp. mobilis ATCC 10988} \code{Zymomonas mobilis subsp. mobilis ATCC 29191} \code{Zymomonas mobilis subsp. mobilis NCIMB 11163} \code{Zymomonas mobilis subsp. mobilis ZM4} \code{Zymomonas mobilis subsp. pomaceae ATCC 29192}}
\item{\code{OrgCode}}{a factor with levels \code{aaa} \code{aac} \code{aad} \code{aae} \code{aai} \code{aan} \code{aao} \code{aap} \code{aar} \code{aas} \code{aat} \code{aau} \code{aav} \code{aba} \code{abb} \code{abc} \code{abd} \code{abi} \code{abn} \code{abo} \code{abr} \code{abt} \code{abu} \code{abx} \code{abz} \code{aca} \code{acb} \code{acc} \code{acd} \code{ace} \code{ach} \code{acl} \code{acm} \code{acn} \code{aco} \code{acp} \code{acr} \code{acu} \code{ade} \code{adg} \code{adk} \code{adn} \code{aeh} \code{aex} \code{afd} \code{afe} \code{afi} \code{afl} \code{afn} \code{afo} \code{afr} \code{afu} \code{afw} \code{agr} \code{aha} \code{ahe} \code{aho} \code{ain} \code{ajs} \code{aka} \code{ali} \code{alt} \code{alv} \code{ama} \code{amc} \code{amd} \code{amf} \code{ami} \code{amm} \code{amn} \code{amo} \code{amr} \code{ams} \code{amt} \code{amu} \code{amv} \code{ana} \code{ank} \code{ant} \code{aoe} \code{apa} \code{apb} \code{ape} \code{aph} \code{apj} \code{apl} \code{apn} \code{apo} \code{app} \code{apr} \code{aps} \code{apv} \code{apw} \code{arc} \code{arr} \code{art} \code{asa} \code{asb} \code{asc} \code{asd} \code{ase} \code{asf} \code{asl} \code{asm} \code{asu} \code{ate} \code{atm} \code{atu} \code{aur} \code{ava} \code{ave} \code{avi} \code{avn} \code{avr} \code{awo} \code{axy} \code{ayw} \code{azc} \code{azl} \code{azo} \code{baa} \code{bab} \code{bac} \code{bad} \code{bae} \code{baf} \code{bafz} \code{bag} \code{bah} \code{bai} \code{baj} \code{bak} \code{bal} \code{bam} \code{ban} \code{bao} \code{bap} \code{baq} \code{bar} \code{bas} \code{bat} \code{bau} \code{bav} \code{baw} \code{bax} \code{bay} \code{baz} \code{bba} \code{bbb} \code{bbc} \code{bbd} \code{bbe} \code{bbf} \code{bbg} \code{bbi} \code{bbk} \code{bbl} \code{bbp} \code{bbr} \code{bbs} \code{bbt} \code{bbu} \code{bbv} \code{bbz} \code{bca} \code{bcb} \code{bcc} \code{bce} \code{bcg} \code{bch} \code{bci} \code{bcj} \code{bck} \code{bcm} \code{bcn} \code{bco} \code{bcp} 
\code{bcq} \code{bcr} \code{bcs} \code{bct} \code{bcu} \code{bcv} \code{bcw} \code{bcx} \code{bcy} \code{bcz} \code{bde} \code{bdu} \code{bfa} \code{bfg} \code{bfl} \code{bfr} \code{bfs} \code{bga} \code{bgb} \code{bgd} \code{bge} \code{bgf} \code{bgl} \code{bgr} \code{bha} \code{bhe} \code{bhl} \code{bhr} \code{bhy} \code{bid} \code{bip} \code{bja} \code{bjs} \code{bju} \code{bla} \code{blb} \code{blc} \code{bld} \code{blf} \code{bli} \code{blj} \code{blk} \code{bll} \code{blm} \code{bln} \code{blo} \code{blon} \code{blt} \code{blv} \code{bma} \code{bmb} \code{bmc} \code{bmd} \code{bme} \code{bmf} \code{bmg} \code{bmh} \code{bmi} \code{bmj} \code{bml} \code{bmm} \code{bmn} \code{bmq} \code{bmr} \code{bms} \code{bmt} \code{bmu} \code{bmv} \code{bmw} \code{bmx} \code{bmz} \code{bnc} \code{bni} \code{bnm} \code{bov} \code{bpa} \code{bpb} \code{bpc} \code{bpd} \code{bpe} \code{bpf} \code{bph} \code{bpi} \code{bpj} \code{bpl} \code{bpm} \code{bpn} \code{bpo} \code{bpp} \code{bpr} \code{bps} \code{bpt} \code{bpu} \code{bpy} \code{bpz} \code{bql} \code{bqu} \code{bqy} \code{bre} \code{brh} \code{brm} \code{brs} \code{bsa} \code{bsb} \code{bse} \code{bsi} \code{bsk} \code{bsn} \code{bsr} \code{bss} \code{bst} \code{bsv} \code{btb} \code{btc} \code{bte} \code{btf} \code{bth} \code{btk} \code{btl} \code{btr} \code{bts} \code{btu} \code{bua} \code{buc} \code{bug} \code{buh} \code{buj} \code{buk} \code{bup} \code{bur} \code{bva} \code{bvi} \code{bvu} \code{bwe} \code{bxe} \code{bxh} \code{bya} \code{byi} \code{caa} \code{cab} \code{cag} \code{cai} \code{cak} \code{cao} \code{cap} \code{car} \code{cat} \code{cau} \code{cay} \code{cba} \code{cbb} \code{cbc} \code{cbd} \code{cbe} \code{cbf} \code{cbg} \code{cbh} \code{cbi} \code{cbk} \code{cbl} \code{cbm} \code{cbn} \code{cbo} \code{cbs} \code{cbt} \code{cbu} \code{cby} \code{cca} \code{ccb} \code{cce} \code{cch} \code{ccl} \code{cco} \code{ccr} \code{ccs} \code{ccu} \code{ccv} \code{ccx} \code{cda} \code{cdb} \code{cdc} 
\code{cdd} \code{cde} \code{cdf} \code{cdg} \code{cdh} \code{cdi} \code{cdl} \code{cdp} \code{cdr} \code{cds} \code{cdt} \code{cdv} \code{cdw} \code{cdz} \code{cef} \code{ces} \code{cex} \code{cfe} \code{cff} \code{cfi} \code{cfl} \code{cfs} \code{cfu} \code{cfw} \code{cga} \code{cgb} \code{cgl} \code{cgo} \code{cgt} \code{cha} \code{chb} \code{chc} \code{chd} \code{chi} \code{chl} \code{chp} \code{chs} \code{cht} \code{chu} \code{chy} \code{cja} \code{cjd} \code{cje} \code{cji} \code{cjj} \code{cjk} \code{cjm} \code{cjn} \code{cjr} \code{cjs} \code{cju} \code{cki} \code{ckl} \code{ckn} \code{cko} \code{ckp} \code{ckr} \code{cla} \code{clb} \code{clc} \code{cle} \code{cli} \code{clj} \code{clo} \code{clp} \code{cls} \code{cly} \code{cma} \code{cmi} \code{cmr} \code{cms} \code{cmu} \code{cnc} \code{cni} \code{cno} \code{cob} \code{coc} \code{cod} \code{coe} \code{coi} \code{cop} \code{cor} \code{cos} \code{cou} \code{cow} \code{cpa} \code{cpb} \code{cpc} \code{cpe} \code{cpf} \code{cpg} \code{cph} \code{cpi} \code{cpj} \code{cpk} \code{cpl} \code{cpm} \code{cpn} \code{cpo} \code{cpp} \code{cpq} \code{cpr} \code{cps} \code{cpt} \code{cpu} \code{cpx} \code{cpy} \code{cpz} \code{cra} \code{crc} \code{crd} \code{crh} \code{crn} \code{cro} \code{crp} \code{crt} \code{cru} \code{crv} \code{csa} \code{csc} \code{cse} \code{csh} \code{csk} \code{csy} \code{cta} \code{ctb} \code{ctc} \code{ctd} \code{cte} \code{ctf} \code{ctg} \code{cth} \code{ctj} \code{ctk} \code{ctl} \code{ctm} \code{ctn} \code{cto} \code{ctq} \code{ctr} \code{cts} \code{ctt} \code{ctu} \code{ctv} \code{ctw} \code{ctx} \code{cty} \code{ctz} \code{cuc} \code{cue} \code{cul} \code{cur} \code{cva} \code{cvi} \code{cwo} \code{cya} \code{cyb} \code{cyc} \code{cyh} \code{cyj} \code{cyn} \code{cyp} \code{cyt} \code{cyu} \code{dac} \code{dae} \code{daf} \code{dai} \code{dak} \code{dal} \code{dao} \code{dap} \code{dar} \code{das} \code{dat} \code{dau} \code{dba} \code{dbr} \code{dca} \code{dda} \code{ddc} 
\code{ddd} \code{dde} \code{ddf} \code{ddh} \code{ddn} \code{ddr} \code{dds} \code{deb} \code{deg} \code{deh} \code{del} \code{det} \code{dev} \code{dfd} \code{dfe} \code{dge} \code{dgo} \code{dhd} \code{dia} \code{din} \code{dka} \code{dku} \code{dly} \code{dma} \code{dmi} \code{dmr} \code{dmu} \code{dno} \code{dol} \code{dor} \code{dpr} \code{dps} \code{dpt} \code{dra} \code{drm} \code{drt} \code{dru} \code{dsa} \code{dsh} \code{dsu} \code{dsy} \code{dte} \code{dth} \code{dti} \code{dtu} \code{dvg} \code{dvl} \code{dvm} \code{dvu} \code{dze} \code{eab} \code{eae} \code{eam} \code{eas} \code{eat} \code{eay} \code{eba} \code{ebd} \code{ebe} \code{ebi} \code{ebr} \code{ebt} \code{eca} \code{ecc} \code{ecd} \code{ece} \code{ecf} \code{ecg} \code{ech} \code{eci} \code{ecj} \code{ecl} \code{ecm} \code{ecn} \code{ecp} \code{ecs} \code{ecv} \code{ecw} \code{ecx} \code{ecy} \code{edh} \code{edj} \code{eec} \code{eel} \code{efa} \code{efc} \code{efd} \code{efi} \code{efu} \code{eha} \code{ehr} \code{eic} \code{eih} \code{ekf} \code{eko} \code{elc} \code{eld} \code{ele} \code{elh} \code{eli} \code{ell} \code{elm} \code{eln} \code{elo} \code{elp} \code{elr} \code{elu} \code{elx} \code{emi} \code{ena} \code{enc} \code{enl} \code{ent} \code{eoc} \code{eoh} \code{eoi} \code{eoj} \code{eok} \code{epr} \code{epy} \code{ere} \code{erh} \code{erj} \code{eru} \code{esa} \code{esc} \code{esi} \code{eta} \code{etd} \code{etr} \code{etw} \code{eun} \code{eyy} \code{faa} \code{fau} \code{fba} \code{fbc} \code{fbl} \code{fbr} \code{fcf} \code{fco} \code{ffo} \code{fin} \code{fjo} \code{fli} \code{fma} \code{fna} \code{fno} \code{fnu} \code{fpe} \code{fph} \code{fpl} \code{fps} \code{fra} \code{fre} \code{fri} \code{frt} \code{fsc} \code{fsi} \code{fsu} \code{fsy} \code{fta} \code{fte} \code{ftf} \code{ftg} \code{fth} \code{ftl} \code{ftm} \code{ftn} \code{ftr} \code{ftt} \code{ftu} \code{ftw} \code{gag} \code{gan} \code{gau} \code{gbe} \code{gbm} \code{gbr} \code{gca} \code{gct} 
\code{gdi} \code{gdj} \code{geb} \code{gem} \code{geo} \code{gfo} \code{gka} \code{glo} \code{gma} \code{gmc} \code{gme} \code{gni} \code{gob} \code{gor} \code{gox} \code{gpb} \code{gpo} \code{gsk} \code{gsu} \code{gte} \code{gth} \code{gtn} \code{gur} \code{gva} \code{gvg} \code{gvh} \code{gvi} \code{gwc} \code{gxy} \code{gya} \code{gyc} \code{gym} \code{hac} \code{hah} \code{hal} \code{hap} \code{has} \code{hau} \code{hba} \code{hbi} \code{hbo} \code{hbu} \code{hca} \code{hce} \code{hch} \code{hci} \code{hcm} \code{hcn} \code{hcp} \code{hde} \code{hdn} \code{hdu} \code{hef} \code{heg} \code{hel} \code{hen} \code{hep} \code{heq} \code{hes} \code{heu} \code{hex} \code{hey} \code{hfe} \code{hha} \code{hhd} \code{hhe} \code{hhi} \code{hhp} \code{hhq} \code{hhr} \code{hhy} \code{hif} \code{hil} \code{hin} \code{hip} \code{hiq} \code{hit} \code{hiu} \code{hiz} \code{hje} \code{hla} \code{hma} \code{hme} \code{hmo} \code{hmr} \code{hms} \code{hmu} \code{hna} \code{hne} \code{hoh} \code{hor} \code{hpc} \code{hpe} \code{hpf} \code{hpg} \code{hph} \code{hpi} \code{hpj} \code{hpk} \code{hpl} \code{hpm} \code{hpn} \code{hpo} \code{hpp} \code{hpq} \code{hpr} \code{hps} \code{hpt} \code{hpu} \code{hpv} \code{hpw} \code{hpx} \code{hpy} \code{hpz} \code{hse} \code{hsl} \code{hsm} \code{hso} \code{hte} \code{hth} \code{htu} \code{hut} \code{hvo} \code{hwc} \code{hxa} \code{hya} \code{iag} \code{ial} \code{ica} \code{iho} \code{ilo} \code{ipa} \code{ipo} \code{iva} \code{jan} \code{jde} \code{kcr} \code{kdi} \code{kfl} \code{kko} \code{kol} \code{kox} \code{kpe} \code{kpm} \code{kpo} \code{kpu} \code{kra} \code{krh} \code{kse} \code{ksk} \code{kva} \code{kvl} \code{kvu} \code{lac} \code{lai} \code{lam} \code{lan} \code{las} \code{lay} \code{lba} \code{lbf} \code{lbh} \code{lbi} \code{lbj} \code{lbl} \code{lbr} \code{lbu} \code{lby} \code{lca} \code{lcb} \code{lce} \code{lch} \code{lci} \code{lcr} \code{lcs} \code{lcz} \code{ldb} \code{ldl} \code{lec} \code{lfc} \code{lfe} 
\code{lfr} \code{lga} \code{lgr} \code{lgs} \code{lgv} \code{lhe} \code{lhk} \code{lhl} \code{lhr} \code{lic} \code{lie} \code{lil} \code{lin} \code{lip} \code{liv} \code{ljf} \code{ljh} \code{ljo} \code{lke} \code{lki} \code{lla} \code{llc} \code{llk} \code{llm} \code{lln} \code{llt} \code{lmc} \code{lme} \code{lmf} \code{lmg} \code{lmh} \code{lmj} \code{lml} \code{lmm} \code{lmn} \code{lmo} \code{lmoa} \code{lmoc} \code{lmon} \code{lmoo} \code{lmos} \code{lmot} \code{lmoy} \code{lmp} \code{lmq} \code{lms} \code{lmt} \code{lmw} \code{lmx} \code{lmy} \code{lmz} \code{lpa} \code{lpc} \code{lpe} \code{lpf} \code{lpj} \code{lpl} \code{lpn} \code{lpp} \code{lps} \code{lra} \code{lre} \code{lrf} \code{lrg} \code{lrh} \code{lrl} \code{lrm} \code{lru} \code{lsa} \code{lsg} \code{lsi} \code{lsl} \code{lsn} \code{lso} \code{lsp} \code{lwe} \code{lxx} \code{maa} \code{mab} \code{mac} \code{mad} \code{mae} \code{maf} \code{mag} \code{mai} \code{mal} \code{maq} \code{mar} \code{mas} \code{mat} \code{mau} \code{mav} \code{mba} \code{mbb} \code{mbg} \code{mbh} \code{mbi} \code{mbm} \code{mbn} \code{mbt} \code{mbu} \code{mbv} \code{mca} \code{mcb} \code{mcd} \code{mce} \code{mch} \code{mci} \code{mcj} \code{mcl} \code{mcn} \code{mco} \code{mcp} \code{mct} \code{mcu} \code{mec} \code{meh} \code{mei} \code{mej} \code{mel} \code{mem} \code{men} \code{mep} \code{mes} \code{met} \code{mev} \code{mew} \code{mex} \code{mez} \code{mfa} \code{mfe} \code{mfl} \code{mfm} \code{mfr} \code{mfs} \code{mfu} \code{mfv} \code{mga} \code{mgc} \code{mge} \code{mgh} \code{mgi} \code{mgm} \code{mgn} \code{mgq} \code{mgs} \code{mgt} \code{mgu} \code{mgv} \code{mgw} \code{mgx} \code{mha} \code{mhd} \code{mhe} \code{mhf} \code{mhh} \code{mhi} \code{mhj} \code{mhm} \code{mhn} \code{mho} \code{mhp} \code{mhr} \code{mhu} \code{mhy} \code{mia} \code{mif} \code{mig} \code{mil} \code{min} \code{mir} \code{mit} \code{mja} \code{mjd} \code{mjl} \code{mka} \code{mkm} \code{mla} \code{mlb} \code{mlc} \code{mle} 
\code{mlh} \code{mlo} \code{mlu} \code{mma} \code{mmar} \code{mmb} \code{mmc} \code{mmd} \code{mme} \code{mmg} \code{mmh} \code{mmi} \code{mml} \code{mmm} \code{mmn} \code{mmo} \code{mmp} \code{mmq} \code{mmr} \code{mms} \code{mmt} \code{mmv} \code{mmw} \code{mmx} \code{mmy} \code{mmz} \code{mno} \code{mok} \code{mop} \code{mox} \code{mpa} \code{mpc} \code{mpd} \code{mpf} \code{mpg} \code{mph} \code{mpi} \code{mpj} \code{mpl} \code{mpm} \code{mpn} \code{mpo} \code{mps} \code{mpt} \code{mpu} \code{mpx} \code{mpz} \code{mra} \code{mrb} \code{mrd} \code{mrs} \code{mru} \code{mse} \code{msg} \code{msi} \code{msk} \code{msl} \code{msm} \code{msp} \code{mss} \code{mst} \code{msu} \code{msv} \code{msy} \code{mta} \code{mtb} \code{mtc} \code{mte} \code{mtf} \code{mtg} \code{mth} \code{mti} \code{mtk} \code{mtl} \code{mto} \code{mtp} \code{mts} \code{mtt} \code{mtv} \code{mtz} \code{mul} \code{mva} \code{mvn} \code{mvo} \code{mvu} \code{mwe} \code{mxa} \code{mzh} \code{nal} \code{nam} \code{nar} \code{nat} \code{naz} \code{nca} \code{nda} \code{nde} \code{neq} \code{net} \code{neu} \code{nfa} \code{ngk} \code{ngo} \code{ngt} \code{nha} \code{nhl} \code{nii} \code{nis} \code{nit} \code{nko} \code{nla} \code{nma} \code{nmc} \code{nmd} \code{nme} \code{nmg} \code{nmh} \code{nmi} \code{nml} \code{nmm} \code{nmn} \code{nmp} \code{nmq} \code{nmr} \code{nms} \code{nmu} \code{nmw} \code{nmz} \code{noc} \code{nph} \code{npp} \code{npu} \code{nri} \code{nsa} \code{nse} \code{nth} \code{nwa} \code{nwi} \code{oan} \code{oca} \code{oce} \code{ocg} \code{oco} \code{oho} \code{oih} \code{ols} \code{ooe} \code{opr} \code{orh} \code{osp} \code{ote} \code{ots} \code{ott} \code{ova} \code{paa} \code{pab} \code{pad} \code{pae} \code{paf} \code{pag} \code{pah} \code{pai} \code{paj} \code{pak} \code{pal} \code{pam} \code{pao} \code{pap} \code{paq} \code{par} \code{pas} \code{pat} \code{pau} \code{pav} \code{paw} \code{pax} \code{pay} \code{paz} \code{pba} \code{pbr} \code{pbs} \code{pca} 
\code{pce} \code{pcl} \code{pcn} \code{pcr} \code{pct} \code{pcu} \code{pde} \code{pdi} \code{pdk} \code{pdn} \code{pdx} \code{pel} \code{pfc} \code{pfe} \code{pfi} \code{pfl} \code{pfm} \code{pfo} \code{pfr} \code{pfs} \code{pfu} \code{pfv} \code{pga} \code{pgi} \code{pgl} \code{pgn} \code{pgt} \code{pgv} \code{pha} \code{phe} \code{phm} \code{pho} \code{pin} \code{pis} \code{pit} \code{pjd} \code{pla} \code{ple} \code{plf} \code{plm} \code{plt} \code{plu} \code{pma} \code{pmb} \code{pmc} \code{pme} \code{pmf} \code{pmg} \code{pmh} \code{pmi} \code{pmj} \code{pmk} \code{pml} \code{pmm} \code{pmn} \code{pmo} \code{pmp} \code{pmq} \code{pmr} \code{pms} \code{pmt} \code{pmu} \code{pmv} \code{pmw} \code{pmx} \code{pmy} \code{pmz} \code{pna} \code{pnc} \code{pne} \code{pnu} \code{pol} \code{poy} \code{ppb} \code{ppc} \code{ppd} \code{ppe} \code{ppf} \code{ppg} \code{pph} \code{ppi} \code{ppm} \code{ppn} \code{ppo} \code{ppr} \code{ppt} \code{ppu} \code{ppw} \code{ppy} \code{pru} \code{prw} \code{psa} \code{psb} \code{psc} \code{psd} \code{pse} \code{psf} \code{psi} \code{psj} \code{psl} \code{psm} \code{psn} \code{psp} \code{psr} \code{pst} \code{psu} \code{psz} \code{pta} \code{pth} \code{pto} \code{pub} \code{pul} \code{put} \code{puv} \code{pva} \code{pvi} \code{pwa} \code{pya} \code{pyn} \code{pyr} \code{pys} \code{pzu} \code{raa} \code{raf} \code{rah} \code{rai} \code{rak} \code{ral} \code{ram} \code{ran} \code{raq} \code{rar} \code{rau} \code{rba} \code{rbe} \code{rbi} \code{rbo} \code{rca} \code{rcc} \code{rce} \code{rci} \code{rcm} \code{rco} \code{rcp} \code{rde} \code{rdn} \code{reh} \code{req} \code{rer} \code{reu} \code{rfe} \code{rfr} \code{rge} \code{rha} \code{rhe} \code{rhi} \code{rho} \code{rip} \code{rja} \code{rle} \code{rlg} \code{rli} \code{rlt} \code{rma} \code{rme} \code{rmg} \code{rmi} \code{rmo} \code{rmr} \code{rms} \code{rmu} \code{rop} \code{rpa} \code{rpb} \code{rpc} \code{rpd} \code{rpe} \code{rpf} \code{rpg} \code{rph} \code{rpi} 
\code{rpk} \code{rpm} \code{rpo} \code{rpp} \code{rpq} \code{rpr} \code{rps} \code{rpt} \code{rpv} \code{rpw} \code{rpx} \code{rpz} \code{rra} \code{rrb} \code{rrc} \code{rre} \code{rrf} \code{rrh} \code{rri} \code{rrj} \code{rrn} \code{rrp} \code{rrs} \code{rru} \code{rsa} \code{rsd} \code{rsh} \code{rsi} \code{rsn} \code{rso} \code{rsp} \code{rsq} \code{rsv} \code{rsw} \code{rta} \code{rtb} \code{rtt} \code{rty} \code{rva} \code{rxy} \code{saa} \code{sab} \code{sac} \code{sad} \code{sae} \code{saf} \code{sag} \code{sai} \code{saj} \code{sak} \code{sal} \code{sam} \code{san} \code{sap} \code{saq} \code{sar} \code{sas} \code{sat} \code{sau} \code{sav} \code{saw} \code{sax} \code{say} \code{saz} \code{sba} \code{sbb} \code{sbc} \code{sbg} \code{sbh} \code{sbl} \code{sbm} \code{sbn} \code{sbo} \code{sbp} \code{sbs} \code{sbt} \code{sbu} \code{sca} \code{scb} \code{scc} \code{scd} \code{scf} \code{sch} \code{scl} \code{sco} \code{scp} \code{scy} \code{sde} \code{sdg} \code{sdl} \code{sdn} \code{sds} \code{sdt} \code{sdy} \code{sea} \code{seb} \code{sec} \code{sect} \code{sed} \code{see} \code{sef} \code{seg} \code{seh} \code{sei} \code{sej} \code{sek} \code{sem} \code{sen} \code{seo} \code{sep} \code{seq} \code{ser} \code{ses} \code{set} \code{seu} \code{sev} \code{sew} \code{sex} \code{sey} \code{sez} \code{sfa} \code{sfc} \code{sfd} \code{sfe} \code{sfh} \code{sfl} \code{sfr} \code{sfu} \code{sfv} \code{sfx} \code{sga} \code{sgg} \code{sgl} \code{sgn} \code{sgo} \code{sgp} \code{sgr} \code{sgt} \code{sgy} \code{shb} \code{shc} \code{she} \code{shg} \code{shi} \code{shl} \code{shm} \code{shn} \code{shp} \code{shw} \code{shy} \code{sia} \code{sid} \code{sie} \code{sif} \code{sih} \code{sil} \code{sim} \code{sin} \code{sir} \code{sis} \code{sit} \code{siv} \code{siy} \code{sjj} \code{sjp} \code{ske} \code{sku} \code{slg} \code{sli} \code{sln} \code{slo} \code{slp} \code{slt} \code{sma} \code{smb} \code{smc} \code{smd} \code{smf} \code{smh} \code{smk} \code{sml} 
\code{smn} \code{smq} \code{smr} \code{smt} \code{smu} \code{smut} \code{smx} \code{smz} \code{sna} \code{snb} \code{snc} \code{sne} \code{sng} \code{sni} \code{snm} \code{sno} \code{snp} \code{snt} \code{snv} \code{snx} \code{sol} \code{son} \code{sor} \code{soz} \code{spa} \code{spc} \code{spd} \code{spe} \code{spf} \code{spg} \code{spl} \code{spm} \code{spn} \code{spp} \code{spq} \code{spr} \code{sps} \code{spt} \code{spv} \code{spw} \code{spx} \code{spy} \code{sra} \code{sri} \code{srm} \code{sro} \code{srp} \code{srr} \code{srs} \code{srt} \code{sru} \code{ssb} \code{ssd} \code{sse} \code{ssf} \code{ssg} \code{ssi} \code{ssj} \code{ssk} \code{ssm} \code{ssn} \code{ssp} \code{ssq} \code{ssr} \code{sss} \code{sst} \code{ssu} \code{ssv} \code{ssw} \code{ssx} \code{ssy} \code{ssz} \code{sta} \code{stb} \code{stc} \code{std} \code{ste} \code{stg} \code{sth} \code{sti} \code{stk} \code{stl} \code{stm} \code{stn} \code{sto} \code{stp} \code{stq} \code{str} \code{stt} \code{stu} \code{stw} \code{stx} \code{stz} \code{sua} \code{sub} \code{suc} \code{sud} \code{sue} \code{suh} \code{sui} \code{suj} \code{suk} \code{sul} \code{sun} \code{suo} \code{sup} \code{suq} \code{sur} \code{sus} \code{sut} \code{suu} \code{suv} \code{suw} \code{sux} \code{suz} \code{svi} \code{svl} \code{svo} \code{swd} \code{swi} \code{swo} \code{swp} \code{syc} \code{syd} \code{sye} \code{syf} \code{syg} \code{syn} \code{syp} \code{syq} \code{syr} \code{sys} \code{syt} \code{syw} \code{syx} \code{syy} \code{tac} \code{taf} \code{tag} \code{tai} \code{tal} \code{tam} \code{tas} \code{tau} \code{taz} \code{tba} \code{tbd} \code{tbe} \code{tbi} \code{tbo} \code{tcu} \code{tcx} \code{tcy} \code{tde} \code{tdn} \code{tea} \code{tel} \code{tep} \code{teq} \code{ter} \code{tex} \code{tfo} \code{tfu} \code{tga} \code{tgr} \code{thc} \code{the} \code{thg} \code{thl} \code{thm} \code{thx} \code{tid} \code{tin} \code{tit} \code{tjr} \code{tkm} \code{tko} \code{tle} \code{tli} \code{tma} \code{tme} 
\code{tmo} \code{tmr} \code{tmt} \code{tmz} \code{tne} \code{tnp} \code{tnr} \code{toc} \code{ton} \code{top} \code{tpa} \code{tpc} \code{tpd} \code{tpe} \code{tpg} \code{tph} \code{tpi} \code{tpl} \code{tpm} \code{tpn} \code{tpp} \code{tpq} \code{tpr} \code{tpt} \code{tpu} \code{tpx} \code{tra} \code{tro} \code{trq} \code{trs} \code{tsa} \code{tsc} \code{tsh} \code{tsi} \code{tsu} \code{tta} \code{tte} \code{ttj} \code{ttl} \code{ttm} \code{ttr} \code{tts} \code{ttu} \code{tuz} \code{tvi} \code{tvo} \code{twh} \code{twi} \code{tws} \code{txy} \code{tye} \code{upa} \code{uue} \code{uur} \code{van} \code{vap} \code{vce} \code{vch} \code{vci} \code{vcj} \code{vcl} \code{vcm} \code{vco} \code{vcr} \code{vdi} \code{vei} \code{vej} \code{vex} \code{vfi} \code{vfm} \code{vfu} \code{vha} \code{vma} \code{vmo} \code{vok} \code{vpa} \code{vpe} \code{vpr} \code{vsa} \code{vvm} \code{vvu} \code{vvy} \code{wbm} \code{wbr} \code{wch} \code{wgl} \code{wko} \code{wol} \code{wpi} \code{wri} \code{wsu} \code{wvi} \code{xau} \code{xax} \code{xbo} \code{xca} \code{xcb} \code{xce} \code{xcp} \code{xcv} \code{xfa} \code{xff} \code{xfm} \code{xfn} \code{xft} \code{xne} \code{xom} \code{xoo} \code{xop} \code{xor} \code{yen} \code{yep} \code{yey} \code{ypa} \code{ypb} \code{ypd} \code{ype} \code{ypg} \code{yph} \code{ypi} \code{ypk} \code{ypm} \code{ypn} \code{ypp} \code{yps} \code{ypt} \code{ypx} \code{ypy} \code{ypz} \code{zga} \code{zin} \code{zmb} \code{zmm} \code{zmn} \code{zmo} \code{zmp} \code{zpr}}
\item{\code{DataSource}}{a factor with levels \code{RefSeq (Project:13791)} \code{RefSeq (Project:156759)} \code{RefSeq (Project:156845)} \code{RefSeq (Project:156881)} \code{RefSeq (Project:156977)} \code{RefSeq (Project:157247)} \code{RefSeq (Project:157331)} \code{RefSeq (Project:157907)} \code{RefSeq (Project:157909)} \code{RefSeq (Project:157911)} \code{RefSeq (Project:157913)} \code{RefSeq (Project:157997)} \code{RefSeq (Project:157999)} \code{RefSeq (Project:158037)} \code{RefSeq (Project:158039)} \code{RefSeq (Project:158043)} \code{RefSeq (Project:158045)} \code{RefSeq (Project:158047)} \code{RefSeq (Project:158049)} \code{RefSeq (Project:158051)} \code{RefSeq (Project:158053)} \code{RefSeq (Project:158055)} \code{RefSeq (Project:158057)} \code{RefSeq (Project:158059)} \code{RefSeq (Project:158061)} \code{RefSeq (Project:158063)} \code{RefSeq (Project:158065)} \code{RefSeq (Project:158071)} \code{RefSeq (Project:158073)} \code{RefSeq (Project:158119)} \code{RefSeq (Project:158133)} \code{RefSeq (Project:158135)} \code{RefSeq (Project:158151)} \code{RefSeq (Project:158157)} \code{RefSeq (Project:158159)} \code{RefSeq (Project:158161)} \code{RefSeq (Project:158163)} \code{RefSeq (Project:158165)} \code{RefSeq (Project:158167)} \code{RefSeq (Project:158169)} \code{RefSeq (Project:158171)} \code{RefSeq (Project:158173)} \code{RefSeq (Project:158357)} \code{RefSeq (Project:158363)} \code{RefSeq (Project:158369)} \code{RefSeq (Project:158377)} \code{RefSeq (Project:158509)} \code{RefSeq (Project:158533)} \code{RefSeq (Project:158535)} \code{RefSeq (Project:158537)} \code{RefSeq (Project:15862)} \code{RefSeq (Project:158677)} \code{RefSeq (Project:158679)} \code{RefSeq (Project:158681)} \code{RefSeq (Project:158685)} \code{RefSeq (Project:158689)} \code{RefSeq (Project:158693)} \code{RefSeq (Project:158699)} \code{RefSeq (Project:158701)} \code{RefSeq (Project:158843)} \code{RefSeq (Project:158845)} \code{RefSeq (Project:158849)} \code{RefSeq (Project:158851)} 
\code{RefSeq (Project:158853)} \code{RefSeq (Project:158855)} \code{RefSeq (Project:158857)} \code{RefSeq (Project:158859)} \code{RefSeq (Project:158861)} \code{RefSeq (Project:158863)} \code{RefSeq (Project:158865)} \code{RefSeq (Project:158867)} \code{RefSeq (Project:158869)} \code{RefSeq (Project:158871)} \code{RefSeq (Project:158875)} \code{RefSeq (Project:158879)} \code{RefSeq (Project:158881)} \code{RefSeq (Project:159001)} \code{RefSeq (Project:159003)} \code{RefSeq (Project:159233)} \code{RefSeq (Project:159241)} \code{RefSeq (Project:159389)} \code{RefSeq (Project:159467)} \code{RefSeq (Project:159491)} \code{RefSeq (Project:159493)} \code{RefSeq (Project:159513)} \code{RefSeq (Project:159515)} \code{RefSeq (Project:159517)} \code{RefSeq (Project:159519)} \code{RefSeq (Project:159521)} \code{RefSeq (Project:159523)} \code{RefSeq (Project:159525)} \code{RefSeq (Project:159527)} \code{RefSeq (Project:159529)} \code{RefSeq (Project:159531)} \code{RefSeq (Project:159533)} \code{RefSeq (Project:159535)} \code{RefSeq (Project:159537)} \code{RefSeq (Project:159539)} \code{RefSeq (Project:159541)} \code{RefSeq (Project:159543)} \code{RefSeq (Project:159611)} \code{RefSeq (Project:159615)} \code{RefSeq (Project:159639)} \code{RefSeq (Project:159657)} \code{RefSeq (Project:159659)} \code{RefSeq (Project:159665)} \code{RefSeq (Project:159667)} \code{RefSeq (Project:159669)} \code{RefSeq (Project:159671)} \code{RefSeq (Project:159673)} \code{RefSeq (Project:159675)} \code{RefSeq (Project:159677)} \code{RefSeq (Project:159685)} \code{RefSeq (Project:159689)} \code{RefSeq (Project:159691)} \code{RefSeq (Project:159693)} \code{RefSeq (Project:159835)} \code{RefSeq (Project:159841)} \code{RefSeq (Project:159845)} \code{RefSeq (Project:159855)} \code{RefSeq (Project:159857)} \code{RefSeq (Project:159859)} \code{RefSeq (Project:159861)} \code{RefSeq (Project:159863)} \code{RefSeq (Project:159865)} \code{RefSeq (Project:159867)} \code{RefSeq (Project:159869)} \code{RefSeq 
(Project:159871)} \code{RefSeq (Project:159873)} \code{RefSeq (Project:159875)} \code{RefSeq (Project:159879)} \code{RefSeq (Project:159881)} \code{RefSeq (Project:159955)} \code{RefSeq (Project:159981)} \code{RefSeq (Project:159983)} \code{RefSeq (Project:159985)} \code{RefSeq (Project:159987)} \code{RefSeq (Project:159989)} \code{RefSeq (Project:159991)} \code{RefSeq (Project:159993)} \code{RefSeq (Project:160233)} \code{RefSeq (Project:160253)} \code{RefSeq (Project:161097)} \code{RefSeq (Project:161139)} \code{RefSeq (Project:161143)} \code{RefSeq (Project:161145)} \code{RefSeq (Project:161149)} \code{RefSeq (Project:161151)} \code{RefSeq (Project:161153)} \code{RefSeq (Project:161157)} \code{RefSeq (Project:161159)} \code{RefSeq (Project:161161)} \code{RefSeq (Project:161353)} \code{RefSeq (Project:161361)} \code{RefSeq (Project:161369)} \code{RefSeq (Project:161377)} \code{RefSeq (Project:161403)} \code{RefSeq (Project:161409)} \code{RefSeq (Project:161919)} \code{RefSeq (Project:161923)} \code{RefSeq (Project:161927)} \code{RefSeq (Project:161929)} \code{RefSeq (Project:161931)} \code{RefSeq (Project:161935)} \code{RefSeq (Project:161937)} \code{RefSeq (Project:161941)} \code{RefSeq (Project:161943)} \code{RefSeq (Project:161945)} \code{RefSeq (Project:161949)} \code{RefSeq (Project:161951)} \code{RefSeq (Project:161953)} \code{RefSeq (Project:161955)} \code{RefSeq (Project:161957)} \code{RefSeq (Project:161959)} \code{RefSeq (Project:161961)} \code{RefSeq (Project:161971)} \code{RefSeq (Project:161973)} \code{RefSeq (Project:161975)} \code{RefSeq (Project:161977)} \code{RefSeq (Project:161979)} \code{RefSeq (Project:161981)} \code{RefSeq (Project:161983)} \code{RefSeq (Project:161985)} \code{RefSeq (Project:161987)} \code{RefSeq (Project:161989)} \code{RefSeq (Project:161991)} \code{RefSeq (Project:161993)} \code{RefSeq (Project:161997)} \code{RefSeq (Project:161999)} \code{RefSeq (Project:162003)} \code{RefSeq (Project:162005)} \code{RefSeq 
(Project:162007)} \code{RefSeq (Project:162009)} \code{RefSeq (Project:162013)} \code{RefSeq (Project:162015)} \code{RefSeq (Project:162017)} \code{RefSeq (Project:162019)} \code{RefSeq (Project:162021)} \code{RefSeq (Project:162023)} \code{RefSeq (Project:162025)} \code{RefSeq (Project:162027)} \code{RefSeq (Project:162029)} \code{RefSeq (Project:162031)} \code{RefSeq (Project:162033)} \code{RefSeq (Project:162035)} \code{RefSeq (Project:162037)} \code{RefSeq (Project:162039)} \code{RefSeq (Project:162041)} \code{RefSeq (Project:162043)} \code{RefSeq (Project:162045)} \code{RefSeq (Project:162047)} \code{RefSeq (Project:162049)} \code{RefSeq (Project:162051)} \code{RefSeq (Project:162053)} \code{RefSeq (Project:162055)} \code{RefSeq (Project:162057)} \code{RefSeq (Project:162059)} \code{RefSeq (Project:162061)} \code{RefSeq (Project:162065)} \code{RefSeq (Project:162067)} \code{RefSeq (Project:162069)} \code{RefSeq (Project:162071)} \code{RefSeq (Project:162073)} \code{RefSeq (Project:162075)} \code{RefSeq (Project:162077)} \code{RefSeq (Project:162079)} \code{RefSeq (Project:162081)} \code{RefSeq (Project:162083)} \code{RefSeq (Project:162085)} \code{RefSeq (Project:162087)} \code{RefSeq (Project:162089)} \code{RefSeq (Project:162093)} \code{RefSeq (Project:162095)} \code{RefSeq (Project:162097)} \code{RefSeq (Project:162099)} \code{RefSeq (Project:162101)} \code{RefSeq (Project:162103)} \code{RefSeq (Project:162105)} \code{RefSeq (Project:162109)} \code{RefSeq (Project:162111)} \code{RefSeq (Project:162113)} \code{RefSeq (Project:162115)} \code{RefSeq (Project:162117)} \code{RefSeq (Project:162119)} \code{RefSeq (Project:162121)} \code{RefSeq (Project:162123)} \code{RefSeq (Project:162125)} \code{RefSeq (Project:162127)} \code{RefSeq (Project:162129)} \code{RefSeq (Project:162131)} \code{RefSeq (Project:162133)} \code{RefSeq (Project:162135)} \code{RefSeq (Project:162137)} \code{RefSeq (Project:162139)} \code{RefSeq (Project:162141)} \code{RefSeq 
(Project:162143)} \code{RefSeq (Project:162147)} \code{RefSeq (Project:162149)} \code{RefSeq (Project:162153)} \code{RefSeq (Project:162157)} \code{RefSeq (Project:162159)} \code{RefSeq (Project:162163)} \code{RefSeq (Project:162165)} \code{RefSeq (Project:162167)} \code{RefSeq (Project:162169)} \code{RefSeq (Project:162171)} \code{RefSeq (Project:162173)} \code{RefSeq (Project:162175)} \code{RefSeq (Project:162177)} \code{RefSeq (Project:162179)} \code{RefSeq (Project:162181)} \code{RefSeq (Project:162185)} \code{RefSeq (Project:162187)} \code{RefSeq (Project:162189)} \code{RefSeq (Project:162193)} \code{RefSeq (Project:162195)} \code{RefSeq (Project:162199)} \code{RefSeq (Project:162201)} \code{RefSeq (Project:162205)} \code{RefSeq (Project:162207)} \code{RefSeq (Project:162209)} \code{RefSeq (Project:162211)} \code{RefSeq (Project:162213)} \code{RefSeq (Project:162215)} \code{RefSeq (Project:162217)} \code{RefSeq (Project:162219)} \code{RefSeq (Project:162333)} \code{RefSeq (Project:162335)} \code{RefSeq (Project:162509)} \code{RefSeq (Project:162511)} \code{RefSeq (Project:162513)} \code{RefSeq (Project:162739)} \code{RefSeq (Project:162947)} \code{RefSeq (Project:162949)} \code{RefSeq (Project:163151)} \code{RefSeq (Project:163995)} \code{RefSeq (Project:163997)} \code{RefSeq (Project:164001)} \code{RefSeq (Project:164779)} \code{RefSeq (Project:165043)} \code{RefSeq (Project:165185)} \code{RefSeq (Project:165195)} \code{RefSeq (Project:165869)} \code{RefSeq (Project:165871)} \code{RefSeq (Project:165873)} \code{RefSeq (Project:166827)} \code{RefSeq (Project:167045)} \code{RefSeq (Project:167260)} \code{RefSeq (Project:167261)} \code{RefSeq (Project:167315)} \code{RefSeq (Project:167481)} \code{RefSeq (Project:167482)} \code{RefSeq (Project:167483)} \code{RefSeq (Project:167484)} \code{RefSeq (Project:167485)} \code{RefSeq (Project:167486)} \code{RefSeq (Project:167487)} \code{RefSeq (Project:167488)} \code{RefSeq (Project:167583)} \code{RefSeq 
(Project:167781)} \code{RefSeq (Project:167988)} \code{RefSeq (Project:167994)} \code{RefSeq (Project:167998)} \code{RefSeq (Project:168059)} \code{RefSeq (Project:168117)} \code{RefSeq (Project:168180)} \code{RefSeq (Project:168181)} \code{RefSeq (Project:168182)} \code{RefSeq (Project:168183)} \code{RefSeq (Project:168256)} \code{RefSeq (Project:168257)} \code{RefSeq (Project:168258)} \code{RefSeq (Project:168259)} \code{RefSeq (Project:168320)} \code{RefSeq (Project:168321)} \code{RefSeq (Project:168322)} \code{RefSeq (Project:168323)} \code{RefSeq (Project:168379)} \code{RefSeq (Project:168516)} \code{RefSeq (Project:168614)} \code{RefSeq (Project:168665)} \code{RefSeq (Project:168996)} \code{RefSeq (Project:168997)} \code{RefSeq (Project:169223)} \code{RefSeq (Project:169620)} \code{RefSeq (Project:169879)} \code{RefSeq (Project:170255)} \code{RefSeq (Project:170532)} \code{RefSeq (Project:170533)} \code{RefSeq (Project:170612)} \code{RefSeq (Project:170731)} \code{RefSeq (Project:170732)} \code{RefSeq (Project:170940)} \code{RefSeq (Project:171261)} \code{RefSeq (Project:171337)} \code{RefSeq (Project:171377)} \code{RefSeq (Project:171830)} \code{RefSeq (Project:171958)} \code{RefSeq (Project:172624)} \code{RefSeq (Project:172625)} \code{RefSeq (Project:172627)} \code{RefSeq (Project:172628)} \code{RefSeq (Project:172629)} \code{RefSeq (Project:172732)} \code{RefSeq (Project:172733)} \code{RefSeq (Project:172734)} \code{RefSeq (Project:172735)} \code{RefSeq (Project:172736)} \code{RefSeq (Project:172737)} \code{RefSeq (Project:173370)} \code{RefSeq (Project:173371)} \code{RefSeq (Project:173372)} \code{RefSeq (Project:173373)} \code{RefSeq (Project:173858)} \code{RefSeq (Project:173859)} \code{RefSeq (Project:174333)} \code{RefSeq (Project:174334)} \code{RefSeq (Project:174439)} \code{RefSeq (Project:174511)} \code{RefSeq (Project:174812)} \code{RefSeq (Project:174871)} \code{RefSeq (Project:174872)} \code{RefSeq (Project:175105)} \code{RefSeq 
(Project:175106)} \code{RefSeq (Project:175107)} \code{RefSeq (Project:175108)} \code{RefSeq (Project:175109)} \code{RefSeq (Project:175110)} \code{RefSeq (Project:175111)} \code{RefSeq (Project:175255)} \code{RefSeq (Project:20871)} \code{RefSeq (Project:29855)} \code{RefSeq (Project:32235)} \code{RefSeq (Project:40659)} \code{RefSeq (Project:40821)} \code{RefSeq (Project:40839)} \code{RefSeq (Project:40845)} \code{RefSeq (Project:40861)} \code{RefSeq (Project:40863)} \code{RefSeq (Project:40921)} \code{RefSeq (Project:41013)} \code{RefSeq (Project:41017)} \code{RefSeq (Project:41019)} \code{RefSeq (Project:41021)} \code{RefSeq (Project:41023)} \code{RefSeq (Project:41053)} \code{RefSeq (Project:41127)} \code{RefSeq (Project:41131)} \code{RefSeq (Project:41169)} \code{RefSeq (Project:41171)} \code{RefSeq (Project:41287)} \code{RefSeq (Project:41297)} \code{RefSeq (Project:41317)} \code{RefSeq (Project:41333)} \code{RefSeq (Project:41403)} \code{RefSeq (Project:41425)} \code{RefSeq (Project:41455)} \code{RefSeq (Project:41533)} \code{RefSeq (Project:41601)} \code{RefSeq (Project:41709)} \code{RefSeq (Project:41729)} \code{RefSeq (Project:41735)} \code{RefSeq (Project:41819)} \code{RefSeq (Project:41861)} \code{RefSeq (Project:41863)} \code{RefSeq (Project:41865)} \code{RefSeq (Project:41875)} \code{RefSeq (Project:41885)} \code{RefSeq (Project:41925)} \code{RefSeq (Project:41927)} \code{RefSeq (Project:41935)} \code{RefSeq (Project:41997)} \code{RefSeq (Project:42011)} \code{RefSeq (Project:42113)} \code{RefSeq (Project:42115)} \code{RefSeq (Project:42117)} \code{RefSeq (Project:42155)} \code{RefSeq (Project:42393)} \code{RefSeq (Project:42497)} \code{RefSeq (Project:42499)} \code{RefSeq (Project:42501)} \code{RefSeq (Project:42519)} \code{RefSeq (Project:42521)} \code{RefSeq (Project:42523)} \code{RefSeq (Project:42613)} \code{RefSeq (Project:42615)} \code{RefSeq (Project:42777)} \code{RefSeq (Project:42831)} \code{RefSeq (Project:42887)} \code{RefSeq 
(Project:42975)} \code{RefSeq (Project:43089)} \code{RefSeq (Project:43091)} \code{RefSeq (Project:43093)} \code{RefSeq (Project:43209)} \code{RefSeq (Project:43211)} \code{RefSeq (Project:43247)} \code{RefSeq (Project:43327)} \code{RefSeq (Project:43329)} \code{RefSeq (Project:43333)} \code{RefSeq (Project:43413)} \code{RefSeq (Project:43465)} \code{RefSeq (Project:43467)} \code{RefSeq (Project:43471)} \code{RefSeq (Project:43493)} \code{RefSeq (Project:43501)} \code{RefSeq (Project:43671)} \code{RefSeq (Project:43697)} \code{RefSeq (Project:43725)} \code{RefSeq (Project:43727)} \code{RefSeq (Project:45847)} \code{RefSeq (Project:45857)} \code{RefSeq (Project:45893)} \code{RefSeq (Project:45895)} \code{RefSeq (Project:45897)} \code{RefSeq (Project:45927)} \code{RefSeq (Project:46061)} \code{RefSeq (Project:46083)} \code{RefSeq (Project:46085)} \code{RefSeq (Project:46097)} \code{RefSeq (Project:46181)} \code{RefSeq (Project:46215)} \code{RefSeq (Project:46219)} \code{RefSeq (Project:46231)} \code{RefSeq (Project:46233)} \code{RefSeq (Project:46241)} \code{RefSeq (Project:46245)} \code{RefSeq (Project:46253)} \code{RefSeq (Project:46255)} \code{RefSeq (Project:46257)} \code{RefSeq (Project:46345)} \code{RefSeq (Project:46353)} \code{RefSeq (Project:46523)} \code{RefSeq (Project:46531)} \code{RefSeq (Project:46625)} \code{RefSeq (Project:46647)} \code{RefSeq (Project:46653)} \code{RefSeq (Project:46655)} \code{RefSeq (Project:46657)} \code{RefSeq (Project:46661)} \code{RefSeq (Project:46663)} \code{RefSeq (Project:46679)} \code{RefSeq (Project:46801)} \code{RefSeq (Project:46803)} \code{RefSeq (Project:46807)} \code{RefSeq (Project:46839)} \code{RefSeq (Project:46841)} \code{RefSeq (Project:46845)} \code{RefSeq (Project:46943)} \code{RefSeq (Project:46989)} \code{RefSeq (Project:47075)} \code{RefSeq (Project:47077)} \code{RefSeq (Project:47079)} \code{RefSeq (Project:47081)} \code{RefSeq (Project:47083)} \code{RefSeq (Project:47085)} \code{RefSeq (Project:47087)} 
\code{RefSeq (Project:47313)} \code{RefSeq (Project:47317)} \code{RefSeq (Project:47323)} \code{RefSeq (Project:47507)} \code{RefSeq (Project:47509)} \code{RefSeq (Project:48071)} \code{RefSeq (Project:48073)} \code{RefSeq (Project:48129)} \code{RefSeq (Project:48131)} \code{RefSeq (Project:48359)} \code{RefSeq (Project:48361)} \code{RefSeq (Project:48363)} \code{RefSeq (Project:48371)} \code{RefSeq (Project:48589)} \code{RefSeq (Project:48643)} \code{RefSeq (Project:48801)} \code{RefSeq (Project:48803)} \code{RefSeq (Project:48809)} \code{RefSeq (Project:48815)} \code{RefSeq (Project:48819)} \code{RefSeq (Project:48821)} \code{RefSeq (Project:48823)} \code{RefSeq (Project:48825)} \code{RefSeq (Project:48829)} \code{RefSeq (Project:48993)} \code{RefSeq (Project:48999)} \code{RefSeq (Project:49001)} \code{RefSeq (Project:49049)} \code{RefSeq (Project:49117)} \code{RefSeq (Project:49131)} \code{RefSeq (Project:49133)} \code{RefSeq (Project:49135)} \code{RefSeq (Project:49313)} \code{RefSeq (Project:49331)} \code{RefSeq (Project:49467)} \code{RefSeq (Project:49469)} \code{RefSeq (Project:49481)} \code{RefSeq (Project:49483)} \code{RefSeq (Project:49485)} \code{RefSeq (Project:49487)} \code{RefSeq (Project:49489)} \code{RefSeq (Project:49513)} \code{RefSeq (Project:49527)} \code{RefSeq (Project:49529)} \code{RefSeq (Project:49531)} \code{RefSeq (Project:49533)} \code{RefSeq (Project:49535)} \code{RefSeq (Project:49661)} \code{RefSeq (Project:49695)} \code{RefSeq (Project:49725)} \code{RefSeq (Project:49735)} \code{RefSeq (Project:49857)} \code{RefSeq (Project:49871)} \code{RefSeq (Project:49873)} \code{RefSeq (Project:49903)} \code{RefSeq (Project:49953)} \code{RefSeq (Project:49957)} \code{RefSeq (Project:50119)} \code{RefSeq (Project:50305)} \code{RefSeq (Project:50325)} \code{RefSeq (Project:50331)} \code{RefSeq (Project:50385)} \code{RefSeq (Project:50427)} \code{RefSeq (Project:50547)} \code{RefSeq (Project:50551)} \code{RefSeq (Project:50553)} \code{RefSeq 
(Project:50555)} \code{RefSeq (Project:50565)} \code{RefSeq (Project:50583)} \code{RefSeq (Project:50585)} \code{RefSeq (Project:50609)} \code{RefSeq (Project:50615)} \code{RefSeq (Project:50673)} \code{RefSeq (Project:51175)} \code{RefSeq (Project:51367)} \code{RefSeq (Project:51369)} \code{RefSeq (Project:51371)} \code{RefSeq (Project:51377)} \code{RefSeq (Project:51395)} \code{RefSeq (Project:51419)} \code{RefSeq (Project:51421)} \code{RefSeq (Project:51423)} \code{RefSeq (Project:51489)} \code{RefSeq (Project:51501)} \code{RefSeq (Project:51503)} \code{RefSeq (Project:51505)} \code{RefSeq (Project:51535)} \code{RefSeq (Project:51635)} \code{RefSeq (Project:51637)} \code{RefSeq (Project:51639)} \code{RefSeq (Project:51641)} \code{RefSeq (Project:51695)} \code{RefSeq (Project:51721)} \code{RefSeq (Project:51875)} \code{RefSeq (Project:51877)} \code{RefSeq (Project:51879)} \code{RefSeq (Project:52453)} \code{RefSeq (Project:52455)} \code{RefSeq (Project:52459)} \code{RefSeq (Project:52533)} \code{RefSeq (Project:52537)} \code{RefSeq (Project:52547)} \code{RefSeq (Project:52581)} \code{RefSeq (Project:52593)} \code{RefSeq (Project:52597)} \code{RefSeq (Project:52599)} \code{RefSeq (Project:52601)} \code{RefSeq (Project:52605)} \code{RefSeq (Project:52607)} \code{RefSeq (Project:52609)} \code{RefSeq (Project:52695)} \code{RefSeq (Project:52781)} \code{RefSeq (Project:52827)} \code{RefSeq (Project:53037)} \code{RefSeq (Project:53043)} \code{RefSeq (Project:53065)} \code{RefSeq (Project:53251)} \code{RefSeq (Project:53371)} \code{RefSeq (Project:53477)} \code{RefSeq (Project:53509)} \code{RefSeq (Project:53535)} \code{RefSeq (Project:53537)} \code{RefSeq (Project:53539)} \code{RefSeq (Project:53541)} \code{RefSeq (Project:53543)} \code{RefSeq (Project:54335)} \code{RefSeq (Project:54411)} \code{RefSeq (Project:54441)} \code{RefSeq (Project:54443)} \code{RefSeq (Project:54459)} \code{RefSeq (Project:54461)} \code{RefSeq (Project:54715)} \code{RefSeq (Project:54717)} 
\code{RefSeq (Project:54719)} \code{RefSeq (Project:54733)} \code{RefSeq (Project:54919)} \code{RefSeq (Project:54927)} \code{RefSeq (Project:54947)} \code{RefSeq (Project:55329)} \code{RefSeq (Project:55353)} \code{RefSeq (Project:55357)} \code{RefSeq (Project:55381)} \code{RefSeq (Project:55403)} \code{RefSeq (Project:55487)} \code{RefSeq (Project:55639)} \code{RefSeq (Project:55641)} \code{RefSeq (Project:55727)} \code{RefSeq (Project:55771)} \code{RefSeq (Project:55779)} \code{RefSeq (Project:55845)} \code{RefSeq (Project:57581)} \code{RefSeq (Project:57583)} \code{RefSeq (Project:57585)} \code{RefSeq (Project:57587)} \code{RefSeq (Project:57589)} \code{RefSeq (Project:57591)} \code{RefSeq (Project:57593)} \code{RefSeq (Project:57595)} \code{RefSeq (Project:57597)} \code{RefSeq (Project:57599)} \code{RefSeq (Project:57601)} \code{RefSeq (Project:57607)} \code{RefSeq (Project:57609)} \code{RefSeq (Project:57611)} \code{RefSeq (Project:57613)} \code{RefSeq (Project:57615)} \code{RefSeq (Project:57617)} \code{RefSeq (Project:57621)} \code{RefSeq (Project:57623)} \code{RefSeq (Project:57625)} \code{RefSeq (Project:57627)} \code{RefSeq (Project:57629)} \code{RefSeq (Project:57631)} \code{RefSeq (Project:57633)} \code{RefSeq (Project:57635)} \code{RefSeq (Project:57637)} \code{RefSeq (Project:57639)} \code{RefSeq (Project:57641)} \code{RefSeq (Project:57643)} \code{RefSeq (Project:57645)} \code{RefSeq (Project:57647)} \code{RefSeq (Project:57649)} \code{RefSeq (Project:57651)} \code{RefSeq (Project:57653)} \code{RefSeq (Project:57655)} \code{RefSeq (Project:57657)} \code{RefSeq (Project:57659)} \code{RefSeq (Project:57661)} \code{RefSeq (Project:57663)} \code{RefSeq (Project:57665)} \code{RefSeq (Project:57667)} \code{RefSeq (Project:57669)} \code{RefSeq (Project:57671)} \code{RefSeq (Project:57673)} \code{RefSeq (Project:57679)} \code{RefSeq (Project:57681)} \code{RefSeq (Project:57683)} \code{RefSeq (Project:57685)} \code{RefSeq (Project:57687)} \code{RefSeq 
(Project:57689)} \code{RefSeq (Project:57691)} \code{RefSeq (Project:57693)} \code{RefSeq (Project:57697)} \code{RefSeq (Project:57699)} \code{RefSeq (Project:57701)} \code{RefSeq (Project:57703)} \code{RefSeq (Project:57705)} \code{RefSeq (Project:57707)} \code{RefSeq (Project:57709)} \code{RefSeq (Project:57711)} \code{RefSeq (Project:57713)} \code{RefSeq (Project:57715)} \code{RefSeq (Project:57717)} \code{RefSeq (Project:57719)} \code{RefSeq (Project:57723)} \code{RefSeq (Project:57725)} \code{RefSeq (Project:57727)} \code{RefSeq (Project:57731)} \code{RefSeq (Project:57733)} \code{RefSeq (Project:57735)} \code{RefSeq (Project:57737)} \code{RefSeq (Project:57739)} \code{RefSeq (Project:57741)} \code{RefSeq (Project:57743)} \code{RefSeq (Project:57745)} \code{RefSeq (Project:57747)} \code{RefSeq (Project:57749)} \code{RefSeq (Project:57751)} \code{RefSeq (Project:57753)} \code{RefSeq (Project:57755)} \code{RefSeq (Project:57757)} \code{RefSeq (Project:57759)} \code{RefSeq (Project:57761)} \code{RefSeq (Project:57763)} \code{RefSeq (Project:57765)} \code{RefSeq (Project:57767)} \code{RefSeq (Project:57769)} \code{RefSeq (Project:57771)} \code{RefSeq (Project:57773)} \code{RefSeq (Project:57775)} \code{RefSeq (Project:57781)} \code{RefSeq (Project:57783)} \code{RefSeq (Project:57785)} \code{RefSeq (Project:57787)} \code{RefSeq (Project:57789)} \code{RefSeq (Project:57791)} \code{RefSeq (Project:57797)} \code{RefSeq (Project:57799)} \code{RefSeq (Project:57801)} \code{RefSeq (Project:57803)} \code{RefSeq (Project:57805)} \code{RefSeq (Project:57807)} \code{RefSeq (Project:57809)} \code{RefSeq (Project:57811)} \code{RefSeq (Project:57813)} \code{RefSeq (Project:57815)} \code{RefSeq (Project:57817)} \code{RefSeq (Project:57819)} \code{RefSeq (Project:57821)} \code{RefSeq (Project:57823)} \code{RefSeq (Project:57825)} \code{RefSeq (Project:57827)} \code{RefSeq (Project:57829)} \code{RefSeq (Project:57831)} \code{RefSeq (Project:57833)} \code{RefSeq (Project:57835)} 
\code{RefSeq (Project:57837)} \code{RefSeq (Project:57839)} \code{RefSeq (Project:57841)} \code{RefSeq (Project:57843)} \code{RefSeq (Project:57845)} \code{RefSeq (Project:57847)} \code{RefSeq (Project:57849)} \code{RefSeq (Project:57851)} \code{RefSeq (Project:57853)} \code{RefSeq (Project:57855)} \code{RefSeq (Project:57857)} \code{RefSeq (Project:57859)} \code{RefSeq (Project:57861)} \code{RefSeq (Project:57863)} \code{RefSeq (Project:57865)} \code{RefSeq (Project:57867)} \code{RefSeq (Project:57869)} \code{RefSeq (Project:57871)} \code{RefSeq (Project:57873)} \code{RefSeq (Project:57875)} \code{RefSeq (Project:57877)} \code{RefSeq (Project:57879)} \code{RefSeq (Project:57881)} \code{RefSeq (Project:57883)} \code{RefSeq (Project:57885)} \code{RefSeq (Project:57891)} \code{RefSeq (Project:57893)} \code{RefSeq (Project:57895)} \code{RefSeq (Project:57897)} \code{RefSeq (Project:57899)} \code{RefSeq (Project:57901)} \code{RefSeq (Project:57903)} \code{RefSeq (Project:57905)} \code{RefSeq (Project:57907)} \code{RefSeq (Project:57909)} \code{RefSeq (Project:57911)} \code{RefSeq (Project:57913)} \code{RefSeq (Project:57915)} \code{RefSeq (Project:57917)} \code{RefSeq (Project:57919)} \code{RefSeq (Project:57921)} \code{RefSeq (Project:57925)} \code{RefSeq (Project:57927)} \code{RefSeq (Project:57929)} \code{RefSeq (Project:57931)} \code{RefSeq (Project:57933)} \code{RefSeq (Project:57935)} \code{RefSeq (Project:57937)} \code{RefSeq (Project:57939)} \code{RefSeq (Project:57941)} \code{RefSeq (Project:57943)} \code{RefSeq (Project:57945)} \code{RefSeq (Project:57947)} \code{RefSeq (Project:57949)} \code{RefSeq (Project:57951)} \code{RefSeq (Project:57953)} \code{RefSeq (Project:57955)} \code{RefSeq (Project:57957)} \code{RefSeq (Project:57959)} \code{RefSeq (Project:57961)} \code{RefSeq (Project:57963)} \code{RefSeq (Project:57965)} \code{RefSeq (Project:57967)} \code{RefSeq (Project:57969)} \code{RefSeq (Project:57971)} \code{RefSeq (Project:57973)} \code{RefSeq 
(Project:57975)} \code{RefSeq (Project:57977)} \code{RefSeq (Project:57979)} \code{RefSeq (Project:57981)} \code{RefSeq (Project:57983)} \code{RefSeq (Project:57985)} \code{RefSeq (Project:57987)} \code{RefSeq (Project:57989)} \code{RefSeq (Project:57991)} \code{RefSeq (Project:57993)} \code{RefSeq (Project:57995)} \code{RefSeq (Project:57997)} \code{RefSeq (Project:57999)} \code{RefSeq (Project:58001)} \code{RefSeq (Project:58003)} \code{RefSeq (Project:58005)} \code{RefSeq (Project:58007)} \code{RefSeq (Project:58009)} \code{RefSeq (Project:58011)} \code{RefSeq (Project:58013)} \code{RefSeq (Project:58015)} \code{RefSeq (Project:58017)} \code{RefSeq (Project:58019)} \code{RefSeq (Project:58021)} \code{RefSeq (Project:58023)} \code{RefSeq (Project:58025)} \code{RefSeq (Project:58027)} \code{RefSeq (Project:58029)} \code{RefSeq (Project:58031)} \code{RefSeq (Project:58035)} \code{RefSeq (Project:58037)} \code{RefSeq (Project:58039)} \code{RefSeq (Project:58041)} \code{RefSeq (Project:58043)} \code{RefSeq (Project:58045)} \code{RefSeq (Project:58047)} \code{RefSeq (Project:58049)} \code{RefSeq (Project:58051)} \code{RefSeq (Project:58053)} \code{RefSeq (Project:58055)} \code{RefSeq (Project:58057)} \code{RefSeq (Project:58059)} \code{RefSeq (Project:58061)} \code{RefSeq (Project:58063)} \code{RefSeq (Project:58065)} \code{RefSeq (Project:58067)} \code{RefSeq (Project:58069)} \code{RefSeq (Project:58071)} \code{RefSeq (Project:58073)} \code{RefSeq (Project:58075)} \code{RefSeq (Project:58077)} \code{RefSeq (Project:58079)} \code{RefSeq (Project:58081)} \code{RefSeq (Project:58083)} \code{RefSeq (Project:58085)} \code{RefSeq (Project:58087)} \code{RefSeq (Project:58089)} \code{RefSeq (Project:58091)} \code{RefSeq (Project:58093)} \code{RefSeq (Project:58095)} \code{RefSeq (Project:58097)} \code{RefSeq (Project:58099)} \code{RefSeq (Project:58103)} \code{RefSeq (Project:58105)} \code{RefSeq (Project:58107)} \code{RefSeq (Project:58109)} \code{RefSeq (Project:58111)} 
\code{RefSeq (Project:58113)} \code{RefSeq (Project:58115)} \code{RefSeq (Project:58117)} \code{RefSeq (Project:58119)} \code{RefSeq (Project:58121)} \code{RefSeq (Project:58123)} \code{RefSeq (Project:58125)} \code{RefSeq (Project:58127)} \code{RefSeq (Project:58129)} \code{RefSeq (Project:58131)} \code{RefSeq (Project:58133)} \code{RefSeq (Project:58135)} \code{RefSeq (Project:58137)} \code{RefSeq (Project:58139)} \code{RefSeq (Project:58141)} \code{RefSeq (Project:58143)} \code{RefSeq (Project:58145)} \code{RefSeq (Project:58147)} \code{RefSeq (Project:58149)} \code{RefSeq (Project:58151)} \code{RefSeq (Project:58153)} \code{RefSeq (Project:58155)} \code{RefSeq (Project:58157)} \code{RefSeq (Project:58159)} \code{RefSeq (Project:58161)} \code{RefSeq (Project:58163)} \code{RefSeq (Project:58165)} \code{RefSeq (Project:58167)} \code{RefSeq (Project:58169)} \code{RefSeq (Project:58171)} \code{RefSeq (Project:58173)} \code{RefSeq (Project:58175)} \code{RefSeq (Project:58177)} \code{RefSeq (Project:58179)} \code{RefSeq (Project:58181)} \code{RefSeq (Project:58183)} \code{RefSeq (Project:58185)} \code{RefSeq (Project:58187)} \code{RefSeq (Project:58189)} \code{RefSeq (Project:58191)} \code{RefSeq (Project:58193)} \code{RefSeq (Project:58195)} \code{RefSeq (Project:58197)} \code{RefSeq (Project:58199)} \code{RefSeq (Project:58201)} \code{RefSeq (Project:58203)} \code{RefSeq (Project:58205)} \code{RefSeq (Project:58207)} \code{RefSeq (Project:58209)} \code{RefSeq (Project:58211)} \code{RefSeq (Project:58213)} \code{RefSeq (Project:58215)} \code{RefSeq (Project:58217)} \code{RefSeq (Project:58219)} \code{RefSeq (Project:58221)} \code{RefSeq (Project:58223)} \code{RefSeq (Project:58225)} \code{RefSeq (Project:58227)} \code{RefSeq (Project:58231)} \code{RefSeq (Project:58233)} \code{RefSeq (Project:58235)} \code{RefSeq (Project:58239)} \code{RefSeq (Project:58241)} \code{RefSeq (Project:58247)} \code{RefSeq (Project:58249)} \code{RefSeq (Project:58251)} \code{RefSeq 
(Project:58253)} \code{RefSeq (Project:58255)} \code{RefSeq (Project:58257)} \code{RefSeq (Project:58259)} \code{RefSeq (Project:58261)} \code{RefSeq (Project:58263)} \code{RefSeq (Project:58265)} \code{RefSeq (Project:58267)} \code{RefSeq (Project:58271)} \code{RefSeq (Project:58273)} \code{RefSeq (Project:58275)} \code{RefSeq (Project:58277)} \code{RefSeq (Project:58279)} \code{RefSeq (Project:58281)} \code{RefSeq (Project:58283)} \code{RefSeq (Project:58285)} \code{RefSeq (Project:58287)} \code{RefSeq (Project:58289)} \code{RefSeq (Project:58293)} \code{RefSeq (Project:58295)} \code{RefSeq (Project:58297)} \code{RefSeq (Project:58299)} \code{RefSeq (Project:58301)} \code{RefSeq (Project:58303)} \code{RefSeq (Project:58305)} \code{RefSeq (Project:58307)} \code{RefSeq (Project:58309)} \code{RefSeq (Project:58311)} \code{RefSeq (Project:58313)} \code{RefSeq (Project:58315)} \code{RefSeq (Project:58317)} \code{RefSeq (Project:58319)} \code{RefSeq (Project:58321)} \code{RefSeq (Project:58323)} \code{RefSeq (Project:58325)} \code{RefSeq (Project:58327)} \code{RefSeq (Project:58329)} \code{RefSeq (Project:58331)} \code{RefSeq (Project:58333)} \code{RefSeq (Project:58339)} \code{RefSeq (Project:58341)} \code{RefSeq (Project:58343)} \code{RefSeq (Project:58345)} \code{RefSeq (Project:58347)} \code{RefSeq (Project:58349)} \code{RefSeq (Project:58351)} \code{RefSeq (Project:58353)} \code{RefSeq (Project:58355)} \code{RefSeq (Project:58357)} \code{RefSeq (Project:58359)} \code{RefSeq (Project:58361)} \code{RefSeq (Project:58363)} \code{RefSeq (Project:58365)} \code{RefSeq (Project:58367)} \code{RefSeq (Project:58369)} \code{RefSeq (Project:58371)} \code{RefSeq (Project:58373)} \code{RefSeq (Project:58375)} \code{RefSeq (Project:58379)} \code{RefSeq (Project:58383)} \code{RefSeq (Project:58385)} \code{RefSeq (Project:58387)} \code{RefSeq (Project:58389)} \code{RefSeq (Project:58391)} \code{RefSeq (Project:58393)} \code{RefSeq (Project:58395)} \code{RefSeq (Project:58397)} 
\code{RefSeq (Project:58399)} \code{RefSeq (Project:58401)} \code{RefSeq (Project:58403)} \code{RefSeq (Project:58405)} \code{RefSeq (Project:58407)} \code{RefSeq (Project:58409)} \code{RefSeq (Project:58411)} \code{RefSeq (Project:58413)} \code{RefSeq (Project:58415)} \code{RefSeq (Project:58417)} \code{RefSeq (Project:58419)} \code{RefSeq (Project:58421)} \code{RefSeq (Project:58423)} \code{RefSeq (Project:58425)} \code{RefSeq (Project:58427)} \code{RefSeq (Project:58429)} \code{RefSeq (Project:58431)} \code{RefSeq (Project:58433)} \code{RefSeq (Project:58435)} \code{RefSeq (Project:58437)} \code{RefSeq (Project:58439)} \code{RefSeq (Project:58441)} \code{RefSeq (Project:58443)} \code{RefSeq (Project:58445)} \code{RefSeq (Project:58447)} \code{RefSeq (Project:58449)} \code{RefSeq (Project:58451)} \code{RefSeq (Project:58453)} \code{RefSeq (Project:58455)} \code{RefSeq (Project:58459)} \code{RefSeq (Project:58463)} \code{RefSeq (Project:58465)} \code{RefSeq (Project:58467)} \code{RefSeq (Project:58469)} \code{RefSeq (Project:58471)} \code{RefSeq (Project:58473)} \code{RefSeq (Project:58475)} \code{RefSeq (Project:58477)} \code{RefSeq (Project:58479)} \code{RefSeq (Project:58481)} \code{RefSeq (Project:58483)} \code{RefSeq (Project:58485)} \code{RefSeq (Project:58487)} \code{RefSeq (Project:58489)} \code{RefSeq (Project:58491)} \code{RefSeq (Project:58493)} \code{RefSeq (Project:58495)} \code{RefSeq (Project:58499)} \code{RefSeq (Project:58501)} \code{RefSeq (Project:58503)} \code{RefSeq (Project:58505)} \code{RefSeq (Project:58507)} \code{RefSeq (Project:58509)} \code{RefSeq (Project:58511)} \code{RefSeq (Project:58513)} \code{RefSeq (Project:58515)} \code{RefSeq (Project:58519)} \code{RefSeq (Project:58521)} \code{RefSeq (Project:58523)} \code{RefSeq (Project:58525)} \code{RefSeq (Project:58527)} \code{RefSeq (Project:58529)} \code{RefSeq (Project:58531)} \code{RefSeq (Project:58533)} \code{RefSeq (Project:58535)} \code{RefSeq (Project:58537)} \code{RefSeq 
(Project:58539)} \code{RefSeq (Project:58541)} \code{RefSeq (Project:58543)} \code{RefSeq (Project:58545)} \code{RefSeq (Project:58547)} \code{RefSeq (Project:58549)} \code{RefSeq (Project:58551)} \code{RefSeq (Project:58553)} \code{RefSeq (Project:58555)} \code{RefSeq (Project:58557)} \code{RefSeq (Project:58559)} \code{RefSeq (Project:58561)} \code{RefSeq (Project:58563)} \code{RefSeq (Project:58565)} \code{RefSeq (Project:58577)} \code{RefSeq (Project:58579)} \code{RefSeq (Project:58581)} \code{RefSeq (Project:58583)} \code{RefSeq (Project:58585)} \code{RefSeq (Project:58587)} \code{RefSeq (Project:58589)} \code{RefSeq (Project:58591)} \code{RefSeq (Project:58593)} \code{RefSeq (Project:58595)} \code{RefSeq (Project:58597)} \code{RefSeq (Project:58599)} \code{RefSeq (Project:58601)} \code{RefSeq (Project:58603)} \code{RefSeq (Project:58605)} \code{RefSeq (Project:58607)} \code{RefSeq (Project:58609)} \code{RefSeq (Project:58611)} \code{RefSeq (Project:58613)} \code{RefSeq (Project:58615)} \code{RefSeq (Project:58617)} \code{RefSeq (Project:58619)} \code{RefSeq (Project:58621)} \code{RefSeq (Project:58623)} \code{RefSeq (Project:58625)} \code{RefSeq (Project:58627)} \code{RefSeq (Project:58629)} \code{RefSeq (Project:58631)} \code{RefSeq (Project:58635)} \code{RefSeq (Project:58637)} \code{RefSeq (Project:58641)} \code{RefSeq (Project:58643)} \code{RefSeq (Project:58645)} \code{RefSeq (Project:58647)} \code{RefSeq (Project:58649)} \code{RefSeq (Project:58651)} \code{RefSeq (Project:58653)} \code{RefSeq (Project:58655)} \code{RefSeq (Project:58657)} \code{RefSeq (Project:58659)} \code{RefSeq (Project:58661)} \code{RefSeq (Project:58663)} \code{RefSeq (Project:58665)} \code{RefSeq (Project:58667)} \code{RefSeq (Project:58669)} \code{RefSeq (Project:58671)} \code{RefSeq (Project:58675)} \code{RefSeq (Project:58677)} \code{RefSeq (Project:58679)} \code{RefSeq (Project:58681)} \code{RefSeq (Project:58683)} \code{RefSeq (Project:58685)} \code{RefSeq (Project:58687)} 
\code{RefSeq (Project:58689)} \code{RefSeq (Project:58691)} \code{RefSeq (Project:58693)} \code{RefSeq (Project:58697)} \code{RefSeq (Project:58699)} \code{RefSeq (Project:58701)} \code{RefSeq (Project:58703)} \code{RefSeq (Project:58705)} \code{RefSeq (Project:58707)} \code{RefSeq (Project:58709)} \code{RefSeq (Project:58711)} \code{RefSeq (Project:58713)} \code{RefSeq (Project:58715)} \code{RefSeq (Project:58717)} \code{RefSeq (Project:58719)} \code{RefSeq (Project:58721)} \code{RefSeq (Project:58723)} \code{RefSeq (Project:58725)} \code{RefSeq (Project:58727)} \code{RefSeq (Project:58729)} \code{RefSeq (Project:58731)} \code{RefSeq (Project:58733)} \code{RefSeq (Project:58735)} \code{RefSeq (Project:58737)} \code{RefSeq (Project:58739)} \code{RefSeq (Project:58741)} \code{RefSeq (Project:58743)} \code{RefSeq (Project:58745)} \code{RefSeq (Project:58747)} \code{RefSeq (Project:58749)} \code{RefSeq (Project:58751)} \code{RefSeq (Project:58753)} \code{RefSeq (Project:58755)} \code{RefSeq (Project:58757)} \code{RefSeq (Project:58759)} \code{RefSeq (Project:58761)} \code{RefSeq (Project:58763)} \code{RefSeq (Project:58765)} \code{RefSeq (Project:58767)} \code{RefSeq (Project:58769)} \code{RefSeq (Project:58771)} \code{RefSeq (Project:58773)} \code{RefSeq (Project:58775)} \code{RefSeq (Project:58777)} \code{RefSeq (Project:58781)} \code{RefSeq (Project:58783)} \code{RefSeq (Project:58785)} \code{RefSeq (Project:58787)} \code{RefSeq (Project:58789)} \code{RefSeq (Project:58791)} \code{RefSeq (Project:58793)} \code{RefSeq (Project:58795)} \code{RefSeq (Project:58799)} \code{RefSeq (Project:58801)} \code{RefSeq (Project:58803)} \code{RefSeq (Project:58805)} \code{RefSeq (Project:58807)} \code{RefSeq (Project:58809)} \code{RefSeq (Project:58811)} \code{RefSeq (Project:58813)} \code{RefSeq (Project:58815)} \code{RefSeq (Project:58817)} \code{RefSeq (Project:58819)} \code{RefSeq (Project:58821)} \code{RefSeq (Project:58823)} \code{RefSeq (Project:58825)} \code{RefSeq 
(Project:58827)} \code{RefSeq (Project:58829)} \code{RefSeq (Project:58831)} \code{RefSeq (Project:58833)} \code{RefSeq (Project:58835)} \code{RefSeq (Project:58837)} \code{RefSeq (Project:58839)} \code{RefSeq (Project:58841)} \code{RefSeq (Project:58843)} \code{RefSeq (Project:58845)} \code{RefSeq (Project:58847)} \code{RefSeq (Project:58849)} \code{RefSeq (Project:58851)} \code{RefSeq (Project:58853)} \code{RefSeq (Project:58855)} \code{RefSeq (Project:58857)} \code{RefSeq (Project:58859)} \code{RefSeq (Project:58861)} \code{RefSeq (Project:58863)} \code{RefSeq (Project:58865)} \code{RefSeq (Project:58867)} \code{RefSeq (Project:58869)} \code{RefSeq (Project:58871)} \code{RefSeq (Project:58873)} \code{RefSeq (Project:58875)} \code{RefSeq (Project:58877)} \code{RefSeq (Project:58879)} \code{RefSeq (Project:58881)} \code{RefSeq (Project:58885)} \code{RefSeq (Project:58887)} \code{RefSeq (Project:58889)} \code{RefSeq (Project:58891)} \code{RefSeq (Project:58893)} \code{RefSeq (Project:58895)} \code{RefSeq (Project:58897)} \code{RefSeq (Project:58899)} \code{RefSeq (Project:58901)} \code{RefSeq (Project:58903)} \code{RefSeq (Project:58905)} \code{RefSeq (Project:58907)} \code{RefSeq (Project:58909)} \code{RefSeq (Project:58911)} \code{RefSeq (Project:58913)} \code{RefSeq (Project:58915)} \code{RefSeq (Project:58917)} \code{RefSeq (Project:58919)} \code{RefSeq (Project:58921)} \code{RefSeq (Project:58923)} \code{RefSeq (Project:58925)} \code{RefSeq (Project:58927)} \code{RefSeq (Project:58929)} \code{RefSeq (Project:58931)} \code{RefSeq (Project:58933)} \code{RefSeq (Project:58935)} \code{RefSeq (Project:58937)} \code{RefSeq (Project:58939)} \code{RefSeq (Project:58945)} \code{RefSeq (Project:58947)} \code{RefSeq (Project:58949)} \code{RefSeq (Project:58951)} \code{RefSeq (Project:58953)} \code{RefSeq (Project:58957)} \code{RefSeq (Project:58959)} \code{RefSeq (Project:58961)} \code{RefSeq (Project:58963)} \code{RefSeq (Project:58965)} \code{RefSeq (Project:58967)} 
\code{RefSeq (Project:58969)} \code{RefSeq (Project:58971)} \code{RefSeq (Project:58973)} \code{RefSeq (Project:58977)} \code{RefSeq (Project:58979)} \code{RefSeq (Project:58981)} \code{RefSeq (Project:58983)} \code{RefSeq (Project:58985)} \code{RefSeq (Project:58987)} \code{RefSeq (Project:58989)} \code{RefSeq (Project:58991)} \code{RefSeq (Project:58993)} \code{RefSeq (Project:58995)} \code{RefSeq (Project:58997)} \code{RefSeq (Project:58999)} \code{RefSeq (Project:59001)} \code{RefSeq (Project:59003)} \code{RefSeq (Project:59007)} \code{RefSeq (Project:59009)} \code{RefSeq (Project:59011)} \code{RefSeq (Project:59013)} \code{RefSeq (Project:59015)} \code{RefSeq (Project:59017)} \code{RefSeq (Project:59019)} \code{RefSeq (Project:59021)} \code{RefSeq (Project:59023)} \code{RefSeq (Project:59025)} \code{RefSeq (Project:59027)} \code{RefSeq (Project:59029)} \code{RefSeq (Project:59033)} \code{RefSeq (Project:59035)} \code{RefSeq (Project:59037)} \code{RefSeq (Project:59041)} \code{RefSeq (Project:59043)} \code{RefSeq (Project:59045)} \code{RefSeq (Project:59047)} \code{RefSeq (Project:59049)} \code{RefSeq (Project:59051)} \code{RefSeq (Project:59053)} \code{RefSeq (Project:59055)} \code{RefSeq (Project:59057)} \code{RefSeq (Project:59061)} \code{RefSeq (Project:59063)} \code{RefSeq (Project:59067)} \code{RefSeq (Project:59069)} \code{RefSeq (Project:59071)} \code{RefSeq (Project:59073)} \code{RefSeq (Project:59075)} \code{RefSeq (Project:59077)} \code{RefSeq (Project:59079)} \code{RefSeq (Project:59081)} \code{RefSeq (Project:59083)} \code{RefSeq (Project:59085)} \code{RefSeq (Project:59087)} \code{RefSeq (Project:59089)} \code{RefSeq (Project:59091)} \code{RefSeq (Project:59093)} \code{RefSeq (Project:59095)} \code{RefSeq (Project:59097)} \code{RefSeq (Project:59099)} \code{RefSeq (Project:59101)} \code{RefSeq (Project:59103)} \code{RefSeq (Project:59105)} \code{RefSeq (Project:59107)} \code{RefSeq (Project:59109)} \code{RefSeq (Project:59111)} \code{RefSeq 
(Project:59113)} \code{RefSeq (Project:59117)} \code{RefSeq (Project:59119)} \code{RefSeq (Project:59121)} \code{RefSeq (Project:59123)} \code{RefSeq (Project:59125)} \code{RefSeq (Project:59127)} \code{RefSeq (Project:59129)} \code{RefSeq (Project:59131)} \code{RefSeq (Project:59133)} \code{RefSeq (Project:59135)} \code{RefSeq (Project:59137)} \code{RefSeq (Project:59139)} \code{RefSeq (Project:59143)} \code{RefSeq (Project:59145)} \code{RefSeq (Project:59147)} \code{RefSeq (Project:59149)} \code{RefSeq (Project:59151)} \code{RefSeq (Project:59153)} \code{RefSeq (Project:59155)} \code{RefSeq (Project:59157)} \code{RefSeq (Project:59159)} \code{RefSeq (Project:59161)} \code{RefSeq (Project:59163)} \code{RefSeq (Project:59165)} \code{RefSeq (Project:59167)} \code{RefSeq (Project:59169)} \code{RefSeq (Project:59171)} \code{RefSeq (Project:59173)} \code{RefSeq (Project:59175)} \code{RefSeq (Project:59177)} \code{RefSeq (Project:59179)} \code{RefSeq (Project:59181)} \code{RefSeq (Project:59183)} \code{RefSeq (Project:59185)} \code{RefSeq (Project:59187)} \code{RefSeq (Project:59189)} \code{RefSeq (Project:59191)} \code{RefSeq (Project:59193)} \code{RefSeq (Project:59195)} \code{RefSeq (Project:59197)} \code{RefSeq (Project:59199)} \code{RefSeq (Project:59201)} \code{RefSeq (Project:59203)} \code{RefSeq (Project:59205)} \code{RefSeq (Project:59207)} \code{RefSeq (Project:59209)} \code{RefSeq (Project:59211)} \code{RefSeq (Project:59213)} \code{RefSeq (Project:59215)} \code{RefSeq (Project:59217)} \code{RefSeq (Project:59219)} \code{RefSeq (Project:59221)} \code{RefSeq (Project:59223)} \code{RefSeq (Project:59225)} \code{RefSeq (Project:59227)} \code{RefSeq (Project:59229)} \code{RefSeq (Project:59231)} \code{RefSeq (Project:59233)} \code{RefSeq (Project:59235)} \code{RefSeq (Project:59237)} \code{RefSeq (Project:59241)} \code{RefSeq (Project:59243)} \code{RefSeq (Project:59245)} \code{RefSeq (Project:59247)} \code{RefSeq (Project:59249)} \code{RefSeq (Project:59251)} 
\code{RefSeq (Project:59253)} \code{RefSeq (Project:59257)} \code{RefSeq (Project:59259)} \code{RefSeq (Project:59261)} \code{RefSeq (Project:59263)} \code{RefSeq (Project:59265)} \code{RefSeq (Project:59267)} \code{RefSeq (Project:59269)} \code{RefSeq (Project:59271)} \code{RefSeq (Project:59273)} \code{RefSeq (Project:59275)} \code{RefSeq (Project:59281)} \code{RefSeq (Project:59283)} \code{RefSeq (Project:59285)} \code{RefSeq (Project:59287)} \code{RefSeq (Project:59289)} \code{RefSeq (Project:59291)} \code{RefSeq (Project:59293)} \code{RefSeq (Project:59295)} \code{RefSeq (Project:59297)} \code{RefSeq (Project:59299)} \code{RefSeq (Project:59301)} \code{RefSeq (Project:59303)} \code{RefSeq (Project:59305)} \code{RefSeq (Project:59307)} \code{RefSeq (Project:59309)} \code{RefSeq (Project:59311)} \code{RefSeq (Project:59313)} \code{RefSeq (Project:59315)} \code{RefSeq (Project:59317)} \code{RefSeq (Project:59319)} \code{RefSeq (Project:59321)} \code{RefSeq (Project:59323)} \code{RefSeq (Project:59325)} \code{RefSeq (Project:59327)} \code{RefSeq (Project:59341)} \code{RefSeq (Project:59343)} \code{RefSeq (Project:59345)} \code{RefSeq (Project:59347)} \code{RefSeq (Project:59349)} \code{RefSeq (Project:59351)} \code{RefSeq (Project:59355)} \code{RefSeq (Project:59357)} \code{RefSeq (Project:59359)} \code{RefSeq (Project:59361)} \code{RefSeq (Project:59363)} \code{RefSeq (Project:59365)} \code{RefSeq (Project:59367)} \code{RefSeq (Project:59369)} \code{RefSeq (Project:59371)} \code{RefSeq (Project:59373)} \code{RefSeq (Project:59385)} \code{RefSeq (Project:59387)} \code{RefSeq (Project:59389)} \code{RefSeq (Project:59395)} \code{RefSeq (Project:59397)} \code{RefSeq (Project:59399)} \code{RefSeq (Project:59401)} \code{RefSeq (Project:59403)} \code{RefSeq (Project:59405)} \code{RefSeq (Project:59407)} \code{RefSeq (Project:59409)} \code{RefSeq (Project:59411)} \code{RefSeq (Project:59413)} \code{RefSeq (Project:59417)} \code{RefSeq (Project:59419)} \code{RefSeq 
(Project:59421)} \code{RefSeq (Project:59423)} \code{RefSeq (Project:59425)} \code{RefSeq (Project:59427)} \code{RefSeq (Project:59429)} \code{RefSeq (Project:59431)} \code{RefSeq (Project:59433)} \code{RefSeq (Project:59435)} \code{RefSeq (Project:59437)} \code{RefSeq (Project:59439)} \code{RefSeq (Project:59545)} \code{RefSeq (Project:59581)} \code{RefSeq (Project:59583)} \code{RefSeq (Project:59769)} \code{RefSeq (Project:59777)} \code{RefSeq (Project:59883)} \code{RefSeq (Project:59887)} \code{RefSeq (Project:59899)} \code{RefSeq (Project:59969)} \code{RefSeq (Project:60157)} \code{RefSeq (Project:60161)} \code{RefSeq (Project:60163)} \code{RefSeq (Project:60165)} \code{RefSeq (Project:60167)} \code{RefSeq (Project:60171)} \code{RefSeq (Project:60191)} \code{RefSeq (Project:60393)} \code{RefSeq (Project:60487)} \code{RefSeq (Project:60491)} \code{RefSeq (Project:60575)} \code{RefSeq (Project:60583)} \code{RefSeq (Project:60723)} \code{RefSeq (Project:60725)} \code{RefSeq (Project:60727)} \code{RefSeq (Project:60789)} \code{RefSeq (Project:60821)} \code{RefSeq (Project:60837)} \code{RefSeq (Project:60849)} \code{RefSeq (Project:60851)} \code{RefSeq (Project:60855)} \code{RefSeq (Project:60859)} \code{RefSeq (Project:61179)} \code{RefSeq (Project:61245)} \code{RefSeq (Project:61247)} \code{RefSeq (Project:61249)} \code{RefSeq (Project:61349)} \code{RefSeq (Project:61403)} \code{RefSeq (Project:61409)} \code{RefSeq (Project:61411)} \code{RefSeq (Project:61563)} \code{RefSeq (Project:61565)} \code{RefSeq (Project:61567)} \code{RefSeq (Project:61569)} \code{RefSeq (Project:61571)} \code{RefSeq (Project:61573)} \code{RefSeq (Project:61575)} \code{RefSeq (Project:61577)} \code{RefSeq (Project:61579)} \code{RefSeq (Project:61581)} \code{RefSeq (Project:61583)} \code{RefSeq (Project:61585)} \code{RefSeq (Project:61587)} \code{RefSeq (Project:61589)} \code{RefSeq (Project:61591)} \code{RefSeq (Project:61593)} \code{RefSeq (Project:61595)} \code{RefSeq (Project:61599)} 
\code{RefSeq (Project:61603)} \code{RefSeq (Project:61605)} \code{RefSeq (Project:61607)} \code{RefSeq (Project:61609)} \code{RefSeq (Project:61611)} \code{RefSeq (Project:61613)} \code{RefSeq (Project:61619)} \code{RefSeq (Project:61621)} \code{RefSeq (Project:61623)} \code{RefSeq (Project:61625)} \code{RefSeq (Project:61627)} \code{RefSeq (Project:61629)} \code{RefSeq (Project:61631)} \code{RefSeq (Project:61633)} \code{RefSeq (Project:61635)} \code{RefSeq (Project:61639)} \code{RefSeq (Project:61641)} \code{RefSeq (Project:61643)} \code{RefSeq (Project:61645)} \code{RefSeq (Project:61647)} \code{RefSeq (Project:61649)} \code{RefSeq (Project:61727)} \code{RefSeq (Project:61729)} \code{RefSeq (Project:61897)} \code{RefSeq (Project:62003)} \code{RefSeq (Project:62083)} \code{RefSeq (Project:62095)} \code{RefSeq (Project:62097)} \code{RefSeq (Project:62099)} \code{RefSeq (Project:62101)} \code{RefSeq (Project:62103)} \code{RefSeq (Project:62105)} \code{RefSeq (Project:62107)} \code{RefSeq (Project:62123)} \code{RefSeq (Project:62125)} \code{RefSeq (Project:62135)} \code{RefSeq (Project:62159)} \code{RefSeq (Project:62183)} \code{RefSeq (Project:62207)} \code{RefSeq (Project:62225)} \code{RefSeq (Project:62227)} \code{RefSeq (Project:62243)} \code{RefSeq (Project:62245)} \code{RefSeq (Project:62265)} \code{RefSeq (Project:62273)} \code{RefSeq (Project:62461)} \code{RefSeq (Project:62463)} \code{RefSeq (Project:62693)} \code{RefSeq (Project:62695)} \code{RefSeq (Project:62715)} \code{RefSeq (Project:62789)} \code{RefSeq (Project:62901)} \code{RefSeq (Project:62903)} \code{RefSeq (Project:62905)} \code{RefSeq (Project:62907)} \code{RefSeq (Project:62909)} \code{RefSeq (Project:62911)} \code{RefSeq (Project:62913)} \code{RefSeq (Project:62921)} \code{RefSeq (Project:62923)} \code{RefSeq (Project:62925)} \code{RefSeq (Project:62937)} \code{RefSeq (Project:62939)} \code{RefSeq (Project:62947)} \code{RefSeq (Project:62961)} \code{RefSeq (Project:63159)} \code{RefSeq 
(Project:63163)} \code{RefSeq (Project:63269)} \code{RefSeq (Project:63343)} \code{RefSeq (Project:63345)} \code{RefSeq (Project:63397)} \code{RefSeq (Project:63399)} \code{RefSeq (Project:63401)} \code{RefSeq (Project:63403)} \code{RefSeq (Project:63405)} \code{RefSeq (Project:63605)} \code{RefSeq (Project:63617)} \code{RefSeq (Project:63621)} \code{RefSeq (Project:63623)} \code{RefSeq (Project:63627)} \code{RefSeq (Project:63629)} \code{RefSeq (Project:63631)} \code{RefSeq (Project:63633)} \code{RefSeq (Project:63663)} \code{RefSeq (Project:63665)} \code{RefSeq (Project:64753)} \code{RefSeq (Project:64755)} \code{RefSeq (Project:64757)} \code{RefSeq (Project:65087)} \code{RefSeq (Project:65089)} \code{RefSeq (Project:65091)} \code{RefSeq (Project:65267)} \code{RefSeq (Project:65269)} \code{RefSeq (Project:65271)} \code{RefSeq (Project:65447)} \code{RefSeq (Project:65449)} \code{RefSeq (Project:65781)} \code{RefSeq (Project:65783)} \code{RefSeq (Project:65785)} \code{RefSeq (Project:65787)} \code{RefSeq (Project:65789)} \code{RefSeq (Project:66189)} \code{RefSeq (Project:66203)} \code{RefSeq (Project:66205)} \code{RefSeq (Project:66207)} \code{RefSeq (Project:66295)} \code{RefSeq (Project:66297)} \code{RefSeq (Project:66299)} \code{RefSeq (Project:66301)} \code{RefSeq (Project:66303)} \code{RefSeq (Project:66305)} \code{RefSeq (Project:66307)} \code{RefSeq (Project:66323)} \code{RefSeq (Project:66327)} \code{RefSeq (Project:66329)} \code{RefSeq (Project:66331)} \code{RefSeq (Project:66391)} \code{RefSeq (Project:66551)} \code{RefSeq (Project:66567)} \code{RefSeq (Project:66593)} \code{RefSeq (Project:66595)} \code{RefSeq (Project:66601)} \code{RefSeq (Project:66603)} \code{RefSeq (Project:66607)} \code{RefSeq (Project:66777)} \code{RefSeq (Project:66779)} \code{RefSeq (Project:66803)} \code{RefSeq (Project:66847)} \code{RefSeq (Project:66873)} \code{RefSeq (Project:66875)} \code{RefSeq (Project:66917)} \code{RefSeq (Project:67253)} \code{RefSeq (Project:67313)} 
\code{RefSeq (Project:67315)} \code{RefSeq (Project:67317)} \code{RefSeq (Project:67319)} \code{RefSeq (Project:67321)} \code{RefSeq (Project:67323)} \code{RefSeq (Project:67349)} \code{RefSeq (Project:67351)} \code{RefSeq (Project:67355)} \code{RefSeq (Project:67357)} \code{RefSeq (Project:67359)} \code{RefSeq (Project:67363)} \code{RefSeq (Project:67365)} \code{RefSeq (Project:67367)} \code{RefSeq (Project:67369)} \code{RefSeq (Project:67383)} \code{RefSeq (Project:67387)} \code{RefSeq (Project:67391)} \code{RefSeq (Project:67407)} \code{RefSeq (Project:67501)} \code{RefSeq (Project:67507)} \code{RefSeq (Project:67985)} \code{RefSeq (Project:68019)} \code{RefSeq (Project:68021)} \code{RefSeq (Project:68053)} \code{RefSeq (Project:68055)} \code{RefSeq (Project:68057)} \code{RefSeq (Project:68067)} \code{RefSeq (Project:68101)} \code{RefSeq (Project:68103)} \code{RefSeq (Project:68105)} \code{RefSeq (Project:68141)} \code{RefSeq (Project:68143)} \code{RefSeq (Project:68147)} \code{RefSeq (Project:68249)} \code{RefSeq (Project:68279)} \code{RefSeq (Project:68281)} \code{RefSeq (Project:68283)} \code{RefSeq (Project:68285)} \code{RefSeq (Project:68291)} \code{RefSeq (Project:68311)} \code{RefSeq (Project:68317)} \code{RefSeq (Project:68321)} \code{RefSeq (Project:68335)} \code{RefSeq (Project:68443)} \code{RefSeq (Project:68445)} \code{RefSeq (Project:68447)} \code{RefSeq (Project:68449)} \code{RefSeq (Project:68451)} \code{RefSeq (Project:68687)} \code{RefSeq (Project:68689)} \code{RefSeq (Project:68691)} \code{RefSeq (Project:68705)} \code{RefSeq (Project:68707)} \code{RefSeq (Project:68739)} \code{RefSeq (Project:68741)} \code{RefSeq (Project:68743)} \code{RefSeq (Project:68745)} \code{RefSeq (Project:68747)} \code{RefSeq (Project:68749)} \code{RefSeq (Project:68753)} \code{RefSeq (Project:68837)} \code{RefSeq (Project:68839)} \code{RefSeq (Project:68841)} \code{RefSeq (Project:68843)} \code{RefSeq (Project:70155)} \code{RefSeq (Project:70481)} \code{RefSeq 
(Project:70619)} \code{RefSeq (Project:70621)} \code{RefSeq (Project:70729)} \code{RefSeq (Project:70731)} \code{RefSeq (Project:70791)} \code{RefSeq (Project:70793)} \code{RefSeq (Project:70839)} \code{RefSeq (Project:70841)} \code{RefSeq (Project:71131)} \code{RefSeq (Project:71153)} \code{RefSeq (Project:71231)} \code{RefSeq (Project:71379)} \code{RefSeq (Project:71485)} \code{RefSeq (Project:72473)} \code{RefSeq (Project:72475)} \code{RefSeq (Project:72479)} \code{RefSeq (Project:72481)} \code{RefSeq (Project:72619)} \code{RefSeq (Project:72627)} \code{RefSeq (Project:72767)} \code{RefSeq (Project:72793)} \code{RefSeq (Project:72795)} \code{RefSeq (Project:72801)} \code{RefSeq (Project:72937)} \code{RefSeq (Project:73179)} \code{RefSeq (Project:73353)} \code{RefSeq (Project:73413)} \code{RefSeq (Project:73415)} \code{RefSeq (Project:73417)} \code{RefSeq (Project:73419)} \code{RefSeq (Project:73421)} \code{RefSeq (Project:73423)} \code{RefSeq (Project:73425)} \code{RefSeq (Project:73473)} \code{RefSeq (Project:73585)} \code{RefSeq (Project:73587)} \code{RefSeq (Project:73759)} \code{RefSeq (Project:73771)} \code{RefSeq (Project:73895)} \code{RefSeq (Project:73963)} \code{RefSeq (Project:73967)} \code{RefSeq (Project:74025)} \code{RefSeq (Project:74441)} \code{RefSeq (Project:74445)} \code{RefSeq (Project:75097)} \code{RefSeq (Project:75113)} \code{RefSeq (Project:75119)} \code{RefSeq (Project:77027)} \code{RefSeq (Project:77127)} \code{RefSeq (Project:77129)} \code{RefSeq (Project:78143)} \code{RefSeq (Project:78933)} \code{RefSeq (Project:80731)} \code{RefSeq (Project:80733)} \code{RefSeq (Project:80735)} \code{RefSeq (Project:80739)} \code{RefSeq (Project:80743)} \code{RefSeq (Project:80745)} \code{RefSeq (Project:80859)} \code{RefSeq (Project:81081)} \code{RefSeq (Project:81083)} \code{RefSeq (Project:81103)} \code{RefSeq (Project:81197)} \code{RefSeq (Project:81199)} \code{RefSeq (Project:81439)} \code{RefSeq (Project:81627)} \code{RefSeq (Project:81629)} 
\code{RefSeq (Project:81631)} \code{RefSeq (Project:81775)} \code{RefSeq (Project:81779)} \code{RefSeq (Project:82341)} \code{RefSeq (Project:82343)} \code{RefSeq (Project:82345)} \code{RefSeq (Project:82347)} \code{RefSeq (Project:82363)} \code{RefSeq (Project:82365)} \code{RefSeq (Project:82367)} \code{RefSeq (Project:82369)} \code{RefSeq (Project:82371)} \code{RefSeq (Project:82373)} \code{RefSeq (Project:82379)} \code{RefSeq (Project:82553)} \code{RefSeq (Project:82815)} \code{RefSeq (Project:82931)} \code{RefSeq (Project:82939)} \code{RefSeq (Project:82949)} \code{RefSeq (Project:82951)} \code{RefSeq (Project:83123)} \code{RefSeq (Project:83125)} \code{RefSeq (Project:83157)} \code{RefSeq (Project:83159)} \code{RefSeq (Project:83161)} \code{RefSeq (Project:83605)} \code{RefSeq (Project:83607)} \code{RefSeq (Project:83609)} \code{RefSeq (Project:83613)} \code{RefSeq (Project:83615)} \code{RefSeq (Project:83617)} \code{RefSeq (Project:83619)} \code{RefSeq (Project:84215)} \code{RefSeq (Project:84217)} \code{RefSeq (Project:84295)} \code{RefSeq (Project:84297)} \code{RefSeq (Project:84299)} \code{RefSeq (Project:84301)} \code{RefSeq (Project:84303)} \code{RefSeq (Project:84305)} \code{RefSeq (Project:84307)} \code{RefSeq (Project:84309)} \code{RefSeq (Project:84311)} \code{RefSeq (Project:84313)} \code{RefSeq (Project:84317)} \code{RefSeq (Project:84337)} \code{RefSeq (Project:84383)} \code{RefSeq (Project:84387)} \code{RefSeq (Project:84393)} \code{RefSeq (Project:84397)} \code{RefSeq (Project:85495)} \code{RefSeq (Project:86059)} \code{RefSeq (Project:86061)} \code{RefSeq (Project:86645)} \code{RefSeq (Project:86647)} \code{RefSeq (Project:86651)} \code{RefSeq (Project:86653)} \code{RefSeq (Project:86655)} \code{RefSeq (Project:86657)} \code{RefSeq (Project:86659)} \code{RefSeq (Project:86751)} \code{RefSeq (Project:86855)} \code{RefSeq (Project:86861)} \code{RefSeq (Project:86865)} \code{RefSeq (Project:86885)} \code{RefSeq (Project:86887)} \code{RefSeq 
(Project:86889)} \code{RefSeq (Project:87001)} \code{RefSeq (Project:87003)} \code{RefSeq (Project:87025)} \code{RefSeq (Project:87033)} \code{RefSeq (Project:87037)} \code{RefSeq (Project:87049)} \code{RefSeq (Project:87051)} \code{RefSeq (Project:87065)} \code{RefSeq (Project:87067)} \code{RefSeq (Project:87069)} \code{RefSeq (Project:88061)} \code{RefSeq (Project:88063)} \code{RefSeq (Project:88065)} \code{RefSeq (Project:88067)} \code{RefSeq (Project:88069)} \code{RefSeq (Project:88071)} \code{RefSeq (Project:88073)} \code{RefSeq (Project:88075)} \code{RefSeq (Project:89371)} \code{RefSeq (Project:89373)} \code{RefSeq (Project:89375)} \code{RefSeq (Project:89377)} \code{RefSeq (Project:89379)} \code{RefSeq (Project:89381)} \code{RefSeq (Project:89383)} \code{RefSeq (Project:89385)} \code{RefSeq (Project:89387)} \code{RefSeq (Project:89389)} \code{RefSeq (Project:89393)} \code{RefSeq (Project:89409)}}
\item{\code{TaxonomyID}}{a numeric vector}
\item{\code{SuperkingdomID}}{a numeric vector}
\item{\code{PhylumID}}{a numeric vector}
\item{\code{ClassID}}{a numeric vector}
\item{\code{OrderID}}{a numeric vector}
\item{\code{FamilyID}}{a numeric vector}
\item{\code{GenusID}}{a numeric vector}
\item{\code{SpeciesID}}{a numeric vector}
\item{\code{SubspeciesID}}{a numeric vector}
\item{\code{Superkingdom}}{a factor with levels \code{Archaea} \code{Bacteria}}
\item{\code{Phylum}}{a factor with levels \code{} \code{Acidobacteria} \code{Actinobacteria} \code{Aquificae} \code{Bacteroidetes} \code{Caldiserica} \code{Chlamydiae} \code{Chlorobi} \code{Chloroflexi} \code{Chrysiogenetes} \code{Crenarchaeota} \code{Cyanobacteria} \code{Deferribacteres} \code{Deinococcus-Thermus} \code{Dictyoglomi} \code{Elusimicrobia} \code{Euryarchaeota} \code{Fibrobacteres} \code{Firmicutes} \code{Fusobacteria} \code{Gemmatimonadetes} \code{Ignavibacteriae} \code{Korarchaeota} \code{Nanoarchaeota} \code{Nitrospirae} \code{Planctomycetes} \code{Proteobacteria} \code{Spirochaetes} \code{Synergistetes} \code{Tenericutes} \code{Thaumarchaeota} \code{Thermodesulfobacteria} \code{Thermotogae} \code{Verrucomicrobia}}
\item{\code{Class}}{a factor with levels \code{} \code{Acidobacteriia} \code{Actinobacteria} \code{Alphaproteobacteria} \code{Anaerolineae} \code{Aquificae} \code{Archaeoglobi} \code{Bacilli} \code{Bacteroidia} \code{Betaproteobacteria} \code{Caldilineae} \code{Caldisericia} \code{Chlamydiia} \code{Chlorobia} \code{Chloroflexi} \code{Chrysiogenetes} \code{Clostridia} \code{Cytophagia} \code{Deferribacteres} \code{Dehalococcoidia} \code{Deinococci} \code{Deltaproteobacteria} \code{Dictyoglomia} \code{Elusimicrobia} \code{Epsilonproteobacteria} \code{Erysipelotrichia} \code{Fibrobacteria} \code{Flavobacteriia} \code{Fusobacteriia} \code{Gammaproteobacteria} \code{Gemmatimonadetes} \code{Gloeobacteria} \code{Halobacteria} \code{Ignavibacteria} \code{Methanobacteria} \code{Methanococci} \code{Methanomicrobia} \code{Methanopyri} \code{Mollicutes} \code{Negativicutes} \code{Nitrospira} \code{Opitutae} \code{Phycisphaerae} \code{Planctomycetia} \code{Solibacteres} \code{Sphingobacteriia} \code{Spirochaetia} \code{Synergistia} \code{Thermococci} \code{Thermodesulfobacteria} \code{Thermomicrobia} \code{Thermoplasmata} \code{Thermoprotei} \code{Thermotogae} \code{Verrucomicrobiae}}
\item{\code{Order}}{a factor with levels \code{} \code{Acholeplasmatales} \code{Acidilobales} \code{Acidimicrobiales} \code{Acidithiobacillales} \code{Acidobacteriales} \code{Actinomycetales} \code{Aeromonadales} \code{Alteromonadales} \code{Anaerolineales} \code{Aquificales} \code{Archaeoglobales} \code{Bacillales} \code{Bacteroidales} \code{Bacteroidetes Order II. Incertae sedis} \code{Bdellovibrionales} \code{Bifidobacteriales} \code{Burkholderiales} \code{Caldilineales} \code{Caldisericales} \code{Campylobacterales} \code{Cardiobacteriales} \code{Caulobacterales} \code{Cenarchaeales} \code{Chlamydiales} \code{Chlorobiales} \code{Chloroflexales} \code{Chromatiales} \code{Chroococcales} \code{Chrysiogenales} \code{Clostridiales} \code{Coriobacteriales} \code{Cytophagales} \code{Deferribacterales} \code{Dehalococcoidales} \code{Deinococcales} \code{Desulfarculales} \code{Desulfobacterales} \code{Desulfovibrionales} \code{Desulfurellales} \code{Desulfurococcales} \code{Desulfuromonadales} \code{Dictyoglomales} \code{Elusimicrobiales} \code{Enterobacteriales} \code{Entomoplasmatales} \code{Erysipelotrichales} \code{Fervidicoccales} \code{Fibrobacterales} \code{Flavobacteriales} \code{Fusobacteriales} \code{Gallionellales} \code{Gemmatimonadales} \code{Gloeobacterales} \code{Halanaerobiales} \code{Halobacteriales} \code{Herpetosiphonales} \code{Hydrogenophilales} \code{Ignavibacteriales} \code{Lactobacillales} \code{Legionellales} \code{Magnetococcales} \code{Methanobacteriales} \code{Methanocellales} \code{Methanococcales} \code{Methanomicrobiales} \code{Methanopyrales} \code{Methanosarcinales} \code{Methylacidiphilales} \code{Methylococcales} \code{Methylophilales} \code{Mycoplasmatales} \code{Myxococcales} \code{Natranaerobiales} \code{Nautiliales} \code{Neisseriales} \code{Nitrosomonadales} \code{Nitrosopumilales} \code{Nitrospirales} \code{Nostocales} \code{Oceanospirillales} \code{Oscillatoriales} \code{Parvularculales} \code{Pasteurellales} 
\code{Phycisphaerales} \code{Planctomycetales} \code{Prochlorales} \code{Pseudomonadales} \code{Puniceicoccales} \code{Rhizobiales} \code{Rhodobacterales} \code{Rhodocyclales} \code{Rhodospirillales} \code{Rickettsiales} \code{Rubrobacterales} \code{Selenomonadales} \code{Solibacterales} \code{Solirubrobacterales} \code{Sphaerobacterales} \code{Sphingobacteriales} \code{Sphingomonadales} \code{Spirochaetales} \code{Sulfolobales} \code{Synergistales} \code{Syntrophobacterales} \code{Thermales} \code{Thermoanaerobacterales} \code{Thermococcales} \code{Thermodesulfobacteriales} \code{Thermomicrobiales} \code{Thermoplasmatales} \code{Thermoproteales} \code{Thermotogales} \code{Thiotrichales} \code{Verrucomicrobiales} \code{Vibrionales} \code{Xanthomonadales}}
\item{\code{Family}}{a factor with levels \code{} \code{Acetobacteraceae} \code{Acholeplasmataceae} \code{Acidaminococcaceae} \code{Acidilobaceae} \code{Acidimicrobiaceae} \code{Acidithiobacillaceae} \code{Acidobacteriaceae} \code{Acidothermaceae} \code{Actinomycetaceae} \code{Aerococcaceae} \code{Aeromonadaceae} \code{Alcaligenaceae} \code{Alcanivoracaceae} \code{Alicyclobacillaceae} \code{Alteromonadaceae} \code{Anaerolineaceae} \code{Anaplasmataceae} \code{Aquificaceae} \code{Archaeoglobaceae} \code{Bacillaceae} \code{Bacteriovoracaceae} \code{Bacteroidaceae} \code{Bartonellaceae} \code{Bdellovibrionaceae} \code{Beijerinckiaceae} \code{Beutenbergiaceae} \code{Bifidobacteriaceae} \code{Blattabacteriaceae} \code{Brachyspiraceae} \code{Bradyrhizobiaceae} \code{Brucellaceae} \code{Burkholderiaceae} \code{Caldilineaceae} \code{Caldisericaceae} \code{Campylobacteraceae} \code{Candidatus Midichloriaceae} \code{Cardiobacteriaceae} \code{Carnobacteriaceae} \code{Catenulisporaceae} \code{Caulobacteraceae} \code{Cellulomonadaceae} \code{Cenarchaeaceae} \code{Chlamydiaceae} \code{Chlorobiaceae} \code{Chloroflexaceae} \code{Chromatiaceae} \code{Chrysiogenaceae} \code{Clostridiaceae} \code{Clostridiales Family XI. Incertae Sedis} \code{Clostridiales Family XVIII. Incertae Sedis} \code{Clostridiales Family XVII. 
Incertae Sedis} \code{Colwelliaceae} \code{Comamonadaceae} \code{Conexibacteraceae} \code{Coriobacteriaceae} \code{Corynebacteriaceae} \code{Coxiellaceae} \code{Cryomorphaceae} \code{Cyclobacteriaceae} \code{Cystobacteraceae} \code{Cytophagaceae} \code{Deferribacteraceae} \code{Dehalococcoidaceae} \code{Deinococcaceae} \code{Dermabacteraceae} \code{Dermacoccaceae} \code{Desulfarculaceae} \code{Desulfobacteraceae} \code{Desulfobulbaceae} \code{Desulfohalobiaceae} \code{Desulfomicrobiaceae} \code{Desulfovibrionaceae} \code{Desulfurellaceae} \code{Desulfurobacteriaceae} \code{Desulfurococcaceae} \code{Dictyoglomaceae} \code{Ectothiorhodospiraceae} \code{Elusimicrobiaceae} \code{Enterobacteriaceae} \code{Enterococcaceae} \code{Entomoplasmataceae} \code{Erysipelotrichaceae} \code{Erythrobacteraceae} \code{Eubacteriaceae} \code{Ferrimonadaceae} \code{Fervidicoccaceae} \code{Fibrobacteraceae} \code{Flammeovirgaceae} \code{Flavobacteriaceae} \code{Francisellaceae} \code{Frankiaceae} \code{Fusobacteriaceae} \code{Gallionellaceae} \code{Gemmatimonadaceae} \code{Geobacteraceae} \code{Geodermatophilaceae} \code{Glycomycetaceae} \code{Gordoniaceae} \code{Hahellaceae} \code{Halanaerobiaceae} \code{Halobacteriaceae} \code{Halobacteroidaceae} \code{Halomonadaceae} \code{Halothiobacillaceae} \code{Helicobacteraceae} \code{Heliobacteriaceae} \code{Herpetosiphonaceae} \code{Hydrogenophilaceae} \code{Hydrogenothermaceae} \code{Hyphomicrobiaceae} \code{Hyphomonadaceae} \code{Idiomarinaceae} \code{Ignavibacteriaceae} \code{Intrasporangiaceae} \code{Jonesiaceae} \code{Kineosporiaceae} \code{Kofleriaceae} \code{Lachnospiraceae} \code{Lactobacillaceae} \code{Legionellaceae} \code{Leptospiraceae} \code{Leptotrichiaceae} \code{Leuconostocaceae} \code{Listeriaceae} \code{Magnetococcaceae} \code{Methanobacteriaceae} \code{Methanocaldococcaceae} \code{Methanocellaceae} \code{Methanococcaceae} \code{Methanocorpusculaceae} \code{Methanomicrobiaceae} \code{Methanopyraceae} \code{Methanoregulaceae} 
\code{Methanosaetaceae} \code{Methanosarcinaceae} \code{Methanospirillaceae} \code{Methanothermaceae} \code{Methylacidiphilaceae} \code{Methylobacteriaceae} \code{Methylococcaceae} \code{Methylophilaceae} \code{Microbacteriaceae} \code{Micrococcaceae} \code{Micromonosporaceae} \code{Moraxellaceae} \code{Mycobacteriaceae} \code{Mycoplasmataceae} \code{Myxococcaceae} \code{Nakamurellaceae} \code{Natranaerobiaceae} \code{Nautiliaceae} \code{Neisseriaceae} \code{Nitrosomonadaceae} \code{Nitrosopumilaceae} \code{Nitrospiraceae} \code{Nocardiaceae} \code{Nocardioidaceae} \code{Nocardiopsaceae} \code{Nostocaceae} \code{Oceanospirillaceae} \code{Opitutaceae} \code{Oscillospiraceae} \code{Oxalobacteraceae} \code{Paenibacillaceae} \code{Parachlamydiaceae} \code{Parvularculaceae} \code{Pasteurellaceae} \code{Pelobacteraceae} \code{Peptococcaceae} \code{Peptostreptococcaceae} \code{Phycisphaeraceae} \code{Phyllobacteriaceae} \code{Picrophilaceae} \code{Piscirickettsiaceae} \code{Planctomycetaceae} \code{Planococcaceae} \code{Polyangiaceae} \code{Porphyromonadaceae} \code{Prevotellaceae} \code{Prochlorococcaceae} \code{Promicromonosporaceae} \code{Propionibacteriaceae} \code{Pseudoalteromonadaceae} \code{Pseudomonadaceae} \code{Pseudonocardiaceae} \code{Psychromonadaceae} \code{Puniceicoccaceae} \code{Pyrodictiaceae} \code{Rhizobiaceae} \code{Rhodobacteraceae} \code{Rhodocyclaceae} \code{Rhodospirillaceae} \code{Rhodothermaceae} \code{Rickettsiaceae} \code{Rikenellaceae} \code{Rubrobacteraceae} \code{Ruminococcaceae} \code{Sanguibacteraceae} \code{Saprospiraceae} \code{Segniliparaceae} \code{Shewanellaceae} \code{Simkaniaceae} \code{Solibacteraceae} \code{Sphaerobacteraceae} \code{Sphingobacteriaceae} \code{Sphingomonadaceae} \code{Spirochaetaceae} \code{Staphylococcaceae} \code{Streptococcaceae} \code{Streptomycetaceae} \code{Streptosporangiaceae} \code{Sulfolobaceae} \code{Synergistaceae} \code{Syntrophaceae} \code{Syntrophobacteraceae} \code{Syntrophomonadaceae} 
\code{Thermaceae} \code{Thermoanaerobacteraceae} \code{Thermoanaerobacterales Family III. Incertae Sedis} \code{Thermoanaerobacterales Family IV. Incertae Sedis} \code{Thermococcaceae} \code{Thermodesulfobacteriaceae} \code{Thermodesulfobiaceae} \code{Thermofilaceae} \code{Thermomicrobiaceae} \code{Thermomonosporaceae} \code{Thermoplasmataceae} \code{Thermoproteaceae} \code{Thermotogaceae} \code{Trueperaceae} \code{Tsukamurellaceae} \code{Veillonellaceae} \code{Verrucomicrobiaceae} \code{Vibrionaceae} \code{Waddliaceae} \code{Xanthobacteraceae} \code{Xanthomonadaceae}}
\item{\code{Genus}}{a factor with levels \code{} \code{Acaryochloris} \code{Acetobacter} \code{Acetobacterium} \code{Acetohalobium} \code{Acholeplasma} \code{Achromobacter} \code{Acidaminococcus} \code{Acidianus} \code{Acidilobus} \code{Acidimicrobium} \code{Acidiphilium} \code{Acidithiobacillus} \code{Acidobacterium} \code{Acidothermus} \code{Acidovorax} \code{Aciduliprofundum} \code{Acinetobacter} \code{Actinobacillus} \code{Actinoplanes} \code{Actinosynnema} \code{Advenella} \code{Aequorivita} \code{Aerococcus} \code{Aeromonas} \code{Aeropyrum} \code{Aggregatibacter} \code{Agrobacterium} \code{Akkermansia} \code{Albidiferax} \code{Alcanivorax} \code{Alicycliphilus} \code{Alicyclobacillus} \code{Aliivibrio} \code{Alistipes} \code{Alkalilimnicola} \code{Alkaliphilus} \code{Allochromatium} \code{Alteromonas} \code{Aminobacterium} \code{Ammonifex} \code{Amycolatopsis} \code{Amycolicicoccus} \code{Anabaena} \code{Anaerobaculum} \code{Anaerococcus} \code{Anaerolinea} \code{Anaeromyxobacter} \code{Anaplasma} \code{Anoxybacillus} \code{Aquifex} \code{Arcanobacterium} \code{Archaeoglobus} \code{Arcobacter} \code{Aromatoleum} \code{Arthrobacter} \code{Asticcacaulis} \code{Atopobium} \code{Azoarcus} \code{Azorhizobium} \code{Azospira} \code{Azospirillum} \code{Azotobacter} \code{Bacillus} \code{Bacteriovorax} \code{Bacteroides} \code{Bartonella} \code{Basfia} \code{Bdellovibrio} \code{Beijerinckia} \code{Belliella} \code{Beutenbergia} \code{Bifidobacterium} \code{Blattabacterium} \code{Bordetella} \code{Borrelia} \code{Brachybacterium} \code{Brachyspira} \code{Bradyrhizobium} \code{Brevibacillus} \code{Brevundimonas} \code{Brucella} \code{Buchnera} \code{Burkholderia} \code{Butyrivibrio} \code{Caldanaerobacter} \code{Caldicellulosiruptor} \code{Caldilinea} \code{Caldisericum} \code{Calditerrivibrio} \code{Caldivirga} \code{Campylobacter} \code{Candidatus Accumulibacter} \code{Candidatus Amoebophilus} \code{Candidatus Arthromitus} \code{Candidatus Azobacteroides} 
\code{Candidatus Blochmannia} \code{Candidatus Carsonella} \code{Candidatus Chloracidobacterium} \code{Candidatus Desulforudis} \code{Candidatus Hamiltonella} \code{Candidatus Hodgkinia} \code{Candidatus Korarchaeum} \code{Candidatus Koribacter} \code{Candidatus Liberibacter} \code{Candidatus Methylomirabilis} \code{Candidatus Midichloria} \code{Candidatus Moranella} \code{Candidatus Pelagibacter} \code{Candidatus Phytoplasma} \code{Candidatus Portiera} \code{Candidatus Protochlamydia} \code{Candidatus Puniceispirillum} \code{Candidatus Riesia} \code{Candidatus Solibacter} \code{Candidatus Sulcia} \code{Candidatus Tremblaya} \code{Candidatus Zinderia} \code{Capnocytophaga} \code{Carboxydothermus} \code{Carnobacterium} \code{Catenulispora} \code{Caulobacter} \code{Cellulomonas} \code{Cellulophaga} \code{Cellulosilyticum} \code{Cellvibrio} \code{Cenarchaeum} \code{Chelativorans} \code{Chitinophaga} \code{Chlamydia} \code{Chlamydophila} \code{Chlorobaculum} \code{Chlorobium} \code{Chloroflexus} \code{Chloroherpeton} \code{Chromobacterium} \code{Chromohalobacter} \code{Citrobacter} \code{Clavibacter} \code{Clostridium} \code{Collimonas} \code{Colwellia} \code{Comamonas} \code{Conexibacter} \code{Coprothermobacter} \code{Coraliomargarita} \code{Corallococcus} \code{Coriobacterium} \code{Corynebacterium} \code{Coxiella} \code{Croceibacter} \code{Cronobacter} \code{Cryptobacterium} \code{Cupriavidus} \code{Cyanothece} \code{Cyclobacterium} \code{Cytophaga} \code{Dechloromonas} \code{Deferribacter} \code{Dehalococcoides} \code{Dehalogenimonas} \code{Deinococcus} \code{Delftia} \code{Denitrovibrio} \code{Desulfarculus} \code{Desulfatibacillum} \code{Desulfitobacterium} \code{Desulfobacca} \code{Desulfobacterium} \code{Desulfobulbus} \code{Desulfococcus} \code{Desulfohalobium} \code{Desulfomicrobium} \code{Desulfomonile} \code{Desulfosporosinus} \code{Desulfotalea} \code{Desulfotomaculum} \code{Desulfovibrio} \code{Desulfurispirillum} \code{Desulfurivibrio} 
\code{Desulfurobacterium} \code{Desulfurococcus} \code{Dichelobacter} \code{Dickeya} \code{Dictyoglomus} \code{Dinoroseobacter} \code{Dyadobacter} \code{Edwardsiella} \code{Eggerthella} \code{Ehrlichia} \code{Elusimicrobium} \code{Enterobacter} \code{Enterococcus} \code{Erwinia} \code{Erysipelothrix} \code{Erythrobacter} \code{Escherichia} \code{Ethanoligenens} \code{Eubacterium} \code{Exiguobacterium} \code{Ferrimonas} \code{Ferroglobus} \code{Fervidicoccus} \code{Fervidobacterium} \code{Fibrobacter} \code{Filifactor} \code{Finegoldia} \code{Flavobacterium} \code{Flexibacter} \code{Flexistipes} \code{Fluviicola} \code{Francisella} \code{Frankia} \code{Frateuria} \code{Fusobacterium} \code{Gallibacterium} \code{Gallionella} \code{Gardnerella} \code{Gemmatimonas} \code{Geobacillus} \code{Geobacter} \code{Geodermatophilus} \code{Glaciecola} \code{Gloeobacter} \code{Gluconacetobacter} \code{Gluconobacter} \code{Gordonia} \code{Gramella} \code{Granulibacter} \code{Granulicella} \code{Haemophilus} \code{Hahella} \code{Halalkalicoccus} \code{Halanaerobium} \code{Haliangium} \code{Haliscomenobacter} \code{Haloarcula} \code{Halobacillus} \code{Halobacterium} \code{Haloferax} \code{Halogeometricum} \code{Halomicrobium} \code{Halomonas} \code{Halopiger} \code{Haloquadratum} \code{Halorhabdus} \code{Halorhodospira} \code{Halorubrum} \code{Haloterrigena} \code{Halothermothrix} \code{Halothiobacillus} \code{Helicobacter} \code{Heliobacterium} \code{Herbaspirillum} \code{Herpetosiphon} \code{Hippea} \code{Hirschia} \code{Histophilus} \code{Hydrogenobacter} \code{Hydrogenobaculum} \code{Hyperthermus} \code{Hyphomicrobium} \code{Hyphomonas} \code{Idiomarina} \code{Ignavibacterium} \code{Ignicoccus} \code{Ignisphaera} \code{Ilyobacter} \code{Intrasporangium} \code{Isoptericola} \code{Isosphaera} \code{Jannaschia} \code{Janthinobacterium} \code{Jonesia} \code{Kangiella} \code{Ketogulonicigenium} \code{Kineococcus} \code{Kitasatospora} \code{Klebsiella} \code{Kocuria} 
\code{Kosmotoga} \code{Kribbella} \code{Krokinobacter} \code{Kyrpidia} \code{Kytococcus} \code{Lacinutrix} \code{Lactobacillus} \code{Lactococcus} \code{Laribacter} \code{Lawsonia} \code{Leadbetterella} \code{Legionella} \code{Leifsonia} \code{Leptospira} \code{Leptospirillum} \code{Leptothrix} \code{Leptotrichia} \code{Leuconostoc} \code{Listeria} \code{Lysinibacillus} \code{Macrococcus} \code{Magnetococcus} \code{Magnetospirillum} \code{Mahella} \code{Maribacter} \code{Maricaulis} \code{Marinithermus} \code{Marinitoga} \code{Marinobacter} \code{Marinomonas} \code{Marivirga} \code{Meiothermus} \code{Melissococcus} \code{Mesoplasma} \code{Mesorhizobium} \code{Mesotoga} \code{Metallosphaera} \code{Methanobacterium} \code{Methanobrevibacter} \code{Methanocaldococcus} \code{Methanocella} \code{Methanococcoides} \code{Methanococcus} \code{Methanocorpusculum} \code{Methanoculleus} \code{Methanohalobium} \code{Methanohalophilus} \code{Methanoplanus} \code{Methanopyrus} \code{Methanoregula} \code{Methanosaeta} \code{Methanosalsum} \code{Methanosarcina} \code{Methanosphaera} \code{Methanosphaerula} \code{Methanospirillum} \code{Methanothermobacter} \code{Methanothermococcus} \code{Methanothermus} \code{Methanotorris} \code{Methylacidiphilum} \code{Methylibium} \code{Methylobacillus} \code{Methylobacterium} \code{Methylocella} \code{Methylococcus} \code{Methylomonas} \code{Methylophaga} \code{Methylotenera} \code{Methylovorus} \code{Micavibrio} \code{Microbacterium} \code{Micrococcus} \code{Microcystis} \code{Microlunatus} \code{Micromonospora} \code{Mobiluncus} \code{Modestobacter} \code{Moorella} \code{Moraxella} \code{Muricauda} \code{Mycobacterium} \code{Mycoplasma} \code{Myxococcus} \code{Nakamurella} \code{Nanoarchaeum} \code{Natranaerobius} \code{Natrialba} \code{Natrinema} \code{Natronomonas} \code{Nautilia} \code{Neisseria} \code{Neorickettsia} \code{Niastella} \code{Nitratifractor} \code{Nitratiruptor} \code{Nitrobacter} \code{Nitrosococcus} \code{Nitrosomonas} 
\code{Nitrosopumilus} \code{Nitrosospira} \code{Nitrospira} \code{Nocardia} \code{Nocardioides} \code{Nocardiopsis} \code{Nostoc} \code{Novosphingobium} \code{Oceanimonas} \code{Oceanithermus} \code{Oceanobacillus} \code{Ochrobactrum} \code{Odoribacter} \code{Oenococcus} \code{Oligotropha} \code{Olsenella} \code{Opitutus} \code{Orientia} \code{Ornithobacterium} \code{Oscillibacter} \code{Owenweeksia} \code{Paenibacillus} \code{Paludibacter} \code{Pantoea} \code{Parabacteroides} \code{Parachlamydia} \code{Paracoccus} \code{Parvibaculum} \code{Parvularcula} \code{Pasteurella} \code{Pectobacterium} \code{Pediococcus} \code{Pedobacter} \code{Pelobacter} \code{Pelodictyon} \code{Pelotomaculum} \code{Persephonella} \code{Petrotoga} \code{Phaeobacter} \code{Phenylobacterium} \code{Photobacterium} \code{Photorhabdus} \code{Phycisphaera} \code{Picrophilus} \code{Pirellula} \code{Planctomyces} \code{Polaromonas} \code{Polynucleobacter} \code{Porphyromonas} \code{Prevotella} \code{Prochlorococcus} \code{Propionibacterium} \code{Prosthecochloris} \code{Proteus} \code{Providencia} \code{Pseudoalteromonas} \code{Pseudogulbenkiania} \code{Pseudomonas} \code{Pseudonocardia} \code{Pseudovibrio} \code{Pseudoxanthomonas} \code{Psychrobacter} \code{Psychromonas} \code{Pusillimonas} \code{Pyrobaculum} \code{Pyrococcus} \code{Pyrolobus} \code{Rahnella} \code{Ralstonia} \code{Ramlibacter} \code{Renibacterium} \code{Rhizobium} \code{Rhodobacter} \code{Rhodococcus} \code{Rhodomicrobium} \code{Rhodopirellula} \code{Rhodopseudomonas} \code{Rhodospirillum} \code{Rhodothermus} \code{Rickettsia} \code{Riemerella} \code{Robiginitalea} \code{Roseburia} \code{Roseiflexus} \code{Roseobacter} \code{Rothia} \code{Rubrivivax} \code{Rubrobacter} \code{Ruegeria} \code{Ruminococcus} \code{Runella} \code{Saccharomonospora} \code{Saccharophagus} \code{Saccharopolyspora} \code{Salinibacter} \code{Salinispora} \code{Salmonella} \code{Sanguibacter} \code{Saprospira} \code{Sebaldella} \code{Segniliparus} 
\code{Selenomonas} \code{Serratia} \code{Shewanella} \code{Shigella} \code{Shimwellia} \code{Sideroxydans} \code{Simkania} \code{Sinorhizobium} \code{Slackia} \code{Sodalis} \code{Solibacillus} \code{Sorangium} \code{Sphaerobacter} \code{Sphaerochaeta} \code{Sphingobacterium} \code{Sphingobium} \code{Sphingomonas} \code{Sphingopyxis} \code{Spirochaeta} \code{Spirosoma} \code{Stackebrandtia} \code{Staphylococcus} \code{Staphylothermus} \code{Starkeya} \code{Stenotrophomonas} \code{Stigmatella} \code{Streptobacillus} \code{Streptococcus} \code{Streptomyces} \code{Streptosporangium} \code{Sulfobacillus} \code{Sulfolobus} \code{Sulfuricurvum} \code{Sulfurihydrogenibium} \code{Sulfurimonas} \code{Sulfurospirillum} \code{Sulfurovum} \code{Symbiobacterium} \code{Synechococcus} \code{Synechocystis} \code{Syntrophobacter} \code{Syntrophobotulus} \code{Syntrophomonas} \code{Syntrophothermus} \code{Syntrophus} \code{Tannerella} \code{Taylorella} \code{Tepidanaerobacter} \code{Teredinibacter} \code{Terriglobus} \code{Tetragenococcus} \code{Thauera} \code{Thermaerobacter} \code{Thermanaerovibrio} \code{Thermincola} \code{Thermoanaerobacter} \code{Thermoanaerobacterium} \code{Thermobaculum} \code{Thermobifida} \code{Thermobispora} \code{Thermococcus} \code{Thermocrinis} \code{Thermodesulfatator} \code{Thermodesulfobacterium} \code{Thermodesulfobium} \code{Thermodesulfovibrio} \code{Thermofilum} \code{Thermogladius} \code{Thermomicrobium} \code{Thermomonospora} \code{Thermoplasma} \code{Thermoproteus} \code{Thermosediminibacter} \code{Thermosipho} \code{Thermosphaera} \code{Thermosynechococcus} \code{Thermotoga} \code{Thermovibrio} \code{Thermovirga} \code{Thermus} \code{Thioalkalimicrobium} \code{Thioalkalivibrio} \code{Thiobacillus} \code{Thiocystis} \code{Thiomicrospira} \code{Thiomonas} \code{Tistrella} \code{Tolumonas} \code{Treponema} \code{Trichodesmium} \code{Trichormus} \code{Tropheryma} \code{Truepera} \code{Tsukamurella} \code{Turneriella} \code{Ureaplasma} 
\code{Variovorax} \code{Veillonella} \code{Verminephrobacter} \code{Verrucosispora} \code{Vibrio} \code{Vulcanisaeta} \code{Waddlia} \code{Weeksella} \code{Weissella} \code{Wigglesworthia} \code{Wolbachia} \code{Wolinella} \code{Xanthobacter} \code{Xanthomonas} \code{Xenorhabdus} \code{Xylanimonas} \code{Xylella} \code{Yersinia} \code{Zobellia} \code{Zunongwangia} \code{Zymomonas}}
\item{\code{Species}}{a factor with levels \code{Acaryochloris marina} \code{Acetobacterium woodii} \code{Acetobacter pasteurianus} \code{Acetohalobium arabaticum} \code{Acholeplasma laidlawii} \code{Achromobacter xylosoxidans} \code{Acidaminococcus fermentans} \code{Acidaminococcus intestini} \code{Acidianus hospitalis} \code{Acidilobus saccharovorans} \code{Acidimicrobium ferrooxidans} \code{Acidiphilium cryptum} \code{Acidiphilium multivorum} \code{Acidithiobacillus caldus} \code{Acidithiobacillus ferrivorans} \code{Acidithiobacillus ferrooxidans} \code{Acidobacterium capsulatum} \code{Acidothermus cellulolyticus} \code{Acidovorax avenae} \code{Acidovorax citrulli} \code{Acidovorax ebreus} \code{Acidovorax sp. JS42} \code{Aciduliprofundum boonei} \code{Acinetobacter baumannii} \code{Acinetobacter calcoaceticus} \code{Acinetobacter oleivorans} \code{Actinobacillus pleuropneumoniae} \code{Actinobacillus succinogenes} \code{Actinoplanes missouriensis} \code{Actinoplanes sp. SE50/110} \code{Actinosynnema mirum} \code{Advenella kashmirensis} \code{Aequorivita sublithincola} \code{Aerococcus urinae} \code{Aeromonas hydrophila} \code{Aeromonas salmonicida} \code{Aeromonas veronii} \code{Aeropyrum pernix} \code{Aggregatibacter actinomycetemcomitans} \code{Aggregatibacter aphrophilus} \code{Agrobacterium fabrum} \code{Agrobacterium sp. H13-3} \code{Agrobacterium vitis} \code{Akkermansia muciniphila} \code{Albidiferax ferrireducens} \code{Alcanivorax borkumensis} \code{Alicycliphilus denitrificans} \code{Alicyclobacillus acidocaldarius} \code{Aliivibrio fischeri} \code{Aliivibrio salmonicida} \code{Alistipes finegoldii} \code{Alkalilimnicola ehrlichii} \code{Alkaliphilus metalliredigens} \code{Alkaliphilus oremlandii} \code{Allochromatium vinosum} \code{Alteromonas macleodii} \code{Alteromonas sp. 
SN2} \code{Aminobacterium colombiense} \code{Ammonifex degensii} \code{Amycolatopsis mediterranei} \code{Amycolicicoccus subflavus} \code{Anabaena variabilis} \code{Anaerobaculum mobile} \code{Anaerococcus prevotii} \code{Anaerolinea thermophila} \code{Anaeromyxobacter dehalogenans} \code{Anaeromyxobacter sp. Fw109-5} \code{Anaeromyxobacter sp. K} \code{Anaplasma centrale} \code{Anaplasma marginale} \code{Anaplasma phagocytophilum} \code{Anoxybacillus flavithermus} \code{Aquifex aeolicus} \code{Arcanobacterium haemolyticum} \code{Archaeoglobus fulgidus} \code{Archaeoglobus profundus} \code{Archaeoglobus veneficus} \code{Arcobacter butzleri} \code{Arcobacter nitrofigilis} \code{Arcobacter sp. L} \code{Aromatoleum aromaticum} \code{Arthrobacter arilaitensis} \code{Arthrobacter aurescens} \code{Arthrobacter chlorophenolicus} \code{Arthrobacter phenanthrenivorans} \code{Arthrobacter sp. FB24} \code{Arthrobacter sp. Rue61a} \code{Aster yellows witches'-broom phytoplasma} \code{Asticcacaulis excentricus} \code{Atopobium parvulum} \code{Azoarcus sp. BH72} \code{Azorhizobium caulinodans} \code{Azospira oryzae} \code{Azospirillum lipoferum} \code{Azotobacter vinelandii} \code{Bacillus amyloliquefaciens} \code{Bacillus anthracis} \code{Bacillus atrophaeus} \code{Bacillus cellulosilyticus} \code{Bacillus cereus} \code{Bacillus coagulans} \code{Bacillus cytotoxicus} \code{Bacillus halodurans} \code{Bacillus licheniformis} \code{Bacillus megaterium} \code{Bacillus pseudofirmus} \code{Bacillus pumilus} \code{Bacillus selenitireducens} \code{Bacillus sp. 
JS} \code{Bacillus subtilis} \code{Bacillus thuringiensis} \code{Bacillus weihenstephanensis} \code{Bacteriovorax marinus} \code{Bacteroides fragilis} \code{Bacteroides helcogenes} \code{Bacteroides salanitronis} \code{Bacteroides thetaiotaomicron} \code{Bacteroides vulgatus} \code{Bartonella bacilliformis} \code{Bartonella grahamii} \code{Bartonella henselae} \code{Bartonella quintana} \code{Bartonella tribocorum} \code{Bdellovibrio bacteriovorus} \code{Beijerinckia indica} \code{Belliella baltica} \code{Beutenbergia cavernae} \code{Bifidobacterium adolescentis} \code{Bifidobacterium animalis} \code{Bifidobacterium bifidum} \code{Bifidobacterium breve} \code{Bifidobacterium dentium} \code{Bifidobacterium longum} \code{Blattabacterium punctulatus} \code{Blattabacterium sp. (Blaberus giganteus)} \code{Blattabacterium sp. (Blattella germanica)} \code{Blattabacterium sp. (Mastotermes darwiniensis)} \code{Blattabacterium sp. (Periplaneta americana)} \code{Bordetella avium} \code{Bordetella bronchiseptica} \code{Bordetella parapertussis} \code{Bordetella pertussis} \code{Bordetella petrii} \code{Borrelia afzelii} \code{Borrelia bavariensis} \code{Borrelia bissettii} \code{Borrelia burgdorferi} \code{Borrelia crocidurae} \code{Borrelia duttonii} \code{Borrelia garinii} \code{Borrelia hermsii} \code{Borrelia recurrentis} \code{Borrelia turicatae} \code{Brachybacterium faecium} \code{Brachyspira hyodysenteriae} \code{Brachyspira intermedia} \code{Brachyspira murdochii} \code{Brachyspira pilosicoli} \code{Bradyrhizobium japonicum} \code{Bradyrhizobium sp. BTAi1} \code{Bradyrhizobium sp. 
S23321} \code{Brevibacillus brevis} \code{Brevundimonas subvibrioides} \code{Brucella abortus} \code{Brucella canis} \code{Brucella melitensis} \code{Brucella microti} \code{Brucella ovis} \code{Brucella pinnipedialis} \code{Brucella suis} \code{Buchnera aphidicola} \code{Burkholderia ambifaria} \code{Burkholderia cenocepacia} \code{Burkholderia cepacia} \code{Burkholderia gladioli} \code{Burkholderia glumae} \code{Burkholderia mallei} \code{Burkholderia multivorans} \code{Burkholderia phymatum} \code{Burkholderia phytofirmans} \code{Burkholderia pseudomallei} \code{Burkholderia rhizoxinica} \code{Burkholderia sp. 383} \code{Burkholderia sp. CCGE1001} \code{Burkholderia sp. CCGE1002} \code{Burkholderia sp. CCGE1003} \code{Burkholderia sp. KJ006} \code{Burkholderia sp. YI23} \code{Burkholderia thailandensis} \code{Burkholderia vietnamiensis} \code{Burkholderia xenovorans} \code{Butyrivibrio proteoclasticus} \code{Caldanaerobacter subterraneus} \code{Caldicellulosiruptor bescii} \code{Caldicellulosiruptor hydrothermalis} \code{Caldicellulosiruptor kristjanssonii} \code{Caldicellulosiruptor kronotskyensis} \code{Caldicellulosiruptor lactoaceticus} \code{Caldicellulosiruptor obsidiansis} \code{Caldicellulosiruptor owensensis} \code{Caldicellulosiruptor saccharolyticus} \code{Caldilinea aerophila} \code{Caldisericum exile} \code{Calditerrivibrio nitroreducens} \code{Caldivirga maquilingensis} \code{Calyptogena okutanii thioautotrophic gill symbiont} \code{Campylobacter concisus} \code{Campylobacter curvus} \code{Campylobacter fetus} \code{Campylobacter hominis} \code{Campylobacter jejuni} \code{Campylobacter lari} \code{Candidatus Accumulibacter phosphatis} \code{Candidatus Amoebophilus asiaticus} \code{Candidatus Arthromitus sp. SFB-mouse} \code{Candidatus Arthromitus sp. 
SFB-rat-Yit} \code{Candidatus Azobacteroides pseudotrichonymphae} \code{Candidatus Baumannia cicadellinicola} \code{Candidatus Blochmannia floridanus} \code{Candidatus Blochmannia pennsylvanicus} \code{Candidatus Blochmannia vafer} \code{Candidatus Carsonella ruddii} \code{Candidatus Chloracidobacterium thermophilum} \code{Candidatus Desulforudis audaxviator} \code{Candidatus Hamiltonella defensa} \code{Candidatus Hodgkinia cicadicola} \code{Candidatus Korarchaeum cryptofilum} \code{Candidatus Koribacter versatilis} \code{Candidatus Liberibacter asiaticus} \code{Candidatus Liberibacter solanacearum} \code{Candidatus Methylomirabilis oxyfera} \code{Candidatus Midichloria mitochondrii} \code{Candidatus Moranella endobia} \code{Candidatus Nitrospira defluvii} \code{Candidatus Pelagibacter sp. IMCC9063} \code{Candidatus Pelagibacter ubique} \code{Candidatus Phytoplasma australiense} \code{Candidatus Phytoplasma mali} \code{Candidatus Portiera aleyrodidarum} \code{Candidatus Protochlamydia amoebophila} \code{Candidatus Puniceispirillum marinum} \code{Candidatus Rickettsia amblyommii} \code{Candidatus Riesia pediculicola} \code{Candidatus Ruthia magnifica} \code{Candidatus Solibacter usitatus} \code{Candidatus Sulcia muelleri} \code{Candidatus Tremblaya princeps} \code{Candidatus Zinderia insecticola} \code{Capnocytophaga ochracea} \code{Carboxydothermus hydrogenoformans} \code{Carnobacterium sp. 17-4} \code{Catenulispora acidiphila} \code{Caulobacter segnis} \code{Caulobacter sp. K31} \code{Caulobacter vibrioides} \code{Cellulomonas fimi} \code{Cellulomonas flavigena} \code{Cellulophaga algicola} \code{Cellulophaga lytica} \code{Cellulosilyticum lentocellum} \code{[Cellvibrio] gilvus} \code{Cellvibrio japonicus} \code{Cenarchaeum symbiosum} \code{Chelativorans sp. 
BNC1} \code{Chitinophaga pinensis} \code{Chlamydia muridarum} \code{Chlamydia pecorum} \code{Chlamydia pneumoniae} \code{Chlamydia psittaci} \code{Chlamydia trachomatis} \code{Chlamydophila abortus} \code{Chlamydophila caviae} \code{Chlamydophila felis} \code{Chlorobaculum parvum} \code{Chlorobaculum tepidum} \code{Chlorobium chlorochromatii} \code{Chlorobium limicola} \code{Chlorobium phaeobacteroides} \code{Chlorobium phaeovibrioides} \code{Chloroflexus aggregans} \code{Chloroflexus aurantiacus} \code{Chloroflexus sp. Y-400-fl} \code{Chloroherpeton thalassium} \code{Chromobacterium violaceum} \code{Chromohalobacter salexigens} \code{Citrobacter koseri} \code{Citrobacter rodentium} \code{Clavibacter michiganensis} \code{Clostridiales genomosp. BVAB3} \code{Clostridium acetobutylicum} \code{Clostridium beijerinckii} \code{Clostridium botulinum} \code{Clostridium cellulolyticum} \code{Clostridium cellulovorans} \code{Clostridium clariflavum} \code{[Clostridium] difficile} \code{Clostridium kluyveri} \code{Clostridium ljungdahlii} \code{Clostridium novyi} \code{Clostridium perfringens} \code{Clostridium phytofermentans} \code{Clostridium saccharolyticum} \code{Clostridium sp. BNL1100} \code{Clostridium sp. 
SY8519} \code{Clostridium tetani} \code{Clostridium thermocellum} \code{Collimonas fungivorans} \code{Colwellia psychrerythraea} \code{Comamonas testosteroni} \code{Conexibacter woesei} \code{Coprothermobacter proteolyticus} \code{Coraliomargarita akajimensis} \code{Corallococcus coralloides} \code{Coriobacterium glomerans} \code{Corynebacterium aurimucosum} \code{Corynebacterium diphtheriae} \code{Corynebacterium efficiens} \code{Corynebacterium glutamicum} \code{Corynebacterium jeikeium} \code{Corynebacterium kroppenstedtii} \code{Corynebacterium pseudotuberculosis} \code{Corynebacterium resistens} \code{Corynebacterium ulcerans} \code{Corynebacterium urealyticum} \code{Corynebacterium variabile} \code{Coxiella burnetii} \code{Croceibacter atlanticus} \code{Cronobacter sakazakii} \code{Cronobacter turicensis} \code{Cryptobacterium curtum} \code{Cupriavidus metallidurans} \code{Cupriavidus necator} \code{Cupriavidus pinatubonensis} \code{cyanobacterium UCYN-A} \code{Cyanothece sp. ATCC 51142} \code{Cyanothece sp. PCC 7424} \code{Cyanothece sp. PCC 7425} \code{Cyanothece sp. PCC 7822} \code{Cyanothece sp. PCC 8801} \code{Cyanothece sp. PCC 8802} \code{Cyclobacterium marinum} \code{Cytophaga hutchinsonii} \code{Dechloromonas aromatica} \code{Deferribacter desulfuricans} \code{Dehalococcoides mccartyi} \code{Dehalogenimonas lykanthroporepellens} \code{Deinococcus deserti} \code{Deinococcus geothermalis} \code{Deinococcus gobiensis} \code{Deinococcus maricopensis} \code{Deinococcus proteolyticus} \code{Deinococcus radiodurans} \code{Delftia acidovorans} \code{Delftia sp. 
Cs1-4} \code{Denitrovibrio acetiphilus} \code{Desulfarculus baarsii} \code{Desulfatibacillum alkenivorans} \code{Desulfitobacterium dehalogenans} \code{Desulfitobacterium hafniense} \code{Desulfobacca acetoxidans} \code{Desulfobacterium autotrophicum} \code{Desulfobulbus propionicus} \code{Desulfococcus oleovorans} \code{Desulfohalobium retbaense} \code{Desulfomicrobium baculatum} \code{Desulfomonile tiedjei} \code{Desulfosporosinus acidiphilus} \code{Desulfosporosinus meridiei} \code{Desulfosporosinus orientis} \code{Desulfotalea psychrophila} \code{Desulfotomaculum acetoxidans} \code{Desulfotomaculum carboxydivorans} \code{Desulfotomaculum kuznetsovii} \code{Desulfotomaculum reducens} \code{Desulfotomaculum ruminis} \code{Desulfovibrio aespoeensis} \code{Desulfovibrio africanus} \code{Desulfovibrio alaskensis} \code{Desulfovibrio desulfuricans} \code{Desulfovibrio magneticus} \code{Desulfovibrio salexigens} \code{Desulfovibrio vulgaris} \code{Desulfurispirillum indicum} \code{Desulfurivibrio alkaliphilus} \code{Desulfurobacterium thermolithotrophum} \code{Desulfurococcus fermentans} \code{Desulfurococcus kamchatkensis} \code{Desulfurococcus mucosus} \code{Dichelobacter nodosus} \code{Dickeya dadantii} \code{Dickeya zeae} \code{Dictyoglomus thermophilum} \code{Dictyoglomus turgidum} \code{Dinoroseobacter shibae} \code{Dyadobacter fermentans} \code{Edwardsiella ictaluri} \code{Edwardsiella tarda} \code{Eggerthella lenta} \code{Eggerthella sp. YY7918} \code{Ehrlichia canis} \code{Ehrlichia chaffeensis} \code{Ehrlichia ruminantium} \code{Elusimicrobium minutum} \code{Enterobacter aerogenes} \code{Enterobacter asburiae} \code{Enterobacter cloacae} \code{Enterobacter lignolyticus} \code{Enterobacter sp. 638} \code{Enterococcus faecalis} \code{Enterococcus faecium} \code{Enterococcus hirae} \code{Erwinia amylovora} \code{Erwinia billingiae} \code{Erwinia pyrifoliae} \code{Erwinia sp. 
Ejp617} \code{Erwinia tasmaniensis} \code{Erysipelothrix rhusiopathiae} \code{Erythrobacter litoralis} \code{Escherichia coli} \code{Ethanoligenens harbinense} \code{Eubacterium eligens} \code{Eubacterium limosum} \code{Eubacterium rectale} \code{Exiguobacterium sibiricum} \code{Exiguobacterium sp. AT1b} \code{Ferrimonas balearica} \code{Ferroglobus placidus} \code{Fervidicoccus fontis} \code{Fervidobacterium nodosum} \code{Fervidobacterium pennivorans} \code{Fibrobacter succinogenes} \code{Filifactor alocis} \code{Finegoldia magna} \code{Flavobacteriaceae bacterium 3519-10} \code{Flavobacterium branchiophilum} \code{Flavobacterium columnare} \code{Flavobacterium indicum} \code{Flavobacterium johnsoniae} \code{Flavobacterium psychrophilum} \code{Flexibacter litoralis} \code{Flexistipes sinusarabici} \code{Fluviicola taffensis} \code{Francisella noatunensis} \code{Francisella novicida} \code{Francisella philomiragia} \code{Francisella sp. TX077308} \code{Francisella tularensis} \code{Frankia sp. CcI3} \code{Frankia sp. EAN1pec} \code{Frankia sp. EuI1c} \code{Frankia symbiont of Datisca glomerata} \code{Frateuria aurantia} \code{Fusobacterium nucleatum} \code{Gallibacterium anatis} \code{Gallionella capsiferriformans} \code{gamma proteobacterium HdN1} \code{Gardnerella vaginalis} \code{Gemmatimonas aurantiaca} \code{Geobacillus kaustophilus} \code{Geobacillus sp. C56-T3} \code{Geobacillus sp. WCH70} \code{Geobacillus sp. Y412MC52} \code{Geobacillus sp. Y412MC61} \code{Geobacillus sp. Y4.1MC1} \code{Geobacillus thermodenitrificans} \code{Geobacillus thermoglucosidasius} \code{Geobacillus thermoleovorans} \code{Geobacter bemidjiensis} \code{Geobacter daltonii} \code{Geobacter lovleyi} \code{Geobacter metallireducens} \code{Geobacter sp. M18} \code{Geobacter sp. M21} \code{Geobacter sulfurreducens} \code{Geobacter uraniireducens} \code{Geodermatophilus obscurus} \code{Glaciecola nitratireducens} \code{Glaciecola sp. 
4H-3-7+YE-5} \code{Gloeobacter violaceus} \code{Gluconacetobacter diazotrophicus} \code{Gluconacetobacter xylinus} \code{Gluconobacter oxydans} \code{Gordonia bronchialis} \code{Gordonia polyisoprenivorans} \code{Gordonia sp. KTR9} \code{Gramella forsetii} \code{Granulibacter bethesdensis} \code{Granulicella mallensis} \code{Granulicella tundricola} \code{Haemophilus ducreyi} \code{Haemophilus influenzae} \code{Haemophilus parainfluenzae} \code{Haemophilus parasuis} \code{Hahella chejuensis} \code{Halalkalicoccus jeotgali} \code{Halanaerobium hydrogeniformans} \code{Halanaerobium praevalens} \code{Haliangium ochraceum} \code{Haliscomenobacter hydrossis} \code{Haloarcula hispanica} \code{Haloarcula marismortui} \code{Halobacillus halophilus} \code{Halobacterium salinarum} \code{Haloferax mediterranei} \code{Haloferax volcanii} \code{Halogeometricum borinquense} \code{Halomicrobium mukohataei} \code{Halomonas elongata} \code{halophilic archaeon DL31} \code{Halopiger xanaduensis} \code{Haloquadratum walsbyi} \code{Halorhabdus utahensis} \code{Halorhodospira halophila} \code{Halorubrum lacusprofundi} \code{Haloterrigena turkmenica} \code{Halothermothrix orenii} \code{Halothiobacillus neapolitanus} \code{Helicobacter acinonychis} \code{Helicobacter bizzozeronii} \code{Helicobacter cetorum} \code{Helicobacter cinaedi} \code{Helicobacter felis} \code{Helicobacter hepaticus} \code{Helicobacter mustelae} \code{Helicobacter pylori} \code{Heliobacterium modesticaldum} \code{Herbaspirillum seropedicae} \code{Herpetosiphon aurantiacus} \code{Hippea maritima} \code{Hirschia baltica} \code{Histophilus somni} \code{Hydrogenobacter thermophilus} \code{Hydrogenobaculum sp. 
Y04AAS1} \code{Hyperthermus butylicus} \code{Hyphomicrobium denitrificans} \code{Hyphomonas neptunium} \code{Idiomarina loihiensis} \code{Ignavibacterium album} \code{Ignicoccus hospitalis} \code{Ignisphaera aggregans} \code{Ilyobacter polytropus} \code{Intrasporangium calvum} \code{Isoptericola variabilis} \code{Isosphaera pallida} \code{Jannaschia sp. CCS1} \code{Janthinobacterium sp. Marseille} \code{Jonesia denitrificans} \code{Kangiella koreensis} \code{Ketogulonicigenium vulgare} \code{Kineococcus radiotolerans} \code{Kitasatospora setae} \code{Klebsiella oxytoca} \code{Klebsiella pneumoniae} \code{Klebsiella variicola} \code{Kocuria rhizophila} \code{Kosmotoga olearia} \code{Kribbella flavida} \code{Krokinobacter sp. 4H-3-7-5} \code{Kyrpidia tusciae} \code{Kytococcus sedentarius} \code{Lacinutrix sp. 5H-3-7-4} \code{Lactobacillus acidophilus} \code{Lactobacillus amylovorus} \code{Lactobacillus brevis} \code{Lactobacillus buchneri} \code{Lactobacillus casei} \code{Lactobacillus crispatus} \code{Lactobacillus delbrueckii} \code{Lactobacillus fermentum} \code{Lactobacillus gasseri} \code{Lactobacillus helveticus} \code{Lactobacillus johnsonii} \code{Lactobacillus kefiranofaciens} \code{Lactobacillus plantarum} \code{Lactobacillus reuteri} \code{Lactobacillus rhamnosus} \code{Lactobacillus ruminis} \code{Lactobacillus sakei} \code{Lactobacillus salivarius} \code{Lactobacillus sanfranciscensis} \code{Lactococcus garvieae} \code{Lactococcus lactis} \code{Laribacter hongkongensis} \code{Lawsonia intracellularis} \code{Leadbetterella byssophila} \code{Legionella pneumophila} \code{Leifsonia xyli} \code{Leptospira biflexa} \code{Leptospira borgpetersenii} \code{Leptospira interrogans} \code{Leptospirillum ferrooxidans} \code{Leptothrix cholodnii} \code{Leptotrichia buccalis} \code{Leuconostoc citreum} \code{Leuconostoc gasicomitatum} \code{Leuconostoc kimchii} \code{Leuconostoc mesenteroides} \code{Leuconostoc sp. 
C2} \code{Listeria innocua} \code{Listeria ivanovii} \code{Listeria monocytogenes} \code{Listeria seeligeri} \code{Listeria welshimeri} \code{Lysinibacillus sphaericus} \code{Macrococcus caseolyticus} \code{Magnetococcus marinus} \code{Magnetospirillum magneticum} \code{Mahella australiensis} \code{Mannheimia succiniciproducens} \code{Maribacter sp. HTCC2170} \code{Maricaulis maris} \code{Marinithermus hydrothermalis} \code{Marinitoga piezophila} \code{Marinobacter adhaerens} \code{Marinobacter hydrocarbonoclasticus} \code{Marinomonas mediterranea} \code{Marinomonas posidonica} \code{Marinomonas sp. MWYL1} \code{Marivirga tractuosa} \code{Meiothermus ruber} \code{Meiothermus silvanus} \code{Melissococcus plutonius} \code{Mesoplasma florum} \code{Mesorhizobium ciceri} \code{Mesorhizobium loti} \code{Mesorhizobium opportunistum} \code{Mesotoga prima} \code{Metallosphaera cuprina} \code{Metallosphaera sedula} \code{Methanobacterium sp. AL-21} \code{Methanobacterium sp. SWAN-1} \code{Methanobrevibacter ruminantium} \code{Methanobrevibacter smithii} \code{Methanocaldococcus fervens} \code{Methanocaldococcus infernus} \code{Methanocaldococcus jannaschii} \code{Methanocaldococcus sp. 
FS406-22} \code{Methanocaldococcus vulcanius} \code{Methanocella arvoryzae} \code{Methanocella conradii} \code{Methanocella paludicola} \code{Methanococcoides burtonii} \code{Methanococcus aeolicus} \code{Methanococcus maripaludis} \code{Methanococcus vannielii} \code{Methanococcus voltae} \code{Methanocorpusculum labreanum} \code{Methanoculleus bourgensis} \code{Methanoculleus marisnigri} \code{Methanohalobium evestigatum} \code{Methanohalophilus mahii} \code{Methanoplanus petrolearius} \code{Methanopyrus kandleri} \code{Methanoregula boonei} \code{Methanosaeta concilii} \code{Methanosaeta harundinacea} \code{Methanosaeta thermophila} \code{Methanosalsum zhilinae} \code{Methanosarcina acetivorans} \code{Methanosarcina barkeri} \code{Methanosarcina mazei} \code{Methanosphaera stadtmanae} \code{Methanosphaerula palustris} \code{Methanospirillum hungatei} \code{Methanothermobacter marburgensis} \code{Methanothermobacter thermautotrophicus} \code{Methanothermococcus okinawensis} \code{Methanothermus fervidus} \code{Methanotorris igneus} \code{Methylacidiphilum infernorum} \code{Methylibium petroleiphilum} \code{Methylobacillus flagellatus} \code{Methylobacterium extorquens} \code{Methylobacterium nodulans} \code{Methylobacterium populi} \code{Methylobacterium radiotolerans} \code{Methylobacterium sp. 4-46} \code{Methylocella silvestris} \code{Methylococcus capsulatus} \code{Methylomonas methanica} \code{Methylophaga sp. JAM1} \code{Methylophaga sp. JAM7} \code{Methylotenera mobilis} \code{Methylotenera versatilis} \code{Methylovorus glucosotrophus} \code{Methylovorus sp. MP688} \code{Micavibrio aeruginosavorus} \code{Microbacterium testaceum} \code{Micrococcus luteus} \code{Microcystis aeruginosa} \code{Microlunatus phosphovorus} \code{Micromonospora aurantiaca} \code{Micromonospora sp. 
L5} \code{Mobiluncus curtisii} \code{Modestobacter marinus} \code{Moorella thermoacetica} \code{Moraxella catarrhalis} \code{Muricauda ruestringensis} \code{Mycobacterium abscessus} \code{Mycobacterium africanum} \code{Mycobacterium avium} \code{Mycobacterium bovis} \code{Mycobacterium canettii} \code{Mycobacterium chubuense} \code{Mycobacterium gilvum} \code{Mycobacterium intracellulare} \code{Mycobacterium leprae} \code{Mycobacterium marinum} \code{Mycobacterium smegmatis} \code{Mycobacterium sp. JDM601} \code{Mycobacterium sp. JLS} \code{Mycobacterium sp. KMS} \code{Mycobacterium sp. MCS} \code{Mycobacterium sp. MOTT36Y} \code{Mycobacterium tuberculosis} \code{Mycobacterium ulcerans} \code{Mycobacterium vanbaalenii} \code{Mycoplasma agalactiae} \code{Mycoplasma arthritidis} \code{Mycoplasma bovis} \code{Mycoplasma capricolum} \code{Mycoplasma conjunctivae} \code{Mycoplasma crocodyli} \code{Mycoplasma fermentans} \code{Mycoplasma gallisepticum} \code{Mycoplasma genitalium} \code{Mycoplasma haemocanis} \code{Mycoplasma haemofelis} \code{Mycoplasma hominis} \code{Mycoplasma hyopneumoniae} \code{Mycoplasma hyorhinis} \code{Mycoplasma leachii} \code{Mycoplasma mobile} \code{Mycoplasma mycoides} \code{Mycoplasma pneumoniae} \code{Mycoplasma pulmonis} \code{Mycoplasma putrefaciens} \code{Mycoplasma suis} \code{Mycoplasma synoviae} \code{Mycoplasma wenyonii} \code{Myxococcus fulvus} \code{Myxococcus xanthus} \code{Nakamurella multipartita} \code{Nanoarchaeum equitans} \code{Natranaerobius thermophilus} \code{Natrialba magadii} \code{Natrinema sp. J7-2} \code{Natronomonas pharaonis} \code{Nautilia profundicola} \code{Neisseria gonorrhoeae} \code{Neisseria lactamica} \code{Neisseria meningitidis} \code{Neorickettsia risticii} \code{Neorickettsia sennetsu} \code{Niastella koreensis} \code{Nitratifractor salsuginis} \code{Nitratiruptor sp. 
SB155-2} \code{Nitrobacter hamburgensis} \code{Nitrobacter winogradskyi} \code{Nitrosococcus halophilus} \code{Nitrosococcus oceani} \code{Nitrosococcus watsonii} \code{Nitrosomonas europaea} \code{Nitrosomonas eutropha} \code{Nitrosomonas sp. AL212} \code{Nitrosomonas sp. Is79A3} \code{Nitrosopumilus maritimus} \code{Nitrosospira multiformis} \code{Nocardia farcinica} \code{Nocardioides sp. JS614} \code{Nocardiopsis alba} \code{Nocardiopsis dassonvillei} \code{Nostoc punctiforme} \code{Nostoc sp. PCC 7120} \code{Novosphingobium aromaticivorans} \code{Novosphingobium sp. PP1Y} \code{Oceanimonas sp. GK1} \code{Oceanithermus profundus} \code{Oceanobacillus iheyensis} \code{Ochrobactrum anthropi} \code{Odoribacter splanchnicus} \code{Oenococcus oeni} \code{Oligotropha carboxidovorans} \code{Olsenella uli} \code{Onion yellows phytoplasma} \code{Opitutus terrae} \code{Orientia tsutsugamushi} \code{Ornithobacterium rhinotracheale} \code{Oscillibacter valericigenes} \code{Owenweeksia hongkongensis} \code{Paenibacillus mucilaginosus} \code{Paenibacillus polymyxa} \code{Paenibacillus sp. JDR-2} \code{Paenibacillus sp. Y412MC10} \code{Paenibacillus terrae} \code{Paludibacter propionicigenes} \code{Pantoea ananatis} \code{Pantoea sp. 
At-9b} \code{Pantoea vagans} \code{Parabacteroides distasonis} \code{Parachlamydia acanthamoebae} \code{Paracoccus denitrificans} \code{Parvibaculum lavamentivorans} \code{Parvularcula bermudensis} \code{Pasteurella multocida} \code{Pectobacterium atrosepticum} \code{Pectobacterium carotovorum} \code{Pectobacterium wasabiae} \code{Pediococcus claussenii} \code{Pediococcus pentosaceus} \code{Pedobacter heparinus} \code{Pedobacter saltans} \code{Pelobacter carbinolicus} \code{Pelobacter propionicus} \code{Pelodictyon luteolum} \code{Pelodictyon phaeoclathratiforme} \code{Pelotomaculum thermopropionicum} \code{Persephonella marina} \code{Petrotoga mobilis} \code{Phaeobacter gallaeciensis} \code{Phenylobacterium zucineum} \code{Photobacterium profundum} \code{Photorhabdus asymbiotica} \code{Photorhabdus luminescens} \code{Phycisphaera mikurensis} \code{Picrophilus torridus} \code{Pirellula staleyi} \code{Planctomyces brasiliensis} \code{Planctomyces limnophilus} \code{Polaromonas naphthalenivorans} \code{Polaromonas sp. JS666} \code{Polymorphum gilvum} \code{Polynucleobacter necessarius} \code{Porphyromonas asaccharolytica} \code{Porphyromonas gingivalis} \code{Prevotella denticola} \code{Prevotella intermedia} \code{Prevotella melaninogenica} \code{Prevotella ruminicola} \code{Prochlorococcus marinus} \code{Propionibacterium acnes} \code{Propionibacterium freudenreichii} \code{Propionibacterium propionicum} \code{Prosthecochloris aestuarii} \code{Proteus mirabilis} \code{Providencia stuartii} \code{Pseudoalteromonas atlantica} \code{Pseudoalteromonas haloplanktis} \code{Pseudoalteromonas sp. SM9913} \code{Pseudogulbenkiania sp. NH8B} \code{Pseudomonas aeruginosa} \code{Pseudomonas brassicacearum} \code{Pseudomonas fluorescens} \code{Pseudomonas fulva} \code{Pseudomonas mendocina} \code{Pseudomonas protegens} \code{Pseudomonas putida} \code{Pseudomonas savastanoi} \code{Pseudomonas stutzeri} \code{Pseudomonas syringae} \code{Pseudomonas syringae group genomosp. 
3} \code{Pseudonocardia dioxanivorans} \code{Pseudovibrio sp. FO-BEG1} \code{Pseudoxanthomonas spadix} \code{Pseudoxanthomonas suwonensis} \code{Psychrobacter arcticus} \code{Psychrobacter cryohalolentis} \code{Psychrobacter sp. PRwf-1} \code{Psychromonas ingrahamii} \code{Pusillimonas sp. T7-7} \code{Pyrobaculum aerophilum} \code{Pyrobaculum arsenaticum} \code{Pyrobaculum calidifontis} \code{Pyrobaculum islandicum} \code{Pyrobaculum neutrophilum} \code{Pyrobaculum sp. 1860} \code{Pyrococcus abyssi} \code{Pyrococcus furiosus} \code{Pyrococcus horikoshii} \code{Pyrococcus sp. NA2} \code{Pyrococcus sp. ST04} \code{Pyrococcus yayanosii} \code{Pyrolobus fumarii} \code{Rahnella aquatilis} \code{Rahnella sp. Y9602} \code{Ralstonia pickettii} \code{Ralstonia solanacearum} \code{Ramlibacter tataouinensis} \code{Renibacterium salmoninarum} \code{Rhizobium leguminosarum} \code{Rhodobacter capsulatus} \code{Rhodobacter sphaeroides} \code{Rhodococcus equi} \code{Rhodococcus erythropolis} \code{Rhodococcus jostii} \code{Rhodococcus opacus} \code{Rhodomicrobium vannielii} \code{Rhodopirellula baltica} \code{Rhodopseudomonas palustris} \code{Rhodospirillum centenum} \code{Rhodospirillum photometricum} \code{Rhodospirillum rubrum} \code{Rhodothermus marinus} \code{Rickettsia africae} \code{Rickettsia akari} \code{Rickettsia australis} \code{Rickettsia bellii} \code{Rickettsia canadensis} \code{Rickettsia conorii} \code{Rickettsia felis} \code{Rickettsia heilongjiangensis} \code{Rickettsia japonica} \code{Rickettsia massiliae} \code{Rickettsia montanensis} \code{Rickettsia parkeri} \code{Rickettsia peacockii} \code{Rickettsia philipii} \code{Rickettsia prowazekii} \code{Rickettsia rhipicephali} \code{Rickettsia rickettsii} \code{Rickettsia slovaca} \code{Rickettsia typhi} \code{Riemerella anatipestifer} \code{Robiginitalea biformata} \code{Roseburia hominis} \code{Roseiflexus castenholzii} \code{Roseiflexus sp. 
RS-1} \code{Roseobacter denitrificans} \code{Roseobacter litoralis} \code{Rothia dentocariosa} \code{Rothia mucilaginosa} \code{Rubrivivax gelatinosus} \code{Rubrobacter xylanophilus} \code{Ruegeria pomeroyi} \code{Ruegeria sp. TM1040} \code{Ruminococcus albus} \code{Runella slithyformis} \code{Saccharomonospora viridis} \code{Saccharophagus degradans} \code{Saccharopolyspora erythraea} \code{Salinibacter ruber} \code{Salinispora arenicola} \code{Salinispora tropica} \code{Salmonella bongori} \code{Salmonella enterica} \code{Sanguibacter keddieii} \code{Saprospira grandis} \code{Sebaldella termitidis} \code{secondary endosymbiont of Ctenarytaina eucalypti} \code{Segniliparus rotundus} \code{Selenomonas ruminantium} \code{Selenomonas sputigena} \code{Serratia plymuthica} \code{Serratia proteamaculans} \code{Serratia sp. AS12} \code{Serratia sp. AS13} \code{Serratia symbiotica} \code{Shewanella amazonensis} \code{Shewanella baltica} \code{Shewanella denitrificans} \code{Shewanella frigidimarina} \code{Shewanella halifaxensis} \code{Shewanella loihica} \code{Shewanella oneidensis} \code{Shewanella pealeana} \code{Shewanella piezotolerans} \code{Shewanella putrefaciens} \code{Shewanella sediminis} \code{Shewanella sp. ANA-3} \code{Shewanella sp. MR-4} \code{Shewanella sp. MR-7} \code{Shewanella sp. W3-18-1} \code{Shewanella violacea} \code{Shewanella woodyi} \code{Shigella boydii} \code{Shigella dysenteriae} \code{Shigella flexneri} \code{Shigella sonnei} \code{Shimwellia blattae} \code{Sideroxydans lithotrophicus} \code{Simkania negevensis} \code{Sinorhizobium fredii} \code{Sinorhizobium medicae} \code{Sinorhizobium meliloti} \code{Slackia heliotrinireducens} \code{Sodalis glossinidius} \code{Solibacillus silvestris} \code{Sorangium cellulosum} \code{Sphaerobacter thermophilus} \code{Sphaerochaeta coccoides} \code{Sphaerochaeta globosa} \code{Sphaerochaeta pleomorpha} \code{Sphingobacterium sp. 
21} \code{Sphingobium chlorophenolicum} \code{Sphingobium japonicum} \code{Sphingobium sp. SYK-6} \code{Sphingomonas wittichii} \code{Sphingopyxis alaskensis} \code{Spirochaeta africana} \code{Spirochaeta smaragdinae} \code{Spirochaeta thermophila} \code{Spirosoma linguale} \code{Stackebrandtia nassauensis} \code{Staphylococcus aureus} \code{Staphylococcus carnosus} \code{Staphylococcus epidermidis} \code{Staphylococcus lugdunensis} \code{Staphylococcus pseudintermedius} \code{Staphylococcus saprophyticus} \code{Staphylothermus hellenicus} \code{Staphylothermus marinus} \code{Starkeya novella} \code{Stenotrophomonas maltophilia} \code{Stigmatella aurantiaca} \code{Streptobacillus moniliformis} \code{Streptococcus agalactiae} \code{Streptococcus dysgalactiae} \code{Streptococcus equi} \code{Streptococcus gallolyticus} \code{Streptococcus gordonii} \code{Streptococcus infantarius} \code{Streptococcus intermedius} \code{Streptococcus macedonicus} \code{Streptococcus mitis} \code{Streptococcus mutans} \code{Streptococcus oralis} \code{Streptococcus parasanguinis} \code{Streptococcus parauberis} \code{Streptococcus pasteurianus} \code{Streptococcus pneumoniae} \code{Streptococcus pseudopneumoniae} \code{Streptococcus pyogenes} \code{Streptococcus salivarius} \code{Streptococcus suis} \code{Streptococcus thermophilus} \code{Streptococcus uberis} \code{Streptomyces avermitilis} \code{Streptomyces bingchenggensis} \code{Streptomyces cattleya} \code{Streptomyces coelicolor} \code{Streptomyces flavogriseus} \code{Streptomyces griseus} \code{Streptomyces hygroscopicus} \code{Streptomyces scabiei} \code{Streptomyces sp. SirexAA-E} \code{Streptomyces violaceusniger} \code{Streptosporangium roseum} \code{Sulfobacillus acidophilus} \code{Sulfolobus acidocaldarius} \code{Sulfolobus islandicus} \code{Sulfolobus solfataricus} \code{Sulfolobus tokodaii} \code{Sulfuricurvum kujiense} \code{Sulfurihydrogenibium azorense} \code{Sulfurihydrogenibium sp. 
YO3AOP1} \code{Sulfurimonas autotrophica} \code{Sulfurimonas denitrificans} \code{Sulfurospirillum barnesii} \code{Sulfurospirillum deleyianum} \code{Sulfurovum sp. NBC37-1} \code{Symbiobacterium thermophilum} \code{Synechococcus elongatus} \code{Synechococcus sp. CC9311} \code{Synechococcus sp. CC9605} \code{Synechococcus sp. CC9902} \code{Synechococcus sp. JA-2-3B'a(2-13)} \code{Synechococcus sp. JA-3-3Ab} \code{Synechococcus sp. PCC 7002} \code{Synechococcus sp. RCC307} \code{Synechococcus sp. WH 7803} \code{Synechococcus sp. WH 8102} \code{Synechocystis sp. PCC 6803} \code{Syntrophobacter fumaroxidans} \code{Syntrophobotulus glycolicus} \code{Syntrophomonas wolfei} \code{Syntrophothermus lipocalidus} \code{Syntrophus aciditrophicus} \code{Tannerella forsythia} \code{Taylorella asinigenitalis} \code{Taylorella equigenitalis} \code{Tepidanaerobacter acetatoxydans} \code{Teredinibacter turnerae} \code{Terriglobus roseus} \code{Terriglobus saanensis} \code{Tetragenococcus halophilus} \code{Thauera sp. MZ1T} \code{Thermaerobacter marianensis} \code{Thermanaerovibrio acidaminovorans} \code{Thermincola potens} \code{Thermoanaerobacter brockii} \code{Thermoanaerobacter italicus} \code{Thermoanaerobacterium saccharolyticum} \code{Thermoanaerobacterium thermosaccharolyticum} \code{Thermoanaerobacterium xylanolyticum} \code{Thermoanaerobacter mathranii} \code{Thermoanaerobacter pseudethanolicus} \code{Thermoanaerobacter sp. X513} \code{Thermoanaerobacter sp. X514} \code{Thermoanaerobacter wiegelii} \code{Thermobaculum terrenum} \code{Thermobifida fusca} \code{Thermobispora bispora} \code{Thermococcus barophilus} \code{Thermococcus gammatolerans} \code{Thermococcus kodakarensis} \code{Thermococcus onnurineus} \code{Thermococcus sibiricus} \code{Thermococcus sp. 4557} \code{Thermococcus sp. 
CL1} \code{Thermocrinis albus} \code{Thermodesulfatator indicus} \code{Thermodesulfobacterium geofontis} \code{Thermodesulfobium narugense} \code{Thermodesulfovibrio yellowstonii} \code{Thermofilum pendens} \code{Thermogladius cellulolyticus} \code{Thermomicrobium roseum} \code{Thermomonospora curvata} \code{Thermoplasma acidophilum} \code{Thermoplasma volcanium} \code{Thermoproteus uzoniensis} \code{Thermosediminibacter oceani} \code{Thermosipho africanus} \code{Thermosipho melanesiensis} \code{Thermosphaera aggregans} \code{Thermosynechococcus elongatus} \code{Thermotoga lettingae} \code{Thermotoga maritima} \code{Thermotoga naphthophila} \code{Thermotoga petrophila} \code{Thermotoga sp. RQ2} \code{Thermotoga thermarum} \code{Thermovibrio ammonificans} \code{Thermovirga lienii} \code{Thermus scotoductus} \code{Thermus sp. CCB_US3_UF1} \code{Thermus thermophilus} \code{Thioalkalimicrobium cyclicum} \code{Thioalkalivibrio sp. K90mix} \code{Thioalkalivibrio sulfidophilus} \code{Thiobacillus denitrificans} \code{Thiocystis violascens} \code{Thiomicrospira crunogena} \code{Thiomonas intermedia} \code{Tistrella mobilis} \code{Tolumonas auensis} \code{Treponema azotonutricium} \code{Treponema brennaborense} \code{Treponema caldaria} \code{Treponema denticola} \code{Treponema pallidum} \code{Treponema paraluiscuniculi} \code{Treponema primitia} \code{Treponema succinifaciens} \code{Trichodesmium erythraeum} \code{Trichormus azollae} \code{Tropheryma whipplei} \code{Truepera radiovictrix} \code{Tsukamurella paurometabola} \code{Turneriella parva} \code{uncultured Termite group 1 bacterium} \code{Ureaplasma parvum} \code{Ureaplasma urealyticum} \code{Variovorax paradoxus} \code{Veillonella parvula} \code{Verminephrobacter eiseniae} \code{Verrucosispora maris} \code{Vibrio anguillarum} \code{Vibrio cholerae} \code{Vibrio furnissii} \code{Vibrio harveyi} \code{Vibrio parahaemolyticus} \code{Vibrio sp. EJY3} \code{Vibrio sp. 
Ex25} \code{Vibrio vulnificus} \code{Vulcanisaeta distributa} \code{Vulcanisaeta moutnovskia} \code{Waddlia chondrophila} \code{Weeksella virosa} \code{Weissella koreensis} \code{Wigglesworthia glossinidia} \code{Wolbachia endosymbiont of Brugia malayi} \code{Wolbachia endosymbiont of Culex quinquefasciatus} \code{Wolbachia endosymbiont of Drosophila melanogaster} \code{Wolbachia sp. wRi} \code{Wolinella succinogenes} \code{Xanthobacter autotrophicus} \code{Xanthomonas alfalfae} \code{Xanthomonas campestris} \code{Xanthomonas euvesicatoria} \code{Xanthomonas oryzae} \code{Xenorhabdus bovienii} \code{Xenorhabdus nematophila} \code{Xylanimonas cellulosilytica} \code{Xylella fastidiosa} \code{Yersinia enterocolitica} \code{Yersinia pestis} \code{Yersinia pseudotuberculosis} \code{Zobellia galactanivorans} \code{Zunongwangia profunda} \code{Zymomonas mobilis}}
\item{\code{Subspecies}}{a factor with levels \code{} \code{Acetobacter pasteurianus subsp. pasteurianus} \code{Acidovorax avenae subsp. avenae} \code{Aeromonas hydrophila subsp. hydrophila} \code{Aeromonas salmonicida subsp. salmonicida} \code{Alicyclobacillus acidocaldarius subsp. acidocaldarius} \code{Bacillus amyloliquefaciens subsp. amyloliquefaciens} \code{Bacillus amyloliquefaciens subsp. plantarum} \code{Bacillus subtilis subsp. spizizenii} \code{Bacillus subtilis subsp. subtilis} \code{Beijerinckia indica subsp. indica} \code{Bifidobacterium animalis subsp. animalis} \code{Bifidobacterium animalis subsp. lactis} \code{Bifidobacterium longum subsp. infantis} \code{Bifidobacterium longum subsp. longum} \code{Caldanaerobacter subterraneus subsp. tengcongensis} \code{Campylobacter fetus subsp. fetus} \code{Campylobacter jejuni subsp. doylei} \code{Campylobacter jejuni subsp. jejuni} \code{Clavibacter michiganensis subsp. michiganensis} \code{Clavibacter michiganensis subsp. sepedonicus} \code{Desulfovibrio desulfuricans subsp. desulfuricans} \code{Enterobacter cloacae subsp. cloacae} \code{Enterobacter cloacae subsp. dissolvens} \code{Fibrobacter succinogenes subsp. succinogenes} \code{Francisella noatunensis subsp. orientalis} \code{Francisella tularensis subsp. holarctica} \code{Francisella tularensis subsp. mediasiatica} \code{Francisella tularensis subsp. tularensis} \code{Fusobacterium nucleatum subsp. nucleatum} \code{Klebsiella pneumoniae subsp. pneumoniae} \code{Lactobacillus delbrueckii subsp. bulgaricus} \code{Lactobacillus plantarum subsp. plantarum} \code{Lactobacillus sakei subsp. sakei} \code{Lactococcus lactis subsp. cremoris} \code{Lactococcus lactis subsp. lactis} \code{Legionella pneumophila subsp. pneumophila} \code{Leifsonia xyli subsp. xyli} \code{Leuconostoc mesenteroides subsp. mesenteroides} \code{Listeria ivanovii subsp. ivanovii} \code{Mycobacterium abscessus subsp. bolletii} \code{Mycobacterium avium subsp. 
paratuberculosis} \code{Mycoplasma capricolum subsp. capricolum} \code{Mycoplasma mycoides subsp. capri} \code{Mycoplasma mycoides subsp. mycoides} \code{Nocardiopsis dassonvillei subsp. dassonvillei} \code{Pasteurella multocida subsp. multocida} \code{Pectobacterium carotovorum subsp. carotovorum} \code{Photorhabdus asymbiotica subsp. asymbiotica} \code{Photorhabdus luminescens subsp. laumondii} \code{Polynucleobacter necessarius subsp. asymbioticus} \code{Polynucleobacter necessarius subsp. necessarius} \code{Prochlorococcus marinus subsp. marinus} \code{Prochlorococcus marinus subsp. pastoris} \code{Propionibacterium freudenreichii subsp. shermanii} \code{Pseudomonas brassicacearum subsp. brassicacearum} \code{Salmonella enterica subsp. arizonae} \code{Salmonella enterica subsp. enterica} \code{Selenomonas ruminantium subsp. lactilytica} \code{Staphylococcus aureus subsp. aureus} \code{Staphylococcus carnosus subsp. carnosus} \code{Staphylococcus saprophyticus subsp. saprophyticus} \code{Streptococcus dysgalactiae subsp. equisimilis} \code{Streptococcus equi subsp. equi} \code{Streptococcus equi subsp. zooepidemicus} \code{Streptococcus gallolyticus subsp. gallolyticus} \code{Streptococcus infantarius subsp. infantarius} \code{Streptomyces griseus subsp. griseus} \code{Streptomyces hygroscopicus subsp. jinggangensis} \code{Syntrophomonas wolfei subsp. wolfei} \code{Thermoanaerobacter brockii subsp. finnii} \code{Thermoanaerobacter mathranii subsp. mathranii} \code{Treponema pallidum subsp. pallidum} \code{Treponema pallidum subsp. pertenue} \code{Xanthomonas alfalfae subsp. citrumelonis} \code{Xylella fastidiosa subsp. fastidiosa} \code{Yersinia enterocolitica subsp. enterocolitica} \code{Yersinia enterocolitica subsp. palearctica} \code{Yersinia pestis subsp. microtus} \code{Yersinia pestis subsp. pestis} \code{Zymomonas mobilis subsp. mobilis} \code{Zymomonas mobilis subsp. pomaceae}}
}
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(KEGGBacArchTaxInformation)
## maybe str(KEGGBacArchTaxInformation) ; plot(KEGGBacArchTaxInformation) ...
}
\keyword{datasets}
|
1338caf6ded117ec9e3e334553bdb199972daa0d
|
edc914203a9a25f422c6e85c53c92362cb144011
|
/man/find.mindist.block.Rd
|
118d4d40e50148f41e826a88da48f4236297ad71
|
[] |
no_license
|
seonjoo/clustest
|
8e3686b929fa65ea64e5b5162ee0d313310530a8
|
8ca36631157629f0f19b1de9d6fd2d82b77b4492
|
refs/heads/main
| 2022-12-27T22:00:07.800589
| 2020-10-08T18:51:06
| 2020-10-08T18:51:06
| 302,422,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 812
|
rd
|
find.mindist.block.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.mindist.block.R
\name{find.mindist.block}
\alias{find.mindist.block}
\title{Compute the minimum distance between data block.}
\usage{
find.mindist.block(dat, indx1, indx2, seg.length = 100, n.cores = 4)
}
\arguments{
\item{dat}{: n x p matrix with rows are observations, columns are variables}
\item{indx1}{: 1< length(indx1) < n index vector}
\item{indx2}{: 1< length(indx2) < n index vector}
\item{seg.length}{: segmentation length (default=100).
If seg.length > min(length(indx1),length(indx2)), floor(min(length(indx1),length(indx2))/2) will be used.}
\item{n.cores}{: number of cores (default=4)}
}
\value{
aa : minimum distance between indexed blocks.
}
\description{
Compute the minimum distance between data block.
}
|
9900ac3e6794861c4455b38bc3a52ee89991f0c1
|
02c37615762af39de855590a40efd5d29858c9fc
|
/man/WPSA.Rd
|
8506c1bf3e56297a3fac518da7c489c09809c910
|
[] |
no_license
|
ericdunipace/WpProj
|
d950d1f8e36094b1b93cd2bb62e99fc1b9ec3aef
|
6039e5ce8c5d3386e776fc1e6784807411805889
|
refs/heads/master
| 2023-03-27T19:23:12.132980
| 2021-04-02T21:32:56
| 2021-04-02T21:32:56
| 229,637,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,797
|
rd
|
WPSA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WPsimulatedAnnealing.R
\name{WPSA}
\alias{WPSA}
\title{p-Wasserstein distance projections using simulated annealing}
\usage{
WPSA(
X,
Y = NULL,
theta,
force = NULL,
p = 2,
ground_p = 2,
model.size = 3,
iter = 1,
temps = 1000,
max.time = 3600,
const = NULL,
proposal = proposal.fun,
options = list(method = c("selection.variable", "scale", "projection"),
transport.method = transport_options(), energy.distribution = "boltzman",
cooling.schedule = "Geman-Geman", proposal.method = "covariance", epsilon = 0.05,
OTmaxit = 100),
display.progress = FALSE,
parallel = NULL,
get.theta = TRUE,
xtx = NULL,
xty = NULL
)
}
\arguments{
\item{X}{Covariate vector}
\item{Y}{Predictions}
\item{theta}{optional matrix of parameters for generating predictions}
\item{force}{any covariates to force into the model?}
\item{p}{power of the Wasserstein distance}
\item{ground_p}{power of the distance metric. Typically same as `p`}
\item{model.size}{Maximum number of coefficients}
\item{iter}{Maximum number of iterations}
\item{temps}{number of temperatures}
\item{max.time}{maximum time in seconds to run}
\item{const}{maximum value for simulated annealing distance}
\item{proposal}{proposal function. There is a default method but can provide your own with parameters xty, cur, idx, force, d, method}
\item{options}{options for simulated annealing}
\item{display.progress}{Logical; if \code{TRUE}, display progress while the algorithm runs}
\item{parallel}{foreach backend}
\item{get.theta}{Should the model save the linear coefficients?}
\item{xtx}{precomputed crossproduct \code{crossprod(X,X)}}
\item{xty}{precomputed \code{crossprod(X, Y)}}
}
\value{
}
\description{
p-Wasserstein distance projections using simulated annealing
}
|
dc5cc8e7123d9f182ea03fa7beeeb3fe436f29ab
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PowerUpR/examples/bcra4r3.Rd.R
|
169f09931f53f0c972034490aeb572959900b08a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 520
|
r
|
bcra4r3.Rd.R
|
# Extracted example code for PowerUpR's bcra4r3 function family
# (four-level blocked cluster-level random assignment design,
# treatment at level 3).
library(PowerUpR)
### Name: bcra4r3
### Title: Four-Level Blocked Cluster-level Random Assignment Design,
### Treatment at Level 3
### Aliases: bcra4r3 mdes.bcra4r3 power.bcra4r3 mrss.bcra4r3
### ** Examples
# cross-checks: the three calls below use consistent design parameters
# (intraclass correlations rho2..rho4, treatment-effect heterogeneity
# omega4, and sample sizes n, J, K, L) so their results should agree.
# Minimum detectable effect size for the given design
mdes.bcra4r3(rho4=.05, rho3=.15, rho2=.15,
omega4=.50, n=10, J=4, K=4, L=20)
# Statistical power for an effect size of .316 under the same design
power.bcra4r3(es = .316, rho4=.05, rho3=.15, rho2=.15,
omega4=.50, n=10, J=4, K=4, L=20)
# Minimum required sample size for detecting es = .316
mrss.bcra4r3(es = .316, rho4=.05, rho3=.15, rho2=.15,
omega4=.50, n=10, J=4, K=4)
|
8c2d892c89095f29d2484cf1a9695e5f3cdf758f
|
efa8c7bf003496e7f0414dfe68b2667df2bca25d
|
/lab3/p8.R
|
25eba64f145e5c4a384595e4ebd1e910b4d1e8fd
|
[] |
no_license
|
ShashankSirmour/RLab
|
55d7d9bcfb67a494c75a32dcd587e392caa9708d
|
0b3073556d102566014d1dc659ccecef0fce4b49
|
refs/heads/master
| 2020-12-19T14:58:42.006462
| 2020-01-23T10:02:25
| 2020-01-23T10:02:25
| 235,767,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113
|
r
|
p8.R
|
# Compute the factorial of a non-negative number recursively.
#
# Args:
#   x: a single non-negative number.
# Returns:
#   x! as a numeric value (1 for x = 0 or x = 1).
# Errors:
#   Stops with an informative message for negative input, which would
#   otherwise recurse forever (the original base case only covered x == 1,
#   so fact(0) never terminated).
fact <- function(x) {
  if (x < 0) {
    stop("x must be non-negative")
  }
  if (x <= 1) {
    # Base case: 0! = 1! = 1
    return(1)
  }
  x * fact(x - 1)
}
print(fact(5))
|
f02839dee134dcb4d2f1c8ec61c651b6be6a5f0c
|
1175e1cd2c18a5c8bc0ce240b7d6c5a39635c03b
|
/plot4.R
|
ecea29c51e754bf3149364f46a1051b5eb48f79d
|
[] |
no_license
|
BenCoppola/ExData_Plotting1
|
9924791d0a54b476a995001a71fe0085c1219206
|
0bc625c13caec440acc8f800c0460df616a3e49d
|
refs/heads/master
| 2021-01-18T04:37:48.883225
| 2015-03-07T17:24:19
| 2015-03-07T17:24:19
| 31,720,505
| 0
| 0
| null | 2015-03-05T15:25:49
| 2015-03-05T15:25:47
| null |
UTF-8
|
R
| false
| false
| 2,630
|
r
|
plot4.R
|
## plot4.R -- Exploratory Data Analysis course project, plot 4.
## Reads the UCI "Individual household electric power consumption" data,
## restricts it to 2007-02-01 and 2007-02-02, and draws a 2x2 panel of
## time-series plots saved as plot4.png.
# Clear the environment
# NOTE(review): rm(list=ls()) and setwd() in a script are side effects on
# the caller's session; the path below is machine-specific.
rm(list=ls())
# Change the working directory to where the raw data file lives
setwd('C:/Users/coppab01/Documents/Coursera/Exploratory Data Analysis/Project 1')
# load up useful packages
library(dplyr)
# Read only the first 5 rows to infer each column's class so the full read
# can pass colClasses (much faster for the ~2M-row file)
tab5rows <- read.table("household_power_consumption.txt", header = TRUE, nrows = 5, sep=";")
classes <- sapply(tab5rows, class)
# Full read: 2,075,259 rows; "?" marks missing values in this data set
tabAll <- read.table("household_power_consumption.txt", header = TRUE, colClasses = classes, nrows = 2075259, na.strings = "?", sep=";")
tabAll <- tbl_df(tabAll)
# Combine date and time strings into one column
tabAll <- mutate(tabAll, DateTime = paste(Date, Time))
# Convert "DateTime" string to POSIXct class and "Date" to Date class
tabAll$DateTime <- as.POSIXct(strptime(tabAll$DateTime, "%d/%m/%Y %H:%M:%S"))
tabAll$Date <- as.Date(tabAll$Date, "%d/%m/%Y")
# Filter to the 2 days in question
data <- filter(tabAll, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
# Make Plot 4: a 480x480 PNG holding a 2x2 grid of panels
png(filename = "plot4.png", width = 480, height = 480, type="windows")
par(mfrow = c(2, 2), mar = c(4, 4, 2, 2), oma = c(0, 0, 2, 0))
with(data, {
# Panel 1 (top-left): global active power over time
plot(DateTime, Global_active_power, type = "n", xlab = "", ylab = "Global Active Power")
lines(DateTime, Global_active_power)
# Panel 2 (top-right): voltage over time
plot(DateTime, Voltage, type = "n", xlab = "datetime")
lines(DateTime, Voltage)
# Panel 3 (bottom-left): the three sub-metering series overlaid.
# A shared y-range keeps all three series on the same scale.
ylimits <- range(c(Sub_metering_1,Sub_metering_2,Sub_metering_3))
plot(DateTime, Sub_metering_1, type = "n",
axes = FALSE, xlab = "", ylab = "", ylim = ylimits)
lines(DateTime, Sub_metering_1, col="black")
par(new=TRUE) # overlay the next plot on the same panel (like 'hold on' in MATLAB)
plot(DateTime, Sub_metering_2, type = "n",
axes = FALSE, xlab = "", ylab = "", ylim = ylimits)
lines(DateTime, Sub_metering_2, col="red")
par(new=TRUE)
plot(DateTime, Sub_metering_3, type = "n",
ylim = ylimits,
ylab = "Energy sub metering", xlab = "")
lines(DateTime, Sub_metering_3, col="blue")
legend("topright", # places a legend at the appropriate place
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), # puts text in the legend
lty=c(1,1,1), # gives the legend appropriate symbols (lines)
col=c("black", "red", "blue"), # gives the legend lines the correct color and width
bty = "n") #turns off the border
# Panel 4 (bottom-right): global reactive power over time
plot(DateTime, Global_reactive_power, type = "n", xlab = "datetime")
lines(DateTime, Global_reactive_power)
})
dev.off()
|
bad01d31fa3cb76b1e4b8068d6b645cbee3c9bf0
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/sagemaker_list_trials.Rd
|
9b91e41fb1ae9e426edaf6d0b536747d3d159a4f
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,791
|
rd
|
sagemaker_list_trials.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_trials}
\alias{sagemaker_list_trials}
\title{Lists the trials in your account}
\usage{
sagemaker_list_trials(
ExperimentName = NULL,
TrialComponentName = NULL,
CreatedAfter = NULL,
CreatedBefore = NULL,
SortBy = NULL,
SortOrder = NULL,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{ExperimentName}{A filter that returns only trials that are part of the specified
experiment.}
\item{TrialComponentName}{A filter that returns only trials that are associated with the specified
trial component.}
\item{CreatedAfter}{A filter that returns only trials created after the specified time.}
\item{CreatedBefore}{A filter that returns only trials created before the specified time.}
\item{SortBy}{The property used to sort results. The default value is \code{CreationTime}.}
\item{SortOrder}{The sort order. The default value is \code{Descending}.}
\item{MaxResults}{The maximum number of trials to return in the response. The default
value is 10.}
\item{NextToken}{If the previous call to \code{\link[=sagemaker_list_trials]{list_trials}} didn't
return the full set of trials, the call returns a token for getting the
next set of trials.}
}
\description{
Lists the trials in your account. Specify an experiment name to limit the list to the trials that are part of that experiment. Specify a trial component name to limit the list to the trials that associated with that trial component. The list can be filtered to show only trials that were created in a specific time range. The list can be sorted by trial name or creation time.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_list_trials/} for full documentation.
}
\keyword{internal}
|
2c128b374789c50fa0265cd336cb9d8739f77dbe
|
9995fdb4f2b2341db3a9f7e2b036b656e802e38c
|
/man/passthroughConceptProcessor.Rd
|
78a4bd406c6460aa5d6c497d6826342b46b24e31
|
[
"Apache-2.0"
] |
permissive
|
sverchkov/BaselineRegularization
|
100f8c647199ae58ecc0281e2b8cf598525e8d4a
|
d3c208b543104ae63609fe1320a68ed2015b1e79
|
refs/heads/master
| 2021-07-04T18:33:11.068161
| 2019-02-12T19:37:56
| 2019-02-12T19:37:56
| 112,363,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 710
|
rd
|
passthroughConceptProcessor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/passthroughConceptProcessor.R
\name{passthroughConceptProcessor}
\alias{passthroughConceptProcessor}
\title{Passthrough concept processor}
\usage{
passthroughConceptProcessor(record_table,
record_table_column = "concept_id", out_column = record_table_column)
}
\arguments{
\item{record_table}{The table to process}
\item{record_table_column}{The record table column to process}
\item{out_column}{The column to which to write the processor result}
}
\value{
\code{record_table} with \code{out_column} added, being a copy of \code{record_table_column}
}
\description{
A concept processor that just copies \code{record_table_column} to \code{out_column}
}
|
8462a32d10261be69733d697c3c0a548c403b32d
|
6b2ceb82d28058ce11e5557d07caa00929726c3b
|
/man/guide_legend_interactive.Rd
|
daa0ca62d29fbb06237884a3dbbf846ae57a4198
|
[] |
no_license
|
krishnapsrinivasan/ggiraph
|
aff4de22801b019f601cd4410c411cdf93ca63a8
|
fec5ac510710e45eec1dc19f9b0e4e51b8a53a13
|
refs/heads/master
| 2020-07-25T11:33:18.462968
| 2019-09-03T15:10:26
| 2019-09-03T15:10:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 406
|
rd
|
guide_legend_interactive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guide_legend_interactive.R
\name{guide_legend_interactive}
\alias{guide_legend_interactive}
\title{interactive legend guide}
\usage{
guide_legend_interactive(...)
}
\arguments{
\item{...}{arguments passed to guide_legend.}
}
\description{
an interactive legend guide.
See \code{\link[ggplot2]{guide_legend}} for more details.
}
|
6638dc001ef63222eba454c2d30f1bff319289ff
|
c048b06205170c1eae99e34aec414967874ccce0
|
/plot2.R
|
9212b5e31f6a143398107ad389ddc02182a66133
|
[] |
no_license
|
jayant-singh/Assignment-1-for-Exploratory-Data-Analysis-in-R
|
efe780ea9a5865263ec21bccad90c205b16f5ccb
|
f2f1e2033cfeb0c451328d0d7ba11aec1007f1b2
|
refs/heads/master
| 2021-01-10T10:37:18.962970
| 2016-03-03T20:27:27
| 2016-03-03T20:27:27
| 53,079,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 853
|
r
|
plot2.R
|
# plot2.R -- plots Global active power vs. Datetime for 2007-02-01/02.
# This plot gives the consumption trend on those two days, Thursday and Friday.
###########################################################################
# Read the full semicolon-separated data set (columns arrive as factors
# because "?" missing-value markers make numeric columns non-numeric)
data_new=read.table("household_power_consumption.txt",sep=";",header=TRUE)
# Keep only the rows for 1 Feb 2007 and 2 Feb 2007
data_two_day=data_new[(as.character(data_new$Date)=="1/2/2007"|as.character(data_new$Date)=="2/2/2007"),]
# Convert the Date column from factor via character to Date class
data_two_day$Date=as.character(data_two_day$Date)
data_two_day$Date=as.Date(data_two_day$Date,"%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp column
datetime = paste(data_two_day$Date, data_two_day$Time)
data_two_day$Datetime = as.POSIXct(datetime)
# Plot the line chart to a PNG device.
# Global_active_power is a factor here, so it is converted via
# character -> numeric (as.numeric directly on a factor would yield
# the internal level codes, not the measured values).
png(file="plot2.png")
plot(as.numeric(as.character(data_two_day$Global_active_power)) ~ data_two_day$Datetime, type = "l",
ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
1ddbf71d7e601dce1f92c2c6c4ef2c3bb3992638
|
942d40cffac9a26fb6a195d0b535eeb4ecc9c786
|
/man/process_contact_matrix_scaled_age.Rd
|
e6396df98b621509b58d8eef4eb19cce9a862fd1
|
[
"MIT"
] |
permissive
|
willpearse/squire
|
7652a94bd2c75e75fbe24ff3e421c87ac02fa018
|
c33ff50849f2d0886423bc7887e972ac3e0f99ec
|
refs/heads/master
| 2022-11-06T14:11:54.940845
| 2020-06-16T15:28:19
| 2020-06-16T15:28:19
| 270,962,087
| 0
| 0
|
MIT
| 2020-06-09T09:39:49
| 2020-06-09T09:39:48
| null |
UTF-8
|
R
| false
| true
| 483
|
rd
|
process_contact_matrix_scaled_age.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/contact_matrices.R
\name{process_contact_matrix_scaled_age}
\alias{process_contact_matrix_scaled_age}
\title{Process a contact matrix with an extra}
\usage{
process_contact_matrix_scaled_age(contact_matrix, population)
}
\arguments{
\item{contact_matrix}{A contact matrix}
\item{population}{Vector of population by age}
}
\value{
Processed matrix
}
\description{
Process a contact matrix, applying an extra scaling based on the age-structured \code{population} vector
}
|
f40450075fef6e3b191f31b254a1c1578f8c3c5d
|
2abfdd54a8e36e67aa5dea6567bfd1058b86f580
|
/Part2/Question3.7 - FoodDrinkRules.R
|
79b0debf0e0271ad5c3b0254eebc6026deab2c17
|
[
"Apache-2.0"
] |
permissive
|
JosePedro90/DataMiningAssig
|
e9302d58b86256a5029d349d1d49107897b08bd6
|
244e01c0a57d4896acc48ce2b6e599bafd0ff90d
|
refs/heads/master
| 2021-01-25T00:56:50.926470
| 2017-02-03T16:30:48
| 2017-02-03T16:30:48
| 94,704,101
| 1
| 0
| null | 2017-06-18T18:01:25
| 2017-06-18T18:01:24
| null |
UTF-8
|
R
| false
| false
| 2,140
|
r
|
Question3.7 - FoodDrinkRules.R
|
# Question 3 - Association Rule Mining
library(arules)
library(arulesViz)
## 7. Association rules that include both food and drink
# Map a zero-based item index (as stored in the receipt data) to a
# human-readable "<name><type>" string using the `goods` lookup table.
# NOTE(review): relies on a `goods` data frame defined elsewhere in the
# project (columns ItemName, ItemType, ItemCategory) -- confirm its schema.
indexToName <- function(ItemIndex) {
itemName = as.character(goods$ItemName[ItemIndex + 1]) # R is not zero-index based
itemType = as.character(goods$ItemType[ItemIndex + 1])
paste(itemName, itemType, sep="")
}
# Replace each item index with its item name.
# `receiptSparse` is assumed to hold item indices with NAs for unused
# slots; paste(NA, NA) produces the literal string "NANA", which is
# converted back to a real NA on the next line.
receiptWithName <- sapply(receiptSparse, indexToName)
receiptWithName[receiptWithName == 'NANA'] <- NA
receiptWithName <- as.data.frame(receiptWithName)
# Add a flag column recording whether a receipt has both food and drink
receiptWithName['isFoodAndDrink'] <- as.character(0)
# Look up whether an item (by name) is categorised as "Food" or "Drink".
# NOTE(review): indexing goods$ItemCategory by a character item name
# assumes the vector is named accordingly -- verify against `goods`.
whatType <- function(itemname) {
as.character(goods$ItemCategory[itemname])
}
# Determine, row by row, whether a receipt contains both food and drink
for (r in 1:nrow(receiptWithName)) {
food = FALSE
drink = FALSE
for (col in 1:(ncol(receiptWithName)-1)) { # skip the flag column itself
if (!is.na(receiptWithName[r, col])) {
if (whatType(receiptWithName[r, col]) == "Food")
food = TRUE
else if (whatType(receiptWithName[r, col]) == "Drink")
drink = TRUE
}
}
receiptWithName[r, ncol(receiptWithName)] <- food & drink
}
# Keep only receipts that contain both food and drink (item columns only)
foodAndDrink <- subset(receiptWithName, isFoodAndDrink==TRUE, select=Item1:Item8)
foodAndDrink_trans <- as(foodAndDrink, "transactions")
# Mine association rules with Apriori: support >= 2%, confidence >= 75%
FDrules <- apriori(foodAndDrink_trans, parameter= list(minlen=1, target="rules",supp=0.02, conf=0.75))
inspect(FDrules)
# Round the quality measures and sort the rules by lift (descending)
quality(FDrules) <- round(quality(FDrules), digits=2)
sortedFDRules <- sort(FDrules,by="lift")
inspect(sortedFDRules)
# Remove redundant rules: drop any rule that is a subset of an
# earlier (higher-lift) rule in the sorted list
subset.matrix <- is.subset(sortedFDRules, sortedFDRules)
subset.matrix[lower.tri(subset.matrix, diag=T)] <- NA
redundant <- colSums(subset.matrix, na.rm=T) >= 1
which(redundant)
rules.pruned <- sortedFDRules[!redundant]
inspect(rules.pruned)
# Visualise the pruned rules (scatter, graph, and grouped-matrix views)
plot(rules.pruned)
plot(rules.pruned, method="graph",interactive = TRUE)
plot(rules.pruned, method="grouped", interactive= TRUE)
|
e748557984ff147479e52d2d01fbbc4161e643e5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/galgo/examples/summary.World.Rd.R
|
8a492ad7d1ea6326b639a2d1afa116aa251f03be
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
summary.World.Rd.R
|
# Extracted example code for galgo's summary.World method.
library(galgo)
### Name: summary.World
### Title: Prints the representation and statistics of the world object
### Aliases: summary.World World.summary summary.World summary,World-method
### Keywords: methods internal print
### ** Examples
# Build a chromosome from a collection of 5 genes, a niche of 10 random
# chromosomes, and a world of 2 random niches, then summarise the world.
cr <- Chromosome(genes=newCollection(Gene(shape1=1, shape2=100),5))
ni <- Niche(chromosomes=newRandomCollection(cr, 10))
wo <- World(niches=newRandomCollection(ni,2))
wo # auto-print of the world object
summary(wo)
|
072926781a43f03e656f276b74f5dc702c6697ef
|
5d6beae57500eeb415219ae512b6f1716aea2f36
|
/RFsp.R
|
4fc052fecb09e4c985e093bad05cd48ef12ab4cd
|
[] |
no_license
|
ycm961120/py-used-for-zju
|
f36a5e99099919bb9dd5ce602d4eca2588936f65
|
fde67db8ed47818e04e8d6ad555a009bb6c1ee4a
|
refs/heads/master
| 2021-07-22T05:52:29.984379
| 2020-09-27T13:19:47
| 2020-09-27T13:19:47
| 218,698,902
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 236
|
r
|
RFsp.R
|
library(gstat)
library(GSIF)
library(sp)
library(rgdal)
library(raster)
library(geoR)
library(ranger)
library(intamap)
library(plyr)
library(plotKML)
library(scales)
library(RCurl)
library(parallel)
library(lattice)
library(gridExtra)
|
f1840e553613465bf097f2611fed602fa09d4b76
|
17702ad07b28844fda4799bba3da728bfdc5704b
|
/db/man/check.lists.Rd
|
cecaf81c04ee84e23503d71b33d1277c7f45019d
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yogeshdarji/pecan
|
c4d620590f37ab3928f1d28b006456fee0b96b7b
|
109381603fab9f9927ad18c95b9389cea68deef1
|
refs/heads/master
| 2021-06-15T05:56:03.976607
| 2017-03-25T17:33:58
| 2017-03-25T17:33:58
| 86,267,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 440
|
rd
|
check.lists.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.trait.data.R
\name{check.lists}
\alias{check.lists}
\title{Compares two lists}
\usage{
check.lists(x, y)
}
\arguments{
\item{x}{first list}
\item{y}{second list}
}
\value{
\code{TRUE} if the two lists are the same, \code{FALSE} otherwise
}
\description{
Check two lists. Identical does not work since one can be loaded
from the database and the other from a CSV file.
}
\author{
Rob Kooper
}
|
456f0195a3403d3d3db71710a31aeb2dc4b2a7fb
|
516180dc556ee75e61d7a5684444d8d7d0d9746a
|
/man/myGGplot.Rd
|
7bbafa9354c69bf93831cd48337213ca47985cd3
|
[] |
no_license
|
power502/PowerKyleTools
|
57075862a3df8d8d4aaf8e5dcfe47d3feb17a2c6
|
7dc36531188b8e5f678b9d6b71433f756b9972f3
|
refs/heads/master
| 2021-04-26T23:50:25.940370
| 2018-03-06T20:09:05
| 2018-03-06T20:09:05
| 123,866,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 341
|
rd
|
myGGplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myGGplot.R
\name{myGGplot}
\alias{myGGplot}
\title{Wrapper function for ggplot2 for data d}
\usage{
myGGplot(x)
}
\arguments{
\item{x}{data.frame}
}
\value{
ggplot2
}
\description{
Wrapper function around ggplot2 for plotting the data set \code{d}
}
\examples{
data(d)
myGGplot(d)
}
|
766a012f4c8bacf133e342fb1bada52301ee402c
|
15d030eb134e7bd16ceacca7feae5b6f9705d53c
|
/Uebungen/2020/02_Uebung.R
|
487ece75b67432f644a17502378340c0fc017345
|
[] |
no_license
|
m-Py/Testtheorie-R
|
c4e525c6d0861e3351740436d1dc71cf8b81dd33
|
efea9e6f7735945470fe7379aae201fbe9969463
|
refs/heads/master
| 2023-06-09T18:41:48.319171
| 2023-06-01T13:54:36
| 2023-06-01T13:54:36
| 142,132,705
| 11
| 3
| null | 2020-06-02T08:33:11
| 2018-07-24T09:00:46
|
R
|
UTF-8
|
R
| false
| false
| 8,082
|
r
|
02_Uebung.R
|
##########################
# Termin 2 Uebungsblatt ##
##########################
############################################
## Vor den naechsten Aufgaben kommen neue ##
## Inhalte: 1. Logisches UND / ODER ##
############################################
# Logisches UND
TRUE & TRUE
TRUE & FALSE
FALSE & FALSE
# Logisches ODER
TRUE | TRUE
TRUE | FALSE
FALSE | FALSE
# Entgegen umgangssprachlicher Intuition kein ENTWEDER/ODER, sondern
# ein "einschließendes ODER"
# Logische Operatoren arbeiten paarweise / komponentenweise:
c(TRUE, FALSE, FALSE) & c(TRUE, TRUE, FALSE)
c(TRUE, FALSE, FALSE) | c(TRUE, TRUE, FALSE)
####################################################
## Vor den naechsten Aufgaben kommen neue ##
## Inhalte: 2. Auswahl von Elementen aus Vektoren ##
####################################################
# In R kann man auf einzelne oder mehrere Elemente in Vektoren
# zugreifen.
meinVektor <- c(1, 2, 6, 2, 9) # Beispielvektor
# Mit der [ ]-Notation koennen wir Elemente aus Vektoren anhand ihrer
# Position auswaehlen
meinVektor[3] # waehlt das dritte Element aus
# Man kann eine "Negativ"-Auswahl durchfuehren:
meinVektor[-3] # laesst das dritte Element aus
bla <- meinVektor[-3]
# Man kann mehrere Elemente auswaehlen:
meinVektor[c(1, 4)]
# Oder mehrere Elemente auslassen:
meinVektor[-c(1, 4)]
# Oftmals erwuenscht: waehle Elemente anhand ihrer Eigenschaften aus.
# Waehle alle Elemente die groesser bzw kleiner gleich 2 sind aus:
meinVektor[meinVektor >= 2] # was ist `meinVektor > 2`?
meinVektor[meinVektor <= 2]
# Hier waehle ich die Daten mit einem logischen Vektor aus, der fuer
# jedes Element angibt, ob es in der Ausgabemenge sein soll. Das
# heisst das hier funktioniert auch:
meinVektor[c(TRUE, TRUE, FALSE, TRUE, FALSE)]
# Man kann mit dem [ ] Zugriff auch Daten veraendern, indem ich die
# "<-"-Zuweisung verwende:
meinVektor[3] <- 0
meinVektor[meinVektor > 8] <- 9999
# Beispiel: in Fragebogenuebertragung wurden fehlende Werte mit -99
# kodiert.
punktZahlen <- c(10, 32, 12, 42, -99, -99)
punktZahlen[punktZahlen == -99] <- NA # NA = fehlender Wert; mehr dazu naechste Woche
# Merke: mit dem [ ] Zugriff kann man Elemente aus Vektoren
# auswaehlen:
# (a) anhand ihrer Position im Vektor
# (b) mithilfe eines logischen Vektors, der fuer jedes Element angibt,
# ob es in der Ausgabemenge sein soll.
# (c) zusammen mit der "<-" Zuweisung kann man so bestimmte Elemente
# in Vektoren veraendern.
##### Start der Aufgaben:
# Nach der Niederlage gegen den amerikanischen Verband sollen die
# Leistungen der deutschen Sportler mithilfe eines Dopingprogramms
# verbessert werden. Dafuer wird an einige Sportler*innen das
# Dopingmittel Diuretika verteilt. (Diuretika dienen der Entwässerung
# des Körpers und damit wird der Athlet / die Athletin leichter) Um
# den Erfolg des Programms zu evaluieren, werden die Leistungen aller
# Sportler*innen vor und nach Vergabe des Dopings verglichen. Die
# folgenden zwei Vektoren beinhalten die Sprungleistungen der Springer
# vor und nach dem Doping. Dabei kommt der erste Wert in
# `spruenge_vorher` von derselben Athlet*in wie Wert 1 in
# `spruenge_nachher` usw.
###############
## Aufgabe 0 ##
###############
# Ladet die zwei Variablen mit STRG-Enter in euren "workspace", damit
# ihr die weiteren Aufgaben loesen koennt.
spruenge_vorher <- c(1.85, 1.89, 2.02, 1.99, 1.84, 2.31, 1.99, 1.79,
2.15, 2.07, 2.01, 2.00, 1.87, 2.08, 2.13)
spruenge_nachher <- c(1.95, 1.83, 2.15, 2.03, 1.75, 2.28, 1.89, 2.04,
2.39, 2.33, 1.99, 2.13, 1.94, 2.26, 2.11)
###############
## Aufgabe 1 ##
###############
# Wie hoch war die mittlere Sprunghoehe an Termin 1 und an Termin 2?
###############
## Aufgabe 2 ##
###############
# Wie hoch war die mittlere Sprunghoehe ueber alle Spruenge hinweg?
# Speichert alle Spruenge in *einer* Variablen ab, bevor ihr den
# Mittelwert berechnet.
###############
## Aufgabe 3 ##
###############
# Erstellt mit einem logischen Vergleich einen logischen (TRUE/FALSE)
# Vektor, der kodiert, ob die Sprungweite nach Vergabe von Diuretikum
# hoeher war als davor. Speichert diesen Vektor in einer Variablen ab.
# An dieser Stelle wichtig: Logische Operatoren arbeiten paarweise /
# komponentenweise. Es ist also nur ein Befehl noetig
###############
## Aufgabe 4 ##
###############
# Wie viele Springer sind nach der Diuretikum-Vergabe hoeher
# gesprungen? Welchem relativen Anteil entspricht das?
###############
## Aufgabe 5 ##
###############
# Speichert die Differenz zwischen den Sprunghoehen an Termin 1 und
# Termin 2 in einer Variablen ab und betrachtet die Variable.
###############
## Aufgabe 6 ##
###############
# Wie hoch war die mittlere Verbesserung vom ersten zum zweiten Sprung
###############
## Aufgabe 7 ##
###############
# Was war die staerkste Verbesserung nach Diuretikum-Einnahme? Was
# die staerkste Verschlechterung?
###############
## Aufgabe 8 ##
###############
# Recherchiert online, wie man einen t-Test ausfuehren kann, um
# gepaarte Daten zu vergleichen; fuehrt ihn aus, um zu entscheiden, ob
# Diuretika zu einer signifikanten Verbesserung der Sprungleistung
# fuehren (bei einem konventionellen Alpha-Fehler Niveau von .05).
# Ja, betrachtet den "p-value".
# Fuehrt einen Einstichproben-t-Test (-> Internetrecherche) durch, der
# die Differenz der Messwerte (vorher/nachher) mit dem Wert Null
# vergleicht. Was faellt im Vergleich zum gepaarten t-Test auf?
# (Hinweis: Das Ergebnis ist dasselbe wie beim gepaarten t-Test.)
################################################
## Ab dieser Aufgabe: Verwendung von UND/ODER ##
################################################
###############
## Aufgabe 9 ##
###############
# (a) Wie viele Springer haben *vor* Vergabe von Diuretikum die
# 2-Meter Marke geknackt?
# (b) Wie viele Springer haben *nach* Vergabe von Diuretikum die
# 2-Meter Marke geknackt?
# (c) erstellt eine Variable, die kodiert wer vor ODER nach der
# Einnahme von Diuretikum die 2m Marke geknackt hat (nutzt das
# *logische* ODER, nicht das umgangsprachliche "entweder-oder").
# Wie viele Sportler*innen haben an mindestens einem Termin die
# 2-Meter-Marke geknackt?
# (d) Wie viele Sportler*innen haben an beiden Terminen die
# 2-Meter-Marke geknackt?
# (e) Bonus-Frage (ist etwa schwieriger, kann uebersprungen werden):
# Kodiert mit einem logischen Vektor, wer nur »entweder« vorher »oder«
# nachher die 2-Meter-Marke geknackt hat. (TRUE bedeutet also: Person
# hat einmal die Marke geknackt; FALSE bedeutet also: Person hat entweder
# keinmal oder zweimal die Marke geknackt).
##########################################
## Ab dieser Aufgabe Verwendung von [ ] ##
##########################################
################
## Aufgabe 10 ##
################
# (a) Waehlt das dritte Element aus dem Vektor `spruenge_nachher` aus
# und speichert es in einer Variablen.
# (b) Waehlt das erste, fuenfte und zehnte Element aus dem Vektor
# `spruenge_vorher` aus und speichert diese Auswahl in einer neuen
# Variablen.
# (c) Waehlt alle bis auf das das erste, fuenfte und zehnte Element
# aus dem Vektor `spruenge_vorher` aus.
################
## Aufgabe 11 ##
################
# (a) Waehlt alle Elemente aus dem Vektor `spruenge_nachher` aus, bei
# denen die Sprunghoehe groesser als 2m war:
# (b) Waehlt alle Elemente aus dem Vektor `spruenge_nachher` aus, die
# zu den Springern gehoeren, die sowohl *vor als auch nach* der
# Einnahme von Diuretikum *niedriger* als 2m gesprungen sind.
################
## Aufgabe 12 ##
################
# Example vector for the indexing/assignment exercises below.
mein_vektor <- c(1, 0, 2, 9)
# (a) Replace the second element of `mein_vektor` with 55:
# (b) Increase the first element of `mein_vektor` by 10
# (c) Replace the third and fourth element of `mein_vektor` with 999
################
## Aufgabe 13 ##
################
# Wegen schlechter Windverhaeltnisse sollen die Sprunghoehen einiger
# Sportler nachtraeglich aufgewertet werden. Erhoeht die Werte der
# Sportler 3, 8, und 13 zu Zeitpunkt 2 um 10cm.
|
a3e2a65e3641379e381890ebe96cf9d1c2840f9b
|
8d2106d12a3daca1516d34b83c46aedc25e2e18a
|
/MitochondriaSim.R
|
d970e58ba12a183b69a490ed5d4bb1cb70e4874c
|
[] |
no_license
|
snaketron/MitochondrionVolume
|
a4fbf621367f28181f3982d6822bf03a492f4807
|
fe06e21dae7505c8c4730aabbf2147e8d0d25a8e
|
refs/heads/master
| 2020-07-07T19:38:37.413701
| 2019-08-28T18:10:05
| 2019-08-28T18:10:05
| 203,456,882
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,382
|
r
|
MitochondriaSim.R
|
# Dependencies: parallel for mclapply(), ggplot2 for the figures below.
# library() (rather than require()) fails fast if a package is missing
# instead of silently returning FALSE and crashing later.
library(parallel)
library(ggplot2)

# Simulation parameters.
S <- 30 # side dimension (grid positions per axis of the cell)
Nmb <- 1000 # number of Mb (myoglobin) molecules in cell
Nb <- 100 # number of bootstrap replicates per mass
masses <- c(1, 10, 100, 1000, 10000, 100000) # body masses to simulate
# Allometric mitochondrial density.
#
# Number of grid cells occupied by mitochondria, assuming density scales
# as coef * mass^(-1/4) (quarter-power allometry).
# NOTE(review): the original header comment stated 0.3 while the code
# used 0.2; the code's value is kept as the default -- confirm which
# coefficient was intended.
#
# m    : body mass (positive scalar).
# n    : total number of grid cells in the simulated cell.
# coef : allometric prefactor (default 0.2, matching the original code).
# Returns the (ceiling'd) mitochondrion count, at least 1 for m, n > 0.
mitdens.allometry <- function(m, n, coef = 0.2) {
  ceiling(coef * m^(-1/4) * n)
}
# Bootstrap the mean nearest-mitochondrion distance in a 1-D cell.
#
# mass         : body mass, forwarded to mitdens.func.
# S            : number of grid positions along the cell.
# Nb           : number of bootstrap replicates.
# N            : cell size handed to mitdens.func (total grid cells).
# Nmb          : number of myoglobin (Mb) molecules placed per replicate.
# mitdens.func : function(m, n) returning the mitochondrion count.
# Returns a numeric vector of length Nb: per-replicate mean of the
# distance from each Mb molecule to its nearest mitochondrion centre.
getMinDist1D <- function(mass, S, Nb, N, Nmb, mitdens.func) {
  n.mito <- mitdens.func(m = mass, n = N)
  boot.means <- numeric(Nb)
  for (rep.i in seq_len(Nb)) {
    # Mitochondria occupy distinct grid positions; centres sit at
    # index + 0.5. NOTE(review): the 2-D/3-D versions use index - 0.5;
    # confirm which offset convention is intended.
    occupied <- numeric(S)
    occupied[sample(x = 1:S, size = n.mito, replace = FALSE)] <- 1
    centres <- which(occupied == 1, arr.ind = TRUE) + 0.5
    # Myoglobin positions uniform on [1, S].
    mb.pos <- runif(n = Nmb, min = 1, max = S)
    # Distance from each Mb molecule to its nearest mitochondrion.
    nearest <- vapply(mb.pos, function(p) min(abs(p - centres)), numeric(1))
    boot.means[rep.i] <- mean(nearest)
  }
  boot.means
}
# Bootstrap the mean all-mitochondria distance in a 1-D cell.
# Identical to getMinDist1D except that each Mb molecule contributes the
# MEAN distance to all mitochondrion centres rather than the minimum.
#
# Arguments as in getMinDist1D; returns a numeric vector of length Nb.
getMeanDist1D <- function(mass, S, Nb, N, Nmb, mitdens.func) {
  n.mito <- mitdens.func(m = mass, n = N)
  boot.means <- numeric(Nb)
  for (rep.i in seq_len(Nb)) {
    # Mitochondrion centres at sampled grid index + 0.5.
    occupied <- numeric(S)
    occupied[sample(x = 1:S, size = n.mito, replace = FALSE)] <- 1
    centres <- which(occupied == 1, arr.ind = TRUE) + 0.5
    # Myoglobin positions uniform on [1, S].
    mb.pos <- runif(n = Nmb, min = 1, max = S)
    # Mean distance from each Mb molecule to every mitochondrion.
    avg.dist <- vapply(mb.pos, function(p) mean(abs(p - centres)), numeric(1))
    boot.means[rep.i] <- mean(avg.dist)
  }
  boot.means
}
# Bootstrap the mean nearest-mitochondrion distance in a 2-D cell.
#
# mass         : body mass, forwarded to mitdens.func.
# S            : grid positions per axis (S x S cell).
# Nb           : number of bootstrap replicates.
# N            : total grid cells handed to mitdens.func (use S^2).
# Nmb          : myoglobin molecules per replicate.
# mitdens.func : function(m, n) returning the mitochondrion count.
# Prints a progress line and returns a numeric vector of length Nb.
getMinDist2D <- function(mass, S, Nb, N, Nmb, mitdens.func) {
  n.mito <- mitdens.func(m = mass, n = N)
  cat("Mass:", mass, "; Nm:", n.mito, "; Nmb:", Nmb, "\n", sep = ' ')
  boot.means <- numeric(Nb)
  for (rep.i in seq_len(Nb)) {
    # Mitochondria on distinct grid cells; centres at (row, col) - 0.5.
    grid <- matrix(0, nrow = S, ncol = S)
    grid[sample(x = 1:(S^2), size = n.mito, replace = FALSE)] <- 1
    centres <- which(grid == 1, arr.ind = TRUE) - 0.5
    # Myoglobin positions uniform on [1, S] in each coordinate.
    mb.pos <- replicate(n = 2, expr = runif(n = Nmb, min = 1, max = S))
    # Euclidean distance to the closest mitochondrion centre.
    nearest <- vapply(
      seq_len(Nmb),
      function(j) min(sqrt((mb.pos[j, 1] - centres[, 1])^2 +
                           (mb.pos[j, 2] - centres[, 2])^2)),
      numeric(1)
    )
    boot.means[rep.i] <- mean(nearest)
  }
  boot.means
}
# Bootstrap the mean all-mitochondria distance in a 2-D cell.
# Identical to getMinDist2D except each Mb molecule contributes the MEAN
# distance to all mitochondrion centres rather than the minimum.
#
# Arguments as in getMinDist2D; prints a progress line and returns a
# numeric vector of length Nb.
getMeanDist2D <- function(mass, S, Nb, N, Nmb, mitdens.func) {
  n.mito <- mitdens.func(m = mass, n = N)
  cat("Mass:", mass, "; Nm:", n.mito, "; Nmb:", Nmb, "\n", sep = ' ')
  boot.means <- numeric(Nb)
  for (rep.i in seq_len(Nb)) {
    # Mitochondria on distinct grid cells; centres at (row, col) - 0.5.
    grid <- matrix(0, nrow = S, ncol = S)
    grid[sample(x = 1:(S^2), size = n.mito, replace = FALSE)] <- 1
    centres <- which(grid == 1, arr.ind = TRUE) - 0.5
    # Myoglobin positions uniform on [1, S] in each coordinate.
    mb.pos <- replicate(n = 2, expr = runif(n = Nmb, min = 1, max = S))
    # Mean Euclidean distance over all mitochondrion centres.
    avg.dist <- vapply(
      seq_len(Nmb),
      function(j) mean(sqrt((mb.pos[j, 1] - centres[, 1])^2 +
                            (mb.pos[j, 2] - centres[, 2])^2)),
      numeric(1)
    )
    boot.means[rep.i] <- mean(avg.dist)
  }
  boot.means
}
# Bootstrap the mean nearest-mitochondrion distance in a 3-D cell.
#
# mass         : body mass, forwarded to mitdens.func.
# S            : grid positions per axis (S x S x S cell).
# Nb           : number of bootstrap replicates.
# N            : total grid cells handed to mitdens.func (use S^3).
# Nmb          : myoglobin molecules per replicate.
# mitdens.func : function(m, n) returning the mitochondrion count.
# Prints a progress line and returns a numeric vector of length Nb.
getMinDist3D <- function(mass, S, Nb, N, Nmb, mitdens.func) {
  n.mito <- mitdens.func(m = mass, n = N)
  cat("Mass:", mass, "; Nm:", n.mito, "; Nmb:", Nmb, "\n", sep = ' ')
  boot.means <- numeric(Nb)
  for (rep.i in seq_len(Nb)) {
    # Mitochondria on distinct voxels; centres at (i, j, k) - 0.5.
    grid <- array(0, dim = c(S, S, S))
    grid[sample(x = 1:(S^3), size = n.mito, replace = FALSE)] <- 1
    centres <- which(grid == 1, arr.ind = TRUE) - 0.5
    # Myoglobin positions uniform on [0, S] per coordinate.
    # NOTE(review): the 1-D/2-D versions draw on [1, S]; confirm which
    # bound convention is intended.
    mb.pos <- replicate(n = 3, expr = runif(n = Nmb, min = 0, max = S))
    # Euclidean distance to the closest mitochondrion centre.
    nearest <- vapply(
      seq_len(Nmb),
      function(j) min(sqrt((mb.pos[j, 1] - centres[, 1])^2 +
                           (mb.pos[j, 2] - centres[, 2])^2 +
                           (mb.pos[j, 3] - centres[, 3])^2)),
      numeric(1)
    )
    # Average of the per-molecule minimum distances.
    boot.means[rep.i] <- mean(nearest)
  }
  boot.means
}
# Bootstrap the mean all-mitochondria distance in a 3-D cell.
# Identical to getMinDist3D except each Mb molecule contributes the MEAN
# distance to all mitochondrion centres rather than the minimum.
#
# Arguments as in getMinDist3D; prints a progress line and returns a
# numeric vector of length Nb.
getMeanDist3D <- function(mass, S, Nb, N, Nmb, mitdens.func) {
  n.mito <- mitdens.func(m = mass, n = N)
  cat("Mass:", mass, "; Nm:", n.mito, "; Nmb:", Nmb, "\n", sep = ' ')
  boot.means <- numeric(Nb)
  for (rep.i in seq_len(Nb)) {
    # Mitochondria on distinct voxels; centres at (i, j, k) - 0.5.
    grid <- array(0, dim = c(S, S, S))
    grid[sample(x = 1:(S^3), size = n.mito, replace = FALSE)] <- 1
    centres <- which(grid == 1, arr.ind = TRUE) - 0.5
    # Myoglobin positions uniform on [0, S] per coordinate.
    mb.pos <- replicate(n = 3, expr = runif(n = Nmb, min = 0, max = S))
    # Mean Euclidean distance over all mitochondrion centres.
    avg.dist <- vapply(
      seq_len(Nmb),
      function(j) mean(sqrt((mb.pos[j, 1] - centres[, 1])^2 +
                            (mb.pos[j, 2] - centres[, 2])^2 +
                            (mb.pos[j, 3] - centres[, 3])^2)),
      numeric(1)
    )
    # Average of the per-molecule mean distances.
    boot.means[rep.i] <- mean(avg.dist)
  }
  boot.means
}
# Run the bootstrap for every mass in 1-D, 2-D and 3-D. N (the total
# number of grid cells handed to the density function) grows as S^dim.
minD1 <- mclapply(X = masses, FUN = getMinDist1D, S = S, Nb = Nb, N = S,
                  Nmb = Nmb, mitdens.func = mitdens.allometry, mc.cores = 4)
minD2 <- mclapply(X = masses, FUN = getMinDist2D, S = S, Nb = Nb, N = S^2,
                  Nmb = Nmb, mitdens.func = mitdens.allometry, mc.cores = 4)
minD3 <- mclapply(X = masses, FUN = getMinDist3D, S = S, Nb = Nb, N = S^3,
                  Nmb = Nmb, mitdens.func = mitdens.allometry, mc.cores = 4)
meanD1 <- mclapply(X = masses, FUN = getMeanDist1D, S = S, Nb = Nb, N = S,
                   Nmb = Nmb, mitdens.func = mitdens.allometry, mc.cores = 4)
meanD2 <- mclapply(X = masses, FUN = getMeanDist2D, S = S, Nb = Nb, N = S^2,
                   Nmb = Nmb, mitdens.func = mitdens.allometry, mc.cores = 4)
meanD3 <- mclapply(X = masses, FUN = getMeanDist3D, S = S, Nb = Nb, N = S^3,
                   Nmb = Nmb, mitdens.func = mitdens.allometry, mc.cores = 4)

# Collapse the Nb bootstrap replicates for each mass to a single mean.
minD1 <- unlist(lapply(X = minD1, FUN = mean))
minD2 <- unlist(lapply(X = minD2, FUN = mean))
minD3 <- unlist(lapply(X = minD3, FUN = mean))
meanD1 <- unlist(lapply(X = meanD1, FUN = mean))
meanD2 <- unlist(lapply(X = meanD2, FUN = mean))
meanD3 <- unlist(lapply(X = meanD3, FUN = mean))

# Long-format results table. Named `dist.summary` (not `summary`) so the
# data frame does not shadow the base summary() generic used below.
dist.summary <- rbind(
  data.frame(mean.min.dist = minD1, mean.dist = meanD1, dimension = "D1", mass = masses),
  data.frame(mean.min.dist = minD2, mean.dist = meanD2, dimension = "D2", mass = masses),
  data.frame(mean.min.dist = minD3, mean.dist = meanD3, dimension = "D3", mass = masses))

# Mean nearest-mitochondrion distance vs mass, per dimensionality.
ggplot(data = dist.summary)+
  geom_point(aes(x = mass, y = mean.min.dist, col = dimension))+
  theme_bw()+
  scale_x_log10()+
  theme(legend.position = "top")+
  annotation_logticks(base = 10, sides = "b")

# Mean all-pairs distance vs mass, per dimensionality.
ggplot(data = dist.summary)+
  geom_point(aes(x = mass, y = mean.dist, col = dimension))+
  theme_bw()+
  scale_x_log10()+
  theme(legend.position = "top")+
  annotation_logticks(base = 10, sides = "b")

# Allometric scaling exponents: slope of log10(distance) ~ log10(mass).
summary(lm(log10(minD1) ~ log10(masses)))
summary(lm(log10(minD2) ~ log10(masses)))
summary(lm(log10(minD3) ~ log10(masses)))
|
3f2e714714612950c9d1e048ccba660bcb7ec606
|
570112006c0e12d873f6cfcdec7ee7d846f8250e
|
/ui.R
|
594f94285df262629d503065edfcf54148f2b99e
|
[] |
no_license
|
kitoogofred/Capstone-Project
|
8821f160ae94680828c3b6a44fd9aea6e9046140
|
5bd4657d1c91e70f516991263e45be36996e6b53
|
refs/heads/master
| 2021-09-02T09:13:39.887511
| 2018-01-01T09:37:08
| 2018-01-01T09:37:08
| 115,869,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,526
|
r
|
ui.R
|
## Final Capstone Project
## Data Science Specialization
## Fredrick Edward Kitoogo
#
# Shiny UI for a next-word prediction app: the user enters a phrase and
# chooses how many candidate words to show. The server side (not in this
# file) fills the `predictedWord1` text output and the `view` table.
library(shiny)
library(markdown)

shinyUI(fluidPage(
  titlePanel("Next Word Text Predicting App"),
  sidebarLayout(
    # Left column: instructions plus the two inputs the server reads
    # (`inputText`, `n`). submitButton defers reactivity until clicked.
    sidebarPanel(
      h4('Instructions'),
      h5('1. Enter a word or sentence.'),
      h5('2. Then get the next word predicted (best option and additional options)'),
      # Note: `label` is supplied by name below, so the positional NULL
      # here is matched to the remaining formal (`placeholder`), not to
      # `label` -- TODO(review): confirm this was intentional.
      textInput("inputText", NULL, value = "",
                width = "180px", label = "Text for Prediction"),
      numericInput("n",
                   h5("Number of options of predicted words"),
                   value = 1),
      submitButton("Predict"),
      br(),
      "The app is created by ",
      a("Kitoogo Fredrick Edward", href = "mailto:kitoogofred@gmail.com")
    ),
    # Right column: prediction outputs plus an About tab rendered from
    # markdown. The commented-out tabs are kept as in the original.
    mainPanel(
      tabsetPanel(type = "tabs",
                  tabPanel("Prediction",
                           h3("Best Predicted Word"),
                           span(h4(textOutput('predictedWord1')),style = "color:red"),
                           h3('Additional Predictions'),
                           h5('The other predictions based on the input and analysis are:'),
                           tableOutput("view")
                  ),
                  # tabPanel("Dataset"),
                  # tabPanel("Algorithm"),
                  # tabPanel("Algorithm", includeMarkdown("algorithm.md")),
                  tabPanel("About", includeMarkdown("about.Rmd"))
      )
    ))
))
|
b5dca15fd3505ec2788b39fd1c477a4696830c64
|
e0b0ddad937ce559aba8ce8e0ef2d5432ed839c9
|
/temp/real_fit_0707.R
|
d9d24f724602e8b29851fe198e4913e3dccc21fa
|
[] |
no_license
|
jenjong/RankConsistency
|
968b4dfb7f5a43b33ad8bc0a8d872dbb4d6a24f5
|
ad262789e833757e2437fb2423ca71e7ce89793c
|
refs/heads/master
| 2020-03-11T01:21:25.848962
| 2019-04-08T11:11:15
| 2019-04-08T11:11:15
| 129,689,557
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,574
|
r
|
real_fit_0707.R
|
# Real-data experiment: compare Bradley-Terry (BT), grouped BT (gBT) and
# SR1 rank estimators on horse-racing data over `sim.num` random
# train/test splits. Relies on a pre-fit workspace (.rdata) and helper
# functions sourced from ./lib, so it must be run from the project root.
# NOTE(review): rm(list = ls()) / setwd() in scripts are discouraged --
# kept here because the script depends on a clean workspace.
rm(list = ls())
gc()
# training code
# set path
if (Sys.info()[1] == "Linux") {
  setwd("/home/jeon/Documents/Github/RankConsistency")
} else {
  setwd('C:/Users/Jeon/Documents/GitHub/RankConsistency')
}
# load car segmentation
# Provides (at least) BT_est_rank, used to pick the item subset below.
load("Real_BT_gBT2_cv5_all_data.rdata")
# Keep only items whose BT-estimated rank falls in [i_1, i_2].
i_1 = 1
i_2 = 13
sel_idx = which(BT_est_rank >= i_1 & BT_est_rank <= i_2)
# library
library(MASS)
library(igraph)
library(glmnet)
# Project-local helpers: QmatFun, btFun, gbtFun, gbtFun_recov, sr1_fun,
# evalFun_1/2/3 are defined in these files.
source('./lib/car_lib.R')
source('./lib/lib_rank.R')
source('./lib/sim.R')
source('./lib/real_lib.R')
sim.num = 50
rdata <- read.csv('racing_data.csv', header = F)
# Duplicate the rows -- presumably to enlarge the sample; TODO confirm.
rdata = rbind(rdata, rdata)
n = nrow(rdata)
bt_est.list = gbt_est.list = sr1_est.list =
  vector(mode = 'list', length = sim.num)
# Training loop: one random 70% split per replicate. set.seed(i) before
# sample() makes each split reproducible so the test loop below can
# regenerate exactly the same s_idx.
for (i in 1:sim.num)
{
  set.seed(i)
  s_idx = sample(1:n, trunc(n*0.7))
  # training code
  # Columns 18:33 hold the race outcome matrix; V1 is the race size.
  race_mat <- as.matrix(rdata[s_idx, 18:33])
  num_vec <- rdata$V1[s_idx]
  Qmat_fit <- QmatFun(race_mat, num_vec, cut_var = 0,
                      p = 43, sel_idx)
  bt_est <- btFun(Qmat_fit)
  gbt_fit <- gbtFun(Qmat_fit, cut_v = 0, 'balance')
  # If the gBT fit failed, skip this replicate entirely; the list slots
  # stay NULL and the test loop skips the same i via its is.null check.
  if (is.null(gbt_fit$gbt_est)) next
  gbt_fit.result = gbt_fit$sc_list
  gbt_est = gbtFun_recov(gbt_fit.result, Qmat_fit,
                         method = 'count', allowties = F)
  sr1.result = sr1_fun(Qmat_fit)
  sr1_est = gbtFun_recov(sr1.result, Qmat_fit, method = 'count',
                         allowties = F)
  bt_est.list[[i]] = bt_est
  gbt_est.list[[i]] = gbt_est
  sr1_est.list[[i]] = sr1_est
}
# test procedure
# Evaluate each stored estimator on the held-out 30% of the same split
# (reconstructed with the same seed). vmat1/2/3 collect one row per
# replicate with the three estimators' scores under evalFun_1/2/3.
vmat1 = vmat2 = vmat3 = NULL
for (i in 1:sim.num)
{
  set.seed(i)
  s_idx = sample(1:n, trunc(n*0.7))  # identical to the training split
  race_mat <- as.matrix(rdata[-s_idx, 18:33])
  num_vec <- rdata$V1[-s_idx]
  Qmat_fit <- QmatFun(race_mat, num_vec, cut_var = 0,
                      p = 43, sel_idx)
  gbt_est = gbt_est.list[[i]]
  # Replicates skipped during training have NULL slots -- skip them here.
  if (is.null(gbt_est)) next
  bt_est = bt_est.list[[i]]
  sr1_est = sr1_est.list[[i]]
  v1 = evalFun_1(rdata[-s_idx,], bt_est, sel_idx)
  v2 = evalFun_1(rdata[-s_idx,], gbt_est, sel_idx)
  v3 = evalFun_1(rdata[-s_idx,], sr1_est, sel_idx)
  vmat1 = rbind(vmat1, c(v1,v2,v3))
  v1 = evalFun_2(rdata[-s_idx,], bt_est, sel_idx)
  v2 = evalFun_2(rdata[-s_idx,], gbt_est, sel_idx)
  v3 = evalFun_2(rdata[-s_idx,], sr1_est, sel_idx)
  vmat2 = rbind(vmat2, c(v1,v2,v3))
  v1 = evalFun_3(Qmat_fit, bt_est)
  v2 = evalFun_3(Qmat_fit, gbt_est)
  v3 = evalFun_3(Qmat_fit, sr1_est)
  vmat3 = rbind(vmat3, c(v1,v2,v3))
}
# Columns: a = BT, b = gBT, c = SR1.
boxplot(vmat1[,1:3], names = c("a",'b','c'))
boxplot(vmat2[,1:3], names = c("a",'b','c'))
boxplot(vmat3[,1:3], names = c("a",'b','c'))
colMeans(vmat1[,1:3])
colMeans(vmat2[,1:3])
colMeans(vmat3[,1:3])
|
02add9800ddb0bc5cc3e10cbddaf8649bf649589
|
620c119a14451f184885ba212c11a8b243a583f6
|
/data/facets/facet_data_prep.R
|
7243f4477cb4d420e8acabff6fcffbc2017b0432
|
[] |
no_license
|
matthewkling/cappa
|
12c7a47c3523675af5cbf5d994baf9bba49384d0
|
1ec5914cbe2990a7b317e1f4f8c15ad2a18a196b
|
refs/heads/master
| 2022-02-23T13:14:57.075256
| 2019-09-17T21:16:01
| 2019-09-17T21:16:01
| 139,507,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,415
|
r
|
facet_data_prep.R
|
# Prepare per-cell conservation-prioritization data for the CAPPA shiny
# app: load optimization results, attach land area from a raster
# template, and compute cumulative protected area along each ranking.
library(tidyverse)
library(ape)
library(raster)
library(phytools)
library(ggtree)
# raster also exports select(); make sure dplyr's version wins.
select <- dplyr::select

indir <- "e:/phycon/002_sxt/pred_s25km"
outdir <- "e:/phycon/results/pred_s25km"
setwd(outdir)

# Combine all data-frame elements of the optimization results into one
# table. For "backward" runs the rank order is reversed so that rank 1
# always means highest priority; `priority` rescales rank to (0, 1].
r <- readRDS("optimization_results.rds")
d <- r[sapply(r, class)=="data.frame"] %>%
  do.call("rbind", .) %>%
  group_by(taxonomy, lambda, algorithm, currency, era, slate) %>%
  mutate(rank = ifelse(algorithm=="backward", max(rank, na.rm=T)-rank+1, rank),
         priority = rank/max(rank)) %>%
  arrange(desc(rank)) %>%
  # `slope` is the per-step drop in `prop`, rescaled to [0, 1];
  # `marginal` is sign-flipped so larger = better.
  mutate(slope = lag(prop) - prop,
         slope = ifelse(is.na(slope), 0, slope),
         slope = scales::rescale(slope),
         marginal = -marginal)

# add data on land area
# Layer 2 of the CPAD/CCED raster holds land area per cell.
template <- stack("E:/phycon/data/cpad_cced/cpad_cced_raster_15km.tif")
land <- template[[2]] %>% rasterToPoints() %>% as.data.frame()
names(land)[3] <- "land_area"
d <- left_join(d, land)
# Mask land_area to cells that actually appear in the results, and write
# a template raster for the shiny app.
land <- left_join(land, d %>% ungroup() %>% dplyr::select(x, y) %>% distinct() %>% mutate(data=T)) %>%
  mutate(data=ifelse(is.na(data), F, T),
         land_area=ifelse(data, land_area, NA))
template <- template[[2]]
template[!is.na(template)] <- land$land_area
writeRaster(template, "e:/phycon/shiny/cappa/raster_template.tif", overwrite=T)

# Cumulative protected land area along each ranking.
d <- d %>%
  group_by(taxonomy, lambda, algorithm, currency, era, slate) %>%
  arrange(rank) %>%
  mutate(cum_area = cumsum(land_area))

# Security-function exponent used throughout the loop below.
lambda <- 1
# For each taxonomic "facet", assemble the app dataset: the tree and its
# polar layout, the site-by-taxon matrix, per-taxon conservation scores
# (R, V, C, B, MB) and per-cell diversity statistics, then save to .rds.
for(taxonomy in c("chrono", "phylo", "clade", "species")){
  # Copy into `tax` so filter() compares the column `taxonomy` against a
  # plain variable instead of against itself.
  tax <- taxonomy
  pd <- d %>%
    ungroup() %>%
    filter(algorithm=="forward", lambda==1, currency=="endemism",
           taxonomy==tax, slate=="existing") %>%
    arrange(id)
  cell <- pd$id[pd$rank==1]

  intactness <- raster("E:/phycon/data/intactness/intactness_15km.tif")
  con <- stack("E:/phycon/data/cpad_cced/cpad_cced_raster_15km.tif")
  names(con) <- c("con", "land")
  land <- con$land
  con <- con$con[]   # extract protection fraction as a plain vector
  xy <- as.data.frame(coordinates(land))

  # Conservation "security" curve: maps protected fraction x in [0, 1]
  # to a benefit in [0, 1]; larger lambda = more convex.
  security <- function(x, lambda=1){
    lambda <- 2^lambda
    (1-(1-x)^lambda)^(1/lambda)
  }

  setwd(indir)
  # Load the site-by-taxon matrix and matching tree for this facet.
  # Species-level facets use a star tree with unit branch lengths;
  # branch-level facets use the saved phylogenies.
  if(taxonomy=="endemic species"){
    diversity_mat <- readRDS("site_by_species.rds")
    diversity_mat <- diversity_mat[,readRDS("site_by_species_endemic.rds")]
    branch_lengths <- rep(1, ncol(diversity_mat))
    tree <- starTree(colnames(diversity_mat))
  }
  if(taxonomy=="species"){
    diversity_mat <- readRDS("site_by_species.rds")
    branch_lengths <- rep(1, ncol(diversity_mat))
    tree <- starTree(colnames(diversity_mat))
  }
  if(taxonomy=="otu"){
    diversity_mat <- readRDS("site_by_otu.rds")
    branch_lengths <- rep(1, ncol(diversity_mat))
    tree <- starTree(colnames(diversity_mat))
  }
  if(taxonomy=="chrono"){
    diversity_mat <- readRDS("site_by_branch.rds")
    tree <- readRDS("site_by_chrono_phylogeny.rds")
    branch_lengths <- tree$edge.length / sum(tree$edge.length)
  }
  if(taxonomy=="phylo"){
    diversity_mat <- readRDS("site_by_branch.rds")
    tree <- readRDS("site_by_phylo_phylogeny.rds")
    branch_lengths <- tree$edge.length / sum(tree$edge.length)
  }
  if(taxonomy=="clade"){
    diversity_mat <- readRDS("site_by_branch.rds")
    tree <- readRDS("site_by_phylo_phylogeny.rds")
    tree$edge.length <- rep(1, length(tree$edge.length))
    branch_lengths <- rep(1, length(tree$edge.length))
  }
  #diversity_mat <- readRDS(paste0(indir, "/site_by_branch.rds"))
  #tree <- readRDS(paste0(indir, "/site_by_chrono_phylogeny.rds"))
  #branch_lengths <- tree$edge.length / sum(tree$edge.length)

  if(taxonomy %in% c("clade", "chrono", "phylo")){
    # label internal edges by distinctive OTU pair
    # Build a human-readable label for every internal node: the two
    # alphabetically-first descendant OTUs plus the clade size.
    label_nodes <- function(tree){
      for(i in 1:tree$Nnode){
        clade <- extract.clade(tree, length(tree$tip.label)+i)
        # Root of the extracted clade = the node that is never a child.
        root_node <- setdiff(1:nrow(clade$edge), clade$edge[,2])
        if(length(clade$tip.label)==2) root_node <- 3
        children <- clade$edge[,2][clade$edge[,1]==root_node] %>%
          sapply(function(x){
            if(length(x)==0){
              clade$tip.label
            }else if(x<=length(clade$tip.label)){
              clade$tip.label[x]   # child is a tip: use its label
            }else{
              sort(extract.clade(clade, x)$tip.label)[1]  # first tip of subclade
            }
          }) %>%
          sort()
        label <- paste(paste(children, collapse=" & ")) %>%
          paste0(" (", length(clade$tip.label), "-OTU clade)")
        tree$node.label[i] <- label
      }
      return(tree)
    }
    tree <- label_nodes(tree)
    # One label per edge (tips keep tip labels, internal edges get the
    # node labels); use them as the matrix column names.
    tree$edge.label <- c(tree$tip.label, tree$node.label)[tree$edge[,2]]
    colnames(diversity_mat) <- tree$edge.label
  }

  ############## tree data ############
  # Convert the ggtree layout into segment endpoints in polar
  # coordinates (radius = depth, angle = tip position) for plotting.
  td <- fortify(tree)
  # parents
  for(i in 1:nrow(td)){
    td$x0[i] <- td$x[td$node==td$parent[i]][1]
    td$y0[i] <- td$y[td$node==td$parent[i]][1]
  }
  # elbows
  td$x1 <- td$x0
  td$y1 <- td$y
  # radius & angle
  td$r <- td$x
  td$r0 <- td$x0
  td$r1 <- td$x1
  td$a <- td$y / max(td$y) * 2 * pi
  td$a0 <- td$y0 / max(td$y) * 2 * pi
  td$a1 <- td$y1 / max(td$y) * 2 * pi
  # polar coordinates
  td$xp <- td$r * cos(td$a)
  td$xp0 <- td$r0 * cos(td$a0)
  td$xp1 <- td$r1 * cos(td$a1)
  td$yp <- td$r * sin(td$a)
  td$yp0 <- td$r0 * sin(td$a0)
  td$yp1 <- td$r1 * sin(td$a1)
  td <- select(td, label, xp:yp1)
  # Stack the radial and angular segments into one two-point-per-row table.
  td1 <- select(td, label, xp, yp, xp1, yp1)
  td2 <- select(td, label, xp0, yp0, xp1, yp1)
  names(td2) <- names(td1)
  td <- rbind(td1, td2)

  ########## asdf ###########
  # Restrict every layer to cells with at least one non-NA taxon value.
  inbounds <- apply(diversity_mat, 1, function(x) !all(is.na(x)))
  land <- land[inbounds]
  con <- con[inbounds]
  xy <- xy[inbounds,]
  intactness <- intactness[inbounds]
  # normalize occurrence probabilities by land area to correct edge effects
  diversity_mat <- apply(diversity_mat[inbounds,], 2, function(x) x*land)
  # normalize by range size to generate endemism
  endemism_mat <- apply(diversity_mat, 2, function(x) x/sum(x, na.rm=T))

  #ncol <- 100
  #ramp <- colorRampPalette(c("black", "blue", "red", "yellow"))(ncol)
  #plot(tree, edge.color=ramp[cut(cd$value, ncol)], show.tip.label=F, type="fan")
  #plot(rnorm(100), rnorm(100), col=ramp[cut(rnorm(100), ncol)])

  # Marginal conservation benefit
  # R: range size per taxon; V: branch-length value; C: range-weighted
  # mean protection; B: security of that protection level.
  R <- apply(diversity_mat, 2, sum, na.rm=T) # range sizes
  V <- branch_lengths
  C <- apply(diversity_mat, 2, function(p){
    x <- cbind(con, p) %>% na.omit()
    weighted.mean(x[,1], x[,2])})
  B <- security(C, lambda=lambda)
  # MB[cell, taxon]: branch-length-weighted gain in security if the
  # remaining unprotected fraction of that cell were protected.
  MB <- apply(diversity_mat, 2, function(p) p*(1-con)) %>% # p gain
    apply(1, function(x) x/R) %>% t() %>% # gain as percentage of range size
    apply(1, function(x) C + x) %>% t() %>% # resulting C
    apply(c(1,2), security, lambda=lambda) %>% # resulting B
    apply(1, function(x) x-B) %>% t() %>%
    apply(1, function(x) x*V) %>% t()
  marginal <- apply(MB, 1, sum)

  ##### DIVERSITY STATISTICS #####
  # D: Diversity
  D <- apply(diversity_mat, 1, sum, na.rm=T)
  # PD: Phylogenetic diversity
  #V <- branch_lengths
  V[V==Inf] <- max(V[V!=Inf])   # cap infinite branch values
  PD <- apply(diversity_mat, 1, function(p) sum(p * V, na.rm=T))
  # E: Endemism, i.e. total endemic diversity, aka WE
  #R <- apply(diversity_mat, 2, sum, na.rm=T) # range sizes
  E <- apply(diversity_mat, 1, function(p) sum(p / R, na.rm=T))
  # PE: Phylogenetic endemism
  PE <- apply(diversity_mat, 1, function(p) sum(p * V / R, na.rm=T))
  # Em: Mean endemism
  # The following derivation is equivalent to ED / D
  Em <- apply(diversity_mat, 1, function(p) weighted.mean(1/R, w=p, na.rm=T))
  # PDm: Mean phylogenetic diversity, i.e. branch length of mean resident
  # The following derivation is equivalent to PD / D
  PDm <- apply(diversity_mat, 1, function(p) weighted.mean(V, w=p, na.rm=T))
  # PEm: Mean phylogenetic endemism, i.e. branch length / range size of mean resident
  # The following derivation is equivalent to PE / D
  PEm <- apply(diversity_mat, 1, function(p) weighted.mean(V / R, w=p, na.rm=T))
  # Mean branch length of the endemics
  BEm <- apply(diversity_mat, 1, function(p) weighted.mean(V, w=p/R, na.rm=T))

  # Attach the per-cell statistics to the prioritization table and
  # bundle everything the shiny app needs.
  pd <- pd %>%
    ungroup() %>%
    select(id, x, y, land_area, con, rank) %>%
    mutate(int=intactness, marginal=marginal,
           D=D, PD=PD, E=E, PE=PE, Em=Em, PDm=PDm, PEm=PEm, BEm=BEm)

  data <- list(tree=tree,
               tree_data=td,
               diversity_mat=diversity_mat,
               pd=pd,
               R=R, V=V, C=C, B=B, MB=MB)
  saveRDS(data, paste0("e:/phycon/shiny/cappa/data/facets/data_", taxonomy, ".rds"))
  #saveRDS(data, paste0("e:/phycon/shiny/sandbox/data_", taxonomy, ".rds"))

  # split this one to get under github size limit
  if(taxonomy == "species"){
    saveRDS(data[1:8], paste0("e:/phycon/shiny/cappa/data/facets/data_", taxonomy, "1.rds"))
    saveRDS(data[9], paste0("e:/phycon/shiny/cappa/data/facets/data_", taxonomy, "2.rds"))
  }
}
|
2a1de5549d084d47f1ccb9cfa6f73a97c2c1a0e1
|
bae1ceee3b44cf4816e329bc0322540c7524770f
|
/R/New_NEMURO Analysis.R
|
f811139a3f4ffae39d38dcf7316c29341e60f300
|
[] |
no_license
|
tbrycekelly/Lateral-connectivity-in-the-gom
|
bc8c71a209e8501b9e4092afb4b331c99db0c451
|
4259cd01af5ad09db283b838cd2cc2dfb385a431
|
refs/heads/master
| 2023-04-10T16:26:58.463885
| 2021-04-07T19:36:14
| 2021-04-07T19:36:14
| 321,499,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,844
|
r
|
New_NEMURO Analysis.R
|
# Dependencies: TheSource/ncdf4 plus project-local helper functions
# (load.nc, load.lightfield, load.physics, load.river, ...).
library(TheSource)
library(ncdf4)
source('RX/source.r')
source('RX/NEMURO.source.r')

## Load in a NEMURO File
# Anchor the pattern so only true *.nc files match: an unanchored '.nc'
# is a regex in which '.' matches any character. Spell out TRUE (not T).
raw.files = list.files('Z:/Data/Taylor/gom_data/may_runs_nemuro_gom', pattern = '\\.nc$', full.names = TRUE, recursive = TRUE)
raw.files = raw.files[!grepl('raw', raw.files)]  # drop "raw" runs
raw.files = raw.files[grepl('2002', raw.files)]  # keep only 2002 output
for (i in 1:length(raw.files)) {
message()
## Datetime Info
year = strsplit(raw.files[i], '/')[[1]][6]
month = substr(strsplit(raw.files[i], '/')[[1]][7], 8,9)
day = substr(strsplit(raw.files[i], '/')[[1]][7], 10,11)
out.file = paste0('Z:/Data/Taylor/_rdata/NEMURO.', year, '.', month, '.', day, '.rdata')
if (file.exists(out.file)) {
message(Sys.time(), ': File exists for ', raw.files[i], '. Skipping.')
} else {
message(Sys.time(), ': Loading Data for ', month, '/', day, '/', year, ' (', i, '/', length(raw.files), ')')
## Load and build domain
nemuro = load.nc(raw.files[i])
nemuro$domain = list(Year = as.numeric(year),
Month = as.numeric(month),
Day = as.numeric(day),
XC = nemuro$XC,
YC = nemuro$YC,
ZC = nemuro$ZC)
delta = as.numeric(difftime(conv.time.matlab(nemuro$TC)[2], conv.time.matlab(nemuro$TC)[1], units = 'days'))
## Load external files
nemuro.light = load.lightfield(nemuro, paste0('Z:/Data/Taylor/gom_data/initial_condition_files/ini', year, '/light_data.bin'))
physics = load.physics(nemuro,
U = paste0('Z:/Data/Taylor/gom_data/may_runs_nemuro_gom/', year, '/forcing_', nemuro$domain$Year, '/uvel/u.00', delta, '.data'),
V = paste0('Z:/Data/Taylor/gom_data/may_runs_nemuro_gom/', year, '/forcing_', nemuro$domain$Year, '/vvel/v.00', delta, '.data'),
W = paste0('Z:/Data/Taylor/gom_data/may_runs_nemuro_gom/', year, '/forcing_', nemuro$domain$Year, '/wvel/w.00', delta, '.data'),
S = paste0('Z:/Data/Taylor/gom_data/may_runs_nemuro_gom/', year, '/forcing_', nemuro$domain$Year, '/sal/s.00', delta, '.data'),
T = paste0('Z:/Data/Taylor/gom_data/may_runs_nemuro_gom/', year, '/forcing_', nemuro$domain$Year, '/tmp/t.00', delta, '.data'))
## TODO Load river
river = load.river(dis.file = paste0('Z:/Data/Taylor/gom_data/initial_condition_files/ini', year, '/riv_dis_data.bin'),
nut.file = paste0('Z:/Data/Taylor/gom_data/initial_condition_files/ini', year, '/riv_nutr_data.bin'))
nemuro$river = river[,,delta] ## Already transposed!
message(Sys.time(), ': Compiling dataset. ', appendLF = F)
k = c(1:20)
#message('Unstaggering U, V, W... ', appendLF = F)
nemuro$U = physics$U[,,k]
nemuro$V = physics$V[,,k]
nemuro$W = physics$W[,,k]
nemuro$S = physics$S[,,k]
nemuro$T = physics$T[,,k]
rm(physics)
## DIN
nemuro$DIN = nemuro$NO[,,k] + nemuro$NH[,,k]
nemuro$NH = NULL
nemuro$NH = NULL
nemuro$SI = NULL
## DON
nemuro$DON = nemuro$DON[,,k]
## PON
nemuro$PON = nemuro$PON[,,k]
nemuro$OP = NULL
##Org
nemuro$Org = nemuro$SP[,,k] + nemuro$LP[,,k] + nemuro$SZ[,,k] + nemuro$LZ[,,k] + nemuro$PZ[,,k]
nemuro$SP = NULL
nemuro$LP = NULL
nemuro$SZ = NULL
nemuro$LZ = NULL
nemuro$PZ = NULL
nemuro$CHL = NULL
## Conditions
nemuro$S = NULL
## Domain
nemuro$domain$grid = expand.grid(lon = nemuro$XC, lat = nemuro$YC)
nemuro$ZC = nemuro$ZC[k]
nemuro$domain$ZC = nemuro$domain$ZC[k]
dx = matrix(0, nrow = length(nemuro$XC), ncol = length(nemuro$YC))
dy = matrix(0, nrow = length(nemuro$XC), ncol = length(nemuro$YC))
for (i in 1:length(nemuro$XC)) {
for (j in 1:length(nemuro$YC)) {
if (i == 1) {
dx[i,j] = geosphere::distCosine(p1 = c(nemuro$XC[2], nemuro$YC[j]), p2 = c(nemuro$XC[1], nemuro$YC[j]))
} else {
dx[i,j] = geosphere::distCosine(p1 = c(nemuro$XC[i], nemuro$YC[j]), p2 = c(nemuro$XC[i-1], nemuro$YC[j]))
}
if (j == 1) {
dy[i,j] = geosphere::distCosine(p1 = c(nemuro$XC[i], nemuro$YC[2]), p2 = c(nemuro$XC[i], nemuro$YC[1]))
} else {
dy[i,j] = geosphere::distCosine(p1 = c(nemuro$XC[i], nemuro$YC[j]), p2 = c(nemuro$XC[i], nemuro$YC[j-1]))
}
}
}
nemuro$domain$dx = dx
nemuro$domain$dy = dy
nemuro$Mask = !is.na(nemuro$DIN)
lx = c(1:length(nemuro$XC))
llx = c(1, 1:length(nemuro$XC[-1]))
ly = c(1:length(nemuro$YC))
lly = c(1, 1:length(nemuro$YC[-1]))
dz = diff(nemuro$ZC)
# PON
nemuro$PON.Flux.U = nemuro$PON
nemuro$PON.Flux.V = nemuro$PON
nemuro$PON.Flux.W = nemuro$PON
nemuro$PON.Flux.S = nemuro$PON
# DON
nemuro$DON.Flux.U = nemuro$PON
nemuro$DON.Flux.V = nemuro$PON
nemuro$DON.Flux.W = nemuro$PON
#DIN
nemuro$DIN.Flux.U = nemuro$PON
nemuro$DIN.Flux.V = nemuro$PON
nemuro$DIN.Flux.W = nemuro$PON
# Org
nemuro$Org.Flux.U = nemuro$PON
nemuro$Org.Flux.V = nemuro$PON
nemuro$Org.Flux.W = nemuro$PON
# Water
nemuro$Water.Flux.U = nemuro$PON
nemuro$Water.Flux.V = nemuro$PON
nemuro$Water.Flux.W = nemuro$PON
for (d in 1:length(nemuro$ZC)) {
if (d == 1) { dd = d } else { dd = d-1 }
# PON
nemuro$PON.Flux.U[,,d] = 0.5 * (nemuro$PON[lx,ly,d] + nemuro$PON[lx,lly,d]) * nemuro$U[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dy[lx,ly] + nemuro$domain$dy[lx,lly]) # C * vel * area = mmol N d-1
nemuro$PON.Flux.V[,,d] = 0.5 * (nemuro$PON[lx,ly,d] + nemuro$PON[llx,ly,d]) * nemuro$V[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dx[lx,ly] + nemuro$domain$dx[llx,ly]) # C * vel * area = mmol N d-1
nemuro$PON.Flux.W[,,d] = 0.5 * (nemuro$PON[,,dd] + nemuro$PON[,,d]) * nemuro$W[,,d] * 86400 * nemuro$domain$dy * nemuro$domain$dx # C * vel * area = mmol N d-1
if (d == length(nemuro$ZC)) {
nemuro$PON.Flux.S[,,d] = 0 ## No sinking out of last cell.
} else {
nemuro$PON.Flux.S[,,d] = nemuro$PON[,,d] * 15 * nemuro$domain$dy * nemuro$domain$dx * nemuro$Mask[,,d+1]
}
# DON
nemuro$DON.Flux.U[,,d] = 0.5 * (nemuro$DON[lx,ly,d] + nemuro$DON[lx,lly,d]) * nemuro$U[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dy[lx,ly] + nemuro$domain$dy[lx,lly]) # C * vel * area = umol N d-1
nemuro$DON.Flux.V[,,d] = 0.5 * (nemuro$DON[lx,ly,d] + nemuro$DON[llx,ly,d]) * nemuro$V[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dx[lx,ly] + nemuro$domain$dx[llx,ly]) # C * vel * area = umol N d-1
nemuro$DON.Flux.W[,,d] = 0.5 * (nemuro$DON[,,dd] + nemuro$DON[,,d]) * nemuro$W[,,d] * 86400 * nemuro$domain$dy * nemuro$domain$dx # C * vel * area = umol N d-1
# Org
nemuro$Org.Flux.U[,,d] = 0.5 * (nemuro$Org[lx,ly,d] + nemuro$Org[lx,lly,d]) * nemuro$U[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dy[lx,ly] + nemuro$domain$dy[lx,lly]) # C * vel * area = umol N d-1
nemuro$Org.Flux.V[,,d] = 0.5 * (nemuro$Org[lx,ly,d] + nemuro$Org[llx,ly,d]) * nemuro$V[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dx[lx,ly] + nemuro$domain$dx[llx,ly]) # C * vel * area = umol N d-1
nemuro$Org.Flux.W[,,d] = 0.5 * (nemuro$Org[,,dd] + nemuro$Org[,,d]) * nemuro$W[,,d] * 86400 * nemuro$domain$dy * nemuro$domain$dx # C * vel * area = umol N d-1
# DIN
nemuro$DIN.Flux.U[,,d] = 0.5 * (nemuro$DIN[lx,ly,d] + nemuro$DIN[lx,lly,d]) * nemuro$U[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dy[lx,ly] + nemuro$domain$dy[lx,lly]) # C * vel * area = umol N d-1
nemuro$DIN.Flux.V[,,d] = 0.5 * (nemuro$DIN[lx,ly,d] + nemuro$DIN[llx,ly,d]) * nemuro$V[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dx[lx,ly] + nemuro$domain$dx[llx,ly]) # C * vel * area = umol N d-1
nemuro$DIN.Flux.W[,,d] = 0.5 * (nemuro$DIN[,,dd] + nemuro$DIN[,,d]) * nemuro$W[,,d] * 86400 * nemuro$domain$dy * nemuro$domain$dx # C * vel * area = umol N d-1
# water
nemuro$Water.Flux.U[,,d] = nemuro$U[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dy[lx,ly] + nemuro$domain$dy[lx,lly]) # m3 d-1
nemuro$Water.Flux.V[,,d] = nemuro$V[,,d] * 86400 * dz[d] * 0.5 * (nemuro$domain$dx[lx,ly] + nemuro$domain$dx[llx,ly]) # m3 d-1
nemuro$Water.Flux.W[,,d] = nemuro$W[,,d] * 86400 * nemuro$domain$dy * nemuro$domain$dx
}
message(Sys.time(), ': Saving to file ', out.file)
save(nemuro, file = out.file, compression_level = 1)
}
}
|
f08daa021f39d8ab1fbf1de4de2885a39636fc19
|
8e08e6016b00581ee99bb6fda6646206013cafff
|
/pollutantmean.R
|
e5c896c70921ca3441094625660fc8fcfb4686f5
|
[] |
no_license
|
LemonCanon/datasciencecoursera
|
00b47d74bbcc54980d7e0eb1315459b46c913057
|
c86c33aa0a8e93859afb30d8cac0dd276fd44b52
|
refs/heads/master
| 2021-01-10T16:57:43.942356
| 2016-02-19T14:29:40
| 2016-02-19T14:29:40
| 51,541,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
pollutantmean.R
|
pollutanmean <- function(directory, polutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files.
  ## 'polutant' is a character vector of length 1 indicating
  ## the name of the pollutant for which we will calculate the
  ## mean; either "sulfate" or "nitrate".
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used.
  ## Returns the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values), unrounded.
  ## (Function name kept as-is, including the historical typo, so
  ## existing callers keep working.)
  ##--------------------------------------------------------------------------
  # Collect per-monitor values in a preallocated list and combine once at the
  # end -- growing a vector with c() inside the loop is O(n^2).
  values <- vector("list", length(id))
  for (i in seq_along(id)) {
    # Monitor files are zero-padded to three digits, e.g. 1 -> "001.csv".
    # sprintf() replaces the previous stringr::str_pad() third-party dependency.
    datatarget <- file.path(directory, sprintf("%03d.csv", id[i]))
    monitor <- read.csv(datatarget)
    # [[ extracts the column as a plain vector (the old data[polutant]
    # returned a one-column data frame that was then matrix-subsetted).
    column <- monitor[[polutant]]
    values[[i]] <- column[!is.na(column)]
  }
  all_values <- unlist(values, use.names = FALSE)
  # Empty 'id' yields NULL from unlist(); normalize so mean() returns NaN
  # exactly like the original mean(numeric()) did.
  if (is.null(all_values)) all_values <- numeric(0)
  mean(all_values)
}
|
3d6c04b7549a7399fc2a09ec0f2fdd76d75eed98
|
056f795d9a0d8b602de4399426fb82b4583f3c34
|
/Crime_SPC/Scripts/Projections/2_Simulations_for_Remainder_of_Year.R
|
e65f4a4f12dd9ae570c6ac08c0461a9bdd8717d5
|
[] |
no_license
|
RPD-OBI1/Crime_SPC
|
c25b9e7bf477fffde84e2b817ad4c95a1076f16f
|
7a525489dd77c26e182e38fd9c67bac8c39471b4
|
refs/heads/master
| 2022-09-02T14:29:15.699779
| 2022-07-21T14:56:55
| 2022-07-21T14:56:55
| 194,919,015
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,760
|
r
|
2_Simulations_for_Remainder_of_Year.R
|
### Runs the prepared simulation functions to project crime counts for the
### remainder of the year.
# The working directory must be the project root containing the folders
# "Data", "Functions" and "Scripts" (raw data, prepared functions, scripts).
# setwd("YOUR_PATH/Crime_SPC/") # This needs to be set if using scripts independently of the "Run_All....R" files
source("Functions/Functions.R")
library(dplyr)

# Small helper so every input table is read with identical settings.
load_table <- function(path) {
  read.csv(path, header = TRUE, stringsAsFactors = TRUE)
}

# Tables produced by earlier pipeline steps, saved in the "Data" folder.
# Current-year weekly aggregates:
section_current_weekly_aggregates  <- load_table("Data/Projections/Current_Weekly_Section.csv")
citywide_current_weekly_aggregates <- load_table("Data/Projections/Current_Weekly_Citywide.csv")

# Historical weekly proportions and averages:
section_weekly_props  <- load_table("Data/Data_Prep/Weekly_Proportions_Data_Section.csv")
citywide_weekly_props <- load_table("Data/Data_Prep/Weekly_Proportions_Data_Citywide.csv")

# Simulate the remainder of the year using the average method (the median
# method is only recommended when many years of data are available).
Section_Simulations <- Create_Simulations(
  Current_Year_Weekly_Aggregation_File = section_current_weekly_aggregates,
  Weekly_Props_and_Avgs_File = section_weekly_props,
  number_of_simulations = 1000,
  method = "average"
)
Citywide_Simulations <- Create_Simulations(
  Current_Year_Weekly_Aggregation_File = citywide_current_weekly_aggregates,
  Weekly_Props_and_Avgs_File = citywide_weekly_props,
  number_of_simulations = 1000,
  method = "average"
)

# Persist the simulation tables to the "Data" folder for later steps.
write.csv(x = Section_Simulations,
          file = "Data/Projections/Simulations_Section.csv",
          row.names = FALSE)
write.csv(x = Citywide_Simulations,
          file = "Data/Projections/Simulations_Citywide.csv",
          row.names = FALSE)
|
bae5423d09f53e3a50dc02b59a1059c892cbf779
|
5722c4e256abf33f361b571047d9b1074a85cbaf
|
/figures/svpop-figures.R
|
d2bf1e606734f35404d40c4f9a324a77c824cf21
|
[
"CC0-1.0",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmonlong/manu-vgsv
|
f4ea0ce4d12b7165f3c72ac1b26da0e7ed734cc6
|
525c3717f15cb2257f0b8c7e101b2fa6afa567d4
|
refs/heads/master
| 2022-04-06T16:53:14.704254
| 2020-02-24T05:40:06
| 2020-02-24T05:40:06
| 166,314,077
| 9
| 2
|
NOASSERTION
| 2019-10-23T21:18:13
| 2019-01-17T23:52:55
|
HTML
|
UTF-8
|
R
| false
| false
| 3,165
|
r
|
svpop-figures.R
|
# Figures for the SVPOP evaluation: precision-recall curves and best-F1 bar
# plots comparing SV genotyping methods, plus markdown summary tables.
library(ggplot2)
library(dplyr)
library(ggrepel)
library(knitr)
# Brings in pal.tools (method color palette) and the helpers used below
# (relabel, prf, zoomgp) -- presumably defined in colors-functions.R; confirm.
source('colors-functions.R')
## HG00514/HG00733/NA19240 reads, but processed with 15-sample graph of SMRT-SV v2 discovery data from Audano et. al 2019
## There are no genotypes for this data, but there are a few inversions
## Method names and renaming vector to fit color palette
methods = c('vg','smrtsv')
methconv = c(vg='vg', smrtsv='SMRT-SV v2 Genotyper', paragraph='Paragraph')
## Read evaluation results
pr.df = read.table('data/human-merged-prcurve.tsv', as.is=TRUE, header=TRUE)
## Keep SVPOP experiment only and polish data.frame
pr.df$method = factor(methconv[pr.df$method], levels=names(pal.tools))
pr.df = pr.df %>% filter(grepl('svpop', exp), type!='Total', !is.na(method), min.cov==.5,
                         region%in%c('all', 'nonrep')) %>% arrange(qual)
pr.df = relabel(pr.df)
## Merge samples: sum the TP/FP/FN counts across samples, then recompute
## precision/recall/F1 from the pooled counts with prf().
pr.df = pr.df %>% group_by(type, qual, method, region, eval) %>%
  select(TP, TP.baseline, FN, FP) %>% summarize_all(sum)
pr.df = prf(pr.df)
## One row per (region, method, type, eval): the quality threshold with best F1.
label.df = pr.df %>% group_by(region, method, type, eval) %>% arrange(desc(F1)) %>% do(head(.,1))
pdf('pdf/svpop.pdf', 8, 4)
zoomgp(subset(pr.df, eval=='presence'), subset(label.df, eval=='presence'),
       zoom.xy=.6, zoom.br=.1, annot=TRUE, zout.only=TRUE)
dev.off()
## Bar plots with best F1
pdf('pdf/svpop-best-f1.pdf', 8, 4)
label.df %>%
  ggplot(aes(x=region, y=F1, fill=method, alpha=eval, group=method)) +
  geom_bar(stat='identity', position=position_dodge()) +
  facet_grid(type~.) +
  scale_fill_manual(values=pal.tools) +
  scale_alpha_manual(name='SV evaluation', values=c(.5,1)) +
  theme_bw() + ylim(0,1) +
  labs(x='Genomic regions', y='Best F1', fill='Method') +
  theme()
dev.off()
## Markdown table of the best-F1 operating points.
label.df %>% filter(!is.na(F1)) %>%
  ungroup %>%
  select(method, region, type, TP.baseline, FP, FN, precision, recall, F1) %>%
  arrange(method, region, type) %>%
  kable(digits=3) %>%
  cat(file='tables/svpop.md', sep='\n')
##
## Regional analysis
## Regions: all, repeats, non-repeats, called in SMRT-SV v2, not called in SMRT-SV
##
## Re-read the raw results: this section restricts to sample HG00514 and drops
## Paragraph, but keeps all regions instead of just all/nonrep.
pr.df = read.table('data/human-merged-prcurve.tsv', as.is=TRUE, header=TRUE)
## Keep SVPOP experiment only and polish data.frame
pr.df$method = factor(methconv[pr.df$method], levels=names(pal.tools))
pr.df = pr.df %>% filter(exp=='svpop', type!='Total', sample=='HG00514', !is.na(method),
                         method!='Paragraph', min.cov==.5) %>%
  arrange(qual)
pr.df = relabel(pr.df)
label.df = pr.df %>% group_by(region, method, type, eval) %>% arrange(desc(F1)) %>% do(head(.,1))
pdf('pdf/svpop-regions.pdf', 8, 5)
pr.df %>% filter(type!='INV', eval=='presence') %>%
  ggplot(aes(x=recall, y=precision, colour=region)) +
  geom_path(size=1, alpha=.9) +
  theme_bw() +
  labs(x='Recall', y='Precision', color='Genomic regions') +
  facet_grid(method~type) +
  scale_colour_brewer(palette='Set1')
dev.off()
## Markdown table of best-F1 operating points per region.
label.df %>% filter(!is.na(F1)) %>%
  ungroup %>%
  select(method, region, type, TP.baseline, FP, FN, precision, recall, F1) %>%
  arrange(method, region, type) %>%
  kable(digits=3) %>%
  cat(file='tables/svpop-regions.md', sep='\n')
|
8fd4c6a0b0e041ed54a6ba44f231dc21b1151484
|
bf0af8599a868c4c066caf37630d22bf31135ea3
|
/man/find_last_commit.Rd
|
78c8a71be3e19784ff7c0ff31434bea0fb88247b
|
[
"MIT"
] |
permissive
|
lorenzwalthert/gitsum
|
5e49f0e8ac4344832b1f6ae6baedbe91c04812fa
|
33555bc87854212f4f37a37ab9c22c043e9b85eb
|
refs/heads/master
| 2021-01-21T15:19:22.677699
| 2021-01-16T17:14:09
| 2021-01-16T17:14:09
| 91,839,782
| 50
| 7
|
MIT
| 2021-01-16T17:14:10
| 2017-05-19T19:41:04
|
R
|
UTF-8
|
R
| false
| true
| 485
|
rd
|
find_last_commit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{find_last_commit}
\alias{find_last_commit}
\title{Find the last commit in a repo}
\usage{
find_last_commit(path = ".", update_dump = FALSE)
}
\arguments{
\item{path}{The path to the git directory one wants to create summaries for.}
\item{update_dump}{Whether or not to update the dump in .gitsum after
parsing.}
}
\description{
Finds the last commit in a repo and optionally updates the dump.
}
|
c3608de129d01fef44c725190b2f8765f4d62fa3
|
2bcebe837e572741007abe14f95150ba4cdb6de8
|
/VisionCredit/www/Scripts/varcuant.R
|
98a0d32f9135ff92f131d1c52c25f80e3efeaa6b
|
[] |
no_license
|
synergyvision/Riesgo-de-Credito
|
cdeda5e113d6784d38ce33eed16d105857cfc1ef
|
8add90a8389d50ef224cc30ceca1df9d05e81f49
|
refs/heads/master
| 2021-04-25T08:04:59.738160
| 2020-04-04T01:14:08
| 2020-04-04T01:14:08
| 122,198,136
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 672
|
r
|
varcuant.R
|
# Select candidate predictor columns, then drop those whose distribution does
# not differ between the two credit classes (two-sample Kolmogorov-Smirnov).
library(readr)
datos <- read_csv("~/Riesgo_de_Credito/App/appTemplate/data/datos_completos.csv")
D <- datos

# Column-selection rule. The original `if (summary(as.factor(D[[i]])) <= 10)`
# passed a whole vector of per-level counts to if(), which is an error in
# R >= 4.2; only the first element was ever consulted, so that behavior is
# made explicit here ([1]).
# NOTE(review): the intent is probably "quantitative variable" (many distinct
# values, hence small per-level counts); if so, a clearer test would be
# `length(unique(D[[i]])) > 10` -- confirm before changing selection behavior.
selected <- vapply(
  seq_along(datos),
  function(i) summary(as.factor(D[[i]]))[1] <= 10,
  logical(1)
)
M <- which(selected)
M
D1 <- D[, c(1, M)]
D1

# Per-variable KS test: compare the variable's distribution between
# Creditability == 0 and Creditability == 1 (column 1 is Creditability,
# so the loop starts at column 2).
pval <- NULL
nomb <- colnames(D1)
for (i in 2:length(nomb)) {
  df1 <- D1[, c(1, i)]
  d0 <- subset(df1, Creditability == 0)
  d1 <- subset(df1, Creditability == 1)
  p1 <- d0[[2]]
  p2 <- d1[[2]]
  w <- ks.test(p1, p2)
  pval[i] <- w$p.value
}
pval <- t(pval)

# Variables with p > .05 show no distributional difference between classes
# and are removed from the final data set.
vd <- nomb[which(pval > 0.05)]
j <- colnames(datos)
inf <- data.frame(pval)
colnames(inf) <- nomb
final <- datos[, !(j %in% vd)]
|
02146658dca3113691087271850485cede2502f7
|
8911516df63a06e0dd7795a26ff14ab1bcef0171
|
/sim.R
|
277793c906ed61686b7c8c6b2049e7915b386aba
|
[] |
no_license
|
AnieBee/ACES
|
1a85c5e6eca47041df5076f03c8e8f370ef3a4ac
|
dab8dfabd9c580ce3a20e7b25ebbe31ef7a7f86d
|
refs/heads/master
| 2021-01-20T05:26:37.118358
| 2017-04-29T12:03:45
| 2017-04-29T12:03:45
| 89,782,606
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,060
|
r
|
sim.R
|
# Install BayesFactor only when it is missing -- the original unconditionally
# re-installed (and recompiled from source) on every run. dependencies = TRUE
# is spelled out: T is an ordinary variable and can be reassigned.
if (!requireNamespace("BayesFactor", quietly = TRUE)) {
  install.packages("BayesFactor", repos = "https://cloud.r-project.org/",
                   type = "source", dependencies = TRUE)
}
library(BayesFactor)
options(scipen = 1)

## Simulation design ------------------------------------------------------
N <- c(20, 40, 80)       # sample size per group, index with n
ES <- c(0, .2, .5)       # effect size, index with e (is mean, sd is always 1)
Trails <- c(2, 3, 5)     # number of trials ("Trails" name kept for downstream code), index with t
D <- c(-.2, 0, .2)       # inferiority-margin delta, index with d
Runs <- 500              # simulation runs, index with r
IntervalPoint <- c(1, 2) # 1 for interval null, 2 for point null
# Result container; first dimension = lowest/highest BF01 found by optim
# over the prior scale (lowest is 1).
LowHighBF01 <- array(NA, dim = c(2, length(IntervalPoint), length(N), length(ES),
                                 length(Trails), length(D), Runs))
# Optim objective: point-null BF01 as a function of the prior scale ------
# (No margin shift is needed here: point BFs are only computed when D[d] = 0.)
RPointVary <- function(data, par) {
  # Directional (one-sided, effect > 0) Bayes factor for the alternative;
  # invert it to obtain BF01 in favor of the point null.
  directional_bf10 <- as.vector(
    ttestBF(x = data[, 1], y = data[, 2], nullInterval = c(0, Inf), rscale = par)
  )[1]
  1 / directional_bf10
}
## Simulation 1 -----
# NOTE(review): d starts at 3, so only the last margin (0.2) is simulated;
# this looks like a resumed run -- use seq_along(D) for the full design.
for(d in 3:length(D)){
  # Optim objective: interval-null BF01 given margin D[d] ------
  # Fix applied per the author's own in-line CORRECTION note: only the
  # treatment arm is shifted by the margin, so the alternative tests
  # (x - y) > D[d]. Shifting both arms (the old code) cancelled the margin.
  RInterVary <- function(data, par){
    BF <- ttestBF(x = (data[,1] - D[d]), y = data[,2], nullInterval = c(0, Inf), rscale = par)
    as.vector(BF[2]/BF[1])[1]
  }
  #----
  for(e in 1:length(ES)){
    for(t in 1:length(Trails)){
      for(n in 1:length(N)){
        cat("N = ", N[n], ", ", "Trails = ", Trails[t], ", ", "ES = ", ES[e], ", ", "D = ", D[d], "\n ")
        for(r in 1:Runs){
          # Rejection sampling: keep only datasets where exactly 2 trials are
          # one-sided significant at .025 and none is significant in the
          # opposite direction.
          p = rep(0.5, Trails[t])
          while(!(sum(p < .025) == 2 & sum(p > .975) == 0)){
            Control = matrix(rnorm(N[n]*Trails[t], 0, 1), nrow = Trails[t], ncol = N[n])
            Treat = matrix(rnorm(N[n]*Trails[t], ES[e], 1), nrow = Trails[t], ncol = N[n])
            for(i in 1:Trails[t]){
              p[i] = t.test(Treat[i,], Control[i,], mu = D[d], alternative = "greater", var.eq = TRUE)$p.value
            }
          }
          ### optim(): lowest/highest interval BF01 over rscale in [sqrt(2)/2, sqrt(2)] ----
          Data = matrix(c(Treat, Control), ncol = 2)
          OptimMin <- optim(par = sqrt(2)/2, fn = RInterVary, data = Data, method = "Brent",
                            lower = sqrt(2)/2, upper = sqrt(2))
          OptimMax <- optim(par = 0, fn = RInterVary, data = Data, method = "Brent",
                            lower = sqrt(2)/2, upper = sqrt(2), control = list(fnscale = -1))
          LowHighBF01[ , IntervalPoint[1], n, e, t, d, r] <- c(OptimMin$value, OptimMax$value)
          if(!D[d]){ # Point nulls: only meaningful when the margin is zero ----
            OptimMinP <- optim(par = sqrt(2)/2, fn = RPointVary, data = Data, method = "Brent",
                               lower = sqrt(2)/2, upper = sqrt(2))
            OptimMaxP <- optim(par = sqrt(2)/2, fn = RPointVary, data = Data, method = "Brent",
                               lower = sqrt(2)/2, upper = sqrt(2), control = list(fnscale = -1))
            LowHighBF01[ , IntervalPoint[2], n, e, t, d, r] <- c(OptimMinP$value, OptimMaxP$value)
            #----
          }
          #-----
        }
        # Checkpoint after every design cell (NOTE(review): hard-coded
        # user-specific path -- consider a relative path).
        save(LowHighBF01, file = paste("/home/anja/Desktop/ReMa/FDA/C_R/data/","Sim1BF", ".RData", sep=""))
      }
    }
  }
} #-------
## Simulation 2 -----
# Equivalence scenario: datasets are resampled until no one-sided test against
# either margin is significant in either direction, then the range of the
# equivalence BF01 over the prior scale is recorded.
N = c(20, 40, 80) # sample size, index with n
ES = c(0, .2, .3) # effect size, index with e (is mean, sd is always 1)
Trails = 2 # number of Trails, not varied
D <- matrix(c(-.2, -.3, .2, .3), 2) # equivalence margins: row = scenario, cols = lower/upper bound
Runs = 500 # index with r
# Result container; first dimension = lowest/highest BF01 (lowest is 1).
EquivBF01 = array(NA, dim =c(2, length(D[,1]), length(N), length(ES), Runs))
# Sim 2----
for(d in 1:length(D[,1])){
  # Optim objective: equivalence BF01 as a function of the prior scale ------
  REquivalenceVary <- function(data, par){
    BF <- ttestBF(x = data[,1], y= data[,2], nullInterval = c(D[d, 1], D[d, 2]), rscale = par)
    as.vector(BF[1]/BF[2])[1] # BF01: H0 is the equivalence region you do not want to reject
  }
  for(e in 1:length(ES)){
    for(n in 1:length(N)){
      cat("D = ", D[d, 2], "N = ", N[n], ", ", "ES = ", ES[e], "\n ")
      for(r in 1:Runs){
        # Rejection sampling: the p matrix starts at 0 so the loop always runs
        # at least once; keep only datasets where no p < .025 and no p > .975
        # across both margins and both trials.
        p = matrix(rep(0, 2*Trails), ncol = 2)
        while(!(sum(p < .025) == 0 & sum(p > .975) == 0)){
          Control = matrix(rnorm(N[n]*Trails, 0, 1), nrow = Trails, ncol = N[n])
          Treat = matrix(rnorm(N[n]*Trails, ES[e], 1), nrow = Trails, ncol = N[n])
          for(i in 1:Trails){
            p[i, 1] = t.test (Treat[i,], Control[i,], mu = D[d, 1], alternative = "less", var.eq=TRUE)$p.value
            p[i, 2] = t.test (Treat[i,], Control[i,], mu = D[d, 2], alternative = "greater", var.eq=TRUE)$p.value
          }
        }
        ### optim(): lowest/highest equivalence BF01 over rscale in [sqrt(2)/2, sqrt(2)] ----
        Data = matrix(c(Treat, Control), ncol = 2)
        OptimMin <- optim(par = sqrt(2)/2, fn = REquivalenceVary, data= Data, method="Brent",
                          lower = sqrt(2)/2, upper = sqrt(2))
        OptimMax <- optim(par = 0, fn = REquivalenceVary, data= Data, method="Brent",
                          lower = sqrt(2)/2, upper = sqrt(2), control = list(fnscale= -1))
        EquivBF01[ , d, n, e, r] <- c(OptimMin$value, OptimMax$value)
      }#-----
    }
  }
}
save(EquivBF01, file = paste("/home/anja/Desktop/ReMa/FDA/C_R/data/","EquivBF01", ".RData", sep=""))
## Result Plot Simulation 1------------------
# Design constants re-declared so this section can run standalone; they must
# match the values used when LowHighBF01 was produced.
N = c(20, 40, 80) # sample size, index with n
ES = c(0, .2, .5) # effect size, index with e (is mean, sd is always 1)
Trails = c(2, 3, 5) # number of Trails, index with t
D = c(-.2, 0, .2) # inferiority-margin delta, index with d
Runs = 500 # index with r
IntervalPoint = c(1, 2) # 1 for interval null 2 for point null
options(scipen = 1)
# Plot tuning: y range in log10(BF01) units and text sizes.
Ylim <- c(-12, 2)
Cex1 <- 1.3
Cex2 <- .8 #axis ticks
Cex3 <- 1.3 #ES heading
# One 3x3 figure per margin: rows = effect sizes, columns = trial counts.
# Red boxes = lowest BF01, blue = highest; open border = interval null,
# filled = point null (point nulls are only available when D[d] = 0).
for(d in 1:length(D)){
  png(filename = paste("/home/anja/Desktop/ReMa/FDA/C_R/plots/","boxplot",d,".png",sep=""),
      width = 960, height = 960)
  layout(matrix (1:9, 3, 3, byrow = T))
  par(mar = c(4.5, 6.5, 8, 0.5), cex.main = 2, cex.axis = 2)
  for(e in 1:length(ES)){
    for(t in 1:length(Trails)){
      if(!D[d]){
        # Zero margin: interval and point nulls plotted side by side per N.
        boxplot(col = adjustcolor("white", alpha=0.2), border= "red", at = c(2, 4, 6), x = t(log(LowHighBF01[1, 1, , e, t, d, ], base = 10)), axes = F, ylim = Ylim, xlim= c(0,7), xlab = "", ylab = "", main = paste ("Trials:", Trails[t]), pch = 16)
        boxplot(col = adjustcolor("red", alpha=0.2), add= TRUE, at = c(1, 3, 5), x = t(log(LowHighBF01[1, 2, , e, t, d, ], base = 10)), axes = F, ylim = Ylim, xlab = "", ylab = "", main = paste ("Trials:", Trails[t]), pch = 16)
        abline(h = 0, lty = 2, lwd = 2) # BF01 = 1: evidence favors neither hypothesis
        axis(1, at = c(0, 1.7 , 3.7, 5.7, 7), labels = c( "", N, ""))
        axis(2, at = Ylim[1]:Ylim[2], labels = 10^(Ylim[1]:Ylim[2]), las = 1, cex= Cex2)
        mtext(text = "N", side = 1, line = 3, cex =Cex1)
        boxplot(add= TRUE, col = adjustcolor("white", alpha=0.2), border="blue", at = c(2.2, 4.2, 6.2), x = t(log(LowHighBF01[2, 1, , e, t, d, ], base = 10)), axes = F, ylim = Ylim, xlab = "", ylab = "", main = paste ("Trials:", Trails[t]), pch = 16)
        boxplot(add= TRUE, col=adjustcolor("blue", .2), at = c(1.2, 3.2, 5.2), x = t(log(LowHighBF01[2, 2, , e, t, d, ], base = 10)), axes = F, ylim = Ylim, xlab = "", ylab = "", main = paste ("Trials:", Trails[t]), pch = 16)
      }else{
        # Non-zero margin: interval nulls only.
        boxplot(col = adjustcolor("white", alpha=0.2), border= "red", at = c(1, 2, 3), x = t(log(LowHighBF01[1, 1, , e, t, d, ], base = 10)), axes = F, ylim = Ylim, xlim= c(0,4), xlab = "", ylab = "", main = paste ("Trials:", Trails[t]), pch = 16)
        abline(h = 0, lty = 2, lwd = 2)
        axis(1, at = c(0, 1.1:3.1, 4), labels = c( "", N, ""))
        axis(2, at = Ylim[1]:Ylim[2], labels = 10^(Ylim[1]:Ylim[2]), las = 1)
        mtext(text = "N", side = 1, line = 3, cex =Cex1)
        boxplot(add= TRUE, col=adjustcolor("white", .2), border= "blue", at = c(1.2, 2.2, 3.2), x = t(log(LowHighBF01[2, 1, , e, t, d, ], base = 10)), axes = F, ylim = Ylim, xlim= c(0,4), xlab = "", ylab = "", main = paste ("Trials:", Trails[t]), pch = 16)
      }
      # Column headings (middle column only) and overall figure title.
      if(t%%2 == 0){
        mtext(text = paste("ES =", ES[e]), side = 3, line = 6, cex = Cex3, font = 2)
      }
      if(t*e == 1){
        mtext(text = paste("Non-Inferiority:", D[d]), side = 3, line = 5.4, cex = 2.2, font = 2)
      }
    }
  }
  dev.off ()
}
# Histogram -------------------
# Add the box.cex parameter to the legend function by altering legend source code
# source code from: http://code.metager.de/source/xref/gnu/R/src/library/graphics/R/legend.R
#
# Local copy of graphics::legend() extended with one extra argument:
#   box.cex -- length-2 numeric; box.cex[1]/box.cex[2] scale the width/height
#   of the filled legend key boxes (the stock legend() hard-codes 0.8 and 0.5).
# Every other argument behaves as documented in ?graphics::legend, and the
# body below is the upstream source, unchanged apart from where box.cex is used.
Legend <- function( #--------------------
    x, y = NULL, legend, fill = NULL, col = par("col"), border="black",
    lty, lwd, pch, angle = 45, density = NULL, bty = "o", bg = par("bg"),
    box.lwd = par("lwd"), box.lty = par("lty"), box.col = par("fg"),
    pt.bg = NA, cex = 1, pt.cex = cex, pt.lwd = lwd,
    xjust = 0, yjust = 1, x.intersp = 1, y.intersp = 1, adj = c(0, 0.5),
    text.width = NULL, text.col = par("col"), text.font = NULL,
    merge = do.lines && has.pch, trace = FALSE,
    plot = TRUE, ncol = 1, horiz = FALSE, title = NULL,
    inset = 0, xpd, title.col = text.col, title.adj = 0.5,
    seg.len = 2, box.cex =c(0.8, 0.5))
{
  ## the 2nd arg may really be `legend'
  if(missing(legend) && !missing(y) &&
     (is.character(y) || is.expression(y))) {
    legend <- y
    y <- NULL
  }
  mfill <- !missing(fill) || !missing(density)
  if(!missing(xpd)) {
    op <- par("xpd")
    on.exit(par(xpd=op))
    par(xpd=xpd)
  }
  title <- as.graphicsAnnot(title)
  if(length(title) > 1) stop("invalid 'title'")
  legend <- as.graphicsAnnot(legend)
  n.leg <- if(is.call(legend)) 1 else length(legend)
  if(n.leg == 0) stop("'legend' is of length 0")
  auto <-
    if (is.character(x))
      match.arg(x, c("bottomright", "bottom", "bottomleft", "left",
                     "topleft", "top", "topright", "right", "center"))
    else NA
  if (is.na(auto)) {
    xy <- xy.coords(x, y); x <- xy$x; y <- xy$y
    nx <- length(x)
    if (nx < 1 || nx > 2) stop("invalid coordinate lengths")
  } else nx <- 0
  xlog <- par("xlog")
  ylog <- par("ylog")
  ## Local drawing helpers that transparently handle log-scaled axes.
  rect2 <- function(left, top, dx, dy, density = NULL, angle, ...) {
    r <- left + dx; if(xlog) { left <- 10^left; r <- 10^r }
    b <- top - dy; if(ylog) { top <- 10^top; b <- 10^b }
    rect(left, top, r, b, angle = angle, density = density, ...)
  }
  segments2 <- function(x1, y1, dx, dy, ...) {
    x2 <- x1 + dx; if(xlog) { x1 <- 10^x1; x2 <- 10^x2 }
    y2 <- y1 + dy; if(ylog) { y1 <- 10^y1; y2 <- 10^y2 }
    segments(x1, y1, x2, y2, ...)
  }
  points2 <- function(x, y, ...) {
    if(xlog) x <- 10^x
    if(ylog) y <- 10^y
    points(x, y, ...)
  }
  text2 <- function(x, y, ...) {
    ##--- need to adjust adj == c(xadj, yadj) ?? --
    if(xlog) x <- 10^x
    if(ylog) y <- 10^y
    text(x, y, ...)
  }
  if(trace)
    catn <- function(...)
      do.call("cat", c(lapply(list(...),formatC), list("\n")))
  cin <- par("cin")
  Cex <- cex * par("cex") # = the `effective' cex for text
  ## at this point we want positive width even for reversed x axis.
  if(is.null(text.width))
    text.width <- max(abs(strwidth(legend, units="user",
                                   cex=cex, font = text.font)))
  else if(!is.numeric(text.width) || text.width < 0)
    stop("'text.width' must be numeric, >= 0")
  xc <- Cex * xinch(cin[1L], warn.log=FALSE) # [uses par("usr") and "pin"]
  yc <- Cex * yinch(cin[2L], warn.log=FALSE)
  if(xc < 0) text.width <- -text.width
  xchar <- xc
  xextra <- 0
  yextra <- yc * (y.intersp - 1)
  ## watch out for reversed axis here: heights can be negative
  ymax <- yc * max(1, strheight(legend, units="user", cex=cex)/yc)
  ychar <- yextra + ymax
  if(trace) catn(" xchar=", xchar, "; (yextra,ychar)=", c(yextra,ychar))
  if(mfill) {
    ##= sizes of filled boxes -- this is where the new box.cex argument is
    ##  applied (upstream legend() hard-codes 0.8 and 0.5 here).
    xbox <- xc * box.cex[1]
    ybox <- yc * box.cex[2]
    dx.fill <- xbox ## + x.intersp*xchar
  }
  do.lines <- (!missing(lty) && (is.character(lty) || any(lty > 0))
  ) || !missing(lwd)
  ## legends per column:
  n.legpercol <-
    if(horiz) {
      if(ncol != 1)
        warning(gettextf("horizontal specification overrides: Number of columns := %d",
                         n.leg), domain = NA)
      ncol <- n.leg
      1
    } else ceiling(n.leg / ncol)
  has.pch <- !missing(pch) && length(pch) > 0 # -> default 'merge' is available
  if(do.lines) {
    x.off <- if(merge) -0.7 else 0
  } else if(merge)
    warning("'merge = TRUE' has no effect when no line segments are drawn")
  if(has.pch) {
    if(is.character(pch) && !is.na(pch[1L]) &&
       nchar(pch[1L], type = "c") > 1) {
      if(length(pch) > 1)
        warning("not using pch[2..] since pch[1L] has multiple chars")
      np <- nchar(pch[1L], type = "c")
      pch <- substr(rep.int(pch[1L], np), 1L:np, 1L:np)
    }
    ## this coercion was documented but not done in R < 3.0.0
    if(!is.character(pch)) pch <- as.integer(pch)
  }
  if (is.na(auto)) {
    ##- Adjust (x,y) :
    if (xlog) x <- log10(x)
    if (ylog) y <- log10(y)
  }
  if(nx == 2) {
    ## (x,y) are specifiying OPPOSITE corners of the box
    x <- sort(x)
    y <- sort(y)
    left <- x[1L]
    top <- y[2L]
    w <- diff(x)# width
    h <- diff(y)# height
    w0 <- w/ncol # column width
    x <- mean(x)
    y <- mean(y)
    if(missing(xjust)) xjust <- 0.5
    if(missing(yjust)) yjust <- 0.5
  }
  else {## nx == 1 or auto
    ## -- (w,h) := (width,height) of the box to draw -- computed in steps
    h <- (n.legpercol + !is.null(title)) * ychar + yc
    w0 <- text.width + (x.intersp + 1) * xchar
    if(mfill) w0 <- w0 + dx.fill
    if(do.lines) w0 <- w0 + (seg.len + x.off)*xchar
    w <- ncol*w0 + .5* xchar
    if (!is.null(title)
        && (abs(tw <- strwidth(title, units="user", cex=cex) + 0.5*xchar)) > abs(w)) {
      xextra <- (tw - w)/2
      w <- tw
    }
    ##-- (w,h) are now the final box width/height.
    if (is.na(auto)) {
      left <- x - xjust * w
      top <- y + (1 - yjust) * h
    } else {
      usr <- par("usr")
      inset <- rep_len(inset, 2)
      insetx <- inset[1L]*(usr[2L] - usr[1L])
      left <- switch(auto, "bottomright" =,
                     "topright" =, "right" = usr[2L] - w - insetx,
                     "bottomleft" =, "left" =, "topleft" = usr[1L] + insetx,
                     "bottom" =, "top" =, "center" = (usr[1L] + usr[2L] - w)/2)
      insety <- inset[2L]*(usr[4L] - usr[3L])
      top <- switch(auto, "bottomright" =,
                    "bottom" =, "bottomleft" = usr[3L] + h + insety,
                    "topleft" =, "top" =, "topright" = usr[4L] - insety,
                    "left" =, "right" =, "center" = (usr[3L] + usr[4L] + h)/2)
    }
  }
  if (plot && bty != "n") { ## The legend box :
    if(trace)
      catn(" rect2(", left, ",", top,", w=", w, ", h=", h, ", ...)",
           sep = "")
    rect2(left, top, dx = w, dy = h, col = bg, density = NULL,
          lwd = box.lwd, lty = box.lty, border = box.col)
  }
  ## (xt[],yt[]) := `current' vectors of (x/y) legend text
  xt <- left + xchar + xextra +
    (w0 * rep.int(0:(ncol-1), rep.int(n.legpercol,ncol)))[1L:n.leg]
  yt <- top - 0.5 * yextra - ymax -
    (rep.int(1L:n.legpercol,ncol)[1L:n.leg] - 1 + !is.null(title)) * ychar
  if (mfill) { #- draw filled boxes -------------
    if(plot) {
      if(!is.null(fill)) fill <- rep_len(fill, n.leg)
      rect2(left = xt, top=yt+ybox/2, dx = xbox, dy = ybox,
            col = fill,
            density = density, angle = angle, border = border)
    }
    xt <- xt + dx.fill
  }
  if(plot && (has.pch || do.lines))
    col <- rep_len(col, n.leg)
  ## NULL is not documented but people use it.
  if(missing(lwd) || is.null(lwd))
    lwd <- par("lwd") # = default for pt.lwd
  if (do.lines) { #- draw lines ---------------------
    ## NULL is not documented
    if(missing(lty) || is.null(lty)) lty <- 1
    lty <- rep_len(lty, n.leg)
    lwd <- rep_len(lwd, n.leg)
    ok.l <- !is.na(lty) & (is.character(lty) | lty > 0) & !is.na(lwd)
    if(trace)
      catn(" segments2(",xt[ok.l] + x.off*xchar, ",", yt[ok.l],
           ", dx=", seg.len*xchar, ", dy=0, ...)")
    if(plot)
      segments2(xt[ok.l] + x.off*xchar, yt[ok.l],
                dx = seg.len*xchar, dy = 0,
                lty = lty[ok.l], lwd = lwd[ok.l], col = col[ok.l])
    # if (!merge)
    xt <- xt + (seg.len+x.off) * xchar
  }
  if (has.pch) { #- draw points -------------------
    pch <- rep_len(pch, n.leg)
    pt.bg <- rep_len(pt.bg, n.leg)
    pt.cex <- rep_len(pt.cex, n.leg)
    pt.lwd <- rep_len(pt.lwd, n.leg)
    ok <- !is.na(pch)
    if (!is.character(pch)) {
      ## R 2.x.y omitted pch < 0
      ok <- ok & (pch >= 0 | pch <= -32)
    } else {
      ## like points
      ok <- ok & nzchar(pch)
    }
    x1 <- (if(merge && do.lines) xt-(seg.len/2)*xchar else xt)[ok]
    y1 <- yt[ok]
    if(trace)
      catn(" points2(", x1,",", y1,", pch=", pch[ok],", ...)")
    if(plot)
      points2(x1, y1, pch = pch[ok], col = col[ok],
              cex = pt.cex[ok], bg = pt.bg[ok], lwd = pt.lwd[ok])
    ##D if (!merge) xt <- xt + dx.pch
  }
  xt <- xt + x.intersp * xchar
  if(plot) {
    if (!is.null(title))
      text2(left + w*title.adj, top - ymax, labels = title,
            adj = c(title.adj, 0), cex = cex, col = title.col)
    text2(xt, yt, labels = legend, adj = adj, cex = cex,
          col = text.col, font = text.font)
  }
  ## Invisibly return the box geometry and label positions, like legend().
  invisible(list(rect = list(w = w, h = h, left = left, top = top),
                 text = list(x = xt, y = yt)))
} #----------------------------
# Histogram figure: distribution of the lowest interval BF01 at margin D = 0,
# overlaying the three sample sizes per panel (rows = effect sizes,
# columns = numbers of trials). Uses the custom Legend() for wider key boxes.
palf <- colorRampPalette(c("yellow", "blue"))
Ylim <- c(-13, 1)  # x-axis range on the log10(BF01) scale
Cex2 <- 2
png(filename = paste("/home/anja/Desktop/ReMa/FDA/C_R/plots/","histogram",".png",sep=""),
    width = 960, height = 960)
layout(matrix(1:9, 3, 3, byrow = TRUE))
par(mar = c(4.5, 6.5, 8, 0.5), cex.main = 2, cex.axis = 2)
d <- 2  # D[2] = 0: only the zero-margin condition is plotted
for (e in 1:length(ES)) {
  for (t in 1:length(Trails)) {
    # Break grids are offset slightly per N so the overlaid histograms stay
    # distinguishable. sort() guards against hist() rejecting the decreasing
    # sequences produced by `(Ylim[2]+.4):(Ylim[1]-.4)` ('breaks' must be
    # increasing); the bin edges themselves are unchanged.
    p  <- hist(t(log(LowHighBF01[1, 1, 1, e, t, d, ], base = 10)), breaks = sort((Ylim[2]+.4):(Ylim[1]-.4)), plot = FALSE)
    p2 <- hist(t(log(LowHighBF01[1, 1, 2, e, t, d, ], base = 10)), breaks = sort((Ylim[2]+.2):(Ylim[1]-.2)), plot = FALSE)
    p3 <- hist(t(log(LowHighBF01[1, 1, 3, e, t, d, ], base = 10)), breaks = sort(Ylim[2]:Ylim[1]), plot = FALSE)
    plot(p, col = adjustcolor(palf(100)[15], alpha=0.3), xlab = "", xaxt='n', ann=FALSE, cex.axis = Cex2, main = paste ("Trials:", Trails[t]), pch = 16, xlim =Ylim, ylim = c(0, 250))
    plot(p2, col= adjustcolor(palf(100)[20], .15), axes = FALSE, xlab = "", ylab = "", main = "", xlim = Ylim, ylim = c(0, 250), add = TRUE)
    plot(p3, col= adjustcolor(palf(100)[100], .08), axes = FALSE, xlab = "", ylab = "", main = "", xlim = Ylim, ylim = c(0, 250), add = TRUE)
    axis(1, at = Ylim[1]:Ylim[2], labels = 10^(Ylim[1]:Ylim[2]), las = 1, cex= Cex2)
    abline(v = t(log(.05, base = 10)), lty = 2, lwd = 2)  # reference line at BF01 = .05
    # Legend fills now match the plotted colors: the N = 40 entry previously
    # used palf(100)[25] while the corresponding histogram is drawn with
    # palf(100)[20] -- fixed to [20].
    Legend(x= -13, y= 270, legend = c(paste("N =", N[1]), paste("N =", N[2]), paste("N =", N[3]))
           , fill= c(adjustcolor(palf(100)[15], 0.3),
                     adjustcolor(palf(100)[20], .15),
                     adjustcolor(palf(100)[100], .08)
           ),
           cex =Cex2, bty = "n",
           x.intersp = .7, y.intersp = 1.4, box.cex=c(1.3 , 1.3)
    )
    mtext(text = expression(paste("Lowest Interval ", BF[0][1])), side = 1, line = 3, cex =Cex1)
    # Row heading on the middle column only.
    if (t %% 2 == 0) {
      mtext(text = paste("ES =", ES[e]), side = 3, line = 6, cex = Cex3, font = 2)
    }
  }
}
dev.off()
#Equivalence boxplots----
# Simulation design parameters (redefined here for this figure).
# NOTE(review): EquivBF01 is assumed to be a 5-d array produced earlier in the
# script, indexed as [trail, delta, n, e, run] -- confirm against the
# simulation section.
N = c(20, 40, 80) # sample size, index with n
ES = c(0, .2, .3) # effect size, index with e (is mean, sd is always 1)
Trails = 2# number of Trails, not varied
D <- matrix(c(-.2, -.3, .2, .3), 2) # inferiority-margin delta, not varied
Runs = 500 # index with r
options(scipen = 1)
Ylim <- c(-1, 3)
Cex1 <- 1.3
Cex2 <- .8 #axis ticks
Cex3 <- 1.3 #ES heading
# One 2x3 grid of panels: rows = equivalence regions, columns = effect sizes.
png(filename = paste("/home/anja/Desktop/ReMa/FDA/C_R/plots/","boxplotEquivalence",".png",sep=""),
width = 960, height = 960)
layout(matrix (1:6, 2, 3, byrow = T))
par(mar = c(4.5, 6.5, 8, 0.5), cex.main = 2, cex.axis = 2)
for(d in 1:length(D[,1])){
for(e in 1:length(ES)){
# red boxes: first Trails level; plotted at x = 1, 2, 3 (one box per N)
boxplot(col = adjustcolor("white", alpha=0.2), border= "red", at = c(1, 2, 3), x = t(log(EquivBF01[1, d, , e, ], base = 10)),
axes = F, ylim = Ylim, xlim= c(0,4), xlab = "", ylab = "", main = paste ("Effect Size:", ES[e]), pch = 16)
# BF01 = 1 reference line (0 on the log10 scale)
abline(h = 0, lty = 2, lwd = 2)
# x tick labels sit between the paired red/blue boxes
axis(1, at = c(0, 1.1:3.1, 4), labels = c( "", N, ""))
axis(2, at = Ylim[1]:Ylim[2], labels = 10^(Ylim[1]:Ylim[2]), las = 1)
mtext(text = "N", side = 1, line = 3, cex =Cex1)
# blue boxes: second Trails level, offset by 0.2 so the pairs sit side by side
boxplot(add= TRUE, col=adjustcolor("white", .2), border= "blue",at = c(1.2, 2.2, 3.2), x = t(log(EquivBF01[2, d, , e, ], base = 10)),
axes = F, ylim = Ylim, xlim= c(0,4), xlab = "", ylab = "", main = paste ("Effect Size:", ES[e]), pch = 16)
# row heading with the equivalence region, printed once per row
if(e == 1){
mtext(text = paste("Equivalence Region: (", D[d, 1], " - ", D[d, 2], ")"), side = 3, line = 5.4, cex = 1.5, font = 2)
}
}
}
dev.off ()
|
f1c7555200e52fcec83ed1f44458ef920713dc46
|
31f3d6031b5ac2310317b72a20ef8f2c29d55049
|
/r/src/error_functions/cal.met.wind.asos.r
|
44b49283637e339dba8120b0d08d54cc9be58711
|
[] |
no_license
|
uataq/X-STILT
|
638c3c76e6e396c0939c85656a53eb20a1eaba74
|
eaa7cfabfc13569a9e598c90593f6418bc9113d5
|
refs/heads/master
| 2023-07-22T03:14:50.992219
| 2023-07-14T16:40:55
| 2023-07-14T16:40:55
| 128,477,511
| 12
| 5
| null | 2023-06-19T22:57:03
| 2018-04-06T22:46:36
|
R
|
UTF-8
|
R
| false
| false
| 5,538
|
r
|
cal.met.wind.asos.r
|
# script to interpolate the winds from GDAS, using trajwind() for each receptor based on asosiosonde data
# read output model-data comparisons and make plots
# written by DW, 05/15/2017
# Add temporal interpolation of GDAS winds, DW, 05/26/2017
# optimize and convert original script to subroutine, DW, 08/29/2018
#
# 'asos.wind' is a date frame that is generated from grab.asos()
# switch to Ben's calc_trajectory, via get.ground.hgt(), DW
#
# fix wd err, if wd.err is closed to -360 or 360, cycle it, DW, 08/31/2018
# add surface wind from ASOS, DW, 09/19/2018
# Interpolate model winds at ASOS surface-station receptors and compute
# model-minus-observation errors (temperature, u, v, wind speed, wind dir).
# Results are cached in `filename`; the cached table is reused unless
# `overwrite` is TRUE.
#
# Args:
#   filename:   comma-separated txt file the error table is written to /
#               read back from.
#   met:        met-model label; only used to name the run directory.
#   met.path:   directory holding the met files (forwarded to get.ground.hgt()).
#   met.format: met file name format (forwarded to get.ground.hgt()).
#   workdir:    STILT working dir; must contain exe/master with hymodelc files.
#   site:       site name (currently unused inside this function; kept so the
#               caller-facing interface is unchanged).
#   timestr:    receptor time as 'YYYYMMDDHH'.
#   overwrite:  if TRUE, recompute even when `filename` already exists.
#   asos.file:  csv of ASOS observations as produced by grab.asos().
#   nhrs:       window in hours relative to `timestr` for keeping observations
#               (negative = backwards in time); default -120.
#
# Returns: data frame of merged obs/model values plus error columns
#          temp.err, u.err, v.err, ws.err, wd.err.
cal.met.wind.asos <- function(filename, met, met.path, met.format, workdir,
                              site, timestr, overwrite = FALSE, asos.file, nhrs = -120){
  # recompute when asked to, or when no cached result exists yet
  # (isTRUE()/|| instead of `== T` and vectorized `|` on a scalar condition)
  if (isTRUE(overwrite) || !file.exists(filename)){
    # read ASOS observations and derive u/v components from speed/direction
    asos <- read.table(asos.file, header = T, sep = ',', stringsAsFactors = F)
    asos <- asos %>%
      mutate(date = as.POSIXct(valid, format = '%Y-%m-%d %H:%M', tz = 'UTC'),
             timestr = as.numeric(format(date, '%Y%m%d%H')),
             temp.asos = as.numeric(tmpc),
             wd.asos = as.numeric(drct),          # degree from true north
             ws.asos = as.numeric(sknt) * 0.514,  # convert knots to m/s
             u.asos = sin((wd.asos - 180) * pi/180) * ws.asos,
             v.asos = cos((wd.asos - 180) * pi/180) * ws.asos) %>%
      dplyr::select(station, lon, lat, date, timestr, temp.asos,
                    wd.asos, ws.asos, u.asos, v.asos) %>% na.omit()

    # keep only observations inside the nhrs window around the receptor time
    recp.date <- as.POSIXct(timestr, format = '%Y%m%d%H', tz = 'UTC')
    end.date  <- recp.date + nhrs * 60 * 60
    asos <- asos %>% filter(date >= min(end.date, recp.date), date <= max(end.date, recp.date))

    # 612 m station elevation for the two ASOS surface stations; the receptor
    # AGL is the station height minus the model ground height at each station
    asos.hgt <- 612
    asos <- asos %>% mutate(met.grdhgt = ifelse(station == 'OERK', 605.1306, 537.8216),
                            agl = asos.hgt - met.grdhgt)

    # collapse to unique receptors (time, lat, lon, agl)
    recpstr <- paste(asos$timestr, asos$lat, asos$lon, asos$agl)
    sort.recpstr <- sort(unique(recpstr))
    uni.recp <- matrix(unlist(strsplit(sort.recpstr, ' ')), ncol = 4, byrow = T)
    colnames(uni.recp) <- list('time', 'lat', 'lon', 'agl')
    receptor <- data.frame(
      lati = as.numeric(uni.recp[, 'lat']),
      long = as.numeric(uni.recp[, 'lon']),
      zagl = as.numeric(uni.recp[, 'agl'])) %>% mutate(
        run_time = as.POSIXct(uni.recp[, 'time'], '%Y%m%d%H', tz = 'UTC'))

    rundir <- file.path(workdir, paste0('out_wind_', timestr, '_', met, '_asos'))
    # variable names requested from the trajectory run
    var2 <- c('time', 'indx', 'long', 'lati', 'zagl', 'zsfc', 'mlht', 'temp', 'pres')

    # ensure the run directory exists and links to the hymodelc executables
    dir.create(rundir, showWarnings = FALSE)
    link_files <- dir(file.path(workdir, 'exe', 'master'))
    # links are created as a batch, so only the first target is checked
    if (!file.exists(file.path(rundir, link_files))[1])
      file.symlink(file.path(workdir, 'exe', 'master', link_files), rundir)

    cat(paste(nrow(receptor), 'unique asos station + asos time\n'))
    err.info <- NULL
    for (i in seq_len(nrow(receptor))){  # seq_len() is safe when nrow == 0
      cat(paste('working on', i, 'unique asos\n'))
      sel.asos <- asos[i, ]

      # run a short trajectory to interpolate the model winds at this receptor
      int.info <- get.ground.hgt(varsiwant = var2, met_loc = met.path,
                                 met_file_format = met.format, n_hours = 1,
                                 receptor = receptor[i,], rundir = rundir,
                                 timeout = 20 * 60, r_zagl = receptor$zagl[i],
                                 run_trajec = T)
      if (is.null(int.info)) next  # skip receptors whose trajectory run failed

      # merge obs and sim
      merge.info <- cbind(sel.asos, int.info) %>%
        rename(u.met = ubar, v.met = vbar, w.met = wbar, temp.met = temp)

      # wind speed and direction (FROM which dir, degrees from true north)
      merge.info <- merge.info %>% mutate(
        ws.met = sqrt(u.met^2 + v.met^2),
        wd.met = atan2(u.met/ws.met, v.met/ws.met) * 180 / pi + 180)

      # when ws == 0, wd is NA; define calm wind direction as 0
      merge.info[is.na(merge.info$wd.met), 'wd.met'] <- 0

      # model-minus-observation errors
      tmp.err.info <- merge.info %>% mutate(temp.err = temp.met - temp.asos,
                                            u.err = u.met - u.asos,
                                            v.err = v.met - v.asos,
                                            ws.err = ws.met - ws.asos,
                                            wd.err = wd.met - wd.asos)

      # wrap direction errors close to +/-360 back into (-180, 180]
      tmp.err.info[abs(tmp.err.info$wd.err) > 180, 'wd.err'] <-
        abs(tmp.err.info[abs(tmp.err.info$wd.err) > 180, 'wd.err']) - 360
      print(tmp.err.info$wd.err)

      # write the header only for the first record, then append
      colTF <- (i == 1)
      appTF <- (i != 1)
      write.table(tmp.err.info, file = filename, append = appTF, sep = ',',
                  quote = F, row.names = F, col.names = colTF)
      err.info <- rbind(err.info, tmp.err.info)
    } # end for i

  } else {
    # reuse the cached error table
    err.info <- read.table(file = filename, sep = ',', header = T)
  } # end if reading txtfile

  return(err.info)
}
# end of function
|
04604de1f3ce23fe1fec8ee1dbf25beedcbc543d
|
a845a15920e9e4b094434d8c7eeacff3e99a0aa1
|
/LR - updated.R
|
ce4e368e8784794300100744609ae9f036d476fe
|
[] |
no_license
|
gyz0807/R-works
|
f3dcd3ffb96d7ea4631508a96ac14f318d8b3740
|
3c66dee5aab4c94c6786c2e0f00477c9513d28b2
|
refs/heads/master
| 2021-01-10T11:00:21.558924
| 2016-03-15T05:51:19
| 2016-03-15T05:51:19
| 53,918,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,576
|
r
|
LR - updated.R
|
################################## Data Preparation ##################################
# Load the three customer populations from Excel workbooks (first sheet each).
# NOTE(review): read.xlsx comes from the xlsx/openxlsx package, which must be
# loaded earlier in the session -- confirm the library() call.
## Walking Distance
W.Data <- read.xlsx("Data1.xlsx", 1)
W.GC <- read.xlsx("Data2.xlsx", 1)
W.Zipcodes <- read.xlsx("Data3.xlsx", 1)
W.Counts <- read.xlsx("Data4.xlsx", 1)
AllData <- read.xlsx("Data5.xlsx", 1)
## Commuters
C.Data <- read.xlsx("Data6.xlsx", 1)
C.GC <- read.xlsx("Data7.xlsx", 1)
C.Zipcodes <- read.xlsx("Data8.xlsx", 1)
C.Counts <- read.xlsx("Data9.xlsx", 1)
## Travelers
# Travelers = everyone in AllData whose zip code is neither walking-distance
# nor commuter.
W.C.Zipcodes <- unique(rbind(W.Zipcodes, C.Zipcodes))
T.Data <- AllData[!AllData$zip5_cd %in% W.C.Zipcodes$KEY, ]
## Calculating T.Counts
# Transaction count per segment for travelers.
T.transcount <- with(T.Data, tapply(trans_amt, segment_id, length))
segments_id <- as.data.frame(names(T.transcount))
# NOTE(review): renaming to 1:8 assumes all 8 segments occur in T.Data --
# verify, otherwise the cbind below misaligns.
names(T.transcount) <- 1:8
T.Data$unique_id <- as.character(T.Data$unique_id)
# Distinct-customer count per segment (first occurrence of each unique_id).
T.customercount <- with(T.Data[!duplicated(T.Data$unique_id),], tapply(trans_amt, segment_id, length))
names(T.customercount) <- 1:8
T.Counts <- cbind(segments_id, as.data.frame(T.transcount), as.data.frame(T.customercount))
names(T.Counts) <- c("segment_id", "Sum_Count", "Sum_CountDistinct_unique_id")
## DataAll % applied
# Pooled tables across the three populations, used by the Outputs section.
DataAll <- rbind(W.Data, C.Data, T.Data)
GCData <- rbind(W.GC, C.GC)
CountAll <- rbind(W.Counts, C.Counts, T.Counts)
################################### Functions ###########################################
# Sum transaction revenue per segment.
#
# Args:
#   salesData: data frame with columns `trans_amt` (numeric) and
#              `segment_id` (values "S1".."S8").
# Returns: one-row data frame with columns S1..S8 (per-segment revenue,
#          0 for segments absent from the data) and Total (grand sum).
Sales <- function(salesData){
  sales <- as.data.frame.list(matrix(0, 9, 1))
  names(sales) <- c("S1", "S2", "S3", "S4", "S5", "S6","S7","S8","Total")
  # per-segment revenue; names(x) are the segment ids actually present
  x <- with(salesData, tapply(trans_amt, segment_id, sum))
  for (i in 1:8){
    # BUGFIX: the original unguarded `sales[i] <- x[names(x) == ...]`
    # assigned a zero-length vector for segments absent from the data,
    # which raises "replacement has 0 rows". Missing segments now stay 0.
    if (names(sales)[i] %in% names(x)){
      sales[[i]] <- x[[names(sales)[i]]]
    }
  }
  sales[is.na(sales)] <- 0  # NA sums (e.g. NA trans_amt) count as 0
  sales$Total <- sum(sales[1, 1:8])
  sales
}
# Sum transaction counts per segment.
#
# Args:
#   transData: data frame with columns `Sum_Count` (numeric) and
#              `segment_id` (values "S1".."S8").
# Returns: one-row data frame with columns S1..S8 (per-segment transaction
#          counts, 0 for absent segments) and Total (grand sum).
Transactions <- function(transData){
  transactions <- as.data.frame.list(matrix(0, 9, 1))
  names(transactions) <- c("S1", "S2", "S3", "S4", "S5", "S6","S7","S8","Total")
  # per-segment count totals; names(x) are the segment ids actually present
  x <- with(transData, tapply(Sum_Count, segment_id, sum))
  for (i in 1:8){
    # BUGFIX: guard against segments absent from the data; the original
    # unguarded assignment errored on a zero-length replacement.
    if (names(transactions)[i] %in% names(x)){
      transactions[[i]] <- x[[names(transactions)[i]]]
    }
  }
  transactions[is.na(transactions)] <- 0
  transactions$Total <- sum(transactions[1, 1:8])
  transactions
}
# Sum distinct-customer counts per segment.
#
# Args:
#   cusData: data frame with columns `Sum_CountDistinct_unique_id` (numeric)
#            and `segment_id` (values "S1".."S8").
# Returns: one-row data frame with columns S1..S8 (per-segment customer
#          counts, 0 for absent segments) and Total (grand sum).
Customers <- function(cusData){
  customers <- as.data.frame.list(matrix(0, 9, 1))
  names(customers) <- c("S1", "S2", "S3", "S4", "S5", "S6","S7","S8","Total")
  # per-segment customer totals; names(x) are the segment ids actually present
  x <- with(cusData, tapply(Sum_CountDistinct_unique_id, segment_id, sum))
  for (i in 1:8){
    # BUGFIX: guard against segments absent from the data; the original
    # unguarded assignment errored on a zero-length replacement.
    if (names(customers)[i] %in% names(x)){
      customers[[i]] <- x[[names(customers)[i]]]
    }
  }
  customers[is.na(customers)] <- 0
  customers$Total <- sum(customers[1, 1:8])
  customers
}
# For each segment (and the overall Total row), find the weekday with the
# highest average revenue.
#
# Args:
#   dataAll: data frame with `transdate` (character, "%m/%d/%Y"),
#            `segment_id` ("S1".."S8") and `trans_amt` (numeric).
# Returns: one-row data frame of weekday names (or "None" when a segment has
#          no revenue on any weekday), columns S1..S8 and Total.
#
# NOTE(review): weekdays() returns locale-dependent day names; the matching
# against English "Monday".."Sunday" below silently fails (all zeros) in a
# non-English locale -- confirm the session locale before trusting output.
PeakDays <- function(dataAll){
## Changing data classes
dataAll$transdate <- as.Date(dataAll$transdate, "%m/%d/%Y")
dataAll$segment_id <- as.character(dataAll$segment_id)
## Creating sum of revenue matrix
# 9 rows (S1..S8 + Total) x 7 weekday columns of summed revenue
SumOfRevenues <- data.frame(matrix(0, 9, 7), row.names = c("S1", "S2","S3","S4","S5","S6","S7","S8","Total"))
names(SumOfRevenues) <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
dataAll$weekdays <- weekdays(dataAll$transdate)
i <- 1; j <- 1
for (i in 1:8){
for (j in 1:7){
SumOfRevenues[i, j] <- sum(with(dataAll, dataAll[segment_id == rownames(SumOfRevenues)[i] & weekdays == names(SumOfRevenues)[j], "trans_amt"]))
}
}
SumOfRevenues[9, ] <- sapply(SumOfRevenues, sum)
## Counting weekdays
# how many distinct calendar dates fall on each weekday (denominator for
# the per-weekday averages)
x <- as.data.frame.list(table(weekdays(unique(dataAll$transdate))))
weekdayCount <- as.data.frame(matrix(0, 1, 7))
names(weekdayCount) <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
m <- 1
for (m in 1:7){
if (names(weekdayCount)[m] %in% names(x)){
weekdayCount[1, m]<- x[1, names(weekdayCount)[m]]
}
}
weekdayCountMatrix <- rbind(weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount)
## Creating average matrix
# average revenue per weekday; 0/0 divisions produce NaN, zeroed next
Avgs <- SumOfRevenues/weekdayCountMatrix
Avgs[is.na(Avgs)] <- 0
## Finding peak days
# column index of the maximum average per row
locateMax <- max.col(Avgs)
locateMax <- as.character(locateMax)
a <- 1
# a row with zero average on all 7 weekdays has no meaningful peak
for (a in 1:length(locateMax)){
if(sum(Avgs[a, ] == 0) == 7){
locateMax[a] <- "None"
}
}
n <- 1
# map column indices back to weekday names
for (n in 1:length(locateMax)){
if (locateMax[n]==1){locateMax[n] <- "Monday"}
if (locateMax[n]==2){locateMax[n] <- "Tuesday"}
if (locateMax[n]==3){locateMax[n] <- "Wednesday"}
if (locateMax[n]==4){locateMax[n] <- "Thursday"}
if (locateMax[n]==5){locateMax[n] <- "Friday"}
if (locateMax[n]==6){locateMax[n] <- "Saturday"}
if (locateMax[n]==7){locateMax[n] <- "Sunday"}
}
locateMax <- as.data.frame.list(locateMax)
names(locateMax) <- c("S1", "S2","S3","S4","S5","S6","S7","S8","Total")
locateMax
}
# For each segment (and the overall Total row), find the weekday with the
# lowest average revenue. Mirror image of PeakDays(); the averaging logic
# is duplicated from it.
#
# Args:
#   dataAll: data frame with `transdate` (character, "%m/%d/%Y"),
#            `segment_id` ("S1".."S8") and `trans_amt` (numeric).
# Returns: one-row data frame of weekday names (or "None" when more than one
#          weekday average is zero, making the minimum ambiguous),
#          columns S1..S8 and Total.
#
# NOTE(review): weekdays() returns locale-dependent day names; matching
# against English "Monday".."Sunday" fails silently in a non-English locale.
ValleyDays <- function(dataAll){
## Changing data classes
dataAll$transdate <- as.Date(dataAll$transdate, "%m/%d/%Y")
dataAll$segment_id <- as.character(dataAll$segment_id)
## Creating sum of revenue matrix
# 9 rows (S1..S8 + Total) x 7 weekday columns of summed revenue
SumOfRevenues <- data.frame(matrix(0, 9, 7), row.names = c("S1", "S2","S3","S4","S5","S6","S7","S8","Total"))
names(SumOfRevenues) <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
dataAll$weekdays <- weekdays(dataAll$transdate)
i <- 1; j <- 1
for (i in 1:8){
for (j in 1:7){
SumOfRevenues[i, j] <- sum(with(dataAll, dataAll[segment_id == rownames(SumOfRevenues)[i] & weekdays == names(SumOfRevenues)[j], "trans_amt"]))
}
}
SumOfRevenues[9, ] <- sapply(SumOfRevenues, sum)
## Counting weekdays
# distinct calendar dates per weekday (denominator for the averages)
x <- as.data.frame.list(table(weekdays(unique(dataAll$transdate))))
weekdayCount <- as.data.frame(matrix(0, 1, 7))
names(weekdayCount) <- c("Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday")
m <- 1
for (m in 1:7){
if (names(weekdayCount)[m] %in% names(x)){
weekdayCount[1, m]<- x[1, names(weekdayCount)[m]]
}
}
weekdayCountMatrix <- rbind(weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount, weekdayCount)
## Creating average matrix
Avgs <- SumOfRevenues/weekdayCountMatrix
Avgs[is.na(Avgs)] <- 0
## Finding valley days
# max.col of the negated matrix = column index of the per-row minimum
locateMin <- max.col(-Avgs)
locateMin <- as.character(locateMin)
a <- 1
# note: stricter than PeakDays -- any row with MORE THAN ONE zero average
# is marked "None" (zero minimum would be ambiguous)
for (a in 1:length(locateMin)) {
if (sum(Avgs[a, ] == 0) > 1){
locateMin[a] <- "None"
}
}
n <- 1
# map column indices back to weekday names
for (n in 1:length(locateMin)){
if (locateMin[n]==1){locateMin[n] <- "Monday"}
if (locateMin[n]==2){locateMin[n] <- "Tuesday"}
if (locateMin[n]==3){locateMin[n] <- "Wednesday"}
if (locateMin[n]==4){locateMin[n] <- "Thursday"}
if (locateMin[n]==5){locateMin[n] <- "Friday"}
if (locateMin[n]==6){locateMin[n] <- "Saturday"}
if (locateMin[n]==7){locateMin[n] <- "Sunday"}
}
locateMin <- as.data.frame.list(locateMin)
names(locateMin) <- c("S1", "S2","S3","S4","S5","S6","S7","S8","Total")
locateMin
}
# Sum the per-segment columns of a GC (gift-card / geo-count -- TODO confirm
# meaning) table.
#
# Args:
#   GCData: data frame whose columns 3:10 are the segment columns S1..S8.
# Returns: one-row data frame with columns S1..S8 (column sums, 0 for columns
#          not present) and Total (grand sum).
GC <- function(GCData){
  GC <- as.data.frame.list(matrix(0, 9, 1))
  names(GC) <- c("S1", "S2", "S3", "S4", "S5", "S6","S7","S8","Total")
  # column sums of the segment columns; names(x) are those column names
  x <- sapply(GCData[, 3:10], sum)
  for (i in 1:8){
    # BUGFIX: guard against segment columns missing from GCData; the original
    # unguarded assignment errored on a zero-length replacement.
    if (names(GC)[i] %in% names(x)){
      GC[[i]] <- x[[names(GC)[i]]]
    }
  }
  GC[is.na(GC)] <- 0
  GC$Total <- sum(GC[1, 1:8])
  GC
}
################################### Outputs #######################################
# Report metrics x1..x38. Naming pattern within each population block:
#   revenue share, transaction share, customer share, transactions/customer,
#   revenue/transaction, revenue/customer, peak day, valley day.
# NA/NaN from divisions by zero totals are zeroed throughout.
## All Location Data
x1 <- Sales(DataAll) / Sales(DataAll)$Total; x1[is.na(x1)] <- 0
x2 <- Transactions(CountAll) / Transactions(CountAll)$Total; x2[is.na(x2)] <- 0
x3 <- Customers(CountAll) / Customers(CountAll)$Total; x3[is.na(x3)] <- 0
x4 <- Transactions(CountAll) / Customers(CountAll); x4[is.na(x4)] <- 0
x5 <- Sales(DataAll) / Transactions(CountAll); x5[is.na(x5)] <- 0
x6 <- x4 * x5
x7 <- PeakDays(DataAll)
x8 <- ValleyDays(DataAll)
## Market of Location
# x9/x10: GC totals and shares; x11/x12: local (walking + commuter) vs.
# traveler transaction shares.
x9 <- GC(GCData); x9[is.na(x9)] <- 0
x10 <- GC(GCData) / GC(GCData)$Total; x10[is.na(x10)] <- 0
x11 <- (Transactions(W.Counts) + Transactions(C.Counts)) / Transactions(CountAll); x11[is.na(x11)] <- 0
x12 <- Transactions(T.Counts) / Transactions(CountAll); x12[is.na(x12)] <- 0
## Walking Distance
x13 <- Sales(W.Data) / Sales(W.Data)$Total; x13[is.na(x13)] <- 0
x14 <- Transactions(W.Counts) / Transactions(W.Counts)$Total; x14[is.na(x14)] <- 0
x15 <- Customers(W.Counts) / Customers(W.Counts)$Total; x15[is.na(x15)] <- 0
x10.1 <- GC(W.GC) / GC(W.GC)$Total; x10.1[is.na(x10.1)] <- 0
# penetration index: customer share relative to GC share (x100); the Total
# column is a revenue-share-weighted average of the segment indices
x16 <- (x15 / x10.1) * 100; x16$Total <- sum(x16[1, 1:8] * x13[1, 1:8]); x16[is.na(x16)] <- 0
x17 <- Transactions(W.Counts) / Customers(W.Counts); x17[is.na(x17)] <- 0
x18 <- Sales(W.Data) / Transactions(W.Counts); x18[is.na(x18)] <- 0
x19 <- x17 * x18
x20 <- PeakDays(W.Data)
x21 <- ValleyDays(W.Data)
## Commuters
x22 <- Sales(C.Data) / Sales(C.Data)$Total; x22[is.na(x22)] <- 0
x23 <- Transactions(C.Counts) / Transactions(C.Counts)$Total; x23[is.na(x23)] <- 0
x24 <- Customers(C.Counts) / Customers(C.Counts)$Total; x24[is.na(x24)] <- 0
x10.2 <- GC(C.GC) / GC(C.GC)$Total; x10.2[is.na(x10.2)] <- 0
x25 <- (x24 / x10.2) * 100; x25$Total <- sum(x25[1, 1:8] * x22[1, 1:8]); x25[is.na(x25)] <- 0
x26 <- Transactions(C.Counts) / Customers(C.Counts); x26[is.na(x26)] <- 0
x27 <- Sales(C.Data) / Transactions(C.Counts); x27[is.na(x27)] <- 0
x28 <- x26*x27
x29 <- PeakDays(C.Data)
x30 <- ValleyDays(C.Data)
## Travelers
# no GC data exists for travelers, so no penetration index here
x31 <- Sales(T.Data) / Sales(T.Data)$Total; x31[is.na(x31)] <- 0
x32 <- Transactions(T.Counts) / Transactions(T.Counts)$Total; x32[is.na(x32)] <- 0
x33 <- Customers(T.Counts) / Customers(T.Counts)$Total; x33[is.na(x33)] <- 0
x34 <- Transactions(T.Counts) / Customers(T.Counts); x34[is.na(x34)] <- 0
x35 <- Sales(T.Data) / Transactions(T.Counts); x35[is.na(x35)] <- 0
x36 <- x34 * x35
x37 <- PeakDays(T.Data)
x38 <- ValleyDays(T.Data)
# final stacked output table, one metric per row block
OutputVersion <- rbind(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26,
x27, x28, x29, x30, x31, x32, x33, x34, x35, x36, x37, x38)
|
06ea2b539a9a8e60f82130c25e66a4dc779888f0
|
500edfa818d623e6be6788073ed51b473b0d4d0b
|
/man/dyad.sim.Rd
|
553be4d3edc105a8ff75b4124b4128ebb651b4cc
|
[] |
no_license
|
prodriguezsosa/dyadRobust
|
639e13046764e6275285895d447367525b9b36b7
|
8bb28eecffca3e663d2209e76a08c4749d4f78ba
|
refs/heads/master
| 2022-04-01T17:49:14.939665
| 2019-12-14T18:19:36
| 2019-12-14T18:19:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 926
|
rd
|
dyad.sim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim-dat.R
\docType{data}
\name{dyad.sim}
\alias{dyad.sim}
\title{Simulated dyadic data from \code{\href{https://arxiv.org/pdf/1312.3398.pdf}{Aronow, Peter M., Cyrus Samii, and Valentina A. Assenova. "Cluster-robust variance estimation for dyadic data." Political Analysis 23.4 (2015): 564-577.}}}
\format{An object of class \code{data.frame} with 4950 rows and 5 columns.}
\source{
\href{https://doi.org/10.7910/DVN/OMJYE5}{Harvard Dataverse}
}
\usage{
data(dyad.sim)
}
\description{
Simulated data with 100 units and full pairwise dyads.
}
\examples{
data(dyad.sim)
head(dyad.sim)
}
\references{
Aronow, Peter M., Cyrus Samii, and Valentina A. Assenova. "Cluster-robust variance estimation for dyadic data." Political Analysis 23.4 (2015): 564-577.
(\href{https://arxiv.org/pdf/1312.3398.pdf}{ARXIV})
}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.