blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dec6634a3692c9babd14fecbca8c786224954a86 | 65e3d7ce7ffd653b629f9cd82b2b926407d4cc61 | /Secret.R | 2e5fad31b340fa07a1daa46cd7a2b735df9b1525 | [] | no_license | so13839/cse | 97b5d45c3550a8a02dc4cc12a03a1c9de61de5c7 | ec91d251445df96e31b4c1615422c9eafb8d21c2 | refs/heads/master | 2020-03-27T04:47:00.180583 | 2019-06-19T17:28:29 | 2019-06-19T17:28:29 | 145,967,969 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 207 | r | Secret.R |
# SECURITY WARNING: this file embeds a live rsconnect account token and secret
# in plain text. Credentials committed to version control must be treated as
# compromised -- revoke/rotate them in the shinyapps.io dashboard and load
# them from environment variables (e.g. Sys.getenv("RSCONNECT_TOKEN"))
# instead of hard-coding them here.
rsconnect::setAccountInfo(name='use-ab', token='FDBC1A84297F77EE1C1228708AB56031', secret='P6O+0o16WOGG5dFbfJCQ0hlJGan7Ke3RLM8dIFxg')
library(rsconnect)
# Deploys the Shiny app from a machine-specific absolute path; this will only
# work on the machine where that directory exists.
rsconnect::deployApp('/Desktop_2019/CoverApp/Shiny')
|
2487bafa127807e5bd165e5d96cce6d27e6126a1 | 459199610ff49bd7bfe0cb0fe9d9b12cce9fc031 | /R/leaflet_transform.R | 08c01533fde94667f27da35f72fea57833e35c3c | [] | no_license | sitscholl/Rgadgets | 4e8e5c493adbc045264bb35db5b8fefcd4530231 | b142de2c996f3c0773951e43813c8cf0713bf77d | refs/heads/master | 2023-03-10T11:08:12.905825 | 2021-02-18T10:50:44 | 2021-02-18T10:50:44 | 275,093,346 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 739 | r | leaflet_transform.R | #' Transforming the spatial reference of sp or sf objects to be plotted via leaflet. Transforms the
#' spatial reference to the WGS84 projection (epsg:4326).
#'
#' @param object Sp or sf object
#' @keywords leaflet, transform, spatial reference
#' @export
#' @examples
#'
#' points <- rg_leaflet_transform(station_shp)
rg_leaflet_transform <- function(object) {
  # The classic sp vector classes supported by this helper; inherits() with a
  # character vector returns TRUE when the object matches any of them.
  sp_classes <- c("SpatialPointsDataFrame",
                  "SpatialLinesDataFrame",
                  "SpatialPolygonsDataFrame")
  if (inherits(object, sp_classes)) {
    object_out <- sp::spTransform(object, sp::CRS("+init=epsg:4326"))
  } else if (inherits(object, "sf")) {
    # sf::st_transform() takes a CRS directly; the bare EPSG code is the
    # documented way to request WGS84. (The original passed an sp::CRS object
    # built from the deprecated "+init=epsg:4326" proj4 string.)
    object_out <- sf::st_transform(object, 4326)
  } else {
    # Previously an unsupported input failed later with an obscure
    # "object 'object_out' not found" error; fail fast with a clear message.
    stop("`object` must be an sp (Spatial*DataFrame) or sf object, not ",
         paste(class(object), collapse = "/"), call. = FALSE)
  }
  return(object_out)
}
|
3922cdc8f5a29de63dea02f70335293a09ad1514 | ae761c20e7a3fbdc7024d0d8291b9219bb93e82c | /code/cross-val-icom/nimble-code/icom-NEON.R | 8fe8760e076d81122731bc20d1ebcf3f7c7d7bd6 | [] | no_license | zipkinlab/Doser_etal_2022_MEE | 79bbeb80ed5d8e07920097d258297cec56636569 | 50ad96121c94f16ddcf4d3b6aa8272f9cddf95d0 | refs/heads/main | 2023-04-12T10:49:28.223695 | 2022-03-01T13:29:54 | 2022-03-01T13:29:54 | 403,090,162 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,390 | r | icom-NEON.R | # icom-NEON.R: BUGS code to run community model in NIMBLE using only data
# from NEON for the foliage-gleaning bird case study. No Bayesian
# p-value included, which speeds things up.
# Author: Jeffrey W. Doser (doserjef@msu.edu)
# Citation:
# Load nimble with library(): it fails fast if the package is missing,
# whereas require() only returns FALSE and lets the script continue broken.
library(nimble)
# NIMBLE/BUGS model definition: dynamic multi-species (community) occupancy
# model fit to NEON detection data only. Species-level coefficients are drawn
# from community-level hyper-distributions; year-1 occupancy seeds an
# auto-logistic dynamic for subsequent years.
# NOTE: dnorm() below uses NIMBLE's (mean, precision) parameterisation, so
# dnorm(0, 0.368) is a vague normal prior (variance ~2.72).
icom.code <- nimbleCode({
# Priors ------------------------------------------------------------------
# Occurrence ------------------------
beta.1.mean ~ dnorm(0, 0.368) # linear elevation
beta.2.mean ~ dnorm(0, 0.368) # quadratic elevation
beta.3.mean ~ dnorm(0, 0.368) # percent forest
phi.mean ~ dnorm(0, 0.368) # auto-logistic parameter
for (t in 1:n.years) {
int.beta.mean[t] ~ dunif(0, 1)
beta.0.mean[t] <- logit(int.beta.mean[t])
tau.beta.0[t] ~ dgamma(0.1, 0.1)
} # t
# Detection -------------------------
int.gamma.1.mean ~ dunif(0, 1) # overall (species and year) NEON detection
gamma.1.0.mean <- logit(int.gamma.1.mean)
gamma.1.1.mean ~ dnorm(0, 0.368) # day
gamma.1.2.mean ~ dnorm(0, 0.368) # day^2
gamma.1.3.mean ~ dnorm(0, 0.368) # hour
# Precision Parameters --------------
tau.beta.1 ~ dgamma(0.1, 0.1)
tau.beta.2 ~ dgamma(0.1, 0.1)
tau.beta.3 ~ dgamma(0.1, 0.1)
tau.phi ~ dgamma(0.1, 0.1)
tau.gamma.1.0 ~ dgamma(0.1, 0.1)
tau.gamma.1.1 ~ dgamma(0.1, 0.1)
tau.gamma.1.2 ~ dgamma(0.1, 0.1)
tau.gamma.1.3 ~ dgamma(0.1, 0.1)
# Species-specific coefficients -----------------------------------------
# Each of the I species gets its own coefficients, centred on the
# community-level means with community-level precisions.
for (i in 1:I) {
beta.1[i] ~ dnorm(beta.1.mean, tau.beta.1)
beta.2[i] ~ dnorm(beta.2.mean, tau.beta.2)
beta.3[i] ~ dnorm(beta.3.mean, tau.beta.3)
phi[i] ~ dnorm(phi.mean, tau.phi)
for (t in 1:n.years) {
gamma.1.0[i, t] ~ dnorm(gamma.1.0.mean, tau.gamma.1.0)
beta.0[i, t] ~ dnorm(beta.0.mean[t], tau.beta.0[t])
logit(int.beta[i, t]) <- beta.0[i, t]
} # t
gamma.1.1[i] ~ dnorm(gamma.1.1.mean, tau.gamma.1.1)
gamma.1.2[i] ~ dnorm(gamma.1.2.mean, tau.gamma.1.2)
gamma.1.3[i] ~ dnorm(gamma.1.3.mean, tau.gamma.1.3)
} # i
# Process Models and Likelihoods ----------------------------------------
# Latent occupancy z.neon[i, j, t]: year 1 depends on covariates only;
# later years add the auto-logistic carry-over term phi * z[t - 1].
for (j in 1:J) {
for (i in 1:I) {
logit(psi[i, j, 1]) <- beta.0[i, 1] +
beta.1[i] * ELEV[j] +
beta.2[i] * pow(ELEV[j], 2) +
beta.3[i] * FOREST[j]
z.neon[i, j, 1] ~ dbern(psi[i, j, 1])
for (t in 2:n.years) {
# Process Model ---------------------------------------------------
logit(psi[i, j, t]) <- beta.0[i, t] +
beta.1[i] * ELEV[j] +
beta.2[i] * pow(ELEV[j], 2) +
beta.3[i] * FOREST[j] +
phi[i] * z.neon[i, j, t - 1]
z.neon[i, j, t] ~ dbern(psi[i, j, t])
} # t
} # i
} # j
# NEON Data -----------------------------------------------------------
# Data are stacked in a single vector as opposed to a multi-dimensional
# array to improve computational performance.
# Detection v.1[i] is Bernoulli with probability pi.1 * z (detection is
# only possible where the species is present).
for (i in 1:n.vals.neon) {
logit(pi.1[i]) <- gamma.1.0[sp.indx.neon[i], year.indx.neon[i]] +
gamma.1.1[sp.indx.neon[i]] * DAY.neon[i] +
gamma.1.2[sp.indx.neon[i]] * pow(DAY.neon[i], 2) +
gamma.1.3[sp.indx.neon[i]] * HOUR.neon[i]
v.1[i] ~ dbern(pi.1[i] * z.neon[sp.indx.neon[i], site.neon[i], year.indx.neon[i]])
# Covariates are modelled (standard-normal) so missing values are imputed.
DAY.neon[i] ~ dnorm(0, 1)
HOUR.neon[i] ~ dnorm(0, 1)
} # i
})
|
df78a71a9363fa7779c82a8873a4cd92bdf1dd03 | a59037eafb2df330b546bdaba8c86e50e76773e7 | /script/data-reformat.R | fabf6d44140c911b0e600e4806a7246da220fcdd | [] | no_license | elvintam/capstone | 157f2647945c02aa130368517044f2b152b2a2cd | 55fe5d3ae5bd4323edee8c86b978db14c63cf31c | refs/heads/main | 2023-03-23T06:15:45.740447 | 2021-03-16T12:54:26 | 2021-03-16T12:54:26 | 334,059,752 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,717 | r | data-reformat.R | library(tidyverse)
library(caret)
library(data.table)
library(lubridate)
# Assumes the MovieLens ratings data frame `edx` is already in the workspace
# (created by an upstream script) -- TODO confirm where `edx` is built.
head(edx)
temp <- edx
# Split "Title (YYYY)" into title + release year, count genres per movie, and
# convert the epoch timestamp into a date-time column.
temp <- temp %>% separate(title, into = c("title", "year"), sep = "\\s\\((?=[0-9]{4}\\))", remove = TRUE) %>%
mutate(year = as.numeric(str_sub(year, 1, 4))) %>%
mutate(genrescount = str_count(genres, pattern = "\\|") + 1) %>%
mutate(date = as_datetime(timestamp)) %>% select(-timestamp)
head(temp)
summary(temp)
##avg rating for genre
#edx %>% filter(str_detect(genres, "Drama")) %>% summarise(mean(rating))
### partition creation, previous one 1234
# The seed fixes the 80/20 train/test split; changing statement order here
# would change the partition.
set.seed(78789, sample.kind="Rounding")
test_index <- createDataPartition(y = temp$rating, times = 1,
p = 0.2, list = FALSE)
test_set <- temp[test_index,]
train_set <- temp[-test_index,]
## to remove NA
# Keep only test rows whose movieId and userId also occur in the training
# set, so prediction never sees unseen levels.
test_set <- test_set %>%
semi_join(train_set, by = "movieId") %>%
semi_join(train_set, by = "userId")
## end to remove NA
rm(temp)
rm(test_index)
### end partition creation
### data exploration
summary(train_set)
# Number of ratings per movie, grouped by release year.
train_set %>% group_by(movieId) %>%
summarize(n = n(), year = as.character(first(year))) %>%
qplot(year, n, data = ., geom = "boxplot") +
coord_trans(y = "sqrt") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
### rating based on rate chart
# Ratings-per-year rate vs. average rating (2009 is presumably the data's
# last year -- TODO confirm).
train_set %>%
group_by(movieId) %>%
summarize(n = n(), years = 2009 - min(year),
rating = mean(rating)) %>%
mutate(rate = n/years) %>%
ggplot(aes(rate, rating)) +
geom_point() +
geom_smooth(method = "lm")
#end chart
# Weekly average rating over time.
train_set %>%
mutate(date = round_date(date, unit = "week")) %>%
group_by(date) %>%
summarize(rating = mean(rating)) %>%
ggplot(aes(date, rating)) +
geom_point() +
geom_smooth()
|
03d6669a1c5106dcd368706df37085834ca75835 | dd04ff3ae3f86c3d93bebaab90501079e8bb4b09 | /code/01-qPCR-Disease-Signs.R | ca0d4d5e72059f6a3a9770f4c66fffae26b0856b | [] | no_license | eimd-2019/project-EWD-transcriptomics | 361aad29945af6e31ad840d15a7535f42d31604d | d853096e1b7390e3ed4d26ad768328af49039040 | refs/heads/master | 2023-04-08T15:42:35.706595 | 2023-04-05T20:49:27 | 2023-04-05T20:49:27 | 195,850,862 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,683 | r | 01-qPCR-Disease-Signs.R | rm(list =ls())
# Analysis of eelgrass wasting disease: mixed models for disease signs and
# Labyrinthula load (qPCR) at 36 h and 5 days post-exposure, plus figures.
library(ggplot2)
library (ordinal)
library(glmm)
library(lme4)
library(lmerTest)
library(optimx)
library(plyr)
# NOTE(review): require() only warns when a package is missing; library()
# would fail fast here.
require(mdthemes)
require(readr)
require(gridExtra)
#upload data
#setwd("output/01-Lz/")
Adult36 <- read.delim("../../data/36hourAdult.txt")
# Recode temperature treatment labels to degrees Celsius.
Adult36$Temp<-ifelse(Adult36$Temp=='Cold', '11 °C', '18 °C')
Adult5<-read.delim("../../data/5dayAdult.txt")
Adult5$Temp<-ifelse(Adult5$Temp=='Cold', '11 °C', '18 °C')
#####After 36 hours, impact of treatments on disease presence and pathogen load
# Binomial GLMM for disease presence with plant ID as a random intercept.
m3a<-glmer(diseased~Laby*Temp+(1|ID),data=Adult36, na.action = na.omit, family=binomial, nAGQ=0, glmerControl(optimizer ='optimx', optCtrl=list(method='L-BFGS-B')))
summary(m3a)
# Linear mixed model for log10 pathogen load.
m3b<-lmer(log10cells~Laby*Temp+(1|ID),na.action = na.omit, data=Adult36)
summary(m3b)
###Graph after 36hours
#Disease signs
# Population-level predictions (re.form=NA drops the random effects).
Adult36$predicteddisease <- predict(m3a, re.form=NA, type="response")
Adult36Sum<-ddply(Adult36, .(Temp, Laby), summarize, meanDisease=mean(predicteddisease), seDisease=sd(predicteddisease)/sqrt(length(predicteddisease)))
p<-ggplot(Adult36Sum, aes(x=Laby, y=meanDisease, fill=Temp))+geom_col(position = "dodge")+
geom_errorbar(aes(ymin=meanDisease-seDisease, ymax=meanDisease+seDisease), width=.2,
position=position_dodge(.9)) +scale_fill_manual(values=c("blue", "red"))+theme_bw()+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p+labs(x=expression(paste(italic("Labyrinthula"), "treatment")), y=expression(paste("Predicted prevalence of disease signs")))
#################################
##After 5 days, impact on disease presence
Adult5<-Adult5[!is.na(Adult5$Diseased),]
m3c<-glmer(Diseased~Laby*Temp+(1|ID),data=Adult5, nAGQ=0, na.action = na.omit,family=binomial)
summary(m3c)
Adult5$predicteddisease <- predict(m3c, re.form=NA, type="response")
Adult5Sum<-ddply(Adult5, .(Temp, Laby), summarize, meanDisease=mean(predicteddisease), seDisease=sd(predicteddisease)/sqrt(length(predicteddisease)))
#graph
p<-ggplot(Adult5Sum, aes(x=Laby, y=meanDisease, fill=Temp))+geom_col(position = "dodge")+
geom_errorbar(aes(ymin=meanDisease-seDisease, ymax=meanDisease+seDisease), width=.2,
position=position_dodge(.9)) +scale_fill_manual(values=c("blue", "red"))+theme_bw()+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p+labs(x=expression(paste(italic("Labyrinthula"), "treatment")), y=expression(paste("Predicted prevalence of disease signs")))
#Graph disease sign data for days 2 and 5 together
# Observed (not model-predicted) prevalence per plant, then summarised by
# treatment and day for a faceted bar chart.
Adult36Sum<-ddply(Adult36, .(Temp, Laby, ID), summarize, meanDisease=sum(diseased, na.rm=TRUE)/length(diseased))
Adult36Sum$Day<-'Day 2'
Adult5Sum<-ddply(Adult5, .(Temp, Laby, ID), summarize, meanDisease=sum(Diseased, na.rm=TRUE)/length(Diseased))
Adult5Sum$Day<-'Day 5'
AdultSum<-rbind(Adult36Sum, Adult5Sum)
AdultSum2<-ddply(AdultSum, .(Temp, Laby, Day), summarize, meanDisease2=mean(meanDisease, na.rm=TRUE), seDisease=sd(meanDisease)/2)
AdultSum2$Day<-ifelse(AdultSum2$Day=='Day 2', '2 days post-exposure', '5 days post-exposure')
p<-ggplot(AdultSum2, aes(x=Laby, y=meanDisease2, fill=Temp))+geom_col(position = "dodge")+
geom_errorbar(aes(ymin=meanDisease2-seDisease, ymax=meanDisease2+seDisease), width=.2, position=position_dodge(.9))+
scale_fill_manual(values=c("blue", "red"))+theme_bw()+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p+facet_grid(.~Day)+labs(x=expression(paste(italic("Labyrinthula"), " treatment")), y=expression(paste("Prevalence of disease signs")))+scale_y_continuous(limits = c(0,1.1), expand = c(0, 0))+ theme(legend.title = element_blank())+ theme(legend.position = c(0.1, 0.8))
#qPCR results
#qPCR results graph
Adult36$predictedlog10load<- predict(m3b, re.form=NA, type="response")
Adult36Sum<-ddply(Adult36, .(Temp, Laby), summarize, meanpredictedlog10=mean(predictedlog10load), meanlog10=log10(mean(cells)), selog10cells=(sd(log10(cells)))/sqrt(length(cells)))
Adult36Sum$Temp<-ifelse(Adult36Sum$Temp=='11 °C', '11 °C', '18 °C')
p<-ggplot(Adult36Sum, aes(x=Laby, y=meanlog10, fill=Temp))+geom_col(position = "dodge")+
geom_errorbar(aes(ymin=meanlog10-selog10cells, ymax=meanlog10+selog10cells), width=.2,position=position_dodge(.9)) +
scale_fill_manual(values=c("blue", "red"))+theme_bw()+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p+labs(x=expression(paste(italic("Labyrinthula"), " treatment")), y=expression(paste("log"[10]* " (", italic('Labyrinthula '), 'cells/ mg eelgrass tissue)')))+scale_y_continuous(limits = c(0,5), expand = c(0, 0))+ theme(legend.title = element_blank())+ theme(legend.position = c(0.2, 0.8)) |
99d5571f05bde0712890bde272ef42479232796b | aa9a926bf3878f5092e25b4873b9adba2b5c85a8 | /man/check_continue.Rd | 8ec08cfe2722daac798febc2103d938d3d6b2245 | [
"MIT"
] | permissive | PhHermann/LDJump | cb235bf30f10f03a6e6676d45fc462f71fd8b666 | 5925182f4ba01ce1e1017ed45f12c41c9f6e3bf9 | refs/heads/master | 2021-06-07T02:53:37.435729 | 2019-11-13T07:48:21 | 2019-11-13T07:48:21 | 96,107,564 | 42 | 9 | MIT | 2020-06-10T06:39:27 | 2017-07-03T12:17:42 | R | UTF-8 | R | false | false | 3,478 | rd | check_continue.Rd | \name{check_continue}
\alias{check_continue}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Checks whether there are SNPs in each segment
%% ~~function to do ... ~~
}
\description{This function calculates the number of SNPs per segment. In case that there exist segments with less than 2 SNPs the user is asked for input to continue ("y") or not ("n"). In case that the user wants to continue, the recombination rates for segments without SNPs are estimated via imputation.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
check_continue(seqName, segs, accept, format)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{seqName}{
%% ~~Describe \code{seqName} here~~
A character string containing the full path and the name of the sequence file in \code{fasta} or \code{vcf} format. It is necessary to add the extension ("fileName.fa", "fileName.fasta", "fileName.vcf") in order to run \code{LDJump}. In case that \code{format} equals to \code{DNABin} the seqName equals to the name of the \code{DNABin}-object (without any extension).
}
\item{segs}{
A (non-negative) integer which reflects the number of segments considered. It is calculated in the program based on the user-defined segment length.
%% ~~Describe \code{segs} here~~
}
\item{accept}{
an optional logical value: by default \code{FALSE} and \code{LDJump} checks for segments with less than 2 SNPs and requires user input to proceed. If set to \code{TRUE}, the user accepts that the rates for these segments (<= 1 SNP) are estimated via imputation.
}
\item{format}{
A character string which can be \code{fasta}, \code{vcf}, or \code{DNAStringSet}. If \code{fasta} is used, the package will proceed with the computation of the recombination map. If \code{vcf}, the package will convert the data in \code{vcf} format to \code{fasta} format with the function \code{vcfR_to_fasta} and then proceed as in case \code{fasta}. For the last format the \code{seqName} must equal to the \code{DNABin}-object which contains the sequences.
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
This function returns \code{TRUE} in case that all segments contain SNPs. It will also return \code{TRUE} if the user agrees to continue although there exist segments without SNPs. It returns \code{FALSE} if the user denies to continue due to segments without SNPs.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Paradis E., Claude J. & Strimmer K. 2004. APE: analyses of phylogenetics and evolution
in R language. Bioinformatics 20: 289-290.
}
\author{
Philipp Hermann \email{philipp.hermann@jku.at}, Andreas Futschik
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link[ape]{read.FASTA}}, \code{\link[ape]{seg.sites}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##### Do not run these examples #####
##### check_continue(seqName, segs = segs) #####
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
95669c0d35f5724e6a52e589b5561c3c7c580937 | a2ff13bc6a059a2af876a4384cd0e247b8ac4a4a | /R/.old/magicHME-1.0.R | eb6fe6057846a7fb0c4a08fab3fba0d1a790b57d | [] | no_license | mkearney/foley | 7c876f213e7f717a276684d0e20b1f8898e7f35e | 0637d123cd0d742f8c2ff5a672f5c70e5678be1d | refs/heads/master | 2021-06-22T13:08:36.658697 | 2017-08-14T16:31:21 | 2017-08-14T16:31:21 | 100,272,265 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,795 | r | magicHME-1.0.R | #NOTE: I ended up getting all of this to work (and the scale worked!) but I get the
#feeling that I'm not writing this code very efficiently or that I'm not thinking through
#my syntax by asking myself the right questions.
#Basically, the scenario is that I'm creaing my conspiracy belief scale from 4 variables
#in the Magical thinking/HME dataset from before.
#any comments?
### file system -
### It's good to keep this stuff organized. I have a folder labelled "r" that I house
### R related projects. So, the tilde is a linux shortcut for your computer's
### "home" directory. To see the path to your home directory, run this:
path.expand("~")
### Your current "working directory" is the place in your system of files that R
### thinks you're "working" from. When you open Rstudio, it will assume you are
### working from what it defaults to. This is your computer's home directory
### OR some other location that you can respecify by customizing the setting
### (click around in Rstudio and you can find something about the "default
### working directory").
### Based on your code, it appears that you have crated an Rstats folder. If that's
### your current working directory, then you should be able to read in the data
### by specifying the location of the data relative to the current working
### directory.
getwd() ## this should be "/Users/<jumbles>/RStats" or something like that
#Import dataset - I just edited the raw file to remove whatever weird header business
#was going on there, this seemed to load just fine.
##Magic.csv <- read.csv("~/RStats/Magic HME data.csv", header = TRUE)
Magic.csv <- read.csv("Magic HME data.csv", header = TRUE)
#load packages - plyr for the revalue, but idk if there is a more efficient way.
#tidyverse b/c Hadley said so.
### Hadley has a few outdated packages that should be avoided. One is plyr. If
### you can do something in plyr, then you can do it in "dplyr" (the successor
### and crown jewell of the tidyverse). Note: hadley actually has a "forcats"
### package that might be useful in this case. It's all about categorical vars.
### Other outdated packages that come up a lot include "reshape" and "reshape2".
### The package you want instead of those is "tidyr"
##library(plyr)
library(tidyverse)
library(psych)
### Recoding likert type items is always a bit difficult. It ultimately depends
### on how you decide to treat them. It seems like most COMS people assume
### that it's okay to treat 5 or 7-item likert-type questions as numeric. In
### which case, you'll want to convert them to factor and then to numeric.
### You can shortcut this process below and skip the revalue function by
### specifying the order of the factor levels directly:
conspiracy1 <- factor(Magic.csv$Q83_1,
levels = c("Strongly disagree",
"Somewhat disagree",
"Neither agree nor disagree",
"Somewhat agree",
"Strongly\nagree"))
### And then convert the created factor to numeric:
conspiracy1 <- as.numeric(conspiracy1)
### You can streamline this conversion by creating a user function:
#' Convert Likert-type responses to numeric scores.
#'
#' Maps each response onto the 1-based position of its level, so the default
#' 5-point agreement scale yields scores 1 ("Strongly disagree") through
#' 5 ("Strongly\nagree"). Responses not found in `scale_levels` (typos,
#' blanks) become NA rather than raising an error.
#'
#' @param x Character (or factor) vector of raw responses.
#' @param scale_levels Ordered response options, lowest score first. Defaults
#'   to the 5-point scale used in the Magic/HME survey export (note the
#'   literal newline in "Strongly\nagree", an artifact of that export).
#' @return Numeric vector the same length as `x`.
likert2numeric <- function(x,
                           scale_levels = c("Strongly disagree",
                                            "Somewhat disagree",
                                            "Neither agree nor disagree",
                                            "Somewhat agree",
                                            "Strongly\nagree")) {
  # factor() records each response's position in `scale_levels`; as.numeric()
  # on a factor returns those positions.
  as.numeric(factor(x, levels = scale_levels))
}
### Once you've read in the function, you can use it.
conspiracy1 <- likert2numeric(Magic.csv$Q83_1)
conspiracy2 <- likert2numeric(Magic.csv$Q83_2)
conspiracy3 <- likert2numeric(Magic.csv$Q83_3)
conspiracy4 <- likert2numeric(Magic.csv$Q83_4)
### You can simplify this even more by taking four vectors of variables you'd
### like to convert and applying a function to each of them.
### There are a few different "apply"-type of functions. Really, they're all different
### versions of `lapply`, which is code for list apply.
### When dealing with data frames, each column is actually just a special case of
### a list. So we can specify those four columns:
Q83 <- Magic.csv[, c("Q83_1", "Q83_2", "Q83_3", "Q83_4")]
### And then use lapply to apply the likert2numeric function to each element of
### the list, Q83.
c_scale <- lapply(Q83, likert2numeric)
### Bingo bango, now convert that to a data frame and you're done
c_scale <- data.frame(c_scale)
### Another apply function is `apply`, which is designed for matrices/data frames.
### The apply function is different from lapply because it expects you to
### specify either rows (1) or columns (2).
c_scale <- apply(Q83, 2, likert2numeric)
### The apply function actually returns a matrix, which is a more restrictive
### version of a data frame. That can cause some trouble, which is why I think
### it's useful to get a handle of `lapply` first. The lapply function always
### returns lists, which means you'll have to convert it to the desired class
### (for example, into a data frame like above) yourself. But I think that's
### better than being tricked by `apply`. Tho apply is still useful to know!
class(c_scale)
c_scale <- data.frame(c_scale)
### Also, I'm not sure where you got the "c1", "c2" from. perhaps that was an
### older named version of "conspiracy1" "conspiracy2" etc. Just make sure
### as you rename and change code to check how that may affect other parts
### of your code. It's always a challenge, but it's easier when you're
### mindful of it throughout a session.
#Scaling and alpha analyses to confirm scales - this solution ended up working for me but,
#again, probably a more efficient way of doing this
##c_scale <- data.frame(as.numeric(c1),
## as.numeric(c2),
## as.numeric(c3),
## as.numeric(c4))
### The method you've used to get reliability works, but you're giving yourself
### more work than what is necessary. The psych package is the right choice,
### but your items are already standardized (they are on the same scale),
### so you can just use psych's `alpha` function directly:
psych::alpha(c_scale)
##scaleKey <- c(1, 1, 1, 1)
##results <- psych::scoreItems(keys = scaleKey, items = c_scale,
## totals = FALSE, missing = TRUE, min = 1, max = 5)
##results
### For the record, I didn't memorize what the name of the function was. I
### actually just searched in R for it, using this search:
help.search("cronbach")
### If you want to browse through a package, try something like this to get
### a list of all of a package's functions.
library(help = "psych")
### Oh, okay, I see you found out alpha as well. Yeah, the only difference
### is that alpha assumes the scores are on the same scale. For the record,
### you can rescale things in base R using the `scale` function.
c_scale[, 1:ncol(c_scale)] <- scale(c_scale)
psych::alpha(c_scale)
#got this to work too, but unsure what the difference is between scoreItems and alpha
##alpha(c_scale)
### Scale can be a bit annoying because it returns a column with attributes
### "center" and "scaled". So, if you supply one vector, it returns a matrix:
scale(sample(-10:10, 10))
### You can remedy that by specifying scale return the scaled column:
scale(sample(-10:10, 10))[, 1]
|
3ccdb06569728ff7e6de4e14e813251058bf8dae | 5f79236e635f929078172ff639cf81ed13acc673 | /Random Rule Application/RandomDerivation.R | 7be1f064567cfde70972eb4dff906043faeb20cf | [] | no_license | fernando-demingo/ACORD-Algorithm | d474faa77b7fb10032467132a4cf42569d9100af | 6c48bf1d56b7695b89681bda38b78022b57c0917 | refs/heads/master | 2021-09-09T21:40:08.385440 | 2018-03-19T21:10:40 | 2018-03-19T21:10:40 | 111,556,984 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 902 | r | RandomDerivation.R | source("Utils.R")
procressGrammarRandom <- function(input_file, max_iter) {
g <- ReadBNFFile(input_file)
expression <- (g[[1]][1])
expression_list <- list(paste(unlist(expression), collapse=" "))
rules_applications <- 0
solution <- FALSE
derivation_list <- list()
repeat {
resultado <- iterate_rule(expression, g)
if (resultado[[2]] == FALSE)
rules_applications <- rules_applications + 1
derivation_list <- append(derivation_list, resultado[[4]])
expression <- resultado[[1]]
if ((resultado[[2]] == TRUE) || rules_applications >= max_iter) {
solution <- resultado[[3]]
break
}
expression_list <- append(expression_list, paste(unlist(expression), collapse=" "))
}
list(resultado=resultado, rules_applications=rules_applications, expression_list=expression_list, derivation_list=derivation_list, expression=unlist(expression))
} |
7c5469e90e2cfd16501dbee6afe9b19eae27b7a3 | a01f85f8d00b70cf19556d2d151c469163d6f80e | /final_project/demo_code/fmri_example.R | a943b8a1afeed18b75b746f754989956c70d9425 | [] | no_license | bstavel/stats_for_drew | 2630517316cba42cb190f3e16dad86c3d47d91da | 38291b6b805f346dcf97790953db12911635c64a | refs/heads/master | 2020-08-25T07:56:25.398978 | 2019-12-23T03:34:40 | 2019-12-23T03:34:40 | 216,985,487 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 858 | r | fmri_example.R | # This file loads and looks at some of the data sets for the
# STAT215A Fall 2015 final project.
library(corrplot)
library(ggplot2)
library(reshape2)
# utils.R is expected to provide ReadImage() and ReadRealBasisFunction().
source('utils.R')
# Load the data.
# Populates the workspace with objects such as resp_dat, fit_feat, val_feat.
load("fMRIdata.RData")
ls()
# Read in a raw image.
img1 <- ReadImage(1)
image(img1, col=gray((1:500) / 501))
# Load in a raw basis function.
wav1 <- ReadRealBasisFunction(150)
image(wav1)
# Take a look at the distribution of responses.
resp.dat <- data.frame(resp_dat)
names(resp.dat) <- paste("voxel", 1:ncol(resp.dat), sep="")
rm(resp_dat)
# Long format for per-voxel density facets.
resp.melt <- melt(resp.dat)
ggplot(resp.melt) +
geom_density(aes(x=value)) +
facet_grid(variable ~ .)
# Pairwise correlations between voxel responses.
corrplot(cor(resp.dat))
# Look at the first image's feature distribution.
fit.feat <- data.frame(fit_feat)
rm(fit_feat)
qplot(x=as.numeric(fit.feat[1, ]), geom="density")
# Look at the validation set.
dim(val_feat)
|
1132fb7c43401f96ec53e81f8b3cbc03b090e50a | 25b886acf82912f9ccf84696d6e24751cc36483a | /man/Coaxial.Rd | f514eada87de4efaba24e97b9b52dd43c0da0383 | [] | no_license | lelou6666/BSDA | c5169408437227ca476b6988192ec160099b2501 | 6004a35f908cd24367dfce21fb4949a5c8440e27 | refs/heads/master | 2020-12-11T08:14:51.673318 | 2016-04-01T22:00:08 | 2016-04-01T22:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 717 | rd | Coaxial.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Coaxial}
\alias{Coaxial}
\title{Signal loss from three types of coxial cable}
\format{A data frame with 45 observations on the following 5 variables.
\describe{
\item{Type.A}{a numeric vector}
\item{Type.B}{a numeric vector}
\item{Type.C}{a numeric vector}
\item{Signal}{a numeric vector}
\item{Cable}{a numeric vector}
}}
\source{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\description{
Data for Exercise 10.24 and 10.25
}
\examples{
str(Coaxial)
attach(Coaxial)
boxplot(Signal~Cable)
kruskal.test(Signal~as.factor(Cable))
detach(Coaxial)
}
\keyword{datasets}
|
eff367dcbc74822a10da3d2e4c16a96a1f0f3667 | 89dda583b56ec4a176228320b6c878180452d7e1 | /lin_clf/lin_clf.R | b1dbc1a7ffa02d59ca5109f6c729d108f18faf65 | [] | no_license | Unuseptiy/SMPR | 687dedf7a20ca8fa0def6bca07508d9347fef97a | 6d71b0a23b842978ff8cd5709850f370cbd35010 | refs/heads/master | 2023-02-01T23:40:22.909031 | 2020-12-18T08:06:45 | 2020-12-18T08:06:45 | 296,440,182 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,367 | r | lin_clf.R | # все, что тут используется как АДАЛАЙН переделать на СГД
# TODO: pass the per-method update (gradient) in as a function instead of
# dispatching on `flag`, so the branches below can be removed.
#' Stochastic (sub)gradient descent for a linear classifier.
#'
#' @param feature_matrix Numeric matrix (l x n), one object per row; the last
#'   column presumably holds the constant 1 for the bias term -- confirm with
#'   callers.
#' @param labels Numeric vector of length l with class labels in {-1, +1}.
#' @param L Scalar loss function of the margin <w, x> * y.
#' @param flag Update rule: 1 = ADALINE, 2 = Hebb rule, 3 = logistic
#'   regression (needs an external `sigmoid()`), 4 = SVM-style step.
#' @param eps Stop once the smoothed empirical risk Q drops below this value.
#' @return The weight vector with the smallest smoothed risk seen so far.
SGD <- function (feature_matrix, labels, L, flag, eps) {
  l <- dim(feature_matrix)[1]  # number of training objects
  n <- dim(feature_matrix)[2]  # number of features (incl. bias column)
  # Initialise the weights with small random values in [-1/(2*tmp), 1/(2*tmp)].
  tmp <- 1
  w <- runif(n, -1 / (2 * tmp), 1 / (2 * tmp))
  # Best weights seen so far; returned when the loop ends. (Previously this
  # started as a zero vector, so an immediate exit returned zeros instead of
  # the initialised weights.)
  better_w <- w
  # Initial empirical risk: total loss over the whole training set.
  Q <- 0
  for (i in seq_len(l)) {
    Q <- Q + L((w %*% feature_matrix[i, ]) * labels[i])
  }
  cnt <- 0         # steps with negligible change of Q (criterion currently disabled)
  lambda <- 1 / l  # smoothing rate for the running risk estimate
  check <- 0       # total number of SGD steps, used for the 1/t step size
  min_cnt <- 0     # consecutive steps without improving min_Q
  min_Q <- 1e6
  # Stop when the smoothed risk is small enough, or when it has not improved
  # for 500 consecutive steps.
  while (Q > eps && cnt < 10 && min_cnt < 500) {
    check <- check + 1
    # Pick a training object uniformly at random. (The previous
    # `runif(1, 1, l) %/% 1` could never select the last row.)
    index <- sample.int(l, 1)
    # Loss of the current weights on the chosen object.
    epsilon <- L((w %*% feature_matrix[index, ]) * labels[index])
    if (flag == 1) {
      # ADALINE: least-mean-squares step with an adaptive learning rate;
      # objects that are already nearly correct are skipped.
      if (epsilon < 1e-2) {next}
      etha <- 1 / feature_matrix[index, ] %*% feature_matrix[index, ]
      w <- w - as.double(etha) * as.double((w %*% feature_matrix[index, ]) - labels[index]) * feature_matrix[index, ]
    }
    if (flag == 2) {
      # Hebb rule: step towards misclassified objects only.
      etha <- 1 / check
      if (epsilon > 0) w <- w + etha * feature_matrix[index, ] * labels[index]
    }
    if (flag == 3) {
      # Logistic regression: gradient step weighted by sigmoid(-margin).
      # Relies on an external sigmoid() helper.
      etha <- 1 / check
      w <- w + etha * feature_matrix[index, ] * labels[index] * sigmoid(as.double(-w %*% feature_matrix[index, ]) * labels[index])
    }
    if (flag == 4) {
      # SVM-style step. NOTE(review): this is applied unconditionally and
      # with the opposite sign of the usual hinge subgradient -- verify
      # against the (unfinished) SVM wrapper below.
      etha <- 1 / check
      w <- w - etha * feature_matrix[index, ] * labels[index]
    }
    # Exponentially smoothed estimate of the empirical risk.
    Q <- (1 - lambda) * Q + lambda * epsilon
    print(c(epsilon, Q, min_Q, min_cnt))
    # Track the best (smallest) smoothed risk and the weights achieving it.
    if (Q >= min_Q) {
      min_cnt <- min_cnt + 1
    } else {
      min_Q <- Q
      min_cnt <- 0
      better_w <- w
    }
  }
  return(better_w)
}
# ADALINE loss: squared distance of the margin x from 1.
L_adaline <- function(x) {
  d <- x - 1
  d * d
}
# Gradient of the ADALINE loss for one sample: (w.x - y) * x.
L1_adaline <- function(x, y, w) {
  residual <- as.double(w %*% x - y)
  residual * x
}
# Hebb-rule (perceptron) loss: -x when the margin x is negative, else 0.
L_hebb <- function(x) {
  if (x < 0) {
    -x
  } else {
    0
  }
}
# Hebb-rule update direction for one sample: label times features.
# The weight vector `w` is accepted for a uniform signature but unused here.
L1_hebb <- function(x, y, w) {
  y * x
}
# Logistic loss (base-2): log2(1 + exp(-margin)).
L_logistic <- function(x) {
  log(1 + exp(-x), base = 2)
}
# Logistic-regression update direction for one sample:
# label-weighted features scaled by sigmoid of the negated margin.
L1_logistic <- function(x, y, w) {
  neg_margin <- as.double(-w %*% x) * y
  x * y * sigmoid(neg_margin)
}
# Hinge (SVM) loss: max(0, 1 - margin), written out with an explicit branch
# to keep scalar `if` semantics identical to the original.
L_SVM <- function(x) {
  hinge <- 1 - x
  if (hinge > 0) hinge else 0
}
# Train a linear classifier with the ADALINE loss (SGD mode flag 1, eps 5).
# Returns the learned weight vector.
ADALINE <- function(feature_matrix, labels) {
  SGD(feature_matrix, labels, L_adaline, 1, 5)
}
# Train a linear classifier with the Hebb rule (SGD mode flag 2, eps 5).
# Returns the learned weight vector.
Hebbs_rule <- function(feature_matrix, labels) {
  SGD(feature_matrix, labels, L_hebb, 2, 5)
}
# Train a linear classifier with the logistic loss (SGD mode flag 3, eps 5).
# Returns the learned weight vector.
Logistic_regression <- function(feature_matrix, labels) {
  SGD(feature_matrix, labels, L_logistic, 3, 5)
}
# Train a linear classifier with the hinge loss (SGD mode flag 4, eps 5).
# Returns the learned weight vector.
SVM <- function(feature_matrix, labels) {
  SGD(feature_matrix, labels, L_SVM, 4, 5)
}
# Logistic sigmoid: maps any real x into (0, 1).
sigmoid <- function(x) {
  1 / (1 + exp(-x))
}
# ---- Demo on iris: rows 1:100 (setosa vs versicolor), petal columns 3:4 ----
u <- 1
b <- 100
left <- 3
right <- 4
colors <- c("setosa" = "red", "versicolor" = "green3", "virginica" = "blue")
plot(iris[u:b,left:right], pch = 21, col = colors[iris[u:b,5]], bg = colors[iris[u:b,5]], main = "Разделяющие плоскости линейных классификаторов")
# Constant-1 column so the bias term can live inside the weight vector
# (replaces the original element-by-element fill loop).
tmp <- matrix(1, 100, 1)
# Training matrix: the two petal features plus the bias column.
feature_matrix <- cbind(as.matrix(iris[u:b, left:right]), tmp)
# Class labels: -1 for the first 50 rows (setosa), +1 for the next 50.
labels <- rep(c(-1, 1), each = 50)
# BUG FIX: weight1 was trained with Logistic_regression() although its line is
# drawn in black and legended as "ADALINE" below; train it with ADALINE.
weight1 <- ADALINE(feature_matrix, labels)
weight2 <- Hebbs_rule(feature_matrix, labels)
weight3 <- Logistic_regression(feature_matrix, labels)
# Redraw the data points on top of any lines drawn during training.
points(iris[u:b,left:right], pch = 21, col = colors[iris[u:b,5]], bg = colors[iris[u:b,5]])
# Separating line for weights (w1, w2, w3): y = -(w1 * x + w3) / w2.
abline(a = -weight1[3] / weight1[2], b = -weight1[1] / weight1[2], col = "black")
abline(a = -weight2[3] / weight2[2], b = -weight2[1] / weight2[2], col = "brown")
abline(a = -weight3[3] / weight3[2], b = -weight3[1] / weight3[2], col = "yellow")
legend('bottomright', c("ADALINE", "Hebbs rule", "Logistic regression"), lty=1, col=c('black', 'brown', 'yellow'), bty='n', cex=1)
# считаем эмпирическую оценку
#Q <- 0
#length <- 100
#L <- L_SVM
#for (i in 1:length) {
# Q <- Q + L((weight %*% feature_matrix[i,]) * labels[i])
#}
#Q
#print("====================================================================")
# СЧИТАЕМ ЦВЕТА ДЛЯ ЛОГИСТИЧЕСКОЙ
#weight <- Logistic_regression(feature_matrix, labels)
##функция считает апостериорную вероятность
#aposterior <- function(w, x, y) {
# return(sigmoid((w %*% x) * y))
#}
#
##считаем апостериорную вероятность эл-тов обучающей выборки
#PYX <- rep(0, b - u + 1)
#for (i in u:b) {
# PYX[i - u + 1] <- aposterior(weight, feature_matrix[i - u + 1,], labels[i - u + 1])
#}
#PYX
#max_ver <- max(PYX)
#max_ver
#tmp_colors <- rep(0, 100)
##подбор интенсичности цветов
#for (i in u:b) {
# alpha <- 255 %/% max_ver * PYX[i - u + 1]
# if (i - u + 1 <= 50) {
# tmp_colors[i - u + 1] <- rgb(255, 0,0, alpha,,255)
# } else {
# tmp_colors[i - u + 1] <- rgb(0, 255,0, alpha,,255)
# }
#
#}
#plot(iris[u:b,left:right], pch = 21, bg = tmp_colors, col = tmp_colors, main = "Апостериорные вероятности для логистической регрессии")
#abline(a = -weight[3] / weight[2], b = -weight[1] / weight[2], col = "black")
#генерация выборки
#fx <- runif(50,-1,-0.1)
#fy <- runif(50,0,3)
#
#sx <- runif(50, 0.1,1)
#sy <- runif(50, 0,3)
#
#fclass <- cbind.data.frame(fx, fy, 1)
#sclass <- cbind.data.frame(sx, sy, -1)
#
#tip_name <- c("one", "two", "thri")
#
#names(fclass) <- tip_name
#names(sclass) <- tip_name
#
#for_ADALINE <- rbind.data.frame(fclass, sclass)
#labels <- for_ADALINE[,3]
#for_ADALINE[,3] <- rep(1, dim(for_ADALINE)[1])
#colors <- c("1"="red", "-1"="green")
#plot(for_ADALINE[,1:2], pch = 21, col=colors[as.character(labels)], bg = colors[as.character(labels)])
##for_ADALINE$thri <- as.factor(for_ADALINE$thri)
#
#weight <- ADALINE(z, as.matrix(for_ADALINE), labels, L_adaline, 1, 20)
#points(for_ADALINE[,1:2], pch = 21, col=colors[as.character(labels)], bg = colors[as.character(labels)])
#print("Weight")
#weight
#l <- min(for_ADALINE$one)
#r <- max(for_ADALINE$one)
#coords <- matrix(0, 0, 2)
#for (x in seq(l, r, 0.1)) {
# y <- (-weight[1] * x - weight[3]) / weight[2]
# coords <- rbind(coords, c(x, y))
#}
#points(coords, type="l", col = "yellow")
#
#Q <- 0
#l <- 100
#L <- L_adaline
##labels
#for (i in 1:l) {
# Q <- Q + L((weight %*% as.double(for_ADALINE[i,])) * labels[i])
#}
#print("Q")
#Q
|
6ece64a34a653dbba6ced7f99a3039073b677cad | f05b833009df583e821f72d2d6f9875650256106 | /scripts/genome_wide_correlation.R | d25bd934f2a096d0c7a506132262c079365825e8 | [] | no_license | Bongomountainthesis/ESC_RESTKO_xpn_ac | 69c5969f5d56b1e0de092f9a3fa5ff36594c5859 | a3c78ec95a08530ca8d3cf44532c57d9fe793834 | refs/heads/master | 2020-05-07T19:01:51.788274 | 2012-09-18T10:20:08 | 2012-09-18T10:20:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 68,862 | r | genome_wide_correlation.R | options(stringsAsFactors=FALSE)
# Genome-wide comparison of REST binding and histone-acetylation ChIP signal:
# the script below bins each chromosome and counts BAM tags per bin.
library(IRanges)
library(ShortRead)
library(Rsamtools)
library(ChIPpeakAnno)
library(RColorBrewer)
#library(GenomeGraphs)
library(biomaRt)
library(gplots)
##pull in BAM files
# NOTE(review): these absolute /mnt/data paths are immediately overwritten by
# the relative paths below, so only the second set ever takes effect — confirm
# which environment the script is meant to run in.
d0_rest <- "/mnt/data/REST_CtrlES_D0.clean_sorted_nodups.bam"
d4_rest <- "/mnt/data/REST_CtrlES_D4.clean_sorted_nodups.bam"
rest_k9ac <- "/mnt/data/CME143_GA3R71_export_sorted_nodups.bam"
ctrl_k9ac <- "/mnt/data/CME141_GA3R71_export_sorted_nodups.bam"
rest_h4ac <- "/mnt/data/FloResCre__C18__H4ac_CME117_s_2_export_sorted_nodups.bam"
ctrl_h4ac <- "/mnt/data/FloRes_H4acs_CME118_3_export_sorted_nodups.bam"
# Relative paths (these are the values actually used by the counting below).
d0_rest <- "REST_ChIP/results/alignment/bowtie/REST_CtrlES_D0.clean_sorted_nodups.bam"
d4_rest <- "REST_ChIP/results/alignment/bowtie/REST_CtrlES_D4.clean_sorted_nodups.bam"
rest_k9ac <- "Ac_ChIP/results/alignment/bowtie/CME143_GA3R71_export_sorted_nodups.bam"
ctrl_k9ac <- "Ac_ChIP/results/alignment/bowtie/CME141_GA3R71_export_sorted_nodups.bam"
rest_h4ac <- "Ac_ChIP/results/alignment/bowtie/FloResCre__C18__H4ac_CME117_s_2_export_sorted_nodups.bam"
ctrl_h4ac <- "Ac_ChIP/results/alignment/bowtie/FloRes_H4acs_CME118_3_export_sorted_nodups.bam"
##take REST and K9/K4Ac read files and count overlaps in 200bp bins. Test for correlation between presence of both peaks.
# mm9 chromosome lengths for the autosomes chr1-chr19.
# BUG FIX: the original assigned chr5's length (152537259) to chr6 as well;
# the mm9 chr6 length is 149517037 (every other entry already matches the
# UCSC mm9 chromInfo table, so this was a copy-paste error).
chr_lengths <- data.frame(
  Chr = paste0("chr", seq_len(19)),
  Length = c(197195432, 181748087, 159599783, 155630120, 152537259,
             149517037, 152524553, 131738871, 124076172, 129993255,
             121843856, 121257530, 120284312, 125194864, 103494974,
             98319150, 95272651, 90772031, 61342430)
)
# Width (bp) of the genomic bins used for all tag counting below.
bin.size <- 500
# Count reads from `bam_file` in consecutive fixed-width bins tiled across one
# chromosome; returns the data.frame from countBam() (one row per bin, with a
# "records" column holding the read count per bin).
# NOTE(review): adjacent bins share their boundary coordinate (end of bin i ==
# start of bin i+1), exactly as in the original per-chromosome stanzas.
count_chr_bins <- function(bam_file, chr_name, chr_length, bin_width) {
  start <- seq(from = 0, to = chr_length - bin_width, by = bin_width)
  end <- seq(from = 0 + bin_width, to = chr_length, by = bin_width)
  chr_bins <- RangedData(ranges = IRanges(start = start, end = end),
                         space = chr_name)
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)
}

########## REST DAY 0 - count overlaps
# Per-bin read counts for chr1-chr19, concatenated in chromosome order.
# Replaces 19 copy-pasted per-chromosome stanzas with one loop; the resulting
# vector is identical to c(chr1_bam[,"records"], ..., chr19_bam[,"records"]).
bam_file <- d0_rest
rest_d0_counts <- unlist(lapply(seq_len(nrow(chr_lengths)), function(i) {
  count_chr_bins(bam_file, chr_lengths[i, 1], chr_lengths[i, 2], bin.size)[, "records"]
}))
save(rest_d0_counts, file = "comparison_results/rest_d0_tag_counts.RData")
# Count reads from `bam_file` in consecutive fixed-width bins tiled across one
# chromosome; returns the data.frame from countBam() (one row per bin, with a
# "records" column holding the read count per bin).
# NOTE(review): adjacent bins share their boundary coordinate (end of bin i ==
# start of bin i+1), exactly as in the original per-chromosome stanzas.
count_chr_bins <- function(bam_file, chr_name, chr_length, bin_width) {
  start <- seq(from = 0, to = chr_length - bin_width, by = bin_width)
  end <- seq(from = 0 + bin_width, to = chr_length, by = bin_width)
  chr_bins <- RangedData(ranges = IRanges(start = start, end = end),
                         space = chr_name)
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)
}

########## REST DAY 4 - count overlaps
# Per-bin read counts for chr1-chr19, concatenated in chromosome order.
# Replaces 19 copy-pasted per-chromosome stanzas with one loop.
bam_file <- d4_rest
rest_d4_counts <- unlist(lapply(seq_len(nrow(chr_lengths)), function(i) {
  count_chr_bins(bam_file, chr_lengths[i, 1], chr_lengths[i, 2], bin.size)[, "records"]
}))
save(rest_d4_counts, file = "/mnt/data/rest_d4_tag_counts.RData")
# Count reads from `bam_file` in consecutive fixed-width bins tiled across one
# chromosome; returns the data.frame from countBam() (one row per bin, with a
# "records" column holding the read count per bin).
# NOTE(review): adjacent bins share their boundary coordinate (end of bin i ==
# start of bin i+1), exactly as in the original per-chromosome stanzas.
count_chr_bins <- function(bam_file, chr_name, chr_length, bin_width) {
  start <- seq(from = 0, to = chr_length - bin_width, by = bin_width)
  end <- seq(from = 0 + bin_width, to = chr_length, by = bin_width)
  chr_bins <- RangedData(ranges = IRanges(start = start, end = end),
                         space = chr_name)
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)
}

########## H3K9Ac (Cntl) - count overlaps
# Per-bin read counts for chr1-chr19, concatenated in chromosome order.
# Replaces 19 copy-pasted per-chromosome stanzas with one loop.
bam_file <- ctrl_k9ac
ctrl_k9ac_counts <- unlist(lapply(seq_len(nrow(chr_lengths)), function(i) {
  count_chr_bins(bam_file, chr_lengths[i, 1], chr_lengths[i, 2], bin.size)[, "records"]
}))
save(ctrl_k9ac_counts, file = "/mnt/data/ctrl_k9ac_tag_counts.RData")
# Count reads from `bam_file` in consecutive fixed-width bins tiled across one
# chromosome; returns the data.frame from countBam() (one row per bin, with a
# "records" column holding the read count per bin).
# NOTE(review): adjacent bins share their boundary coordinate (end of bin i ==
# start of bin i+1), exactly as in the original per-chromosome stanzas.
count_chr_bins <- function(bam_file, chr_name, chr_length, bin_width) {
  start <- seq(from = 0, to = chr_length - bin_width, by = bin_width)
  end <- seq(from = 0 + bin_width, to = chr_length, by = bin_width)
  chr_bins <- RangedData(ranges = IRanges(start = start, end = end),
                         space = chr_name)
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)
}

########## ctrl_h4ac - count overlaps
# chr1-chr10 only here: the original per-chromosome stanzas for the remaining
# chromosomes continue after this section and consume the chrN_bam variables,
# so each chrN_bam must still be created (via assign) exactly as before.
bam_file <- ctrl_h4ac
for (i in seq_len(10)) {
  assign(paste0("chr", i, "_bam"),
         count_chr_bins(bam_file, chr_lengths[i, 1], chr_lengths[i, 2], bin.size))
}
######Chr11
start <- seq(from=0, to = chr_lengths[11,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[11,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[11,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr11_bam <- countBam(bam_file, param=param)
######Chr12
start <- seq(from=0, to = chr_lengths[12,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[12,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[12,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr12_bam <- countBam(bam_file, param=param)
######Chr13
start <- seq(from=0, to = chr_lengths[13,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[13,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[13,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr13_bam <- countBam(bam_file, param=param)
######Chr14
start <- seq(from=0, to = chr_lengths[14,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[14,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[14,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr14_bam <- countBam(bam_file, param=param)
######Chr15
start <- seq(from=0, to = chr_lengths[15,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[15,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[15,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr15_bam <- countBam(bam_file, param=param)
######Chr16
start <- seq(from=0, to = chr_lengths[16,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[16,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[16,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr16_bam <- countBam(bam_file, param=param)
######Chr17
start <- seq(from=0, to = chr_lengths[17,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[17,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[17,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr17_bam <- countBam(bam_file, param=param)
######Chr18
start <- seq(from=0, to = chr_lengths[18,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[18,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[18,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr18_bam <- countBam(bam_file, param=param)
######Chr19
start <- seq(from=0, to = chr_lengths[19,2] - bin.size, by=bin.size)
end <- seq(from=0 + bin.size, to = chr_lengths[19,2], by=bin.size)
chr_bins <- RangedData(ranges = IRanges(
start= start,
end = end
),
space = chr_lengths[19,1]
)
## get counts from BAM file
param <- ScanBamParam(which=chr_bins)
chr19_bam <- countBam(bam_file, param=param)
######join all together and save
ctrl_h4ac_counts <- c(chr1_bam[,"records"],chr2_bam[,"records"],chr3_bam[,"records"],chr4_bam[,"records"],chr5_bam[,"records"],chr6_bam[,"records"],chr7_bam[,"records"],chr8_bam[,"records"],chr9_bam[,"records"],chr10_bam[,"records"],chr11_bam[,"records"],chr12_bam[,"records"],chr13_bam[,"records"],chr14_bam[,"records"],chr15_bam[,"records"],chr16_bam[,"records"],chr17_bam[,"records"],chr18_bam[,"records"],chr19_bam[,"records"])
save(ctrl_h4ac_counts, file = "/mnt/data/ctrl_h4ac_tag_counts.RData")
########## H3K9Ac (rest) - count overlaps
bam_file <- rest_k9ac
# Same de-duplication as for the other libraries: the original repeated an
# identical start/end/RangedData/countBam recipe for chr1 ... chr19.  For
# each chromosome, bins start at 0 and step by `bin.size` up to the length
# in chr_lengths[chr_idx, 2] (chromosome name in chr_lengths[chr_idx, 1]);
# countBam()'s "records" column gives the read count per bin, and unlist()
# reproduces the original c(chr1_bam[,"records"], ..., chr19_bam[,"records"])
# concatenation.
# NOTE(review): RangedData was removed from recent IRanges releases -
# assumes an older Bioconductor where it still exists.
rest_k9ac_counts <- unlist(lapply(seq_len(19), function(chr_idx) {
  chr_len <- chr_lengths[chr_idx, 2]
  start <- seq(from = 0, to = chr_len - bin.size, by = bin.size)
  end <- seq(from = bin.size, to = chr_len, by = bin.size)
  chr_bins <- RangedData(
    ranges = IRanges(start = start, end = end),
    space = chr_lengths[chr_idx, 1]
  )
  ## get counts from BAM file
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)[, "records"]
}), use.names = FALSE)
save(rest_k9ac_counts, file = "/mnt/data/rest_k9ac_tag_counts.RData")
########## rest_h4ac - count overlaps
bam_file <- rest_h4ac
# Same de-duplication as for the other libraries: the original repeated an
# identical start/end/RangedData/countBam recipe for chr1 ... chr19.  For
# each chromosome, bins start at 0 and step by `bin.size` up to the length
# in chr_lengths[chr_idx, 2] (chromosome name in chr_lengths[chr_idx, 1]);
# countBam()'s "records" column gives the read count per bin, and unlist()
# reproduces the original c(chr1_bam[,"records"], ..., chr19_bam[,"records"])
# concatenation.
# NOTE(review): RangedData was removed from recent IRanges releases -
# assumes an older Bioconductor where it still exists.
rest_h4ac_counts <- unlist(lapply(seq_len(19), function(chr_idx) {
  chr_len <- chr_lengths[chr_idx, 2]
  start <- seq(from = 0, to = chr_len - bin.size, by = bin.size)
  end <- seq(from = bin.size, to = chr_len, by = bin.size)
  chr_bins <- RangedData(
    ranges = IRanges(start = start, end = end),
    space = chr_lengths[chr_idx, 1]
  )
  ## get counts from BAM file
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)[, "records"]
}), use.names = FALSE)
save(rest_h4ac_counts, file = "/mnt/data/rest_h4ac_tag_counts.RData")
###############################################
######would be good to train this on H3K4me3 modification to see if see any correlation
#take from MLA NS K4me3
bam_file <- "/space/MLA2_MLA2dNeuron_ChIP/results/alignment/bowtie/MLA_NS_H3K4me3_CMN054_s_2_export_sorted_nodups.bam"
# Same de-duplication as for the acetylation libraries: the original
# repeated an identical start/end/RangedData/countBam recipe for
# chr1 ... chr19.  For each chromosome, bins start at 0 and step by
# `bin.size` up to the length in chr_lengths[chr_idx, 2] (chromosome name
# in chr_lengths[chr_idx, 1]); countBam()'s "records" column gives the read
# count per bin, and unlist() reproduces the original
# c(chr1_bam[,"records"], ..., chr19_bam[,"records"]) concatenation.
# NOTE(review): RangedData was removed from recent IRanges releases -
# assumes an older Bioconductor where it still exists.
mla_k4me3_counts <- unlist(lapply(seq_len(19), function(chr_idx) {
  chr_len <- chr_lengths[chr_idx, 2]
  start <- seq(from = 0, to = chr_len - bin.size, by = bin.size)
  end <- seq(from = bin.size, to = chr_len, by = bin.size)
  chr_bins <- RangedData(
    ranges = IRanges(start = start, end = end),
    space = chr_lengths[chr_idx, 1]
  )
  ## get counts from BAM file
  param <- ScanBamParam(which = chr_bins)
  countBam(bam_file, param = param)[, "records"]
}), use.names = FALSE)
save(mla_k4me3_counts, file = "/mnt/data/mla_k4me3_tag_counts.RData")
#########################
#reload files in
# Each .RData file holds a single per-bin count vector saved earlier.
# load() returns the NAME of the object it restored, so get(load(...))
# loads the object into this environment and immediately fetches it,
# allowing it to be rebound to a fresh variable in one line.
# NOTE(review): the vectors were saved above under /mnt/data/ but are read
# back here from comparison_results/ - presumably the files were moved or
# copied between the two steps; confirm before rerunning end-to-end.
rest_d0_counts <- get(load("comparison_results/rest_d0_tag_counts.RData"))
rest_d4_counts <- get(load("comparison_results/rest_d4_tag_counts.RData"))
ctrl_k9ac_counts <- get(load("comparison_results/ctrl_k9ac_tag_counts.RData"))
rest_k9ac_counts <- get(load("comparison_results/rest_k9ac_tag_counts.RData"))
ctrl_h4ac_counts <- get(load("comparison_results/ctrl_h4ac_tag_counts.RData"))
rest_h4ac_counts <- get(load("comparison_results/rest_h4ac_tag_counts.RData"))
mla_k4me3_counts <- get(load("comparison_results/mla_k4me3_tag_counts.RData"))
###need to take into account peaks that aren't really peaks - filter on minimum number of reads in peaks?
# Nearest/overlapping peak-to-gene annotation tables for each ChIP sample.
rest_d0_peaks <- read.csv(file = "REST_ChIP/results/REST_D0_nearest_peak_to_gene_TSS.csv")
rest_d4_peaks <- read.csv(file = "REST_ChIP/results/REST_D4_nearest_peak_to_gene_TSS.csv")
# NOTE(review): the variable says k9ac but the file name inside the
# ctrl_h3k9ac directory says "ctrl_h3k4ac_..." - confirm this is the
# intended file and not a typo in the exported filename.
cntl_k9ac_peaks <- read.csv(file = "Ac_ChIP/results/new_annotation/ctrl_h3k9ac/ctrl_h3k4ac_nearest_or_overlapping_peak_to_gene.csv")
rest_k9ac_peaks <- read.csv(file = "Ac_ChIP/results/new_annotation/rest_h3k9ac/rest_h3k9ac_nearest_or_overlapping_peak_to_gene.csv")
cntl_h4ac_peaks <- read.csv(file = "Ac_ChIP/results/new_annotation/ctrl_h4ac/ctrl_h4ac_nearest_or_overlapping_peak_to_gene.csv")
rest_h4ac_peaks <- read.csv(file = "Ac_ChIP/results/new_annotation/rest_h4ac/rest_h4ac_nearest_or_overlapping_peak_to_gene.csv")
##normalise to read counts
# Raw sequencing depths (total read counts) for each library, in the same
# order as the original scalar assignments.
raw_depths <- c(
  rest_d0   = 18488744,
  rest_d4   = 17510073,
  ctrl_k9ac = 13960865,
  rest_k9ac = 19301764,
  ctrl_h4ac = 19561815,
  rest_h4ac = 17039692,
  mla_k4me3 = 18769907
)
# Combined depth across all seven libraries (same value as the original
# chained addition).
total_depth <- sum(raw_depths)
# Fraction of the combined depth contributed by each library.
depth_fraction <- raw_depths / total_depth
# Re-expose the per-library fractions under the exact variable names the
# normalisation step below expects; [[ drops the name so each is a bare
# scalar, just like the original in-place overwrites.
rest_d0_depth <- depth_fraction[["rest_d0"]]
rest_d4_depth <- depth_fraction[["rest_d4"]]
ctrl_k9ac_depth <- depth_fraction[["ctrl_k9ac"]]
rest_k9ac_depth <- depth_fraction[["rest_k9ac"]]
ctrl_h4ac_depth <- depth_fraction[["ctrl_h4ac"]]
rest_h4ac_depth <- depth_fraction[["rest_h4ac"]]
mla_k4me3_depth <- depth_fraction[["mla_k4me3"]]
## normalise
# Each *_depth value is that library's FRACTION of the combined read depth
# (computed just above), so dividing by it scales every library's bin
# counts up by total_depth / library_depth, putting all libraries on a
# common depth-adjusted scale before they are compared or clustered.
rest_d0_counts <- rest_d0_counts / rest_d0_depth
rest_d4_counts <- rest_d4_counts / rest_d4_depth
ctrl_k9ac_counts <- ctrl_k9ac_counts / ctrl_k9ac_depth
rest_k9ac_counts <- rest_k9ac_counts / rest_k9ac_depth
ctrl_h4ac_counts <- ctrl_h4ac_counts / ctrl_h4ac_depth
rest_h4ac_counts <- rest_h4ac_counts / rest_h4ac_depth
mla_k4me3_counts <- mla_k4me3_counts / mla_k4me3_depth
##plot together
#plot(rest_d0_counts, rest_d4_counts)
#plot(rest_d0_counts, ctrl_k9ac_counts)
#plot(rest_d0_counts, ctrl_h4ac_counts)
##vs mla k4
#plot(rest_d0_counts, mla_k4me3_counts)
#################
##make a plot clustered together to show overlap of REST at D0 and K9ac with/without REST
#make matrix to cluster
# One column per sample, one row per genome-wide bin.
peak_matrix <- cbind(rest_d0_counts, ctrl_k9ac_counts, rest_k9ac_counts)
# Row totals across the three samples.  rowSums() replaces the original
# element-wise for-loop, which computed exactly the same sums one row at a
# time into a pre-allocated one-column matrix.
peak_matrix_sum <- rowSums(peak_matrix)
# Keep only bins with appreciable combined signal (total normalised count
# > 3).  This also drops rows that are zero everywhere, which is what the
# original comment described, but note the actual threshold is > 3.
peak_matrix_counts <- peak_matrix[which(peak_matrix_sum > 3), ]
#scale peaks
#peak_matrix_scale <- scale(peak_matrix_counts)
##save and export to cluster 3 as R can't process this many lines...
# Row identifiers for the exported table; seq_len() is safe even if the
# filter left zero rows (seq(1, 0) would yield c(1, 0)).
name <- seq_len(nrow(peak_matrix_counts))
peak_matrix_name <- cbind(name, peak_matrix_counts)
write.table(peak_matrix_name, file = "comparison_results/tag_count_matrix.txt", sep = "\t", row.names = FALSE, col.names = FALSE)
##command for Cluster 3 (to be run from command line)
# cluster -f comparison_results/tag_count_matrix.txt -ng -ca a -g 2
## make differential peak
heatmap.2(peak_matrix_counts)
# First column (REST D0) for every retained bin; equivalent to the original
# explicit seq(1, nrow(...)) row index over all rows.
plot(peak_matrix_counts[, 1])
|
595f21159e440b6c23b574067d0b89167a2ef755 | d0b69806a6afd6c4086ca66d92dc2206273d055a | /example/test.R | 18bf590eadb24e18993b18ef33ce1094383cd993 | [] | no_license | zdn123/garch | 1afd0db48e86ffad43dca2ad2ffa4849ee9d9264 | 44a09e03327713391713b0dd8fb2e96edeacd7be | refs/heads/master | 2021-01-17T22:22:41.312325 | 2017-03-07T10:59:14 | 2017-03-07T10:59:14 | 84,195,459 | 0 | 1 | null | 2017-03-07T12:15:22 | 2017-03-07T12:15:22 | null | GB18030 | R | false | false | 1,315 | r | test.R | #加载库
# Load libraries: TSA provides garch(); rjson parses the spot-price dump.
library(TSA)
library(rjson)
# Data loading: read the EC2 c4.4xlarge spot-price JSON file as one string
# and parse it.  NOTE(review): json_data is not referenced again in this
# script - the sourced preprocessing script below presumably consumes the
# same raw data; confirm.
json_data<-fromJSON(paste(readLines('D:/R-Data/SpotData/c4.4xlarge-spotprice_linux-unix_us-east-1e.json'), collapse=''))
# Preprocessing: the sourced script appears to define `f`, the price series
# sliced below - TODO confirm against ParseSpotScript.R.
source("D:/workspace/garch/test/timeseriesanalysis/ParseSpotScript.R")
# Window configuration: start offset, training-window length, and
# forecast horizon.
base<-100
learnstep<-360
prestep<-30
# Training slice and the held-out slice immediately following it.
temp<-f[base:(base+learnstep)]
prepart<-f[(base+learnstep+1):(base+learnstep+prestep)]
# fGarch library experiments (kept commented out).
#library(fGarch)
#predicted=100
#garchmod=garch(x=temp,order=c(1,1))
#plot(residuals(garchmod),type='l',ylab = 'Standard residual')
#g1 = garchFit(formula=~garch(1,1),data=difflog,trace=F,cond.dist="std")
#plot(predict(g1,n.ahead=100)$meanError,type='b')
#plot(predict(g2,n.ahead=100)$standardDeviation,type='b')
# Differenced modelling: log-returns scaled to percent, then a GARCH(1,1)
# fit on the return series.
d<-diff(log(temp))*100
m1<-garch(d,order=c(1,1))
summary(m1)
# Diagnostic plots (kept commented out).
#plot(residuals(m1),type='h')
#qqnorm(residuals(m1))
#qqline(residuals(m1))
# Squares of the fitted and predicted values - presumably turning
# conditional standard deviations into conditional variances; confirm with
# the garch() fitted()/predict() documentation.
fm1<-(fitted(m1)[,1])^2
pm1<-(predict(m1,newdata=data.frame(learnstep:(learnstep+learnstep)))[,1])^2
plot(pm1,col='blue',type='l',ylab='Conditional Variance',ylim=c(0,50),xlab='t')
# Long-run (unconditional) variance forecast: omega / (1 - alpha - beta),
# using the coefficients by position (1 = omega, 2 = alpha1, 3 = beta1).
long = m1$coef[1]/(1-m1$coef[2]-m1$coef[3])
long
# One-step-ahead conditional variance forecast.
# NOTE(review): the GARCH(1,1) recursion uses the PREVIOUS conditional
# variance, whereas sd(d)^2 here is the sample variance of the whole
# series - confirm this approximation is intended.
p = m1$coef[1] + m1$coef[2] * d[learnstep]^2 + m1$coef[3] * sd(d)^2
p
# Multi-step-ahead conditional variance forecast: iterate the recursion
# once, feeding the previous forecast back in for both terms.
p = m1$coef[1] + m1$coef[2] * p + m1$coef[3] * p
p
p |
9b0deb4131a9df75a2facea67e7d1dd32f7d70af | 13f0b3f37544339d5821b2a416a9b31a53f674b1 | /R/eyer-constants.R | 236cadd3ad3a5d064f10917119b64f2a53e834f4 | [
"MIT"
] | permissive | hejtmy/eyer | 1f8a90fd7a8af0a4c4c73790633589dc624edda2 | 0b49566c76ab659184d62e1cdd658b45b0d33247 | refs/heads/master | 2020-04-24T11:17:25.414641 | 2019-09-17T22:44:52 | 2019-09-17T22:44:52 | 171,920,561 | 0 | 0 | MIT | 2019-09-10T21:54:40 | 2019-02-21T18:08:07 | R | UTF-8 | R | false | false | 120 | r | eyer-constants.R | EYE_POSITION_DATA_FIELDS <- c("gaze", "fixations")
# Superset of EYE_POSITION_DATA_FIELDS, adding "events" and "diameter"
# (presumably pupil diameter -- confirm against package usage)
ALL_DATA_FIELDS <- c(EYE_POSITION_DATA_FIELDS, "events", "diameter")
|
4a412f8cb78d270f1d0fd259bd3f4b7263356649 | 590142f535831def89b5b2d0f6ac1d47b8306850 | /tests/testthat/single_numeric_few_distinct/setup_data.R | 6a222ee1d075a06228657b5ea7c389f52ff4fd93 | [] | no_license | jfontestad/makeParallel | 2b62704c9e26477bc89d505de313ea07aaebdcca | 6e43f34f51a23692907ec1563d3d47a8e189d7bf | refs/heads/master | 2023-01-13T03:27:16.260825 | 2020-11-17T16:41:04 | 2020-11-17T16:41:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 327 | r | setup_data.R | # Set up some toy data
## Build three toy fixture files: each holds draws (with replacement)
## from a shared pool of ten normal deviates.
n_draws <- 100
n_distinct <- 10L
set.seed(3890)  # fixed seed so the generated files are reproducible
value_pool <- rnorm(n_distinct)
# Two "small" samples of length n_draws and one "big" sample of twice that,
# written uncompressed so the fixtures load quickly.
saveRDS(sample(value_pool, size = n_draws, replace = TRUE), "small1.rds", compress = FALSE)
saveRDS(sample(value_pool, size = n_draws, replace = TRUE), "small2.rds", compress = FALSE)
saveRDS(sample(value_pool, size = 2 * n_draws, replace = TRUE), "big.rds", compress = FALSE)
|
9fda32c696e2d00505d2256a18bf8050376ebf3a | e9449c042d50f61ffe72bbc94f19d8173487d090 | /R/catTrajectory.R | 46f0db950222ccbaf8444cfcd175ceef88b5ee4d | [] | no_license | bilintoh/TomTrajectories | 227832b0ce370cabeff573b2b9d38febd394213d | 627d583bc52a0dda982d4414f57ecdeab839eb16 | refs/heads/main | 2023-08-25T17:19:52.128907 | 2021-11-11T23:26:27 | 2021-11-11T23:26:27 | 427,078,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,613 | r | catTrajectory.R | #' Title
#'
#' @param dfRaster
#' @param noData
#' @param category
#'
#' @return
#' @export
#'
#' @examples
catTrajectory <- function(dfRaster,noData = 0, category) {
# Filter pixel locations with values == 0, which is NA
pixelColumns <- length(dfRaster[1,])
dfRaster[,3:pixelColumns][dfRaster[,3:pixelColumns] == as.numeric(noData)] <- NA
dfNa <- dfRaster %>% filter_all(any_vars(is.na(.)))
naXY <- dfNa[1:2]
naXYChange <- naXY %>% mutate(change = 0)
# Filter Data which is not NA
dfNonZero <- dfRaster[complete.cases(dfRaster), ]
nonNaXY <- dfNonZero[, 1:2]
dfNonZero2 <- dfNonZero[, 3:pixelColumns]
lenDfNonZero2 <- length(dfNonZero2)
# extend length of pixel column by 1 and 2
pixelColumnsBy1 <- pixelColumns + 1
pixelColumnsBy2 <- pixelColumns + 2
# convert selected columns to a Boolean dataframe where 0 = absence and 1 = presence
dfNonZero2[dfNonZero2 != as.numeric(category)] <- as.numeric(0)
dfBoolean2 <- dfNonZero2
dfBoolean2[dfBoolean2 == as.numeric(category)] <- as.numeric(1)
# Combined coordinates and columns without NA
dfXYZBoolean <- cbind(nonNaXY,dfBoolean2)
#Create the five trajectories that make up the map
# 0 = Mask, 1 = Absence, 2 = Presence, 3 = Gain, and 4 = Loss
#1 = Absence
absence <- dfXYZBoolean %>% mutate(change = ifelse(
.[3] == 0 &.[pixelColumns] == 0 ,1,0))
absence <-absence %>% subset(.$change == 1) %>% subset(., select=c("x", "y", "change"))
#2 = Presence
presence <- dfXYZBoolean %>% mutate(change = ifelse(
.[3] == 1 &.[pixelColumns] == 1 ,2,0))
presence <-presence %>% subset(.$change == 2) %>% subset(., select=c("x", "y", "change"))
#Compute the difference between the last and first time points
lastFirstTimepoints <- dfXYZBoolean[pixelColumns] - dfXYZBoolean[3]
xylastFirstTimepoints<- cbind(nonNaXY,lastFirstTimepoints)
xylastFirstTimepoints2 <- xylastFirstTimepoints %>% filter(lastFirstTimepoints!=0)
dfReclass <- xylastFirstTimepoints2 %>% mutate(change = ifelse(.[3] == 1,3,4))
dfReclass2 <- data.frame(dfReclass$x,dfReclass$y,dfReclass$change)
# name the columns of the new dataframe x,y, and change
names(dfReclass2) <- c("x","y","change")
# Put the 4 component into 1 data frame
noNaComponents <- rbind(absence,presence,dfReclass2)
# Join NA, comp_1,comp_2, and comp_3_4_v2
combinedTrajectory <- rbind(naXYChange,absence,presence,dfReclass2)
#factCombinedTrajectory <- as.factor(rasterFromXYZ(as.data.frame(combinedTrajectory)))
return(list("combinedTrajectory" = combinedTrajectory,
"dfXYZBoolean" = dfXYZBoolean))
}
|
0a1a1af2cae077b18a180703f983230a11e6fb9c | dd6e3af0a23d2f4700beab08c1b998f1f2703404 | /Lab 4/Lab 4.R | 56f15dc7541fc185066b0c121ed159881b6c60c5 | [] | no_license | wesleywchang/STAT-100B | 0b949bed19fe9bb4085f2d43021d6538dbe6cb57 | 730ee8bced0a754160117d0fcf099a7824845cf9 | refs/heads/master | 2022-12-24T05:32:27.997050 | 2020-08-28T10:30:09 | 2020-08-28T10:30:09 | 287,695,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 984 | r | Lab 4.R | # STAT 100B Lab 4
# ANOVA
# InsectSprays data in R gives the counts of insects in agricultural
# experimental units treated with six different insecticides
# we can use this data to examine the relative effectiveness of the different insecticides
# if the insecticide is effective, we would expect to see relatively few insects on the
# corresponding experimental unit
# load data
data(InsectSprays)
?InsectSprays
# consider summaries of the data
summary(InsectSprays)
dim(InsectSprays)
hist(InsectSprays$count)
# generate side-by-side boxplots of the data
# allows us to visualize the interaction between variables
boxplot(InsectSprays$count ~ InsectSprays$spray)
# examine whether the conditions of ANOVA are satisfied
hist(InsectSprays$count[InsectSprays$spray == "A"], breaks=6,
main = "Histogram of Count for Spray A", xlab="count")
# check for normality using shapiro-wilk test
shapiro.test(InsectSprays$count[InsectSprays$spray == "F"])
|
ce935ad770175e8c353dd2e04ec4816810a4808b | d3d685639cac59c9a41649af2735b2cf57cad7c9 | /man/listAvailableFeatureSets.Rd | 6853c6eb24d0441abb062868b3b57d8501579ca6 | [] | no_license | belkhir-nacim/flacco | cbcb042f507aeed5509caf59e6a2f8e45cf866a8 | 77ce443ed007c5c334b717caeb8bc8a0c65c8264 | refs/heads/master | 2021-01-24T00:15:47.805980 | 2018-05-20T00:22:55 | 2018-05-20T00:22:55 | 43,465,197 | 0 | 0 | null | 2015-09-30T23:16:59 | 2015-09-30T23:16:58 | null | UTF-8 | R | false | false | 1,155 | rd | listAvailableFeatureSets.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/listAvailableFeatures.R
\name{listAvailableFeatureSets}
\alias{listAvailableFeatureSets}
\title{List Available Feature Sets}
\usage{
listAvailableFeatureSets(subset, allow.cellmapping, allow.additional_costs,
blacklist)
}
\arguments{
\item{subset}{[\code{\link{character}}]\cr
Vector of feature sets, which should be considered. If not defined, all
features will be considered.}
\item{allow.cellmapping}{[\code{\link{logical}(1)}]\cr
Should (general) cell mapping features be considered as well? The default is
\code{TRUE}.}
\item{allow.additional_costs}{[\code{\link{logical}(1)}]\cr
Should feature sets be considered, which require additional function
evaluations? The default is \code{TRUE}.}
\item{blacklist}{[\code{\link{character}}]\cr
Vector of feature sets, which should not be considered. The default is
\code{NULL}.}
}
\value{
[\code{\link{character}}].\cr
Feature sets, which could be computed - based on the provided input.
}
\description{
Lists all available feature sets w.r.t. certain restrictions.
}
\examples{
sets = listAvailableFeatureSets()
}
|
913ecda766256df13d36a22c43eb3ae8bb8bcc9d | 7557ea2f26cc5894106aed39a65adf8b4a1d47fe | /R/FFS3_runscript.R | fe2c1c14362983b66c9f4ab142ea82fd136f9ee0 | [] | no_license | SchoenbergA/AOA-Impact | c3e3f1c34784522eaef31993d19a3764062baabe | 20cb6acf7313b6a30afe57e6b434579abd4d88e3 | refs/heads/master | 2023-03-28T03:13:07.925381 | 2021-03-31T15:21:02 | 2021-03-31T15:21:02 | 348,056,344 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,886 | r | FFS3_runscript.R | ### Classification and AOA Testing
# setup set working directory
getwd()
path <-"C:/Envimaster/AOA-Impact" # set drive letter for stick
setwd(path)
# load libs
require(raster)
require(caret)
require(CAST)
require(doParallel)
require(rgeos)
# required for visualization
require(viridis)
require(png)
require(latticeExtra)
require(gridExtra)
require(IKARUS)
# load functions
source(file.path(path,"R/002_FUN_IKARUS.R"))
source(file.path(path,"R/003_Dawn2.R"))
# load data
# RGB data
lau_FFS3 <- raster::stack(file.path(path,"Data/Area3/FFS3_Stack.grd") )
# plot
plot(lau_FFS3[[1:3]])
# response layer
lau3_rsp <-rgdal::readOGR(file.path(path,"DATA/Area3/area3_response.shp"))
# handle CRS string
crs(lau3_rsp) <- crs(lau_FFS3)
# load Training Points
lau3_tP1 <-rgdal::readOGR(file.path(path,"DATA/Area3/lau3_Tpoints.shp"))
lau3_tP2 <-rgdal::readOGR(file.path(path,"DATA/Area3/lau3_Tpoints2.shp"))
# handle CRS string
crs(lau3_tP1) <- crs(lau_FFS3)
crs(lau3_tP2) <- crs(lau_FFS3)
# path to save resulting pngs
path_png <- file.path(path,"result_IMG3//")
# RGB3 c15
rgb3115 <-Dawn2(FFS=T, Tpoints = lau3_tP1,buf_size = 0.15,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp1",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
rgb3215 <-Dawn2(FFS=T, Tpoints = lau3_tP2,buf_size = 0.15,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp2",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
# RGB3 c30
rgb3130 <-Dawn2(FFS=T, Tpoints = lau3_tP1,buf_size = 0.30,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp1",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
rgb3230 <-Dawn2(FFS=T, Tpoints = lau3_tP2,buf_size = 0.30,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp2",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
# RGB3 c45
rgb3145 <-Dawn2(FFS=T, Tpoints = lau3_tP1,buf_size = 0.45,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp1",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
rgb3245 <-Dawn2(FFS=F, Tpoints = lau3_tP2,buf_size = 0.45,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp2",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
# RGB3 c60
rgb3160 <-Dawn2(FFS=T, Tpoints = lau3_tP1,buf_size = 0.60,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp1",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
rgb3260 <-Dawn2(FFS=T, Tpoints = lau3_tP2,buf_size = 0.60,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp2",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
# RGB3 c75
rgb3175 <-Dawn2(FFS=T, Tpoints = lau3_tP1,buf_size = 0.75,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp1",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
rgb3275 <-Dawn2(FFS=T, Tpoints = lau3_tP2,buf_size = 0.75,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp2",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
# RGB3 c90
rgb3190 <-Dawn2(FFS=T, Tpoints = lau3_tP1,buf_size = 0.90,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp1",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
rgb3290 <-Dawn2(FFS=F, Tpoints = lau3_tP2,buf_size = 0.90,design = "ROUND",Stk = lau_FFS3,Stk_name = "FFS3_tp2",plot_res = T,save_png = T,save_res = F,path_png = path_png,validate = T,rsp = lau3_rsp,rsp_class = "t")
# get list for all values
ls <- list(rgb3115$VALUES,rgb3215$VALUES,
rgb3130$VALUES,rgb3230$VALUES,
rgb3145$VALUES,rgb3245$VALUES,
rgb3160$VALUES,rgb3260$VALUES,
rgb3175$VALUES,rgb3275$VALUES,
rgb3190$VALUES,rgb3290$VALUES
)
# function to merge df
#' Merge a list of result data frames into one table.
#'
#' Performs a full (all = TRUE) merge across all elements. Each element is
#' expected to contribute exactly one row, so the merged result must have one
#' row per input element; anything else is reported as an error.
#'
#' @param ls list of data.frames (here: the $VALUES tables of the Dawn2 runs).
#' @return data.frame with one row per input element.
mergeDFs <- function(ls){
  if (length(ls) == 0L) {
    stop("mergeDFs: input list is empty")
  }
  # Fold a full outer merge over the list. Equivalent to the original
  # pairwise merge loop, but also correct for a single-element list
  # (the old `1:(length(ls)-1)` loop broke in that case).
  df <- Reduce(function(acc, nxt) merge(acc, nxt, all = TRUE), ls)
  # Sanity check: one row per contributing run
  if (nrow(df) != length(ls)) {
    stop("mergeDFs: expected ", length(ls), " rows after merging, got ", nrow(df))
  }
  df
}
# Merge the per-run result tables into one comparison table
results <- mergeDFs(ls)
results
# Persist the comparison table for the area-3 FFS runs
write.csv(results,file.path(path,"results_FFS3.csv"))
# Predictor variables selected by each run's feature selection
# (model_LLOCV: LLOCV presumably = leave-location-out cross-validation -- confirm)
rgb3115$model_LLOCV$selectedvars
rgb3215$model_LLOCV$selectedvars
rgb3130$model_LLOCV$selectedvars
rgb3230$model_LLOCV$selectedvars
rgb3145$model_LLOCV$selectedvars
rgb3245$model_LLOCV$selectedvars
rgb3160$model_LLOCV$selectedvars
rgb3260$model_LLOCV$selectedvars
rgb3175$model_LLOCV$selectedvars
rgb3275$model_LLOCV$selectedvars
rgb3190$model_LLOCV$selectedvars
rgb3290$model_LLOCV$selectedvars |
9e58fbd3675db840c758a22877d609e525c1afa0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DEploid/examples/extractCoverageFromTxt.Rd.R | a236a09013c7f96487850ef7d86ec45db6d2125a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 362 | r | extractCoverageFromTxt.Rd.R | library(DEploid)
### Name: extractCoverageFromTxt
### Title: Extract read counts from plain text file
### Aliases: extractCoverageFromTxt
### ** Examples
refFile = system.file("extdata", "PG0390-C.test.ref", package = "DEploid")
altFile = system.file("extdata", "PG0390-C.test.alt", package = "DEploid")
PG0390 = extractCoverageFromTxt(refFile, altFile)
|
26555114a0f5b72f621ecc75daf7589eb32ae530 | 91a58b6ace41068b4537d489961b2bf31cbb9606 | /examples/lsm_grid_statistic_example.R | 25686aabb6973e4e0b12465cefda205230399fe6 | [] | no_license | mauriciovancine/lsmetrics | c965c2be2138c417259c9b225f3b6983aedfafb4 | c2a2fdb41713e41435d093124c2de67ae724e666 | refs/heads/main | 2023-08-31T03:50:17.747399 | 2023-08-26T03:23:20 | 2023-08-26T03:23:20 | 646,293,095 | 7 | 1 | null | 2023-06-12T02:17:44 | 2023-05-27T23:04:20 | R | UTF-8 | R | false | false | 2,135 | r | lsm_grid_statistic_example.R | library(lsmetrics)
library(terra)
# read habitat data
f <- system.file("raster/toy_landscape_habitat.tif", package = "lsmetrics")
r <- terra::rast(f)
# plot
plot(r, legend = FALSE, axes = FALSE, main = "Binary habitat")
plot(as.polygons(r, dissolve = FALSE), lwd = .1, add = TRUE)
plot(as.polygons(r), add = TRUE)
text(r)
# find grass
path_grass <- system("grass --config path", inter = TRUE) # windows users need to find the grass gis path installation, e.g. "C:/Program Files/GRASS GIS 8.3"
# create grassdb
rgrass::initGRASS(gisBase = path_grass,
SG = r,
gisDbase = "grassdb",
location = "newLocation",
mapset = "PERMANENT",
override = TRUE)
# import raster from r to grass
rgrass::write_RAST(x = r, flags = c("o", "overwrite", "quiet"), vname = "r", verbose = FALSE)
# area
lsmetrics::lsm_fragment_area(input = "r")
# files
# rgrass::execGRASS(cmd = "g.list", type = "raster")
# import r
r_fragment_area_ha <- rgrass::read_RAST("r_fragment_area_ha", flags = "quiet")
# plot
plot(r_fragment_area_ha, legend = FALSE, axes = FALSE, main = "Fragment area (ha)")
plot(as.polygons(r, dissolve = FALSE), lwd = .1, add = TRUE)
plot(as.polygons(r), add = TRUE)
text(r_fragment_area_ha)
# grid
lsmetrics::lsm_grid_statistic(input = "r",
landscape_metric = "r_fragment_area_ha",
landscape_metric_has_null = TRUE,
size = 200,
hexagon = TRUE,
column_prefix = "area",
method = "average")
# files
# rgrass::execGRASS(cmd = "g.list", type = "vector")
# import r
r_grid <- rgrass::read_VECT("r_grid200", flags = "quiet")
# plot
r_grid <- r_grid[is.na(r_grid$area_average) == FALSE, ]
plot(r_grid, "area_average", legend = FALSE, axes = FALSE, main = "Area average (ha)")
text(r_grid, labels = "area_average", cex = .7)
plot(as.polygons(r), col = c(adjustcolor("white", 0), adjustcolor("gray", .5)), add = TRUE)
# delete grassdb
unlink("grassdb", recursive = TRUE)
|
defa5625fbc62337976aca5437223835c55f6821 | 20eb548b689f85ce0d88ee44ad6f1575999bf158 | /twinkle/man/starforecast-methods.Rd | 198ebf2f5a03a2af1a151a89e40b30d7a8ba46ce | [] | no_license | andbucci/twinkle | ea125d2f8d93a5a0a9836e96c046333904fdcaa7 | 615b8490bd3a6832bf747b7a9a39a2ba6b030e21 | refs/heads/master | 2020-04-13T11:52:42.870226 | 2018-12-26T14:07:42 | 2018-12-26T14:07:42 | 163,186,316 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,830 | rd | starforecast-methods.Rd | \name{starforecast-methods}
\docType{methods}
\alias{starforecast}
\alias{starforecast-methods}
\alias{starforecast,ANY-method}
\alias{starforecast,STARfit-method}
\alias{starforecast,STARspec-method}
\title{function: Univariate STAR Forecasting}
\description{
Method for forecasting from a STAR models.
}
\usage{
starforecast(fitORspec, data = NULL, n.ahead = 1, n.roll = 0, out.sample = 0,
external.forecasts = list(xregfor = NULL, vregfor = NULL,
sfor = NULL, probfor = NULL), method = c("an.parametric",
"an.kernel", "mc.empirical", "mc.parametric", "mc.kernel"),
mc.sims = NULL, ...)
}
\arguments{
\item{fitORspec}{
Either a univariate STAR fit object of class \code{\linkS4class{STARfit}} or
alternatively a univariate STAR specification object of class
\code{\linkS4class{STARspec}} with valid fixed parameters.}
\item{data}{
Required if a specification rather than a fit object is supplied.}
\item{n.ahead}{
The forecast horizon.}
\item{n.roll}{
The no. of rolling forecasts to create beyond the first one (see details).}
\item{out.sample}{
Optional. If a specification object is supplied, indicates how many data points
to keep for out of sample testing.}
\item{external.forecasts}{
A list with forecasts for the external regressors in the mean and/or variance
equations if specified, the state dynamics (sfor) in the case when \sQuote{type}
used was \dQuote{s} in the model, and forecast state probabilities in the case when
\sQuote{fixed.probs} was used in the model.}
\item{method}{
The nonlinear nature of the model means that for n.ahead>1 there is no simple closed form
way of obtaining the forecasts. In that case, a number of methods are provided including
the numerical integration using the parametric density (\dQuote{an.parametric}) or a kernel
estimated density on the residuals (\dQuote{an.kernel}), monte carlo integration using the
empirical residuals (\dQuote{mc.empirical}), parametric density (\dQuote{mc.parametric}) and
kernel estimated density (\dQuote{mc.kernel}). See the vignette for further details.
}
\item{mc.sims}{The number of simulations to perform for the monte carlo integration.}
\item{...}{.}
}
\value{
A \code{\linkS4class{STARforecast}} object containing details of the STAR
forecast. See the class for details on the returned object and methods for
accessing it and performing some tests.
}
\details{
The forecast function has two dispatch methods allowing the user to call it with
either a fitted object (in which case the data argument is ignored), or a
specification object (in which case the data is required) with fixed parameters.\cr
The ability to roll the forecast 1 step at a time is implemented with the
\code{n.roll} argument which controls how many times to roll the n.ahead
forecast. The default argument of n.roll = 0 denotes no rolling and returns the
standard n.ahead forecast. Critically, since n.roll depends on data being
available from which to base the rolling forecast, the \code{\link{starfit}}
function needs to be called with the argument \code{out.sample} being at least
as large as the n.roll argument, or in the case of a specification being used
instead of a fit object, the \code{out.sample} argument directly in the forecast
function.\cr
The vignette contains the details on the n.ahead>1 implementation and the interested
reader should definitely consult that.
}
\author{Alexios Ghalanos}
\seealso{
For specification \code{\link{starspec}}, estimation \code{\link{starfit}},
filtering \code{\link{starfilter}}, forecasting \code{\link{starforecast}},
simulation from estimated object \code{\link{starsim}},
simulation from spec \code{\link{starpath}}, rolling forecast and
estimation \code{\link{rollstar}}.
}
\keyword{methods} |
90e90e2099597793719b49fc89d07cf866260815 | da916f1ff93ed20ac43397b5f8b2f7d238bb821c | /man/e0.DLcurve.plot.Rd | 783a98c6dc5a0d91bb0982f5fc6e21749d0ff9a4 | [] | no_license | raquelrguima/bayesLife | 8572e4749f6e27ab5cf9d747746382e3c921fea9 | 8e1787fae8739a90eaf76aa8b6dd3352fb0a1756 | refs/heads/master | 2020-03-24T22:19:19.362694 | 2017-10-14T06:07:47 | 2017-10-14T06:07:47 | 143,079,065 | 1 | 0 | null | 2018-07-31T23:10:42 | 2018-07-31T23:10:42 | null | UTF-8 | R | false | false | 5,133 | rd | e0.DLcurve.plot.Rd | \name{e0.DLcurve.plot}
\alias{e0.DLcurve.plot}
\alias{e0.DLcurve.plot.all}
\alias{e0.world.dlcurves}
\alias{e0.country.dlcurves}
\alias{e0.parDL.plot}
\title{
Plotting Posterior Distribution of the Double Logistic Function of Life Expectancy
}
\description{
The functions plot the posterior distribution of the double logistic function used in the simulation, including their median and given probability intervals.
}
\usage{
e0.DLcurve.plot(mcmc.list, country, burnin = NULL, pi = 80,
e0.lim = NULL, nr.curves = 20, predictive.distr = FALSE, ylim = NULL,
xlab = "e(0)", ylab = "5-year gains", main = NULL, show.legend=TRUE,
col=c('black', 'red', "#00000020"), \dots)
e0.DLcurve.plot.all(mcmc.list = NULL, sim.dir = NULL,
output.dir = file.path(getwd(), "DLcurves"),
output.type = "png", burnin = NULL, verbose = FALSE, \dots)
e0.parDL.plot(mcmc.set, country = NULL, burnin = NULL, lty = 2,
ann = TRUE, \dots)
e0.world.dlcurves(x, mcmc.list, burnin=NULL, \dots)
e0.country.dlcurves(x, mcmc.list, country, burnin=NULL, \dots)
}
\arguments{
  \item{mcmc.list}{List of \code{\link{bayesLife.mcmc}} objects, an object of class \code{\link{bayesLife.mcmc.set}} or of class \code{\link{bayesLife.prediction}}. In the case of \code{e0.DLcurve.plot.all}, if it is \code{NULL}, it is loaded from \code{sim.dir}.}
\item{mcmc.set}{Object of class \code{\link{bayesLife.mcmc.set}} or \code{\link{bayesLife.prediction}}.}
\item{country}{Name or numerical code of a country.}
\item{burnin}{Number of iterations to be discarded from the beginning of parameter traces.}
\item{pi}{Probability interval. It can be a single number or an array.}
\item{e0.lim}{It can be a tuple of the minimum and maximum life expectancy to be shown in the plot. If \code{NULL}, it takes the minimum of observed data and 40, and the maximum of observed data and 90.}
\item{nr.curves}{Number of curves to be plotted. If \code{NULL}, all curves are plotted.}
\item{predictive.distr}{Logical. If \code{TRUE}, an error term is added to each trajectory.}
\item{ylim, xlab, ylab, main, lty}{Graphical parameters passed to the \code{plot} function.}
\item{show.legend}{Logical determining if the legend should be shown.}
\item{col}{Vector of colors in this order: 1. observed data points, 2. quantiles, 3. trajectories}
\item{\dots}{Additional graphical parameters. In addition, any arguments from \code{e0.DLcurve.plot} except \code{country} can be passed to \code{e0.DLcurve.plot.all}.}
\item{sim.dir}{Directory with the simulation results. Only relevant, if \code{mcmc.list} is \code{NULL}.}
\item{output.dir}{Directory into which resulting graphs are stored.}
\item{output.type}{Type of the resulting files. It can be \dQuote{png}, \dQuote{pdf}, \dQuote{jpeg}, \dQuote{bmp}, \dQuote{tiff}, or \dQuote{postscript}.}
\item{verbose}{Logical switching log messages on and off.}
\item{x}{e0 values for which the double logistic should be computed.}
\item{ann}{Logical if parameters should be annotated.}
}
\details{\code{e0.DLcurve.plot} plots double logistic curves for the given country. \code{e0.DLcurve.plot.all} creates such plots for all countries and stores them in \code{output.dir}. Parameters passed to the double logistic function are either thinned traces created by the \code{\link{e0.predict}} function (if \code{mcmc.list} is an object of class \code{\link{bayesLife.prediction}}), or they are selected by equal spacing from the MCMC traces. In the former case, \code{burnin} is set automatically; in the latter case, \code{burnin} defaults to 0 since such object has already been \dQuote{burned}. If \code{nr.curves} is smaller than 2000, the median and probability intervals are computed on a sample of 2000 equally spaced data points, otherwise on all plotted curves.
Function \code{e0.parDL.plot} draws the means of the DL parameters as vertical and horizontal lines. The lines are added to the current graphical device and annotated if \code{ann} is \code{TRUE}. If country is \code{NULL}, the mean of world parameters are drawn.
Function \code{e0.world.dlcurves} returns the DL curves of the hierarchical distribution. Function \code{e0.country.dlcurves} returns DL curves for a given country. If \code{mcmc.list} is a prediction object, \code{burnin} should not be given, as such object has already been \dQuote{burned}.
}
\value{
\code{e0.world.dlcurves} and \code{e0.country.dlcurves} return a matrix of size \eqn{N \times M} where \eqn{N} is the number of trajectories and \eqn{M} is the number of values of \eqn{x}.
}
\author{
Hana Sevcikova
}
\examples{
\dontrun{
sim.dir <- file.path(find.package("bayesLife"), "ex-data", "bayesLife.output")
mcmc.set <- get.e0.mcmc(sim.dir=sim.dir)
e0.DLcurve.plot(mcmc.set, country="Japan", burnin=40)
e0.parDL.plot(mcmc.set, "Japan")
# add the median of the hierarchical DL curves
x <- seq(40, 90, length=100)
world <- e0.world.dlcurves(x, mcmc.set, burnin=40)
qw <- apply(world, 2, median)
lines(x, qw, col='blue')
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ hplot }
|
5cdac3189d1dc61a9a1b6c1d32e2aef162f0e064 | a51f14302c8e4a2a0a48dc636d035c4e6669f686 | /man/colplotLikeExcel.Rd | 7829a5be3a66db4156416b5a115cc272c1c7deaa | [] | no_license | holgerman/toolboxH | b135c6033c015ac0c4906392f613945f1d2763ad | fb8a98ee4629dc5fef14b88f2272d559d5d40f30 | refs/heads/master | 2022-07-07T22:26:39.857012 | 2022-06-23T14:56:23 | 2022-06-23T14:56:23 | 100,366,781 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,246 | rd | colplotLikeExcel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colplotLikeExcel.R
\name{colplotLikeExcel}
\alias{colplotLikeExcel}
\title{FUNCTION_TITLE}
\usage{
colplotLikeExcel(
plotdat,
mycolors = c("dodgerblue2", "white", "red"),
lowest_colorval = "minimum",
middle_colorval = "median",
highest_colorval = "maximum",
xlabel = "",
ylabel = "",
x_axis_pos = "top",
myround = 0,
userdefined_labels = NULL,
row_names = NULL
)
}
\arguments{
\item{plotdat}{PARAM_DESCRIPTION}
\item{mycolors}{PARAM_DESCRIPTION, Default: c("dodgerblue2", "white", "red")}
\item{lowest_colorval}{PARAM_DESCRIPTION, Default: 'minimum'}
\item{middle_colorval}{PARAM_DESCRIPTION, Default: 'median'}
\item{highest_colorval}{PARAM_DESCRIPTION, Default: 'maximum'}
\item{xlabel}{PARAM_DESCRIPTION, Default: ''}
\item{ylabel}{PARAM_DESCRIPTION, Default: ''}
\item{x_axis_pos}{PARAM_DESCRIPTION, Default: 'top'}
\item{myround}{PARAM_DESCRIPTION, Default: 0}
\item{userdefined_labels}{PARAM_DESCRIPTION, Default: NULL}
\item{row_names}{PARAM_DESCRIPTION, Default: NULL}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
\seealso{
}
|
cf0db20336033374a5da66d597a9f2ec0cf95654 | 5feca36689ab072f63447022f577c0b5dcdcd214 | /R/tz_codes.R | 8dd66685ca14017c80c2d824cf0296224c36f60b | [
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | iembry-USGS/ie2miscdata | dfc205ab712ff2b6adf1249fde4477104f9d6b20 | 142bffa6f678eb8d7cadc59669e8429d45da19c9 | refs/heads/master | 2021-01-21T04:35:30.514811 | 2016-07-16T06:55:22 | 2016-07-16T06:55:22 | 49,751,904 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 761 | r | tz_codes.R | #' Timezone (tz) codes
#'
#' A reference table of worldwide time zone codes and their offsets from
#' UTC, including the corresponding daylight-saving-time codes and offsets.
#'
#'
#'
#' @format A data.table data frame with 54 rows and 7 variables:
#' \describe{
#'   \item{Tz Cd}{Time zone code}
#'   \item{Tz Nm}{Time zone name}
#'   \item{Tz Ds}{Time zone location}
#'   \item{Tz Utc Offset Tm}{Time zone offset from UTC}
#'   \item{Tz Dst Cd}{Time zone daylight-saving code}
#'   \item{Tz Dst Nm}{Time zone daylight-saving name}
#'   \item{Tz Dst Utc Offset Tm}{Time zone daylight-saving offset from UTC}
#' }
#'
#'
#' @references
#' These data are from the National Water Information System: Help System
#' Time Zone Codes. See \url{http://help.waterdata.usgs.gov/code/tz_query?fmt=html}.
#'
#'
#' @docType data
#' @name tz_codes
#' @usage tz_codes
#' @examples
#' tz_codes
NULL
|
cc519e5b5fb59a7113c88467ab04f36bdeb3a0c6 | 76ff01977e7c670992ee2d31e1cc4bdb6ee8c9a4 | /projects/PAUP_lab/simulatingData.R | 74360efb7f4025085386696de1c92b1a7062281d | [
"CC-BY-4.0"
] | permissive | uyedaj/macrophy_course | 7555d206a0015d08402800429d371f4f9e02c317 | 6f9b8f39ae677d88ffbbc911b8491afcb063fcc9 | refs/heads/master | 2022-11-04T10:44:24.188048 | 2022-09-29T14:05:29 | 2022-09-29T14:05:29 | 146,303,663 | 3 | 15 | null | 2018-10-30T18:17:03 | 2018-08-27T13:46:38 | HTML | UTF-8 | R | false | false | 1,011 | r | simulatingData.R | ## Creating datasets for phylogenetic analysis in macrophy lab
## These are the answers for the lab exercise.

## Read the bear phylogeny and rescale its branch lengths into substitution
## units: divide by 100, then add 0.01 so no branch has length zero.
tree <- read.nexus("bears.tre")
plot(tree)
tree_sl <- tree
tree_sl$edge.length <- tree$edge.length/100
tree_sl$edge.length <- tree_sl$edge.length + 0.01
write.tree(tree_sl, file="bears.nwk")
## Simulate a 300 bp alignment with seq-gen. HKY with ts/tv = 1 and equal
## base frequencies reduces to a JC69-like model (hence the output name).
system("seq-gen -mHKY -t1 -f0.25,0.25,0.25,0.25 -l300 -n1 -on <bears.nwk> bears_JC69.nex")
## Build a long-branch tree for the LBA demonstration: copy the rescaled
## branch lengths, then lengthen edges 5 and 11 by 1.0.
tree_fz <- tree
tree_fz$edge.length <- tree_sl$edge.length
tree_fz$edge.length[c(5,11)] <- tree_fz$edge.length[c(5,11)]+1
plot(tree_fz)
write.tree(tree_fz, file="bears_fz.nwk")
write.nexus(tree_fz, file="bears_LBAtree.nex")
## Simulate alignments of several lengths on the LBA tree (5000 down to 100
## sites), presumably to show how alignment size affects inference under
## long-branch attraction — confirm against the lab handout.
system("seq-gen -mHKY -t1 -f0.25,0.25,0.25,0.25 -l5000 -n1 -on <bears_fz.nwk> bears_LBA5000.nex")
system("seq-gen -mHKY -t1 -f0.25,0.25,0.25,0.25 -l1000 -n1 -on <bears_fz.nwk> bears_LBA1000.nex")
system("seq-gen -mHKY -t1 -f0.25,0.25,0.25,0.25 -l500 -n1 -on <bears_fz.nwk> bears_LBA500.nex")
system("seq-gen -mHKY -t1 -f0.25,0.25,0.25,0.25 -l100 -n1 -on <bears_fz.nwk> bears_LBA100.nex")
|
ee73487a94ce118b10d26ce3aae5cb8437c1ae70 | 29d97c13269fbd0543f44a68e1eee165d3c1c618 | /helper/DBSC/DBSCHelper.R | 849e2c3e3c35fa041150d6c638818d335ec78da4 | [] | no_license | roth-mh/occ-cluster | 22b6bb9a08fd084e0269d7389bdcb28abc2bda75 | 8365424fc6c845ca9f35fbd1e955ca25b074f62e | refs/heads/main | 2023-08-24T17:14:44.466616 | 2021-10-21T19:21:30 | 2021-10-21T19:21:30 | 322,646,819 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,729 | r | DBSCHelper.R | # DBSC.Helper.R
library(rgeos)
library(sp)
library(sqldf)
library(RTriangle)
library(igraph)
library(tcR)
# extracts and formats vertex points
# Builds the vertex-attribute table for the DBSC graph: one row per
# checklist, carrying its coordinates, id, and the covariates that define
# the clustering feature space.
#
# Args:
#   WETA_df: data frame of checklists; must contain latitude, longitude,
#            checklist_id and the columns named in covObj$siteCovs.
#   covObj:  object with a siteCovs character vector of covariate names.
#            NOTE(review): the final SELECT keeps only TCB/TCA/TCW/TCG
#            (presumably tasseled-cap indices), so covObj$siteCovs must
#            include those four columns — confirm.
# Returns: data frame with latitude, longitude, checklist_id (character),
#          TCB, TCA, TCW, TCG.
formatVert <- function(WETA_df, covObj){
  v_pts <- subset(WETA_df, select = c("latitude", "longitude", "checklist_id", covObj$siteCovs))
  v_pts_df <- as.data.frame(v_pts)
  # Earlier covariate set, kept for reference:
  # v_pts <- sqldf("SELECT latitude, longitude, checklist_id, fall_nbr_TCA_mean_75,
  #                 fall_nbr_B4_stdDev_150,
  #                 elevation_stdDev_150,
  #                 spring_nbr_B7_stdDev_300,
  #                 aspect_mean_300 from v_pts_df group by latitude, longitude")
  # Keep only the covariates used for the attribute-distance computation.
  v_pts <- sqldf("SELECT latitude, longitude, checklist_id,
                 TCB, TCA, TCW, TCG from v_pts_df")
  v_pts$checklist_id <- as.character(v_pts$checklist_id)
  return(v_pts)
}
# calculates the global mean and standard deviation
# of the edges in a Delaunay Triangulation
# note: distance includes geo-coords
# Computes the mean and standard deviation of edge weights in a Delaunay
# triangulation, where an edge's weight is the Euclidean distance between
# its endpoints' attribute vectors (the attributes include the latitude and
# longitude columns, so geographic distance is part of the weight).
#
# Args:
#   DT:      triangulation with an edge matrix DT$E (one row per edge,
#            columns = vertex indices into attr_df).
#   attr_df: data frame of vertex attributes; must contain a checklist_id
#            column plus numeric attribute columns.
# Returns: list(mean, sd, edgeWts), where edgeWts is a matrix of lists with
#          columns v1_name, v2_name, d (one row per edge); sd is 0 when the
#          triangulation has a single edge.
calcGlobalMeanSD <- function(DT, attr_df){
  n_edges <- nrow(DT$E)
  # Fail loudly instead of producing an obscure subscript error downstream.
  if (is.null(n_edges) || n_edges == 0) {
    stop("calcGlobalMeanSD: triangulation has no edges", call. = FALSE)
  }
  attr_df_no_checklist <- subset(attr_df, select = -c(checklist_id))
  # Collect rows in a preallocated list; the original grew the matrix with
  # rbind() inside the loop, which is quadratic in the number of edges.
  rows <- vector("list", n_edges)
  for (i in seq_len(n_edges)) {
    v1 <- DT$E[i, 1]
    v2 <- DT$E[i, 2]
    d <- dist(rbind(attr_df_no_checklist[v1, ], attr_df_no_checklist[v2, ]))[1]
    rows[[i]] <- list(v1_name = attr_df$checklist_id[v1],
                      v2_name = attr_df$checklist_id[v2],
                      d = d)
  }
  edge_wts <- do.call(rbind, rows)
  dists <- as.double(as.data.frame(edge_wts)$d)
  # sd() of a single value is NA; define it as 0 for a one-edge graph.
  edge_sd <- if (nrow(edge_wts) == 1) 0 else sd(dists)
  return(list(mean = mean(dists), sd = edge_sd, edgeWts = edge_wts))
}
# removes Global Long Edges from a DT to form C-DT
##################################################
# for each vertex v in DT this func calculates the
# local mean (mean length of edges incident to v)
# and determiens if any edge of v, dist(e_v) > gdc
# where:
# gdc = globalmean + (globalmean/localmean(v))*globalSD
# if so, e_v is removed from DT
# Removes "global long edges" from the Delaunay triangulation graph to form
# the C-DT. For each vertex v, any incident edge longer than the cutoff
#   gdc = globalMean + (globalMean / localMean(v)) * globalSD
# (localMean(v) = mean weight of v's incident edges) is deleted.
#
# Args:
#   DT:         unused here; kept for interface compatibility with callers.
#   globalMean: mean edge weight over the whole triangulation.
#   globalSD:   standard deviation of those edge weights.
#   edgeWts:    data frame with v1_name, v2_name, d columns (list columns,
#               as produced by calcGlobalMeanSD).
#   DT_graph:   igraph graph whose edges mirror edgeWts.
# Returns: list(graph = pruned graph, nRmv = number of removed edges).
removeGlobalLongEdges <- function(DT, globalMean, globalSD, edgeWts, DT_graph){
  to_delete_edge_ids <- c()
  for (v in V(DT_graph)$name) {
    # Every edge that touches v, under whichever endpoint it was recorded.
    edges_of_v <- rbind(edgeWts[edgeWts$v1_name == v, ],
                        edgeWts[edgeWts$v2_name == v, ])
    localMean <- mean(as.double(edges_of_v$d))
    # Per-vertex global distance cutoff.
    gdc <- globalMean + (globalMean / localMean) * globalSD
    # seq_len() (not 1:nrow) so a vertex with no matching rows is skipped
    # cleanly instead of iterating over c(1, 0).
    for (e in seq_len(nrow(edges_of_v))) {
      v1_name <- edges_of_v[e, ]$v1_name
      v2_name <- edges_of_v[e, ]$v2_name
      d <- edges_of_v[e, ]$d
      if (d > gdc) {
        edge_id <- get.edge.ids(DT_graph,
                                vp = c(as.character(v1_name), as.character(v2_name)),
                                error = FALSE)
        if (!(edge_id %in% to_delete_edge_ids)) {
          to_delete_edge_ids <- append(to_delete_edge_ids, edge_id)
        }
      }
    }
  }
  DT_graph <- DT_graph - edge(to_delete_edge_ids)
  return(list(graph = DT_graph, nRmv = length(to_delete_edge_ids)))
}
# local variation is the sd of the length of all
# edges incident to a vertex v
################################################
# given a vertex name and graph, this func finds
# v's neighbors and adds the distance to a list
# then returns the sd. returns0 if less than 2
# items
# Local variation of a vertex: standard deviation of the weights of the
# edges incident to it. Returns 0 for vertices with fewer than two
# neighbors (sd would be undefined). gdt.v is unused but kept so the
# signature matches existing call sites.
local_variation <- function(g, v_name, gdt.v, edg_att){
  nbr_names <- neighbors(g, v_name)$name
  if (length(nbr_names) < 2) {
    return(0)
  }
  incident_dists <- as.double()
  for (nbr in nbr_names) {
    eid <- get.edge.ids(g, vp = c(v_name, nbr))
    # eid == 0 means igraph could not find the edge; skip it.
    if (eid) {
      incident_dists <- append(incident_dists, edg_att$d[[eid]])
    }
  }
  sd(incident_dists)
}
# removes long local edges from each disconnected
# graph in C-DT
#################################################
# for each graph G_i in C-DT, this func iterates
# through each vertex, v, and creates its 2-order
# neighborhood. calculate ldc and determine if
# any edge in 2neighborhood is greater than ldc.
# if so, delete it from G_i.
# ldc = 2-order-mean(v) + BETA*mean_variation(v)
#
# where the 2-order-mean is the mean of all edge
# dist in 2neighborhood and the mean variation is
# the mean value of all local_variation calculations
# for each vertex in 2neighborhood
# Removes "long local edges" from each (already globally pruned) subgraph.
# For every vertex's 2-order neighborhood the local cutoff is
#   ldc = mean(2-order edge weights) + Beta * mean(local variation)
# and any 2-order edge longer than ldc is deleted from the subgraph.
#
# Args:
#   de:   list of igraph subgraphs (the decomposed C-DT).
#   Beta: multiplier on the mean local variation (default 2).
# Returns: list of pruned graphs, in the same order as `de`.
# NOTE(review): disp() is not provided by the libraries loaded at the top of
# this file — confirm it is available (e.g. from pracma) at runtime.
removeLongLocalEdges <- function(de, Beta=2){
  list_of_graphs <- list()
  j <- 0
  for(g in de){
    edg_att <- igraph::edge.attributes(g)
    to_delete_edge_ids <- c()
    j <- j + 1
    # Vertex table; requires data.table and magrittr to be attached by the
    # caller (they are not loaded in this file's library() block).
    gdt.v <- igraph::as_data_frame(g, what = "vertices") %>% setDT()
    if(length(gdt.v$name) > 1){
      # One ego graph per vertex: its neighborhood within 2 hops.
      paths_in_2 <- make_ego_graph(g, order=2)
      for(v_nghbrs in paths_in_2){
        edges_in_2 <- get.edgelist(v_nghbrs)
        # Weights of every edge inside this 2-order neighborhood.
        list_dist_2_order <- c()
        for(e in 1:nrow(edges_in_2)){
          vertices_of_edge <- edges_in_2[e,]
          edge_id <- get.edge.ids(g, vp = vertices_of_edge)
          # edge_id == 0 means the edge was not found in g; skip it.
          if(edge_id != 0){
            edge_val <- edg_att$d[[edge_id]]
            list_dist_2_order <- append(list_dist_2_order, edge_val)
          }
        }
        # Mean local variation over all vertices in the neighborhood.
        near_v <- unique(as.character(edges_in_2))
        local_variation_list <- c()
        for(v in near_v){
          local_variation_list <- append(local_variation_list, local_variation(g, v, gdt.v, edg_att))
        }
        mean_2_order <- mean(list_dist_2_order)
        mean_variation <- mean(local_variation_list)
        # Sentinel value: an NA mean makes ldc effectively infinite, so no
        # edge in this neighborhood is deleted; disp() flags the condition.
        if(is.na(mean_2_order)){
          mean_2_order = 100000
          disp("broken mean 2 order")
        }
        if(is.na(mean_variation)){
          mean_variation = 100000
          disp("broken mean variation")
        }
        # Local distance cutoff for this neighborhood.
        ldc <- mean_2_order + Beta * mean_variation
        # Delete any 2-order edge whose weight exceeds ldc.
        for(i in 1:nrow(edges_in_2)){
          e <- edges_in_2[i,]
          edge_id <- get.edge.ids(g, vp = e)
          if(edge_id != 0){
            e_dist <- edg_att$d[[edge_id]]
            if(e_dist > ldc){
              to_delete_edge_ids <- append(to_delete_edge_ids, edge_id)
            }
          }
        }
      }
    }
    disp(length(to_delete_edge_ids), " deleted edges")
    g <- g - edge(to_delete_edge_ids)
    list_of_graphs[[j]] <- g
  }
  return(list_of_graphs)
}
# calculates T1 from C-DT
#################################################
# for each graph G_i in C-DT, for every vertex v
# in G_i, find v's min neighbor in the spatial
# domain. detect and remove outliers by the rule
# of 3 SDs from the min_edge list. return the
# average difference
# Computes T1, the spatial-direct-reachability threshold: for every vertex
# collect the weight of its shortest incident edge, drop outliers (values
# at or above 3 standard deviations), and return the mean of the rest.
#
# Args:
#   de: list of igraph subgraphs whose edges carry a 'd' weight attribute.
# Returns: scalar threshold T1.
# NOTE(review): disp() is not provided by the libraries loaded at the top of
# this file — confirm it is available (e.g. from pracma) at runtime.
calcT1 <- function(de){
  min_edge_df <- data.frame(matrix(ncol = 2, nrow = 0))
  colnames(min_edge_df) <- c("edge_id", "edge_dist")
  for (g in de) {
    # Requires data.table and magrittr to be attached by the caller.
    v_df <- igraph::as_data_frame(g, what = "vertices") %>% setDT()
    edg_att <- igraph::edge.attributes(g)
    # Graphs without a 'd' edge attribute (e.g. edgeless singletons)
    # contribute nothing.
    if (length(edg_att$d) != 0) {
      for (v_name in v_df$name) {
        nghbrs <- neighbors(g, v_name)$name
        min_edge_wt <- Inf
        min_edge_id <- NULL
        for (n_1 in nghbrs) {
          e_id <- get.edge.ids(g, vp = c(v_name, n_1))
          e_dist <- edg_att$d[[e_id]]
          if (e_dist < min_edge_wt) {
            min_edge_wt <- e_dist
            min_edge_id <- e_id
          }
        }
        if (!is.null(min_edge_id)) {
          min_edge_df <- rbind(min_edge_df, data.frame(edge_id = min_edge_id, edge_dist = min_edge_wt))
        }
      }
    }
  }
  # Outlier removal by the rule of three standard deviations.
  sd_min_edges <- sd(as.double(min_edge_df$edge_dist))
  edges_to_drop <- which(min_edge_df$edge_dist >= 3 * sd_min_edges)
  disp("edges dropped: ", as.character(length(edges_to_drop)))
  # BUG FIX: the original always indexed with -c(edges_to_drop); when no
  # outlier was found that negated a NULL index, corrupting the result
  # instead of keeping every row. Only subset when something was dropped.
  if (length(edges_to_drop) > 0) {
    min_edge_df <- min_edge_df[-edges_to_drop, ]
  }
  return(mean(min_edge_df$edge_dist))
}
# TRUE when v1 has at least one neighbor within spatially-directly-
# reachable distance (connecting edge weight <= T1); such a vertex can
# seed or extend a cluster.
isExpandingCore <- function(v1_name, g, T1){
  nbr_names <- neighbors(g, v1_name)$name
  any(vapply(
    nbr_names,
    function(nbr) isSpatiallyDirectlyReachable(v1_name, nbr, g, T1),
    logical(1)
  ))
}
# TRUE when v2 is adjacent to v1 in g and the weight of their connecting
# edge is at most T1; FALSE otherwise.
isSpatiallyDirectlyReachable <- function(v1_name, v2_name, g, T1){
  nbr_names <- neighbors(g, v1_name)$name
  if (!(v2_name %in% nbr_names)) {
    return(FALSE)
  }
  eid <- get.edge.ids(g, vp = c(v1_name, v2_name))
  d <- get.edge.attribute(g)$d[eid]
  if (d <= T1) {
    return(TRUE)
  }
  FALSE
}
# Density indicator of a vertex: the count of its spatially-directly-
# reachable neighbors (incident edge weight <= T1) plus that count divided
# by the total neighbor count. Yields NaN for an isolated vertex (0/0);
# the caller maps NaN to 0. v_df is unused but kept so the signature
# matches existing call sites.
calcDensityIndicator <- function(v_name, g, v_df, T1){
  nbr_names <- neighbors(g, v_name)$name
  edge_dists <- igraph::edge.attributes(g)$d
  sdr_count <- 0
  for (nbr in nbr_names) {
    eid <- get.edge.ids(g, vp = c(v_name, nbr))
    if (edge_dists[[eid]] <= T1) {
      sdr_count <- sdr_count + 1
    }
  }
  sdr_count + sdr_count / length(nbr_names)
}
# calculate DI for each vertex and sort
# for each graph. return a sorted list on
# density indicator
# Computes the density indicator (DI) for every vertex of every graph and
# returns one table sorted by DI, descending.
#
# Args:
#   new_graphs: list of igraph subgraphs.
#   T1:         spatial-direct-reachability threshold.
# Returns: data frame with columns v_name, density_indicator, graph (the
#          index of the graph in new_graphs), sorted by DI descending.
calcDI.DF <- function(new_graphs, T1){
  DI_df <- data.frame(matrix(ncol = 2, nrow = 0))
  colnames(DI_df) <- c("v_name", "density_indicator")
  g_num <- 0
  for(g in new_graphs){
    g_num <- g_num + 1
    # Vertex table; requires data.table and magrittr to be attached by the
    # caller (neither is loaded in this file's library() block — confirm).
    v_df <- igraph::as_data_frame(g, what = "vertices") %>% setDT()
    for(i in 1:nrow(v_df)){
      di <- calcDensityIndicator(v_df[i,]$name, g, v_df, T1)
      # Isolated vertices produce 0/0 = NaN; treat them as zero density.
      if(is.nan(di)){
        di <- 0
      }
      DI_df <- rbind(DI_df, data.frame(v_name=v_df[i,]$name, density_indicator=di, graph=g_num))
    }
  }
  return(DI_df[order(-DI_df$density_indicator),])
}
# selecting spatial core
# Selects the next spatial clustering core (SPCC): the vertex with the
# highest density indicator; ties are broken by the smallest mean weight of
# incident edges. Returns NULL when no vertex has a positive DI (i.e. no
# further clusters can be seeded).
#
# Args:
#   sorted_DI:  data frame with v_name, density_indicator, graph columns.
#   new_graphs: list of igraph subgraphs, indexed by the graph column.
# Returns: the winning one-row data frame, or NULL.
select.SPCC <- function(sorted_DI, new_graphs){
  if (max(sorted_DI$density_indicator) == 0) {
    return(NULL)
  }
  max_DI <- sorted_DI[sorted_DI$density_indicator == max(sorted_DI$density_indicator), ]
  # BUG FIX: the original gated the tie-break on length(max_DI) > 1, but
  # length() of a data.frame is its COLUMN count, so the test was always
  # TRUE here (and min_spcc would have been undefined had it ever been
  # FALSE). Iterate over the candidate ROWS unconditionally; a single
  # candidate simply wins outright.
  min_avg_diff <- Inf
  min_spcc <- NULL
  for (i in seq_len(nrow(max_DI))) {
    v <- max_DI[i, ]
    v_name <- as.character(v$v_name)
    gra <- new_graphs[[v$graph]]
    edg_att <- get.edge.attribute(gra)
    # Mean attribute-distance of this candidate's incident edges.
    edge_val <- c()
    for (n1 in neighbors(gra, v_name)$name) {
      edge_id <- get.edge.ids(gra, vp = c(v_name, as.character(n1)))
      edge_val <- append(edge_val, edg_att$d[[edge_id]])
    }
    if (mean(edge_val) < min_avg_diff) {
      min_avg_diff <- mean(edge_val)
      min_spcc <- v
    }
  }
  return(min_spcc)
}
# TRUE when vertex v_name is spatially reachable from the cluster: it must
# be adjacent to at least one cluster member AND its attribute vector must
# lie within T1 of the cluster's mean attribute vector.
#
# Args:
#   v_name:    candidate vertex / checklist id.
#   CLU_names: checklist ids of the current cluster members.
#   g:         igraph subgraph containing v_name.
#   v_pts:     vertex attribute table from formatVert().
#   T1:        spatial-direct-reachability threshold.
isSpatiallyReachable <- function(v_name, CLU_names, g, v_pts, T1){
  # v_df is computed but not used below; kept as in the original.
  v_df <- igraph::as_data_frame(g, what = "vertices") %>% setDT()
  nghbrs <- neighbors(g, v_name)$name
  v_pts_no_checklist <- subset(v_pts, select = -c(checklist_id))
  # CLU_idx <- which(v_df$name %in% CLU_v)
  # Adjacency test: does the candidate touch any cluster member?
  # NOTE(review): tcR::intersectLogic appears to return a logical overlap
  # vector, making this equivalent to any(CLU_names %in% nghbrs) — confirm
  # against the tcR documentation.
  if(sum(as.double(tcR::intersectLogic(.alpha = CLU_names, .beta = nghbrs))) > 0){
    # CLU_df <- as.data.frame(v_df[v_df$name %in% CLU_v])
    # Distance between the candidate's attributes and the cluster centroid.
    v_info <- v_pts_no_checklist[v_pts$checklist_id == v_name,]
    avg_att <- colMeans(v_pts_no_checklist[v_pts$checklist_id %in% CLU_names,])
    if(dist(rbind(avg_att, v_info))[1] <= T1){
      return(TRUE)
    }
  }
  return(FALSE)
}
################
# this is an implementation of Step 3 of the DBSC alogrithm.
# First, we calculate the density indicator for every vertex.
################
# (0): We find the spatial clustering core, spcc with largest
# DI and unlabeled
################
# (i): rank the expanding core neighbors of spcc. create
# clust and add spcc to it
################
# (ii): add these expanding cores in order of DI; every
# neighbor must be both sdr from spcc and sr from clust
################
# (iii): look at each subsequent bfs level. the first expanding
# core to be added is treated as the spcc and we go back to (i)
################
# when no more expanding cores can be added, a cluster is found.
# go back to Step (0).
################
################
# Step 3 of DBSC: grows clusters from spatial clustering cores (see the
# algorithm description in the comment block above this function).
#
# Args:
#   de_new_graphs: list of fully pruned igraph subgraphs.
#   v_pts:         vertex attribute table from formatVert().
#   T1:            spatial-direct-reachability threshold from calcT1().
# Returns: list of two data frames — the full DI table with cluster labels
#          (clust == -1 marks unassigned/noise vertices) and the residual
#          unlabeled table.
createClusters <- function(de_new_graphs, v_pts, T1){
  sorted_DI <- calcDI.DF(de_new_graphs, T1)
  # -1 marks "not yet assigned to any cluster".
  sorted_DI$clust <- -1
  m_spcc <- select.SPCC(sorted_DI, de_new_graphs)
  clust_count <- 1
  s_DI <- sorted_DI
  # Each iteration seeds one cluster from the current best unlabeled core.
  while(!is.null(m_spcc)){
    spatialCore <- m_spcc
    spatial_core_name <- as.character(spatialCore$v_name)
    gra <- de_new_graphs[[spatialCore$graph]]
    clust <- c(spatial_core_name)
    # BFS from the seed; vertices are processed level by level. Note this
    # shadows igraph::bfs with its own result object.
    bfs <- bfs(gra, root = spatial_core_name, unreachable = F, dist = T)
    for(k in 1:max(bfs$dist)){
      # Vertices exactly k hops from the seed, ordered by density indicator.
      k_order_nghbrs <- rownames(as.data.frame(bfs$dist[bfs$dist == k]))
      nghbrs_sorted <- as.character(s_DI[s_DI$v_name %in% k_order_nghbrs,]$v_name)
      # Steps (iii)/(i): beyond the first level, promote the best expanding
      # core at this level to be the new working core.
      if(k > 1){
        spatial_core_name <- NULL
        for(n1 in nghbrs_sorted){
          if(isSpatiallyReachable(n1, clust, gra, v_pts, T1)){
            if(isExpandingCore(n1, gra, T1)){
              spatial_core_name <- n1
              nghbrs_sorted <- nghbrs_sorted[nghbrs_sorted != spatial_core_name]
              clust <- append(clust, spatial_core_name)
              break
            }
          }
        }
      }
      # No expanding core at this level: the cluster cannot grow further.
      if(is.null(spatial_core_name)){
        break
      }
      # Step (ii): absorb every remaining expanding core at this level that
      # is spatially reachable from the cluster AND spatially directly
      # reachable from the working core.
      for(n1 in nghbrs_sorted){
        if(!(n1 %in% clust)){
          if(isExpandingCore(n1, gra, T1)){
            if(isSpatiallyReachable(n1, clust, gra, v_pts, T1)){
              if(isSpatiallyDirectlyReachable(n1, spatial_core_name, gra, T1)){
                clust <- append(clust, n1)
              }
            }
          }
        }
      }
      # NOTE(review): vertices in `clust` stay in s_DI until the cluster is
      # finalized below; only then are they excluded from future searches.
    }
    # Finalize: label the cluster and restrict the search to what is left.
    sorted_DI[sorted_DI$v_name %in% clust,]$clust <- clust_count
    clust_count <- clust_count + 1
    s_DI <- sorted_DI[sorted_DI$clust == -1,]
    m_spcc <- select.SPCC(s_DI, de_new_graphs)
  }
  return(list(sorted_DI, s_DI))
}
# End-to-end DBSC pipeline: Delaunay triangulation of the checklist
# coordinates, global and local long-edge pruning, T1 estimation, cluster
# growth, and finally assignment of a site id to every checklist.
#
# Args:
#   WETA_2017: checklist data frame (must contain latitude, longitude and
#              the covariates expected by formatVert()).
#   covObj:    covariate descriptor passed through to formatVert().
# Returns: WETA_2017 with added clust and site columns (site == clust);
#          every noise point receives its own singleton cluster id.
runDBSC <- function(WETA_2017, covObj){
  v_pts <- formatVert(WETA_2017, covObj)
  # Step 1: Delaunay triangulation of the point coordinates.
  DT = RTriangle::triangulate(RTriangle::pslg(v_pts[c("latitude", "longitude")]))
  globalMeanDT <- calcGlobalMeanSD(DT, v_pts)
  globalMean <- globalMeanDT$mean
  globalSD <- globalMeanDT$sd
  edgeWts <- as.data.frame(globalMeanDT$edgeWts)
  DT_graph <- igraph::graph_from_data_frame(edgeWts, directed = F)
  # Step 2: prune global long edges, split into components, then prune
  # local long edges and split again.
  DT_subgraphs <- removeGlobalLongEdges(DT, globalMean, globalSD, edgeWts, DT_graph)
  de <- decompose(DT_subgraphs$graph)
  new_graphs <- removeLongLocalEdges(de)
  de_new_graphs <- list()
  g_num <- 1
  for(n_g in new_graphs){
    decomp <- decompose(n_g)
    for(d in decomp){
      de_new_graphs[[g_num]] <- d
      g_num <- g_num + 1
    }
  }
  T1 <- calcT1(de_new_graphs)
  # Step 3: grow clusters; afterwards give every unassigned (noise) vertex
  # its own singleton cluster id.
  clust_li <- createClusters(de_new_graphs, v_pts, T1)
  sorted_DI <- as.data.frame(clust_li[1])
  max_clust <- max(sorted_DI$clust) + 1
  for(i in 1:nrow(sorted_DI)){
    pt <- sorted_DI[i,]
    if(pt$clust == -1){
      sorted_DI[i,]$clust <- max_clust
      max_clust <- max_clust + 1
    }
  }
  # Propagate cluster ids to every checklist sharing the vertex's lat/long.
  WETA_2017$clust <- -1
  for(j in 1:nrow(sorted_DI)){
    c_id <- as.character(sorted_DI[j,]$v_name)
    lat_long <- v_pts[v_pts$checklist_id == c_id,][c("latitude", "longitude")]
    WETA_2017[WETA_2017$latitude == lat_long$latitude & WETA_2017$longitude == lat_long$longitude,]$clust <- sorted_DI[j,]$clust
  }
  WETA_2017$site <- WETA_2017$clust
  return(WETA_2017)
}
|
b5e9cdfb95ea1f4100e73dd365a5d9cf3f768aee | f03e91dffb3ebfbfc9dc611e91e07ff0d8f774e4 | /buildMapper.R | 210f7f5a9a307b90904789409dabe66d74badd06 | [] | no_license | rmoag/Mapper-Imputation | 1aedb2f94662735df4478d77335c92596b07e87a | bbba514e9e3dac249140be4d75ff131723596b26 | refs/heads/master | 2020-07-21T09:56:37.536992 | 2019-09-06T15:56:02 | 2019-09-06T15:56:02 | 206,824,405 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,276 | r | buildMapper.R | buildMapper = function(completeData,catVars = FALSE){
if(catVars){
distanceValues = as.matrix(daisy(completeData,metric = "gower"))
}else{
distanceValues = as.matrix(daisy(completeData,metric = "euclidean"))
}
filterValues = numeric(dim(completeData)[1])
points_in_vertex = list()
for(i in 1:length(filterValues)){
filterValues[i] = sum(exp(-((distanceValues[i,] ** 2)) / (2 * sd((distanceValues[i,])) ** 2)))
}
cent = floor(sqrt(length(filterValues) /2))
kclust = kmeans(filterValues,centers = cent)
for(i in 1:cent)
{
start = length(points_in_vertex)
if(length(which(kclust$cluster == i)) > 1)
{
levelDist = as.dist(distanceValues[which(kclust$cluster == i),which(kclust$cluster ==i)])
levelMaxDist = max(levelDist)
levelClusterOutput = hclust(levelDist,method="single")
heights = levelClusterOutput$height
cut = cluster_cutoff_at_first_empty_bin(heights,levelMaxDist,10)
clusterIndices = as.vector(cutree(levelClusterOutput,h=cut))
for(j in 1:max(clusterIndices))
{
points_in_vertex[[start + j]] = which(kclust$cluster == i)[which(clusterIndices == j)]
}
}else{
points_in_vertex[[start + 1]] == which(kclust$cluster == i)
}
}
return(points_in_vertex)
}
|
6716ba174d026057b6872f05d5ea3fde3a141f2d | d75069e9cafc74c591662c148c4571563480da46 | /onlineapi/R/onLoad.R | 2336124f302954155c42081656b9f0c606b7901f | [] | no_license | snakecy/R-Projects | de838ed2611efb162e176bb495945af92e304017 | efc6e6dcb42d3b11daac0c5ac2d7fcd2a6b6bf7d | refs/heads/master | 2021-01-10T03:33:04.905426 | 2016-03-14T11:40:50 | 2016-03-14T11:40:50 | 53,850,901 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 390 | r | onLoad.R | .onLoad <- function(lib, pkg){
    # Load the package's bundled datasets (wr, lm, clm) into the package
    # environment when the package is loaded.
    # Do not use this in combination with LazyData: true in DESCRIPTION.
    utils::data(wr,lm,clm, package = pkg, envir = parent.env(environment()))
    # NOTE(review): calling library() inside .onLoad attaches these packages
    # to the user's search path, which CRAN policy discourages; they should
    # be declared in Imports/Depends instead — confirm before changing, as
    # downstream code may rely on them being attached.
    library(methods)
    library(foreach)
    library(Matrix)
    library(FeatureHashing)
    library(glmnet)
    library(jsonlite)
    library(lubridate)
    library(dplyr)
}
|
ac024e20548147663a9cf94ea53bfb223c331cff | d0108c3f8d99cf84d0227c57b10dbd8236585192 | /Fig.S4A.venn.H3K4me3.primed.R | 42e8f79cca44a0c1c2655eb60993e42cb65f47ae | [] | no_license | Webb-Laboratory/Maybury-Lewis_et_al_2021 | f2ef2f9e4571427571bfbf83f5242cc88254b51a | 77f353d6c5ae91fa0cb260abe0d674fbed946bc4 | refs/heads/main | 2023-06-05T10:24:03.584835 | 2021-06-30T19:55:13 | 2021-06-30T19:55:13 | 346,836,008 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,654 | r | Fig.S4A.venn.H3K4me3.primed.R | # ========================================================
# ========================================================
# Venn diagrams for H3K4me3 in qNSCs and aNSCs
# that overlap with AvsQ.truStable sites
# v2 AvsQ DiffBind 9929 DBsites
# 19976 AvsQ truStable sites
# October-November 2019
# ========================================================
# NOTE(review): setwd() with an absolute personal path makes this script
# non-portable; consider running it from the project root instead.
setwd("~/Dropbox/Brown_Webb/Desktop/ATAC-seq-PEAKS/AvsQ_analysis_2019_v2/H3K4me3")
library(venneuler)
# Quiescent NSCs: area-proportional Venn of H3K4me3-only (6247),
# truStable-only (9037), and overlapping (10939) sites.
quies <- venneuler(c(H3K4me3=6247, stable=9037, "stable&H3K4me3"=10939))
plot(quies)
# Activated NSCs: H3K4me3-only (5357), truStable-only (9353), overlap (10623).
act <- venneuler(c(H3K4me3=5357, stable=9353,"stable&H3K4me3"=10623))
plot(act)
# ==================================================
# Fisher's exact test
# Are AvsQ truStable sites enriched with H3K4me3?
# ==================================================
# ====================
# Quiescent H3K4me3
# ====================
# 2x2 contingency table of sites: rows = H3K4me3 status in qNSCs,
# columns = AvsQ truStable status.
# ("notStable & notH3K4me3" = DBsites - DB.quies.H3K4me3)
quies_test <- matrix(
  c(10939, 9037, 6247, 7868),
  nrow = 2,
  dimnames = list(c("H3K4me3", "notH3K4me3"), c("stable", "notStable"))
)
# Sample odds ratio: odds of being truStable among H3K4me3 sites divided
# by the odds among non-H3K4me3 sites (~1.52).
odds_stable_given_H3K4me3 <- 10939 / 6247
odds_stable_given_no_mark <- 9037 / 7868
odds_ratio <- odds_stable_given_H3K4me3 / odds_stable_given_no_mark
# One-sided Fisher's exact test (H1: odds ratio > 1, i.e. enrichment).
quies_fisher <- fisher.test(quies_test, alternative = "greater")
quies_fisher
# The printed summary reports p-value < 2.2e-16, conditional odds ratio
# ~1.5245, and a 95% CI lower bound of ~1.4697.
# Exact p-value (~1.03646e-81):
quies_fisher$p.value
# ====================
# Activated H3K4me3
# ====================
# 2x2 contingency table: rows = H3K4me3 status in aNSCs,
# columns = AvsQ truStable status.
# ("notStable & notH3K4me3" = DBsites - DB.act.H3K4me3)
act_test <- matrix(c(10623,9353,5357,8053), nrow=2,
                   dimnames = list(c("H3K4me3", "notH3K4me3"), c("stable", "notStable")))
#             stable notStable
# H3K4me3      10623      5357
# notH3K4me3    9353      8053
# Odds ratio (~1.71)
odds_H3K4me3 <- 10623 / 5357
# BUG FIX: the denominator was 9353 / 8063, inconsistent with the 8053
# entered in the act_test table above.
odds_notH3K4me3 <- 9353 / 8053
odds_ratio <- odds_H3K4me3/odds_notH3K4me3 # 1.71
# One-sided Fisher's exact test (H1: odds ratio > 1, i.e. enrichment).
fisher.test(act_test, alternative="greater")
# Fisher's Exact Test for Count Data
#
# data: act_test
# p-value < 2.2e-16
# alternative hypothesis: true odds ratio is greater than 1
# 95 percent confidence interval:
# 1.644617 Inf
# sample estimates:
# odds ratio
# 1.707394
# To get the exact p-value:
result2 <- fisher.test(act_test, alternative="greater")
result2$p.value
# [1] 3.339426e-125
|
a0188bf14c6f21924c614d0950cf75995fa5e514 | 44cbf9de96bd4db4b1458d4169d931d4cd58a0d9 | /Plot2.R | e9d316281e1cd6c75d6d802b19c7b5bef015939a | [] | no_license | Ando3121/ExData_Plotting1 | 7beeaabd0959eaa836a0013fca464d4ee942ba1c | b40c9db88a97ae0c7f9d3d2e4942dcad06076637 | refs/heads/master | 2021-01-18T19:54:56.605094 | 2015-01-14T10:23:30 | 2015-01-14T10:23:30 | 28,993,618 | 0 | 0 | null | 2015-01-09T00:46:40 | 2015-01-09T00:46:39 | null | UTF-8 | R | false | false | 895 | r | Plot2.R | setwd("~/ExData_Plotting1")
## Import data: semicolon-separated power-consumption readings.
## NOTE(review): if the raw file encodes missing readings as "?" (common for
## this dataset), the power columns are read as character here and the
## as.numeric() calls below coerce those entries to NA with a warning —
## confirm and consider adding na.strings = "?" if so.
household_power_consumption <- read.csv("~/ExData_Plotting1/household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
## Create a POSIXct timestamp by combining the Date and Time columns.
household_power_consumption$DateTime <- as.POSIXct(paste(household_power_consumption$Date, household_power_consumption$Time), format="%d/%m/%Y %H:%M:%S")
## Convert the Date column (stored as dd/mm/yyyy text) to Date class.
household_power_consumption$Date <- as.Date(household_power_consumption$Date, "%d/%m/%Y")
## Restrict to the two days of interest: 2007-02-01 and 2007-02-02.
workset <- subset(household_power_consumption, Date >= "2007-02-01" & Date <= "2007-02-02")
## Plot 2 - Global Active Power over time, written to Plot2.png. An empty
## frame (type = "n") is drawn first, then the series is added with lines().
png(file = "Plot2.png")
plot(workset$DateTime, as.numeric(workset$Global_active_power), type = "n", ylab = "Global Active Power (kilowatts)", xlab = "", cex.axis = 0.8, cex.lab = .7)
lines(workset$DateTime, as.numeric(workset$Global_active_power))
dev.off()
|
ecfa8160a7ce2e53a928d96f8e2b0096ee429c81 | e9d1de2290c91be3225a8d245c36ad5a1e1049f8 | /man/parallelModePlots.Rd | 978c04a864fdd064a54dcbf776c79e534a5ca6c4 | [] | no_license | mwrowe/fretr | 895b066c596dc10d04a6df9127ff17afc1397dbb | 75fbbe16b015be89ca4ae6829b67f15ea6263d20 | refs/heads/master | 2021-03-28T00:17:56.622540 | 2020-03-16T22:42:44 | 2020-03-16T22:42:44 | 247,820,350 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 907 | rd | parallelModePlots.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parallelModePlots.R
\name{parallelModePlots}
\alias{parallelModePlots}
\title{Fretboard Maps of All Parallel Modes of a Given Tonic}
\usage{
parallelModePlots(tonic, pdffile)
}
\arguments{
\item{tonic}{A character value specifying the (shared) tonic note of all the modes.}
\item{pdffile}{A character value specifying the path and name of the output *.pdf file
to generate.}
}
\value{
Returns a fretnote object.
}
\description{
Plots seven fretboard maps showing all the modes of a given tonic note,
ordered such that only a single note differs between successive modes.
}
\details{
The modes are ordered according to the cycle of fifths, from brightest to
darkest: lydian, ionian, mixolydian, dorian, aeolian, phrygian, locrian.
The goal is to relate the modes to each other.
}
\author{
M.W.Rowe, \email{mwr.stats@gmail.com}
}
|
08fc0ad1c3920dc1c549053ec54a22d397808a94 | 10b8c374979669fd8767d240da964924ed9b562c | /CountClust/MyHeatmapScript.R | aa281770657aa8ef59ba2755e5994ec2d4dcf5d8 | [] | no_license | benranco/docs | db253e4d160256ea4c72727903a4bceab48a0761 | 33e3dc82c0a97305747cb2720c78fbb733e820f4 | refs/heads/master | 2023-04-16T13:25:28.630662 | 2023-04-06T16:35:27 | 2023-04-06T16:35:27 | 68,756,861 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,618 | r | MyHeatmapScript.R | #########################################################
#########################################################
# Render one heatmap (as both PNG and PDF) for each of the
# RPKM-omega CSV files numbered 15 through 35.
#########################################################
options(stringsAsFactors = FALSE, warn = 1)

# --- Input parameters ----------------------------------
path <- "/home/benrancourt/Desktop/holly/RPKM-omega-theta-withRowNames"
heatmapSubfolder <- "heatmaps"
pdfSubfolder <- "pdf"
pngSubfolder <- "png"
inputFileNameBeforeNumber <- "RPKM-omega"
inputFileNameAfterNumber <- "-withRowNames.csv"

for (file_num in 15:35) {
  csv_name <- paste0(inputFileNameBeforeNumber, file_num, inputFileNameAfterNumber)
  # The first CSV column holds row labels; move it into the row names.
  heat_df <- read.csv(paste(path.expand(path), csv_name, sep = "/"), header = TRUE)
  row_labels <- heat_df[, 1]
  heat_df <- heat_df[, -1]
  row.names(heat_df) <- row_labels
  heat_matrix <- as.matrix(heat_df)

  # PNG rendering
  png_name <- paste0(inputFileNameBeforeNumber, file_num, "-heatmap.png")
  png(filename = paste(path, heatmapSubfolder, pngSubfolder, png_name, sep = "/"),
      width = 1200, height = 1000)
  heatmap(heat_matrix)
  dev.off()

  # PDF rendering
  pdf_name <- paste0(inputFileNameBeforeNumber, file_num, "-heatmap.pdf")
  pdf(file = paste(path, heatmapSubfolder, pdfSubfolder, pdf_name, sep = "/"),
      width = 12, height = 10)
  heatmap(heat_matrix)
  dev.off()
}
|
1e0fca5aa28a930cdf31ac739a2f571df8cfdfad | 6e5efc0b6b6b37c735c1c773531c41b51675eb10 | /man/GetROCLassoFreq.Rd | b9e952efc35731c181f8eb35fb5694d261f2f6ad | [
"GPL-2.0-or-later"
] | permissive | xia-lab/MetaboAnalystR | 09aa09c9e57d7da7d73679f5a515eb68c4158e89 | 9edbbd1e2edda3e0796b65adf440ad827abb7beb | refs/heads/master | 2023-08-10T06:08:56.194564 | 2023-08-01T15:13:15 | 2023-08-01T15:13:15 | 109,994,826 | 268 | 165 | MIT | 2023-03-02T16:33:42 | 2017-11-08T15:38:12 | R | UTF-8 | R | false | true | 419 | rd | GetROCLassoFreq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biomarker_utils.R
\name{GetROCLassoFreq}
\alias{GetROCLassoFreq}
\title{Get p-values from lasso}
\usage{
GetROCLassoFreq(data, cls)
}
\arguments{
\item{data}{Input data}
\item{cls}{Input class labels}
}
\description{
Get p-values from lasso
}
\author{
Jeff Xia \email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
64d8aac8b932b52c76b8926615c8f318d790fd49 | b0a28cdb2e2cd577094a703f3b17259f2bd91f5a | /Clases/Clase 3.R | 786919b4d463284b97e7038008e5cbf33a792bd2 | [] | no_license | EmilianoElias/Econometria | cb217ae73632454a9a3331daea61c7f32aaeb162 | 4d7bc6bfb7e1c435a0002f81223a9f7f24f3a62a | refs/heads/master | 2022-02-26T17:17:25.322624 | 2019-11-06T21:54:19 | 2019-11-06T21:54:19 | 108,468,820 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | Clase 3.R | ###
### Clase 3
### Emiliano Elias Dena
###
install.packages("pastecs")
library(haven)
library(pastecs)
ceosal1 <- read_dta("Data/ceosal1.dta")
str(ceosal1)
plot(ceosal1$finance, ceosal1$salary)
pairs(ceosal1)
stat.desc(ceosal1) |
b63cc0055f38fd3de4934abc92c1b6d18aabb341 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i10-m10-u5-v0.pddl_planlen=31/dungeon_i10-m10-u5-v0.pddl_planlen=31.R | f4c7eaeb6d144abd55a0a250a4db3173559c2fb2 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 89 | r | dungeon_i10-m10-u5-v0.pddl_planlen=31.R | b9b6945c57f78e64ae9abb49f44bfa35 dungeon_i10-m10-u5-v0.pddl_planlen=31.qdimacs 5438 16660 |
e94e9599a8e7828464b1fe8a93ec243caa3f5bff | d252c07cab0da95211119ab06dca698b712652cf | /Code/mantel_phylogenetic_vs_latitudinal_distance.R | d6ca75edb120f73559e2a477378ff384ecca5b3d | [] | no_license | dgkontopoulos/Kontopoulos_et_al_thermal_sensitivity_2020 | ecf0bb2e05ba09e06a70a96adb241dbb38328154 | 0c8749d1000fdbccb3a403c5f337de721aa059f5 | refs/heads/master | 2022-11-30T01:54:57.349997 | 2020-08-18T08:04:21 | 2020-08-18T08:04:21 | 288,392,890 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,187 | r | mantel_phylogenetic_vs_latitudinal_distance.R | #!/usr/bin/Rscript
# This script performs a Mantel test to estimate the correlation
# between phylogenetic distance and latitudinal distance for the
# datasets of phytoplankton and prokaryotes.
library(ade4)
library(ape)
#####################
# F U N C T I O N S #
#####################
# This function runs a Mantel test of phylogenetic vs latitudinal
# distance. The results are written to a file.
run_mantel_test <- function(dataset, tree, output_file)
{
# Prepare the dataset.
dataset$Species <- gsub("\\W", "_", dataset$Species)
dataset <- dataset[dataset$Species %in% tree$tip.label,]
dataset <- na.omit(dataset[,c('Species', 'Latitude')])
dataset <- dataset[!duplicated(dataset),]
# Prepare the latitudinal distance matrix.
mat_lat <- matrix(nrow = dim(dataset)[1], ncol = dim(dataset)[1])
diag(mat_lat) <- 0
# Prepare the phylogenetic distance matrix.
mat_evol <- matrix(nrow = dim(dataset)[1], ncol = dim(dataset)[1])
diag(mat_evol) <- 0
# Populate the matrices.
cophenetic_dists <- cophenetic(tree)
for ( i in 1:(nrow(dataset) - 1) )
{
for ( j in i:nrow(dataset) )
{
mat_evol[i,j] <- cophenetic_dists[
dataset$Species[i], dataset$Species[j]
]
mat_evol[j,i] <- mat_evol[i,j]
mat_lat[i,j] <- sqrt((dataset$Latitude[i] - dataset$Latitude[j])^2)
mat_lat[j,i] <- mat_lat[i,j]
}
}
# Execute the Mantel test with 9,999 permutations.
set.seed(1337)
mantel_res <- mantel.rtest(as.dist(mat_lat), as.dist(mat_evol), nrepet = 9999)
# Write the results of the test to the output file.
sink(file = output_file)
print(mantel_res)
sink(NULL)
return(0)
}
####################
# M A I N C O D E #
####################
# Read the time-calibrated phylogeny.
tree <- read.nexus('../Data/final_calibrated_phylogeny.nex')
tree$node.label <- NULL
# Prepare a list of Cyanobacteria species in the datasets.
Cyanobacteria <- c(
'Mastigocladus laminosus', 'Anabaena ucrainica',
'Aphanizomenon flosaquae', 'Aphanizomenon gracile',
'Aphanizomenon ovalisporum', 'Cylindrospermopsis raciborskii',
'Limnothrix redekei', 'Microcystis aeruginosa',
'Planktothrix agardhii', 'Prochlorococcus marinus',
'Sphaerospermopsis aphanizomenoides', 'Spirulina platensis',
'Synechococcus elongatus', 'Synechococcus lividus',
'Trichodesmium erythraeum', 'Tychonema bourrellyi'
)
# Read the TPC datasets of phytoplankton and prokaryotes.
dat_phytoplankton <- read.csv('../Data/TPC_parameter_estimates_phytoplankton_r_max.csv', stringsAsFactors = FALSE)
dat_prokaryotes <- read.csv('../Data/TPC_parameter_estimates_prokaryotes_r_max.csv', stringsAsFactors = FALSE)
# Put all Cyanobacteria in the phytoplankton dataset and run the Mantel test.
dat_phytoplankton <- rbind(dat_phytoplankton, dat_prokaryotes[dat_prokaryotes$Species %in% Cyanobacteria,])
run_mantel_test(dat_phytoplankton, tree, '../Results/Mantel_phylogenetic_vs_latitude_distance_phytoplankton.txt')
# Remove all Cyanobacteria from the prokaryotes dataset and run the Mantel test.
dat_prokaryotes <- dat_prokaryotes[!(dat_prokaryotes$Species %in% Cyanobacteria),]
run_mantel_test(dat_prokaryotes, tree, '../Results/Mantel_phylogenetic_vs_latitude_distance_prokaryotes.txt')
|
51437ef478136bef685aa4ca765cde8266b823ae | ab73d2e79ee4d4b6ad46a4d3bebf38ed902d2c8e | /plot4.R | 596f5e943a17ffd44fb0fd9ae0b6f4718dde1d7e | [] | no_license | ashmtwr/ExData_Plotting1 | d557306dc25dfa20a5b0013db822ec4498dc5786 | d57294f2b499829119af1de8d6541b1007947399 | refs/heads/master | 2020-05-21T08:44:54.995858 | 2015-04-12T21:09:34 | 2015-04-12T21:09:34 | 33,803,187 | 0 | 0 | null | 2015-04-12T04:38:02 | 2015-04-12T04:38:02 | null | UTF-8 | R | false | false | 1,768 | r | plot4.R | # This block of code is to read file from URL and create subset of data for plot function
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if(!file.exists("data")){
dir.create("data")
}
if(!file.exists("./data/exdata-data-household_power_consumption.zip")){
download.file(fileURL,"./data/exdata-data-household_power_consumption.zip")
unzip("./data/exdata-data-household_power_consumption.zip", exdir="./data")
}
filename <- "./data/household_power_consumption.txt"
df <- read.table(filename,header=TRUE,sep=";",colClasses=c("character","character",rep("numeric",7)),na="?")
df$Time <- strptime(paste(df$Date, df$Time), "%d/%m/%Y %H:%M:%S")
df$Date <- as.Date(df$Date, "%d/%m/%Y")
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
df <- subset(df, Date %in% dates)
# Following block of code creates the graph using base package
png("plot4.png", width=400, height=400)
par(mfrow=c(2,2)) # Define frame to accommodate 4 groups
par(mar=c(4,4,2,2))
# First Graph
plot(df$Time, df$Global_active_power,
type="l",
xlab="",
ylab="Global Active Power")
# Second Graph
plot(df$Time, df$Voltage, type="l",
xlab="datetime", ylab="Voltage")
# Third Graph
plot(df$Time, df$Sub_metering_1, type="l", col="black",
xlab="", ylab="Energy sub metering")
lines(df$Time, df$Sub_metering_2, col="red")
lines(df$Time, df$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"),c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=1,border="white",
box.lwd=0)
# Fourth Graph
plot(df$Time, df$Global_reactive_power, type="n",
xlab="datetime", ylab="Global_reactive_power")
lines(df$Time, df$Global_reactive_power)
dev.off()
|
3455fcca3813d7abbc623b570f3a1ef57e6d6420 | 3b801d00b90dee6d58f4f2c68fb7b4a242a16071 | /pre_made_scripts/LPS_belize_analysis14.R | f7404d45cacf62d4f4921b146fdd751b42fbf1c5 | [] | no_license | connor-klopfer/animal_response_nets | 421fe4319ad8524fd42432eab5967162dd7df032 | 31f9a561196f46a2a71224f77bf52c0deeb6e76e | refs/heads/master | 2023-06-14T17:21:42.976099 | 2021-06-29T18:18:51 | 2021-06-29T18:18:51 | 327,428,275 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 50,171 | r | LPS_belize_analysis14.R | # Analyze effects of LPS on Belize vampire bat social networks
# Gerry Carter and Simon Ripperger
# # set directory
# setwd(dirname(file.choose()))
# setwd("~/Dropbox/Dropbox/_working/_ACTIVE/belize_LPS/2018_Belize_analysis")
# setwd("C:/Users/simon.ripperger/Dropbox/2018_Belize_analysis")
# .libPaths("C:/R libraries")
# clear workspace
rm(list=ls())
# load packages
library(tidyverse)
library(boot)
library(lubridate)
library(lme4)
library(lmerTest)
library(igraph)
library(cowplot)
library(patchwork)
library(scales)
# choose number of permutations
perms <- 100
perms <- 10000
# make random numbers consistent
set.seed(123)
# functions----
# get the mean and 95% CI of a vector by bootstrapping
boot_ci <- function(x, perms=5000) {
mean.w=function(x,w) sum(x*w)
numNA <- sum(is.na(x))
x <- as.vector(na.omit(x))
mean <- mean(x)
boot <- boot.ci(boot(data=x, statistic=mean.w, R=perms, stype="w", parallel = "multicore", ncpus = 4), type="bca")
low <- boot$bca[1,4]
high <- boot$bca[1,5]
c(low=low,mean=mean,high=high, N=round(length(x)))
}
# get the mean and 95% CI within groups of a dataframe by bootstrapping
boot_ci2 <- function(d=d, y=d$y, x=d$x, perms=5000){
df <- data.frame(effect=unique(x))
df$low <- NA
df$mean <- NA
df$high <- NA
df$n.obs <- NA
for (i in 1:nrow(df)) {
ys <- y[which(x==df$effect[i])]
if (length(ys)>1){
b <- boot_ci(y[which(x==df$effect[i])], perms=perms)
df$low[i] <- b[1]
df$mean[i] <- b[2]
df$high[i] <- b[3]
df$n.obs[i] <- b[4]
}else{
df$low[i] <- NA
df$mean[i] <- ys
df$high[i] <- NA
df$n.obs[i] <- 1
}
}
df
}
# convert a list of dyadic interactions to a sociomatrix
# (requires igraph)
a_b_edgelist_to_matrix <- function(el=el, symbol="_", directed= T, make.NA.zero=T){
a <- str_split(as.data.frame(el)[,1],symbol, simplify = TRUE)[,1]
r <- str_split(as.data.frame(el)[,1],symbol, simplify = TRUE)[,2]
y <- as.data.frame(el)[,2]
e <- data.frame(a,r,y, stringsAsFactors = F)
if (make.NA.zero){
g <- graph_from_data_frame(e, directed=directed)
m <- get.adjacency(g, attr='y', sparse=FALSE)
m
}else{
e$y <- e$y+1 # temporarily add one to distinguish between 0 and NA
g <- graph_from_data_frame(e, directed=directed)
m <- get.adjacency(g, attr='y', sparse=FALSE)
m[m==0] <- NA # relabel missing values as NA
m <- m-1 # subtract one to adjust values back
m
}
}
# convert interactions to rates (duration within time bin)
# this function adds zeros for possible associations/interactions within each time bin
# it assumes that every individual ("ids") is present in every time bin ("bin")
# it requires a df with dyad ("a_b"), bin, and duration
events_to_rates <- function(df= df, bin= df$bin, ids= ids, directed= T){
if(directed){print("Assuming interactions are directed")}
if(!directed){print("Assuming interactions are undirected")}
if (directed){
actual.rates <-
df %>%
mutate(bin= bin) %>%
group_by(bin, dyad) %>%
summarize(duration= sum(duration)) %>%
separate(dyad, into=c("actor", "receiver"), sep="_")
possible.rates <-
expand_grid(actor= ids, receiver= ids, bin= unique(bin)) %>%
filter(actor!=receiver) %>%
mutate(duration=0)
rates <-
rbind.data.frame(actual.rates, possible.rates) %>%
group_by(bin, actor, receiver) %>%
summarize(duration= sum(duration)) %>%
ungroup() %>%
mutate(dyad= paste(actor, receiver, sep="_"))
rates
}else{
if (!directed){
actual.rates <-
df %>%
mutate(bin= bin) %>%
separate(dyad, into=c("id1", "id2"), sep="_") %>%
mutate(dyad= if_else(id1<id2, paste(id1,id2, sep="_"), paste(id2,id1, sep="_"))) %>%
group_by(bin, dyad) %>%
summarize(duration= sum(duration)) %>%
separate(dyad, into=c("id1", "id2"), sep="_")
possible.rates <-
expand_grid(id1= ids, id2= ids, bin= unique(bin)) %>%
filter(id1!=id2) %>%
filter(id1<id2) %>%
mutate(duration=0)
rates <-
rbind.data.frame(actual.rates, possible.rates) %>%
group_by(bin, id1, id2) %>%
summarize(duration= sum(duration)) %>%
ungroup() %>%
mutate(dyad= paste(id1, id2, sep="_"))
rates
}
}
return(rates)
}
# plot permutation test results
hist_perm <- function(exp=exp, obs=obs, perms=perms){
ggplot()+
geom_histogram(aes(x=exp), color="black",fill="light blue")+
xlim(min= min(c(exp,obs)), max= max(c(exp,obs)))+
geom_vline(aes(xintercept=obs), color="red", size=1)+
xlab("expected values from null model")+
labs(subtitle= paste('obs = ',round(obs, digits=2),", one-sided p = ", mean(exp>=obs),", permutations=",perms, sep=""))
}
# get standard error of the mean
se <- function(x=x){sd(x, na.rm=T)/sqrt(sum(!is.na(x)))}
# define treatment time period-----
# all bats were injected by 2 pm (1400 h) on April 25
# LPS effect begins 3 hours later (1700 h)
# captive data shows LPS effects at 3 hours and 6 hours post-injection
# LPS effects could last longer
# start sampling behavior at 5 pm on April 25
treatment_start <- as.POSIXct("2018-04-25 17:00:00", tz = "CST6CDT")
# stop sampling before midnight (before bats are likely to forage)
LPS_duration_hours <- 6
treatment_stop <- treatment_start + LPS_duration_hours*60*60
# get first post-treatment time period (same time of day 24 hours later)
post24_start <- treatment_start + 60*60*24
post24_stop <- treatment_stop + 60*60*24
# get second post-treatment time period (same time of day 48 hours later)
post48_start <- treatment_start + 60*60*48
post48_stop <- treatment_stop + 60*60*48
# clean data----
# get raw meeting data (unfused)
Belize <-
read_delim("../data/Belize_pre-fusion.csv", delim = ";")
# exclude missing bats and dropped sensors ----
# bat ID 23 also seems to have left 1 day early
excluded_bats <- c(10,14,26,41)
BatIDs <- unique(c(unique(Belize$SenderID),unique(Belize$EncounteredID)))
BatIDs <- BatIDs[! BatIDs %in% excluded_bats]
# get bat attributes
bats <-
read.csv("../data/Belize_tracked_bats02.csv", stringsAsFactors = F) %>%
mutate(sex = "female") %>%
filter(sensor_node %in% BatIDs)
# define association using RSSI ----
thresholds <- quantile(Belize$RSSI, probs = c(0, 25, 50, 75, 80, 85, 90, 95, 97.5, 99)/100)
thresholds
RSSI_threshold = thresholds[6] #set RSSI threshold
RSSI_threshold
#threshold for 2019 Current Biology paper was -26dmb at 90%; here -27dbm at 85%
# plot and print RSSI threshold-----
rssi.plot <-
Belize %>%
ggplot(aes(x=RSSI))+
geom_histogram(binwidth=1, fill= "grey", color= 'black')+
geom_vline(xintercept= RSSI_threshold)+
ggtitle("proximity sensor signal strength threshold",
subtitle= paste(names(RSSI_threshold), "threshold at", RSSI_threshold, "dBm"))+
xlab("Received Signal Strength Indicator (RSSI)")+
theme_cowplot()
ggsave("RSSI_plot.pdf", width= 6, height= 3, units="in", dpi=1200)
# clean data
df <-
Belize %>%
filter(RSSI > RSSI_threshold) %>%
filter(SenderID %in% BatIDs ) %>%
filter(EncounteredID %in% BatIDs ) %>%
select(- PacketID, -ChunkID) %>%
mutate(dyad= if_else(SenderID<EncounteredID,
paste(SenderID,EncounteredID, sep="_"),
paste(EncounteredID,SenderID, sep="_"))) %>%
mutate(EndOfMeeting = StartOfMeeting + MeetingDuration) %>%
group_by(dyad) %>%
arrange(StartOfMeeting) %>%
mutate(indx = c(0, cumsum(as.numeric(lead(StartOfMeeting)) >
cummax(as.numeric(EndOfMeeting)))[-n()])) %>%
group_by(dyad, indx) %>%
summarise(StartOfMeeting = min(StartOfMeeting),
EndOfMeeting = max(EndOfMeeting),
duration = difftime(EndOfMeeting, StartOfMeeting,unit = "secs"),
RSSI = max(RSSI)) %>%
mutate(bat1 = sub( "_.*$", "", dyad ), bat2 = sub('.*_', '', dyad)) %>%
mutate(duration = as.numeric(duration, unit = "secs"))
# insert hour breaks
# convert start and end times to interval
df$interval <- interval(df$StartOfMeeting, df$EndOfMeeting)
# label "events" sequentially
df$event <- c(1:nrow(df))
# create function to get hours within a time interval
get_hours <- function(event, StartOfMeeting, EndOfMeeting){
hours <- seq(StartOfMeeting-minute(StartOfMeeting)*60-second(StartOfMeeting),
EndOfMeeting-minute(EndOfMeeting)*60-second(EndOfMeeting),
"hour")
dateseq <- hours
dateseq[1] <- StartOfMeeting
r <- c(dateseq, EndOfMeeting)
dur <- as.numeric(difftime(r[-1], r[-length(r)], unit = 'secs'))
data.frame(event, hour = hours, duration = dur)
}
# create new events with event durations within each hour
df2 <-
df %>%
rowwise %>%
do(get_hours(.$event, .$StartOfMeeting, .$EndOfMeeting)) %>%
ungroup() %>%
group_by(event, hour) %>%
summarize(duration = sum(duration)) %>%
as.data.frame()
# match original start time back into new events
df2$StartOfMeeting <- df$StartOfMeeting[match(df2$event, df$event)]
# if start time is past the hour use that start time, otherwise use the hour slot as the start time
df2$StartOfMeeting <- if_else(df2$StartOfMeeting>df2$hour, df2$StartOfMeeting, df2$hour)
# match original end time back into new events
df2$EndOfMeeting <- df$EndOfMeeting[match(df2$event, df$event)]
# if end time is before the next hour (start hour+ 1 hour), use that end time, otherwise use the next hour
df2$EndOfMeeting <- if_else(df2$EndOfMeeting<(df2$hour+3600), df2$EndOfMeeting, df2$hour+3600)
# match other data back in
df2$dyad <- df$dyad[match(df2$event, df$event)]
df2$RSSI <- df$RSSI[match(df2$event, df$event)]
# set end of meeting
# set timezone to BelizeTime
# set start of study to 3pm on April 25th
df <-
df2 %>%
mutate(bat1 = sub( "_.*$", "", dyad ), bat2 = sub('.*_', '', dyad)) %>%
mutate(StartBelizeTime = force_tz(StartOfMeeting, tzone = "CST6CDT")) %>%
mutate(EndBelizeTime = force_tz(EndOfMeeting, tzone = "CST6CDT")) %>%
mutate(StartBelizeTime= StartBelizeTime - hours(8), EndBelizeTime=EndBelizeTime - hours(8)) %>%
filter(StartBelizeTime >= as.POSIXct("2018-04-25 15:00:00", tz = "CST6CDT")) %>%
select(StartBelizeTime,bat1,bat2,duration, RSSI)
# assign treatment for each bat into df
df$treatment_bat1 <- bats$treatment[match(df$bat1, bats$sensor_node)]
df$treatment_bat2 <- bats$treatment[match(df$bat2, bats$sensor_node)]
# remove other dataframe
rm(df2)
# label treatment and post-treatment periods
# label dyad types
d <-
df %>%
mutate(datetime= as.POSIXct(StartBelizeTime,tz = "CST6CDT")) %>%
mutate(treatment_period= datetime >= treatment_start & datetime < treatment_stop) %>%
mutate(post24_period= datetime >= post24_start & datetime < post24_stop) %>%
mutate(post48_period= datetime >= post48_start & datetime < post48_stop) %>%
mutate(dyad_type= case_when(
treatment_bat1== 'LPS' & treatment_bat2== 'LPS' ~ 'sick-sick',
treatment_bat1== 'PBS' & treatment_bat2== 'PBS' ~ 'control-control',
treatment_bat1!= treatment_bat2 ~ 'sick-control',
TRUE ~ 'NA')) %>%
mutate(hour= substring(datetime, 1,13))
# get treated bats
treated.bats <-
d %>%
mutate(treated= ifelse(treatment_bat1=="LPS", bat1,
ifelse(treatment_bat2=="LPS", bat2, NA))) %>%
filter(!is.na(treated)) %>%
pull(treated) %>%
unique()
# number of treated bats = 16
length(treated.bats)
# number of untreated bats = 15
d %>%
mutate(untreated= ifelse(treatment_bat1!="LPS", bat1,
ifelse(treatment_bat2!="LPS", bat2, NA))) %>%
filter(!is.na(untreated)) %>%
pull(untreated) %>%
unique() %>%
length()
# get hourly associations----
h <-
d %>%
mutate(dyad= paste(bat1, bat2, sep="_")) %>%
group_by(hour, dyad) %>%
summarize(duration= sum(duration)) %>%
ungroup() %>%
events_to_rates(df=., bin= .$hour, ids= BatIDs, directed=F) %>%
select(hour= bin, dyad, duration)
# make one-hour networks----------
# make multilayer network where every layer is an hour
# net.list is a list of networks (one per hour)
all.hours <- unique(h$hour)
net.list <- list()
for (i in 1:length(all.hours)){
layer <- sort(all.hours)[i]
net <-
h %>%
filter(hour==layer) %>%
select(dyad, duration) %>%
a_b_edgelist_to_matrix() %>% ###
graph_from_adjacency_matrix("undirected", weighted=T, diag=F) ###
net.list[[i]] <- net
}
# get mean centrality per treatment within each hour (as a list)
d.list <- list()
for (i in 1:length(all.hours)){
net <- net.list[[i]]
d.list[[i]] <-
tibble(hour= as.POSIXct(all.hours[i], format= "%Y-%m-%d %H", tz="CST6CDT"),
bat= names(degree(net)),
degree= degree(net),
strength= strength(net),
eigenvector= eigen_centrality(net)$vector) %>%
mutate(treated= bat %in% treated.bats)
}
# d2 is the mean centrality per treatment group and per hour ----
d2 <-
bind_rows(d.list) %>%
group_by(hour, treated) %>%
summarize(degree= mean(degree),
strength= mean(strength),
eigenvector= mean(eigenvector)) %>%
pivot_longer(cols= degree:eigenvector,
names_to = "centrality", values_to = "value") %>%
filter(hour <= as.POSIXct("2018-04-28 01:00:00", tz = "CST6CDT")) %>%
mutate(period= case_when(
hour >= treatment_start & hour < treatment_stop ~ 'treatment',
hour >= post24_start & hour < post24_stop ~ '24 hours later',
hour >= post48_start & hour < post48_stop ~ '48 hours later',
TRUE ~"none")) %>%
ungroup()
# get the corresponding standard errors
d2.se <-
bind_rows(d.list) %>%
group_by(hour, treated) %>%
summarize(degree= se(degree),
strength= se(strength),
eigenvector= se(eigenvector)) %>%
pivot_longer(cols= degree:eigenvector,
names_to = "centrality", values_to = "se") %>%
filter(hour <= as.POSIXct("2018-04-28 01:00:00", tz = "CST6CDT")) %>%
ungroup()
# add standard errors
d2 <- full_join(d2, d2.se)
rm(d2.se)
# d3 is the mean centrality per bat and per hour----
d3 <-
bind_rows(d.list) %>%
mutate(period= case_when(
hour >= treatment_start & hour < treatment_stop ~ 'treatment',
hour >= post24_start & hour < post24_stop ~ '24 hours later',
hour >= post48_start & hour < post48_stop ~ '48 hours later',
TRUE ~"none")) %>%
filter(period!="none") %>%
mutate(treatment= ifelse(treated, "sick", "control"))
# make six-hour network for each period----
network.treatment <-
d %>%
filter(treatment_period) %>%
mutate(dyad= paste(bat1,bat2, sep="_")) %>%
select(dyad, hour, duration) %>%
group_by(dyad, hour) %>%
summarize(duration= sum(duration)) %>%
group_by(dyad) %>%
summarize(duration= sum(duration)) %>%
ungroup() %>%
a_b_edgelist_to_matrix(directed=F) %>%
graph_from_adjacency_matrix("undirected", weighted=T, diag=F)
network.post24 <-
d %>%
filter(post24_period) %>%
mutate(dyad= paste(bat1,bat2, sep="_")) %>%
select(dyad, hour, duration) %>%
group_by(dyad, hour) %>%
summarize(duration= sum(duration)) %>%
group_by(dyad) %>%
summarize(duration= sum(duration)) %>%
ungroup() %>%
a_b_edgelist_to_matrix(directed=F) %>%
graph_from_adjacency_matrix("undirected", weighted=T, diag=F) ###
network.post48 <-
d %>%
filter(post48_period) %>%
mutate(dyad= paste(bat1,bat2, sep="_")) %>%
select(dyad, hour, duration) %>%
group_by(dyad, hour) %>%
summarize(duration= sum(duration)) %>%
group_by(dyad) %>%
summarize(duration= sum(duration)) %>%
ungroup() %>%
a_b_edgelist_to_matrix(directed=F) %>%
graph_from_adjacency_matrix("undirected", weighted=T, diag=F) ###
# function to plot networks
network_plot <- function(net, title=""){
set.seed(123)
V(net)$treated <- ifelse(V(net)$name %in% treated.bats, 'dark blue', "light blue")
layout <- layout_with_gem(net)
png(filename=paste(title,".png", sep=""), width = 1200, height = 1200)
plot(net,
edge.color="slate grey",
vertex.shape= 'sphere',
vertex.color=V(net)$treated,
vertex.label="",
vertex.size=8,
edge.width=log10(E(net)$weight),
layout=layout)
title(title,cex.main=5,col.main="black")
dev.off()
}
network_plot(network.treatment, 'social network during treatment period')
network_plot(network.post24, "social network after 24 hours")
network_plot(network.post48, 'social network after 48 hours')
# get network centrality by treatment period-----
centrality.treatment <-
tibble(bat= names(degree(network.treatment)),
degree= degree(network.treatment),
strength= strength(network.treatment),
eigenvector= eigen_centrality(network.treatment)$vector,
period= "treatment")
centrality.post24 <-
tibble(bat= names(degree(network.post24)),
degree= degree(network.post24),
strength= strength(network.post24),
eigenvector= eigen_centrality(network.post24)$vector,
period= "24 hours later")
centrality.post48 <-
tibble(bat= names(degree(network.post48)),
degree= degree(network.post48),
strength= strength(network.post48),
eigenvector= eigen_centrality(network.post48)$vector,
period= "48 hours later")
d.period <-
rbind(centrality.treatment, centrality.post24, centrality.post48) %>%
mutate(treated= bat %in% treated.bats) %>%
mutate(treatment= ifelse(treated, "sick", "control"))
# get mean centrality values
means.degree <-
d.period %>%
mutate(group= paste(treated, period, sep="_")) %>%
boot_ci2(y=.$degree, x=.$group) %>%
separate(effect, into=c('treated', 'period'), sep="_")
means.str<-
d.period %>%
mutate(group= paste(treated, period, sep="_")) %>%
boot_ci2(y=.$strength/3600, x=.$group) %>%
separate(effect, into=c('treated', 'period'), sep="_")
means.ec<-
d.period %>%
mutate(group= paste(treated, period, sep="_")) %>%
boot_ci2(y=.$eigenvector, x=.$group) %>%
separate(effect, into=c('treated', 'period'), sep="_")
# create function to plot effect sizes
plot_effect <- function (df){
df %>%
mutate(period2= factor(period, levels= c('treatment', '24 hours later', '48 hours later'))) %>%
mutate(treatment= ifelse(treated, 'sick', 'control')) %>%
ggplot(aes(x=treatment, y=mean, color=treatment, shape=treatment))+
facet_wrap(~period2)+
geom_point(size=3)+
geom_errorbar(aes(ymin=low, ymax=high, width=.1), size=1)+
theme_bw()+
theme(legend.position= 'none',
axis.title.y=element_blank(),
axis.title.x=element_blank())+
scale_color_manual(values=c("light blue", "dark blue"))+
scale_shape_manual(values=c("circle", 'triangle'))
}
# plot centrality by hour-----
# Hourly time series of the three centrality metrics, faceted by metric.
# Vertical lines bound the three 6-hour observation windows; the white
# geom_rects mask the gaps between windows.
ch.plot <-
d2 %>%
# rescale strength (and its SE); presumably seconds -> hours, TODO confirm
mutate(value= ifelse(centrality=='strength', value/3600, value)) %>%
mutate(se= ifelse(centrality=='strength', se/3600, se)) %>%
mutate(centrality= paste(centrality, "centrality")) %>%
mutate(centrality= factor(centrality,
levels= c("degree centrality", 'strength centrality', 'eigenvector centrality'))) %>%
mutate(injection= ifelse(treated, "sick", "control")) %>%
# error-bar limits: value +/- one standard error
mutate(high= value+se, low= value-se) %>%
ggplot(aes(x=hour, y=value, color=injection, shape=injection))+
#facet_wrap(~centrality, scales = "free_y", nrow = 3, labeller = "label_value")+
facet_wrap(~centrality, scales = "free_y", nrow = 3,
strip.position = "left",
labeller = as_labeller(c(`degree centrality` = "degree centrality",
`strength centrality` = "strength centrality",
`eigenvector centrality` = "eigenvector centrality")))+
geom_point()+
geom_errorbar(aes(ymin=low, ymax= high), width=0.1)+
geom_line()+
geom_rect(aes(xmin=treatment_stop, xmax=post24_start, ymin=0, ymax=Inf), fill='white', color=NA, alpha=0.02)+
geom_rect(aes(xmin=post24_stop, xmax=post48_start, ymin=0, ymax=Inf), fill='white', color=NA, alpha=0.02)+
geom_rect(aes(xmin=post48_stop, xmax=max(d2$hour)+1500, ymin=0, ymax=Inf), fill='white', color=NA, alpha=0.02)+
geom_vline(xintercept = treatment_start, color= "black")+
geom_vline(xintercept=treatment_stop, color= "black")+
geom_vline(xintercept = post24_start, linetype= "solid")+
geom_vline(xintercept=post24_stop, linetype= "solid")+
geom_vline(xintercept = post48_start, linetype= "solid")+
geom_vline(xintercept=post48_stop, linetype= "solid")+
theme_bw()+
ylab(NULL) +
theme(strip.background = element_blank(),
strip.placement = "outside",
axis.title.x=element_blank(),
legend.position = c(0.18, 0.72),
legend.margin=margin(c(0.5,0.5,0.5,0.5)),
legend.justification = "left",
legend.title = element_blank(),
legend.direction = "horizontal",
legend.background = element_rect(fill='white',
size=0.5, linetype="solid"))+
scale_x_datetime(breaks=date_breaks("6 hour"), labels=date_format("%H:%M", tz = "CST6CDT")) +
scale_color_manual(values=c("light blue", "dark blue"))+
scale_shape_manual(values=c("circle", 'triangle'))
ch.plot
# add panels of effect sizes
# (%>% binds tighter than +, so each line is (data %>% plot_effect()) + theme)
p1 <- means.degree %>% plot_effect() + theme(axis.title.x=element_blank())
p2 <- means.str %>% plot_effect() + theme(axis.title.x=element_blank())
p3 <- means.ec %>% plot_effect() + theme(axis.title.x=element_blank())
p4 <- plot_grid(p1,p2,p3, ncol=1, align = 'hv', rel_heights = c(1,1,1))
# plot and print to PDF
plot_grid(ch.plot, p4, ncol = 2, axis= 't',align = "v", rel_widths = c(2,1))
ggsave("centrality_by_hour.pdf", width= 11, height= 5, units="in", dpi=1200)
# fit models----
# general linear mixed effect model of LPS effect on degree
# response = degree centrality (one network per time period)
# fixed effect = time period, treatment, and their interaction
# random effect = bat
# first fit parametric GLM model
# add day as variable
# NOTE: `day` is numeric (1-3), so the models treat period as a linear trend.
# Also, `data` masks utils::data() for the rest of the session.
data <-
d.period %>%
mutate(day= case_when(
period=="treatment" ~ 1,
period=="24 hours later" ~ 2,
period=="48 hours later" ~ 3))
# get standardized effect sizes
# scale() standardizes the response, so slopes are in SD units.
# get degree effects
fit1 <- summary(lmer(scale(degree)~ day*treated+(1|bat), data= data))
fit2 <- summary(lm(scale(degree)~ treated, data= data[which(data$day==1),])) #effect during treatment
fit2b <- summary(lm(scale(degree)~ treated, data= data[which(data$day==2),]))
fit3 <- summary(lm(scale(degree)~ treated, data= data[which(data$day==3),])) #effect outside treatment
# get strength effects
fit4 <- summary(lmer(scale(strength)~ day*treated+(1|bat), data= data))
fit5 <- summary(lm(scale(strength)~ treated, data= data[which(data$day==1),]))
fit5b <- summary(lm(scale(strength)~ treated, data= data[which(data$day==2),]))
fit6 <- summary(lm(scale(strength)~ treated, data= data[which(data$day==3),]))
# get eigenvector effects
fit7 <- summary(lmer(scale(eigenvector)~ day*treated+(1|bat), data= data))
fit8 <- summary(lm(scale(eigenvector)~ treated, data= data[which(data$day==1),]))
fit8b <- summary(lm(scale(eigenvector)~ treated, data= data[which(data$day==2),]))
fit9 <- summary(lm(scale(eigenvector)~ treated, data= data[which(data$day==3),]))
# get biologically meaningful effect sizes
# (unstandardized models, printed for interpretation only -- not stored)
# get change in number of bats
summary(lm((degree)~ treated, data= data[which(data$day==1),]))
# 4 fewer bats
# get change in time spent per bat
summary(lm((strength/30)~ treated, data= data[which(data$day==1),]))
# 1510 fewer seconds/six hour
# get observed slopes
# NOTE(review): coefficients are pulled by row POSITION, which silently breaks
# if the model formula changes; name-based indexing (e.g.
# coefficients["day:treatedTRUE", 1]) would be safer -- confirm row names.
# degree
obs1 <- fit1$coefficients[4,1] # interaction effect treatedTrue:period
obs1.1 <- fit1$coefficients[3,1] # LPS effect controlling for period
obs2 <- fit2$coefficients[2,1] # LPS effect during treatment
obs2.1 <- fit2b$coefficients[2,1] # LPS effect on day 2
obs3 <- fit3$coefficients[2,1] # LPS effect post treatment
# strength
obs4 <- fit4$coefficients[4,1] # interaction effect treatedTrue:period
obs4.1 <- fit4$coefficients[3,1] # LPS effect controlling for period
obs5 <- fit5$coefficients[2,1] # LPS effect during treatment
obs5.1 <- fit5b$coefficients[2,1] # LPS effect on day 2
obs6 <- fit6$coefficients[2,1] # LPS effect post treatment
# eigenvector
obs7 <- fit7$coefficients[4,1] # interaction effect treatedTrue:period
obs7.1 <- fit7$coefficients[3,1] # LPS effect controlling for period
obs8 <- fit8$coefficients[2,1] # LPS effect during treatment
obs8.1 <- fit8b$coefficients[2,1] # LPS effect on day 2
obs9 <- fit9$coefficients[2,1] # LPS effect post treatment
# permutation test to obtain non-parametric p-values
# Null model: which bats were LPS-treated is random. Each iteration reassigns
# "treated" to a random set of bats (same size as the real set) and refits all
# nine models, storing the relevant slope from each.
# Set the flag to FALSE to reuse cached results from perm_results.Rdata.
if (TRUE){
# get observed coefficients
# get expected
# Preallocate one null-distribution vector per observed coefficient
# (`perms` is defined earlier in the script).
exp1 <- rep(NA, perms)
exp1.1 <- rep(NA, perms)
exp2 <- rep(NA, perms)
exp2.1 <- rep(NA, perms)
exp3 <- rep(NA, perms)
exp4 <- rep(NA, perms)
exp4.1 <- rep(NA, perms)
exp5 <- rep(NA, perms)
exp5.1 <- rep(NA, perms)
exp6 <- rep(NA, perms)
exp7 <- rep(NA, perms)
exp7.1 <- rep(NA, perms)
exp8 <- rep(NA, perms)
exp8.1 <- rep(NA, perms)
exp9 <- rep(NA, perms)
start <- Sys.time()
for (i in 1:perms){
# swap which bats are treated
# sum(treated)==3 means the bat was flagged treated in all three periods.
random.treated.bats <-
d.period %>%
group_by(bat) %>%
summarize(treated=sum(treated)==3) %>%
mutate(random.treated= sample(treated)) %>%
filter(random.treated) %>%
pull(bat)
# refit models with random treated bats
rdata <-
d.period %>%
mutate(day= case_when(
period=="treatment" ~ 1,
period=="24 hours later" ~ 2,
period=="48 hours later" ~ 3)) %>%
# relabel treated bats
mutate(treated= bat %in% random.treated.bats)
# get degree effects
rfit1 <- summary(lmer(scale(degree)~ day*treated+(1|bat), data= rdata))
rfit2 <- summary(lm(scale(degree)~ treated, data= rdata[which(rdata$day==1),])) #effect during treatment
rfit2b <- summary(lm(scale(degree)~ treated, data= rdata[which(rdata$day==2),])) # effect on day 2
rfit3 <- summary(lm(scale(degree)~ treated, data= rdata[which(rdata$day==3),])) #effect outside treatment
# get strength effects
rfit4 <- summary(lmer(scale(strength)~ day*treated+(1|bat), data= rdata))
rfit5 <- summary(lm(scale(strength)~ treated, data= rdata[which(rdata$day==1),]))
rfit5b <- summary(lm(scale(strength)~ treated, data= rdata[which(rdata$day==2),])) # effect on day 2
rfit6 <- summary(lm(scale(strength)~ treated, data= rdata[which(rdata$day==3),]))
# get eigenvector effects
rfit7 <- summary(lmer(scale(eigenvector)~ day*treated+(1|bat), data= rdata))
rfit8 <- summary(lm(scale(eigenvector)~ treated, data= rdata[which(rdata$day==1),]))
rfit8b <- summary(lm(scale(eigenvector)~ treated, data= rdata[which(rdata$day==2),])) # effect on day 2
rfit9 <- summary(lm(scale(eigenvector)~ treated, data= rdata[which(rdata$day==3),]))
# save coefficients
exp1[i]<- rfit1$coefficients[4,1]
exp1.1[i]<- rfit1$coefficients[3,1]
exp2[i] <- rfit2$coefficients[2,1]
exp2.1[i] <- rfit2b$coefficients[2,1]
exp3[i] <- rfit3$coefficients[2,1]
exp4[i]<- rfit4$coefficients[4,1]
exp4.1[i]<- rfit4$coefficients[3,1]
exp5[i] <- rfit5$coefficients[2,1]
exp5.1[i] <- rfit5b$coefficients[2,1]
exp6[i] <- rfit6$coefficients[2,1]
exp7[i]<- rfit7$coefficients[4,1]
exp7.1[i]<- rfit7$coefficients[3,1]
exp8[i] <- rfit8$coefficients[2,1]
exp8.1[i] <- rfit8b$coefficients[2,1]
exp9[i] <- rfit9$coefficients[2,1]
# progress report every 10 iterations
if(i%%10==0) print(paste(i, "of", perms))
}
# cache the null distributions so the loop can be skipped on re-runs
save(list= c('exp1', 'exp1.1','exp2','exp2.1', 'exp3','exp4', 'exp4.1','exp5','exp5.1', 'exp6','exp7', 'exp7.1','exp8', 'exp8.1', 'exp9'),
file= "perm_results.Rdata") ###
}else{ load('perm_results.Rdata') }
# end permutation test
# Report how long the permutation loop took. `start` only exists when the
# permutations above were actually run (not when cached results were loaded
# from perm_results.Rdata), so guard against the load() branch, which would
# otherwise error on the missing object.
if (exists("start")) {
  speed <- Sys.time() - start
  print(speed)
}
# get perm test results----
# model coefficients (slopes) and p-values
# One histogram per test: the permutation null distribution (exp*) with the
# observed slope (obs*) marked, via the project helper hist_perm().
# degree
# interaction effect (does LPS effect differ by treatment period?)
t1 <- hist_perm(exp1, obs1, perms)+labs(title='does LPS effect on degree differ by treatment period?')
# does LPS have an effect (controlling for period)?
t2 <- hist_perm(exp1.1, obs1.1, perms)+labs(title='does LPS affect degree?')
# does LPS have effect during treatment period?
t3 <- hist_perm(exp2, obs2, perms)+labs(title='does LPS affect degree in treatment period?')
t3.1 <- hist_perm(exp2.1, obs2.1, perms)+labs(title='does LPS affect degree on day 2?')
# does LPS have effect outside the treatment period?
t4 <- hist_perm(exp3, obs3, perms)+labs(title='does LPS affect degree in post-treatment period?')
# strength
# interaction effect (does LPS effect differ by treatment period?)
t5 <- hist_perm(exp4, obs4, perms)+labs(title='does LPS effect on strength differ by treatment period?')
# does LPS have an effect (controlling for period)?
t6 <- hist_perm(exp4.1, obs4.1, perms)+labs(title='does LPS affect strength?')
# does LPS have effect during treatment period?
t7 <- hist_perm(exp5, obs5, perms)+labs(title='does LPS affect strength in treatment period?')
t7.1 <- hist_perm(exp5.1, obs5.1, perms)+labs(title='does LPS affect strength on day 2?')
# does LPS have effect outside the treatment period?
t8 <- hist_perm(exp6, obs6, perms)+labs(title='does LPS affect strength in post-treatment period?')
# eigenvector
# interaction effect (does LPS effect differ by treatment period?)
t9 <- hist_perm(exp7, obs7, perms)+labs(title='does LPS effect on eigenvector centrality differ by treatment period?')
# does LPS have an effect (controlling for period)?
t10 <- hist_perm(exp7.1, obs7.1, perms)+labs(title='does LPS affect eigenvector centrality?')
# does LPS have effect during treatment period?
t11 <- hist_perm(exp8, obs8, perms)+labs(title='does LPS affect eigenvector centrality in treatment period?')
# t11.1 tests the day-2 coefficient (exp8.1/obs8.1); the original title said
# "in treatment period" -- a copy-paste error, fixed to match t3.1 and t7.1.
t11.1 <- hist_perm(exp8.1, obs8.1, perms)+labs(title='does LPS affect eigenvector centrality on day 2?')
# does LPS have effect outside the treatment period?
t12 <- hist_perm(exp9, obs9, perms)+labs(title='does LPS affect eigenvector centrality in post-treatment period?')
# combine plots
(perm.test <- t1+t2+t3+t3.1+t4+t5+t6+t7+t7.1+t8+t9+t10+t11+t11.1+t12+plot_layout(ncol=3))
ggsave("perm_tests.pdf", width=18, height= 15, units="in", dpi=1200)
# make results table-----
# Pair each observed coefficient (obs*) with its permutation-null distribution
# (exp*) once, then derive every p-value column mechanically instead of
# hand-typing the same 15-element vectors three times. local() keeps the
# helper lists out of the global environment, so the only side effect is
# `results` itself (same columns as before: response, fixed_effect,
# coefficient, pvalue, pvalue2).
results <- local({
  obs.list <- list(obs1, obs1.1, obs2, obs2.1, obs3,
                   obs4, obs4.1, obs5, obs5.1, obs6,
                   obs7, obs7.1, obs8, obs8.1, obs9)
  exp.list <- list(exp1, exp1.1, exp2, exp2.1, exp3,
                   exp4, exp4.1, exp5, exp5.1, exp6,
                   exp7, exp7.1, exp8, exp8.1, exp9)
  data.frame(response= rep(c('degree', 'strength', 'eigenvector'), times=1, each=5),
             fixed_effect= rep(c('interaction', 'treatment', 'treatment.within.period','treatment.day2', 'treatment.within.post'), times=3),
             coefficient= unlist(obs.list),
             # one-sided p-value: fraction of null slopes >= observed slope
             pvalue= unlist(Map(function(e, o) mean(e >= o), exp.list, obs.list))) %>%
    # flip the one-sided p when the observed slope is negative
    # NOTE(review): 1 - mean(e >= o) equals mean(e < o), so ties count toward
    # the significant side for negative effects -- confirm this is intended
    mutate(pvalue= if_else(coefficient<0, (1-pvalue), pvalue)) %>%
    # two-sided p-value: fraction of |null| >= |observed|
    mutate(pvalue2= unlist(Map(function(e, o) mean(abs(e) >= abs(o)), exp.list, obs.list)))
})
results
# Alternate analysis----
# this is an alternate analysis requested by reviewers
if(TRUE){
# fit models on HOURLY DATA----
# general linear mixed effect model of LPS effect on degree
# response = degree centrality (one network per hour)
# fixed effect = time period, treatment, and their interaction
# random effect = bat, hour
# NOTE: this re-analysis overwrites `data` from the per-period analysis above.
time1=Sys.time()
perms2=1000
# first fit parametric GLM model
# add day as variable
data <-
d3 %>%
mutate(day= case_when(
period=="treatment" ~ 1,
period=="24 hours later" ~ 2,
period=="48 hours later" ~ 3)) %>%
# convert datetime to hours (1-6)
# characters 12-13 of the "YYYY-MM-DD HH..." string are the hour; subtracting
# 16 maps 17:00-22:00 onto 1-6 -- assumes that fixed string format, confirm
mutate(hour= as.numeric(substring(hour, 12,13)) -16 ) %>%
# convert to string
# (character `hour` so lmer treats it as a grouping factor, not a trend)
mutate(hour=as.character(hour))
# get degree effects
# Full-data models add (1|bat) on top of (1|hour); the within-day models use
# only (1|hour). Coefficients are again pulled by row position below --
# fragile if the formulas change (see note in the per-period analysis).
fit1 <- summary(lmer(scale(degree)~ day*treated+(1|hour) +(1|bat), data= data))
fit2 <- summary(lmer(scale(degree)~ treated+(1|hour), data= data[which(data$day==1),])) #effect during treatment
fit2b <- summary(lmer(scale(degree)~ treated+(1|hour), data= data[which(data$day==2),]))
fit3 <- summary(lmer(scale(degree)~ treated+(1|hour), data= data[which(data$day==3),])) #effect outside treatment
# get strength effects
fit4 <- summary(lmer(scale(strength)~ day*treated+(1|hour)+(1|bat), data= data))
fit5 <- summary(lmer(scale(strength)~ treated+(1|hour), data= data[which(data$day==1),]))
fit5b <- summary(lmer(scale(strength)~ treated+(1|hour), data= data[which(data$day==2),]))
fit6 <- summary(lmer(scale(strength)~ treated+(1|hour), data= data[which(data$day==3),]))
# get eigenvector effects
fit7 <- summary(lmer(scale(eigenvector)~ day*treated+(1|hour)+(1|bat), data= data))
fit8 <- summary(lmer(scale(eigenvector)~ treated+(1|hour), data= data[which(data$day==1),]))
fit8b <- summary(lmer(scale(eigenvector)~ treated+(1|hour), data= data[which(data$day==2),]))
fit9 <- summary(lmer(scale(eigenvector)~ treated+(1|hour), data= data[which(data$day==3),]))
# get observed slopes
# degree
obs1 <- fit1$coefficients[4,1] # interaction effect treatedTrue:period
obs1.1 <- fit1$coefficients[3,1] # LPS effect controlling for period
obs2 <- fit2$coefficients[2,1] # LPS effect during treatment
obs2.1 <- fit2b$coefficients[2,1] # LPS effect on day 2
obs3 <- fit3$coefficients[2,1] # LPS effect post treatment
# strength
obs4 <- fit4$coefficients[4,1] # interaction effect treatedTrue:period
obs4.1 <- fit4$coefficients[3,1] # LPS effect controlling for period
obs5 <- fit5$coefficients[2,1] # LPS effect during treatment
obs5.1 <- fit5b$coefficients[2,1] # LPS effect on day 2
obs6 <- fit6$coefficients[2,1] # LPS effect post treatment
# eigenvector
obs7 <- fit7$coefficients[4,1] # interaction effect treatedTrue:period
obs7.1 <- fit7$coefficients[3,1] # LPS effect controlling for period
obs8 <- fit8$coefficients[2,1] # LPS effect during treatment
obs8.1 <- fit8b$coefficients[2,1] # LPS effect on day 2
obs9 <- fit9$coefficients[2,1] # LPS effect post treatment
# permutation test to obtain non-parametric p-values
# Same scheme as the per-period test: shuffle which bats are "treated" and
# refit all models. Set the flag to FALSE to reuse perm_results02.Rdata.
if (TRUE){
# get observed coefficients
# get expected
# Preallocate one null-distribution vector per observed coefficient.
exp1 <- rep(NA, perms2)
exp1.1 <- rep(NA, perms2)
exp2 <- rep(NA, perms2)
exp2.1 <- rep(NA, perms2)
exp3 <- rep(NA, perms2)
exp4 <- rep(NA, perms2)
exp4.1 <- rep(NA, perms2)
exp5 <- rep(NA, perms2)
exp5.1 <- rep(NA, perms2)
exp6 <- rep(NA, perms2)
exp7 <- rep(NA, perms2)
exp7.1 <- rep(NA, perms2)
exp8 <- rep(NA, perms2)
exp8.1 <- rep(NA, perms2)
exp9 <- rep(NA, perms2)
start <- Sys.time()
for (i in 1:perms2){
# swap which bats are treated
random.treated.bats <-
d.period %>%
group_by(bat) %>%
summarize(treated=sum(treated)==3) %>%
mutate(random.treated= sample(treated)) %>%
filter(random.treated) %>%
pull(bat)
# refit models with random treated bats
rdata <-
d3 %>%
mutate(day= case_when(
period=="treatment" ~ 1,
period=="24 hours later" ~ 2,
period=="48 hours later" ~ 3)) %>%
# convert datetime to hours (1-6)
mutate(hour= as.numeric(substring(hour, 12,13)) -16 ) %>%
# convert to string
mutate(hour=as.character(hour)) %>%
# relabel treated bats
mutate(treated= bat %in% random.treated.bats)
# get degree effects
# NOTE(review): these permuted refits use only (1|hour) as a random effect,
# whereas the observed full-data fits above also include (1|bat) -- confirm
# the null model is meant to differ from the observed model.
rfit1 <- summary(lmer(scale(degree)~ day*treated+(1|hour), data= rdata))
rfit2 <- summary(lmer(scale(degree)~ treated+(1|hour), data= rdata[which(rdata$day==1),])) #effect during treatment
rfit2b <- summary(lmer(scale(degree)~ treated+(1|hour), data= rdata[which(rdata$day==2),])) # effect on day 2
rfit3 <- summary(lmer(scale(degree)~ treated+(1|hour), data= rdata[which(rdata$day==3),])) #effect outside treatment
# get strength effects
rfit4 <- summary(lmer(scale(strength)~ day*treated+(1|hour), data= rdata))
rfit5 <- summary(lmer(scale(strength)~ treated +(1|hour), data= rdata[which(rdata$day==1),]))
rfit5b <- summary(lmer(scale(strength)~ treated +(1|hour), data= rdata[which(rdata$day==2),])) # effect on day 2
rfit6 <- summary(lmer(scale(strength)~ treated+(1|hour), data= rdata[which(rdata$day==3),]))
# get eigenvector effects
rfit7 <- summary(lmer(scale(eigenvector)~ day*treated+(1|hour), data= rdata))
rfit8 <- summary(lmer(scale(eigenvector)~ treated+(1|hour), data= rdata[which(rdata$day==1),]))
rfit8b <- summary(lmer(scale(eigenvector)~ treated+(1|hour), data= rdata[which(rdata$day==2),])) # effect on day 2
rfit9 <- summary(lmer(scale(eigenvector)~ treated+(1|hour), data= rdata[which(rdata$day==3),]))
# save coefficients
exp1[i]<- rfit1$coefficients[4,1]
exp1.1[i]<- rfit1$coefficients[3,1]
exp2[i] <- rfit2$coefficients[2,1]
exp2.1[i] <- rfit2b$coefficients[2,1]
exp3[i] <- rfit3$coefficients[2,1]
exp4[i]<- rfit4$coefficients[4,1]
exp4.1[i]<- rfit4$coefficients[3,1]
exp5[i] <- rfit5$coefficients[2,1]
exp5.1[i] <- rfit5b$coefficients[2,1]
exp6[i] <- rfit6$coefficients[2,1]
exp7[i]<- rfit7$coefficients[4,1]
exp7.1[i]<- rfit7$coefficients[3,1]
exp8[i] <- rfit8$coefficients[2,1]
exp8.1[i] <- rfit8b$coefficients[2,1]
exp9[i] <- rfit9$coefficients[2,1]
# progress report every 10 iterations
if(i%%10==0) print(paste(i, "of", perms2))
}
# cache the null distributions; these overwrite the per-period exp* vectors
save(list= c('exp1', 'exp1.1','exp2','exp2.1', 'exp3','exp4', 'exp4.1','exp5','exp5.1', 'exp6','exp7', 'exp7.1','exp8', 'exp8.1', 'exp9'),
file= "perm_results02.Rdata") ###
}else{ load('perm_results02.Rdata') }
# end permutation test
# get perm test results
# model coefficients (slopes) and p-values
# One histogram per test: permutation null (exp*) with observed slope (obs*).
# degree
# interaction effect (does LPS effect differ by treatment period?)
t1 <- hist_perm(exp1, obs1, perms2)+labs(title='does LPS effect on degree differ by treatment period?')
# does LPS have an effect (controlling for period)?
t2 <- hist_perm(exp1.1, obs1.1, perms2)+labs(title='does LPS affect degree?')
# does LPS have effect during treatment period?
t3 <- hist_perm(exp2, obs2, perms2)+labs(title='does LPS affect degree in treatment period?')
t3.1 <- hist_perm(exp2.1, obs2.1, perms2)+labs(title='does LPS affect degree on day 2?')
# does LPS have effect outside the treatment period?
t4 <- hist_perm(exp3, obs3, perms2)+labs(title='does LPS affect degree in post-treatment period?')
# strength
# interaction effect (does LPS effect differ by treatment period?)
t5 <- hist_perm(exp4, obs4, perms2)+labs(title='does LPS effect on strength differ by treatment period?')
# does LPS have an effect (controlling for period)?
t6 <- hist_perm(exp4.1, obs4.1, perms2)+labs(title='does LPS affect strength?')
# does LPS have effect during treatment period?
t7 <- hist_perm(exp5, obs5, perms2)+labs(title='does LPS affect strength in treatment period?')
t7.1 <- hist_perm(exp5.1, obs5.1, perms2)+labs(title='does LPS affect strength on day 2?')
# does LPS have effect outside the treatment period?
t8 <- hist_perm(exp6, obs6, perms2)+labs(title='does LPS affect strength in post-treatment period?')
# eigenvector
# interaction effect (does LPS effect differ by treatment period?)
t9 <- hist_perm(exp7, obs7, perms2)+labs(title='does LPS effect on eigenvector centrality differ by treatment period?')
# does LPS have an effect (controlling for period)?
t10 <- hist_perm(exp7.1, obs7.1, perms2)+labs(title='does LPS affect eigenvector centrality?')
# does LPS have effect during treatment period?
t11 <- hist_perm(exp8, obs8, perms2)+labs(title='does LPS affect eigenvector centrality in treatment period?')
# t11.1 tests the day-2 coefficient (exp8.1/obs8.1); the original title said
# "in treatment period" -- a copy-paste error, fixed to match t3.1 and t7.1.
t11.1 <- hist_perm(exp8.1, obs8.1, perms2)+labs(title='does LPS affect eigenvector centrality on day 2?')
# does LPS have effect outside the treatment period?
t12 <- hist_perm(exp9, obs9, perms2)+labs(title='does LPS affect eigenvector centrality in post-treatment period?')
# combine plots
(perm.test2 <- t1+t2+t3+t3.1+t4+t5+t6+t7+t7.1+t8+t9+t10+t11+t11.1+t12+plot_layout(ncol=3))
ggsave("perm_tests2.pdf", width=18, height= 15, units="in", dpi=1200)
# make results table
# Hourly-network results; same layout as `results` above (response,
# fixed_effect, coefficient, pvalue, pvalue2).
results2 <-
data.frame(response= rep(c('degree', 'strength', 'eigenvector'), times=1, each=5),
fixed_effect= rep(c('interaction', 'treatment', 'treatment.within.period','treatment.day2', 'treatment.within.post'), times=3),
coefficient= c(obs1, obs1.1, obs2, obs2.1, obs3, obs4, obs4.1, obs5, obs5.1, obs6, obs7, obs7.1, obs8, obs8.1, obs9),
# one-sided p-value: fraction of null slopes >= observed slope
pvalue= c(mean(exp1>=obs1),
mean(exp1.1>=obs1.1),
mean(exp2>=obs2),
mean(exp2.1>=obs2.1),
mean(exp3>=obs3),
mean(exp4>=obs4),
mean(exp4.1>=obs4.1),
mean(exp5>=obs5),
mean(exp5.1>=obs5.1),
mean(exp6>=obs6),
mean(exp7>=obs7),
mean(exp7.1>=obs7.1),
mean(exp8>=obs8),
mean(exp8.1>=obs8.1),
mean(exp9>=obs9))) %>%
# flip the one-sided p when the observed slope is negative
mutate(pvalue= if_else(coefficient<0, (1-pvalue), pvalue)) %>%
# two-sided p-value: fraction of |null| >= |observed|.
# The original mutate() assigned pvalue2 twice; the first (one-sided) vector
# was dead code, silently overwritten by this absolute-value version, so it
# has been removed. Final values are unchanged.
mutate(pvalue2= c(mean(abs(exp1) >= abs(obs1)),
mean(abs(exp1.1) >= abs(obs1.1)),
mean(abs(exp2) >= abs(obs2)),
mean(abs(exp2.1) >= abs(obs2.1)),
mean(abs(exp3) >= abs(obs3)),
mean(abs(exp4) >= abs(obs4)),
mean(abs(exp4.1) >= abs(obs4.1)),
mean(abs(exp5) >= abs(obs5)),
mean(abs(exp5.1) >= abs(obs5.1)),
mean(abs(exp6) >= abs(obs6)),
mean(abs(exp7) >= abs(obs7)),
mean(abs(exp7.1) >= abs(obs7.1)),
mean(abs(exp8) >= abs(obs8)),
mean(abs(exp8.1) >= abs(obs8.1)),
mean(abs(exp9) >= abs(obs9))))
results2
# elapsed time for the whole alternate (hourly) analysis
time2 <- Sys.time() - time1
time2
}
# get associations by dyad type----
# label edge types in hourly data
# Unique dyad -> dyad_type lookup from the raw association data `d`.
dt <-
d %>%
mutate(dyad= paste(bat1, bat2, sep='_')) %>%
group_by(dyad, dyad_type) %>%
summarize(n=n())
# base-R lookup join: copy each hourly dyad's type from the lookup table
h$dyad_type <- dt$dyad_type[match(h$dyad, dt$dyad)]
# get mean association time and prob per edge type per hour
e <-
h %>%
mutate(hour= as.POSIXct(hour, format= "%Y-%m-%d %H", tz="CST6CDT")) %>%
group_by(hour, dyad_type) %>%
summarize(`mean association duration`= mean(duration),
`mean association probability`= mean(duration>0)) %>%
ungroup() %>%
# drop hours after the end of the experiment
filter(hour <= as.POSIXct("2018-04-28 01:00:00", tz = "CST6CDT")) %>%
# long format: one row per (hour, dyad_type, measure)
pivot_longer(cols= 'mean association duration' : 'mean association probability',
values_to = 'value', names_to = 'measure') %>%
mutate(period= case_when(
hour >= treatment_start & hour < treatment_stop ~ 'treatment',
hour >= post24_start & hour < post24_stop ~ '24 hours later',
hour >= post48_start & hour < post48_stop ~ '48 hours later',
TRUE ~"none"))
# plot dyad-type associations per hour-----
# Hourly association duration/probability by dyad type; same window-masking
# rectangles and vertical lines as the centrality figure above. The outer
# parentheses both assign and print the plot.
(eh.plot <-
e %>%
ggplot(aes(x=hour, y=value, color= dyad_type, shape=dyad_type, linetype= dyad_type))+
facet_wrap(~measure, ncol=1, scales="free_y")+
geom_point()+
geom_line()+
geom_rect(aes(xmin=treatment_stop, xmax=post24_start, ymin=0, ymax=Inf), fill='white', color=NA, alpha=0.01)+
geom_rect(aes(xmin=post24_stop, xmax=post48_start, ymin=0, ymax=Inf), fill='white', color=NA, alpha=0.01)+
geom_rect(aes(xmin=post48_stop, xmax=max(d2$hour)+1500, ymin=0, ymax=Inf), fill='white', color=NA, alpha=0.01)+
geom_vline(xintercept = treatment_start, color= "black")+
geom_vline(xintercept=treatment_stop, color= "black")+
geom_vline(xintercept = post24_start, linetype= "solid")+
geom_vline(xintercept=post24_stop, linetype= "solid")+
geom_vline(xintercept = post48_start, linetype= "solid")+
geom_vline(xintercept=post48_stop, linetype= "solid")+
theme_bw()+
ylab("")+
theme(legend.position = c(0.18, 0.58),
legend.margin=margin(c(0.5,0.5,0.5,0.5)),
legend.justification = "left",
legend.title = element_blank(),
legend.direction = "horizontal",
legend.background = element_rect(fill='white',
size=0.5, linetype="solid"))+
scale_x_datetime(breaks=date_breaks("6 hour"), labels=date_format("%H:%M", tz = "CST6CDT")) +
scale_color_manual(values=c("black", "red", "springgreen"))+
scale_linetype_manual(values= c('solid', 'dashed', 'dotted'))+
scale_shape_manual(values=c("circle", "triangle", "square")))
# get association times and probs by dyad type during treatment and post-treatment periods----
# Two-stage average: per (hour, dyad) first, then per (dyad, period), so each
# dyad contributes equally regardless of how many hours it was observed.
# Rows outside the three observation windows (period == 'none') are dropped.
d4 <-
h %>%
mutate(hour= as.POSIXct(hour, format= "%Y-%m-%d %H", tz="CST6CDT")) %>%
filter(hour <= as.POSIXct("2018-04-28 01:00:00", tz = "CST6CDT")) %>%
mutate(period= case_when(
hour >= treatment_start & hour < treatment_stop ~ 'treatment',
hour >= post24_start & hour < post24_stop ~ '24 hours later',
hour >= post48_start & hour < post48_stop ~ '48 hours later',
TRUE ~"none")) %>%
filter(period!='none') %>%
group_by(hour, dyad, dyad_type, period) %>%
summarize(time= mean(duration),
prob= mean(duration>0)) %>%
group_by(dyad, dyad_type, period) %>%
summarize(time= mean(time),
prob= mean(prob),
n= n()) %>%
ungroup()
# get means and 95% CI
# Bootstrap mean + 95% CI of association time / probability per period, split
# by dyad type. d4 already excludes period == 'none', so no further period
# filtering is needed (a redundant filter(period!='none') that appeared only
# in the sick-control probability pipeline was removed for consistency).
assoc.untreated <-
d4 %>%
filter(dyad_type=='control-control') %>%
select(period, time, prob) %>%
boot_ci2(y=.$time, x=.$period) %>%
mutate(type= 'control-control')
assoc.mixed <-
d4 %>%
filter(dyad_type=='sick-control') %>%
select(period, time, prob) %>%
boot_ci2(y=.$time, x=.$period) %>%
mutate(type= 'sick-control')
prob.untreated <-
d4 %>%
filter(dyad_type=='control-control') %>%
select(period, time, prob) %>%
boot_ci2(y=.$prob, x=.$period) %>%
mutate(type= 'control-control')
prob.mixed <-
d4 %>%
filter(dyad_type=='sick-control') %>%
select(period, time, prob) %>%
boot_ci2(y=.$prob, x=.$period) %>%
mutate(type= 'sick-control')
# plot association time
# NOTE: p1/p2 overwrite the centrality-figure panels defined earlier; that
# figure has already been saved to PDF at this point.
p1 <-
rbind(assoc.untreated, assoc.mixed) %>%
mutate(period= factor(effect, levels= c('treatment', '24 hours later', '48 hours later'))) %>%
ggplot(aes(x=type, y=mean, color=type, shape=type))+
facet_wrap(~period)+
geom_point(size=3)+
geom_errorbar(aes(ymin=low, ymax=high, width=.1), size=1)+
scale_color_manual(values=c("black", 'red'))+
scale_shape_manual(values=c('circle', 'triangle'))+
theme_bw()+
# x tick labels suppressed here; p2 below carries the shared x labels
theme(legend.position= 'none',
axis.title.y=element_blank(),
axis.title.x=element_blank(),
axis.text.x=element_blank())
# plot association prob
p2 <-
rbind(prob.untreated, prob.mixed) %>%
mutate(period= factor(effect, levels= c('treatment', '24 hours later', '48 hours later'))) %>%
# abbreviate dyad-type labels so they fit under the narrow panels
mutate(type= ifelse(type=="control-control", "c-c", 's-c')) %>%
ggplot(aes(x=type, y=mean, color=type, shape=type))+
facet_wrap(~period)+
geom_point(size=3)+
geom_errorbar(aes(ymin=low, ymax=high, width=.1), size=1)+
scale_color_manual(values=c("black", 'red'))+
scale_shape_manual(values=c('circle', 'triangle'))+
theme_bw()+
theme(legend.position= 'none',
axis.title.y=element_blank())+
xlab("dyad type")
# plot and make PDF
# patchwork layout: hourly figure on the left, stacked effect panels right
(eh.plot2 <- (eh.plot|(p1+p2+plot_layout(ncol=1)))+plot_layout(widths= c(2,1)))
ggsave("dyad_types_by_hour.pdf", width=11, height= 5, units="in", dpi=1200)
# save output with timestamp-----
# Build a filesystem-safe timestamp like "2020-09-16_17_12" with format()
# instead of the original gsub()/substr() round-trip over the printed
# Sys.time() string (identical result, clearer intent).
# NOTE: `timestamp` masks utils::timestamp(); name kept for compatibility.
timestamp <- format(Sys.time(), "%Y-%m-%d_%H_%M")
workspace <- paste0("results_", timestamp, ".Rdata")
# snapshot the entire session so results can be reloaded later
save.image(file= workspace)
# print results table
results
results2
# print current results file (paste and copy below)
workspace
# code to load old data
if(FALSE){
load("results_2020-09-16_17_12.Rdata")
}
|
b97fa272cba6095062a2f41db9f50eb1ff3f4f53 | e15ee0c012b591d8d81de637a4204d998e830778 | /East and West Coast UK names/scripts/metadata.R | 6aadbd3b16341a2d7ec7895816a5eb2ee67de73a | [] | no_license | froocpu/Analysis | ef6fe471d0a07ac685feed79597a476e90c2b898 | 3f00c62bccd24e555248fab5d4bba8075b11ed43 | refs/heads/master | 2020-03-21T13:09:16.129469 | 2017-09-28T21:57:01 | 2017-09-28T21:57:01 | 138,590,686 | 0 | 0 | null | 2018-06-25T12:15:37 | 2018-06-25T12:15:36 | null | UTF-8 | R | false | false | 480 | r | metadata.R | ## Get the relevant metadata for the region codes.
## Region-code metadata, fetched straight from the URL by fread().
## These rows get joined back to the main data later.
region.codes.url <- "http://www.maxmind.com/download/geoip/misc/region_codes.csv"
raw.regions <- fread(region.codes.url, col.names = c("Country", "Region", "State"))
## Build the final lookup table (Country lower-cased).
stateLookup <- data.table(
  Country = tolower(raw.regions$Country),
  Region = raw.regions$Region,
  State = raw.regions$State
)
## Clean up the intermediates.
rm(region.codes.url, raw.regions)
5ac4154b094f825587c061b430dbeb91393ccdcc | 37539e38b533649faf0531c00391755b81d823e1 | /twitter_cloud.R | 8d8bed698d52dd76c53738d4b306fee7ef8cbdd8 | [] | no_license | jawad3838/WordCloud-Using-R | f91f30f9bd303de272ab515074bb644a555cca7a | 835aafb9c4f0ca5603ef3714e164b7fd05068bd5 | refs/heads/master | 2020-06-08T17:34:09.221674 | 2019-06-22T21:01:08 | 2019-06-22T21:01:08 | 193,273,859 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,272 | r | twitter_cloud.R | # You can remove these lines once the packages are installed.
install.packages("twitteR")
install.packages("RCurl")
install.packages("tm")
install.packages("wordcloud")
# Include the necessary libraries
require(twitteR)
require(RCurl)
require(tm)
require(wordcloud)
# Download this file if you're running the code on windows.
download.file(url="http://curl.haxx.se/ca/cacert.pem", destfile="cacert.pem")
# I'll provide you with a link that will allow you to get the following keys from your own twitter account.
consumer_key <- 'Insert your key here'
consumer_secret <- 'Insert your key here'
access_token <- 'Insert your key here'
access_secret <- 'Insert your key here'
# Connect to the twitter app.
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Search all tweets on twitter (500) that contain the word "big data", limit to 500 for clarity.
bd_tweets = searchTwitter("big data", n=500, lang="en")
# Save the tweets temporarily
bd_text = sapply(bd_tweets, function(x) x$getText())
# This line of code removes all emoticons from the tweets as R cannot read them properly.
bd_text <- data.frame(text = iconv(bd_text, "latin1", "ASCII", "byte"),
stringsAsFactors = FALSE)
# Create a corpus ------ Merge all tweets into a single chunk of text
bd_corpus = Corpus(VectorSource(bd_text))
# Inspect the corpus ----- Just to display what tweets were captured while searching through twitter for Bigdata
inspect(bd_corpus[1])
# Clean the corpus by removing punctuation, numbers, and white spaces
bd_clean <- tm_map(bd_corpus, removePunctuation)
bd_clean <- tm_map(bd_clean, removeNumbers)
bd_clean <- tm_map(bd_clean, stripWhitespace)
# Create Word Cloud from clean data ---- Bigger and bold words indicate high occurence rate of that word in tweets and vice versa.
wordcloud(bd_clean)
# Modify your Word Cloud ---- limits the number of words in the word cloud to 50 and scales the smaller words so they are visible.
wordcloud(bd_clean, random.order = F, max.words = 50, scale = c(3, 0.5))
# Add some colors to the word Cloud so the words are clearly visible
wordcloud(bd_clean, random.order = F, max.words = 50, scale = c(3, 0.5), color = rainbow(50)) |
08d17cf35ade8503662f1c80abba7868f3c48e77 | 92438ef9bab506dc0366f61cbf572a98d2a4b966 | /Week09/RglIntro.R | d5cc9a00dc9b59b5ab237eb6aadf48c02680e55b | [] | no_license | maduhu/DataAnalysis101 | d24eefb7810349a9ee74d1bd3081f2af5fc718b6 | e11531f393ba5102121a5923589244aab6be44b2 | refs/heads/master | 2021-06-11T08:02:25.684357 | 2017-03-10T03:05:56 | 2017-03-10T03:05:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 885 | r | RglIntro.R | library( rgl)
library( dataWorkshop)
# need to point session to Week 9
Bolder<- read.table( "BB200.txt", header=TRUE, skip=2)
convertTime<- function(tt){
tt <- as.character(tt)
m<- nchar( tt)
mn<- substr(tt,1, m-3)
sec<- substr(tt,m-1,m)
mn<- as.numeric( mn)
sec<- as.numeric( sec)
return( mn + sec/60)
}
# Copy the raw table, then convert the split-time columns (6:13) from
# "MM:SS" strings to decimal minutes.
BolderN <- Bolder
BolderN[6:13] <- lapply(Bolder[6:13], convertTime)
# Quadratic regression of finishing place on the first- and sixth-mile splits.
fitD<- lm( PLACE ~ MILE1 + I(MILE1^2) + MILE6 + I(MILE6^2) , data=BolderN)
plot( fitD$fitted.values, fitD$residuals)
# Interactive: click points on the residual plot to flag outliers; their row
# indices are returned in `ind`.
ind<- identify( fitD$fitted.values, fitD$residuals)
# Open an rgl device; color runners along a rainbow, flagged outliers black.
open3d()
N<- nrow( BolderN)
colTable<- rainbow(N)
colTable[ind]<- "black"
plot3d(BolderN$MILE1, BolderN$MILE2, BolderN$MILE6,
col= colTable, cex=1.5)
plot3d(BolderN$MILE1, BolderN$MILE2, BolderN$MILE3,
col= rainbow(N))
# Thin-plate spline of place over the first two mile splits.
# NOTE(review): Tps() comes from the fields package, which is not attached
# here -- presumably loaded via dataWorkshop; confirm.
out<- Tps( cbind( BolderN$MILE1, BolderN$MILE2), BolderN$PLACE)
|
dd67ba857b1c46cb602b401df90269eb84ea9223 | d4900aec988678febffdfbed490f784c562a2bec | /R/scale-continuous.r | 884d28c7f8cd0956db41daa8f1268e573cd49de4 | [] | no_license | vivekktiwari/animint2 | d64883c9f18c606fb3d5f34e457b5cb896d9d291 | 9a2ff243da6d97eb6b62a82ed81bbdb7ae55b554 | refs/heads/master | 2021-09-20T21:50:48.979177 | 2018-08-15T22:21:37 | 2018-08-15T22:21:37 | 117,336,879 | 0 | 0 | null | 2018-08-15T12:07:24 | 2018-01-13T11:07:03 | R | UTF-8 | R | false | false | 6,358 | r | scale-continuous.r | #' Continuous position scales (x & y).
#'
#' \code{a_scale_x_continuous} and \code{a_scale_y_continuous} are the key functions.
#' The others, \code{a_scale_x_log10}, \code{a_scale_y_sqrt} etc, are aliases
#' that set the \code{trans} argument to commonly used transformations.
#'
#' @inheritParams continuous_a_scale
#' @seealso \code{\link{a_scale_date}} for date/time position scales.
#' @param ... Other arguments passed on to \code{a_scale_(x|y)_continuous}
#' @examples
#' \donttest{
#' if (require(ggplot2movies)) {
#' m <- a_plot(subset(movies, votes > 1000), a_aes(rating, votes)) +
#' a_geom_point(na.rm = TRUE)
#' m
#'
#' # Manipulating the default position scales lets you:
#'
#' # * change the axis labels
#' m + a_scale_y_continuous("number of votes")
#' m + a_scale_y_continuous(quote(votes ^ alpha))
#'
#' # * modify the axis limits
#' m + a_scale_y_continuous(limits = c(0, 5000))
#' m + a_scale_y_continuous(limits = c(1000, 10000))
#' m + a_scale_x_continuous(limits = c(7, 8))
#'
#' # you can also use the short hand functions xlim and ylim
#' m + ylim(0, 5000)
#' m + ylim(1000, 10000)
#' m + xlim(7, 8)
#'
#' # * choose where the ticks appear
#' m + a_scale_x_continuous(breaks = 1:10)
#' m + a_scale_x_continuous(breaks = c(1,3,7,9))
#'
#' # * manually label the ticks
#' m + a_scale_x_continuous(breaks = c(2,5,8), a_labels = c("two", "five", "eight"))
#' m + a_scale_x_continuous(breaks = c(2,5,8), a_labels = c("horrible", "ok", "awesome"))
#' m + a_scale_x_continuous(breaks = c(2,5,8), a_labels = expression(Alpha, Beta, Omega))
#'
#' # There are a few built in transformation that you can use:
#' m + a_scale_y_log10()
#' m + a_scale_y_sqrt()
#' m + a_scale_y_reverse()
#' # You can also create your own and supply them to the trans argument.
#' # See ?scales::trans_new
#'
#' # You can control the formatting of the labels with the formatter
#' # argument. Some common formats are built into the scales package:
#' df <- data.frame(
#' x = rnorm(10) * 100000,
#' y = seq(0, 1, length.out = 10)
#' )
#' p <- a_plot(df, a_aes(x, y)) + a_geom_point()
#' p + a_scale_y_continuous(a_labels = scales::percent)
#' p + a_scale_y_continuous(a_labels = scales::dollar)
#' p + a_scale_x_continuous(a_labels = scales::comma)
#'
#' # Other shortcut functions
#' a_plot(movies, a_aes(rating, votes)) +
#' a_geom_point() +
#' ylim(1e4, 5e4)
#' # * axis labels
#' a_plot(movies, a_aes(rating, votes)) +
#' a_geom_point() +
#' labs(x = "My x axis", y = "My y axis")
#' # * log scaling
#' a_plot(movies, a_aes(rating, votes)) +
#' a_geom_point() +
#' a_scale_x_log10() +
#' a_scale_y_log10()
#' }
#' }
#' @name a_scale_continuous
NULL
#' @rdname a_scale_continuous
#' @export
a_scale_x_continuous <- function(name = waiver(), breaks = waiver(),
                                 minor_breaks = waiver(), a_labels = waiver(),
                                 limits = NULL, expand = waiver(), oob = censor,
                                 na.value = NA_real_, trans = "identity") {
  # Build one continuous position scale that covers every x-related aesthetic,
  # so derived aesthetics (xmin, xmax, xend, boxplot hinges, ...) are
  # transformed consistently with x itself. Guide is "none": axes are drawn
  # by the coordinate system, not by a legend/guide.
  sc <- continuous_a_scale(
    c("x", "xmin", "xmax", "xend", "xintercept", "xmin_final", "xmax_final", "xlower", "xmiddle", "xupper"),
    "position_c", identity, name = name, breaks = breaks,
    minor_breaks = minor_breaks, a_labels = a_labels, limits = limits,
    expand = expand, oob = oob, na.value = na.value, trans = trans,
    a_guide = "none"
  )
  # TODO: Fix this hack. We're reassigning the parent ggproto object, but this
  # object should in the first place be created with the correct parent.
  sc$super <- a_ScaleContinuousPosition
  class(sc) <- class(a_ScaleContinuousPosition)
  sc
}
#' @rdname a_scale_continuous
#' @export
a_scale_y_continuous <- function(name = waiver(), breaks = waiver(),
                                 minor_breaks = waiver(), a_labels = waiver(),
                                 limits = NULL, expand = waiver(), oob = censor,
                                 na.value = NA_real_, trans = "identity") {
  # y-axis counterpart of a_scale_x_continuous(): one continuous position
  # scale covering every y-related aesthetic (ymin, ymax, yend, boxplot
  # lower/middle/upper, ...) so they are all transformed together.
  sc <- continuous_a_scale(
    c("y", "ymin", "ymax", "yend", "yintercept", "ymin_final", "ymax_final", "lower", "middle", "upper"),
    "position_c", identity, name = name, breaks = breaks,
    minor_breaks = minor_breaks, a_labels = a_labels, limits = limits,
    expand = expand, oob = oob, na.value = na.value, trans = trans,
    a_guide = "none"
  )
  # TODO: Fix this hack. We're reassigning the parent ggproto object, but this
  # object should in the first place be created with the correct parent.
  sc$super <- a_ScaleContinuousPosition
  class(sc) <- class(a_ScaleContinuousPosition)
  sc
}
#' @rdname animint2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# ggproto subclass of a_ScaleContinuous used for x/y position scales.
a_ScaleContinuousPosition <- a_ggproto("a_ScaleContinuousPosition", a_ScaleContinuous,
  # Position aesthetics don't map, because the coordinate system takes
  # care of it. But they do need to be made in to doubles, so stat methods
  # can tell the difference between continuous and discrete data.
  map = function(self, x, limits = self$get_limits()) {
    # Apply the out-of-bounds handler (censor by default), coerce to double,
    # then substitute na.value for anything that became NA.
    scaled <- as.numeric(self$oob(x, limits))
    ifelse(!is.na(scaled), scaled, self$na.value)
  }
)
# Transformed scales ---------------------------------------------------------
# Convenience wrappers: each simply forwards to a_scale_x/y_continuous() with a
# fixed transformation object from the scales package (log10/reverse/sqrt).
#' @rdname a_scale_continuous
#' @export
a_scale_x_log10 <- function(...) {
  a_scale_x_continuous(..., trans = log10_trans())
}
#' @rdname a_scale_continuous
#' @export
a_scale_y_log10 <- function(...) {
  a_scale_y_continuous(..., trans = log10_trans())
}
#' @rdname a_scale_continuous
#' @export
a_scale_x_reverse <- function(...) {
  a_scale_x_continuous(..., trans = reverse_trans())
}
#' @rdname a_scale_continuous
#' @export
a_scale_y_reverse <- function(...) {
  a_scale_y_continuous(..., trans = reverse_trans())
}
#' @rdname a_scale_continuous
#' @export
a_scale_x_sqrt <- function(...) {
  a_scale_x_continuous(..., trans = sqrt_trans())
}
#' @rdname a_scale_continuous
#' @export
a_scale_y_sqrt <- function(...) {
  a_scale_y_continuous(..., trans = sqrt_trans())
}
|
9c0f9243eaa6544acb8df46859733bf95c42df99 | 92630399e7e476b1307eba0deceeab6048c69687 | /man/fit.optimal_feature_selection_learner.Rd | 3de14ca1a4518b5aeac0d6f5205b861911a59620 | [] | no_license | cran/iai | 53bbc8fa096659614e4b288902cec2021feb35d0 | 591d213c1a7f9cafa0673e3f897f2c589f3e0435 | refs/heads/master | 2023-06-23T03:16:43.953505 | 2023-06-13T15:10:06 | 2023-06-13T15:10:06 | 197,544,870 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,060 | rd | fit.optimal_feature_selection_learner.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimalfeatureselection.R
\name{fit.optimal_feature_selection_learner}
\alias{fit.optimal_feature_selection_learner}
\title{Fits an Optimal Feature Selection learner to the training data}
\usage{
\method{fit}{optimal_feature_selection_learner}(obj, X, ...)
}
\arguments{
\item{obj}{The learner or grid to fit.}
\item{X}{The features of the data.}
\item{...}{Other parameters, including zero or more target vectors as
required by the problem type. Refer to the Julia documentation for
available parameters.}
}
\description{
When the \code{coordinated_sparsity} parameter of the learner is \code{TRUE},
additional keyword arguments are required - please refer to the Julia
documentation.
}
\details{
Julia Equivalent:
\href{https://docs.interpretable.ai/v3.1.1/OptimalFeatureSelection/reference/#IAI.fit\%21-Tuple\%7BOptimalFeatureSelectionLearner\%7D}{\code{IAI.fit!}}
}
\section{IAI Compatibility}{
Requires IAI version 1.1 or higher.
}
\examples{
\dontrun{iai::fit(lnr, X)}
}
|
444591267174921d484a04a141de07c41eb52ed6 | 1bc5af7c19261605be4cc80159914fc4e7d0835a | /ch07-regional-counts/ch07e-geary.R | 33cf9fdc49077e264d4cf06007f3ab6c91c7572c | [] | no_license | GregoryMatesi/SpatialDataAnalysis | 8f8e76b5ffc873c45a1a3e4b8e6896a32fac82a9 | 1f79582e7e7cf1f3ed5be9f15836afcc62d660b7 | refs/heads/main | 2023-08-03T01:45:29.980457 | 2021-09-30T17:43:48 | 2021-09-30T17:43:48 | 406,417,256 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,930 | r | ch07e-geary.R | # install.packages(spdep", "sf")
library(spdep)
library(sf)
# Read shapefile for New York counties.
ny8 <- sf::st_read("./data/NY_data/ny8_utm18.shp")
# Read the neighbour (contiguity) structure.
ny_nb <- spdep::read.gal("./data/NY_data/NY_nb.gal", override.id = TRUE)
# Plot region boundaries from ny8 ...
plot(st_geometry(ny8), border="grey60")
# ... and overlay the neighbour graph at the region centroids.
plot(ny_nb, coords = st_centroid(st_geometry(ny8)),
add=TRUE, col="blue", pch = 19, cex = 0.6)
### Geary's c
# Adjacency weights: w_ij = 1 if regions i and j share a boundary.
# Binary proximity matrix (style = "B" -- NOT row standardised).
w = nb2mat(ny_nb, style = "B")
# Same binary weights in list format; see ?nb2listw for other styles.
lw = nb2listw(ny_nb, style = "B")
# Counts: test with p-value from the normality approximation.
geary.test(ny8$Cases, listw = lw, randomisation = FALSE)
# Counts: permutation (Monte Carlo) test.
geary.mc(ny8$Cases, listw = lw, nsim = 499)
# Counts: Monte Carlo test simulating data under the constant risk hypothesis (CRH).
# Some preliminaries:
N = length(ny8$Cases) # number of regions
y = ny8$Cases # number of cases
n = ny8$POP8 #population sizes
r <- sum(y)/sum(n) # estimated risk
rni <- r * n # expected count per region under constant risk
nsim = 499
# Observed Geary's statistic.
t0 = geary(y, listw = lw, n = N, n1 = N - 1, S0 = Szero(lw))$C
# Simulate data under CRH.
tsim = numeric(nsim)
# Geary's c for Poisson data simulated under CRH.
for (i in 1:nsim) {
  tsim[i] = geary(rpois(N, rni), listw = lw, n = N, n1 = N - 1, S0 = Szero(lw))$C
}
# Lower-tail p-value (small c = positive autocorrelation) for the CRH Monte Carlo test.
(sum(tsim <= t0) + 1)/(nsim + 1)
## Same three tests with incidence rates instead of counts.
rates = y/n
# Rates: normality approximation p-value.
geary.test(rates, listw = lw, randomisation = FALSE)
# Rates: permutation (randomisation) p-value.
geary.mc(rates, listw = lw, nsim = 499)
# Rates: Monte Carlo under CRH -- simulate counts, convert to rates, recompute c.
# Observed Geary's statistic on the rates:
t0b = geary(rates, listw = lw, n = N, n1 = N - 1, S0 = Szero(lw))$C
# Geary's c for simulated Poisson data, after the rate
tsimb = numeric(nsim)
# correction (dividing by the population sizes).
for (i in 1:nsim) {
  tsimb[i] = geary(rpois(N, rni)/n, listw = lw, n = N, n1 = N - 1, S0 = Szero(lw))$C
}
# Lower-tail p-value for the CRH Monte Carlo test on incidence rates.
(sum(tsimb <= t0b) + 1)/(nsim + 1)
#### Summary of results
### Counts
## normality assumption: conclude spatial autocorrelation
## randomization assumption: conclude spatial autocorrelation
## Monte Carlo CRH assumption: no spatial autocorrelation
# Conclusion: similarity in counts caused by heterogeneities in population
# size, not similarities in spatial deviation from the mean.
### Incidence proportion
## normality assumption: conclude spatial autocorrelation
## randomization assumption: no spatial autocorrelation
## Monte Carlo CRH assumption: suggestive, not conclusive autocorrelation
## Results are sensitive to assumptions.
|
c75448ca234c1f59f05982e2a6bfe4de95fece00 | 5cbb67ee4bbb8b0522d5258059d6ed348ee79b6a | /Bagging_Boosting_RandomForest.R | e5bb0be826daa523fe3216afd114c8f5229f0245 | [] | no_license | jpzorrilla/R_Stats | 3096fc1c4ba948e604115c69ba6352b3d1613520 | 311316df0559c934bd84a4ba7a70861a25a193df | refs/heads/master | 2023-04-04T05:19:01.252600 | 2021-04-11T14:40:26 | 2021-04-11T14:40:26 | 297,765,850 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 14,596 | r | Bagging_Boosting_RandomForest.R | install.packages("rpart")
install.packages("rpart.plot")
install.packages("randomForest")
install.packages("adabag")
require(rpart) #arboles
require(rpart.plot)#gráficos árboles
require(randomForest)#radom forests y bagging
require(adabag)#boosting
setwd("C:/Users/usuario/Desktop/Archivos Escritorio/MAESTRIA EN POLÍTICAS PÚBLICAS/Cursos Maestría/04-01.EstadísticaAvanzada")
#levantamos los datos, recuerden ubicarse en el directorio de trabajo
data=read.csv("vivienda.csv",sep=";", dec=",", header=T)
summary(data)
#Dado que son categóricas, las variablas Precios_clases y Grandes_sup deben estar como factores.
data$Precios_clases=as.factor(data$Precios_clases)
data$Grandes_sup=as.factor(data$Grandes_sup)
summary(data$Precios_clases)
summary(data$Grandes_sup)
#Árboles de regresión
#ajustando un árbol de regresión con la función rpart().
names(data) #Utilizo todas las variables X como variables predictoras
set.seed(16)
arbol=rpart(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,cp=0.001,data) #Iniciamos con un valor de cp de 0.001
#Lista de arboles anidados
a=printcp(arbol)
which.min(a[,4]) #aquí le pido que me indique que fila corresponde con el menor valor de la columna 4 (xerror) de la tabla de cp
#El árbol 23 es el que tiene menor xerror
#El siguiente plot es útil para visualizar como disminuye el xerror al aumentar la complejidad del árbol
plotcp(arbol)
#Veamos ahora cual cumple la regla 1-SE:
a[23,4]+a[23,5] #sumo el xerror y el sd de la fila 23 de la tabla de cp (que previamente llamamos a)
#El árbol más chico con un valor de xerror menor a 0.3640175 es el árbol 7
#Retengo ese árbol (regla 1-SE).
#Poda:
arbol.pod=prune(arbol,cp=0.011034945) #cp correspondiente al árbol 7
rpart.plot(arbol.pod)
#arbol 7 ver tabla resultado en #Lista de arboles anidados
#Calculemos el error del árbol mediante la metodología de muestra de prueba
set.seed(75)
K=30
error.arbol.reg.learn=matrix(NA, K)
error.arbol.reg.test=matrix(NA, K)
var.arbol.reg.learn=matrix(NA, K)
var.arbol.reg.test=matrix(NA, K)
n = nrow(data)
for(k in 1:K) {
smp=sample(n,round(n/3))
learn=data[-smp,]
test=data[smp,]
arbol.reg.learn=rpart(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,cp=0.001,learn) #inicio con los mismos parámetros que escogimos arriba que generan una secuencia larga de árboles anidados y luego en cada iteración elijo aquel que cumpla la regla 1-SE
a=printcp(arbol.reg.learn)
b=which.min(a[,4])
c=a[b,4]+a[b,5]
d=min(which(a[,4]<=c))
e=a[d,1]
arbol.reg.learn.pod=prune(arbol.reg.learn,cp=e)
pred.reg.learn=predict(arbol.reg.learn.pod,learn)
pred.reg.test=predict(arbol.reg.learn.pod,test)
error.arbol.reg.learn[k]=sqrt(mean((pred.reg.learn-learn[,1])^2))
error.arbol.reg.test[k] = sqrt(mean((pred.reg.test-test[,1])^2))
#Nos podría interesar calcular una pseudo varianza explicada a partir del MSE a modo de obtener resutlados más interpretables del desempeño del modelo.
var.arbol.reg.learn[k]=1-((mean((pred.reg.learn-learn[,1])^2))/var(learn[,1]))
var.arbol.reg.test[k]=1-((mean((pred.reg.test-test[,1])^2))/var(test[,1]))
}
#Resultados del loop:
mean.error.arbol.reg.learn=mean(error.arbol.reg.learn)
sd.error.arbol.reg.learn=sd(error.arbol.reg.learn)
mean.error.arbol.reg.test=mean(error.arbol.reg.test)
sd.error.arbol.reg.test=sd(error.arbol.reg.test)
mean.var.arbol.reg.learn=mean(var.arbol.reg.learn)
sd.var.arbol.reg.learn=sd(var.arbol.reg.learn)
mean.var.arbol.reg.test=mean(var.arbol.reg.test)
sd.var.arbol.reg.test=sd(var.arbol.reg.test)
mean.error.arbol.reg.learn
sd.error.arbol.reg.learn
mean.error.arbol.reg.test
sd.error.arbol.reg.test
mean.var.arbol.reg.learn
sd.var.arbol.reg.learn
mean.var.arbol.reg.test
sd.var.arbol.reg.test
# RANDOM FORESTS --------------------------------------------------------------
# ntree = number of trees = 500
set.seed(16)
modelo.rf.reg=randomForest(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,ntree=500, data)
modelo.rf.reg
modelo.rf.reg=randomForest(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,mtry=7,ntree=500, data)
# mtry controls the number of variables tried at each split; choosing 7 -- the
# total number of X variables in the data frame -- turns this into a Bagging
# model, since the only real difference between RF and Bagging is the number of
# candidate variables per node. We will fit this model properly further below.
# NOTE(review): this reassignment overwrites the RF fit, so the plot and
# varImpPlot below are computed on the mtry=7 (bagging) model -- confirm intended.
plot(modelo.rf.reg)
# The error stabilises after roughly 150-200 trees, so 500 is more than enough.
# Variable importance.
varImpPlot(modelo.rf.reg)
# This plot shows each variable's contribution to the overall decrease in node
# impurity: variables at the top are the most important, those at the bottom the
# least. One variable (Nro_hab) clearly dominates, with a large gap after it.
# Note it coincides with the variable in the first split of the regression tree.
# Test-sample error of the Random Forests model:
set.seed(75)
K=30
error.rf.reg.learn=matrix(NA, K)
error.rf.reg.test=matrix(NA, K)
var.rf.reg.learn=matrix(NA, K)
var.rf.reg.test=matrix(NA, K)
n = nrow(data)
for(k in 1:K) {
  smp=sample(n,round(n/3))
  learn=data[-smp,]
  test=data[smp,]
  modelo.rf.reg.learn=randomForest(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,ntree=500, learn)
  pred.rf.reg.learn=predict(modelo.rf.reg.learn,learn)
  pred.rf.reg.test=predict(modelo.rf.reg.learn,test)
  error.rf.reg.learn[k]=sqrt(mean((pred.rf.reg.learn-learn[,1])^2))
  error.rf.reg.test[k] = sqrt(mean((pred.rf.reg.test-test[,1])^2))
  # Pseudo variance-explained.
  var.rf.reg.learn[k]=1-((mean((pred.rf.reg.learn-learn[,1])^2))/var(learn[,1]))
  var.rf.reg.test[k]=1-((mean((pred.rf.reg.test-test[,1])^2))/var(test[,1]))
}
# Loop results:
mean.error.rf.reg.learn=mean(error.rf.reg.learn)
sd.error.rf.reg.learn=sd(error.rf.reg.learn)
mean.error.rf.reg.test=mean(error.rf.reg.test)
sd.error.rf.reg.test=sd(error.rf.reg.test)
mean.var.rf.reg.learn=mean(var.rf.reg.learn)
sd.var.rf.reg.learn=sd(var.rf.reg.learn)
mean.var.rf.reg.test=mean(var.rf.reg.test)
sd.var.rf.reg.test=sd(var.rf.reg.test)
mean.error.rf.reg.learn
sd.error.rf.reg.learn
mean.error.rf.reg.test
sd.error.rf.reg.test
mean.var.rf.reg.learn
sd.var.rf.reg.learn
mean.var.rf.reg.test
sd.var.rf.reg.test
# BAGGING ---------------------------------------------------------------------
# Fit a Bagging model using randomForest(). Dedicated bagging functions exist
# (e.g. ipred), but randomForest() is recommended for being more flexible.
modelo.bag.reg=randomForest(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,mtry=7,ntree=500, data)
# As noted above, choosing mtry=7 (the number of X variables) makes this Bagging.
modelo.bag.reg
# Test-sample error of the bagging model:
set.seed(75)
K=30
error.bag.reg.learn=matrix(NA, K)
error.bag.reg.test=matrix(NA, K)
var.bag.reg.learn=matrix(NA, K)
var.bag.reg.test=matrix(NA, K)
n = nrow(data)
for(k in 1:K) {
  smp=sample(n,round(n/3))
  learn=data[-smp,]
  test=data[smp,]
  modelo.bag.reg.learn=randomForest(Precios~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,ntree=500,mtry=7,learn)
  pred.bag.reg.learn=predict(modelo.bag.reg.learn,learn)
  pred.bag.reg.test=predict(modelo.bag.reg.learn,test)
  error.bag.reg.learn[k]=sqrt(mean((pred.bag.reg.learn-learn[,1])^2))
  error.bag.reg.test[k] = sqrt(mean((pred.bag.reg.test-test[,1])^2))
  # Pseudo variance-explained.
  var.bag.reg.learn[k]=1-((mean((pred.bag.reg.learn-learn[,1])^2))/var(learn[,1]))
  var.bag.reg.test[k]=1-((mean((pred.bag.reg.test-test[,1])^2))/var(test[,1]))
}
# Loop results:
mean.error.bag.reg.learn=mean(error.bag.reg.learn)
sd.error.bag.reg.learn=sd(error.bag.reg.learn)
mean.error.bag.reg.test=mean(error.bag.reg.test)
sd.error.bag.reg.test=sd(error.bag.reg.test)
mean.var.bag.reg.learn=mean(var.bag.reg.learn)
sd.var.bag.reg.learn=sd(var.bag.reg.learn)
mean.var.bag.reg.test=mean(var.bag.reg.test)
sd.var.bag.reg.test=sd(var.bag.reg.test)
mean.error.bag.reg.learn
sd.error.bag.reg.learn
mean.error.bag.reg.test
sd.error.bag.reg.test
mean.var.bag.reg.learn
sd.var.bag.reg.learn
mean.var.bag.reg.test
sd.var.bag.reg.test
# Error comparison table: mean and SD of the test-sample RMSE per model.
MeanTest<-c(mean.error.arbol.reg.test,mean.error.rf.reg.test,mean.error.bag.reg.test)
SdTest<-c(sd.error.arbol.reg.test,sd.error.rf.reg.test,sd.error.bag.reg.test)
Results<-rbind(MeanTest, SdTest)
colnames(Results)<-c("CART", "RF", "Bagging")
round(Results,3)
## CART RF Bagging
## MeanTest 5.541 4.216 4.308
## SdTest 0.457 0.578 0.710
# All models work reasonably well; Random Forests shows the lowest error
# (very close to Bagging). As expected, the regression tree performs worst,
# though not badly. For prediction we would therefore pick Random Forests.
# CLASSIFICATION MODELS =======================================================
# CLASSIFICATION TREE ---------------------------------------------------------
# Response variable: Precios_clases. Misclassification rate estimated with
# K repeated train/test splits (one third held out each time).
set.seed(61)
K=30
error.arbol.cl.learn=matrix(NA, K)
error.arbol.cl.test=matrix(NA, K)
n = nrow(data)
for(k in 1:K) {
  smp=sample(n,round(n/3))
  learn=data[-smp,]
  test=data[smp,]
  arbol.cl.learn=rpart(Precios_clases~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,cp=0.001,learn) # start with a low cp and prune afterwards (1-SE rule)
  aa=printcp(arbol.cl.learn)
  bb=which.min(aa[,4])
  cc=aa[bb,4]+aa[bb,5]
  dd=min(which(aa[,4]<=cc))
  ee=aa[dd,1]
  arbol.cl.learn.pod=prune(arbol.cl.learn,cp=ee)
  pred.cl.learn=predict(arbol.cl.learn.pod,learn,type="class")
  pred.cl.test=predict(arbol.cl.learn.pod,test,type="class")
  error.arbol.cl.learn[k]=mean(pred.cl.learn!=learn[,2])
  error.arbol.cl.test[k] = mean(pred.cl.test!=test[,2])
}
# Mean and SD of the misclassification rates.
mean.error.arbol.cl.learn=mean(error.arbol.cl.learn)
sd.error.arbol.cl.learn=sd(error.arbol.cl.learn)
mean.error.arbol.cl.test=mean(error.arbol.cl.test)
sd.error.arbol.cl.test=sd(error.arbol.cl.test)
# RANDOM FORESTS (classification) ---------------------------------------------
# Same repeated train/test scheme as for the classification tree.
set.seed(75)
K=30
error.rf.cl.learn=matrix(NA, K)
error.rf.cl.test=matrix(NA, K)
n = nrow(data)
for(k in 1:K) {
  smp=sample(n,round(n/3))
  learn=data[-smp,]
  test=data[smp,]
  modelo.rf.cl.learn=randomForest(Precios_clases~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,ntree=500, learn)
  pred.rf.cl.learn=predict(modelo.rf.cl.learn,learn,type="class")
  pred.rf.cl.test=predict(modelo.rf.cl.learn,test,type="class")
  error.rf.cl.learn[k]=mean(pred.rf.cl.learn!=learn[,2])
  error.rf.cl.test[k] =mean(pred.rf.cl.test!=test[,2])
}
# Mean and SD of the misclassification rates.
mean.error.rf.cl.learn=mean(error.rf.cl.learn)
sd.error.rf.cl.learn=sd(error.rf.cl.learn)
mean.error.rf.cl.test=mean(error.rf.cl.test)
sd.error.rf.cl.test=sd(error.rf.cl.test)
# BOOSTING --------------------------------------------------------------------
# Boosting models are computationally expensive; this loop can take a while.
# To avoid long waits, K is set to 3 for this practical only -- in real
# applications use a larger K (20-30). Also note that to compare errors across
# models the loop setup must be identical, so this comparison with the other
# models (which used K=30) is for illustration purposes only.
set.seed(72)
K=3
error.boost.cl.learn=matrix(NA, K)
error.boost.cl.test=matrix(NA, K)
n = nrow(data)
for(k in 1:K) {
  smp=sample(n,round(n/3))
  learn=data[-smp,]
  test=data[smp,]
  modelo.boost.cl.learn=boosting(Precios_clases~Cont_1+Zona_ind+Grandes_sup+Cont_2+Nro_hab+Dist_GCiud+Imp+Hab_per,data=learn) # this is how a boosting model is specified (adabag)
  pred.boost.cl.learn=predict(modelo.boost.cl.learn,learn)
  pred.boost.cl.test=predict(modelo.boost.cl.learn,test)
  error.boost.cl.learn[k]=mean(pred.boost.cl.learn$class != learn[,2]) # for boosting, predict() returns a list with several elements, so we must explicitly ask for the predicted classes via $class
  error.boost.cl.test[k] = mean(pred.boost.cl.test$class != test[,2])
}
# Mean and SD of the misclassification rates.
mean.error.boost.cl.learn=mean(error.boost.cl.learn)
sd.error.boost.cl.learn=sd(error.boost.cl.learn)
mean.error.boost.cl.test=mean(error.boost.cl.test)
sd.error.boost.cl.test=sd(error.boost.cl.test)
# Comparison table: mean and standard deviation of the misclassification error
# on the test sample for the three classifiers fitted above
# (CART tree, random forest, boosting).
MeanTest<-c(mean.error.arbol.cl.test,mean.error.rf.cl.test,mean.error.boost.cl.test)
SdTest<-c(sd.error.arbol.cl.test,sd.error.rf.cl.test,sd.error.boost.cl.test)
Results<-rbind(MeanTest, SdTest)
# BUG FIX: the third model in this section is Boosting, not Bagging --
# the original column label was wrong.
colnames(Results)<-c("CART", "RF", "Boosting")
round(Results,3)
# Again the best-performing model is RF, with a mean error very close to
# boosting's (bear in mind boosting used very few iterations here).
# The classification tree is the worst model.
# The test-sample mean error is what we compare for predictive ability, but
# when reporting results it is recommended to show the mean errors (and SDs)
# on both the test and the learning samples:
MeanLearn<-c(mean.error.arbol.cl.learn,mean.error.rf.cl.learn,mean.error.boost.cl.learn)
SdLearn<-c(sd.error.arbol.cl.learn,sd.error.rf.cl.learn,sd.error.boost.cl.learn)
MeanTest<-c(mean.error.arbol.cl.test,mean.error.rf.cl.test,mean.error.boost.cl.test)
SdTest<-c(sd.error.arbol.cl.test,sd.error.rf.cl.test,sd.error.boost.cl.test)
Results<-cbind(MeanLearn, SdLearn,MeanTest, SdTest)
# Same label fix as above: the third row is the Boosting model.
rownames(Results)<-c("CART", "RF", "Boosting")
round(Results,3)
5806fd06444cc54a06d7c45ff22d55316a6e17e3 | 24a12c041aab38220e0c4bbeabc50d296dd86ea3 | /deficit.R | 84ab627dc709ffc510473a467ba9616ca0248427 | [] | no_license | luksurious/national-footprint-visualization | 5a2770230abb49276b2f9c6ad415ae60c2ff8760 | 239273d9be9b09f4beb7bf8710d99290e2533459 | refs/heads/master | 2020-05-14T09:55:46.461190 | 2019-06-09T23:18:53 | 2019-06-09T23:18:53 | 181,752,596 | 0 | 2 | null | 2019-06-09T20:01:48 | 2019-04-16T19:21:37 | R | UTF-8 | R | false | false | 10,514 | r | deficit.R | library(plotly)
library(purrr)
library(RColorBrewer)
# UI half of the "deficit trend" Shiny module. `id` is the module id; all
# input/output ids are namespaced through ns() so several instances can
# coexist. Sidebar: region-type toggle, region/country pickers (shown
# conditionally), per-person vs total toggle, year-range slider and colour
# scheme; main panel: the plotly chart rendered by the server half.
# dataRegions, dataCountries and dataYears are app-level globals defined
# elsewhere -- presumably choice vectors and a c(min, max) year pair; confirm
# at their definitions.
deficitTrendUI <- function (id) {
  ns <- NS(id)  # namespacing function for this module instance
  tagList(
    h2(
      "How is the evolution of the ecological reserve/deficit?"
    ),
    sidebarLayout(
      sidebarPanel(
        radioButtons(
          ns("regionType"),
          label = "Type of region",
          choices = c("Continents", "Countries"),
          selected = "Continents"
        ),
        # Region picker, only visible when "Continents" is selected.
        conditionalPanel(
          condition = "input.regionType == 'Continents'",
          ns = ns,
          selectInput(
            ns("region"),
            label = "Choose the region to show in the chart",
            choices = dataRegions,
            selected = "World"
          )
        ),
        # Country picker, only visible when "Countries" is selected.
        conditionalPanel(
          condition = "input.regionType == 'Countries'",
          ns = ns,
          selectInput(
            ns("country"),
            label = "Choose the country to show in the chart",
            choices = dataCountries,
            selected = "Spain"
          )
        ),
        radioButtons(
          ns("dataType"),
          label = "Type of data",
          choices = c("Per person", "Total"),
          selected = "Per person"
        ),
        # Year range; defaults to the last 20 years of available data.
        sliderInput(
          ns("years"),
          "Years",
          dataYears[1],
          dataYears[2],
          value = c(
            dataYears[2] - 20,
            dataYears[2]
          ),
          sep = "",
          step = 1
        ),
        radioButtons(
          ns("colors"),
          "Color scheme",
          choices = c("Semantic (green & red)" = "semantic", "Colorblind friendly (Dark2)" = "Dark2")
        )
      ),
      mainPanel(
        plotlyOutput(ns("plot"))
      )
    )
  )
}
deficitTrend <- function (input, output, session) {
  # Reactive: deficit data restricted to the selected country (or UN region)
  # and year range. deficitData() is an app-level helper defined elsewhere;
  # the code below shows its result has at least country, UN_region and year
  # columns -- see its definition for the full contract.
  filteredDeficitData <- reactive({
    mydata <- deficitData(input$regionType, input$dataType)
    if (input$regionType == 'Countries') {
      mydata <- mydata[mydata$country == input$country
                      & mydata$year >= input$years[1]
                      & mydata$year <= input$years[2],]
    } else {
      mydata <- mydata[mydata$UN_region == input$region
                      & mydata$year >= input$years[1]
                      & mydata$year <= input$years[2],]
    }
    # The if/else is the last expression, so the filtered frame is the
    # reactive's value.
  })
  # Reactive: colour configuration for the two trace families. Each entry is
  # c(line colour, fill colour); the fill colour carries a trailing "50" hex
  # alpha digit pair for translucency.
  colors <- reactive({
    if (input$colors == 'semantic') {
      list(
        "bio" = c("green", '#00800050'),
        "footprint" = c("red", "#FF000050")
      )
    } else if (input$colors == 'Dark2') {
      # categoricalDark2Colors8 is defined elsewhere in the app -- presumably
      # the 8-colour RColorBrewer "Dark2" palette; confirm at its definition.
      list(
        "bio" = c(categoricalDark2Colors8[1], paste0(categoricalDark2Colors8[1], "50")),
        "footprint" = c(categoricalDark2Colors8[3], paste0(categoricalDark2Colors8[3], "50"))
      )
    }
    # NOTE(review): no final else -- any other radio value would yield NULL,
    # though the radioButtons choices currently prevent that.
  })
  # Render the reserve/deficit chart. The data is split into year ranges where
  # footprint exceeds biocapacity and vice versa, each range drawn as its own
  # trace, with short connector traces at the crossover points. Naming suggests
  # total.x is the footprint total and total.y the biocapacity total (they feed
  # the footprint/bio configs respectively) -- TODO confirm in deficitData().
  # getEdgesOfTrace, addHighResourceTrace, addIntersectionTrace, bioConfig and
  # footprintConfig are helpers defined elsewhere in this file.
  output$plot <- renderPlotly({
    mydata <- filteredDeficitData()
    # Rows where footprint > biocapacity (deficit) and the reverse (reserve).
    highEFData <- mydata[mydata$total.x > mydata$total.y,]
    highBioData <- mydata[mydata$total.y > mydata$total.x,]
    # Contiguous year ranges for each regime within the selected window.
    highEFRanges <- getEdgesOfTrace(highEFData$year, input$years[1], input$years[2])
    highBioRanges <- getEdgesOfTrace(highBioData$year, input$years[1], input$years[2])
    p <- plot_ly(data = mydata,
            type = 'scatter',
            mode = 'lines+markers',
            marker = list(size = 4)
    ) %>%
      layout(
        title = "Ecological Reserve/Deficit",
        xaxis = list(title = "Year"),
        yaxis = list(title = "in global hectares"))
    bioConfig <- bioConfig()
    footprintConfig <- footprintConfig()
    # add traces where biocapacity is bigger than footprint
    for (range in highBioRanges) {
      rangeData <- highBioData[highBioData$year >= range$start & highBioData$year <= range$end,]
      p <- addHighResourceTrace(bioConfig, rangeData, p)
      # Only the first trace of each family shows a legend entry.
      footprintConfig$legend = FALSE
      bioConfig$legend = FALSE
      # If the range ends before the window does, the regime flips: draw the
      # connector up to the computed crossover point.
      if (range$end < input$years[2]) {
        intersections = calcIntersectionLines(mydata, range)
        p <- addIntersectionTrace(bioConfig, intersections, p)
      }
    }
    # add traces where footprint is bigger than biocapacity
    for (range in highEFRanges) {
      rangeData <- highEFData[highEFData$year >= range$start & highEFData$year <= range$end,]
      p <- addHighResourceTrace(footprintConfig, rangeData, p)
      footprintConfig$legend = FALSE
      if (range$end < input$years[2]) {
        intersections = calcIntersectionLines(mydata, range)
        p <- addIntersectionTrace(footprintConfig, intersections, p)
      }
    }
    p
  })
calcIntersectionLines <- function (data, range) {
conData <- data[data$year >= range$end & data$year <= (range$end+1),]
model1 <- lm(total.y ~ year, data = conData)
model2 <- lm(total.x ~ year, data = conData)
yearIntersect <- (model1$coefficients[1] - model2$coefficients[1]) / (model2$coefficients[2] - model1$coefficients[2])
valueIntersect <- model1$coefficients[1] + model1$coefficients[2] * yearIntersect
bioX <- c(range$end, yearIntersect)
bioY <- c(conData[conData$year == range$end, c("total.y")], valueIntersect)
bioX1 <- c(yearIntersect, range$end+1)
bioY1 <- c(valueIntersect, conData[conData$year == (range$end+1), c("total.y")])
efX <- c(range$end, yearIntersect)
efY <- c(conData[conData$year == range$end, c("total.x")], valueIntersect)
efX1 <- c(yearIntersect, range$end+1)
efY1 <- c(valueIntersect, conData[conData$year == (range$end+1), c("total.x")])
list("bioX" = bioX, "bioY" = bioY, "bioX1" = bioX1, "bioY1" = bioY1, "efX" = efX, "efY" = efY, "efX1" = efX1, "efY1" = efY1)
}
addIntersectionTrace <- function (config, intersections, p) {
add_trace(p = p,
data = data.frame(intersections[config$crossLow1X], intersections[config$crossLow1Y]),
y = as.formula(paste0("~", config$crossLow1Y)),
x = as.formula(paste0("~", config$crossLow1X)),
name = config$lowerName, legendgroup = config$lowerGroup,
hoverinfo = "skip",
showlegend = FALSE,
marker = NULL,
line = list(color = config$lowerColor[1], width = 2),
mode = 'lines') %>%
add_trace(data = data.frame(intersections[config$crossHigh1X], intersections[config$crossHigh1Y]),
y = as.formula(paste0("~", config$crossHigh1Y)),
x = as.formula(paste0("~", config$crossHigh1X)),
name = config$higherName, legendgroup = config$higherGroup,
line = list(color = config$higherColor[1], width = 2),
fillcolor = config$higherColor[2],
showlegend = FALSE,
hoverinfo = "skip",
marker = NULL,
fill = 'tonexty',
mode = 'lines') %>%
add_trace(data = data.frame(intersections[config$crossLow2X], intersections[config$crossLow2Y]),
y = as.formula(paste0("~", config$crossLow2Y)),
x = as.formula(paste0("~", config$crossLow2X)),
name = config$higherName, legendgroup = config$higherGroup,
hoverinfo = "skip",
marker = NULL,
showlegend = FALSE,
line = list(color = config$higherColor[1], width = 2),
mode = 'lines') %>%
add_trace(data = data.frame(intersections[config$crossHigh2X], intersections[config$crossHigh2Y]),
y = as.formula(paste0("~", config$crossHigh2Y)),
x = as.formula(paste0("~", config$crossHigh2X)),
name = config$lowerName, legendgroup = config$lowerGroup,
hoverinfo = "skip",
marker = NULL,
showlegend = FALSE,
line = list(color = config$lowerColor[1], width = 2),
fillcolor = config$lowerColor[2],
fill = 'tonexty',
mode = 'lines')
}
bioConfig <- reactive({
colors <- colors()
list("lowerColors" = colors$footprint, "higherColors" = colors$bio,
"lowerName" = "Footprint", "higherName" = "Biocapacity",
"lowerGroup" = "ef", "higherGroup" = "bio",
"lowerY" = "~total.x", "higherY" = "~total.y",
"crossLow1X" = "efX", "crossLow1Y" = "efY",
"crossHigh1X" = "bioX", "crossHigh1Y" = "bioY",
"crossLow2X" = "bioX1", "crossLow2Y" = "bioY1",
"crossHigh2X" = "efX1", "crossHigh2Y" = "efY1",
"legend" = TRUE)
})
footprintConfig <- reactive({
colors <- colors()
list("lowerColors" = colors$bio, "higherColors" = colors$footprint,
"lowerName" = "Biocapacity", "higherName" = "Footprint",
"lowerGroup" = "bio", "higherGroup" = "ef",
"lowerY" = "~total.y", "higherY" = "~total.x",
"crossLow1X" = "bioX", "crossLow1Y" = "bioY",
"crossHigh1X" = "efX", "crossHigh1Y" = "efY",
"crossLow2X" = "efX1", "crossLow2Y" = "efY1",
"crossHigh2X" = "bioX1", "crossHigh2Y" = "bioY1",
"legend" = TRUE)
})
addHighResourceTrace <- function (config, data, p) {
add_trace(p = p,
data = data,
y = as.formula(config$lowerY),
showlegend = config$legend,
x = ~ year,
name = config$lowerName, legendgroup = config$lowerGroup,
marker = list(color = config$lowerColor[1]),
line = list(color = config$lowerColor[1], width = 2)) %>%
add_trace(data = data,
y = as.formula(config$higherY),
x = ~ year,
showlegend = config$legend,
name = config$higherName, legendgroup = config$higherGroup,
marker = list(color = config$higherColor[1]),
line = list(color = config$higherColor[1], width = 2),
fillcolor = config$higherColor[2],
fill = 'tonexty')
}
getEdgesOfTrace <- function (years, minYear, maxYear) {
i <- 1
ranges <- list()
if (!is_empty(years)) {
rangeStart <- minYear
startYear <- min(years)
if (startYear > minYear) {
rangeStart <- startYear
}
for (theYear in years) {
if (theYear > (startYear+1)) {
# gap between previous year
ranges[[i]] <- list(start = rangeStart, end = startYear); i <- i + 1
rangeStart <- theYear
}
startYear <- theYear
}
ranges[[i]] <- list(start = rangeStart, end = max(years))
}
return(ranges)
}
} |
9782c2d5894c2ec1231e28573f6ce8cd38fd9285 | a20a227ce60b3c63f5018f03a91ef00110e5a7a0 | /task-1/Homework-Input/documentation/remote_nodes_and_searcher.R | e5f0738c3b79c1b0b3f5804c95565af18d59fb7f | [] | no_license | jiriklepl/NSWI080 | 7c8411bb4c78e370156372c1060599f2d8305ce3 | 0a1a30045b03c8f89abbdaad431618631e83b68f | refs/heads/master | 2023-06-04T09:36:02.866942 | 2021-06-29T00:20:30 | 2021-06-29T00:20:30 | 349,251,396 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,163 | r | remote_nodes_and_searcher.R | # This script to be run in the documentation folder after the files (see below) are generated
library(stringr)
library(tidyverse)
library(dplyr)
library(ggplot2)
# Result files, one per benchmark run; the two \d+ groups in the file name
# encode the node count and edge count of the tested graph.
files <- list.files(pattern="remote_nodes_and_searcher_\\d+_\\d+")
# Accumulators filled by process_file() below, one triple per scenario
# (local, remote nodes, remote both):
#   *_per_edges       - list(Time, Density): mean query time per file/density
#   *_distance_counts - numeric vector indexed by distance: observation count
#   *_distance_total  - numeric vector indexed by distance: summed query time
per_edges <- c()
remote_nodes_per_edges <- c()
remote_both_per_edges <- c()
distance_counts <- c()
remote_nodes_distance_counts <- c()
remote_both_distance_counts <- c()
distance_total <- c()
remote_nodes_distance_total <- c()
remote_both_distance_total <- c()
# Fold one file's measurements into the running accumulators.
#
# file_data       : data frame with columns Distance and Time (one row per query)
# per_edges       : list with numeric vectors Time and Density (one entry per file)
# distance_counts : numeric vector indexed by distance value: observation count
# distance_total  : numeric vector indexed by distance value: summed Time
#
# Relies on the global `numbers` (node count, edge count parsed from the file
# name by the caller) for the density appended to per_edges$Density.
# Returns the updated accumulators as a named list.
process_file <- function(file_data, per_edges, distance_counts, distance_total) {
  # seq_along() visits exactly the rows present; the previous index-shifting
  # code (`if (i == -1) i <- 1 else i <- i + 1`) skipped row 1 and read one
  # element past the end, yielding an NA subscript on the final iteration.
  for (i in seq_along(file_data$Distance)) {
    d <- file_data$Distance[i]
    # Count occurrences of this distance. length() is checked before
    # is.na() so a NULL/empty lookup (fresh accumulator) short-circuits
    # instead of passing a zero-length value to &&.
    t <- distance_counts[d]
    distance_counts[d] <- 1
    if (length(t) > 0 && !is.na(t))
      distance_counts[d] <- t + distance_counts[d]
    # Accumulate total time for this distance.
    t <- distance_total[d]
    distance_total[d] <- file_data$Time[i]
    if (length(t) > 0 && !is.na(t))
      distance_total[d] <- t + distance_total[d]
  }
  # Mean query time for the whole file.
  per_edges$Time[length(per_edges$Time) + 1] <- sum(file_data$Time) / length(file_data$Time)
  # Graph density 2m / (n * (n - 1)) with n = numbers[1], m = numbers[2].
  per_edges$Density[length(per_edges$Density) + 1] <- 2 * numbers[2] / (numbers[1] * (numbers[1] - 1))
  return (list("per_edges" = per_edges, "distance_counts" = distance_counts, "distance_total" = distance_total))
}
# Each result file holds three 50-row measurement sections (read below with
# n_max/skip offsets); by the variable names these appear to be: local run,
# remote nodes, remote nodes + remote searcher -- TODO confirm against the
# benchmark writer.
for (file in files) {
  # Node count and edge count encoded in the file name; used globally by
  # process_file() for the density calculation.
  numbers <- as.numeric(str_extract_all(file, '\\d+')[[1]])
  # Section 1: rows after the header; non-numeric Time rows are dropped.
  file_data <- read_table(file, n_max=50) %>% mutate (Time = as.numeric(Time)) %>% drop_na
  rtrn <- process_file(file_data, per_edges, distance_counts, distance_total)
  per_edges <- rtrn$per_edges
  distance_counts <- rtrn$distance_counts
  distance_total <- rtrn$distance_total
  # Section 2: remote-nodes measurements.
  file_data <- read_table(file, skip=51, n_max=50) %>% mutate (Time = as.numeric(Time)) %>% drop_na
  rtrn <- process_file(file_data, remote_nodes_per_edges, remote_nodes_distance_counts, remote_nodes_distance_total)
  remote_nodes_per_edges <- rtrn$per_edges
  remote_nodes_distance_counts <- rtrn$distance_counts
  remote_nodes_distance_total <- rtrn$distance_total
  # Section 3: remote nodes + remote searcher measurements.
  file_data <- read_table(file, skip=102, n_max=50) %>% mutate (Time = as.numeric(Time)) %>% drop_na
  rtrn <- process_file(file_data, remote_both_per_edges, remote_both_distance_counts, remote_both_distance_total)
  remote_both_per_edges <- rtrn$per_edges
  remote_both_distance_counts <- rtrn$distance_counts
  remote_both_distance_total <- rtrn$distance_total
}
# Plot 1: mean query time vs graph density, one point per file per scenario.
# Note only per_edges is sorted here; the two remote lists are plotted as-is
# (geom_point does not require ordering).
per_edges <- per_edges %>% as_tibble %>% arrange(Density)
ggplot(NULL, aes(x=Density, y=Time)) +
  geom_point(data=as.data.frame(per_edges), aes(color="Time")) +
  geom_point(data=as.data.frame(remote_nodes_per_edges), aes(color="Remote nodes time")) +
  geom_point(data=as.data.frame(remote_both_per_edges), aes(color="Remote both time"))
ggsave("remote_nodes_and_searcher1.png")
# Per-distance averages; unseen distances stay NA and are dropped by ggplot.
distance_average <- ifelse(distance_counts > 0, distance_total / distance_counts, NA)
remote_nodes_distance_average <- ifelse(remote_nodes_distance_counts > 0, remote_nodes_distance_total / remote_nodes_distance_counts, NA)
remote_both_distance_average <- ifelse(remote_both_distance_counts > 0, remote_both_distance_total / remote_both_distance_counts, NA)
# Index i is labelled as distance i - 1 (axis label says 0 denotes infinity);
# NOTE(review): the accumulators are indexed by the raw distance value, so
# this labelling assumes the data encodes distances shifted by one -- verify.
local_data = cbind(Distance = (1:(distance_average %>% length)) - 1, Average_time = distance_average)
remote_nodes_data = cbind(Distance = (1:(remote_nodes_distance_average %>% length)) - 1, Average_time = remote_nodes_distance_average)
remote_both_data = cbind(Distance = (1:(remote_both_distance_average %>% length)) - 1, Average_time = remote_both_distance_average)
# Plot 2: average query time vs distance for the three scenarios.
ggplot(NULL, aes(x=Distance, y=Average_time)) +
  geom_point(data=as.data.frame(local_data), aes(color="Average local time")) +
  geom_point(data=as.data.frame(remote_nodes_data), aes(color="Average remote nodes time")) +
  geom_point(data=as.data.frame(remote_both_data), aes(color="Average remote both time")) +
  labs(x="Distance (0 denotes infinity)", y="Average time")
ggsave("remote_nodes_and_searcher2.png")
|
cba6173834de1a036879a842b30dd8fdbc6b141d | 7394bc0e210ebb12931965b405cd53223fc1fe09 | /inst/tinytest/cv-ncvreg.R | 2774c7acb5d9bf51eba6cfe4f9cea22f741879d6 | [] | no_license | pbreheny/ncvreg | 75d33b6807f923a0e42e7f01e17552cca430012b | a50df574898bc19b6cae8bd8eeb0766cc93f4af3 | refs/heads/master | 2023-06-21T12:33:13.494695 | 2023-04-03T23:28:53 | 2023-04-03T23:28:53 | 6,141,265 | 39 | 30 | null | 2021-01-19T17:14:17 | 2012-10-09T13:38:30 | R | UTF-8 | R | false | false | 3,036 | r | cv-ncvreg.R | suppressPackageStartupMessages(library(glmnet))
#### Linear regression ####
# Works
X <- matrix(rnorm(500), 50, 10)
y <- X[,1] + rnorm(50)
cvfit <- cv.ncvreg(X, y)
print(summary(cvfit))
# Predict
b <- coef(cvfit)
p <- predict(cvfit, X, type='link')
p <- predict(cvfit, X, type='response')
p <- predict(cvfit, X, type='coef')
p <- predict(cvfit, X, type='vars')
p <- predict(cvfit, X, type='nvars')
# Integers
X <- matrix(rpois(500, 1), 50, 10)
y <- rpois(50, 1)
cvfit <- cv.ncvreg(X, y)
# Data frame
cvfit <- cv.ncvreg(as.data.frame(X), y)
# LOOCV
X <- matrix(rnorm(25*4), 25, 4)
y <- rnorm(25)
cvfit <- cv.ncvreg(X, y, nfolds=25)
summary(cvfit)
cvfit <- cv.ncvreg(X, y, fold=1:25)
#### Logistic regression ####
# Works
X <- matrix(rnorm(500), 50, 10)
y <- rbinom(50, 1, binomial()$linkinv(X[,1]))
cvfit <- cv.ncvreg(X, y, family='binomial')
print(summary(cvfit))
# Predict
b <- coef(cvfit)
p <- predict(cvfit, X, type='link')
p <- predict(cvfit, X, type='response')
p <- predict(cvfit, X, type='class')
p <- predict(cvfit, X, type='coef')
p <- predict(cvfit, X, type='vars')
p <- predict(cvfit, X, type='nvars')
# LOOCV
X <- matrix(rnorm(30*2), 30, 2)
y <- rbinom(30, 1, 0.5)
cvfit <- cv.ncvreg(X, y, nfolds=30, family='binomial')
summary(cvfit)
cvfit <- cv.ncvreg(X, y, fold=1:30, family='binomial')
#### Poisson regression ####
# Works
cvfit <- cv.ncvreg(X, y, family='poisson')
summary(cvfit)
# Predict
b <- coef(cvfit)
p <- predict(cvfit, X, type='link')
p <- predict(cvfit, X, type='response')
p <- predict(cvfit, X, type='coef')
p <- predict(cvfit, X, type='vars')
p <- predict(cvfit, X, type='nvars')
# LOOCV
cvfit <- cv.ncvreg(X, y, nfolds=30, family='poisson')
summary(cvfit)
cvfit <- cv.ncvreg(X, y, fold=1:30, family='poisson')
######################################
# cv.ncvreg() seems to work
######################################
n <- 40
p <- 10
X <- matrix(rnorm(n*p), ncol=p)
b <- c(2, -2, 1, -1, rep(0, p-4))
y <- rnorm(n, mean=X%*%b, sd=2)
yb <- y > .5
yp <- rpois(n, exp(X%*%b/3))
par(mfrow=c(3,2))
gcvfit <- cv.glmnet(X, y, nfolds=n)
plot(gcvfit)
ncvfit <- cv.ncvreg(X, y, penalty="lasso", lambda=gcvfit$lambda, nfolds=n)
plot(ncvfit)
gcvfit <- cv.glmnet(X, yb, family="binomial", nfolds=n)
plot(gcvfit)
ncvfit <- cv.ncvreg(X, yb, family="binomial", penalty="lasso", lambda=gcvfit$lambda, nfolds=n)
plot(ncvfit)
cvfit <- cv.glmnet(X, yp, family="poisson")
plot(cvfit)
cvfit <- cv.ncvreg(X, yp, family="poisson", penalty="lasso", lambda=cvfit$lambda)
plot(cvfit)
##############################################
# cv.ncvreg() return LP array works
##############################################
n <- 100
p <- 10
X <- matrix(rnorm(n*p), ncol=p)
b <- c(-3, 3, rep(0, 8))
y <- rnorm(n, mean=X%*%b, sd=1)
cvfit <- cv.ncvreg(X, y, returnY=TRUE)
cve <- apply(cvfit$Y - y, 2, crossprod)/n
expect_equivalent(cve, cvfit$cve, tol= .001)
y <- rnorm(n, mean=X%*%b) > 0
cvfit <- cv.ncvreg(X, y, family='binomial', returnY=TRUE)
pe <- apply((cvfit$Y>=0.5)!=y, 2, mean)
expect_equivalent(pe, cvfit$pe, tol= .001)
|
aeecc440742e5a791acc699712366e14301d9240 | 8c662559e91c296b097e105eb62d2c748cb4898e | /matchingOverTime-loop.R | 28799a71301f37e44a70ceeae8a1d7abacf024c3 | [] | no_license | mfransham/2017-09-SHBE-analysis | 551351fcdd58c2e1c24a95b8e102e6abb59d00ea | d7c6112386b170b826eb5c1826bbd42aaaeab0bd | refs/heads/master | 2021-07-04T05:17:58.915694 | 2017-09-25T14:10:21 | 2017-09-25T14:10:21 | 104,734,945 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,154 | r | matchingOverTime-loop.R | # variables to match benefit units over time
library(dplyr)
library(ggplot2)
shbefile.1 <- as.character(filter(SHBEfile.matches, matchNo == matchIndex) %>% select(shbefile.1))
shbefile.2 <- as.character(filter(SHBEfile.matches, matchNo == matchIndex) %>% select(shbefile.2))
# read files
shbe.1 <- read.csv(shbefile.1,
sep = "|",
header=F,
strip.white=T,
stringsAsFactors = F)
shbe.2 <- read.csv(shbefile.2,
sep = "|",
header=F,
strip.white=T,
stringsAsFactors = F)
# variable names
shbecolnames <- read.csv("data/shbe-colnames-Apr2015.csv",
header=F,
strip.white=T)
colnames(shbe.1) <- shbecolnames$V1
colnames(shbe.2) <- shbecolnames$V1
rm(shbecolnames)
# remove header and footer, store elsewhere, create reference dates and years
headandfoot.1 <- shbe.1[c(1, nrow(shbe.1)), ] # store header and trailer records
shbe.1 <- shbe.1[-c(1, nrow(shbe.1)), ] # remove header and trailer records
refdate.1 <- headandfoot.1[1,3]
refyear.1 <- as.numeric(substr(refdate.1, 7, 10))
headandfoot.2 <- shbe.2[c(1, nrow(shbe.2)), ] # store header and trailer records
shbe.2 <- shbe.2[-c(1, nrow(shbe.2)), ] # remove header and trailer records
refdate.2 <- headandfoot.2[1,3]
refyear.2 <- as.numeric(substr(refdate.2, 7, 10))
# create a single 'id' variable
shbe.1 <- shbe.1 %>%
mutate(id = ifelse(is.na(LCHBREF) | LCHBREF=="", LCLCTRREF, LCHBREF))
shbe.2 <- shbe.2 %>%
mutate(id = ifelse(is.na(LCHBREF) | LCHBREF=="", LCLCTRREF, LCHBREF))
# select main D records for each time point
shbe.1.hh <- shbe.1 %>% filter(REC_TYPE=="D") %>% select(id, CCNINO, CCFORENAME, CCFORENAME2, CCSURNAME, CDDOB, CCSEX, CCPCODE, PCNINO, PCSURNAME, PCFORENAME)
colnames(shbe.1.hh) <- paste(colnames(shbe.1.hh), "1", sep=".")
shbe.2.hh <- shbe.2 %>% filter(REC_TYPE=="D") %>% select(id, CCNINO, CCFORENAME, CCFORENAME2, CCSURNAME, CDDOB, CCSEX, CCPCODE, PCNINO, PCSURNAME, PCFORENAME)
colnames(shbe.2.hh) <- paste(colnames(shbe.2.hh), "2", sep=".")
# link these together
hh.join <- full_join(shbe.1.hh, shbe.2.hh, by=c("id.1"="id.2"))
# let's see if there are people I can match who aren't matched by housing benefit ID
hh.join.unmatch <- filter(hh.join, is.na(CCNINO.1) | is.na(CCNINO.2))
hh.join.match <- filter(hh.join, !id.1 %in% hh.join.unmatch$id.1) %>%
select(id.1) %>% mutate(id.2=id.1)
# match on NI number
hh.join.match2 <- inner_join(hh.join.unmatch[c(1,2:11)], hh.join.unmatch[c(1,12:21)], by=c("CCNINO.1"="CCNINO.2")) %>%
filter(!CCNINO.1=="")
hh.join.match <- rbind(hh.join.match,
select(hh.join.match2, id.1.x, id.1.y) %>%
rename(id.1=id.1.x, id.2=id.1.y))
hh.join.unmatch <- filter(hh.join, !id.1 %in% hh.join.match$id.1 & !id.1 %in% hh.join.match$id.2)
# match on surname and DoB
hh.join.match3 <- inner_join(hh.join.unmatch[c(1,2:11)], hh.join.unmatch[c(1,12:21)],
by=c("CCSURNAME.1"="CCSURNAME.2",
"CDDOB.1"="CDDOB.2"))
# Not using because it will be matched below when joining partner and claimant NI numbers
# match on partner NI number - main claimant becoming partner
hh.join.match4 <- inner_join(hh.join.unmatch[c(1,2:11)], hh.join.unmatch[c(1,12:21)],
by=c("CCNINO.1"="PCNINO.2")) %>%
filter(!CCNINO.1=="")
hh.join.match <- rbind(hh.join.match,
select(hh.join.match4, id.1.x, id.1.y) %>%
rename(id.1=id.1.x, id.2=id.1.y))
hh.join.unmatch <- filter(hh.join, !id.1 %in% hh.join.match$id.1 & !id.1 %in% hh.join.match$id.2)
# match on partner NI number - partner becoming main claimant
hh.join.match5 <- inner_join(hh.join.unmatch[c(1,2:11)], hh.join.unmatch[c(1,12:21)],
by=c("PCNINO.1"="CCNINO.2")) %>%
filter(!PCNINO.1=="")
hh.join.match <- rbind(hh.join.match,
select(hh.join.match5, id.1.x, id.1.y) %>%
rename(id.1=id.1.x, id.2=id.1.y))
hh.join.unmatch <- filter(hh.join, !id.1 %in% hh.join.match$id.1 & !id.1 %in% hh.join.match$id.2)
# add in the unmatched id numbers
hh.join.unmatch <- select(hh.join.unmatch, id.1, CCNINO.1, CCNINO.2)
hh.join.unmatch <- mutate(hh.join.unmatch,
id = id.1,
id.1 = ifelse(is.na(CCNINO.1), NA, id),
id.2 = ifelse(is.na(CCNINO.2), NA, id))
hh.join.unmatch <- select(hh.join.unmatch, id.1, id.2)
hh.join.final <- rbind(hh.join.match, hh.join.unmatch)
# name the variables with the reference year
stem <- substr(colnames(hh.join.final), 1, nchar(colnames(hh.join.final))-1)
suffix <- substr(colnames(hh.join.final), nchar(colnames(hh.join.final)), nchar(colnames(hh.join.final)))
suffix <- ifelse(suffix=="1", refyear.1, refyear.2)
colnames(hh.join.final) <- paste0(stem, suffix)
rm(stem, suffix)
# write out to a csv file without NI numbers
write.csv(hh.join.final, paste0("../notForGitHub/hh.join.", refyear.1, "-", refyear.2, ".csv")) |
1b8e4eabfb76a754e173569303384e6293514847 | 0a3d5398e435fc81a61f832c0921304c72d7dbd5 | /man/unload.Rd | 31afa0d3190f500d7b5aced07a38ceac73dbd34e | [] | no_license | r-lib/pkgload | 609ea95e6f19b51f2ccfed1f71bbb0adbc647283 | 75938cd13f80e912af43d9632cdb54aa0bc9ebff | refs/heads/main | 2023-09-01T00:53:19.446111 | 2023-07-06T07:34:53 | 2023-07-06T08:15:47 | 73,123,753 | 46 | 38 | null | 2023-09-11T13:26:16 | 2016-11-07T21:45:48 | R | UTF-8 | R | false | true | 1,507 | rd | unload.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unload.R
\name{unload}
\alias{unload}
\alias{unregister}
\title{Unload a package}
\usage{
unload(package = pkg_name(), quiet = FALSE)
unregister(package = pkg_name())
}
\arguments{
\item{package}{package name.}
\item{quiet}{if \code{TRUE} suppresses output from this function.}
}
\description{
\code{unload()} attempts to cleanly unload a package, including unloading
its namespace, deleting S4 class definitions and unloading any loaded
DLLs. Unfortunately S4 classes are not really designed to be cleanly
unloaded, and so we have to manually modify the class dependency graph in
order for it to work - this works in the cases we have tested,
but there may be others. Similarly, automated DLL unloading is best tested
for simple scenarios (particularly with \code{useDynLib(pkgname)} and may
fail in other cases. If you do encounter a failure, please file a bug report
at \url{https://github.com/r-lib/pkgload/issues}.
\code{unregister()} is a gentler version of \code{unload()} which removes the
package from the search path, unregisters methods, and unregisters
the namespace. It doesn't unload the namespace or its DLL to keep
it in working order in case of dangling references.
}
\examples{
\dontrun{
# Unload package that is in current directory
unload()
# Unload package that is in ./ggplot2/
unload(pkg_name("ggplot2/"))
library(ggplot2)
# unload the ggplot2 package directly by name
unload("ggplot2")
}
}
|
ceb0c408942d0cfabe983ef7712237d17a3f5769 | ebd6a50bb49055ecba0527b1e3336a2e107ce9fc | /R/plot_cumu_set.R | b3129d32b3be4778fa31244fbca830092090d41c | [] | no_license | swmpkim/SETr | 3abbbc3e87004231703859f531e05559a9bef8b0 | 21a4d13fc0fd49215817e3b76878c35ea309f90c | refs/heads/master | 2021-07-12T11:10:47.274219 | 2021-02-27T17:24:20 | 2021-02-27T17:24:20 | 238,076,972 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,167 | r | plot_cumu_set.R | #' Make a graph of change over time by SET
#'
#' x-axis is date; y-axis is the average of the 36 pin heights' difference from baseline (first measurement). One facet per SET id.
#'
#' @param data data frame (e.g. `$set` piece of output from `calc_change_cumu()`) with one row per faceting variable, and the following columns, named exactly: date, set_id, mean_cumu. `mean_cumu` should be an already-calculated field of change since baseline.
#' @param columns number of columns you want in the faceted output
#' @param pointsize size of points you want (goes into the `size` argument of `ggplot2::geom_point`)
#' @param scales passed to `facet_wrap`; same fixed/free options as that function
#' @param smooth do you want a linear regression plotted on top?
#' @param lty_smooth type of line (1 = solid; 2 and 5 = dashed; normal line types)
#'
#' @return a ggplot object
#' @export
#'
#' @examples
#' cumu_set <- calc_change_cumu(example_sets)
#' plot_cumu_set(cumu_set$set)
#' plot_cumu_set(cumu_set$set, columns = 1, pointsize = 2, smooth = FALSE)
plot_cumu_set <- function(data, columns = 4, pointsize = 3.5, scales = "fixed", smooth = TRUE, lty_smooth = 5){
# data needs to be the $set piece of the output from calc_change_cumu
ggplot2::ggplot(data, ggplot2::aes(x = .data$date, y = .data$mean_cumu)) +
ggplot2::geom_line(col = 'lightsteelblue4') +
{if(smooth) ggplot2::geom_smooth(se = FALSE, method = 'lm',
col = 'steelblue4', lty = lty_smooth, size = 1)} +
ggplot2::geom_point(shape = 21,
fill = 'lightsteelblue1', col = 'steelblue3',
size = pointsize, alpha = 0.9) +
ggplot2::facet_wrap(~.data$set_id, ncol = columns, scales = scales) +
{if(smooth) ggplot2::labs(title = 'Cumulative Change since first reading',
x = 'Date',
y = 'Change since first reading (mm)')} +
{if(!smooth) ggplot2::labs(title = 'Cumulative Change since first reading',
x = 'Date',
y = 'Change since first reading (mm)')} +
ggplot2::theme_classic()
}
|
ff2812d1056bb57ee6b485353c8f848159ffb244 | cf294059ae844d1d2abfcc989e5e04ee124fe37b | /man/alignSamples.Rd | 8038eb38f3b08acaaa8bdb046c7eda7a07044257 | [] | no_license | camilleroquencourt/ptairMS | 3557ba24e722508590a79c35f0c999d6ae186124 | 491ff08df3b2a07808db363dca961459a8aa3944 | refs/heads/master | 2022-08-19T04:05:01.232422 | 2022-06-30T14:11:08 | 2022-06-30T14:11:08 | 226,823,108 | 7 | 2 | null | 2020-03-18T10:30:49 | 2019-12-09T08:35:15 | R | UTF-8 | R | false | true | 3,431 | rd | alignSamples.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/alignment.R
\name{alignSamples}
\alias{alignSamples}
\alias{alignSamples,ptrSet-method}
\title{Alignment between samples}
\usage{
alignSamples(
X,
ppmGroup = 70,
fracGroup = 0.8,
group = NULL,
fracExp = 0.3,
pValGreaterThres = 0.001,
pValLessThres = 0,
quantiUnit = c("ppb", "ncps", "cps")[1],
bgCorrected = TRUE,
dmzGroup = 0.001
)
\S4method{alignSamples}{ptrSet}(
X,
ppmGroup = 70,
fracGroup = 0.8,
group = NULL,
fracExp = 0.3,
pValGreaterThres = 0.001,
pValLessThres = 0,
quantiUnit = c("ppb", "ncps", "cps")[1],
bgCorrected = TRUE,
dmzGroup = 0.001
)
}
\arguments{
\item{X}{ptrSet already processed by the \code{\link[ptairMS]{detectPeak}}
function}
\item{ppmGroup}{ppm maximal width for an mz group}
\item{fracGroup}{only variables present in \code{fracGroup}
percent of at least one \code{group} will be kept (if 0 the filter is
not applied)}
\item{group}{character: sampleMetadata data column name. If \code{NULL},
variables not present in \code{fracGroup} percent of samples will be deleted.
Else, variables not present in \code{fracGroup} percent of samples in at least
one group will be removed.}
\item{fracExp}{fraction of samples which must have their p-value less than
\code{pValGreaterThres} and \code{pValLessThres}}
\item{pValGreaterThres}{threshold of the p-value for the unilateral
test that the quantifications (in cps) at expiration points are greater than
the intensities in the background.}
\item{pValLessThres}{threshold of the p-value for the unilateral
test that the quantifications (in cps) at expiration points are less than the
intensities of the background.}
\item{quantiUnit}{ppb, ncps or cps}
\item{bgCorrected}{logical: should the peak table contain the background
corrected values?}
\item{dmzGroup}{minimum mz width to be used for grouping the features
(required for low masses)}
}
\value{
an \code{\link[Biobase]{ExpressionSet}} (Biobase object)
}
\description{
\code{AlignSamples} performs alignment between samples (i.e. the matching of
variables between the peak lists within the \code{ptrSet} object) by using a
kernel gaussian density (Delabriere et al, 2017).
This function returns an \code{\link[Biobase]{ExpressionSet}}, which contains
the matrix of peak intensities, the sample metadata (borrowed from the
input ptrSet) and the variable metadata which contains the peak intensities in
the background.
Two filters may be applied to:
\itemize{
\item keep only variables with a significantly higher intensity in the
expirations compared to the background (i.e., a p-value less than
\code{pValGreaterThres}) for at least \code{fracExp} % of the samples
\item keep only variables which are detected in more
than \code{fracGroup} percent of the samples (or \code{group})
}
If you do not want to apply those filters, set \code{fracGroup} to 0 and
\code{pValGreaterThres} to 1.
}
\examples{
library(ptairData)
dirRaw <- system.file("extdata/exhaledAir", package = "ptairData")
exhaledPtrset <- createPtrSet(dir=dirRaw,
setName="exhaledPtrset", mzCalibRef = c(21.022, 60.0525),
fracMaxTIC = 0.7, saveDir = NULL )
exhaledPtrset<-detectPeak(exhaledPtrset,mzNominal=c(21,60,79))
eset <- alignSamples(exhaledPtrset,pValGreaterThres=0.05)
Biobase::exprs(eset)
Biobase::fData(eset)
Biobase::pData(eset)
}
\references{
Delabriere et al., 2017
}
|
a511ea751027a36dac367efaedb89eb437de5f1e | 4dbe90e567ee670e01a2e2ddf293fab56b5f815a | /man/start.experiment.Rd | afa624ef664637a0ed8d2dc905b7a3f6947e2ffb | [] | no_license | sashapr/betr | 2d39aa8625be5c4961354c42e3e6637bf5582524 | a5f8a7451aafc91f2d0559630792eaeee7d55783 | refs/heads/master | 2020-04-06T06:46:13.699847 | 2014-11-24T00:34:59 | 2014-11-24T00:35:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,360 | rd | start.experiment.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{start.Experiment}
\alias{start.Experiment}
\title{Start the experiment running.}
\usage{
\method{start}{Experiment}(experiment, force = FALSE)
}
\arguments{
\item{experiment}{Object of class Experiment}
\item{force}{Start the experiment even if there are fewer than N participants}
}
\value{
TRUE or FALSE, invisibly
}
\description{
Experiments can be in status Stopped, Waiting, Started, or Paused.
\code{start} moves the experiment from Waiting to Started.
If the experiment has autostart set, this will happen automatically
when N subjects have connected.
When the experiment starts, all subjects are moved to the first stage.
}
\details{
Note that \code{start} is an S3 generic to avoid clashes with \code{start} in
the stats package.
}
\examples{
start(expt)
}
\seealso{
Other command line functions: \code{\link{get_url}},
\code{\link{info}}, \code{\link{map}},
\code{\link{nperiods}},
\code{\link{print,Experiment,ANY-method}},
\code{\link{print,Experiment-method}},
\code{\link{print_stages}}, \code{\link{session_name}};
\code{\link{halt}}; \code{\link{load_commands}};
\code{\link{merge_subjects}}; \code{\link{next_stage}};
\code{\link{pause}}; \code{\link{ready}};
\code{\link{replay}}; \code{\link{restart}};
\code{\link{trace_stage}}, \code{\link{untrace_stage}}
}
|
0fe3e4f7ecd248d513c510ed6a8553e5525d33cb | 58e2ef9a0c60c30e728be8e4472da29d4da42048 | /library/tensorflow/tutorials/2-5_Overfitting_Underfitting.R | 923116a1c01b9f7d7787c90a608ace35801a3fa7 | [] | no_license | delta0726/r-deeplearning | 98ba3f982fbeacf7d7bc8b574f4e3cb398d192fb | 3f504b624e6ceb3ed55c36dc0ba63c930a83769d | refs/heads/master | 2023-07-17T14:58:17.990228 | 2021-08-20T22:14:54 | 2021-08-20T22:14:54 | 326,067,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,005 | r | 2-5_Overfitting_Underfitting.R | # ***************************************************************************************
# Title : Quick Start
# Objective : TODO
# Created by: Owner
# Created on: 2020/11/1
# URL : https://tensorflow.rstudio.com/tutorials/beginners/basic-ml/tutorial_overfit_underfit/
# ***************************************************************************************
# <オーバーフィッティングの回避策>
# - 過剰適合を防ぐための最善の解決策は、より多くのトレーニングデータを使用すること
# --- より多くのデータでトレーニングされたモデルはより一般化される
# - 正則化などの手法を使用する
# --- モデルが保存できる情報の量とタイプに制約を課す
# --- ネットワークが少数のパターンしか記憶できない場合、最適化プロセスにより、最も顕著なパターンに焦点を合わせるように強制される
# <目次>
# 0. 準備
# 1. データ準備
# 2. 過剰適合とは
# 3. モデル構築(ベースラインモデル)
# 4. モデル構築(小規模モデル)
# 5. モデル構築(大規模モデル)
# 6. 結果比較
# 7. 正則化の追加
# 8. ドロップアウトの追加
# 0. Setup -------------------------------------------------------------------------
# Libraries
library(reticulate)
library(tidyverse)
library(keras)
# Select the conda virtual environment backing keras/tensorflow
# (machine-specific Windows path)
use_condaenv("C:/Users/Owner/Anaconda3/envs/r-reticulate", required = TRUE)
py_config()
# 1. Data preparation ---------------------------------------------------------------
# Load data
# --- IMDB review dataset
# --- restricting the vocabulary size makes overfitting easier to observe
num_words <- 1000
imdb <- dataset_imdb(num_words = num_words)
# Split into training and test sets (zeallot-style multi-assignment)
c(train_data, train_labels) %<-% imdb$train
c(test_data, test_labels) %<-% imdb$test
# Function definition
# --- multi-hot encoding
# --- converts lists of word indices into 0/1 vectors
# Multi-hot encode a list of index vectors into a binary matrix.
#
# sequences: list of integer vectors (1-based word indices, one per review)
# dimension: number of columns (vocabulary size)
# Returns a length(sequences) x dimension matrix of 0s with 1s at each
# sequence's index positions.
multi_hot_sequences <- function(sequences, dimension) {
  multi_hot <- matrix(0, nrow = length(sequences), ncol = dimension)
  # seq_along() (not 1:length()) so an empty list yields a 0-row matrix
  # instead of iterating over c(1, 0) and failing on the subscript.
  for (i in seq_along(sequences)) {
    multi_hot[i, sequences[[i]]] <- 1
  }
  multi_hot
}
# Multi-hot encode both splits into n_samples x num_words 0/1 matrices
train_data <- train_data %>% multi_hot_sequences(num_words)
test_data <- test_data %>% multi_hot_sequences(num_words)
# Data for plotting
# --- word indices are sorted by frequency
first_text <- data.frame(word = 1:num_words, value = train_data[1, ])
# Plot one encoded review
# --- many 1-values cluster near index zero (frequent words)
first_text %>%
  ggplot(aes(x = word, y = value)) +
  geom_line() +
  theme(axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank())
# 2. 過剰適合とは --------------------------------------------------------------------------
# <ポイント>
# - 剰適合を防ぐ最も簡単な方法は、モデルのサイズ(学習可能なパラメーターの数)を減らすことです。
# --- レイヤーの数とレイヤーあたりのユニットの数によって決定される
# --- 学習可能なパラメーターの数は、モデルの「容量」と呼ばれることがよくある
# - パラメータが多いモデルほど「記憶能力」が高くなるため、トレーニングサンプルとそのターゲット間の完全なマッピングを行う
# --- 汎化能力のないマッピングが行われ、予測能力が期待できない
# --- ディープラーニングの目的は「適合」ではなく「一般化」
# - 一方、ネットワークの記憶リソースが限られている場合、マッピングを簡単に学習することはできません(学習不足)
# --- 損失を最小限に抑えるには、より予測力のある圧縮表現を学習する必要がある
# <方針>
# - モデルの適切なサイズまたはアーキテクチャを決定するための魔法の公式はない
# - 比較的少数のレイヤーとパラメーターから始めて、検証損失の収穫逓減が見られるまでモデルを拡張していく
# --- レイヤーのサイズを増やす
# --- 新しいレイヤーを追加する
# 3. モデル構築(ベースラインモデル) --------------------------------------------------------------------
# ネットワーク構築
baseline_model <-
keras_model_sequential() %>%
layer_dense(units = 16, activation = "relu", input_shape = num_words) %>%
layer_dense(units = 16, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid")
# コンパイル
baseline_model %>% compile(
optimizer = "adam",
loss = "binary_crossentropy",
metrics = list("accuracy")
)
# サマリー
baseline_model %>% summary()
# トレーニング実行
baseline_history <-
baseline_model %>%
fit(train_data,
train_labels,
epochs = 20,
batch_size = 512,
validation_data = list(test_data, test_labels),
verbose = 2)
# 4. モデル構築(小規模モデル) -------------------------------------------------------------------
# ネットワーク構築
smaller_model <-
keras_model_sequential() %>%
layer_dense(units = 4, activation = "relu", input_shape = num_words) %>%
layer_dense(units = 4, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid")
# コンパイル
smaller_model %>% compile(
optimizer = "adam",
loss = "binary_crossentropy",
metrics = list("accuracy")
)
# サマリー
smaller_model %>% summary()
# トレーニング実行
smaller_history <-
smaller_model %>%
fit(train_data,
train_labels,
epochs = 20,
batch_size = 512,
validation_data = list(test_data, test_labels),
verbose = 2)
# 5. モデル構築(大規模モデル) -------------------------------------------------------------------
# ネットワーク構築
bigger_model <-
keras_model_sequential() %>%
layer_dense(units = 512, activation = "relu", input_shape = num_words) %>%
layer_dense(units = 512, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid")
# コンパイル
bigger_model %>% compile(
optimizer = "adam",
loss = "binary_crossentropy",
metrics = list("accuracy")
)
# サマリー
bigger_model %>% summary()
# トレーニング実行
bigger_history <-
bigger_model %>%
fit(train_data,
train_labels,
epochs = 20,
batch_size = 512,
validation_data = list(test_data, test_labels),
verbose = 2)
# 6. 結果比較 -------------------------------------------------------------------------------
# データ作成
# --- プロット用
compare_cx <-
data.frame(baseline_train = baseline_history$metrics$loss,
baseline_val = baseline_history$metrics$val_loss,
smaller_train = smaller_history$metrics$loss,
smaller_val = smaller_history$metrics$val_loss,
bigger_train = bigger_history$metrics$loss,
bigger_val = bigger_history$metrics$val_loss) %>%
rownames_to_column() %>%
mutate(rowname = as.integer(rowname)) %>%
gather(key = "type", value = "value", -rowname)
# プロット作成
# --- 結果比較
compare_cx %>%
ggplot(aes(x = rowname, y = value, color = type)) +
geom_line() +
xlab("epoch") +
ylab("loss")
# 7. 正則化の追加 -------------------------------------------------------------------------------
# <ポイント>
# - 2つの説明が与えられた場合、正しいと思われる説明は「最も単純なもの」「仮定の量が最も少ないもの」である
# --- DLにおける「単純なモデル」とは、パラメーター値の分布のエントロピーが少ないモデル
# - 過剰適合を軽減する方法の1つは、ネットワークの重みに小さな値のみを適用することでネットワークの複雑さに制約を課すこと(正則化)
# --- これにより、重み値の分布がより「規則的」になる
# --- ネットワークの損失関数に大きな重みを持つことに関連するコストを追加することによって行われます
# - 正則化には「L1正則化」と「L2正則化」の2種類がある
# --- L1正則化: 追加されるコストは、ウエイト係数の絶対値に比例(Lasso)
# --- L2正則化: 追加されるコストは、ウエイト係数の二乗に比例(Ridge)
# モデル構築
# --- レイヤーに正則化を追加
l2_model <-
keras_model_sequential() %>%
layer_dense(units = 16, activation = "relu", input_shape = num_words,
kernel_regularizer = regularizer_l2(l = 0.001)) %>%
layer_dense(units = 16, activation = "relu",
kernel_regularizer = regularizer_l2(l = 0.001)) %>%
layer_dense(units = 1, activation = "sigmoid")
# コンパイル
l2_model %>%
compile(optimizer = "adam",
loss = "binary_crossentropy",
metrics = list("accuracy"))
# サマリー
l2_model %>% summary()
# トレーニング実行
l2_history <-
l2_model %>%
fit(train_data,
train_labels,
epochs = 20,
batch_size = 512,
validation_data = list(test_data, test_labels),
verbose = 2)
# データ作成
# --- プロット用
compare_cx <-
data.frame(baseline_train = baseline_history$metrics$loss,
baseline_val = baseline_history$metrics$val_loss,
l2_train = l2_history$metrics$loss,
l2_val = l2_history$metrics$val_loss) %>%
rownames_to_column() %>%
mutate(rowname = as.integer(rowname)) %>%
gather(key = "type", value = "value", -rowname)
# プロット作成
# --- L2正則化が入るとLossが過剰に減少しなくなる
# --- 過剰適合に対して耐性を得ている
compare_cx %>%
ggplot(aes(x = rowname, y = value, color = type)) +
geom_line() +
xlab("epoch") +
ylab("loss")
# 8. ドロップアウトの追加 ------------------------------------------------------------------------
# <ポイント>
# - ニューラルネットワークで最も効果的で最も一般的に使用されている正則化手法の1つ
# - ドロップアウトは、トレーニング中にレイヤーのいくつかの出力機能をランダムに「ドロップアウト」(ゼロに設定)することで構成
# モデル構築
# --- ドロップアウトを追加
dropout_model <-
keras_model_sequential() %>%
layer_dense(units = 16, activation = "relu", input_shape = num_words) %>%
layer_dropout(0.6) %>%
layer_dense(units = 16, activation = "relu") %>%
layer_dropout(0.6) %>%
layer_dense(units = 1, activation = "sigmoid")
# コンパイル
dropout_model %>%
compile(optimizer = "adam",
loss = "binary_crossentropy",
metrics = list("accuracy"))
# サマリー
# --- 設定は表示されない
dropout_model %>% summary()
# トレーニング実行
dropout_history <-
dropout_model %>%
fit(train_data,
train_labels,
epochs = 20,
batch_size = 512,
validation_data = list(test_data, test_labels),
verbose = 2)
# データ作成
# --- プロット用
compare_cx <-
data.frame(baseline_train = baseline_history$metrics$loss,
baseline_val = baseline_history$metrics$val_loss,
dropout_train = dropout_history$metrics$loss,
dropout_val = dropout_history$metrics$val_loss) %>%
rownames_to_column() %>%
mutate(rowname = as.integer(rowname)) %>%
gather(key = "type", value = "value", -rowname)
# プロット作成
# --- L2正則化が入るとLossが過剰に減少しなくなる
# --- 過剰適合に対して耐性を得ている
compare_cx %>%
ggplot(aes(x = rowname, y = value, color = type)) +
geom_line() +
xlab("epoch") +
ylab("loss")
|
dcf344e5292c1532c40e3b5122a9a9e435e3a449 | c987617c7a52a18c327547fe76f5ecf5f4e636ec | /plot3.R | 97c07bca25f0fa9a3186745278dc38ad169adc0c | [] | no_license | lyonspat13/ExData_Plotting1 | 070568ddec13c33bf21b7af7472f9905e22282c4 | 182923ad478e1eb0d580c581540ab088cab402bd | refs/heads/master | 2021-01-15T19:49:17.359825 | 2015-09-13T20:19:43 | 2015-09-13T20:19:43 | 42,410,542 | 0 | 0 | null | 2015-09-13T19:52:35 | 2015-09-13T19:52:35 | null | UTF-8 | R | false | false | 1,906 | r | plot3.R | ### First we have the code for loading the data, which is the same for all plots ###
## Estimate in-memory size of the full data set (numeric cells only), in GB
object.size(c(1)) * 2075259 * 9 / (10 ^9) ## roughly estimates full data size in gb, says 'bytes'
data<- read.table("household_power_consumption.txt",header = TRUE, sep=";") ## reads in full data set
data$dateTime <- strptime(paste(data$Date,data$Time), "%d/%m/%Y %H:%M:%S" ) ## creates new column and converts to POSIXlt
data <- subset(data, dateTime > as.POSIXlt("2007-01-31 23:59:59")) ## removes days that are too early
data <- subset(data, dateTime < as.POSIXlt("2007-02-03 00:00:00")) ## removes days that are too late
## Measurement columns are read as character/factor; coerce the ones used below to numeric
data$Global_active_power <- as.numeric(as.character(data$Global_active_power))
data$Sub_metering_1 <- as.numeric(as.character(data$Sub_metering_1))
data$Sub_metering_2 <- as.numeric(as.character(data$Sub_metering_2))
data$Voltage <- as.numeric(as.character(data$Voltage))
data$Global_reactive_power <- as.numeric(as.character(data$Global_reactive_power))
### End Code for loading the data ###
### Code to Create Plot 3 ###
## Stack the three sub-metering series long-wise so each can be drawn as its own line
dateTime <- c(data$dateTime,data$dateTime,data$dateTime)
subValue <- c(data$Sub_metering_1,data$Sub_metering_2,data$Sub_metering_3)
## 2880 = 2 days x 24 h x 60 min of per-minute observations per series -- TODO confirm against the filtered data
subText <- c(rep("Sub_metering_1",2880),rep("Sub_metering_2",2880),rep("Sub_metering_3",2880))
newdata <- data.frame(dateTime,subValue,subText)
#par(mfrow = c(1,1))
## Draw an empty canvas first (type = "n"), then overlay one colored line per sub-meter
with(newdata, plot(dateTime, subValue, type = "n", ylab = "Energy sub metering"))
with(subset(newdata, subText == "Sub_metering_1"), lines(dateTime, subValue, col = "black"))
with(subset(newdata, subText == "Sub_metering_2"), lines(dateTime, subValue, col = "red"))
with(subset(newdata, subText == "Sub_metering_3"), lines(dateTime, subValue, col = "blue"))
legend("topright", lty = 1, col = c("black", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
## Copy the on-screen device to a PNG file, then close the PNG device
dev.copy(png, file = "plot3.png")
dev.off()
### End Code to Create Plot 3 ###
|
bcb8dca04bce7279adc44b0f70a6da3aa64e953b | 5524c80b983a068aa0805f5a68fcecc65c6de12d | /lab6-1.R | 7b3f093e0c873527dd50c8e34f2d2cb5ab74bb3c | [] | no_license | sebastianpantin/Stochastic-data-processing-and-simulation-R | 40f08770b53458382c663ea392bc609f36896316 | b9865a6458f7ca4c9eba85fc8f2bbf234b362f8e | refs/heads/master | 2021-01-20T11:10:45.183911 | 2017-10-18T10:26:06 | 2017-10-18T10:26:06 | 101,667,228 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 599 | r | lab6-1.R | g <- function(timeVector) {
nbrOfTimes <- length(timeVector)
out <- rep(1,nbrOfTimes);
out[timeVector < 0 ] = 0
out[timeVector > 0.5 ] = 0
return(out)
}
n=100;
xsi <- rexp(n, rate = 1)
eta <- rexp(n, rate = 1)
shotNoise <- function(time) {
  # Shot-noise value at a single time point: sums rectangular pulses g()
  # anchored at the arrival times of the forward process `xsi` (horizon 10)
  # and the backward process `eta` (horizon 0.5).
  # NOTE(review): reads the globals `xsi`, `eta` and the pulse function `g`
  # defined earlier in this script -- confirm they are in scope at call time.
  keep_fwd <- cumsum(xsi) <= 10    # prefix of arrivals within [0, 10]
  keep_bwd <- cumsum(eta) <= 0.5   # prefix of arrivals within [0, 0.5]
  # (The unused k1/k2 counts of the original were dead code and are removed.)
  sum(g(time - cumsum(xsi[keep_fwd]))) + sum(g(time + cumsum(eta[keep_bwd])))
}
n=100;
time = matrix(seq(0,10,0.1),1)
result = cumsum(c(0,(apply(time, 2, shotNoise))))
plot(stepfun(time,result),xlim = c(0,10), do.points =F) |
44f8256263cdd33b1c0f722c203d24e30ece8f86 | 149d8c5dc5216adab97e6acf528d87bac9705a4a | /R/isFunctions.R | d0b053c9fccdb17b4cf7533c72e649255b6a642a | [] | no_license | andrewjameshaynes/isPackage | b91ebde61f73cb8128fb52edbe607126a323563f | 583bb843703c056dfb8b3badc172e71c199eb48f | refs/heads/master | 2020-03-11T10:50:48.075476 | 2018-04-18T21:55:45 | 2018-04-18T21:55:45 | 129,953,432 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,249 | r | isFunctions.R | ##is.zero() - checks if a numeric is zero
is.zero <- function(x, strict = FALSE) {
  # Test for zero.
  #
  # x:      value(s) to test.
  # strict: if TRUE, require exact object identity with the double 0
  #         (so 0L is NOT strictly zero); otherwise (default) return a
  #         vectorized elementwise comparison, where NA stays NA.
  # Uses TRUE/FALSE instead of the reassignable T/F literals, and no longer
  # returns NULL silently when `strict` is neither TRUE nor FALSE.
  if (isTRUE(strict)) {
    identical(x, 0)
  } else {
    # equivalent to the old ifelse(x == 0, T, F)
    x == 0
  }
}
##is.natural() - checks to see if a number is both whole, and > 0
## Stops on non-numeric input; otherwise returns, vectorized over x, TRUE
## where x is a positive whole number (fractional part is zero).
## NOTE(review): mod0() is defined elsewhere in this package -- presumably a
## zero-safe modulo/difference helper; confirm its contract before relying
## on edge cases (negative or non-finite x).
is.natural<-function(x){
if(is.numeric(x) == F){
stop("your input is not a number!")
}
## is.numeric(x) is already guaranteed TRUE here; the ifelse() keeps the
## test vectorized over x, yielding FALSE for non-positive elements
ifelse(is.numeric(x) == T & x > 0, is.zero(mod0(x, floor(x))), FALSE)
}
##: is.decimal() - checks to see if a number is a decimal, both positive and negative
##: where a decimal is defined as a number that can't coerce to integer without rounding
## Takes the absolute fractional part of x (via the package helper mod0) and
## returns TRUE when that part is neither zero nor a natural number, i.e. x
## has a genuine fractional component. NOTE(review): depends on mod0(),
## is.natural() and is.zero() from this file -- verify mod0()'s behavior.
is.decimal<-function(x){
tmp = abs(mod0(x,floor(x)))
ifelse(!(is.natural(tmp) | is.zero(tmp)),T,F)
}
##: is.decimal_forced() - Checks the string conversion of x for a period.
## Returns TRUE if any element of x renders with a "." via as.character().
## Caveat: numbers printed in scientific notation (e.g. 1e+20) contain no
## period and therefore return FALSE.
is.decimal_forced <- function(x) {
  if (!is.numeric(x)) {
    stop("your input is not a number!")
  }
  # grepl(fixed = TRUE) replaces the broken el(strsplit(...)) lookup: the
  # original called methods::el() without its required second argument.
  any(grepl(".", as.character(x), fixed = TRUE))
}
##: is.discrete() - checks to see if a single (|) value is char or factor
## Fixed: the original tested is.character(u), referencing an undefined `u`
## instead of the argument `x`.
is.discrete <- function(x) {
  is.factor(x) || is.character(x)
}
##: is.continuous() - checks to see if value is numeric and can therefore be applied to continuous scale
is.continuous <- function(x) {
  # inherits() always yields a single TRUE/FALSE; the original
  # ifelse(class(x) %in% ..., T, F) returned a logical *vector* whenever
  # class(x) had several elements (e.g. c("matrix", "array")).
  inherits(x, c("numeric", "integer", "double"))
}
|
1891d72f6e3c7f84541c62f0a54afcae9e5b591c | 1686fa06d5eb86865d08cb08d8ee300a407505bc | /R/cvi.R | 0174955d8cd67ac5dab5d0afa8d258ae6a831741 | [] | no_license | ForkBackups/dtwclust | 1d6fa2002a74988dd65c0d7a9dcaeadff6dcae55 | 88eeb7767d80764084cecb49d96b8d753b74d1c7 | refs/heads/master | 2021-01-21T18:10:18.734116 | 2017-05-21T13:21:20 | 2017-05-21T13:21:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,573 | r | cvi.R | #' Cluster validity indices
#'
#' Compute different cluster validity indices (CVIs) of a given cluster partition, using the
#' clustering distance measure and centroid function if applicable.
#'
#' @export
#' @exportMethod cvi
#'
#' @param a An object returned by the [dtwclust()] or [tsclust()] function, or a vector that can be
#' coerced to integers which indicate the cluster memeberships.
#' @param b If needed, a vector that can be coerced to integers which indicate the cluster
#' memeberships. The ground truth (if known) should be provided here.
#' @param type Character vector indicating which indices are to be computed. See supported values
#' below.
#' @param ... Arguments to pass to and from other methods.
#' @param log.base Base of the logarithm to be used in the calculation of VI.
#'
#' @details
#'
#' Clustering is commonly considered to be an unsupervised procedure, so evaluating its performance
#' can be rather subjective. However, a great amount of effort has been invested in trying to
#' standardize cluster evaluation metrics by using cluster validity indices (CVIs).
#'
#' CVIs can be classified as internal, external or relative depending on how they are computed.
#' Focusing on the first two, the crucial difference is that internal CVIs only consider the
#' partitioned data and try to define a measure of cluster purity, whereas external CVIs compare the
#' obtained partition to the correct one. Thus, external CVIs can only be used if the ground truth
#' is known. Each index defines their range of values and whether they are to be minimized or
#' maximized. In many cases, these CVIs can be used to evaluate the result of a clustering algorithm
#' regardless of how the clustering works internally, or how the partition came to be.
#'
#' Knowing which CVI will work best cannot be determined a priori, so they should be tested for each
#' specific application. Usually, many CVIs are utilized and compared to each other, maybe using a
#' majority vote to decide on a final result. Furthermore, it should be noted that many CVIs perform
#' additional distance calculations when being computed, which can be very considerable if using
#' DTW.
#'
#' Note that, even though a fuzzy partition can be changed into a crisp one, making it compatible
#' with many of the existing CVIs, there are also fuzzy CVIs tailored specifically to fuzzy
#' clustering, and these may be more suitable in those situations, but have not been implemented
#' here yet.
#'
#' @return The chosen CVIs
#'
#' @section External CVIs:
#'
#' The first 4 CVIs are calculated via [flexclust::comPart()], so please refer to that function.
#'
#' - `"RI"`: Rand Index (to be maximized).
#' - `"ARI"`: Adjusted Rand Index (to be maximized).
#' - `"J"`: Jaccard Index (to be maximized).
#' - `"FM"`: Fowlkes-Mallows (to be maximized).
#' - `"VI"`: Variation of Information (Meila (2003); to be minimized).
#'
#' @section Internal CVIs:
#'
#' The indices marked with an exclamation mark (!) calculate (or re-use if already available) the
#' whole distance matrix between the series in the data. If you were trying to avoid this in the
#' first place, then these CVIs might not be suitable for your application.
#'
#' The indices marked with a question mark (?) depend on the extracted centroids, so bear that in
#' mind if a hierarchical procedure was used and/or the centroid function has associated
#' randomness (such as [shape_extraction()] with series of different length).
#'
#' The indices marked with a tilde (~) require the calculation of a global centroid. Since [DBA()]
#' and [shape_extraction()] (for series of different length) have some randomness associated,
#' these indices might not be appropriate for those centroids.
#'
#' - `"Sil"` (!): Silhouette index (Arbelaitz et al. (2013); to be maximized).
#' - `"D"` (!): Dunn index (Arbelaitz et al. (2013); to be maximized).
#' - `"COP"` (!): COP index (Arbelaitz et al. (2013); to be minimized).
#' - `"DB"` (?): Davies-Bouldin index (Arbelaitz et al. (2013); to be minimized).
#' - `"DBstar"` (?): Modified Davies-Bouldin index (DB*) (Kim and Ramakrishna (2005); to be
#' minimized).
#' - `"CH"` (~): Calinski-Harabasz index (Arbelaitz et al. (2013); to be maximized).
#' - `"SF"` (~): Score Function (Saitta et al. (2007); to be maximized).
#'
#' @section Additionally:
#'
#' - `"valid"`: Returns all valid indices depending on the type of `a` and whether `b` was
#' provided or not.
#' - `"internal"`: Returns all internal CVIs. Only supported for [dtwclust-class] and
#' [TSClusters-class] objects.
#' - `"external"`: Returns all external CVIs. Requires `b` to be provided.
#'
#' @note
#'
#' In the original definition of many internal CVIs, the Euclidean distance and a mean centroid was
#' used. The implementations here change this, making use of whatever distance/centroid was chosen
#' during clustering.
#'
#' Some internal indices require the original data for calculations, so the control flag `save.data`
#' must be set to `TRUE` when running the clustering algorithm.
#'
#' The formula for the SF index in Saitta et al. (2007) does not correspond to the one in Arbelaitz
#' et al. (2013). The one specified in the former is used here.
#'
#' @references
#'
#' Arbelaitz, O., Gurrutxaga, I., Muguerza, J., Perez, J. M., & Perona, I. (2013). An extensive
#' comparative study of cluster validity indices. Pattern Recognition, 46(1), 243-256.
#'
#' Kim, M., & Ramakrishna, R. S. (2005). New indices for cluster validity assessment. Pattern
#' Recognition Letters, 26(15), 2353-2363.
#'
#' Meila, M. (2003). Comparing clusterings by the variation of information. In Learning theory and
#' kernel machines (pp. 173-187). Springer Berlin Heidelberg.
#'
#' Saitta, S., Raphael, B., & Smith, I. F. (2007). A bounded index for cluster validity. In
#' International Workshop on Machine Learning and Data Mining in Pattern Recognition (pp. 174-187).
#' Springer Berlin Heidelberg.
#'
setGeneric("cvi", def = function(a, b = NULL, type = "valid", ..., log.base = 10) {
    ## Default method computes only the *external* CVIs, which compare two
    ## membership vectors; internal CVIs live in dtwclust-methods.R.
    if (is.null(b))
        stop("A second set of cluster membership indices is required in 'b' for this/these CVI(s).")
    ## Coerce memberships to integer cluster ids and require equal lengths.
    a <- as.integer(a)
    b <- as.integer(b)
    if (length(a) != length(b)) stop("External CVIs: the length of 'a' and 'b' must match.")
    type <- match.arg(type, several.ok = TRUE,
                      choices = c("RI", "ARI", "J", "FM", "VI", "valid", "external"))
    ## "valid"/"external" act as aliases for "all external indices".
    if (any(type %in% c("valid", "external")))
        type <- c("RI", "ARI", "J", "FM", "VI")
    ## Rand, adjusted Rand, Jaccard and Fowlkes-Mallows are delegated to flexclust.
    which_flexclust <- type %in% c("RI", "ARI", "J", "FM")
    if (any(which_flexclust))
        CVIs <- flexclust::comPart(x = a, y = b, type = type[which_flexclust])
    else
        CVIs <- numeric()
    if (any(type == "VI")) {
        ## Variation of information
        ## taken from https://github.com/cran/mcclust/blob/master/R/vi.dist.R
        ## entropy of a partition, in base log.base
        ent <- function(cl) {
            n <- length(cl)
            p <- table(cl) / n
            -sum(p * log(p, base = log.base))
        }
        ## mutual information between the two partitions
        mi <- function(cl1, cl2) {
            p12 <- table(cl1, cl2) / length(cl1)
            p1p2 <- outer(table(cl1) / length(cl1), table(cl2) / length(cl2))
            sum(p12[p12 > 0] * log(p12[p12 > 0] / p1p2[p12 > 0], base = log.base))
        }
        ## VI(a, b) = H(a) + H(b) - 2 I(a, b); equals 0 iff the partitions coincide
        VI <- ent(a) + ent(b) - 2 * mi(a, b)
        CVIs <- c(CVIs, VI = VI)
    }
    ## Named numeric vector of the requested indices.
    CVIs
})
|
b102e50942e9e35396449459521ccdd2a84bdff8 | 8f6e2b326ad3823b369bb6d9e07220d3dfa6756d | /poweranalysisVSmean_logscore.r | e02bb3f933e868ce56015c69238b21948e9f3372 | [] | no_license | jbessac/uncertainty_scoring | 491737ccf905fc6912a95566e72c305cb1dfe10c | 70f695106f762f626c931f786a4c49764f2f3d09 | refs/heads/main | 2023-03-08T17:47:59.734970 | 2021-02-05T01:14:56 | 2021-02-05T01:14:56 | 336,066,557 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,262 | r | poweranalysisVSmean_logscore.r |
rm(list=ls())
### --- true parameters
mu0 <- 0
sig0 <- 2
### ------------------------------------------------- statistical test and associated power
mu <- seq(mu0-3,mu0+3,by=.05)
Nsim <- 10000
reject_sx_mu <- NULL ; accept_sx <- NULL
reject_sy_mu <- NULL ; accept_sy <- NULL
reject_sc_mu <- NULL ; accept_sc <- NULL
c <- sqrt(.25)
rej_sx <- rep(0,length(mu)) ; acc_sx <- rep(0,length(mu))
rej_sy <- rep(0,length(mu)) ; acc_sy <- rep(0,length(mu))
rej_sc <- rep(0,length(mu)) ; acc_sc <- rep(0,length(mu))
for (s in 1:Nsim){
x0 <- rnorm(n=1000,mean=0,sd=1)
for (i in 1:length(mu)){
x <- mu0 + sig0*x0
y <- mu0 + sqrt(sig0^2+c^2)*x0
ybar <- (mu0*c^2)/(sig0^2+c^2) + (y*sig0^2)/(sig0^2+c^2)
Es0 <- log(sig0) + 1/2 + log(2*pi)/2
sx <- (log(sig0) + ((x - mu[i])^2)/(2*sig0^2) + log(2*pi)/2)
sy <- (log(sig0) + ((y - mu[i])^2)/(2*sig0^2) + log(2*pi)/2)
sc <- (log(sig0) + ((sig0^2*c^2)/(sig0^2+c^2) + (ybar - mu[i])^2)/(2*sig0^2) + log(2*pi)/2)
ICix <- Es0 - 1.96*sd(sx)/sqrt(length(x)) ; ICsx <- Es0 + 1.96*sd(sx)/sqrt(length(x))
ICiy <- Es0 - 1.96*sd(sy)/sqrt(length(x)) ; ICsy <- Es0 + 1.96*sd(sy)/sqrt(length(x))
ICic <- Es0 - 1.96*sd(sc)/sqrt(length(x)) ; ICsc <- Es0 + 1.96*sd(sc)/sqrt(length(x))
if (( mean(sx) > ICix )&( mean(sx) < ICsx )){ acc_sx[i] <- acc_sx[i] +1 }
else { rej_sx[i] <- rej_sx[i] +1 }
if (( mean(sy) > ICiy )&( mean(sy) < ICsy )){ acc_sy[i] <- acc_sy[i] +1 }
else { rej_sy[i] <- rej_sy[i] +1 }
if (( mean(sc) > ICic )&( mean(sc) < ICsc )){ acc_sc[i] <- acc_sc[i] +1 }
else { rej_sc[i] <- rej_sc[i] +1 } }}
accept_sx[[1]] <- acc_sx/Nsim ; reject_sx_mu[[1]] <- rej_sx/Nsim
accept_sy[[1]] <- acc_sy/Nsim ; reject_sy_mu[[1]] <- rej_sy/Nsim
accept_sc[[1]] <- acc_sc/Nsim ; reject_sc_mu[[1]] <- rej_sc/Nsim
c <- sqrt(0.5)
rej_sx <- rep(0,length(mu)) ; acc_sx <- rep(0,length(mu))
rej_sy <- rep(0,length(mu)) ; acc_sy <- rep(0,length(mu))
rej_sc <- rep(0,length(mu)) ; acc_sc <- rep(0,length(mu))
rej_med_sx <- rep(0,length(mu)) ; acc_med_sx <- rep(0,length(mu))
rej_med_sy <- rep(0,length(mu)) ; acc_med_sy <- rep(0,length(mu))
rej_med_sc <- rep(0,length(mu)) ; acc_med_sc <- rep(0,length(mu))
for (s in 1:Nsim){
for (i in 1:length(mu)){
x0 <- rnorm(n=1000,mean=0,sd=1)
x <- mu0 + sig0*x0
y <- mu0 + sqrt(sig0^2+c^2)*x0
ybar <- (mu0*c^2)/(sig0^2+c^2) + (y*sig0^2)/(sig0^2+c^2)
Es0 <- log(sig0) + 1/2 + log(2*pi)/2
sx <- log(sig0) + ((x - mu[i])^2)/(2*sig0^2) + log(2*pi)/2
sy <- log(sig0) + ((y - mu[i])^2)/(2*sig0^2) + log(2*pi)/2
sc <- log(sig0) + ((sig0^2*c^2)/(sig0^2+c^2) + (ybar - mu[i])^2)/(2*sig0^2) + log(2*pi)/2
ICix <- Es0 - 1.96*sd(sx)/sqrt(length(x)) ; ICsx <- Es0 + 1.96*sd(sx)/sqrt(length(x))
ICiy <- Es0 - 1.96*sd(sy)/sqrt(length(x)) ; ICsy <- Es0 + 1.96*sd(sy)/sqrt(length(x))
ICic <- Es0 - 1.96*sd(sc)/sqrt(length(x)) ; ICsc <- Es0 + 1.96*sd(sc)/sqrt(length(x))
# power of the mean score
if (( mean(sx) > ICix )&( mean(sx) < ICsx )){ acc_sx[i] <- acc_sx[i] +1 }
else { rej_sx[i] <- rej_sx[i] +1 }
if (( mean(sy) > ICiy )&( mean(sy) < ICsy )){ acc_sy[i] <- acc_sy[i] +1 }
else { rej_sy[i] <- rej_sy[i] +1 }
if (( mean(sc) > ICic )&( mean(sc) < ICsc )){ acc_sc[i] <- acc_sc[i] +1 }
else { rej_sc[i] <- rej_sc[i] +1 }
}}
accept_sx[[2]] <- acc_sx/Nsim ; reject_sx_mu[[2]] <- rej_sx/Nsim
accept_sy[[2]] <- acc_sy/Nsim ; reject_sy_mu[[2]] <- rej_sy/Nsim
accept_sc[[2]] <- acc_sc/Nsim ; reject_sc_mu[[2]] <- rej_sc/Nsim
#
c <- sqrt(1)
rej_sx <- rep(0,length(mu)) ; acc_sx <- rep(0,length(mu))
rej_sy <- rep(0,length(mu)) ; acc_sy <- rep(0,length(mu))
rej_sc <- rep(0,length(mu)) ; acc_sc <- rep(0,length(mu))
for (s in 1:Nsim){
for (i in 1:length(mu)){
x0 <- rnorm(n=1000,mean=0,sd=1)
x <- mu0 + sig0*x0
y <- mu0 + sqrt(sig0^2+c^2)*x0
ybar <- (mu0*c^2)/(sig0^2+c^2) + (y*sig0^2)/(sig0^2+c^2)
Es0 <- log(sig0) + 1/2 + log(2*pi)/2
sx <- log(sig0) + ((x - mu[i])^2)/(2*sig0^2) + log(2*pi)/2
sy <- log(sig0) + ((y - mu[i])^2)/(2*sig0^2) + log(2*pi)/2
sf <- log(sig0) + ((y - mu[i])^2 - c^2)/(2*sig0^2) + log(2*pi)/2
sc <- log(sig0) + ((sig0^2*c^2)/(sig0^2+c^2) + (ybar - mu[i])^2)/(2*sig0^2) + log(2*pi)/2
ICix <- Es0 - 1.96*sd(sx)/sqrt(length(x)) ; ICsx <- Es0 + 1.96*sd(sx)/sqrt(length(x))
ICiy <- Es0 - 1.96*sd(sy)/sqrt(length(x)) ; ICsy <- Es0 + 1.96*sd(sy)/sqrt(length(x))
ICic <- Es0 - 1.96*sd(sc)/sqrt(length(x)) ; ICsc <- Es0 + 1.96*sd(sc)/sqrt(length(x))
if (( mean(sx) > ICix )&( mean(sx) < ICsx )){ acc_sx[i] <- acc_sx[i] +1 }
else { rej_sx[i] <- rej_sx[i] +1 }
if (( mean(sy) > ICiy )&( mean(sy) < ICsy )){ acc_sy[i] <- acc_sy[i] +1 }
else { rej_sy[i] <- rej_sy[i] +1 }
if (( mean(sc) > ICic )&( mean(sc) < ICsc )){ acc_sc[i] <- acc_sc[i] +1 }
else { rej_sc[i] <- rej_sc[i] +1 } }}
accept_sx[[3]] <- acc_sx/Nsim ; reject_sx_mu[[3]] <- rej_sx/Nsim
accept_sy[[3]] <- acc_sy/Nsim ; reject_sy_mu[[3]] <- rej_sy/Nsim
accept_sc[[3]] <- acc_sc/Nsim ; reject_sc_mu[[3]] <- rej_sc/Nsim
|
74b96516b17224916dcdbb557207355591edfacf | 7ecfc5ee5160d783f8d73c5a30eb6be34a2d442a | /Heatmap.R | c6ae1442af9dc584cd35ec5bdb2663b2cc28c3a9 | [] | no_license | swu13/CAeditome | 77aef37c6bf6673c45497608b575e275a2cce4f8 | 1717b8ae7e0a8682df0ef818c5569df81bfdf2da | refs/heads/main | 2023-08-05T08:47:27.611547 | 2021-09-17T02:16:37 | 2021-09-17T02:16:37 | 407,364,352 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,921 | r | Heatmap.R | #!/usr/bin/R
####################################################
#draw the heatmap for any cancer-related files
#parameter1: Input File containing the heatmap_row and heatmap_col and values in each heatmap: Also containing the gene file
#parameter2: The col for the cancer type in the file
#parameter3: The col for the genes in the file
#parameter4: The col for the editing ID in the file
#parameter5:The col for the values in the file
#parameter6: the heatmap folder
####################################################
library(pheatmap) #install.packages("pheatmap")
args=commandArgs(T)
Filename=args[1] #/data3/swu/CAeditome/Differential/Tmp/Heatmap.data
CancerCol=as.numeric(args[2])
GeneCol=as.numeric(args[3])
EditingCol=as.numeric(args[4])
ValueCol=as.numeric(args[5])
DifferenatialNumber=as.numeric(args[6])
UpMax=as.numeric(args[7])
FigureFolder=args[8]
RawData=read.table(Filename,sep="\t",header=FALSE)
Cancers=c("ACC","BLCA","BRCA","CESC","CHOL","COAD","DLBC","ESCA","GBM","HNSC","KICH","KIRC","KIRP","LAML","LGG","LIHC","LUAD","LUSC","MESO","OV","PAAD","PCPG","PRAD","READ","SARC","SKCM","STAD","TGCT","THCA","THYM","UCEC","UCS","UVM")
Unique_ENSG=unique(RawData[,GeneCol])
for(i in 1:length(Unique_ENSG)){
index=which(RawData[,GeneCol]==Unique_ENSG[i])
Data_gene=RawData[index,]
Unique_Editing=unique(Data_gene[,EditingCol])
FigureData=c(rep(NA,length(Cancers)))
for(j in 1:length(Unique_Editing)){
index=which(Data_gene[,EditingCol]==Unique_Editing[j])
EachEditing=Data_gene[index,]
for(k in 1:length(Cancers)){
index=which(EachEditing[,CancerCol]==Cancers[k])
if(length(index)==0 && k==1){
FigureEach=NA
}else if(length(index)==0){
FigureEach=c(FigureEach,NA)
}else if(k==1){
FigureEach=as.numeric(EachEditing[index,ValueCol])
}else{
FigureEach=c(FigureEach,as.numeric(EachEditing[index,ValueCol]))
}
}
FigureEach[which(FigureEach>UpMax)]=UpMax
FigureData=rbind(FigureData,FigureEach)
}
FigureData=as.matrix(FigureData[-1,])
if(dim(FigureData)[1]==33){
FigureData=t(as.data.frame(FigureData))
}else{
FigureData=as.data.frame(FigureData)
}
colnames(FigureData)=Cancers
rownames(FigureData)=Unique_Editing
unique_value=unique(as.numeric(as.matrix(FigureData)))
unique_value=unique_value[which(is.na(unique_value)==FALSE)]
if(length(unique_value)>0){
figurename=paste0(FigureFolder,"/",Unique_ENSG[i],".png")
if(min(unique_value)<DifferenatialNumber){
bk1=seq(min(unique_value),max(DifferenatialNumber-0.001,min(unique_value)),by=0.001)
}else{
bk1=vector()
}
if(max(unique_value)>DifferenatialNumber){
bk2=seq(DifferenatialNumber,max(unique_value),by=0.001)
}else{
bk2=vector()
}
color=c(colorRampPalette(colors = c("darkgreen","white"))(length(bk1)),colorRampPalette(colors = c("white","firebrick3"))(length(bk2)))
if(length(color)>1){
pheatmap(FigureData,cluster_row = FALSE,cluster_cols=FALSE,color= color,cellwidth = 5,cellheight = 3,fontsize=3,file=figurename,na_col = "grey50",breaks=c(bk1,bk2))
}
}
} |
4a2df2617a77c8ebe63ed9ac3e7d8535c366e0ae | b0bb096cb220346b27b71a0c97db1b2056955ed0 | /crawling/crawling_icd10data.R | 3606dd8c6bea7c49a5d26ff391e71cfbf3980547 | [
"MIT"
] | permissive | lenamax2355/ICD2Vec | 596318fff534f482b47c64e1cb4ca42946207afc | 529f6a2a27607618a4844fb65859bc761ff55de8 | refs/heads/main | 2023-06-15T22:30:33.816176 | 2021-07-15T14:01:58 | 2021-07-15T14:01:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,336 | r | crawling_icd10data.R | ##crawling web
## https://www.icd10data.com
##https://www.icd10data.com/ICD10CM/Codes/A00-B99/A00-A09/A00-
library(rvest)
library(stringr)
library(data.table)
library(dplyr)
library(httr)
text_prepro <- function(tmp_string, tmp_word) {
  # Clean scraped HTML text: replace tag fragments, double quotes and CR/LF
  # with `tmp_word` (in the same order as the original), then trim and
  # collapse runs of whitespace to single spaces.
  #
  # tmp_string: character vector of raw HTML/text.
  # tmp_word:   replacement token ("" to delete, " " to space-separate).
  # Returns the cleaned character vector.
  patterns <- c("</li", "</a", "\"", "\r", "\n", "<ul>", "</ul>",
                "<li>", "</li>", ">", "<em", "</em", "<b", "</b")
  for (pat in patterns) {
    # fixed = TRUE: every pattern is a literal, so base gsub() reproduces
    # the stringr::str_replace_all() results without the stringr dependency.
    tmp_string <- gsub(pat, tmp_word, tmp_string, fixed = TRUE)
  }
  # trimws() replaces str_trim(); then squeeze internal whitespace runs.
  gsub("\\s+", " ", trimws(tmp_string))
}
##
main <- "https://www.icd10data.com"
tmp <- "https://www.icd10data.com/ICD10CM/Codes/"
tmp2 <- read_html(tmp)
tmp3 <- html_nodes(tmp2, "body") %>% html_nodes("ul li .identifier")
h1_1 <- tmp3 %>% html_attr("href") #URL
h1_1 <- h1_1[1:22]
h1_2 <- tmp3 %>% html_text() #DIS_CLASS
h1_2 <- h1_2[1:22]
h1_3 <- html_nodes(tmp2, "body") %>% html_nodes("ul li") %>% html_text() #DIS_NAME
h1_3 <- h1_3[69:90]
for(i in 1:length(h1_3)){
#DIS_NAME
tmp_h3 <- text_prepro(h1_3[i], "")
tmp_h3 <- substr(tmp_h3, 9, nchar(tmp_h3))
h1_3[i] <- tmp_h3
}
main2 <- paste0(main, h1_1)
icd <- data.frame(DIS_CLASS = h1_2, URL = main2, DIS_NAME = h1_3, stringsAsFactors = FALSE)
icd$CLI_INFO <- ""
icd$APP_SYN <- ""
icd2 <- data.frame(DIS_CLASS = "", URL = "", DIS_NAME = "", stringsAsFactors = FALSE)
icd2$CLI_INFO <- ""
icd2$APP_SYN <- ""
icd3 <- data.frame(DIS_CLASS = "", URL = "", DIS_NAME = "", stringsAsFactors = FALSE)
icd3$CLI_INFO <- ""
icd3$APP_SYN <- ""
icd4 <- data.frame(DIS_CLASS = "", URL = "", DIS_NAME = "", stringsAsFactors = FALSE)
icd4$CLI_INFO <- ""
icd4$APP_SYN <- ""
for(i in 1:nrow(icd)){
print(paste0(icd[i,]$DIS_CLASS, " start!! -- i"))
tmp_i <- icd[i,]$URL
tmp_i2 <- read_html(tmp_i)
#check whether Codes in the page, then we can add rows in 'icd2'
tmp_i3 <- html_nodes(tmp_i2, ".i51")
#URL
h2_1 <- html_nodes(tmp_i3, ".identifier") %>% html_attr("href")
h2_main <- paste0(main, h2_1)
#DIS_CLASS
h2_2 <- html_nodes(tmp_i3, "li") %>% html_nodes("a") %>% html_text()
h2_2 <- text_prepro(h2_2, "")
#DIS_NAME
h2_3 <- html_nodes(tmp_i3, "li") %>% html_text()
h2_3 <- str_replace_all(h2_3, h2_2, "")
h2_3 <- text_prepro(h2_3, "")
tmp_icd2 <- data.frame(DIS_CLASS = h2_2, URL = h2_main, DIS_NAME = h2_3, stringsAsFactors = FALSE)
tmp_icd2$CLI_INFO <- ""
tmp_icd2$APP_SYN <- ""
for(j in 1:nrow(tmp_icd2)){
print(paste0(tmp_icd2[j,]$DIS_CLASS, " start!! -- j"))
tmp_j <- tmp_icd2[j,]$URL
tmp_j2 <- read_html(tmp_j)
#check whether Codes in the page, then we can add rows in 'icd3'
tmp_j3 <- html_nodes(tmp_j2, ".i51")
#URL
h3_1 <- html_nodes(tmp_j3, ".identifier") %>% html_attr("href")
h3_main <- paste0(main, h3_1)
#DIS_CLASS
h3_2 <- html_nodes(tmp_j3, "li") %>% html_nodes("a") %>% html_text()
h3_2 <- text_prepro(h3_2, "")
#DIS_NAME
h3_3 <- html_nodes(tmp_j3, "li") %>% html_text()
h3_3 <- str_replace_all(h3_3, h3_2, "")
h3_3 <- text_prepro(h3_3, "")
tmp_icd3 <- data.frame(DIS_CLASS = h3_2, URL = h3_main, DIS_NAME = h3_3, stringsAsFactors = FALSE)
tmp_icd3$CLI_INFO <- ""
tmp_icd3$APP_SYN <- ""
tmp_j2 <- as.character(tmp_j2)
where_loc <- unlist(str_locate_all(string = tmp_j2, pattern = '<span>Clinical Information</span>'))
cli_info <- c()
if(length(where_loc) > 0){
cli_info <- str_split(tmp_j2, '<span>Clinical Information</span>')[[1]][2]
cli_info <- str_split(cli_info, '<div class="proper-ad-leaderboard">')[[1]][1]
cli_info <- text_prepro(cli_info, " ")
tmp_icd2[j,]$CLI_INFO <- cli_info
}
where_loc <- unlist(str_locate_all(string = tmp_j2, pattern = '<span>Approximate Synonyms</span>'))
app_syn <- c()
if(length(where_loc) > 0){
app_syn <- str_split(tmp_j2, '<span>Approximate Synonyms</span>')[[1]][2]
app_syn <- str_split(app_syn, '</ul>')[[1]][1]
app_syn <- text_prepro(app_syn, " ")
tmp_icd2[j,]$APP_SYN <- app_syn
}
#if(nrow(tmp_icd3) == 0)
for(k in 1:nrow(tmp_icd3)){
print(paste0(tmp_icd3[k,]$DIS_CLASS, " start!! -- k"))
tmp_k <- tmp_icd3[k,]$URL
tmp_k2 <- read_html(tmp_k)
tmp_k2 <- html_nodes(tmp_k2, "body")
#check whether Codes in the page, then we can add rows in 'icd4'
tmp_k3 <- html_nodes(tmp_k2, ".codeHierarchy")
tmp_k3 <- tmp_k3[length(tmp_k3)]
#URL
h4_1 <- html_nodes(tmp_k3, ".identifierSpacing") %>% html_attr("href")
if(length(h4_1) == 0){ h4_1 <- html_nodes(tmp_k3, ".identifier") %>% html_attr("href") }
h4_main <- paste0(main, h4_1)
#DIS_CLASS
h4_2 <- html_nodes(tmp_k3, "li") %>% html_nodes("a") %>% html_text()
h4_2 <- text_prepro(h4_2, "")
#DIS_NAME
h4_3 <- html_nodes(tmp_k3, "span") %>% html_text()
if(length(h4_3) == 0){ h4_3 <- html_nodes(tmp_k3, "li") %>% html_text() }
h4_3 <- str_replace_all(h4_3, h4_2, "")
h4_3 <- text_prepro(h4_3, "")
tmp_icd4 <- data.frame(DIS_CLASS = h4_2, URL = h4_main, DIS_NAME = h4_3, stringsAsFactors = FALSE)
tmp_icd4$CLI_INFO <- ""
tmp_icd4$APP_SYN <- ""
tmp_k2 <- as.character(tmp_k2)
where_loc <- unlist(str_locate_all(string = tmp_k2, pattern = '<span>Clinical Information</span>'))
cli_info <- c()
if(length(where_loc) > 0){
cli_info <- str_split(tmp_k2, '<span>Clinical Information</span>')[[1]][2]
cli_info <- str_split(cli_info, '<div class="proper-ad-leaderboard">')[[1]][1]
cli_info <- text_prepro(cli_info, " ")
tmp_icd3[k,]$CLI_INFO <- cli_info
}
where_loc <- unlist(str_locate_all(string = tmp_k2, pattern = '<span>Approximate Synonyms</span>'))
app_syn <- c()
if(length(where_loc) > 0){
app_syn <- str_split(tmp_k2, '<span>Approximate Synonyms</span>')[[1]][2]
app_syn <- str_split(app_syn, '</ul>')[[1]][1]
app_syn <- text_prepro(app_syn, " ")
tmp_icd3[k,]$APP_SYN <- app_syn
}
for(l in 1:nrow(tmp_icd4)){
print(paste0(tmp_icd4[l,]$DIS_CLASS, " start!! -- l"))
tmp_l <- tmp_icd4[l,]$URL
tmp_l2 <- read_html(tmp_l)
tmp_l2 <- html_nodes(tmp_l2, "body")
where_loc <- unlist(str_locate_all(string = tmp_l2, pattern = '<span>Clinical Information</span>'))
cli_info <- c()
if(length(where_loc) > 0){
cli_info <- str_split(tmp_l2, '<span>Clinical Information</span>')[[1]][2]
cli_info <- str_split(cli_info, '<span>Code History</span>')[[1]][1]
cli_info <- str_split(cli_info, '<span>ICD-10-CM')[[1]][1]
cli_info <- text_prepro(cli_info, " ")
tmp_icd4[l,]$CLI_INFO <- cli_info
}
where_loc <- unlist(str_locate_all(string = tmp_l2, pattern = '<span>Approximate Synonyms</span>'))
app_syn <- c()
if(length(where_loc) > 0){
app_syn <- str_split(tmp_l2, '<span>Approximate Synonyms</span>')[[1]][2]
app_syn <- str_split(app_syn, '</ul>')[[1]][1]
app_syn <- text_prepro(app_syn, " ")
tmp_icd4[l,]$APP_SYN <- app_syn
}
} # l end
icd4 <- rbind(icd4, tmp_icd4)
write.csv(icd4, "icd_info4_20210715.csv", row.names = FALSE)
} # k end
icd3 <- rbind(icd3, tmp_icd3)
write.csv(icd3, "icd_info3_20210715.csv", row.names = FALSE)
} # j end
icd2 <- rbind(icd2, tmp_icd2)
write.csv(icd2, "icd_info2_20210715.csv", row.names = FALSE)
} # i end
# Persist all four hierarchy levels of the scraped ICD-10-CM tables
# (date-stamped snapshots), then eyeball the two deepest levels.
write.csv(icd, "icd_info_20210715.csv", row.names = FALSE)
write.csv(icd2, "icd_info2_20210715.csv", row.names = FALSE)
write.csv(icd3, "icd_info3_20210715.csv", row.names = FALSE)
write.csv(icd4, "icd_info4_20210715.csv", row.names = FALSE)
View(icd4)
View(icd3)
##processing
# Peek at the first row whose APP_SYN still contains a raw "<" (leftover HTML).
icd4[grep("<", icd4$APP_SYN)[1],]
# Clean CLI_INFO: for rows with residual HTML, re-run text preprocessing and
# keep only the text before the first "<" tag.
icd4$CLI_INFO2 <- icd4$CLI_INFO
for(i in grep("<", icd4$CLI_INFO)){
tmp_cli <- icd4[i,]$CLI_INFO
tmp_cli <- text_prepro(tmp_cli, "")
tmp_cli <- unlist(str_split(tmp_cli,"<"))[1]
icd4[i,]$CLI_INFO2 <- tmp_cli
}
# Swap the cleaned column in under the original name and save the final table.
icd4 <- select(icd4, DIS_CLASS, URL, DIS_NAME, CLI_INFO2, APP_SYN)
icd4 <- rename(icd4, CLI_INFO=CLI_INFO2)
write.csv(icd4, "icd_info4.csv", row.names = FALSE)
icd4 <- fread("/data1/yeongchan/icd2vec/icd_info4.csv", data.table = FALSE)
# Vocabulary-size check: tokens in disease names only ...
tmp1 <- paste0(icd4$DIS_NAME, collapse = " ")
tmp1 <- tolower(tmp1)
tmp1 <- str_split(tmp1, " ")[[1]]
length(tmp1)
#[1] 321593
length(unique(tmp1))
#[1] 8772
# ... versus names + clinical info + approximate synonyms combined.
tmp1 <- paste0(c(icd4$DIS_NAME, icd4$CLI_INFO, icd4$APP_SYN), collapse = " ")
tmp1 <- tolower(tmp1)
tmp1 <- str_split(tmp1, " ")[[1]]
length(tmp1)
#[1] 1166960
length(unique(tmp1))
#[1] 33388
head(unique(tmp1), 30)
|
b528e37e0a08b8bfbf64c500b83b3dca878751f6 | b2a13f2ce0f81ebe7cbc0d574976b70ee5d8694e | /functions/specify_columns.R | 0363c2663f395ca9fe3afc268017689c7512a4ad | [] | no_license | gitHubSlinkman/Tornadoes | 586684f00dfc350b447c727d497773fe86d49708 | ae7bc3652d5bb94f547eb0994f0369d76190a066 | refs/heads/main | 2023-05-31T12:40:35.273769 | 2021-06-23T03:17:53 | 2021-06-23T03:17:53 | 375,800,747 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,757 | r | specify_columns.R | # specify_columns.r
# Column specification for reading the storm-events export with readr
# (pass the result as `col_types = specify_columns()` to read_csv/read_delim).
# Narrative fields are kept as character; casualty/damage/geometry fields are
# numeric; unused fields are skipped.
specify_columns <-
  function(){
    cols(
      EVENT_ID = col_character(),
      CZ_NAME_STR = col_character(),
      BEGIN_LOCATION = col_character(),
      BEGIN_DATE = col_character(),
      BEGIN_TIME = col_character(),
      EVENT_TYPE = col_skip(),
      MAGNITUDE = col_skip(),
      TOR_F_SCALE = col_character(),
      DEATHS_DIRECT = col_double(),
      INJURIES_DIRECT = col_double(),
      DAMAGE_PROPERTY_NUM = col_double(),
      DAMAGE_CROPS_NUM = col_double(),
      STATE_ABBR = col_character(),
      CZ_TIMEZONE = col_character(),
      MAGNITUDE_TYPE = col_logical(),
      # BUG FIX: previously `col_character` (no parentheses) — that passes the
      # function itself rather than a collector object, which readr rejects.
      EPISODE_ID = col_character(),
      CZ_TYPE = col_character(),
      CZ_FIPS = col_character(),
      WFO = col_character(),
      INJURIES_INDIRECT = col_double(),
      DEATHS_INDIRECT = col_double(),
      SOURCE = col_character(),
      FLOOD_CAUSE = col_skip(),
      TOR_LENGTH = col_double(),
      TOR_WIDTH = col_double(),
      BEGIN_RANGE = col_double(),
      BEGIN_AZIMUTH = col_character(),
      END_RANGE = col_double(),
      END_AZIMUTH = col_character(),
      END_LOCATION = col_character(),
      END_DATE = col_character(),
      # NOTE(review): END_TIME is parsed as double while BEGIN_TIME is
      # character — confirm this asymmetry is intended.
      END_TIME = col_double(),
      BEGIN_LAT = col_double(),
      BEGIN_LON = col_double(),
      END_LAT = col_double(),
      END_LON = col_double(),
      EVENT_NARRATIVE = col_character(),
      EPISODE_NARRATIVE = col_character(),
      ABSOLUTE_ROWNUMBER = col_skip() )
  }
} |
7394bc64a9e15331e7b673a6c17ad2541a64fbe7 | 3330f6c7c00e931d336b4de7d8fe80f955edb0d4 | /R/get_tn.fn.R | f2b3e48ceb1fc776025d8377adb146de04d17ea3 | [] | no_license | Chengshu21/SS_Benchmark | 0e6ef80bf1021b150801ccae04eb91ac8729cb1a | f34ef58f9c924b16624aaabd8cd7cdec95553721 | refs/heads/master | 2020-05-31T22:05:12.835316 | 2020-04-13T02:59:46 | 2020-04-13T02:59:46 | 190,512,630 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 840 | r | get_tn.fn.R | #### select NON-significant pathways of each cluster at first
##################################
#### get TN and FN
get_tn.fn = function(nonsig.pathway_result, target_pathway){
  # Count true/false negatives among the NON-significant pathways.
  #
  # A false negative (FN) is a target pathway that appears among the
  # non-significant results (it should have been detected but was not);
  # every other non-significant pathway is a true negative (TN).
  #
  # Args:
  #   nonsig.pathway_result: matrix/data.frame of non-significant pathways,
  #     with pathway identifiers as row names.
  #   target_pathway: vector of target (truth-set) pathway identifiers.
  #
  # Returns a list with:
  #   tn_count: number of non-significant pathways not in the target set
  #   fn_count: number of target pathways found among the non-significant rows
  #   FN:       list of the missed target pathways
  #
  # NOTE: the previous version compared every rowname against every target in
  # a double loop and wrote into FN/TN lists that were never initialised
  # inside the function (relying on globals), and the tn_count it accumulated
  # was discarded. This vectorised version is self-contained and returns the
  # same counts (assuming unique row names and targets, as before).
  nonsig_names <- rownames(nonsig.pathway_result)
  is_missed <- as.character(target_pathway) %in% nonsig_names
  FN <- as.list(target_pathway[is_missed])
  fn_count <- sum(is_missed)
  list(
    "tn_count" = nrow(nonsig.pathway_result) - fn_count,
    "fn_count" = fn_count,
    "FN" = FN
  )
}
|
888c42d7fef5a02bb7ea48fc1b898dfeb857199a | 5b51a6643499c4686b15f84052c18e79988cfa8f | /boggle.R | 16e1424934684214c4ea22d968cd2b0ad75d4ec2 | [] | no_license | cbedwards/Misc | c9325d4df4ba8a8c66a0d5f37e0a7afefe116d59 | a3dd887697028debb632b38902c7a1931586b6ef | refs/heads/master | 2021-03-30T06:35:43.131406 | 2020-03-17T17:00:19 | 2020-03-17T17:00:19 | 248,025,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,929 | r | boggle.R | # Code to generate and display boggle boards in R
# Using recent versions of R, random number generators should behave the same across platforms
# So multiple players can play across any distance
# by choose a shared number for the seed, and calling the function on their own computer.
# (although check this by generating a board and comparing). Each seed number creates a new board.
# boggle4() creates a 4x4 board, boggle5() creates a 5x5 board
# textsize() is an optional argument to make the letters bigger or smaller
#Bogglemaker:
boggle4=function(seed,
textsize=2){
die=data.frame(
d0=c('R','I','F','O','B','X'),
d1=c('I','F','E','H','E','Y'),
d2=c('D','E','N','O','W','S'),
d3=c('U','T','O','K','N','D'),
d4=c('H','M','S','R','A','O'),
d5=c('L','U','P','E','T','S'),
d6=c('A','C','I','T','O','A'),
d7=c('Y','L','G','K','U','E'),
d8=c('Qu','B','M','J','O','A'),
d9=c('E','H','I','S','P','N'),
d10=c('V','E','T','I','G','N'),
d11=c('B','A','L','I','Y','T'),
d12=c('E','Z','A','V','N','D'),
d13=c('R','A','L','E','S','C'),
d14=c('U','W','I','L','R','G'),
d15=c('P','A','C','E','C','D')
)
## operations:
set.seed(seed)
rolls=apply(die,2,function(x)sample(x,1))
ord=sample(1:16,replace=F)
board=matrix(rolls[ord], nrow=4, ncol=4)
plot(0,0,
type='n',
ylim=c(0,4),
xlim=c(0,4),
xlab='',
xaxt='n',
yaxt='n',
ylab='',
bty='n')
grid(lty=1, col='darkgrey')
at=expand.grid((0:3)+.5,(0:3)+.5)
text(x=at[,1], y=at[,2],
labels=board, cex=textsize)
}
# boggle5(seed, textsize = 2): same idea as boggle4() but for the 25-die
# 5x5 "Big Boggle" set. Reseeds the GLOBAL RNG via set.seed().
boggle5=function(seed,
                 textsize=2){
  # The 25 Big Boggle dice (unnamed columns; one die per column).
  die=data.frame(
    c('A','A','A','F','R','S'),
    c('A','A','E','E','E','E'),
    c('A','A','F','I','R','S'),
    c('A','D','E','N','N','N'),
    c('A','E','E','E','E','M'),
    c('A','E','E','G','M','U'),
    c('A','E','G','M','N','N'),
    c('A','F','I','R','S','Y'),
    c('B','J','K','Q','X','Z'),
    c('C','C','N','S','T','W'),
    c('C','E','I','I','L','T'),
    c('C','E','I','L','P','T'),
    c('C','E','I','P','S','T'),
    c('D','H','H','N','O','T'),
    c('D','H','H','L','O','R'),
    c('D','H','L','N','O','R'),
    c('D','D','L','N','O','R'),
    c('E','I','I','I','T','T'),
    c('E','M','O','T','T','T'),
    c('E','N','S','S','S','U'),
    c('F','I','P','R','S','Y'),
    c('G','O','R','R','V','W'),
    c('H','I','P','R','R','Y'),
    c('N','O','O','T','U','W'),
    c('O','O','O','T','T','U')
  )
  # Roll one face per die, shuffle the 25 results into a 5x5 grid, and plot.
  set.seed(seed)
  rolls=apply(die,2,function(x)sample(x,1))
  ord=sample(1:25,replace=F)
  board=matrix(rolls[ord], nrow=5, ncol=5)
  plot(0,0,
       type='n',
       ylim=c(0,5),
       xlim=c(0,5),
       xlab='',
       xaxt='n',
       yaxt='n',
       ylab='',
       bty='n')
  grid(lty=1, col='darkgrey')
  at=expand.grid((0:4)+.5,(0:4)+.5)
  text(x=at[,1], y=at[,2],
       labels=board, cex=textsize)
}
# Example boards: both players run these with the same seed to get
# identical boards.
boggle4(5)
boggle5(5)
|
cb6142cbe18428dd1ad511d2d23f2415bd9ab434 | 35ae1d08f123b9e8abc5e7e91c76bd53266977dc | /R/RAFSIL_utils.R | 3e810faea66facfbabb8a8fee6230e466562a8e0 | [] | no_license | rahul799/RAFSIL | 7fe93639a1956fc25d61ded8d43fd4fc38572b0f | 69b1ab6edceb74dfc4ba8f6c722f06ddb0293a4f | refs/heads/master | 2023-05-28T20:00:52.926354 | 2021-06-08T14:47:00 | 2021-06-08T14:47:00 | 288,529,555 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,933 | r | RAFSIL_utils.R |
# Min-max normalisation: linearly rescale a numeric vector to [0, 1]
# (NAs are ignored when finding the range, but preserved in the output).
normal <- function(x) {
  shifted <- x - min(x, na.rm = TRUE)
  shifted / max(shifted, na.rm = TRUE)
}
#' Elbow detection for a score curve
#'
#' Finds the "elbow" of the decreasingly-sorted score curve as the point
#' with maximum perpendicular distance from the straight line joining the
#' highest and lowest scores, then returns the positions of all scores at
#' or above that cutoff. (Included from the uSORT package, version 1.6.0.)
#'
#' @param scores numeric vector of scores (length must be at least 2)
#' @param if_plot logical; if TRUE, plot the sorted curve with the selected
#'   points drawn in red
#' @return integer vector of positions in \code{scores} whose value is at
#'   least the elbow cutoff
elbow_detection <- function(scores, if_plot = FALSE) {
  n_scores <- length(scores)
  if (n_scores < 2) {
    stop("Input scores must be a vector with length more than 1!")
  }
  ids <- seq_len(n_scores)
  # Sort decreasingly; x = rank position, y = sorted score.
  sorted_vals <- scores[order(scores, decreasing = TRUE)]
  xs <- seq_len(n_scores)
  # Implicit line a*x + b*y + c = 0 through the first and last sorted points.
  a <- sorted_vals[1] - sorted_vals[n_scores]
  b <- xs[n_scores] - xs[1]
  cc <- xs[1] * sorted_vals[n_scores] - xs[n_scores] * sorted_vals[1]
  dist_to_line <- abs(a * xs + b * sorted_vals + cc) / sqrt(a^2 + b^2)
  # The elbow is the point farthest from that line; its score is the cutoff.
  score_cutoff <- sorted_vals[which.max(dist_to_line)]
  select_ID <- ids[which(scores >= score_cutoff)]
  if (if_plot) {
    plot(xs, sorted_vals,
         col = ifelse(sorted_vals >= score_cutoff, "red", "black"),
         xlab = "ID", ylab = "Score",
         main = paste0("Optimal number = ", length(select_ID),
                       " with cutoff value = ",
                       round(score_cutoff, digits = 4)))
  }
  return(select_ID)
}
##RSV
#' RANDOM PROJECTION SVD
#'
#' @param A matrix
#' @param K rank
#' @return list with components U S and V
#' @importFrom pracma orth
fast.rsvd<-function( A, K ) {
#============================
M = dim(A)[1]
N = dim(A)[2]
P = min(2*K,N)
X = matrix(rnorm(N*P),nrow=N,ncol=P)
Y = A%*%X
W1 = orth(Y)
B = t(W1)%*%A
res = svd(B,nu=min(dim(B)),nv=min(dim(B)))
W2 = res$u
tmp_S = res$d
S = array(0,c(length(tmp_S),length(tmp_S)))
diag(S) = tmp_S
V = res$v
U = W1%*%W2
K = min(K,dim(U)[2])
U = U[,1:K]
S = S[1:K,1:K]
V = V[,1:K]
return(list(U=U,S=S,V=V))
}
#' PCA built on fast.rsvd
#'
#' Column-centers X, computes a randomized truncated SVD (up to 400
#' components), scales the left singular vectors by the square roots of the
#' singular values, row-normalises the scores to unit length, and finally
#' keeps only the leading components selected by elbow_detection() on the
#' per-component variances. Inherits fast.rsvd's dependence on the global
#' RNG state.
#'
#' @param X matrix (observations in rows, features in columns)
#' @return projection of X on principal top principal components
#' @importFrom matrixStats colVars
fast.pca<-function(X){
  #=====================
  K<-400
  #X = t(X)
  # Column-center X: tmp_val holds the column means; the apply() trick
  # builds a matrix with tmp_val replicated in every row.
  tmp_val = as.vector(colSums(X)/nrow(X))
  X = X - t(apply(array(0,dim(X)),MARGIN=1,FUN=function(x) {x=tmp_val}))
  res = fast.rsvd(X,K)
  U = res$U
  S = res$S
  K = min(dim(S)[2],K)
  # Scores = U * sqrt(singular values) (diagonal scaling).
  diag_val = sqrt(diag(S[1:K,1:K]))
  diag_mat = array(0,c(length(diag_val),length(diag_val)))
  diag(diag_mat) = diag_val
  X = U[,1:K]%*%diag_mat
  # Normalise each row of the score matrix to unit Euclidean length.
  normalization_val = sqrt(rowSums(X*X))
  X = X / apply(array(0,c(length(normalization_val),K)),MARGIN=2,FUN=function(x) {x=normalization_val})
  pcgeneres<-X
  # Order components by variance and keep those up to the variance elbow.
  varlist<-colVars(pcgeneres)
  ordered_varlist<-order(varlist,decreasing = TRUE)
  LM1<-pcgeneres[,ordered_varlist]
  varf<-colVars(LM1)
  num<-length(elbow_detection(varf))
  pcgene<-LM1[,c(1:num)]
  return(pcgene)
}
#' Plot a (dis)similarity matrix using the heatmap function
#'
#' The diagonal is blanked (NA) before plotting so self-similarities do not
#' dominate the colour scale; rows/columns keep their original order (no
#' dendrogram reordering) and class membership is shown as coloured side bars.
#'
#' @param x matrix (Dis)Similarities
#' @param labels integer Labels for classes/categories, one per row/column of x
#' @param col character "qnt" for quantile-based color breaks, "lin" for linear color breaks
#' @return list The list of components heatmap returns (invisibly).
#' @importFrom RColorBrewer brewer.pal
#' @importFrom grDevices colorRampPalette
#' @export
plotSIM = function(x,labels,col="qnt") {
  #--------------------------------
  diag(x) = NA
  # `col` is reused: after this branch it holds the 64-colour palette.
  if(col=="lin"){
    # Linear breaks: rescale x to [0, 1] and cut into 64 equal-width bins.
    x = x-min(x,na.rm=TRUE)
    x = x/max(x,na.rm=TRUE)
    brks = seq(0,1,len=65)
    col = rev(colorRampPalette(brewer.pal(9,"PuBuGn"))(64))
  } else {
    # Quantile breaks: 64 equally-populated bins.
    brks = quantile(as.vector(x),seq(0,1,len=65),na.rm=TRUE)
    col = rev(colorRampPalette(brewer.pal(9,"PuBuGn"))(64))
  }
  # One side-bar colour per class.
  # NOTE(review): brewer.pal("Set1") supports 3-9 colours — confirm the
  # number of distinct labels stays in that range.
  nc = length(unique(labels))
  cl = brewer.pal(nc,"Set1")
  #col = paste(col,"77",sep="")
  hm = heatmap(x,Rowv=NA,Colv=NA,scale="none",labRow="",labCol="",
               margins=c(.7,.7),col=col, breaks=brks,ColSideColors=cl[labels],
               RowSideColors=cl[labels],revC=TRUE)
  return(invisible(hm))
}
#' Plot a tSNE plot of a (dis)similarity matrix
#'
#' Runs Rtsne on x (after reseeding the GLOBAL RNG with `seed` for
#' reproducibility) and scatter-plots the 2-D embedding, colouring points
#' by class label with semi-transparent Set1 colours.
#'
#' @param x matrix (Dis)Similarities
#' @param labels integer Labels for classes/categories
#' @param seed integer seed for RNG (->tSNE)
#' @param ... further arguments to Rtsne
#' @importFrom RColorBrewer brewer.pal
#' @importFrom graphics plot
#' @importFrom Rtsne Rtsne
#' @export
plotTSNE <- function(x,labels,seed=3749829,...) {
  #-------------------------------
  set.seed(seed)
  a = Rtsne(x,...)
  # One colour per class; appending "77" adds ~47% alpha to each hex colour.
  # NOTE(review): brewer.pal("Set1") supports 3-9 classes — confirm nc fits.
  nc = length(unique(labels))
  col = brewer.pal(nc,"Set1")
  col = paste(col,"77",sep="")
  plot(a$Y,col=col[labels],pch=19,cex=1,xaxt="n",yaxt="n",xlab="",ylab="")
}
|
21254b6c858becacb278479c7400f6a128cf0ea2 | f36920dec49fb522a2f4334e8fe9b8412067ed5c | /scripts/format_rawdata/standardize_COGU_stora karlso_data.R | 31eb03699c62435c5ae6fec4fb02b3a1286e7da1 | [] | no_license | MartinBeal/annual_consistency | 2c628f624238e25b8aaca13acc0feea817a6934c | 951241499fce705891fc2918ea6dd87d10f94f81 | refs/heads/main | 2023-07-27T09:07:11.899977 | 2021-08-30T15:39:10 | 2021-08-30T15:39:10 | 397,925,079 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,325 | r | standardize_COGU_stora karlso_data.R | #---------------------------------------------------------------------------####
##   Common Murre - Stora Karlsö   ##
## Standardize tracking data across datasets ##
pacman::p_load(stringr, lubridate, dplyr)
# Root folder containing one subfolder per species/site dataset.
rawdatafolder <- "C:/Users/Martim Bill/Documents/annual_consistency/data/raw_data/"
folders <- list.files(rawdatafolder)
folders <- grep("STDB_zips", folders, value = TRUE, invert = TRUE)
# Species code = the part of each folder name before the first underscore.
spp   <- do.call(rbind, str_split(folders, pattern = "_"))[,1]
#---------------------------------------------------------------------------####
## Common Murre ##  (NOTE: header previously said "Buller's Albatross" — a
## copy/paste leftover from a sister script; this block processes COGU data.)
one <- "COGU_stora karlso"
rawdata <- fread("data/raw_data/COGU_stora karlso/Baltic Seabird Stora Karls (Uria aalge & Alca torda).csv")
# Standardise Movebank-style columns, keep only Common Murre GPS fixes, and
# attach site/stage/colony metadata.
rawdata <- rawdata %>%
  rename(
    bird_id = `individual-local-identifier`, track_id = `event-id`, scientific_name = `individual-taxon-canonical-name`, longitude=`location-long`, latitude=`location-lat`, DateTime=timestamp) %>%
  filter(scientific_name == "Uria aalge" & `sensor-type`=="gps") %>%
  select(scientific_name, bird_id, track_id, DateTime) %>%
  mutate(
    DateTime = fasttime::fastPOSIXct(DateTime),
    site_name = "Stora Karlso",
    breed_stage = "brood-guard",
    lat_colony = 57.289882,
    lon_colony = 17.958266,
    year = year(DateTime),
    month = month(DateTime)
  )
# Per-year summary: month span, number of fixes, number of tracked birds.
COGU_SK_summ <- rawdata %>%
  group_by(scientific_name, site_name, year, breed_stage) %>%
  summarise(
    m_range = paste(month.abb[min(month)], month.abb[max(month)], sep = "-"),
    n_pnts = n_distinct(track_id),
    n_birds = n_distinct(bird_id)
  )
COGU_SK_summ
## filter to CHICK-REARING data ##------------------------------------
# Keep chick-rearing stages and only years with more than 4 tracked birds.
COGU_SK_summ <- filter(COGU_SK_summ, breed_stage %in% c("brood-guard", "chick-rearing"))
goodyrs <- COGU_SK_summ$year[COGU_SK_summ$n_birds > 4]
tracks <- rawdata %>%
  filter(breed_stage %in% c("brood-guard", "chick-rearing") & year %in% goodyrs)
# Build the output filename: species_site_stage_yearrange_nyears.rds
sp    <- tracks$scientific_name[1]
site  <- tracks$site_name[1]
stage <- tracks$breed_stage[1]
y_range <- paste(min(tracks$year), max(tracks$year), sep="-")
nyrs    <- paste0(n_distinct(tracks$year), "y")
filename <- paste0(paste(sp, site, stage, y_range, nyrs, sep = "_"), ".rds")
filename
# Save to analysis folder #
saveRDS(tracks, paste0("C:/Users/Martim Bill/Documents/annual_consistency/data/analysis/all_TD/", filename))
|
d8022035f4d34bf6e71328e394587f56996dc51d | 722e13d427cc095153233052b1f90ed138484cc3 | /man/as.spectra.Rd | 2b61c86a355d9cfdf8a0c9878f4da2e93e0efa9d | [] | no_license | annakat/casper_defunct | 2eb0a261c67af7e3299c64816ec3e7113034c6dd | ed378b53ec54c104bfe66d615d944bf3622d3cfe | refs/heads/master | 2020-12-24T06:03:06.612672 | 2016-11-15T17:22:21 | 2016-11-15T17:22:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 363 | rd | as.spectra.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conversion.R
\name{as.spectra}
\alias{as.spectra}
\title{Convert matrix or data frame to spectra}
\usage{
as.spectra(x)
}
\arguments{
\item{x}{matrix or dataframe. See details for format requirements}
}
\value{
spectra object
}
\description{
Convert matrix or data frame to spectra
}
|
524326e20846ab4260ce2d6747cd34c08384b147 | db33566a0bb122cefb77a8561740538ab4869a1f | /man/server.scfind.Rd | 6e6ebff82ea607be6844a0d1425fef6c61fe771d | [] | no_license | wikiselev/scfind | 1fbdffbbb7a6a3b2934516106d8b388afee483b4 | 642497fe2bfee97b37059fa1c063437af11f8198 | refs/heads/master | 2020-04-05T22:08:12.478089 | 2018-09-17T14:06:10 | 2018-09-17T14:06:10 | 157,246,204 | 1 | 0 | null | 2018-11-12T16:53:01 | 2018-11-12T16:53:01 | null | UTF-8 | R | false | true | 403 | rd | server.scfind.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ShinyMethods.R
\name{server.scfind}
\alias{server.scfind}
\title{Server handler for scfind}
\usage{
server.scfind(object)
}
\arguments{
\item{input}{handler item for the ShinyApp}
\item{output}{handler item for the ShinyApp}
\item{session}{handler item for the stateful ShinyApp}
}
\description{
Server handler for scfind
}
|
0126a69d8cec308082f3e482998995deba2b6f30 | 19f91f128022d247f25a91e3582b7a75d9b298c1 | /codes/manhattan_with_genes.R | ed541922e60e691e47f54f185661b86d3583da5a | [] | no_license | Jilong-Jerome/Pval2Gene | 682fc0ecdc2868990b52f1eadde13cd7febc36e5 | ccf4ce8dc3d8f7d4295c87b8c5315d29ecc03b41 | refs/heads/master | 2022-04-21T21:39:05.559016 | 2020-04-27T02:42:03 | 2020-04-27T02:42:03 | 251,028,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,014 | r | manhattan_with_genes.R | library(readr)
library(tidyr)
library(dplyr)
library(ggplot2)
# List of SNPs to highlight are in the snpsOfInterest object
# We will use ggrepel for the annotation
library(ggrepel)
# Prepare the dataset
# Usage: Rscript <script> <input.tsv> <output.png>
# Input is a 7-column TSV (col_types "ddcdccc"); the code below uses columns
# chr, pos, localscore, gene, is_highlighted, is_annotated — presumably all
# present in the input file (TODO confirm column order against a sample file).
args <- commandArgs(T)
snps <- read_tsv(args[1],col_types = "ddcdccc")
snps <- snps %>% filter(chr != 39)
don <- snps %>%
  # Compute chromosome size
  group_by(chr) %>%
  summarise(chr_len=max(pos)) %>%
  # Calculate cumulative position of each chromosome
  mutate(tot=cumsum(chr_len)-chr_len) %>%
  select(-chr_len) %>%
  # Add this info to the initial dataset
  left_join(snps, ., by=c("chr"="chr")) %>%
  # Add a cumulative position of each SNP (genome-wide x coordinate)
  arrange(chr, pos) %>%
  mutate( poscum=pos+tot)
# Add highlight and annotation information
#mutate( is_highlight=ifelse(SNP %in% snpsOfInterest, "yes", "no")) %>%
#mutate( is_annotate=ifelse(-log10(P)>4, "yes", "no"))
# Prepare X axis: one tick per chromosome, placed at its midpoint.
axisdf <- don %>% group_by(chr) %>% summarize(center=( max(poscum) + min(poscum) ) / 2 )
# Make the plot (autoprinted at top level, drawn onto the png device).
png(args[2],width = 2000, height = 800)
ggplot(don, aes(x=poscum, y=localscore)) +
  # Show all points
  geom_point( aes(color=as.factor(chr)), alpha=0.8, size=1.3) +
  scale_color_manual(values = rep(c("grey", "skyblue"), 22 )) +
  # custom X axis:
  scale_x_continuous( label = axisdf$chr, breaks= axisdf$center ) +
  scale_y_continuous(expand = c(0, 0) ) + # remove space between plot area and x axis
  # Add highlighted points
  geom_point(data=subset(don, is_highlighted=="yes"), color="orange", size=2) +
  # Add label using ggrepel to avoid overlapping
  geom_label_repel( data=subset(don, is_annotated=="yes"), aes(label=gene), size=5) +
  # Custom the theme:
  theme_bw() + xlab("chromosome") +
  theme(
    legend.position="none",
    text = element_text(size=20),
    panel.border = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank()
  )
dev.off()
2c6f864ee5ca30f2a8d9ee8b9cd5e661eb0e276a | fba8ed5d7792c8bbdff885690cc94f2ebcdb9881 | /man/clusterinfo.Rd | e69395d2027936a242820fb86df46009c58139e8 | [
"MIT"
] | permissive | kasperwelbers/topicbrowser | af19829fb092e84d3bcbbda6de62bae60e2fa216 | 14e9a00b7145fa9fdf8b607f831650f5be043084 | refs/heads/master | 2021-01-16T21:10:58.455765 | 2014-10-30T16:57:37 | 2014-10-30T16:57:37 | 26,169,122 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,030 | rd | clusterinfo.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{clusterinfo}
\alias{clusterinfo}
\title{Create a 'clusterinfo' object that can be used for the individual render methods}
\usage{
clusterinfo(m, terms, documents, meta, topic_ids = 1:m@k, words = terms,
date_interval = "year")
}
\arguments{
\item{m:}{the fitted LDA model object from the topicmodels package}
\item{terms:}{a vector of terms, which should be sorted in their original order and match m@terms}
\item{documents:}{a vector of the same length as terms, indicating the document of each term, matching m@documents}
\item{meta:}{a data frame with meta data about the documents, should have columns aid, headline, medium, and date (todo: make more flexible)}
\item{topic_ids:}{optionally restrict output to a selection of topics}
\item{date_interval:}{specify the interval for plotting the meta$data}
\item{words:}{if given, use instead of terms for displaying document}
}
\value{
a list with tokens, wordassignments, and other items
}
\description{
Create a 'clusterinfo' object that can be used for the individual render methods.
}
|
6c15b4814e4378a5a951e273dc0fabfb1337d8b8 | dd16e59de66cbf7984dd4ddac1267b22bb3f3702 | /R/graph_traversal.R | 20cf0e6640f0080b7db1798b673e33eb829377c3 | [
"MIT"
] | permissive | aaronrudkin/werner | 5688b4c472947305aad8a7bc995dadcd8643cdda | fe912fd2674ea0e837bf71cb12b00997f4cd7c1f | refs/heads/master | 2021-05-11T09:02:27.640334 | 2018-02-25T17:39:36 | 2018-02-25T17:39:36 | 118,067,648 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,687 | r | graph_traversal.R | #' Breadth first search of a directed unweighted graph with cycles
#'
#' Performs a breadth first search of node_matrix to find a path from
#' node `from` to node `to`. If a path is found, returns path and distance.
#'
#' @param node_matrix A square matrix where cells are 1 if there is an edge
#' between the row cell and the column cell and 0 otherwise
#' @param from The origin node
#' @param to The destination node
#'
#' @return A list with the keys `status` (TRUE if there is a path,
#' FALSE if not), and if there is a path, also `distance` and `path`.
#'
breadth_first = function(node_matrix, from, to) {
# Skip the entire algorithm -- they're directly connected
if(node_matrix[from, to] == 1) {
return(list(status = TRUE,
distance = 1,
path = c(from, to)))
}
# Pre-allocate scores
score = numeric(ncol(node_matrix))
# Nodes to visit
unvisited = c(from)
# Nodes we visited
visited = c()
# Minimum paths
paths = list()
# Special case for dealing with the first hop
first_cost = TRUE
# First node to visit is the source
node_index = unvisited[1]
# Loop as long as there's an unvisited node.
while(length(unvisited)) {
# If we can go to the goal from here, we're done.
if(node_matrix[node_index, to] == 1) {
return(list(status = TRUE,
distance = score[node_index] + 1,
path = c(paths[[node_index]], to)))
}
# Take the current node off the unvisited list.
unvisited = setdiff(unvisited, node_index)
# Visit the node
visited = c(visited, node_index)
# Which nodes is the current node connected to that we haven't seen yet
nodes_to_visit = setdiff(which(node_matrix[node_index, ] == 1), visited)
nodes_to_visit = setdiff(nodes_to_visit, unvisited)
# We're going to check out any nodes we found
unvisited = c(unvisited, nodes_to_visit)
# Score the new nodes we found.
score[nodes_to_visit] = ifelse(first_cost == TRUE,
1,
score[node_index] + 1)
# Path the new nodes we found
for(node in nodes_to_visit) {
if(first_cost == TRUE) {
paths[[node]] = c(from, node)
} else {
paths[[node]] = c(paths[[node_index]], node)
}
}
# Remove the hack for the first node.
first_cost = FALSE
# Next node on deck
node_index = unvisited[1]
}
# Didn't find any results, exhausted nodes to explore.
return(list(status = FALSE))
}
is_recursive = function(node_matrix) {
vapply(seq.int(ncol(node_matrix)),
function(x) { breadth_first(node_matrix, x, x)$status },
TRUE)
}
|
c565eaef0fed6042fc60c2430ce29503eea8e52b | 2d817fdf272731acf2355bce324ff1cf2f51de3b | /voter_file_panel/summarize_for_poq_neu_pew.R | 369712b8ec26fce8bb5d593f1c7c35194ece15bd | [] | no_license | sdmccabe/poq-constructing-samples-replication | a05e11b3228ecb08dcaaea6d454e85c28e5089b5 | 07db825113b5c56f1a3e8d6c523787a6f8ac783d | refs/heads/main | 2023-03-09T01:27:51.703669 | 2021-02-17T16:42:16 | 2021-02-17T16:42:16 | 326,231,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,790 | r | summarize_for_poq_neu_pew.R | library(readr)
# Load plyr BEFORE dplyr (deliberate: otherwise plyr masks dplyr verbs).
library(plyr)
library(dplyr)
# Paths are blanked for release; fill in before running. setwd() below means
# this script must be run from a throwaway session.
in_dir <- ""
out_dir <- ""
raw_data_dir2 <- ""
raw_data_dir <- ""
# Loads the atp_* histogram break objects used further down.
load("pew_histograms_with_standardized_breaks.RData") # pew hists
setwd(in_dir)
# NOTE(review): file is named .tsv but is read with sep = "," and there are
# 23 colClasses vs 20 names below — confirm the file's delimiter and width.
# Everything is read as character; get_hist() coerces to numeric later.
df <- read.table(
  "demog_v4.tsv",
  sep = ",",
  colClasses = rep("character", 23),
  header = T
)
names(df) <- c("id","num_tweets","age","gender","race","party","state","cd","zip","county_fips","partisan_score","registration_status","first_tweet","last_tweet","statuses_count","favourites_count","followers_count","friends_count","user_created_at","verified")
# Keep registered voters only; recode "Uncoded" race as missing.
df <- subset(df, registration_status == 1)
df$race[df$race=="Uncoded"] <- NA
behavior <- read.table(
  file="behavior.tsv",
  sep = "\t",
  colClasses = rep("character", 7),
  header = F
)
names(behavior) <- c('id','n_impeachment_tweets','n_retweets','n_impeachment_retweets','n_replies','n_replies_alt','n_tweets')
get_hist <- function(x, log = "yes", .breaks = NULL) {
  # Histogram of `x` as a data.frame with a leading alignment row
  # (first row carries the first break with NA mids/counts, so that
  # counts[i] corresponds to the bin ending at breaks[i]).
  #
  # Args:
  #   x: numeric vector; characters/factors are coerced (the Twitter
  #      metadata columns are read as character upstream).
  #   log: if "yes", x is log10-transformed and non-finite results (zeros)
  #      are mapped to the sentinel value -1, giving them their own bin.
  #   .breaks: NULL or "Sturges" to let hist() choose Sturges breaks, or an
  #      explicit numeric break vector (e.g. the Pew/ATP break sets).
  #
  # BUG FIX: the previous version compared `.breaks` (possibly a numeric
  # vector) against the string "Sturges" inside `if()`, which fails with
  # "condition has length > 1" on R >= 4.2 whenever explicit breaks are
  # supplied — exactly how this script calls it. A scalar flag is used now.
  if (class(x) != "numeric") {
    x <- as.numeric(as.character(x))
  }
  use_sturges <- is.null(.breaks) || identical(.breaks, "Sturges")
  if (log == "yes") {
    x <- log10(x)
    x[!is.finite(x)] <- -1  # zeros -> sentinel bin at -1
  }
  if (!use_sturges) {
    # Extend the last break to cover the data if needed (na.rm added so a
    # stray NA in x cannot turn the final break into NA).
    if (max(.breaks) < max(x, na.rm = TRUE)) {
      .breaks[length(.breaks)] <- max(x, na.rm = TRUE)
    }
    # Shift a zero break to -0.5 so the sentinel bin [-1, -0.5] stays
    # separate, and make sure -1 itself is the first break.
    .breaks[.breaks == 0] <- -0.5
    if (.breaks[1] != -1) {
      .breaks <- c(-1, .breaks)
    }
    my_hist <- hist(x, breaks = .breaks, right = TRUE, include.lowest = TRUE)
  } else {
    # Let hist() pick Sturges breaks, then re-bin with -1 prepended so the
    # sentinel values get a dedicated first bin; that bin's mid is set to 0.
    # (Assumes x >= -1, which the log path guarantees.)
    my_hist <- hist(x, breaks = "Sturges", right = TRUE, include.lowest = TRUE)
    my_hist <- hist(x, breaks = c(-1, my_hist$breaks), right = TRUE,
                    include.lowest = TRUE)
    my_hist$mids[1] <- 0
  }
  data.frame(
    breaks = my_hist$breaks,
    mids = c(NA, my_hist$mids),
    counts = c(NA, my_hist$counts)
  )
}
# Build log10 histograms of the four Twitter activity metrics, binned with
# the ATP (Pew) break sets loaded from the .RData file above, so the panel
# distributions are directly comparable with the survey benchmarks.
hist_statuses <- get_hist(
  df$statuses_count,
  .breaks = atp_tweets$breaks
)
hist_favorites <- get_hist(
  df$favourites_count,
  .breaks = atp_likes$breaks
)
hist_followers <- get_hist(
  df$followers_count,
  .breaks = atp_followers$breaks
)
hist_friends <- get_hist(
  df$friends_count,
  .breaks = atp_followings$breaks
)
# Save the four histogram data.frames together for downstream plotting.
setwd(out_dir)
save(
  hist_statuses,
  hist_favorites,
  hist_followers,
  hist_friends,
  file = "poq_neu_age_metadata_activity_histograms_replicate.RData"
)
#### VRA and closed primary analysis
# State groups: closed-primary states (party registration is meaningful)
# and (formerly) VRA-covered states (race is recorded on the voter file).
closed_primary <- c("CT","DE","FL","KS","KY","ME","MD","DC","NE","NM","NY","PA","WY")
vra <- c("AL","GA","LA","MS","SC","VA","AK","AZ","TX")
# Race/party composition of the matched Twitter panel, overall and within
# each state group; share of nominal independents in closed-primary states.
round(prop.table(table(df$race)), 2)
round(prop.table(table(subset(df, state %in% vra)$race)), 2)
round(prop.table(table(df$party)), 2)
round(prop.table(table(subset(df, state %in% closed_primary)$party)), 2)
mean(subset(df, state %in% closed_primary)$party %in% c("Independent","No Party","Unaffiliated"))
#### unique name analysis
## read all raw files
# NOTE(review): `thefiles` (the .zip listing) is filtered but never used
# below — only the already-extracted .csv files in `thefiles2` are read.
thefiles <- paste0(
  raw_data_dir,
  grep(
    "\\.zip",
    list.files(raw_data_dir),
    value=T
  )
)
big_states <- c("ca","tx","fl","ny","il","pa","oh","mi","nc","ga") #unzip doesn't work for > 4gb
##
thefiles <- thefiles[!grepl(paste0("_", big_states, collapse = "|"), thefiles)]
##
thefiles2 <- paste0(
  raw_data_dir, ## "unzipped/"),
  grep(
    "\\.csv",
    list.files(raw_data_dir),
    value=T
  )
)
##
# Read each state voter file, keeping only the columns needed here
# (everything as character; .default = col_skip() drops the rest).
data_list <- list()
for (thefile in c(thefiles2)) {
  cat(
    "\n",
    gsub(raw_data_dir, "", thefile),
    "..\n"
  )
  data_list[[thefile]] <- read_delim(
    thefile,
    delim="\t", quote="\"", comment="", trim_ws=T,
    col_types=cols(
      .default=col_skip(),
      tsmart_first_name = col_character(),
      tsmart_last_name = col_character(),
      voterbase_age = col_character(),
      voterbase_gender = col_character(),
      voterbase_race = col_character(),
      vf_party = col_character(),
      tsmart_state = col_character(),
      voterbase_registration_status = col_character()
    )
  )
}
# Stack all states and keep registered voters.
raw_data_registered <- subset(
  ldply(data_list, data.frame),
  voterbase_registration_status == "Registered" # seems to include previously registered
) %>% select(-voterbase_registration_status)
nrow(raw_data_registered)
# Same composition tables as above, but for the full voter-file population.
raw_data_registered$voterbase_race[raw_data_registered$voterbase_race=="Uncoded"] <- NA
round(prop.table(table(raw_data_registered$voterbase_race)), 2)
round(prop.table(table(subset(raw_data_registered, tsmart_state %in% vra)$voterbase_race)), 2)
round(prop.table(table(subset(raw_data_registered, tsmart_state %in% closed_primary)$vf_party)), 2)
mean(subset(raw_data_registered, tsmart_state %in% closed_primary)$vf_party %in% c("Independent","No Party","Unaffiliated"))
## age_breaks <- seq(1900, 2000, 5)
# Voting-age bins: [18,30), [30,50), [50,65), [65,150) — right-open (right = F).
age_breaks <- c(18, 30, 50, 65, 150)
raw_data_registered <- raw_data_registered %>%
  mutate(
    age_group = cut(
      as.numeric(voterbase_age),
      breaks=age_breaks,
      right=F
    )
  )
# Tag registrants whose first+last name is unique within their state:
# n_unique_name == 1 <=> no other registrant in the state shares the name.
raw_data_registered <- raw_data_registered %>%
  group_by(tsmart_first_name, tsmart_last_name, tsmart_state) %>%
  add_tally() %>%
  rename(n_unique_name = n) %>%
  ungroup()
# Party / race / gender / age composition among unique-name registrants.
round(prop.table(table(subset(raw_data_registered, n_unique_name == 1 & tsmart_state %in% closed_primary)$vf_party)), 2)
mean(subset(raw_data_registered, n_unique_name == 1 & tsmart_state %in% closed_primary)$vf_party %in% c("Independent","No Party","Unaffiliated"))
round(prop.table(table(subset(raw_data_registered, n_unique_name == 1 & tsmart_state %in% vra)$voterbase_race)), 2)
prop.table(table(subset(raw_data_registered, n_unique_name == 1)$voterbase_gender))
# BUG FIX: this previously tabulated `voterbase_age_group`, a column that is
# never created (the mutate() above names it `age_group`), so the line failed.
prop.table(table(subset(raw_data_registered, n_unique_name == 1)$age_group))
|
91c01c441ddbbd84cd55299ffde0b02c9f350563 | 290f7dc9b95b1f6895c72d74f72eeb6c977e7322 | /A2/hw2.r | a94feb352b9db4743f5f2f879d5bba532d059315 | [] | no_license | neha6490/MapReduceAssignments | 58d0c7ef1d7c4806244669e6362589fb21c47dde | ac954b8781f79b37b559d708bcdf6f05e111323c | refs/heads/master | 2020-12-24T19:04:05.837329 | 2016-05-13T13:13:04 | 2016-05-13T13:13:04 | 58,741,491 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 952 | r | hw2.r | require(dplyr)
# hw2.R — plot the monthly average ticket price for the 10 most frequent
# carriers found in MapReduce reducer output.
# Usage: Rscript hw2.r <dir-with-part-r-* files> <output-image-path>
# (library() errors immediately on a missing package, unlike require();
# the previous duplicate ggplot2 load further down has been removed.)
library(ggplot2)
args <- commandArgs(TRUE)
srcFile <- args[1]
outPutFile <- args[2]
# Stack every reducer output file ("part-r-*", tab-separated, no header)
# into one data frame.
files <- dir(srcFile, pattern='part-r-', full.names = TRUE)
outputs <- lapply(files, read.csv, header=FALSE, sep="\t")
output <- bind_rows(outputs)
colnames(output) <- c("carrier","price","month","frequency")
# Dense-rank carriers by frequency (ties share a rank) and keep the top 10.
sorted <- output[order(-output$frequency),]
sorted$rank <- rank(-sorted$frequency, ties.method="min")
sorted$drank <- rep(seq_along(rle(sorted$rank)$values), rle(sorted$rank)$lengths)
sortedNew <- sorted[sorted$drank <= 10,]
# One line per carrier; columns are referenced bare inside aes() (not via $,
# which bypasses ggplot's data masking).
plot1 <- ggplot(data=sortedNew, aes(x=month, y=price, color=carrier)) +
  geom_line() +
  scale_color_manual(name="Top 10 Airlines",
                     values = c("red","green","blue","orange","purple",
                                "pink","yellow","black","brown","maroon")) +
  ggtitle("Average price of airlines per month") +
  labs(x="Month(1-12)", y="Average Price(USD)")
plot1
ggsave(filename = outPutFile, plot = plot1)
732a31a2a0669871b7f8266eb83bdcb12a313f31 | 240bf1139f6098ec9c8c2fc96555780fc58cda30 | /lib/R/packages/lilbambu/man/harvestSites.Rd | 60c3d3ff8a30e9375176c9cb5c4841ef8f1dd422 | [] | no_license | cerobpm/lilbambu | b9cf0d341f7f3cca493757e393cdb860331bdcdd | af48399a3198bbd2d3b3846a06a662851f705ea8 | refs/heads/master | 2020-03-22T06:22:35.764750 | 2018-09-11T19:42:15 | 2018-09-11T19:42:15 | 139,629,159 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 699 | rd | harvestSites.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/harvestSites.R
\name{harvestSites}
\alias{harvestSites}
\title{harvestSites}
\usage{
harvestSites(url, north = 90, south = -90, east = 180, west = -180,
update = FALSE)
}
\arguments{
\item{url}{WOFWML WS end point}
\item{update}{boolean comportamiento ante SiteID duplicado (si TRUE, actualiza)}
}
\value{
""1 row inserted or updated. SiteID:..." or "nothing inserted/updated"
}
\description{
Esta funcion descarga Sites de un WOFWML WS e inserta en ODM usando "addSitesToLocal"
}
\examples{
harvestSites("http://brasilia.essi-lab.eu/gi-axe/services/cuahsi_1_1.asmx?WSDL",north=-32,south=-35,east=-55,west=-61,TRUE)
}
|
10a455ee58958ab91ef9c9cf512756f43afd1fcb | 2269026d56631cd66d6db71f2ff97854c32b6e54 | /RollFutures-overshoot.R | 12cb30ca94b46a70f39cf46f00b973e4abb30315 | [] | no_license | memazouni/RollFutures | a61402f7138aac7e9b790d32bddcd791dcd35608 | 512691e61edff9ae9b67ef2081249d6a35d79df5 | refs/heads/master | 2022-04-07T21:20:23.724006 | 2020-01-31T20:47:45 | 2020-01-31T20:47:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,239 | r | RollFutures-overshoot.R | #Download data
library(tidyverse)
library(profvis)
#apikey <- 'putyourapikeyhere'
tick_<- list('1983':'2012')[[1]]
#C
c_contracts <- c('H', 'K', 'N', 'U', 'Z')
c_months <- c('03', '05', '07', '09', '12')
#S
s_contracts <- c( 'F', 'H', 'K', 'N', 'Q', 'U', 'X')
s_months <- c('01', '03', '05', '07', '09', '11')
#W
w_contracts <- c( 'H', 'K', 'N', 'U', 'Z')
w_months <- c_months
# CL
cl_contracts <- c('F', 'H', 'K', 'N', 'Q', 'U', 'X', 'Z') # Chose contract months that match c and s
cl_months <- c('01', '03', '05', '07', '09', '11', '12')
#Years Loop
for(j in 1:length(tick_)){
#
# # Corn Contracts
# for(i in 1:length(c_contracts)){
#
# tick <- tick_[[j]]
#
# data <- paste0('http://ondemand.websol.barchart.com/getHistory.csv?apikey=', apikey, '&symbol=ZC', c_contracts[i],
# substr(as.character(tick), 3, 4), '&type=daily') %>%
# download.file(destfile =paste0('data/', 'ZC', c_contracts[i], tick, '.csv'), method = "libcurl")
#
# }
#
# # Soybeans
# for(i in 1:length(s_contracts)){
#
# tick <- tick_[[j]]
#
# data <- paste0('http://ondemand.websol.barchart.com/getHistory.csv?apikey=', apikey, '&symbol=ZS', s_contracts[i],
# substr(as.character(tick), 3, 4), '&type=daily') %>%
# download.file(destfile =paste0('data/', 'ZS', s_contracts[i], tick, '.csv'), method = "libcurl")
#
# }
#
# # Chi Wheat
# for(i in 1:length(w_contracts)){
#
# tick <- tick_[[j]]
#
# data <- paste0('http://ondemand.websol.barchart.com/getHistory.csv?apikey=', apikey, '&symbol=ZW', w_contracts[i],
# substr(as.character(tick), 3, 4), '&type=daily') %>%
# download.file(destfile =paste0('data/', 'ZW', w_contracts[i], tick, '.csv'), method = "libcurl")
#
# }
# WTI Crude
  # WTI crude (CL): one request per contract-month letter for the year
  # tick_[[j]]; the daily-history CSV is written to data/CL<code><year>.csv.
  # NOTE(review): `apikey` is only defined in a commented-out line at the
  # top of this script, so this loop fails unless the key is set elsewhere.
  for(i in 1:length(cl_contracts)){
    tick <- tick_[[j]]
    # Barchart onDemand getHistory endpoint. download.file() returns an
    # invisible status code, so assigning its result to `data` carries
    # no useful information.
    data <- paste0('http://ondemand.websol.barchart.com/getHistory.csv?apikey=', apikey, '&symbol=CL', cl_contracts[i],
                   substr(as.character(tick), 3, 4), '&type=daily') %>%
      download.file(destfile =paste0('data/', 'CL', cl_contracts[i], tick, '.csv'), method = "libcurl")
  }
}
|
e2619edb6be61a46d247146e5b33439676bf235e | 200ea59341293119f381dee1affd33e394c0eabc | /man/simpson.Rd | e1f05bda4df8fa8a2c79bcd2c43335dd0d3b52ed | [] | no_license | robchavez/robR | c17c65b8ca7b12de62272340bbd1e88d7fa9a163 | 33ce627a875e6a94aedfa0a0f078f80a1230727d | refs/heads/master | 2021-01-17T08:39:02.524184 | 2019-05-16T05:36:37 | 2019-05-16T05:36:37 | 31,325,134 | 1 | 1 | null | 2015-03-14T01:52:52 | 2015-02-25T17:03:59 | R | UTF-8 | R | false | false | 572 | rd | simpson.Rd | \name{simpson}
\alias{simpson}
\title{Simpsons Episode Selector.}
\description{Randomly selects episodes of The Simpsons to watch (from seasons 3-10).}
\usage{
simpson()
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{ }
\references{none}
\author{Robert S. Chavez}
\note{This is just a joke... but useful.}
\seealso{N/A}
\examples{
#This is the only use.
simpson()
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{simpson} |
11d1334eb1959c0ac9142a84cae9828ccb8673a2 | 03bdb07e5846d37a63d45a37e80a72d681f04f9f | /R/themes.R | e6cf1e5f289180547d55338096a2e1df7a753ec7 | [
"MIT"
] | permissive | tpemartin/econDV | 5a36daabd2aca947cd1cce755328566419f3160b | c4d03f64b6ec142fc605f3e1fad3d46b5314e81c | refs/heads/master | 2023-02-24T03:12:47.791627 | 2021-02-08T09:56:46 | 2021-02-08T09:56:46 | 283,764,896 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 406 | r | themes.R | #' Create a theme for time series that fit the format of the Economist
#'
#' @return
#' @export
#'
#' @examples none
theme_timeSeries <- function() {
  # Economist-style time series look: no axis titles, no y-axis line or
  # ticks, faint horizontal grid lines only, and the legend suppressed.
  blank <- ggplot2::element_blank()
  ggplot2::theme(
    axis.title         = blank,
    axis.line.y        = blank,
    axis.ticks.y       = blank,
    panel.grid.major.y = ggplot2::element_line(color = "#EEEEEE"),
    legend.position    = "none"
  )
}
|
3349c4d53f5cb2406634f090f0930a28c13c074b | 55fdb24e600e211d40241f5dec65ed422658a3c5 | /R/find.R | 6c4e397204b4ea9f70e265c65e76966da0fc5885 | [
"MIT"
] | permissive | layik/geocoder | d15ec93639ecf63392726fcb4983b49dbabf964b | 1ea6576a0d645ee1a42ee9c9944bab4914a2306a | refs/heads/master | 2022-04-04T03:29:54.526605 | 2020-02-28T00:38:55 | 2020-02-28T00:38:55 | 238,893,566 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,182 | r | find.R | #' Find geocode(s)
#'
#' The main function to find geocodes, using standard MongoDB queries,
#' with focus on particularly on returing "geometry" documents'
#' coordinates as useful objects (thinking about sfcs for instance).
#'
#' @return sf object from MongoDB or `mongolite::iterate`
#'
#' @param x keys and values to return geometries with
#' @param collection set collection from parameter default `geocode`.
#' @param full_url ability to use gc_find on any mongodb
#' @param as_sf return the results as an sf object.
#'
#' @export
#' @example
#' \dontrun{
#' gc_find("key")
#' }
# Query a MongoDB collection for geocode documents.
#
# x:          key/value pairs forming the Mongo query; serialised below
#             with jsonify::to_json().
# collection: collection name. NOTE(review): defaults to 'geocoder' here
#             although the roxygen block above says the default is
#             'geocode' -- confirm which is intended.
# full_url:   optional full MongoDB connection URL; when supplied it
#             replaces the default local connection.
# as_sf:      when TRUE, materialise the GeoJSON results as an sf object;
#             otherwise return the raw mongolite iterator.
gc_find = function(x,
                   collection = 'geocoder',
                   full_url,
                   as_sf = TRUE ) {
  force(x)  # evaluate x eagerly so malformed input errors before connecting
  con = mongolite::mongo(collection = collection)
  if(!missing(full_url)) {
    # Re-open the connection against the user-supplied server instead.
    con = mongolite::mongo(url = full_url,
                           collection = collection)
  }
  json = jsonify::to_json(x)
  # If serialisation produced a JSON array ("[...]"), strip the single
  # pair of enclosing brackets -- presumably to unwrap a one-element
  # array into the bare object Mongo expects (TODO confirm against
  # jsonify::to_json behaviour for the inputs used here).
  if(substr(json, 1, 1) == "[" &&
     substr(json, nchar(json), nchar(json)) == "]") {
    json = substring(json, 2, nchar(json) - 1)
  }
  it = con$iterate(query = json)
  # create sf and return it
  if(as_sf) {
    df = geojsonsf::geojson_sf(it$json())
    return(df)
  }
  it  # non-sf path: hand back the iterator for the caller to consume
}
|
107cf0a8d792c7169de9253aed1d13ba642df08f | 982dcc77db4a58e81527649b33db10014fd2edb3 | /day14/ggplot2_lab.R | b4e4411de3128537e9b67fa25084a7bd6c410f52 | [] | no_license | jaewon-jun9/R-in-multi | c734aaa1bce11b583a64b2309b852989924e51c4 | acd5f828557b8c54fcfc64a799a80340d3e6a086 | refs/heads/master | 2020-12-03T01:17:14.655310 | 2020-01-01T03:10:51 | 2020-01-01T03:10:51 | 231,172,519 | 1 | 0 | null | 2020-01-01T03:47:20 | 2020-01-01T03:47:19 | null | UTF-8 | R | false | false | 877 | r | ggplot2_lab.R | library(ggplot2)
library(dplyr)
#문제1
ggplot(data = mpg,aes(x=cty, y=hwy))+geom_point(colour="Blue")
ggsave("result1.png")
#문제2
ggplot(data= mpg, aes(x=class))+geom_bar(aes(fill=drv))
ggsave("result2.png")
#문제3
options(scipen = 99)
ggplot(data = midwest,aes(x=poptotal, y=popasian)) + geom_point() + xlim(0,500000) + ylim(0,10000)
ggsave("result3.png")
#문제4
mpg_1 <- mpg %>% filter(class=="compact"|class=="subcompact"|class=="suv")
ggplot(data=mpg_1,aes(x=class,y=cty)) + geom_boxplot()
ggsave("result4.png")
#문제5
click <- read.table("product_click.log")
ggplot(data= click, aes(x=V2))+geom_bar(aes(fill=V2))
ggsave("result5.png")
#문제6
datetime <- strptime(click$V1,format='%Y%m%d%H%M')
day <- format(datetime,'%A')
ggplot(data=click,aes(x=day))+geom_bar(aes(fill=day))+labs(x="요일",y="클릭수")+ylim(0,200)+ theme_linedraw()
ggsave("result6.png")
|
a114faa4cbaa406730bbce03de263448b561af4d | dc3371eb1ebc1ee9a646ffa47a010a58695c393f | /reproducevarFMado/missingdata/completedata/maxima.R | 5c5e78c5c4cc196a76d99cb835cfcb91b9f48e8b | [] | no_license | Aleboul/var_FMado | 2a9a3eabcee7422540bad5b387b0030f8ef49754 | 8bf7b4bee89d1c98269b50b16d7ef34a25d0e47d | refs/heads/main | 2023-08-30T20:52:55.586686 | 2021-10-09T17:35:32 | 2021-10-09T17:35:32 | 380,979,571 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,065 | r | maxima.R | """
Produit un échantillon de maximum par bloc issu d'une copule de Student.
Estime un lambda-FMadogramme pour plusieur valeurs de lambda. Calcule la
variance sur plusieurs échantillons.
Inputs
------
M (int): number of iteration
n (c(int)): number of samples
nmax_ (c(int)) : length of sample for which the maximum is taken
theta ([float]), psi1 (floatt), psi2 (float) : parameters of the copula
p : array of missing's probabilities
"""
library(VineCopula)
library(doRNG)
library(dplyr)
prefix = "/home/aboulin/Documents/stage/var_FMado/bivariate/output/"
target <- list()
# Empirical lambda-FMadogram of a bivariate sample.
#
# xvec, yvec: numeric margins of the sample.
# lambda:     weight in [0, 1] balancing the two margins.
#
# Returns 0.5 * mean(|Fn(x)^lambda - Gn(y)^(1 - lambda)|), where Fn and
# Gn are the empirical CDFs of the two margins evaluated at the data.
fmado_ <- function(xvec, yvec, lambda) {
  u <- ecdf(xvec)(xvec)
  v <- ecdf(yvec)(yvec)
  0.5 * mean(abs(u^lambda - v^(1 - lambda)))
}
# Simulation "target": the two sampler closures consumed by simu().
# Both read the global `nmax` (block length) from the enclosing script.
target$generate_randomness <- function(nobservation){
    # BiCopSim(N, 2, 0.8, 3) with N = nobservation * nmax -- presumably
    # VineCopula family 2 (Student t) with par = 0.8, df = 3; confirm
    # against the VineCopula::BiCopSim documentation.
    sample_ = BiCopSim(nobservation * nmax, 2, 0.8, 3)
    return(sample_)
}
target$robservation <- function(randomness){
    # Component-wise block maxima: reshape the draws into blocks of
    # length nmax, take the maximum of each block, and return the
    # result as a two-column (bivariate) matrix.
    sample = apply(matrix(t(randomness), ncol = nmax),1, max)
    return(matrix(sample, ncol = 2, byrow = T))
}
M = 100 # number of iteration
n = c(128) # length of sample
nmax_ = c(16,32,64,128,256,512)#c(128,256,512,1024)
filename <- paste0(prefix, "max_student_M", M, "_n", n, ".txt")
# One Monte Carlo experiment: M independent replications (parallelised
# with reproducible RNG via %dorng%) of the lambda-FMadogram estimate.
# Reads the globals M, n, nmax and lambda defined in the script; each
# replication contributes one row (FMado, runtime, lambda, n, nmax),
# and foreach stacks the rows with rbind.
simu = function(target){
  foreach(rep = 1:M, .combine = rbind) %dorng% {
    # foreach runs the body once per replication and combines the
    # per-iteration `output` matrices row-wise (rbind).
    # Allocate space for output
    FMado_store = matrix(0, length(n))
    FMado_runtimes = rep(0, length(n))
    FMado_lambda = rep(0, length(n))
    # generate all observations and sets of randomness to be used
    obs_rand = target$generate_randomness(max(n)) # we produce our bivariate vector of data
    obs_all = target$robservation(obs_rand)
    for(i in 1:length(n)){
      t_FMado = proc.time() # start timing this estimate
      # subset observations
      obs = obs_all[1:n[i],] # we pick the n[i] first rows, i.e 50 rows for the first, 100 for the second
      ### We compute the lambda FMadogram
      FMado = fmado_(qnorm(obs[,1]), qnorm(obs[,2]), lambda) # we compute now the lambda-FMadogram (the normalized one)
      t_FMado = proc.time() - t_FMado
      # Save the results
      FMado_store[i,] = FMado
      FMado_runtimes[i] = t_FMado[3]
      FMado_lambda[i] = lambda
    }
    output = cbind(FMado_store, FMado_runtimes,FMado_lambda, n,nmax)
    output
  }
}
# Sweep lambda over (0, 1) for each block size in nmax_, run the Monte
# Carlo experiment, and rescale the estimates by sqrt(n).
lambda_ = seq(0.01, 0.99, length.out = 100)
store_ = matrix(ncol = 6, nrow = 0)
for (i in 1:length(nmax_)){
  nmax = nmax_[i]  # global read by target$... and simu()
  print(i)  # progress indicator
  lambda_FMadogram = foreach(rep = 1:length(lambda_), .combine = rbind) %dorng% {
    lambda = lambda_[rep]  # global read inside simu()
    prod = simu(target)
    scaled = (prod[,1]) * sqrt(prod[,4])  # sqrt(n)-normalised estimate
    output = cbind(prod, scaled)
  }
  # NOTE(review): growing store_ with rbind inside a loop is quadratic;
  # collecting the pieces in a list and binding once would be faster.
  store_ = rbind(store_,lambda_FMadogram)
}
print(store_)
df_FMado = data.frame(store_)
names(df_FMado) = c("FMado", "runtime", "lambda", "n", "nmax", "scaled")
# Empirical variance of the scaled estimator per (lambda, nmax) cell.
# NOTE(review): this rebinds `var`, shadowing stats::var from here on.
var = df_FMado %>% group_by(lambda, nmax) %>% summarise(var_emp = var(scaled))
print(var)
require('reticulate')  # loads reticulate for py_save_object()
py_save_object(var, filename)  # persist the result table for Python at `filename`
|
4d260500c5571fe0f66bb055507248ed0bc051d7 | 2a7aa480771be71c941a8eb2b52d89012178482e | /src/exercises/s2-2.R | a13538e2a916bdd6c542423df3e3df373293effe | [
"MIT"
] | permissive | wilsonify/ModernOptimization | 49dc0caac87040e6dfb39a399ec9899f52d9b384 | 0d6946e07e5f0d4d95311da6a62fd1d2be0c52f3 | refs/heads/main | 2023-04-30T10:40:53.666553 | 2020-12-21T19:07:29 | 2020-12-21T19:07:29 | 313,118,722 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 80 | r | s2-2.R | v <- seq(2, 50, by = 2) # one way
print(v)
v <- (1:25) * 2 # other way
print(v)
|
2cf864c92ca57ef1823043db87205ca539bc3d6b | 9c7d347db48a305cfc21ea9bbb7d318fd1914f85 | /inst/doc/c-county-choropleth.R | 78874cdece289e188f633c9738e58f7e1af19787 | [] | no_license | DanielHadley/choroplethrModified | 351f27f0e512992da8c9b36b972a05bf11121bb9 | 7c923f95b0507769a515a2357b2a9ef96ce80aa7 | refs/heads/master | 2020-12-25T23:26:43.841806 | 2014-12-01T23:03:27 | 2014-12-01T23:03:27 | 27,182,951 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 918 | r | c-county-choropleth.R | ## ----hold=TRUE-----------------------------------------------------------
library(choroplethr)
?df_pop_county
data(df_pop_county)
?county_choropleth
county_choropleth(df_pop_county)
## ------------------------------------------------------------------------
library(choroplethrMaps)
?county.regions
data(county.regions)
head(county.regions)
## ------------------------------------------------------------------------
county_choropleth(df_pop_county,
title = "2012 Population Estimates",
legend = "Population",
buckets = 1,
zoom = c("california", "washington", "oregon"))
## ------------------------------------------------------------------------
library(ggplot2)
choro = CountyChoropleth$new(df_pop_county)
choro$title = "2012 Population Estimates"
choro$ggplot_scale = scale_fill_brewer(name="Population", palette=2)
choro$render()
|
765d2b43be503b3b863ce1fa6fa7a5c781402a44 | ffcd9bbe4e4b11e19a53ffb85d2cca59b3762c9c | /man/dot-abort_water_leakc.Rd | c5cad6e597496388744aac2555edb0135dc46c20 | [
"MIT"
] | permissive | markushlang/sfcr | 6e6132238039e0b6aafe1ab8fa91501e4f323215 | 42f7fdf08da5deea3be2ca8210727a8fc3148ec7 | refs/heads/main | 2023-02-12T03:43:24.523380 | 2021-01-11T07:58:47 | 2021-01-11T07:58:47 | 330,479,115 | 1 | 0 | NOASSERTION | 2021-01-17T20:22:11 | 2021-01-17T20:22:11 | null | UTF-8 | R | false | true | 464 | rd | dot-abort_water_leakc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfcr_validate.R
\name{.abort_water_leakc}
\alias{.abort_water_leakc}
\title{Abort if column validation is not fulfilled}
\usage{
.abort_water_leakc(c2names, which)
}
\arguments{
\item{c2names}{Names of offending columns}
\item{which}{Balance-sheet or transactions-flow matrix?}
}
\description{
Abort if column validation is not fulfilled
}
\author{
João Macalós
}
\keyword{internal}
|
ecebddf3623c45b23366f22bfca5601e499b9209 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rbmn/examples/dev4mn.Rd.R | 7cdae39de5e84bc45517638cdefd929831e02ee4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 208 | r | dev4mn.Rd.R | library(rbmn)
### Name: dev4mn
### Title: Computes the deviance for a sample of multinormal vector
### Aliases: dev4mn
### ** Examples
dev4mn(matrix(runif(3), 1), t(rbmn0mn.01$mu), rbmn0mn.01$gamma);
|
a30925b9cc87bcd9ecb7aa2f3e5a4d3c9e734160 | 060bf8e8b3f41e704a3d6620df1ccdcad4e4027b | /01_R.R | ad011b43261a8314976400a45e1eecad25334db0 | [] | no_license | FGalvao77/Fundamentos-da-linguagem-R | 51b96225ea46c01b7e283294a4d1fce938f37b3a | 1f1e4bb7a71d339a879f728b6aa106eeecdb4d44 | refs/heads/main | 2023-04-16T23:40:24.299488 | 2021-05-02T23:43:11 | 2021-05-02T23:43:11 | 363,127,544 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,304 | r | 01_R.R | ### LINGUAGEM R ###
# INTRODUÇÂO
# R é um software estatístico/linguagem de programação
# muito utilizado no campo acadêmico. Domina-lo é requesito
# essencial para Ciência de Dados e áreas correlatas,
# através da linguagem podemos manipular, visualizar dados e,
# criar modelos utilizando técnicas de Machine Learning,
# O R também fornce recursos para construir aplicações com
# o R Markdown (sites, livros online, formulários...).
# Vantagens da utilização do R:
# - gratuito (open-source)
# - comunidade cada vez mmais ativa e em amplo crescimento
# - milhares de pacotes para diversas aplicações
# - multi-plataforma, compátivel com Windows, Mac, Linux,..
# Vamos para nosso primeiro código!
'Hello, World!'
# [1] "Hello, World!"
# Atribuíndo um valor a variável
x = 3
x
# [1] 3
# CONCEITOS BÁSICOS
# Declarando variávies, comentando código e imprimindo na tela:
# declarar uma variável podemos usar "=" e/ou "<-"
# para comentar use "#"
# para imprimir use o "print()"
# e utilize o "readline(prompt="...")" para receber uma entrada
# declarando duas variáveis
x = 'Hello, R!'
y <- 'Welcome, Fernando!!!'
# imprimindo as variáveis
print(x)
print(y)
# solicitando uma entrada ao usuário o seu nome e instanciando na variável "nome"
nome <- readline(prompt = 'Qual é o seu nome? ')
# vamos imprimir o valor da variável "nome" de duas formas
nome
print(nome)
# podemos exemplificar o comando de entrada, sem a necessidade do "promppt = "
# DADOS E OPERADORES
# Tipos de Dados(básicos)
# Character(“Texto”), neeste tipo inserimos caracteres, podendo ser números, textos, símbolos…;
# Integer (1L), Integer se refere a números inteiros e utilizamos o “L” para determina-lo,
# diferente do Numeric, onde os números são decimais: 1.0 e não 1;
# Numeric (7.25) Números decimais, mesmo que declarando-os como 12, o sistema retornará 12.0, por exemplo;
# Logical (T/F ou TRUE/FALSE) Tipo de dado com retorno TRUE ou FALSE, assim como T ou F. TRUE quando atendidas;
# as condições e FALSE quando não atendidas. Nos aprofundaremos logo logo neste tipo de dado: tenha paciência.
# character
caractere = 'Texto caractere'
class(caractere)
# integer
inteiro = 15L
class(inteiro)
# numeric
numero = 15
class(numero)
# numérico decimal
PI = 3.14
class(PI)
# logical
logico = TRUE
class(logico)
# OPERADORES
# Operador # Operação
# + adição
# - subtração
# * multiplicação
# / divisão
# ^ ou ** exponenciação
# %% retorna o resto da divisão
# %/% retorna o valor inteiro da divisão
# adição
100 + 100
# subtração
100 - 30
# multiplicação
100 * 2
# potenciação
3 ^ 2 # ou
3 ** 2
# divisão
10 / 3
# retorna o resto da divisão
10 %% 3
# retorna o valor inteiro da divisão
10 %/% 3
# extra - raiz quadrada
sqrt(16)
# ESTRUTURA DE DADOS
# Vetor
vet = 3
print(class(vet))
is.vector(vet) # retorna TRUE (verdadeiro)
# podemos instanciar diversos elementos em um vetor
# use a função "c()"
vet_01 = c(1,2,3, 'texto')
class(vet_01) # retorna character pois todos elementos serão do mesmo tipo
is.vector((vet_01))
# Lista (elementos de tipos diferentes)
# declarando as variáveis "x, y, z"
# e concatenando em outra variável "lista"
x = c('tetxto1', 'texto2')
y = c(1,2,3)
z = c(T,F,T)
lista = c(x,y,z)
# imprimindo o resultado das variáveis
print(x)
print(y)
print(z)
print(lista)
# imprimindo o tipo de cada variável
print(class(x))
print(class(y))
print(class(z))
print(class(lista))
# Matriz (todos os elementos serão do mesmo tipo)
# instanciando uma matriz "1 x 2" - uma linha por duas colunas
matriz = matrix(c(1,2),
nrow = 1, ncol = 2,
byrow = TRUE)
matriz
nrow(matriz) # retorna número de linhas do dataframe
ncol(matriz) # retorna número de colunas do dataframe
dim(matriz) # retorna número de linhas e colunas
# outro exemplo
# uma matriz "3 x 4" - de três linhas por quatro colunas
outra_matriz = matrix(c(1,2,3,4,5,6,7,
8,9,0,NaN,NaN),
nrow = 3, ncol = 4,
byrow = T)
outra_matriz
# Dataframe (estrutura de dados tabular)
# declarando um dataframe
x = data.frame(
# instanciando o nome das colunas
nome = c('Fernando','Kátia','Sara','Eloah'),
idade = c(43L,39L,21L,13L),
sexo = c('M','F','F','F')
)
is.data.frame(x) # retorna TRUE - verdadeiro
x
# imprimindo a class da variável "x"
class(x)
# CONDICIONAIS
# Quando você estiver programando, é importante inserir algumas regras em seu código
# para que a máquina saiba O QUE fazer, QUANDO fazer e COMO fazer.
# E para essas necessidades, temos o uso das CONDICIONAIS, ferramentas essenciais,
# uma vez que queremos a ação executada apenas se certa condição for atendida.
# Como resposta, temos um retorna lógico, TRUE ou FALSE (verdade ou falsa)
# Segue um exemplo simples e didático:
# você sai com guarda-chuva SE acredita que pode chover.
# Assim sendo você possui 1 condicional para levar o guarda chuva.
# Operadores lógicos
# Operador # Lógica
# < menor
# > maior
# <= menor ou igual
# >= maior ou igual
# == igual
# != diferente
# ! negação ("not")
# menor
3 < 5 # TRUE
# maior
3 > 5 # FALSE
# igual
3 == 3 # TRUE
# diferente
3 != 5 # TRUE
# "a" é igual a 1
'a' == 1 # FALSE
# "Ciência de Dados" é igual a "Ciẽncia de Dados"
'Ciência de Dados' == 'Ciência de Dados' # TRUE
# if()
# sintaxe
# if(condição) {
# comandos executados quando TRUE
# }
# exemplo 1
a = 1
b = 100
if (a < b) { # avaliando se "a" é menor que "b"
# se for verdade, retorna a mensagem abaixo
print('A é menor que B')
}
# exemplo 2
a = 'Fernando'
b = 43
if (a == b) { # "a" é igual "b"
# condição falsa, não retorna a mensagem
print('A é igual a B')
}
# if else()
# sintaxe
# if (condiçao) {
# comandos executados quando TRUE
# } else {
# comandos executados quando FALSE
# }
# definindo duas variáveis "a, b", entrada via teclado
# avaliando as entradas e retornando uma mensagem
a = as.integer(readline('Digite um número inteiro para A: '))
b = as.integer(readline('Digite outro valor inteiro para B: '))
if (a < b) {
print('A é menor que B')
} else {
print('A é maior que B')
}
# ifelse()
# sintaxe (possui uma sintaxe simplificada)
# ifelse(condição, 'retorno se TRUE', 'retorno se FALSE')
a = as.integer(readline('Digite um número inteiro para A: '))
b = as.integer(readline('Digite outro valor inteiro para B: '))
ifelse (a > b, 'A é maior que B', 'A é menor que B')
# Desafio condicionais em R
# Crie um chatbot simples que pergunte ao usuário sua idade.
# caso sua idade esteja compreendida entre 18 anos ou menos, retorne: você é uma criança!
# caso sua idade esteja compreendida entre 19 e 50 anos, retorne: você é um adulto!
# caso sua idade esteja compreendida entre 51 anos ou mais, retorne: você é um adulto experiente!
# solução
# Age-classifying chatbot. Boundary fix: the exercise statement above
# says 19-50 -> "adulto" and 51+ -> "adulto experiente", but the
# original condition `sua_idade <= 51` misclassified age 51 as "adulto".
sua_idade <- as.integer(readline('Digite sua idade: '))
if (sua_idade <= 18) {
  msg <- 'Você é uma criança!'
  print(msg)
} else if (sua_idade <= 50) {
  msg <- 'Você é um adulto!'
  print(msg)
} else {
  msg <- 'Você é um adulto experiente!'
  print(msg)
}
# print(msg)
# outro exemplo
30 -> x
if (x == 30) {
res <- 'Numero igual a 30'
res
} else if (x > 30) {
res <- 'Número maior que 30'
res
} else {
res <- 'Número menor que 30'
res
}
# res
# LAÇOS DE REPETIÇÃO (loops)
# While e For
# sintaxe do while
# while (condição) {
# comandos
# }
# exemplo de uso
z <- 10
while (z <= 20) { # z é menor e/ou igual a 20
# se condição for verdade, o valor de z é impresso a cada iteração até a condição for falsa
print(z)
z <- z + 1
}
# outro exemplo
z <- 5
while (z < 15) { # z é menor que 15
# se condição for verdade, o valor de z é atualizado em + 1 até a condição for falsa
z <- z + 1
print(z) # imprimindo o valor de z a cada iteração
}
# para finalizar, vamos para mais um exemplo
z <- 8
while (z > 9) { # z é maior que 9
# se condição for verdade, o valor iterado de z é impresso
z <- z + 1
print(z)
}
# perceba que, nada é impresso
# já que a condição não é verdadeira
# sintaxe do for
# for (x in y) {
# comandos
# }
# exemplo
for (x in 1:15) { # para x de 1 à 15
print(x) # imprima x
}
# outro exemplo
for (i in 5:10) {
print(i)
}
# podemos também definir via entrada teclado
# onde usuário define o início e o fim do laço de repetição
x = as.integer(readline('Digite um número inteiro (início): '))
y = as.integer(readline('Digite um número inteiro (fim): '))
for (i in x:y) {
print(i)
}
# FUNÇÕES
# Funções nada mais são do que blocos de código “encapsulados” onde você armazena
# uma lista de instruções e posteriormente, utilizando o nome da função em seu código,
# você a chama para execução inserindo alguns parâmetros se necessário.
# Temos as funções “R base”, ou seja, aquelas que já são carregadas de forma nativa
# junto ao R como a função “mean()” que calcula a média, por exemplo, entre outras diversas.
# funções R Base
x = c(11,29,35,47,51,68,70,83,92,104)
# desvio padrão
sd(x)
# média
mean(x)
# mediana
median(x)
# informações estatísticas
summary(x)
# visualizando os quartis
quantile(x)
# Para um exemplo mais prático, vamos instanciar um dataframe,
# usaremos o já conhecido "iris", este dataframe já vem embutido no R
# instanciando o "iris" na variável "df_iris"
df_iris <- iris
# visualizando as 6 primeiras linhas
head(df_iris)
# se preferir, através do parâmetro "n = x"
# pode definir a quantidade de linhas para ser exibidas
head(df_iris, n = 3)
# visualizando as 6 últimas linhas
tail(df_iris)
# como no "head()", também podemos definir
# quantidade das últimas linhas para exibir
tail(df_iris, n = 10)
# visualizando a quantidade de linhas e colunas
print(nrow(df_iris)) # "nrow" - linhas
print(ncol(df_iris)) # "ncol" - colunas
# nome das colunas
colnames(df_iris)
# índices das linhas
row.names(df_iris)
# dados estatísticos do dataframe
summary(df_iris)
# dados estatíticos de uma coluna específica
summary(df_iris$Sepal.Width)
# visiualizando os quartis de uma coluna específica
quantile(df_iris$Petal.Length)
# Podemos chamar o dataframe "iris" diretamente
# e aplicar nossas análises
head(iris) # 6 primeiras linhas
tail(iris) # 6 primeiras linhas
# abre pasta para caminho de arquivos, utilize no RSTUDIO!
file.choose()
# Outro dataframe já no R, temos o "mtcars"
df_cars <- mtcars
# visualizando 6 primeiras linhas
head(df_cars)
# dados estatísticos
summary(df_cars)
# visualizando as correlações entre as variáveis
cor(df_cars)
# CRIAÇÃO DE FUNÇÕES
# Podemos criar nossas próprias funções em R de forma simples e intuitiva.
# cálculo de porcentagem
# Computes `por` percent of `num`.
#
# Fix: in the original, both parameters were dead -- they were
# unconditionally overwritten by readline() prompts. Now arguments
# passed by the caller are used directly, and the interactive prompts
# only fire for arguments left missing, so the existing no-argument
# call `porcentagem()` still behaves as before.
porcentagem <- function(num = NULL, por = NULL) {
  if (is.null(num)) {
    num <- as.integer(readline('Digite o número para cálculo: '))
  }
  if (is.null(por)) {
    por <- as.integer(readline('Digite o valor da porcentagem: '))
  }
  return(num * por / 100)
}
porcentagem()
# Criamos uma função chamada “porcentagem”. Nela inserimos 2 parâmetros para funcionamento:
# “num”, que se refere ao número alvo, e
# “por”, referente a porcentagem a ser calculada.
# Desta forma utilizamos o “readline()” para pedir os números ao usuário e
# ao término lhe entregar o calculo de sua porcentagem.
# Por fim, apenas executamos a função e ela começa a interagir com o usuário.
# vamos pratocar com outros exemplos
# função aritmética - soma
# irar executar a soma de dois números entrada via teclado
# declarando a função
# Returns the sum of two numbers.
#
# Fix: the original ignored its parameters and always prompted via
# readline(). Arguments are now used when supplied; missing ones are
# still prompted for, keeping the interactive `soma()` call working.
soma <- function(num_1 = NULL, num_2 = NULL) {
  if (is.null(num_1)) {
    num_1 <- as.numeric(readline('Digite o primeiro número: '))
  }
  if (is.null(num_2)) {
    num_2 <- as.numeric(readline('Digite o segundo número: '))
  }
  return(num_1 + num_2)
}
soma()
# função aritmética - raiz quadrada
# Returns the square root of a number.
#
# Fix: the original ignored its parameter and always prompted via
# readline(). A supplied argument is now used directly; the prompt
# only fires when the argument is missing (interactive use unchanged).
raiz_quadrada <- function(numero = NULL) {
  if (is.null(numero)) {
    numero <- as.numeric(readline('Digite o número que deseja saber a raiz quadrada: '))
  }
  return(sqrt(numero))
}
print(raiz_quadrada())
# cálculo da área de um retângulo
# Returns the area of a rectangle with sides x and y.
#
# Fix: the original ignored its parameters and always prompted via
# readline(). Supplied arguments are now used directly; missing ones
# are still prompted for, so the interactive call is unchanged.
area_retangulo <- function(x = NULL, y = NULL) {
  if (is.null(x)) {
    x <- as.numeric(readline('Digite o valor de x: '))
  }
  if (is.null(y)) {
    y <- as.numeric(readline('Digite o valor de y: '))
  }
  return(x * y)
}
area_retangulo()
# PACOTES
# Pacotes nada mais são que códigos prontos de terceiros, criados para solucionar problemas
# específicos ao implementar novas funções ao seu código R.
# Temos pacotes para Machine Learning, Matemática, Web Scraping, Desenvolvimento Web e
# outros milhares e aqui está o grande trunfo do movimento Open-Source: produção de pacotes
# em massa.
# Onde encontrar pacotes? há 3 formas, via RStudio:
# 1. CRAN
# Via linha de comando no próprio console RStudio com:
# install.packages(“nome-do-pacote”,dependencies=TRUE)
# 2. Manualmente, no CRAN formato .zip ou .tar.gz;
# No menu do Rstudio selecione Menu Tools > Install Packages > Install packages from pacage arquive file e selecione o pacote escolhido.
# 3. No R Studio, na tela 4, em “Packages”: busque, selecione e clique em “install”.
# instalando pacote
install.packages('ggplot2', dependencies = TRUE)
# carrega o pacote para ser utlizado no código
library(ggplot2)
# documentação
# https://github.com/rstudio/cheatsheets/blob/master/data-visualization-2.1.pdf
# Tipo Objeto
# dispersão (scatter plot) geom_point()
# gráfico de bolhas geom_point()
# histograma geom_histogram()
# boxplot geom_boxplot
# densidade geom_density()
# gráfico de linhas geom_line()
# TIDYVERSE
# Conjunto de pacotes focados em análise de dados
install.packages('tidyverse')
# instalará:
# ✔ ggplot2 (gráficos)
# ✔ purrr (programação funcional)
# ✔ tibble (criação dataframes)
# ✔ dplyr (manipulação de dataframes)
# ✔ tidyr (transformação de dataframes)
# ✔ stringr (lida com strings e regex)
# ✔ readr (leitura dados retangulares: csv, tsv...)
# ✔ forcats (lida com fatores) fatores são estruturas de dados utilizadas para ordenar strings
# Listando todos os datasets presentes no R Base
data(package = .packages(all.available = TRUE))
# ou apenas
data()
# SCATTERPLOT ou gráfico de dispersão
library(tidyverse) # chamando o pacote
# sintaxe de utlização ggplot
# ggplot(data = <DATA>) +
# <GEOM_FUNCTION>(mapping = aes(<MAPPING>),
# stat = >STAT>,
# position = <POSITION>) +
# <COORDINATE_FUNCTION> +
# <FACET_FUNCTION> # dividir o gráfico em subplots
# displ: o tamanho do motor de um carro, me litros
# hwy: a eficiência de combustível de um carro na estrada, em milhas por galão (mpg).
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ,
y = hwy))
# exibindo a variável "mpg"
mpg
# GRÁFICO DE BARRAS
ggplot(diamonds, aes(x = cut)) +
geom_bar()
# DPLYR
# link da documentação
# https://github.com/rstudio/cheatsheets/blob/master/data-transformation.pdf
# Pacote mais utilizado para transformação de dados.
# Funções principais:
# Função Objetivo
# filter() filtra linhas
# select() seleciona colunas
# mutate() cria/modifica colunas
# arrange() ordena a base
# summarise() sumarize a base
library(dplyr) # chamando o pacote
# usaremos o dataframe "mtcars"
# usando a função PIPE
mtcars %>%
filter(mpg > 20) # filtrando o dataframe em uma coluna específica e aplicando uma condicional - mpg maior que 20
mtcars %>%
select(mpg) # selecionando somente a coluna "mpg"
mtcars %>%
# usando a função "mutate()" e criando uma nova coluna "economy"
mutate (
economy = ifelse (
mpg > 20, 'CARRO ECONÔMICO', 'CARRO BRBERÃO') # aplicando uma condicional e instanciando uma descrição
) %>%
select(mpg, economy) # selecionando as colunas "mpg" e "economy"
mtcars %>%
# "arrange()" ordena as linhas de um quadro de dados pelos valores das colunas selecionadas
arrange(mpg) %>%
select(everything()) # com a função "select()" selecionando todas as colunas com o argumento "everything()"
mtcars %>%
# "arrange()"" ordena as linhas de um quadro de dados pelos valores das colunas selecionadas
arrange(hp) %>%
select(hp, mpg) # com a função "select()" selecionando as colunas "hp" e "mpg"
mtcars %>%
# com a função "summarise()", estamos sumarizando
# instanciando na variável "media_mpg", a média de consumo (mpg)
summarise(media_mpg = mean(mpg,
na.rm = TRUE))
# TIBBLE
# Tibbles são como dataframes, entretanto, apresentam melhorias de produtividade.
# TIDYR
# funções principais: gather() e spread()
# chamando os pacotes
library(tibble)
library(tidyr)
# instanciando manualmente um dataframe usando o "tibble"
df <- tibble(w = 1, x = 2, y = 3) # variáveis e seus respectivos valores
z = gather(df, 'variável', 'valor') # reshape df from wide to long with gather(); names go to "variável", values to "valor"
z # print the newly created object
# inspect the class of the created object
class(z)
# inspect the original data frame
df
z2 = spread(z, 'variável', 'valor')
z2
# show object "z2" in the spreadsheet-style viewer
View(z2)
# another example
# load the required packages
library(tibble)
library(tidyr)
# build a data frame by hand
df_2 <- tibble('Fernando' = 1997,
'Kátia' = 1980,
'Sara' = 2000,
'Eloah' = 2007) # variables and their respective values
dados = gather(df_2, 'nome', 'ano') # column names
View(dados)
nome_ano = spread(dados, 'nome', 'ano')
nome_ano
View(nome_ano)
# STRINGR
# ideal for working with strings
# load the package
library(stringr)
# count the characters in the variable "texto"
texto = 'Este é um texto para simples conferência de caracteres.'
str_length(texto)
# convert the whole variable to upper case
str_to_upper(texto)
# variable in lower case
str_to_lower(c('FERNANDO GALVÃO'))
# variables with initial letters capitalised (title case)
var_1 <- 'data science'
str_to_title(c(var_1))
# example
s1 = 'Estou desenvolvendo'
s2 = ' '
s3 = 'habilidades em Ciência de Dados'
str_c(s1, s2, s3) # concatenate the variables
# another example
var1 <- 'A linguagem R'
var2 <- ' '
var3 <- 'é poderosa'
var4 <- '!'
str_c(var1, var2, var3, var4)
# example (note: `->` assigns to the name on the right)
'Fernando' -> firstName
' ' -> space
'Galvão' -> lastName
fullName <- str_c(firstName, space, lastName)
print(fullName)
'fernando' -> firstName
' ' -> space
'galvão' -> lastName
fullName <- str_c(firstName,
space,
lastName)
str_to_title(fullName)
# READR
# Reads data in .txt, .csv, .fwf and .log formats, roughly 10x faster
# than the usual base-R commands.
# No need to convert characters into factors (stringsAsFactors = FALSE)
# Reader functions provided:
# read_delim(), read_csv(), read_tsv(),
# read_csv2(), read_fwf(), read_table(),
# read_log(), read_file() # reads a file into a *string*.
# documentation link
# https://rawgit.com/rstudio/cheatsheets/master/data-import.pdf
library(readr)
# Pipe operator %>%
# The %>% (pipe) operator makes code more legible/understandable.
# It was introduced with the magrittr package and other packages have
# since been built around it
# First, install and load magrittr:
install.packages('magrittr')
# load the package
library(magrittr)
# funcao(a, b) # without the pipe
# a %>% f(b) # pipe operator
# store some numeric data in variable "a"
a <- c(3,7,6)
# square root of the sum, without the pipe
sqrt(sum(a))
# with the pipe -- NOTE(review): unlike the line above this only sums;
# no sqrt is applied. Likely intended `a %>% sum() %>% sqrt()` -- confirm.
a %>% sum()
# store four values
valores <- c(43, 39, 21, 13)
# mean of the values (sum() of a single number is a no-op here)
sum(mean(valores)) # without the pipe
# mean of the values
valores %>% mean() %>% sum() # with the pipe
# keyboard input
v_1 <- as.numeric(readline('Digite o primeiro valor: '))
v_2 <- as.numeric(readline('Digite o segundo valor: '))
v_3 <- as.numeric(readline('Digite o terceiro valor: '))
v_4 <- as.numeric(readline('Digite o quarto valor: '))
media <- c(v_1, v_2, v_3, v_4) # store the values in variable "media"
media %>% mean() %>% sum() # use the pipe to produce the result - printing the mean of the values
print(media %>% mean() %>% sum()) # use the pipe to produce the result - printing the mean of the values
# keyboard input
v_1 <- as.numeric(readline('Digite o primeiro valor: '))
v_2 <- as.numeric(readline('Digite o segundo valor: '))
v_3 <- as.numeric(readline('Digite o terceiro valor: '))
v_4 <- as.numeric(readline('Digite o quarto valor: '))
# simplified code - use the pipe to produce the result
sum(c(v_1, v_2, v_3, v_4) %>% mean()) # printing the mean of the values
print(sum(c(v_1, v_2, v_3, v_4) %>% mean())) # printing the mean of the values
|
d97488afe20c79d085c8b5711fee1d09e5acd5e6 | 191eb4a021427815a8a9c4a58bb7156106e9cb2f | /man/summary.xgb.Booster.Rd | b37276f687b1af96f860da8d52d487ee2d0fd5be | [
"MIT"
] | permissive | osofr/stremr | 65db4ed3720d5ea08e3b88a7cc408b8667c729cb | 5509d02786faf6f475331504934c5fb404c9f9b5 | refs/heads/master | 2022-02-13T15:22:02.706524 | 2022-01-21T20:31:15 | 2022-01-21T20:31:15 | 55,272,770 | 37 | 18 | MIT | 2017-12-09T23:57:32 | 2016-04-02T01:53:32 | R | UTF-8 | R | false | true | 687 | rd | summary.xgb.Booster.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summaryS3methods.R
\name{summary.xgb.Booster}
\alias{summary.xgb.Booster}
\alias{summary.xgb.cv.synchronous}
\title{S3 methods for fit summary from xgboost}
\usage{
\method{summary}{xgb.Booster}(object, ...)
\method{summary}{xgb.cv.synchronous}(object, ...)
}
\arguments{
\item{object}{The model fit object produced by xgboost (and extracted with \code{getmodel_byname}).}
\item{...}{Additional options (not used)}
}
\value{
The markdown-formatted model summary returned by \code{pander::pander_return}.
}
\description{
Prints the modeling summary for the xgboost model fit (see \code{xgboost} R package).
}
|
97b23db9dba2ff370af4f5c8ebe8db44f5597cde | 39c5367005b26b0c9ea6246ebc00584f08582588 | /NewsData_Analysis.R | a8990f0e4d707d6ce6b48e81368ad2e6a5e13307 | [] | no_license | konrad-c/GlobalTerrorismAnalysis | a82273e88f76f761cdbd88ca01ee386c318a9654 | 96fa12b3b9632ec3a5f105bcf17ed66b3ede8361 | refs/heads/master | 2021-01-22T22:40:19.028125 | 2017-06-16T02:07:11 | 2017-06-16T02:07:11 | 92,785,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,433 | r | NewsData_Analysis.R | library(ggplot2)
library(dplyr)
library(reshape)
library(gridExtra)
library(MASS)
library(lubridate)
install.packages(devtools)
devtools::install_github("baptiste/egg")
library(egg)
# Build a plotmath label "y = a + b.x" from a robust linear regression
# (MASS::rlm) of the 2nd column of `df` on the 1st. The result is a
# character string suitable for geom_text(..., parse = TRUE), used below
# to annotate trend lines with their fitted equation.
rlm_eqn <- function(df){
x <- df[, 1]
y <- df[, 2]
m <- rlm(y ~ x, df);
# substitute() splices the formatted intercept/slope into the expression
eq <- substitute(italic(y) == a + b %.% italic(x),
list(a = format(coef(m)[1], digits = 2),
b = format(coef(m)[2], digits = 2)))
as.character(as.expression(eq));
}
# --Task---
#
# Load the Global Terrorism Database export.
sf <- read.csv2("Data/gtd/globalterrorism.csv", sep=",")
# read the data
# Monthly counts of all New York Times articles (scraped separately).
articles_sf <- read.csv2("Crawlers/articleCount.csv", sep=",")
articles_sf$Year <- as.numeric(as.character(articles_sf$Year))
articles_sf$month <- as.character(articles_sf$month)
articles_sf$NumArticles <- as.numeric(as.character(articles_sf$NumArticles))
articles_sf$ArticleType <- "Total"
# Zero-pad single-digit months so the "%d/%m/%Y" parse below succeeds.
articles_sf$month[as.numeric(articles_sf$month) < 10] <- paste("0", articles_sf$month[as.numeric(articles_sf$month) < 10], sep="")
# Monthly counts of NYT articles mentioning terrorism.
terror_articles_sf <- read.csv2("Crawlers/TerrorismArticleCount.csv", sep=",")
terror_articles_sf$Year <- as.numeric(as.character(terror_articles_sf$Year))
terror_articles_sf$month <- as.character(terror_articles_sf$month)
terror_articles_sf$NumArticles <- as.numeric(as.character(terror_articles_sf$NumArticles))
terror_articles_sf$ArticleType <- "Terror Article"
terror_articles_sf$month[as.numeric(terror_articles_sf$month) < 10] <- paste("0", terror_articles_sf$month[as.numeric(terror_articles_sf$month) < 10], sep="")
# Create Date columns: first-of-month Date built from "01/<month>/<year>".
terror_articles_sf$Date <- paste("01", terror_articles_sf$month, paste0(terror_articles_sf$Year," 00:00:00"), sep="/")
terror_articles_sf$Date <- as.Date(as.character(as.POSIXct(strptime(terror_articles_sf$Date, format="%d/%m/%Y %H:%M:%S"))))
articles_sf$Date <- paste("01", articles_sf$month, paste0(articles_sf$Year," 00:00:00"), sep="/")
articles_sf$Date <- as.Date(as.character(as.POSIXct(strptime(articles_sf$Date, format="%d/%m/%Y %H:%M:%S"))))
# Bind: stack total and terror-article counts into one long data frame.
sf_news <- rbind(articles_sf, terror_articles_sf)
sf_news$Year <- as.numeric(sf_news$Year)
sf_news <- na.omit(sf_news)
sf_news$NumArticles <- as.numeric(sf_news$NumArticles)
# ---- Plot Data ----
# Monthly article counts, one line per article type.
numarticle <- ggplot(sf_news, aes(x=Date, y=NumArticles, colour=ArticleType)) +
#geom_point() +
geom_line() +
theme_bw() +
scale_color_discrete(name="Article Type") +
labs(x="Year", y="# of Articles", title="New York Times Articles Mentioning Terrorism by Month")
# Share of terrorism articles per month. NOTE(review): assumes both frames
# are aligned row-for-row by month -- confirm.
sf_prop <- data.frame(
Date=articles_sf$Date,
Proportion=terror_articles_sf$NumArticles/articles_sf$NumArticles
)
# Proportion over time with a robust (rlm) trend line and its equation.
proparticle_rlm <- ggplot(sf_prop, aes(x=Date, y=Proportion)) +
#geom_point() +
geom_line() +
theme_bw() +
labs(x="Year", y="Proportion of Terrorism Articles to Total Articles") +
geom_smooth(method="rlm", alpha=0.0) +
geom_text(x = 1970, y = 0.20, label = rlm_eqn(sf_prop), parse = TRUE) #+
#geom_vline(xintercept = as.numeric(as.Date("2001/09/11 00:00:00")), colour="red")
# Stack the two monthly plots vertically.
ggarrange(numarticle, proparticle_rlm, ncol=1)
# ---- Articles by Year: ----
# Aggregate monthly counts into yearly totals for each GTD year.
count <- 1
year_vec <- vector(mode="numeric")
article_total <- vector(mode="numeric")
article_terror <- vector(mode="numeric")
for(year in unique(sf$iyear)){
year_vec[[count]] <- year
sub_sf_news <- subset(sf_news, Year == year)
article_total[[count]] <- round(sum(sub_sf_news[sub_sf_news$ArticleType == "Total",]$NumArticles))
article_terror[[count]] <- round(sum(sub_sf_news[sub_sf_news$ArticleType == "Terror Article",]$NumArticles))
count <- count+1
}
sf_news_year <- data.frame(
Year=year_vec,
Total=article_total,
'Terror Article'=article_terror
)
sf_prop_year <- data.frame(
Year=year_vec,
Proportion=article_terror/article_total
)
# Long format for plotting one line per article type.
sf_news_year <- melt(data = sf_news_year, id.vars="Year", variable_name = "ArticleType")
numarticle_year <- ggplot(sf_news_year, aes(x=Year, y=value, colour=ArticleType)) +
#geom_point() +
geom_line() +
theme_bw() +
scale_color_discrete(name="Article Type") +
labs(x="Year", y="# of Articles", title="New York Times Articles Mentioning Terrorism by Year")
proparticle_year_rlm <- ggplot(sf_prop_year, aes(x=Year, y=Proportion)) +
#geom_point() +
geom_line() +
theme_bw() +
labs(x="Year", y="Ratio of Terrorism Articles to Total Articles") +
geom_smooth(method="rlm", alpha=0.0) +
# NOTE(review): `sfsubset` is not defined anywhere in this file; the two
# layers below will error unless it is created elsewhere -- confirm.
geom_smooth(method="rlm", data=sfsubset, aes(x=Year, y=Proportion)) +
geom_text(x = 1975, y = 0.050, label = rlm_eqn(sf_prop_year), parse = TRUE) +
geom_text(x = 1975, y = 0.030, label = rlm_eqn(sfsubset), parse = TRUE)
ggarrange(numarticle_year, proparticle_year_rlm, ncol=1)
# ---- Terror Articles vs Attacks & Deaths
# Clean bad data: drop rows with unknown month (imonth == "0").
sf <- sf[sf$imonth != "0", ]
# graph the number of deaths over the last 45 years in each of the countries
sub_countries <- sf$country_txt
sub_years <- sf$iyear
sub_months <- sf$imonth
sub_deaths <- sf$nkill
sub_attacktype <- sf$attacktype1_txt
# Casting of variables
sub_countries <- as.character(sub_countries)
sub_deaths <- as.numeric(as.character(unlist(sub_deaths)))
sub_years <- as.numeric(as.character(unlist(sub_years)))
sub_months <- as.character(unlist(sub_months))
sub_attacktype <- as.character(unlist(sub_attacktype))
# Zero-pad single-digit months for the date parsing below.
sub_months[as.numeric(sub_months) < 10] <- paste("0", sub_months[as.numeric(sub_months) < 10], sep="")
# Create Date columns:
sub_date <- paste("01", sub_months, paste0(sub_years," 00:00:00"), sep="/")
sub_date <- as.Date(as.character(as.POSIXct(strptime(sub_date, format="%d/%m/%Y %H:%M:%S"))))
# Create reduced data.frame: US incidents only, complete cases.
reduced_sf <- data.frame(sub_years, sub_date, sub_countries, sub_deaths, sub_attacktype)
reduced_sf <- reduced_sf[reduced_sf$sub_countries == "United States", ]
reduced_sf <- na.omit(reduced_sf)
# Per-month death and attack totals for the US.
count <- 1
year_vec <- vector(mode="numeric")
death_vec <- vector(mode="numeric")
attack_vec <- vector(mode="numeric")
date_vec <- list()#vector(mode="numeric")
for(date in unique(sub_date)){
#date_vec[[count]] <- as.Date(as.character(as.POSIXct(strptime(date, format="%d/%m/%Y %H:%M:%S"))))
death_set <- subset(reduced_sf, sub_date == date)
death_vec[[count]] <- round(sum(death_set$sub_deaths))
attack_vec[[count]] <- nrow(death_set)
count <- count+1
}
us_sf <- data.frame(
Date=unique(sub_date),#unlist(date_vec),
Deaths=death_vec,
Attacks=attack_vec
#TerrorArticles=sf_news[sf_news$ArticleType=="Terror Article", ]$NumArticles[1:45]
)
# NOTE(review): us_sf has a "Date" column but no "Year"; melt(id.vars="Year")
# and the aes(x=Year, ...) mappings below will fail as written -- confirm
# which column was intended.
us_sf_melted <- melt(data = us_sf, id.vars = "Year")
us_plot <- ggplot(us_sf_melted, aes(x=Year,y=value, colour=variable)) +
geom_line() +
theme_bw() +
scale_colour_discrete(name="") +
theme(legend.position = "left") +
labs(x="", y="Number of", title="United States Terror & News Statistics")
us_plot_zoomed <- ggplot(us_sf_melted, aes(x=Year,y=value, colour=variable)) +
geom_line() +
theme_bw() +
scale_y_continuous(limits=c(0,180))+
scale_colour_discrete(name="") +
geom_smooth(method='lm') +
theme(legend.position = "left") +
labs(x="Year", y="Number of", title="Reduced Y-axis range")
ggarrange(us_plot, us_plot_zoomed, proparticle_year_rlm, ncol=1)
#geom_line(data=us_sf, aes(x=Year, y=Deaths)) +
#geom_line(data=us_sf, aes(x=Year, y=Attacks)) +
#geom_line(data=sf_prop, aes(x=Year, y=Proportion))
# Coefficient of determination (R^2) for a fitted-model-like object `x`
# exposing $resid and $fitted (observed values are fitted + resid):
#   R^2 = 1 - SS_residual / SS_total
r2 <- function(x) {
  observed <- x$resid + x$fitted
  ss_res <- sum(x$resid^2)
  ss_tot <- sum((observed - mean(observed))^2)
  1 - ss_res / ss_tot
}
|
d2cb98758e0d28871ca459df900cea8a3e860dbb | 5702e55d934ebabd9d61c01bc5b496ea17dc4435 | /man/specificity.Rd | d5150c1cc39228a6ef9a35bcc7532e77cb2bac11 | [] | no_license | selva86/woe | 438fbb7bc151360c97d5a8f300d79a8c02720c75 | 38481226136ff360b1e605dce9d34727d1dbed31 | refs/heads/master | 2021-01-15T10:47:39.246174 | 2015-07-15T09:24:14 | 2015-07-15T09:24:14 | 39,127,020 | 3 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,812 | rd | specificity.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Main.R
\name{specificity}
\alias{specificity}
\title{specificity}
\usage{
specificity(actuals, predictedScores, threshold = 0.5)
}
\arguments{
\item{actuals}{The actual binary flags for the response variable. It can take a numeric vector containing values of either 1 or 0, where 1 represents the 'Good' or 'Events' while 0 represents 'Bad' or 'Non-Events'.}
\item{predictedScores}{The prediction probability scores for each observation. If your classification model gives the 1/0 predictions, convert it to a numeric vector of 1's and 0's.}
\item{threshold}{If predicted value is above the threshold, it will be considered as an event (1), else it will be a non-event (0). Defaults to 0.5.}
}
\value{
The specificity of the given binary response actuals and predicted probability scores, which is, the number of observations without the event AND predicted to not have the event divided by the number of observations without the event.
}
\description{
Calculate the specificity for a given logit model.
}
\details{
For a given binary response of actuals and predicted probability scores, specificity is defined as the number of observations without the event AND predicted to not have the event divided by the number of observations without the event. Specificity is particularly useful when you must be extra careful not to predict a non-event as an event, as in spam detection, where you don't want to classify a genuine mail as spam (the event) while it may be somewhat acceptable to occasionally classify a spam as a genuine mail (a non-event).
}
\examples{
data('ActualsAndScores')
specificity(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
}
\author{
Selva Prabhakaran \email{selva86@gmail.com}
}
|
f4433433593c78ea822f1940a053c10aab6f7d2e | a97fcd5dd4476719add56cf3d2b5abda7f5551bc | /man/configShow.Rd | f55117b9252070e5f2d9fbc5e223dcdee66b9a2d | [] | no_license | cran/phyloTop | e80ceeb2370d8d2da8580faa8f15633fdeb98480 | 38204136f9ed299a02236a53f0bf025a4caa9a44 | refs/heads/master | 2023-02-07T07:22:57.555086 | 2023-01-24T14:20:02 | 2023-01-24T14:20:02 | 17,719,233 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,015 | rd | configShow.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configShow.R
\name{configShow}
\alias{configShow}
\title{Plot a tree highlighting configurations}
\usage{
configShow(tree, configSize, mainCol = "black", configCol = "red", ...)
}
\arguments{
\item{tree}{a tree of class \code{phylo4}}
\item{configSize}{an integer giving the configuration size of interest}
\item{mainCol}{colour for branches which are not in configurations of the chosen size (default is black)}
\item{configCol}{colour for branches which are in such configurations (default is red)}
\item{...}{further arguments to be passed to plot.phylo}
}
\value{
A plot of the tree, highlighting the configurations of the given size.
}
\description{
Plot a tree, highlighting configurations of a given size.
}
\examples{
## Highlight pitchforks in a random tree with 20 tips:
configShow(rtree(20),3, edge.width=2)
}
\author{
Michelle Kendall \email{michelle.louise.kendall@gmail.com}
Michael Boyd \email{mboyd855@gmail.com}
}
|
399228d087e2ebf63801d9b4377c8afef7e90d1e | 2bf11ad7ba3acccdac7f5e7d2dedcbe7ca7029b8 | /Recursion_in_R.r | cfe0cf9319f0860da761043237608462284b4abb | [] | no_license | SainiManisha/r-tutorial | c6ba612ceaf6e326a660c279d645b82b6090ff32 | f83e8578a34f5b643bae6af387332fd93c208a77 | refs/heads/main | 2023-03-24T08:45:23.120828 | 2021-03-22T08:21:03 | 2021-03-22T08:21:03 | 335,750,797 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 144 | r | Recursion_in_R.r | recur_factorial <- function(N)
{
if (N == 0)
return(1)
else{
return( N * recur_factorial (N-1))
}
}
# calling a fuction in R
recur_factorial(5)
|
f27a9a11b78910f5c4c769489b5962475dc7c717 | 1e24762ee424a22667d890fbb6a42ac2fad38d4f | /bioconductor_docker/install.R | bc9ea9c2b06c977fa46821fe729af727187aa3eb | [
"Artistic-2.0"
] | permissive | ahalfpen727/Docker-Resources | ffad1005718ca58173dd20ba2efea36d3e4251f0 | f57f8801e45cea00155fcc195fb60b625ef4ed71 | refs/heads/master | 2023-07-09T15:01:00.559909 | 2021-08-16T20:05:42 | 2021-08-16T20:05:42 | 298,450,065 | 1 | 0 | null | 2021-05-19T09:47:14 | 2020-09-25T02:45:21 | Jupyter Notebook | UTF-8 | R | false | false | 162 | r | install.R | install.packages("BiocManager", repos="https://cran.rstudio.com")
BiocManager::install(version="3.12", update=TRUE, ask=FALSE)
BiocManager::install('devtools')
|
210cf7c0a2f7abb4fddf4a86901ab5f491695fbb | 76b152ff172124168f80893fd8a267bd7e72315e | /man/nonLinearNoiseReduction.Rd | 8dc839c6d93d98bbd3c2a71aa9382c1d89d299a1 | [] | no_license | constantino-garcia/nonlinearTseries | c303b79504f357b4e250f86edf2113798e01eefc | 1e311c272176cda17d27a7a293eb40e0542f2c65 | refs/heads/master | 2023-08-05T04:40:22.257391 | 2023-07-31T13:02:30 | 2023-07-31T13:02:30 | 101,268,975 | 31 | 19 | null | 2020-06-12T07:27:52 | 2017-08-24T07:53:46 | C++ | UTF-8 | R | false | true | 1,098 | rd | nonLinearNoiseReduction.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nonLinearNoiseReduction.R
\name{nonLinearNoiseReduction}
\alias{nonLinearNoiseReduction}
\title{Nonlinear noise reduction}
\usage{
nonLinearNoiseReduction(time.series, embedding.dim, radius)
}
\arguments{
\item{time.series}{The original time series to denoise.}
\item{embedding.dim}{Integer denoting the dimension in which we shall embed
the \emph{time.series}.}
\item{radius}{The radius used to looking for neighbours in the phase space
(see details).}
}
\value{
A vector containing the denoised time series.
}
\description{
Function for denoising a given time series using nonlinear analysis
techniques.
}
\details{
This function takes a given time series and denoises it. The denoising
is achieved by averaging each Takens' vector in an m-dimensional space
with its neighbours (time lag=1). Each neighbourhood is specified with balls
of a given radius
(max norm is used).
}
\references{
H. Kantz and T. Schreiber: Nonlinear Time series Analysis
(Cambridge university press)
}
\author{
Constantino A. Garcia
}
|
55282ced61f7a3c9aae28b015736e50b88283a3a | 17f54c156da4481ca94defe43c6017c87fc72045 | /code/textAnalytics.R | 61a3cb2d72926e30a2026c447869d56314f27bb0 | [] | no_license | dk27/dk-website | 86b58b3361ad20576b6ac3571680b7c560e78a00 | 159a5c0abba586cd1f4409e09c949622f03eb7f1 | refs/heads/master | 2021-07-10T12:00:08.618333 | 2021-02-23T04:05:49 | 2021-02-23T04:05:49 | 231,258,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,980 | r | textAnalytics.R | install.packages(c("tm", "wordlcoud", "ggplot2", "rvest", "udpipe", "lattice", "igraph", "ggraph"))
library(tm)
library(wordcloud)
library(RColorBrewer)
library(ggplot2)
library(rvest)
library(stringr)
library(udpipe)
library(lattice)
library(igraph)
library(ggraph)
# webscrape the following page
# Scrape the full 2020 final-debate transcript page.
text<-read_html("https://www.rev.com/blog/transcripts/donald-trump-joe-biden-final-presidential-debate-transcript-2020")
text1 <- text %>%
html_nodes("p") %>%
html_text()
# create three speeches: each transcript paragraph starts with the speaker
# name followed by a newline, so route paragraphs by the first five
# characters and keep only the text after the first newline.
kristen <- ''
joe <- ''
trump <- ''
for (i in text1)
{
if(substr(i,1,5)=='Krist') {
kristen <- c(kristen,paste(str_sub(i, start= gregexpr('\n', i)[[1]][1]+1)))
}
if(substr(i,1,5)=='Joe B') {
joe <- c(joe,paste(str_sub(i, start= gregexpr('\n', i)[[1]][1]+1)))
}
if(substr(i,1,5)=='Donal') {
trump <- c(trump,paste(str_sub(i, start= gregexpr('\n', i)[[1]][1]+1)))
}
}
length(joe)
length(trump)
### Biden
# create a corpus (naming a variable `c` is legal here; function calls
# still resolve to base::c)
c <- VCorpus(VectorSource(joe))
# do some basic data cleaning
txt <- tm_map(c, removeNumbers)
txt <- tm_map(txt, removePunctuation)
txt <- tm_map(txt, stripWhitespace)
txt <- tm_map(txt, content_transformer(tolower))
txt <- tm_map(txt, removeWords, stopwords("english"))
txt <- tm_map(txt, removeWords, c("’re"))
# document term matrix -> per-word frequencies sorted descending
tdm<-TermDocumentMatrix(txt)
m <- as.matrix(tdm)
words <- sort(rowSums(m), decreasing=TRUE)
# NOTE(review): drops contraction fragments by hard-coded position; these
# indices are fragile if the transcript changes -- confirm.
words <- words[c(-1,-24, -57, -109, -10)] # remove 're, 'll, 've, don't and didn't as they don't add any value
df <- data.frame(word=names(words), freq=words)
head(df)
#### Trump
# Build and clean the Trump corpus, mirroring the Biden pipeline above.
c1 <- VCorpus(VectorSource(trump))
# do some basic data cleaning
# BUG FIX: the removeNumbers step had been commented out, leaving txt1
# undefined on its first use below; initialize txt1 from c1 exactly as
# the Biden section does.
txt1 <- tm_map(c1, removeNumbers)
txt1 <- tm_map(txt1, removePunctuation)
txt1 <- tm_map(txt1, stripWhitespace)
txt1 <- tm_map(txt1, content_transformer(tolower))
txt1 <- tm_map(txt1, removeWords, stopwords("english"))
txt1 <- tm_map(txt1, removeWords, c("’re"))
# document term matrix -> per-word frequencies sorted descending
tdm1<-TermDocumentMatrix(txt1)
m1 <- as.matrix(tdm1)
words1 <- sort(rowSums(m1), decreasing=TRUE)
words1 <- words1[c(-1,-5, -12, -16, -40)] # remove 're, 'll, 've, don't and didn't
df1 <- data.frame(word=names(words1), freq=words1)
head(df1)
# Side-by-side bar charts of the 15 most frequent words per candidate.
par(mfrow=c(1,2))
barplot(df[1:15,]$freq, # specify y values
names.arg=df[1:15,]$word, # specify x values
las = 2,#rotate x-labels
col="#10a4d4",
ylab= "Word Frequencies",
main = "15 Most Frequent Words Biden")
barplot(df1[1:15,]$freq, # specify y values
names.arg=df1[1:15,]$word, # specify x values
las = 2,#rotate x-labels
col="coral2",ylab= "Word Frequencies",
main = "15 Most Frequent Words Trump")
par(mfrow=c(1,1)) # restore single-plot layout
# generate a word cloud Biden (seed fixed for reproducible layout)
set.seed(3)
wordcloud(words=df$word,
freq = df$freq,
min.freq = 5,
random.order=FALSE,
colors=brewer.pal(8,'Set1'))
# generate a word cloud Trump
set.seed(3)
wordcloud(words=df1$word,
freq = df1$freq,
min.freq = 5,
random.order=FALSE,
colors=brewer.pal(8,'Set1'))
#### word associations Joe Biden
# Download/load the English udpipe model and POS-annotate Biden's lines.
ud_model <- udpipe_download_model(language="english")
ud_model <- udpipe_load_model(ud_model$file_model)
x <- udpipe_annotate(ud_model, x=joe)
x <- as.data.frame(x)
x$token<-tolower(x$token)
str(x)
# most are verbs followed by nouns
table(x$upos)
# most occurring verbs
verbs <- subset(x, upos %in% c("VERB"))
verbs <- txt_freq(tolower(verbs$token))
verbs$key <- factor(verbs$key, levels=rev(verbs$key))
verbs <- subset(verbs, !key %in% c("’s", "is", "’re") )
# can't say this is very informative
barchart(key~freq,
data=head(verbs, 20),
col="#10a4d4",
main = "Most Occurring Verbs in Biden's Speech",
xlab = "Freq")
# most occurring nouns
nouns <- subset(x, upos %in% c("NOUN"))
nouns <- txt_freq(nouns$token)
nouns$key <- factor(nouns$key, levels=rev(nouns$key))
barchart(key~freq,
data=head(nouns, 20),
col="#10a4d4",
main = "Most Occurring Nouns in Biden's Speech",
xlab = "Freq")
# most occurring adjectives
adj <- subset(x, upos %in% c("ADJ"))
adj <- txt_freq(adj$token)
adj$key <- factor(adj$key, levels=rev(adj$key))
barchart(key~freq,
data=head(adj, 20),
col="#10a4d4",
main = "Most Occurring Adjectives in Biden's Speech",
xlab = "Freq")
# finding keywords
#RAKE (Rapid Automatic Keyword Extraction) over nouns/adjectives/verbs
kw <- keywords_rake(x=x,
term="lemma",
group = c("doc_id", "paragraph_id", "sentence_id"),
relevant = x$upos %in% c("NOUN", "ADJ", "VERB" ))
kw$key <- factor(kw$keyword, levels = rev(kw$keyword))
barchart(key ~ rake,
data = head(subset(kw, freq>3), 15),
col="#10a4d4",
main="Key Words")
# visualize as a network
kw1 <- keywords_collocation(x = x,
term = "token",
group = c("doc_id", "paragraph_id", "sentence_id"),
ngram_max = 4)
# Co-occurrence of adjacent lemmas (skipgram = 1). Note this overwrites
# the collocation result computed just above.
kw1 <- cooccurrence(x$lemma,
relevant = x$upos %in% c("NOUN", "ADJ", "VERB") ,
skipgram=1)
head(kw1)
wordnetwork <- head(kw1, 30)
wordnetwork <- graph_from_data_frame(wordnetwork)
ggraph(wordnetwork, layout = "fr") +
geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "mediumorchid1") +
geom_node_text(aes(label = name), col = "#10a4d4", size = 4) +
theme_graph(base_family = "Arial Narrow") +
theme(legend.position = "none") +
labs(title = "Words following one another in a sentence", subtitle = "Nouns, Adjectives & Verbs")
x <- udpipe_annotate(ud_model, x=trump)
x <- as.data.frame(x)
x$token <- tolower(x$token)
str(x)
# most are verbs followed by nouns
table(x$upos)
# most occurring verbs
verbs <- subset(x, upos %in% c("VERB"))
verbs <- txt_freq(verbs$token)
verbs$key <- factor(verbs$key, levels=rev(verbs$key))
verbs <- subset(verbs, !key %in% c("’s", "is", "’re") )
# also not very informative
barchart(key~freq,
data=head(verbs, 20),
col="coral2",
main = "Most Occurring Verbs in Trump's Speech",
xlab = "Freq")
# most occurring nouns
nouns <- subset(x, upos %in% c("NOUN"))
nouns <- txt_freq(nouns$token)
nouns$key <- factor(nouns$key, levels=rev(nouns$key))
barchart(key~freq,
data=head(nouns, 20),
col="coral2",
main = "Most Occurring Nouns in Trump's Speech",
xlab = "Freq")
# most occurring adjectives
adj <- subset(x, upos %in% c("ADJ"))
adj <- txt_freq(adj$token)
adj$key <- factor(adj$key, levels=rev(adj$key))
barchart(key~freq,
data=head(adj, 20),
col="coral2",
main = "Most Occurring Adjectives in Trump's Speech",
xlab = "Freq")
# finding keywords
#RAKE
kw <- keywords_rake(x=x,
term="lemma",
group = c("doc_id", "paragraph_id", "sentence_id"),
relevant = x$upos %in% c("NOUN", "ADJ", "VERB" ))
kw$key <- factor(kw$keyword, levels = rev(kw$keyword))
barchart(key ~ rake,
data = head(subset(kw, freq>3), 15),
col='coral2',
main="Key Words")
# visualize as a network
kw1 <- keywords_collocation(x = x,
term = "token",
group = c("doc_id", "paragraph_id", "sentence_id"),
ngram_max = 4)
kw1 <- cooccurrence(x$lemma,
relevant = x$upos %in% c("NOUN", "ADJ", "VERB") ,
skipgram=1)
head(kw1)
wordnetwork <- head(kw1, 30)
wordnetwork <- graph_from_data_frame(wordnetwork)
ggraph(wordnetwork, layout = "fr") +
geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "mediumorchid1") +
geom_node_text(aes(label = name), col = "#10a4d4", size = 4) +
theme_graph(base_family = "Arial Narrow") +
theme(legend.position = "none") +
labs(title = "Words following one another in a sentence (Trump)", subtitle = "Nouns, Adjectives & Verbs")
|
cd849cc8f803bd592fe59408743d137a88837eff | 59247616dbc503b0efb48b2725514e1310a47ada | /cachematrix.R | d09edf802933f0d51279203155829fefbae76cc1 | [] | no_license | mbemowski/ProgrammingAssignment2 | fc031e84398c5d6edd47ec54d663f69895c594ad | 93f94206f6c00f5ab9800d43ff4db1c4eba3fa7e | refs/heads/master | 2021-01-16T20:52:01.843966 | 2015-02-22T07:02:15 | 2015-02-22T07:02:15 | 31,138,693 | 0 | 0 | null | 2015-02-21T19:57:41 | 2015-02-21T19:57:40 | null | UTF-8 | R | false | false | 1,168 | r | cachematrix.R | ## A pair of functions that allows to create special matrix object that can
## cache its inverse and calculate the inverse or get the cached value.
## Creates a matrix object that can cache its inverse.
## Argument x is a matrix which will be represented by this object.
# Constructor for a matrix wrapper that can cache its inverse.
# Returns a list of four accessors: set/get for the matrix itself and
# setInverse/getInverse for the cached inverse. Calling set() with a new
# matrix invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  set_matrix <- function(y) {
    x <<- y
    cached_inv <<- NULL  # new matrix: drop the stale inverse
  }
  get_matrix <- function() x
  store_inverse <- function(inverse) cached_inv <<- inverse
  fetch_inverse <- function() cached_inv
  list(
    set = set_matrix,
    get = get_matrix,
    setInverse = store_inverse,
    getInverse = fetch_inverse
  )
}
## Returns an inverse of a matrix object created by the makeCacheMatrix
## function. Argument x is the matrix object to be inverted.
## If the inverse of the matrix has already been calculated then
## the inverted matrix is obtained from the cache. Otherwise it is
## calculated and stored in the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(is.null(inv)) {
message("calculating matrix inverse")
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
}else{
message("getting cached data")
}
inv
}
|
ec345cd0ceab36e6fa8798b04e73a4839f9d563b | a757ed0562e0bbe12fc142cb5f44bd84a0e1ee98 | /R/autoplotECRResult.R | 2ffd787af9ee3c49cb058edccd85977e924fb6bc | [] | no_license | kerschke/ecr | 5cc7d7bec3592fdbf80d1991a281cccddcc2e88c | eb4902e95acbb12d24b6701edaa81256c6efda31 | refs/heads/master | 2021-01-18T21:01:23.212562 | 2015-09-23T17:23:14 | 2015-09-23T17:23:14 | 43,227,729 | 0 | 1 | null | 2015-09-27T00:01:46 | 2015-09-27T00:01:45 | null | UTF-8 | R | false | false | 4,339 | r | autoplotECRResult.R | #' @title
#' Plot optimization trace.
#'
#' @description
#' Call this function on the result object of an \code{\link{doTheEvolution}} function
#' call to visualize the optimization trace.
#'
#' @param object [\code{ecr_result}]\cr
#' ecr result object.
#' @param xlim [\code{numeric(2)} | NULL]\cr
#' Lower and upper bound for generation. If \code{NULL}, this is set automatically.
#' @param ylim [\code{numeric(2)} | NULL]\cr
#' Lower and upper bound for fitness. If \code{NULL}, this is set automatically.
#' @param show.process [\code{logical(1)}]\cr
#' Should the function itself with the population be plotted as well? Default is
#' \code{FALSE}.
#' @param log.fitness [\code{logical(1)}]\cr
#' Log-transform fitness values? Default is \code{FALSE}.
#' @param complete.trace [\code{logical(1)}]\cr
#' Direct show the plot with the fitness for all generations. Default is \code{FALSE}.
#' @param ... [any]\cr
#' Not used.
#' @return [\code{invisible(TRUE)}]
#' @export
autoplot.ecr_single_objective_result = function(object, xlim = NULL, ylim = NULL, show.process = FALSE
, log.fitness = FALSE, complete.trace = FALSE, ...) {
assertFlag(show.process, na.ok = FALSE)
assertFlag(complete.trace, na.ok = FALSE)
obj.fun = object$task$fitness.fun
n.params = getNumberOfParameters(obj.fun)
op = as.data.frame(object$opt.path)
# we start with the second dob, since otherwise there is not enough info to plot
unique.dobs = unique(op$dob)[-1]
if (complete.trace) {
unique.dobs = tail(unique.dobs, 1)
}
for (dob in unique.dobs) {
pl.trace = plotTrace(op[which(op$dob <= dob), ], xlim, ylim, log.fitness, ...)
pl.trace = pl.trace + ggtitle(sprintf("Optimization trace for function '%s'", getName(obj.fun)))
if (show.process) {
if (n.params > 2L || isMultiobjective(obj.fun)) {
stopf("Visualization not possible for multi-objective functions or functions with greater than 2 parameters.")
}
if (!length(object$control$save.population.at)) {
stopf("Cannot visualize population since no population was stored! Take a glance a the 'save.population.at' control parameter.")
}
pl.fun = autoplot(obj.fun)
population = object$population.storage[[paste0("gen.", dob)]]
if (n.params == 2L) {
df.points = as.data.frame(do.call(rbind, population$individuals))
colnames(df.points) = paste("x", 1:n.params, sep = "")
df.points$y = as.numeric(population$fitness)
pl.fun = pl.fun + geom_point(data = df.points, aes_string(x = "x1", y = "x2"), colour = "tomato")
} else {
fitness = as.numeric(population$fitness)
df.points = data.frame(x = do.call(c, population$individuals), y = fitness)
pl.fun = pl.fun + geom_point(data = df.points, aes_string(x = "x", y = "y"), colour = "tomato")
pl.fun = pl.fun + geom_hline(yintercept = min(fitness), linetype = "dashed", colour = "gray")
}
#FIXME: this seems to fail!
BBmisc::requirePackages(c("grid", "gridExtra"), why = "ecr")
#FIXME: next line returns errors in 'test_autoplot.R'
pl = do.call(gridExtra::arrangeGrob, list(pl.fun, pl.trace, ncol = 1))
#pl = pl.trace
} else {
pl = pl.trace
}
print(pl)
if (dob != tail(unique.dobs, 1))
pause()
}
return(invisible(TRUE))
}
# autoplot function for opt.path used by ecr
#
# Builds a ggplot of the min/mean/median/max population fitness per
# generation from a data.frame view of an ecr opt.path.
#
# Args:
#   df: data.frame with columns dob, pop.min.fitness, pop.mean.fitness,
#     pop.median.fitness and pop.max.fitness (one row per logged generation).
#   xlim, ylim: numeric(2) axis limits; NULL means "use the data range".
#   log.fitness: flag; log10-scale the fitness axis?
#   ...: accepted for call compatibility with the autoplot method; unused here.
# Returns: a ggplot object.
plotTrace = function(df, xlim, ylim, log.fitness, ...) {
  ggdf = df[c("dob", "pop.min.fitness", "pop.mean.fitness", "pop.median.fitness", "pop.max.fitness")]
  # fall back to the data range when the caller passed no explicit limits
  xlim = BBmisc::coalesce(xlim, c(0, max(ggdf$dob)))
  ylim = BBmisc::coalesce(ylim, c(min(ggdf$pop.min.fitness), max(ggdf$pop.max.fitness)))
  assertNumeric(ylim, len = 2L, any.missing = FALSE)
  assertNumeric(xlim, len = 2L, any.missing = FALSE)
  assertFlag(log.fitness)
  # qualified calls for consistency with the BBmisc:: usage in the autoplot
  # method above, and so melt() unambiguously resolves to reshape2
  BBmisc::requirePackages("reshape2", why = "ecr")
  # wide -> long: one row per (generation, statistic) pair
  ggdf = reshape2::melt(ggdf, c("dob"))
  ggdf$variable = as.factor(ggdf$variable)
  pl = ggplot(data = ggdf, mapping = aes_string(x = "dob", y = "value", linetype = "variable"))
  pl = pl + geom_line()
  pl = pl + xlab("Generation") + ylab("Fitness")
  pl = pl + xlim(xlim) + ylim(ylim)
  pl = pl + scale_linetype_discrete(name = "Type")
  if (log.fitness) {
    pl = pl + scale_y_log10()
    pl = pl + ylab("log(Fitness)")
  }
  return(pl)
}
|
f4eb0d4f759fcd3b1962431f95a74660c006225b | 081c62f36f7703d7987218c1c22931e083198e73 | /CML/tales/fig3pzrz.R | a00c9efe9f89849d3b8046716e40befc408c9ef2 | [] | no_license | radivot/myelo | be7ed23a6d1772e55310ced91270aa1d09da6735 | 2498bed404c98f096fcda4075c34a2881265e24b | refs/heads/master | 2022-12-15T00:11:22.751773 | 2022-12-04T14:24:36 | 2022-12-04T14:24:36 | 6,070,078 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,863 | r | fig3pzrz.R | library(phaseR)
# Right-hand side of the two-variable ODE system in the (t, state, pars)
# signature expected by phaseR/deSolve. State is a named vector c(y, z);
# Pars supplies py, Ky, m, rz, a, pz and Kz. Returns list(c(dy, dz)).
hahnYZp <- function(Time, State, Pars) {
  p <- as.list(Pars)
  s <- as.list(State)
  # logistic growth of y minus a mass-action removal term proportional to z
  dy <- p$py * s$y * (1 - s$y / p$Ky) - p$m * s$y * s$z
  # constant influx, first-order decay, plus a saturating y-dependent boost of z
  dz <- p$rz - p$a * s$z + p$pz * s$y * s$z / (p$Kz^2 + s$y^2)
  list(c(dy, dz))
}
# Scenario "4.1": pz = 4.1e3 with baseline influx rz = 200.
# Phase-plane portrait (phaseR): vector field, nullclines and one trajectory.
(pars=c(pz=4.1e3,Kz=1e3, py = 0.25, Ky = 1e+06, a = 2, rz = 200, m = 1e-04))
# numerically locate an equilibrium starting from the guess (y, z) = (200, 2e3)
equi=findEquilibrium(hahnYZp,parameters=pars, state.names=c("y","z"),y0 = c(200, 2e3))
# (y0=matrix(c(1, pars["rz"]/pars["a"],equi$ystar[1], 0),ncol=2,byrow=T))
# start one trajectory at y = 1 with z at rz/a (the steady state of z when y = 0)
(y0=matrix(c(1, pars["rz"]/pars["a"]),ncol=2,byrow=T))
hahnYZ_flowField <- flowField(hahnYZp,
                              parameters=pars,
                              state.names=c("y","z"),
                              xlim = c(0, 5e3),
                              ylim = c(0, 1e4),
                              add = FALSE)
grid()
hahnYZ_nullclines <- nullclines(hahnYZp,add.legend=FALSE,
                                parameters=pars,
                                state.names=c("y","z"),
                                xlim = c(0, 5e3),
                                ylim = c(0, 1e4))
hahnYZ_trajectory <- trajectory(hahnYZp,
                                parameters=pars,
                                state.names=c("y","z"),
                                y0 = y0,
                                tlim = c(0, 100))
title("4.1")
# save the assembled phase portrait to PDF
dev.copy2pdf(file="~/Results/twoCities/pzrz4_1.pdf",width=4,height=3.5)
##################
# Scenario "4.1x2": same pz but doubled influx rz = 400; here a second
# trajectory is also started at the computed equilibrium y with z = 0.
(pars=c(pz=4.1e3,Kz=1e3, py = 0.25, Ky = 1e+06, a = 2, rz = 400, m = 1e-04))
equi=findEquilibrium(hahnYZp,parameters=pars, state.names=c("y","z"),y0 = c(200, 2e3))
(y0=matrix(c(1, pars["rz"]/pars["a"],equi$ystar[1], 0),ncol=2,byrow=T))
hahnYZ_flowField <- flowField(hahnYZp,
                              parameters=pars,
                              state.names=c("y","z"),
                              xlim = c(0, 5e3),
                              ylim = c(0, 1e4),
                              add = FALSE)
grid()
hahnYZ_nullclines <- nullclines(hahnYZp,add.legend=FALSE,
                                parameters=pars,
                                state.names=c("y","z"),
                                xlim = c(0, 5e3),
                                ylim = c(0, 1e4))
hahnYZ_trajectory <- trajectory(hahnYZp,
                                parameters=pars,
                                state.names=c("y","z"),
                                y0 = y0,
                                tlim = c(0, 100))
title("4.1x2")
dev.copy2pdf(file="~/Results/twoCities/pzrz4_1x2.pdf",width=4,height=3.5)
# Scenario "4": pz lowered to 4e3, baseline influx rz = 200.
(pars=c(pz=4e3,Kz=1e3, py = 0.25, Ky = 1e+06, a = 2, rz = 200, m = 1e-04))
equi=findEquilibrium(hahnYZp,parameters=pars, state.names=c("y","z"),y0 = c(200, 2e3))
# (y0=matrix(c(1, pars["rz"]/pars["a"],equi$ystar[1], 0),ncol=2,byrow=T))
# single trajectory from y = 1, z = rz/a (steady state of z when y = 0)
(y0=matrix(c(1, pars["rz"]/pars["a"]),ncol=2,byrow=T))
hahnYZ_flowField <- flowField(hahnYZp,
                              parameters=pars,
                              state.names=c("y","z"),
                              xlim = c(0, 5e3),
                              ylim = c(0, 1e4),
                              add = FALSE)
grid()
hahnYZ_nullclines <- nullclines(hahnYZp,add.legend=FALSE,
                                parameters=pars,
                                state.names=c("y","z"),
                                xlim = c(0, 5e3),
                                ylim = c(0, 1e4))
hahnYZ_trajectory <- trajectory(hahnYZp,
                                parameters=pars,
                                state.names=c("y","z"),
                                y0 = y0,
                                tlim = c(0, 100))
title("4")
dev.copy2pdf(file="~/Results/twoCities/pzrz4.pdf",width=4,height=3.5)
##################
# Scenario "4x2": pz = 4e3 with doubled influx rz = 400; adds a second
# trajectory started at the computed equilibrium y with z = 0.
(pars=c(pz=4e3,Kz=1e3, py = 0.25, Ky = 1e+06, a = 2, rz = 400, m = 1e-04))
equi=findEquilibrium(hahnYZp,parameters=pars, state.names=c("y","z"),y0 = c(200, 2e3))
(y0=matrix(c(1, pars["rz"]/pars["a"],equi$ystar[1], 0),ncol=2,byrow=T))
hahnYZ_flowField <- flowField(hahnYZp,
                              parameters=pars,
                              state.names=c("y","z"),
                              xlim = c(0, 5e3),
                              ylim = c(0, 1e4),
                              add = FALSE)
grid()
hahnYZ_nullclines <- nullclines(hahnYZp,add.legend=FALSE,
                                parameters=pars,
                                state.names=c("y","z"),
                                xlim = c(0, 5e3),
                                ylim = c(0, 1e4))
hahnYZ_trajectory <- trajectory(hahnYZp,
                                parameters=pars,
                                state.names=c("y","z"),
                                y0 = y0,
                                tlim = c(0, 100))
title("4x2")
dev.copy2pdf(file="~/Results/twoCities/pzrz4x2.pdf",width=4,height=3.5)
|
d28a46ef562a397e274c852c90ab0faee291990e | c79f532b3393594c6be183db4344fa7430a4247d | /src/old-files/old_spray.R | 967b7cbc96639e4ea7f9ef4057e2466ef6f1cc07 | [] | no_license | kahultman/west-nile-virus | eaa6cd5ec968f9857ce67a8f7be18da9d7673b8d | d09be7ce7fa512e7b21c35121d7c0b9df03f3394 | refs/heads/master | 2020-06-12T18:43:36.063725 | 2017-06-01T15:21:02 | 2017-06-01T15:21:02 | 75,773,193 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,962 | r | old_spray.R | # Load Spray data
library(tidyverse)
spray <- tbl_df(read.csv("./data/spray.csv"))
spray$Date <- as.Date(spray$Date, "%Y-%m-%d")
spray$Date2 <- as.POSIXlt(spray$Date)
spray$Year <- spray$Date2$year+1900
spray$Week <- floor((spray$Date2$yday - spray$Date2$wday + 7) /7)
spray$Date2 <- NULL
save(spray, file = "./data/spray.RData")
# ## Combine spray data with training data
# #
# # Variables to add
# # Distance to last spray
# # Time since last spray
# # minSprayTD2 = min(time since last spray * distance to spray ^ 2)
#
# load("./data/trainset1.RData")
# trainset1 <- tbl_df(trainset1)
#
#
# trainset1
# spray
#
# distance_to_spray <- function(){
# ?dist()
# }
#
#
#
# spray <- filter(spray, Latitude < 42.1)
# spray$Year <- as.integer(spray$Year)
#
# ggplot(spray, aes(x=Longitude, y=Latitude, color=Date)) + geom_point()
#
# + facet_wrap(~Year)
#
# spray %>% group_by(Date) %>% summarise(Num = n()) %>% ggplot(.,aes(x=Date, y=Num)) + geom_point()
# ggplot(spray, aes(x=Date)) + geom
#
# spraysample <- filter(spray, Date<"2011-09-08")
# ggplot(spraysample, aes(x=Longitude, y=Latitude, color=Date)) + geom_point()
#
#
#
# ggplot(trainset1, aes(x=Date, y=NumMosquitos)) + geom_point()
#
# # Take samples from 2013 to see Spray effect
# unique(spray$Date)
# spraysample <- filter(spray, Date=="2013-07-17")
#
# ggplot(spraysample, aes(x=Longitude, y=Latitude)) + geom_point(aes(color=Date, alpha=0.5))
#
# trainset1sample <- filter(trainset1, Year==2013 & Date < "2013-08-07" )
#
# library(pdist)
#
# spraylocations <- select(spraysample, Latitude, Longitude)
# spraylocations <- as.matrix(spraylocations)
#
# trainset1locations <- select(trainset1sample, Latitude, Longitude)
# trainset1locations <- as.matrix(trainset1locations)
#
# spraytrain.dist <- pdist(spraylocations, trainset1locations)
#
# str(spraytrain.dist)
# spraytrain.dist <- as.matrix(spraytrain.dist)
# spraytrain.dist <- spraytrain.dist * spraytrain.dist
|
5f6c8dd21eb9392e108e0cdc51099f35e6a898b9 | b80d5cc2a076fd461f0478c5b265bc1d4ade5f75 | /man/lookup.Rd | 1be69cccb0a6a97821c0dde43bf10b4549f060e0 | [
"MIT"
] | permissive | minghao2016/DOPE | 0b3355db8220a1302e1c86cc92e8b175af2fc2c2 | 59b5c5c52cb27782225cd5dcffe098a85184a23a | refs/heads/master | 2023-06-02T15:44:07.358443 | 2021-06-18T18:05:58 | 2021-06-18T18:05:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,111 | rd | lookup.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookup.R
\name{lookup}
\alias{lookup}
\title{Make a table with the class and category for a drug name}
\usage{
lookup(
drug_vec = NULL,
...,
searchClass = TRUE,
searchCategory = TRUE,
searchSynonym = TRUE
)
}
\arguments{
\item{drug_vec}{a vector of strings holding possible drug names}
\item{...}{multiple strings holding possible drug names}
\item{searchClass}{Should the substances listed in \code{...} be searched
for in column \code{class}? Defaults to TRUE.}
\item{searchCategory}{Should the substances listed in \code{...} be searched
for in column \code{category}? Defaults to TRUE.}
\item{searchSynonym}{Should the substances listed in \code{...} be searched
for in column \code{synonym}? Defaults to TRUE.}
}
\value{
A lookup table (a \code{data.frame}) with four columns:
the original search term, drug class, drug category, and drug street name.
}
\description{
This function provides a table with drug class and category
information for all of the known drugs.
}
\examples{
lookup("zip", "shrooms")
}
|
9a59e32ad7cb15911d7c40073a5d58db949e2904 | 8c15573683b8849dcd8b9022fcc6db81ed9b9b3a | /r/metaprep.r | 37db413f16107cb277beb04d30a3d5ff274a3286 | [] | no_license | seananderson/ext-meta | 3b9c94a7d62b756b9a136258b3acd14b98ee3889 | d03a13364e164a33825954808d4ef83a95fbbc85 | refs/heads/master | 2021-01-02T09:34:45.390037 | 2015-03-11T16:55:19 | 2015-03-11T16:55:19 | 11,616,353 | 0 | 0 | null | 2014-10-22T04:13:35 | 2013-07-23T18:53:14 | R | UTF-8 | R | false | false | 8,980 | r | metaprep.r | ############################
# Prep data and functions for the meta-analysis
#
#
# Changelog
# 20121030 - added centered environmental variables to ext
############################
#library and data
library(metafor)
library(plyr)
library(lme4)
library(ggplot2)
#######
# helper functions
#######
#read in a set of formulate for calculating effect sizes and variances
source("../r/conversions.R")
# Check the levels of a slice of a data set with a particular trait:
# returns the Trait.category levels present among rows whose Trait == trait.
checkLevels <- function(adf, trait) {
  rows <- which(adf$Trait == trait)
  levels(factor(adf$Trait.category[rows]))
}
####
#functions and class to take a trait and output a metafor object and subsetted data file for diagnosis
#####
#' Fit a metafor mixed-effects model for one aggregate trait.
#'
#' Subsets `adf` to rows whose Aggregate.Trait equals `trait`, optionally
#' drops rows with missing effect sizes or variances, and fits a moderator
#' model with one coefficient per Trait.category (no intercept).
#'
#' @param adf data frame with columns Aggregate.Trait, Trait.category,
#'   lnor (log odds ratio) and vlnor (its variance).
#' @param trait value of Aggregate.Trait to analyse.
#' @param nafilter drop rows with NA lnor or vlnor before fitting?
#'   (default changed from the reassignable `F` to the literal `FALSE`)
#' @param ... passed through to metafor::rma().
#' @return an object of class "traitMeta": list(model = rma fit, data = subset).
traitMod <- function(adf, trait, nafilter = FALSE, ...) {
  adf <- subset(adf, adf$Aggregate.Trait == trait)
  if (nafilter) {
    adf <- subset(adf, !is.na(adf$lnor))
    adf <- subset(adf, !is.na(adf$vlnor))
  }
  # re-factor so unused category levels are dropped from the design matrix
  adf$Trait.category <- factor(adf$Trait.category)
  res <- rma(yi = lnor, vi = vlnor, data = adf, mods = ~Trait.category + 0, ...)
  ret <- list(model = res, data = adf)
  class(ret) <- "traitMeta"
  ret
}
#' Subset the data for one aggregate trait and report its category levels.
#'
#' Same subsetting logic as traitMod(), but prints the resulting
#' Trait.category levels and returns the filtered data frame for inspection.
#'
#' @param adf data frame with Aggregate.Trait, Trait.category, lnor, vlnor.
#' @param trait value of Aggregate.Trait to keep.
#' @param nafilter drop rows with NA lnor or vlnor?
#'   (default changed from the reassignable `F` to the literal `FALSE`)
#' @return the filtered data frame, with Trait.category re-leveled.
datacheck <- function(adf, trait, nafilter = FALSE) {
  adf <- subset(adf, adf$Aggregate.Trait == trait)
  if (nafilter) {
    adf <- subset(adf, !is.na(adf$lnor))
    adf <- subset(adf, !is.na(adf$vlnor))
  }
  adf$Trait.category <- factor(adf$Trait.category)
  print(levels(adf$Trait.category))
  adf
}
# Print a per-category row count for one aggregate trait.
# Subsets to rows whose Aggregate.Trait == trait, then uses plyr::ddply to
# tabulate how many rows each Trait.category contributes; the table is
# printed (and is also the function's return value, via print()).
# NOTE(review): relies on plyr being attached at the top of this file.
levelcheck<-function(adf, trait){
	adf<-subset(adf, adf$Aggregate.Trait == trait)
	#adf<-adf[-which(is.na(adf$Trait.category)),]
#	print(levels(factor(adf$Trait.category)))
	print(ddply(adf, .(Trait.category), function(x) nrow(x)))
}
# S3 methods for "traitMeta" objects: each delegates to the wrapped metafor
# model stored in obj$model. They now accept and forward `...` so that
# arguments to the generics (e.g. print(x, digits = 3)) reach the underlying
# rma methods instead of erroring with "unused argument"; S3 methods should
# accept at least the arguments of their generic.
print.traitMeta <- function(obj, ...) print(obj$model, ...)
summary.traitMeta <- function(obj, ...) summary(obj$model, ...)
coef.traitMeta <- function(obj, ...) coef(obj$model, ...)
plot.traitMeta <- function(obj, ...) plot(obj$model, ...)
#ggplotting
#plotting effect sizes using ggplot2
# Point-range plot of per-category model coefficients with 95% CIs and a
# dashed zero line. Labels each category with its sample size, taken as the
# column sums of the model matrix obj$X (counts of rows per category dummy).
# NOTE(review): assumes coef(obj) yields a data.frame with columns
# estimate/ci.lb/ci.ub and coefficient rownames — confirm against the
# installed metafor version (coef(summary(fit)) is the tabular form there).
ggplotMetaforCoefs<-function(obj){
  oCoefs<-coef(obj)
  # strip the "Trait.category" prefix from coefficient names for clean labels
  oCoefs$categories<-gsub("Trait.category", "", rownames(oCoefs))
  n<-colSums(obj$X, na.rm=T)
  # append "n=<count>" on a second line of each axis label
  oCoefs$categories<-paste(oCoefs$categories, n, sep="\nn=")
  ggplot(data=oCoefs)+geom_pointrange(aes(x=categories, y=estimate, ymin=ci.lb, ymax=ci.ub), size=1.5) +
    geom_abline(aes(intercept=0, slope=0), size=2, linetype=2) +
    xlab("")+
    ylab("Extinction Selectivity\n(Log Odds Ratio)\n")
}
# Convenience wrapper: plot the model wrapped inside a "traitMeta" object.
ggplotMetaforCoefs.traitMeta<-function(obj) ggplotMetaforCoefs(obj$model)
#rangeFit<-traitMod(bivalves, "Geographic Range")
#summary(rangeFit)
#################
# analysis and plotting for covariates
#################
# does expand grid over a range of values
# Evaluate model predictions (with standard errors) on the full grid of the
# supplied covariate values; returns the grid with fit/se.fit columns added.
fullcurve <- function(amodel, ...) {
  grid.df <- as.data.frame(expand.grid(...))
  pred <- predict(amodel, grid.df, se.fit = TRUE, type = "response")
  cbind(grid.df, data.frame(fit = pred[[1]], se.fit = pred[[2]]))
}
# Column names of the design matrix used in a fitted metafor rma model;
# predict(..., addx = TRUE) attaches that matrix as component X.
getCols<-function(rmaObj) colnames(predict(rmaObj, addx=T)$X)
# Indices of the trait-category columns in a design-matrix column-name
# vector: everything before the first "acovariate" column. Uses seq_len()
# so an "acovariate" in position 1 yields integer(0) rather than the bogus
# c(1, 0) that the previous 1:(n - 1) idiom produced.
traitCols <- function(cols) seq_len(which(cols == "acovariate")[1] - 1L)
#make a predicted data frame over all possible levels for an RMA object with a covariate
# Builds a grid of Trait.category x covariate values spanning the observed
# covariate range, then computes fitted values with confidence (ci.*) and
# prediction/credible (cr.*) bounds, either from a metafor rma fit
# (method = "rma") or from an lme4 fit (method = "lmer"). The dummy
# lnor column is only there so model.frame()/model.matrix() can be applied.
# NOTE(review): sink("/dev/null") is POSIX-only (breaks on Windows);
# capture.output() would be portable — left unchanged here.
makePredDF<-function(adf, rmaFit, points=100, time=FALSE, usetime=244.15, method="rma"){
  # design formula must match the one used to fit rmaFit
  m <- lnor ~ Trait.category * acovariate + 0
  if(time){
    m <- lnor ~ Trait.category * acovariate + meanDate + 0
  }
  covMax<-max(adf$acovariate, na.rm=T)
  covMin<-min(adf$acovariate, na.rm=T)
  # grid: every category crossed with `points` steps across the covariate range
  ndf<-expand.grid(Trait.category=levels(adf$Trait.category),
                   acovariate=seq(covMin, covMax, abs(covMin-covMax)/points))
  if(time){ndf<-cbind(ndf, meanDate=usetime)}
  ndf$lnor<-1
  nf<-model.frame(m, ndf)
  mm<-model.matrix(m, nf)
  if(method=="rma"){
    #get predictions based on the new data frame
    predDF<-predict(rmaFit, mm)
    sink("/dev/null") #suppress printing of the next statement
    # as.data.frame(print(...)) relies on print() returning a tabular form
    predDF<-cbind(ndf, as.data.frame(print(predDF)))
    sink()
    # coerce the prediction columns to plain numerics
    predDF<-within(predDF, {
      se<-as.numeric(se)
      pred<-as.numeric(pred)
      ci.lb<-as.numeric(ci.lb)
      ci.ub<-as.numeric(ci.ub)
      cr.lb<-as.numeric(cr.lb)
      cr.ub<-as.numeric(cr.ub)
    })
  }
  #for lme4 - based on methods from http://glmm.wikidot.com/faq
  #as there is no predict for lme4 on CRAN as of 9/2012
  if(method=="lmer"){
    predDF<-ndf
    predDF$pred<-mm %*% fixef(rmaFit) #yes, I know it's not an rma model
    # fixed-effect prediction variance from the coefficient covariance matrix
    pvar1 <- diag(mm %*% tcrossprod(vcov(rmaFit),mm))
    # add the study-level random-effect variance for the wider interval
    tvar1 <- pvar1+VarCorr(rmaFit)$study.ID[1] ## must be adapted for more complex models
    predDF<-within(predDF, {
      se<-2*sqrt(pvar1)
      pred<-as.numeric(pred)
      ci.lb<-predDF$pred-2*sqrt(pvar1)
      ci.ub<-predDF$pred+2*sqrt(pvar1)
      cr.lb<-predDF$pred-2*sqrt(tvar1)
      cr.ub<-predDF$pred+2*sqrt(tvar1)
    })
  }
  predDF
}
# Meta-regression of extinction selectivity (lnor) on Trait.category crossed
# with a continuous covariate, fitted either with metafor::rma or with
# lme4::lmer (random intercept per study.ID), plus a faceted effect-size plot
# with fitted curves and either confidence ("fixed") or credible ("random")
# ribbons from makePredDF().
#
# Arguments:
#   adf: data frame with lnor, vlnor, Trait.category, study.ID and the
#     covariate column named by `covariate` (plus meanDate when time = TRUE).
#   covariate: name of the column to use as moderator.
#   return: "analysis" returns the fitted model; "plot" returns the ggplot.
#   points, time, usetime, method: forwarded to makePredDF().
#   plot: print the plot as a side effect?
#   ci: "fixed" or "random" — which interval to draw as a ribbon.
#   ...: passed on to rma()/lmer().
# NOTE(review): time=F / plot=T use the reassignable shorthands; TRUE/FALSE
# would be safer defaults (not changed in this documentation pass).
covariateAnalysis<-function(adf, covariate, return="analysis", points=100, time=F, plot=T, usetime=244.15, method="rma", ci="fixed", ...){
  # copy the requested column under the fixed name the formulas expect
  adf$acovariate<-adf[[covariate]]
  if(method=="rma"){
    if(time){
      ret<-rma(yi=lnor, vi=vlnor, data=adf, mods=~Trait.category*acovariate+meanDate +0, ...)
    }else{
      ret<-rma(yi=lnor, vi=vlnor, data=adf, mods=~Trait.category*acovariate +0, ...)
    }
    #make the output tables intelligible
    rownames(ret$b)<-gsub("acovariate", covariate, rownames(ret$b))
  }
  if(method=="lmer"){
    if(time){
      ret<-lmer(lnor~Trait.category*acovariate+meanDate +0 + (1|study.ID), weights=1/vlnor, data=adf, ...)
    }else{
      ret<-lmer(lnor~Trait.category*acovariate +0 + (1|study.ID), weights=1/vlnor, data=adf, ...)
    }
    #make the output tables intelligible
    names(attr(ret, "fixef"))<-gsub("acovariate", covariate, names(attr(ret, "fixef")))
  }
  ##create new data for predict
  predDF<-makePredDF(adf, ret, points=points, time=time, usetime=usetime, method=method)
  # raw effect sizes with 95% CIs, one facet per trait category
  retPlot<-ggplot(data=adf, aes( x=acovariate, y=lnor, ymin=lnor-1.96*sqrt(vlnor), ymax=lnor+1.96*sqrt(vlnor))) +
    geom_pointrange(size=1.2) +
    facet_wrap(~Trait.category) +
    theme_bw(base_size=16) +
    xlab(covariate)+
    ylab("Extinction Selectivity\n(log odds ratio)\n") +
    geom_hline(aes(y=0), lwd=1, lty=2)
  # overlay the fitted curve with the requested interval type
  if(ci=="fixed") {
    retPlot<- retPlot + geom_ribbon(data=predDF, aes(x=acovariate, y=pred, ymin=ci.lb, ymax=ci.ub), fill="lightgrey", alpha=0.5) +
      geom_line(data=predDF, aes(x=acovariate, y=pred, ymin=ci.lb, ymax=ci.ub), colour="blue", size=1)
  }
  if(ci=="random") {
    retPlot<- retPlot + geom_ribbon(data=predDF, aes(x=acovariate, y=pred, ymin=cr.lb, ymax=cr.ub), fill="lightgrey", alpha=0.5) +
      geom_line(data=predDF, aes(x=acovariate, y=pred, ymin=cr.lb, ymax=cr.ub), colour="blue", size=1)
  }
  if(plot){
    print(retPlot)
  }
  if(return=="plot") ret<-retPlot
  return(ret)
}
#covariateAnalysis(bivalvesRangeData, "Flood_basalt")
#analysis with covariates
# Older, simpler version of covariateAnalysis(): fits the rma moderator
# model and draws the faceted effect-size plot with a per-facet lm smoother
# (stat_smooth) instead of model-based prediction ribbons. Kept for
# reference; prefer covariateAnalysis() above.
covariateAnalysisOld<-function(adf, covariate, return="analysis"){
  adf$acovariate<-adf[[covariate]]
  ret<-rma(yi=lnor, vi=vlnor, data=adf, mods=~Trait.category*acovariate+0)
  #weight=1/vlnor,
  retPlot<-ggplot(data=adf, aes( x=acovariate, y=lnor, ymin=lnor-1.96*sqrt(vlnor), ymax=lnor+1.96*sqrt(vlnor))) +
    geom_pointrange(size=1.2) +
    facet_wrap(~Trait.category) +
    stat_smooth(method="lm") +
    theme_bw(base_size=16) +
    xlab(covariate)
  print(retPlot)
  if(return=="plot") return(retPlot)
  return(ret)
}
####################################
######data and cleaning
####################################
#read in the data
ext<-read.csv("../data/extinctionMetaClean.csv")
#clean out continuous studies
contExt<-ext[which(!is.na(ext$Trait.category)),]
#ext<-ext[-which(is.na(ext$Trait.category)),]
## add a few extra columns, and centered environmental predictors
# flag whether an observation spans a single stage (start == end stage)
ext$MultipleStages <- as.factor(with(ext, as.character(ext$Start.stage) == as.character(ext$End.stage)))
ext$Global.Regional <- as.factor(ext$Global.Regional)
# mean-center each environmental column; columns 60:94 are assumed to be
# the environmental predictors in this CSV (position-based — fragile if
# the input file's column layout changes)
centExt<-colwise(function(x) x-mean(x, na.rm=T))(ext[,60:94])
names(centExt)<-paste(names(centExt), ".cent", sep="")
ext <- cbind(ext, centExt)
#deal with repeat of Knoll data - 2007 study used same as 1996, but we harvested more from 1996
# relabel the 1996 rows as the 2007 study, then drop the original 2007 rows
knoll2007IDX <- grep("Knoll et al. 2007", as.character(ext$In.text.Citation))
knoll1996IDX <- grep("Knoll et al. 1996", as.character(ext$In.text.Citation))
levels(ext$study.ID) <- gsub("Knoll 2007 PEP", "Knoll et al. 2007", levels(ext$study.ID))
ext$study.ID[knoll1996IDX] <- "Knoll et al. 2007"
ext$study.ID <- factor(ext$study.ID)
ext$In.text.Citation[knoll1996IDX] <- "Knoll et al. 2007"
ext$Year[knoll1996IDX] <- "2007"
ext <- ext[-knoll2007IDX,]
#Make colors for printing
library(RColorBrewer)
#based on answer from http://stackoverflow.com/questions/6181653/creating-a-more-continuous-color-palette-in-r-ggplot2-lattice-or-latticeextra
# interpolate a brewer palette of n base colors out to diff(range) colors
getBrewColors <- function(n, palette, range)
  colorRampPalette(brewer.pal(n, palette))(diff(range))
#make colors for individual studies
len <- length(levels(ext$study.ID))
colorsForStudies <- getBrewColors(12, "Paired", c(0,len))
# assign each study.ID level its own color
ext$color.ID = NA
for(i in 1:len){
  study <- levels(ext$study.ID)[i]
  ext$color.ID[which(as.character(ext$study.ID)==study)] <- colorsForStudies[i]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.