blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
327581dc4a9840fa476c0e7b7c02d881798ee5fd | 6f44fa8b69db185adad4fdd568141ab31aa9193f | /man/fitSubmodels.Rd | 4bec5422f6eedddeb897a105fd8aceadf2c03735 | [] | no_license | cran/semtree | c3df84fa4f712c299f18c68b4cd0f25731edd67f | b542f84cac056c4fa0c04f6c8475d6154edeac8b | refs/heads/master | 2022-06-03T00:39:18.868941 | 2022-05-13T19:20:02 | 2022-05-13T19:20:02 | 76,660,595 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 839 | rd | fitSubmodels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitSubmodels.R
\name{fitSubmodels}
\alias{fitSubmodels}
\title{Fit multigroup model for evaluating a candidate split}
\usage{
fitSubmodels(
model,
subset1,
subset2,
control,
invariance = NULL,
return.models = FALSE
)
}
\arguments{
\item{model}{A model specification that is used as template for each of the two groups}
\item{subset1}{Dataset for the first group model}
\item{subset2}{Dataset for the second group model}
\item{control}{a \code{semtree.control} object}
\item{invariance}{fit models with invariant parameters if given. NULL otherwise (default).}
\item{return.models}{boolean. If \code{TRUE}, the fitted models are
returned; returns NA if the fit fails}
}
\description{
Fit multigroup model for evaluating a candidate split
}
|
# gdp-difference-teaching.R -------------------------------------------------
# Teaching example: two economies start at different GDP-per-capita levels
# (1 000 vs 10 000) and grow at constant but different rates (10% vs 5%).
# Builds a 55-year series per country and draws three stacked panels:
#   1. the absolute GDP gap (country1 - country2) over time,
#   2. GDP levels on a linear scale,
#   3. GDP levels on a log scale (constant growth appears as a straight line).
# Plot labels and legends are in Bulgarian.
# (The original began with rm(list = ls()) and built the data via a
# placeholder matrix plus attach()/transform()/detach(); both patterns are
# discouraged and have been replaced with a behaviorally equivalent,
# directly vectorized construction.)

library(tidyr)
library(ggplot2)
library(gridExtra)
library(scales)

# Year index 1..55 and the two compounded GDP-per-capita series.
n_years <- 55
growth <- data.frame(
  n_year   = seq_len(n_years),
  country1 = 1000  * (1 + 0.10)^seq_len(n_years),  # poor but fast-growing country
  country2 = 10000 * (1 + 0.05)^seq_len(n_years)   # rich but slow-growing country
)
head(growth)

# Absolute difference in GDP per capita (country1 - country2) by year.
GDPdifference <- data.frame(
  n_year  = seq_len(n_years),
  GDPdiff = growth$country1 - growth$country2
)
head(GDPdifference)

# Long format for ggplot: one row per (year, country).
# (gather() is superseded by pivot_longer(); kept here so the output object
# is identical to the original script's.)
df.growth <- gather(growth, key = "country", value = "GDP", 2:3)
str(df.growth)

# This option options(scipen = 3) is very important. Without it the
# graph will have on the axis 1e+05, etc.
options(scipen = 3)
# Note: for ggplot we can first load the library scales
# library(scales). Then add
# + scale_x_continuous(labels = comma)
# We also remove the axes with "axes = F" and add them
# later with axis(1, pos = 0) and axis(2, pos = 0)
# Note: if we want our axes to start at 0, we use the options
# xaxs = "i", yaxs = "i"
# Note: if we want to do it in ggplot, we add
# + scale_x_continuous(expand=c(0,0))
#
# Base-graphics version of the three panels, kept for reference:
# par(mfrow = c(3,1), mar = c(3,3,1,1))
#
# with(GDPdifference, plot(GDPdiff ~ n_year, xlab = "",
#                          ylab = "Разлика в БВП, лв.",
#                          axes = F, xlim = c(0, 60), ylim = c(-30000, 40000),
#                          main = "Разлика в БВП \n на глава от населението \n между Страна 1 и Страна 2", type = "l", cex.main = 1.0))
# axis(1, pos=0)
# axis(2, pos=0)
# abline(v = 50, lty = 3, col = "darkblue")
#
# with(growth, plot(country1 ~ n_year, type = "l"))
# with(growth, lines(country2, col = "red"))
# abline(v = c(35,50), col = c("orange", "blue"), lty = 3)
#
# with(growth, plot(log(country1) ~ n_year, type = "l"))
# with(growth, lines(log(country2), col = "red"))
# abline(v = c(35,50), col = c("orange", "blue"), lty = 3)

# Panel 1: absolute GDP gap; dashed reference lines mark years 35 and 50.
plot1 <- ggplot(data = GDPdifference, aes(n_year, GDPdiff)) + geom_line() +
  geom_vline(xintercept = c(35, 50), colour = c("orange", "blue"),
             linetype = "longdash") +
  scale_x_continuous(expand = c(0, 0)) +
  geom_abline(intercept = 0, slope = 0, colour = "grey") +
  xlab("") + ylab("Разлика в БВП, лв.") +
  ggtitle("Разлика в БВП на глава от населението \n на Страна 1 и Страна 2") +
  theme_bw() +
  theme(axis.text.y = element_text(angle = 45, hjust = 1, vjust = 0.5))

# Panel 2: GDP levels per country on a linear scale.
plot2 <- ggplot(df.growth, aes(x = n_year, y = GDP, colour = country)) +
  geom_line() +
  geom_vline(xintercept = c(35, 50), colour = c("orange", "blue"),
             linetype = "longdash") +
  scale_x_continuous(expand = c(0, 0)) +
  xlab("") + ylab("БВП на глава от населението, лв.") +
  ggtitle("Промяна в БВП на глава от населението, \n абсолютни стойности, лв.") +
  scale_colour_manual("", labels = c("Страна 1", "Страна 2"),
                      values = c("darkgreen", "red")) +
  theme_bw() + theme(legend.position = c(0.12, 0.91),
                     axis.text.y = element_text(angle = 45, hjust = 1, vjust = 0.5))

# Panel 3: same series with a log2-transformed y axis.
plot3 <- ggplot(df.growth, aes(x = n_year, y = GDP, colour = country)) +
  geom_line() +
  geom_vline(xintercept = c(35, 50), colour = c("orange", "blue"),
             linetype = "longdash") +
  scale_x_continuous(expand = c(0, 0)) +
  xlab("Година") + ylab("БВП на глава от населението, логаритмична скала") +
  ggtitle("Промяна в БВП на глава от населението, \n логаритмична скала на БВП") +
  scale_colour_manual("", labels = c("Страна 1", "Страна 2"),
                      values = c("darkgreen", "red")) +
  theme_bw() + theme(legend.position = c(0.12, 0.91),
                     axis.text.y = element_text(angle = 45, hjust = 0, vjust = 0.5)) +
  scale_y_continuous(trans = log2_trans() #, # or coord_trans(y="log2")
                     # breaks = trans_breaks("log2", function(x) 2^x),
                     # labels = trans_format("log2", math_format(2^.x))
  )

# Stack the three panels vertically.
grid.arrange(plot1, plot2, plot3, ncol = 1)

# matplot is a useful function to plot data in wide format
# matplot(x = growth[ ,1], y = growth[ , 2:3], type = "l", col = c("red", "blue"), lty = 1)
# abline(v = 50, col = "darkgreen", lty = 3, lwd = 2)
# legend(x = 1, y = 190000, legend = c("Страна 1", "Страна 2"),
#        col = c("red", "blue"), fill = c("red", "blue"), cex = 0.8)
#
8a45809a58d2d665a58f3a8a8259db7aa2324b4f | 1f8d064f98fce1cb7a1e39a6223aec20dbeed5d3 | /man/ematrix.nhm.Rd | 8f5e9f5ba5f4a406c691894989ad4b16b0c67baa | [] | no_license | cran/nhm | c2234b2c9c698fa021ea31af9238c34ba11aaab3 | d77273020ba895b337f3d304886e72a7d8aff09e | refs/heads/master | 2020-12-22T01:37:56.389788 | 2019-10-11T08:10:05 | 2019-10-11T08:10:05 | 236,631,983 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,264 | rd | ematrix.nhm.Rd | \name{ematrix.nhm}
\alias{ematrix.nhm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Compute the misclassification probability matrix from a fitted nhm model
}
\description{
Outputs the matrix of misclassification probabilities in a misclassification type hidden Markov multi-state model fitted using \code{\link{nhm}}.
}
\usage{
ematrix.nhm(object, covvalue=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
Fitted model object produced using \code{\link{nhm}}.
}
\item{covvalue}{
Optional vector of covariate vectors (should be given in the order specified in the \code{covariate} option in \code{nhm}). If omitted the function will use the mean values of the covariates.
}
}
\details{
The \code{emat_nhm} function used to fit the model is called to obtain the values of the misclassification probabilities at the supplied times for the supplied covariate value.}
\value{
Returns a list containing a matrix of misclassification probabilities and a matrix of corresponding standard errors computed using the delta method.
}
\author{
Andrew Titman \email{a.titman@lancaster.ac.uk}
}
\seealso{
\code{\link{nhm}}, \code{\link{plot.nhm}}, \code{\link{predict.nhm}}, \code{\link{qmatrix.nhm}}
}
|
085db0750920856aa97b6516797b9a4f5b995486 | bb0fb51530335b10a2e64557fb6c950be61b7968 | /Rscripts/5.2.createOverviewDF_HCV_2.ref.R | 4baefae7b4b970678e8a62afc26f36447030d522 | [] | no_license | kahot/HCV_project | bd0450d07c84906b13b3cf6b442de68cdc7f3e44 | 0047c945f9f1522ebbda2b1cb3adf7742ce01a9e | refs/heads/master | 2022-03-24T06:01:24.189668 | 2019-12-16T17:13:03 | 2019-12-16T17:13:03 | 187,297,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,328 | r | 5.2.createOverviewDF_HCV_2.ref.R | #Script to create an overview files with mutation frequency data and their associated features
#(mut freq/features calculated based on Ref NT, Results=Overview2)
# ---------------------------------------------------------------------------
# Pass 1: for every per-sample SeqData csv, classify each nucleotide position
# by the coding consequence (synonymous / nonsynonymous / stop) of a
# transition and of the two possible transversions at that position, and
# store the annotated table in the `Overview` list (one element per sample).
# Helper functions transition(), transv1(), transv2() and typeofsitefunction()
# are defined in the sourced baseRscript.R -- see that file for their exact
# semantics.
library(dplyr)
library(tidyverse)
source("Rscripts/baseRscript.R")
#Get the file names (SeqData files)
HCVFiles_SeqData<-list.files("Output1A/SeqDataQ35/",pattern="SeqData")
#create a Overview file for each sample
Overview<-list()
for (i in 1:length(HCVFiles_SeqData)){
#for (i in 1:1){
# Sample id is taken from characters 9-15 of the file name (after the
# "SeqData_" prefix) -- confirm against the actual naming scheme.
id<-substr(paste(HCVFiles_SeqData[i]),start=9,stop=15)
print(id)
OverviewDF<-read.csv(paste0("Output1A/SeqDataQ35/",HCVFiles_SeqData[i]),stringsAsFactors=FALSE)
OverviewDF<-OverviewDF[,-1]
# Reference genome; positions 262:8999 are the region used downstream.
# NOTE(review): `ref` is re-read from disk on every loop iteration although
# it never changes -- it could be loaded once before the loop.  It is also
# only used by the commented-out consensus line below.
ref<-read.dna("Data/HCVref.fasta", format = "fasta",as.character=TRUE)
#replace ? with N
ref<-ref[262:8999]
# Per-position classification vectors, filled three entries per codon
# (one per codon position).
TypeOfSite<-c()
TypeOfSite.tv1<-c()
TypeOfSite.tv2<-c()
for (codon in 1:(nrow(OverviewDF)/3)) {#for each codon in the sequence
positions <- c(codon*3-2,codon*3-1, codon*3)
WTcodon <- OverviewDF$ref[positions]
# Codons containing any NA are masked with 'n' so the type lookup below
# yields a non-informative classification.
if (is.na(WTcodon[1])|is.na(WTcodon[2])|is.na(WTcodon[3])){
WTcodon<-c('n','n','n')
mutant1codon<-c('n','n','n')
mutant2codon<-c('n','n','n')
mutant3codon<-c('n','n','n')}
else{
# mutantXcodon = wild-type codon with a transition at codon position X.
mutant1codon <- c(transition(WTcodon[1]), WTcodon[2:3]) #If the first position has a transition mutation, it's labeled as mutant1codon.
mutant2codon <- c(WTcodon[1],transition(WTcodon[2]), WTcodon[3])
mutant3codon <- c(WTcodon[1:2], transition(WTcodon[3]))
#transversion mutation to 'a' or 'c'
mutant1codon.tv1 <- c(transv1(WTcodon[1]), WTcodon[2:3])
mutant2codon.tv1 <- c(WTcodon[1],transv1(WTcodon[2]), WTcodon[3])
mutant3codon.tv1 <- c(WTcodon[1:2], transv1(WTcodon[3]))
#transversion mutation to 'g' or 't'
mutant1codon.tv2 <- c(transv2(WTcodon[1]), WTcodon[2:3])
mutant2codon.tv2 <- c(WTcodon[1],transv2(WTcodon[2]), WTcodon[3])
mutant3codon.tv2 <- c(WTcodon[1:2], transv2(WTcodon[3]))
}
# NOTE(review): when a codon is masked in the NA branch above, the
# .tv1/.tv2 mutant codons are NOT reset and keep their values from the
# previous iteration (or are undefined if the very first codon is masked).
# Confirm that typeofsitefunction() on an all-'n' WTcodon makes the three
# .tv1/.tv2 appends below harmless in that case.
TypeOfSite<-c(TypeOfSite,typeofsitefunction(WTcodon,mutant1codon))
TypeOfSite<-c(TypeOfSite,typeofsitefunction(WTcodon,mutant2codon))
TypeOfSite<-c(TypeOfSite,typeofsitefunction(WTcodon,mutant3codon))
TypeOfSite.tv1<-c(TypeOfSite.tv1,typeofsitefunction(WTcodon,mutant1codon.tv1))
TypeOfSite.tv1<-c(TypeOfSite.tv1,typeofsitefunction(WTcodon,mutant2codon.tv1))
TypeOfSite.tv1<-c(TypeOfSite.tv1,typeofsitefunction(WTcodon,mutant3codon.tv1))
TypeOfSite.tv2<-c(TypeOfSite.tv2,typeofsitefunction(WTcodon,mutant1codon.tv2))
TypeOfSite.tv2<-c(TypeOfSite.tv2,typeofsitefunction(WTcodon,mutant2codon.tv2))
TypeOfSite.tv2<-c(TypeOfSite.tv2,typeofsitefunction(WTcodon,mutant3codon.tv2))
} # This creates a vector showing if there is a transition/transversion mutation at a particular codon, &
# whether the mutation will be Syn, nonSyn, or Stop codon.
# Truncate the classification vectors to the table length and attach them.
OverviewDF$Type<-TypeOfSite[1:length(OverviewDF$pos)]
OverviewDF$Type.tv1<-TypeOfSite.tv1[1:length(OverviewDF$pos)]
OverviewDF$Type.tv2<-TypeOfSite.tv2[1:length(OverviewDF$pos)]
#OverviewDF$consensus<-ref[1:length(OverviewDF$pos)]
# Drop the first six columns before storing; the kept columns are consumed
# by the second pass below.
Overview[[i]]<-OverviewDF[,-c(1:6)]
names(Overview)[i]<-id
}
###############################
#Mut rates from Geller paper
## 1. Using 12 different Mutation Frequencies based on from/to nucleotides
# ---------------------------------------------------------------------------
# Pass 2: attach per-substitution mutation rates (Geller et al. csv), derive
# estimated selection coefficients via EstimatedS() (from baseRscript.R),
# translate the major/reference/mutant codons to amino acids, flag drastic
# amino-acid changes (amCat() category change) and CpG-creating mutations,
# then write one annotated csv per sample.
mutrates<-read.csv("Data/Geller.mutation.rates.csv")
#Mut rates and sel coefficients from Abrams paper (for HIV)
#mutrates1<-read.csv("Data/HIVMutRates.csv")
Overview_summary<-list()
for (i in 1:length(Overview)){
OverviewDF<-Overview[[i]]
id<-substr(paste(HCVFiles_SeqData[i]),start=9,stop=15)
# Transition rate for each reference base (a->g, c->u, g->a, u->c).
OverviewDF$TSmutrate[OverviewDF$ref=="a"]<-mutrates$mut.rate[mutrates$mutations=="AG"]
OverviewDF$TSmutrate[OverviewDF$ref=="c"]<-mutrates$mut.rate[mutrates$mutations=="CU"]
OverviewDF$TSmutrate[OverviewDF$ref=="g"]<-mutrates$mut.rate[mutrates$mutations=="GA"]
OverviewDF$TSmutrate[OverviewDF$ref=="t"]<-mutrates$mut.rate[mutrates$mutations=="UC"]
#OverviewDF$TSmutrate.hiv[OverviewDF$ref=="a"]<-mutrates1$Probability[mutrates1$Nucleotide.substitution=="AG"]
#OverviewDF$TSmutrate.hiv[OverviewDF$ref=="c"]<-mutrates1$Probability[mutrates1$Nucleotide.substitution=="CU"]
#OverviewDF$TSmutrate.hiv[OverviewDF$ref=="g"]<-mutrates1$Probability[mutrates1$Nucleotide.substitution=="GA"]
#OverviewDF$TSmutrate.hiv[OverviewDF$ref=="t"]<-mutrates1$Probability[mutrates1$Nucleotide.substitution=="UC"]
# Transversion rates; mean() around a single value below is a no-op.
OverviewDF$TVSmutrate.tv1[OverviewDF$ref=="a"]<-mean(mutrates$mut.rate[mutrates$mutations=="AC"])
OverviewDF$TVSmutrate.tv1[OverviewDF$ref=="c"]<-mean(mutrates$mut.rate[mutrates$mutations=="CA"])
OverviewDF$TVSmutrate.tv1[OverviewDF$ref=="g"]<-mean(mutrates$mut.rate[mutrates$mutations=="GC"])
OverviewDF$TVSmutrate.tv1[OverviewDF$ref=="t"]<-mean(mutrates$mut.rate[mutrates$mutations=="UA"])
OverviewDF$TVSmutrate.tv2[OverviewDF$ref=="a"]<-mean(mutrates$mut.rate[mutrates$mutations=="AU"])
OverviewDF$TVSmutrate.tv2[OverviewDF$ref=="c"]<-mean(mutrates$mut.rate[mutrates$mutations=="CG"])
OverviewDF$TVSmutrate.tv2[OverviewDF$ref=="g"]<-mean(mutrates$mut.rate[mutrates$mutations=="GU"])
OverviewDF$TVSmutrate.tv2[OverviewDF$ref=="t"]<-mean(mutrates$mut.rate[mutrates$mutations=="UG"])
# NOTE(review): these four lines almost certainly do NOT do what was
# intended.  mean(a, b) passes `b` to mean.default's `trim` argument, so
# each line returns only the FIRST rate, not the average of the two
# transversion rates.  The intended call is mean(c(a, b)).
OverviewDF$TVSmutrate.tvs[OverviewDF$ref=="a"]<-mean(mutrates$mut.rate[mutrates$mutations=="AU"], mutrates$mut.rate[mutrates$mutations=="AC"])
OverviewDF$TVSmutrate.tvs[OverviewDF$ref=="c"]<-mean(mutrates$mut.rate[mutrates$mutations=="CG"],mutrates$mut.rate[mutrates$mutations=="CA"])
OverviewDF$TVSmutrate.tvs[OverviewDF$ref=="g"]<-mean(mutrates$mut.rate[mutrates$mutations=="GU"],mutrates$mut.rate[mutrates$mutations=="GC"])
OverviewDF$TVSmutrate.tvs[OverviewDF$ref=="t"]<-mean(mutrates$mut.rate[mutrates$mutations=="UG"],mutrates$mut.rate[mutrates$mutations=="UA"])
# Per-position pass: selection coefficients from (rate, observed frequency)
# pairs, plus amino-acid translations keyed on the codon phase (k mod 3).
for (k in 1:length(OverviewDF$pos)){
OverviewDF$EstSelCoeff[k] <- EstimatedS(OverviewDF$TSmutrate[k],OverviewDF[k,colnames(OverviewDF)=='freq.Ts.ref'])
#OverviewDF$EstSelCoeff_hiv[k] <- EstimatedS(OverviewDF$TSmutrate.hiv[k],OverviewDF[k,colnames(OverviewDF)=='freq.Ts.ref'])
OverviewDF$EstSelCoeff_transv[k] <- EstimatedS(OverviewDF$TVSmutrate.tvs[k],OverviewDF[k,colnames(OverviewDF)=='freq.transv.ref'])
OverviewDF$EstSelCoeff_trans1[k] <- EstimatedS(OverviewDF$TVSmutrate.tv1[k],OverviewDF[k,colnames(OverviewDF)=='freq.transv1.ref'])
OverviewDF$EstSelCoeff_trans2[k] <- EstimatedS(OverviewDF$TVSmutrate.tv2[k],OverviewDF[k,colnames(OverviewDF)=='freq.transv2.ref'])
# First codon position: translate codon (k, k+1, k+2); the amino-acid
# columns are only filled on the first position of each codon.
if (k%%3==1){
if (is.na(OverviewDF$MajNt[k])|is.na(OverviewDF$MajNt[k+1])|is.na(OverviewDF$MajNt[k+2])) { OverviewDF$MajAA[k]<-"NA"
OverviewDF$WTAA[k]<-"NA"
OverviewDF$MUTAA[k]<-"NA"
OverviewDF$TVS1_AA[k]<-"NA"
OverviewDF$TVS2_AA[k]<-"NA"}
else { OverviewDF$MajAA[k] = seqinr::translate(OverviewDF$MajNt[c(k,k+1,k+2)])
OverviewDF$WTAA[k] = seqinr::translate(OverviewDF$ref[c(k,k+1,k+2)])
OverviewDF$MUTAA[k] = seqinr::translate(c(transition(OverviewDF$ref[k]),OverviewDF$ref[c(k+1,k+2)]))
OverviewDF$TVS1_AA[k] = seqinr::translate(c(transv1(OverviewDF$ref[k]),OverviewDF$ref[c(k+1,k+2)]))
OverviewDF$TVS2_AA[k] = seqinr::translate(c(transv2(OverviewDF$ref[k]),OverviewDF$ref[c(k+1,k+2)]))}
}
# Second codon position: codon spans (k-1, k, k+1); mutation applied at k.
if (k%%3==2){
if (is.na(OverviewDF$MajNt[k-1])|is.na(OverviewDF$MajNt[k])|is.na(OverviewDF$MajNt[k+1])) {OverviewDF$MajAA[k]<-"NA"
OverviewDF$WTAA[k]<-"NA"
OverviewDF$MUTAA[k]<-"NA"
OverviewDF$TVS1_AA[k]<-"NA"
OverviewDF$TVS2_AA[k]<-"NA"}
else { OverviewDF$MajAA[k] = seqinr::translate(OverviewDF$MajNt[c(k-1,k,k+1)])
OverviewDF$WTAA[k] = seqinr::translate(OverviewDF$ref[c(k-1,k,k+1)])
OverviewDF$MUTAA[k] = seqinr::translate(c(OverviewDF$ref[c(k-1)],transition(OverviewDF$ref[k]),OverviewDF$ref[c(k+1)]))
OverviewDF$TVS1_AA[k] = seqinr::translate(c(OverviewDF$ref[c(k-1)],transv1(OverviewDF$ref[k]),OverviewDF$ref[c(k+1)]))
OverviewDF$TVS2_AA[k] = seqinr::translate(c(OverviewDF$ref[c(k-1)],transv2(OverviewDF$ref[k]),OverviewDF$ref[c(k+1)]))}
}
# Third codon position: codon spans (k-2, k-1, k); mutation applied at k.
if (k%%3==0){
if (is.na(OverviewDF$MajNt[k-2])|is.na(OverviewDF$MajNt[k-1])|is.na(OverviewDF$MajNt[k])) { OverviewDF$MajAA[k]<-"NA"
OverviewDF$WTAA[k]<-"NA"
OverviewDF$MUTAA[k]<-"NA"
OverviewDF$TVS1_AA[k]<-"NA"
OverviewDF$TVS2_AA[k]<-"NA"}
else { OverviewDF$MajAA[k] = seqinr::translate(OverviewDF$MajNt[c(k-2,k-1,k)])
OverviewDF$WTAA[k] = seqinr::translate(OverviewDF$ref[c(k-2,k-1,k)])
OverviewDF$MUTAA[k] = seqinr::translate(c(OverviewDF$ref[c(k-2,k-1)],transition(OverviewDF$ref[k])))
OverviewDF$TVS1_AA[k] = seqinr::translate(c(OverviewDF$ref[c(k-2,k-1)],transv1(OverviewDF$ref[k])))
OverviewDF$TVS2_AA[k] = seqinr::translate(c(OverviewDF$ref[c(k-2,k-1)],transv2(OverviewDF$ref[k])))}
}
}
#Add whether AA change is drastic & makes CpG
OverviewDF$bigAAChange<-0
OverviewDF$bigAAChange.tv1<-0
OverviewDF$bigAAChange.tv2<-0
OverviewDF$makesCpG <- 0
OverviewDF$makesCpG.tvs <- 0
OverviewDF$makesCpG.tv1 <- 0
OverviewDF$makesCpG.tv2 <- 0
#OverviewDF$makesCpG_all <- 0
#OverviewDF$color<-""
# NOTE(review): `2:nrow(OverviewDF)-1` parses as (2:nrow(OverviewDF)) - 1,
# i.e. 1:(nrow-1), not the probably-intended 2:(nrow-1).  In practice j = 1
# is mostly guarded by the NA check on `trip` below, but the LAST row is
# never visited under either reading, so its CpG/bigAAChange flags stay 0.
for(j in 2:nrow(OverviewDF)-1){
# amCat() (from baseRscript.R) maps an amino acid to a chemical category;
# a category change marks the substitution as "drastic".
WT <- amCat(OverviewDF[j,'WTAA'])
MUT <- amCat(OverviewDF[j,'MUTAA'])
MUT1<-amCat(OverviewDF[j,'TVS1_AA'])
MUT2<-amCat(OverviewDF[j,'TVS2_AA'])
if (WT != MUT) OverviewDF$bigAAChange[j] <- 1
if (WT != MUT1) OverviewDF$bigAAChange.tv1[j] <- 1
if (WT != MUT2) OverviewDF$bigAAChange.tv2[j] <- 1
# trip = reference bases (j-1, j, j+1); a mutation at the focal site j can
# create a CpG with either neighbor.
trip <- OverviewDF$ref[c(j-1, j,j+1)]
if (is.na(trip[1])|is.na(trip[2])|is.na(trip[3]))
next
else{
# Transition at j: c-a -> c-g, or t-g -> c-g.
if (trip[1] == "c" & trip[2] == "a" ) OverviewDF$makesCpG[j] <- 1
if (trip[2] == "t" & trip[3] == "g") OverviewDF$makesCpG[j] <- 1
# Transversions: c/t after a "c" becoming "g" (tv2), or a/g before a "g"
# becoming "c" (tv1); .tvs is the union of the two cases.
if (trip[1] == "c" & (trip[2]=="c"|trip[2]=='t')) OverviewDF$makesCpG.tvs[j] <- 1
if (trip[3] == "g" & (trip[2]=="a"|trip[2]=="g")) OverviewDF$makesCpG.tvs[j] <- 1
if (trip[1] == "c" & (trip[2]=="c"|trip[2]=='t')) OverviewDF$makesCpG.tv2[j] <- 1
if (trip[3] == "g" & (trip[2]=="a"|trip[2]=="g")) OverviewDF$makesCpG.tv1[j] <- 1
#if (trip[1] == "c" & trip[2] != "g") OverviewDF$makesCpG_all[j] <- 1
#if (trip[2] != "c" & trip[3] == "g") OverviewDF$makesCpG_all[j] <- 1
}
}
# Persist the fully annotated table and keep an in-memory copy.
write.csv(OverviewDF,paste0("Output1A/Overview2/",id,"overview2.csv"))
Overview_summary[[i]]<-OverviewDF
print(id)
}
###########################################
### Read depths for all files           ###
# Per-sample sequencing-depth summary: for every SeqData csv, record the
# maximum and mean of its TotalReads column, then write one summary table.
# NOTE(review): files are listed from "Output1A/SeqData/" but read from
# "Output1A/SeqDataQ35/" -- confirm both directories hold the same file names.
HCVFiles_SeqData <- list.files("Output1A/SeqData/", pattern = "SeqData")
n_files <- length(HCVFiles_SeqData)
# Preallocate with correct types.  (Previously MaxDepth/AveDepth were
# initialised as "" -- character -- which silently coerced the numeric
# summaries to strings in the output csv.)
ReadsSummary <- data.frame(
  Sample_ID = character(n_files),
  MaxDepth  = rep(NA_real_, n_files),
  AveDepth  = rep(NA_real_, n_files),
  stringsAsFactors = FALSE
)
for (i in seq_len(n_files)) {
  print(i)
  # Sample id is embedded in the file name (characters 9-15, after the
  # "SeqData_" prefix) -- confirm against the actual naming scheme.
  id <- substr(HCVFiles_SeqData[i], start = 9, stop = 15)
  ReadsSummary$Sample_ID[i] <- id
  print(id)
  SeqData <- read.csv(paste0("Output1A/SeqDataQ35/", HCVFiles_SeqData[i]))
  ReadsSummary$MaxDepth[i] <- max(SeqData$TotalReads, na.rm = TRUE)
  ReadsSummary$AveDepth[i] <- mean(SeqData$TotalReads, na.rm = TRUE)
}
write.csv(ReadsSummary, "Output1A/SeqDataQ35/ReadsSummary_1A.csv")
|
07f8526de78e4510b559bc995f3d996e95f056e3 | eb4af2a181e073c9752385cccbbe5495709d2c1a | /precise_value/R/model_parameters.R | dd31287d383e45a9974e450a468346d622e726e6 | [
"MIT"
] | permissive | ndhendrix/precise-value | d50d029d6d7c9de48e603bce8d8f45e36d2f8a52 | a7c30631982af34e2c6006334a51bf77ae0f21f2 | refs/heads/master | 2022-05-31T05:13:40.208643 | 2022-05-16T23:03:46 | 2022-05-16T23:03:46 | 246,849,108 | 0 | 1 | null | 2022-05-13T18:15:24 | 2020-03-12T14:02:26 | R | UTF-8 | R | false | false | 3,128 | r | model_parameters.R | # Fixed model parameters
# Last updated: 01/12/21
# ---------------------------------------------------------------------------
# Fixed input parameters for the precise-value pharmacogenomic (PGx)
# cost-effectiveness model.  Values are point estimates with sources noted
# inline; the *_change_* quantities are PGx-vs-usual-care differences in
# risk, QALYs, or cost.  Units/time horizons are as stated in the inline
# comments -- confirm against the model code that consumes them.
#discount
# Annual discount rate applied to future costs and QALYs.
discount <- 0.03
## Benefit pattern. Joyce updated on 01/12/2021
# Prevalence of CYP2C19 variants: Poor metabolizer and Intermediate metabolizer.
# Suffixes a/b/w presumably index population subgroups (w = White) -- confirm.
p_clo_a <- 0.5394
p_clo_b <- 0.3900
p_clo_w <- 0.3818
# Prevalence of CYP2C9, CYP4F2, VKORC1 variants. Updated: 09/22/2020: we don't model warfarin variant anymore.
p_war <- 1 #09/22: we don't model warfarin variant anymore.
# Probability being eligible to benefit from PGx. Updated: 09/22/2020. Identified from Kimmel 2013.
p_eligible <- 0.67
##Provider behavior. Joyce updated on 01/12/2021
# probability of regimen change with alert
p_change_alert <- 0.25
# probability of regimen change without alert
p_change_no_alert <- 0.1
##Payoffs. Joyce updated on 01/12/2021
# payoffs: qalys and costs of PGx. Identified from literature review.
# The /0.28 rescaling converts per-variant-carrier effects to the modeled
# population (variant prevalence ~0.28 for White) -- see updates below.
qaly_change_clo <- 0.05/0.28 #Updated on 01/23/2021: account for variant prevalence for White.
cost_change_clo <- 1972/0.28 #Updated on 01/23/2021: account for variant prevalence for White, adjust inflation by CPI.
qaly_change_war <- 0.008
cost_change_war <- -165 #Updated on 01/23/2021: adjust inflation by CPI
# payoffs: ADEs of PGx. Identified from literature review. - Clopidogrel
# Updated on 01/23/2021. (1) not adjust 1-year risk, (2) account for variant prevalence in White
NonFatalMI_change_clo <- -0.008/0.28 #PGx: risk reduction
StentThrombosis_change_clo <- -0.0042/0.28 #PGx: risk reduction
NonFatalIntracranial_change_clo <- 0.0002/0.28 #PGx: risk increase
NonFatalExtracranial_change_clo <- 0.00032/0.28 #PGx: risk increase
CABGBleeding_change_clo <- 0.0001/0.28 #PGx: risk increase
MinorBleeding_change_clo <- 0.0011/0.28 #PGx: risk increase
CABGRevascularization_change_clo <- -0.0006/0.28 #PGx: risk reduction
PCIRevascularization_change_clo <- -0.0049/0.28 #PGx: risk reduction
CVDeath_change_clo <- -0.0065/0.28 #PGx: risk reduction
NONCVDeath_change_clo <- -0.0008/0.28 #PGx: risk reduction
# payoffs: ADEs of PGx. Identified from literature review. - Warfarin
Bleeding_change_war <- -0.007 #PGx: risk reduction
Clot_change_war <- -0.002 #PGx: risk reduction
# Note the `<--` below parses as `<- -0.004` (assignment of a negative).
Death_change_war<--0.004 #PGx: risk reduction. Joyce added 04/10/2021
##Risk of getting clopidogrel for ACS. Joyce updated on 01/12/2021
# Age-band-specific incidence of starting clopidogrel (per person, per model
# cycle -- confirm cycle length against the model code).
p_new_rx_clo_18_24 <- 0.000003
p_new_rx_clo_25_34 <- 0.000021
p_new_rx_clo_35_44 <- 0.000173
p_new_rx_clo_45_49 <- 0.000457
p_new_rx_clo_50_54 <- 0.000775
p_new_rx_clo_55_59 <- 0.001160
p_new_rx_clo_60_64 <- 0.001637
p_new_rx_clo_65_69 <- 0.002344
p_new_rx_clo_70_74 <- 0.003454
p_new_rx_clo_75_79 <- 0.004334
p_new_rx_clo_80_84 <- 0.004971
p_new_rx_clo_85_100<- 0.004706
# Relative risk multiplier applied to the clopidogrel incidence (1 = none).
rr_new_rx_clo <- 1
##Risk of getting warfarin for AF. Joyce updated on 01/12/2021
# Age-band-specific incidence of starting warfarin.
p_new_rx_war_18_24 <- 0.000005
p_new_rx_war_25_34 <- 0.000012
p_new_rx_war_35_44 <- 0.000039
p_new_rx_war_45_49 <- 0.000094
p_new_rx_war_50_54 <- 0.000173
p_new_rx_war_55_59 <- 0.000333
p_new_rx_war_60_64 <- 0.000601
p_new_rx_war_65_69 <- 0.001184
p_new_rx_war_70_74 <- 0.002170
p_new_rx_war_75_79 <- 0.003191
p_new_rx_war_80_84 <- 0.004050
p_new_rx_war_85_100 <- 0.003816
# Relative risk multiplier applied to the warfarin incidence (1 = none).
rr_new_rx_war <- 1
|
# Predict final scores for participants who stopped answering early.
#
# DT_fun: data.table with one row per participant and one score column per
#   question (the columns listed in the global `Sc_cols`); NA means the
#   question was never reached.
#
# Idea: a participant's observed per-question mean is compared with the
# population's cumulative mean over the questions they actually saw; the
# ratio ("ability factor") scales the population means of the unanswered
# questions to impute the missing score mass.
#
# Returns a list with:
#   DT_testing         - copy of DT_fun where Sc48 is imputed with the
#                        population-scaled prediction and every other
#                        remaining NA score is zero-filled,
#   questions_answered - number of non-NA question columns per participant.
check_method_different_stop_times <- function(DT_fun) {
  DT_testing <- copy(DT_fun)  # work on a copy; data.tables mutate by reference

  # Population mean of every question column (one-row data.table), and the
  # cumulative mean of those means after 1, 2, ..., K questions.
  questions_means_shrinked_dt <- DT_testing[, lapply(.SD, mean, na.rm = TRUE), .SDcols = Sc_cols]
  means_cumulative_shrinked_dt <- apply(questions_means_shrinked_dt, 1, function(x) {
    cumsum(x) / seq_along(x)
  })

  # Per-participant total score and count of answered questions.
  score_answered_questions <- apply(DT_testing[, Sc_cols, with = FALSE], 1, sum, na.rm = TRUE)
  questions_answered <- apply(DT_testing[, Sc_cols, with = FALSE], 1, function(x) sum(!is.na(x)))

  # Ability factor: own mean relative to the population's cumulative mean at
  # the same stopping point.
  # NOTE(review): a participant with 0 answered questions produces a 0 index
  # here, which silently drops an element and misaligns `factors`; callers
  # presumably guarantee questions_answered >= 1 -- confirm.
  questions_means <- score_answered_questions / questions_answered
  factors <- questions_means / means_cumulative_shrinked_dt[questions_answered]

  # Predicted remaining score: ability factor times the summed population
  # means of the questions never reached.  (unlist() is hoisted out of the
  # loop, and the "> q" indexing fixes the original `-(1:q)` construction,
  # which mis-indexed when q == 0.)
  question_means_vec <- unlist(questions_means_shrinked_dt)
  predictions <- numeric(length(score_answered_questions))
  for (it in seq_along(questions_answered)) {
    remaining <- question_means_vec[seq_along(question_means_vec) > questions_answered[it]]
    predictions[it] <- factors[it] * sum(remaining)
  }

  # Adapt to the scoring environment: Sc48 carries the prediction for
  # participants who never reached it; every other missing score becomes 0.
  DT_testing$preds_col <- predictions
  DT_testing[, Sc48 := as.numeric(Sc48)]
  DT_testing[is.na(Sc48), Sc48 := preds_col]
  for (col in setdiff(Sc_cols, "Sc48")) {
    # data.table::set() replaces the original setnames()/rename-back dance.
    na_rows <- which(is.na(DT_testing[[col]]))
    if (length(na_rows) > 0) {
      set(DT_testing, i = na_rows, j = col, value = 0)
    }
  }

  list(DT_testing = DT_testing, questions_answered = questions_answered)
}
f2f78c70532b8450805b5d803356c0ccc03bac2f | 24944f7d1dfe96cd14016b6c3143c24a8d7b802b | /AMS PA Paper/FigureCode/Procrustes Rotations to compare intra- and inter-party NOMINATE.R | 24ad92be276e97c51a95ae92a34da3125608b4e2 | [] | no_license | ballardao/RCDim | 19f720df46fbae0502f93a04a0ab440ce2145a9c | 229cc0fa4608a9d28fcc64a4b2aac3f436fdea70 | refs/heads/master | 2020-12-24T17:54:46.404030 | 2018-05-18T04:25:58 | 2018-05-18T04:25:58 | 27,838,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,693 | r | Procrustes Rotations to compare intra- and inter-party NOMINATE.R | rm(list = ls())
setwd("~/Dropbox/Dimensionality/PAFinal/")
# NOTE(review): hard-coded setwd() ties the script to one machine; every
# relative path below assumes this project layout.
# ---------------------------------------------------------------------------
# For each simulation parameter setting: generate "true" legislator ideal
# points and roll-call votes, scale them with NOMINATE for the full chamber
# and for each party separately, then use Procrustes rotation (vegan) to ask
# which recovered configuration is closer to the true ideal points.  Fit
# statistics are collected in procrustesFitFrame and plotted at the end.
# Simulation helpers (preferenceGenerator, nBallDraw, generateRollCalls,
# partyUnityCalculator, dwNominateFunction, summarizeSimulation) are defined
# in the sourced SimulationCodeMain.R.
#################
# Load packages #
#################
installPackages <- FALSE
# NOTE(review): several packages are listed more than once (wnominate,
# ggplot2, pscl); `multicore` and `doMC` may no longer be installable on
# current R -- confirm before re-running install.packages().
toInstall <- c("mvtnorm", "pscl", "wnominate", "ggplot2", "arm", "snow",
"oc", "MASS", "akima", "vegan",
"plyr", "fields", "foreach", "multicore","doMC",
"wnominate", "ggplot2", "pscl", "wnominate", "ggplot2",
"parallel", "proxy")
if(installPackages){install.packages(toInstall, repos = "http://cran.r-project.org")}
lapply(toInstall, library, character.only = TRUE)
empiricalVoteMargins <- as.matrix(read.csv("EmpiricalCode/DataFiles/Observed Frequency of Vote Margins.csv"))
nTypicalSenateVotes <- 525
# All parameter options; expand.grid() below crosses every combination.
parameterSettings <- list(
nDimensions = 3,
nDraws = 5000,
typicalSenateVote =525, # c(1000, 2000, 5000), # A typical recent Senate will range from 500 to 900 or so votes.
nObservations = c(101),
normalMean = 0, # This really should not be varied
normalVariance = 1, # Neither should this
nSimulations = 1:25, # We can run several iterations of each setting.
nSeparateDimensions = 3, # c(1,2,3,4,999), # c(1,2,3,4,999), ## 999 means separation on all dimensions
partySeparation = c(0, 1, 2, 4, 6), # c(0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5,5.5, 6),
majorityPartySize = 51,
scalePartySubsets = c(TRUE), # This could also be false, in which case the script returns only the chamber-level NOMINATE.
beta = c(.5,2),
radius=c(9, 11),
lop=c(.03)
)
# Combine parameter options into a frame to sweep:
parametersToSweep <- expand.grid(parameterSettings)
dim(parametersToSweep)
### Modify (and remove) some of the parameter settings to sweep ###
# 999 separate dimensions gets recoded to the maximum number of dimensions:
parametersToSweep$nSeparateDimensions[parametersToSweep$nSeparateDimensions == 999] <- parametersToSweep$nDimensions[parametersToSweep$nSeparateDimensions == 999]
# Remove settings where the number of separate dimensions is greater than the total number of dimensions
parametersToSweep <- parametersToSweep[!(parametersToSweep$nDimensions < parametersToSweep$nSeparateDimensions), ]
# Remove all where ndSeparate dimension is greater than 1, but partysepartion = 0
#parametersToSweep <- parametersToSweep[!(parametersToSweep$partySeparation == 0 & parametersToSweep$nSeparateDimension > 1), ]
# Review the final set of parameters:
rownames(parametersToSweep) <- 1:nrow(parametersToSweep)
apply(parametersToSweep, 2, table)
# Read in functions
# NOTE(review): installPackages is flipped to TRUE here, contradicting the
# FALSE above -- presumably it is consumed by the sourced script; confirm.
installPackages <- TRUE
source("SimulationsMainText/SimulationCodeMain.R", local = TRUE, echo = TRUE)
################################# WORK DONE HERE ############################################################
procrustesFitCollection <- list()
for(rrr in 1:nrow(parametersToSweep)){
whichRow <- rrr
print(rrr)
# One row of the sweep as a named numeric vector.
rowParameters <- unlist(parametersToSweep[whichRow, ])
rowParameters
# "True" legislator ideal points for this setting (two parties separated by
# partySeparation on nSeparateDimensions dimensions).
idealPoints <- preferenceGenerator(mPSize = rowParameters["majorityPartySize"],
nObs = rowParameters["nObservations"],
pSep = rowParameters["partySeparation"],
nDim = rowParameters["nDimensions"],
nSDim = rowParameters["nSeparateDimensions"],
normV = rowParameters["normalVariance"])
# Proposal and status-quo points drawn uniformly from an n-ball.
.radius <- rowParameters["radius"]
proposalPoints <- nBallDraw(nDim=rowParameters["nDimensions"],nDraws = rowParameters["nDraws"],
.radius=.radius )
statusQuoPoints <- nBallDraw(nDim=rowParameters["nDimensions"],nDraws = rowParameters["nDraws"],
.radius=.radius )
proposalsAndStatusQuos <- list(proposalPoints = proposalPoints, statusQuoPoints = statusQuoPoints)
rollCallMatrix <- generateRollCalls(.iPoints = idealPoints,
pASQ = proposalsAndStatusQuos,
beta=rowParameters["beta"], lop=rowParameters["lop"])
# Randomly flip the 0/1 coding of half the roll calls.
reverseThese <- sample(1:rowParameters["typicalSenateVote"], rowParameters["typicalSenateVote"]/2, replace=FALSE)
rollCallMatrix[,reverseThese] <- (rollCallMatrix[,reverseThese]-1)*-1
###FLAG: Reverse coding roll calls to ensure easy replication
# Fit Roll Call Matrix to empirical distributions
partyUnityScores <- partyUnityCalculator(iPoints = idealPoints,
rcMat = rollCallMatrix)
# Full-chamber NOMINATE scaling.
nominateObject <- dwNominateFunction(rcMat = rollCallMatrix,
nDim = rowParameters["nDimensions"],
lop=rowParameters["lop"])
simulationSummary <- summarizeSimulation(wNom = nominateObject,
pUnity = partyUnityScores,
whichRow = whichRow,
label = "fullChamber")
### Subsection for party-subset scaling ###
#if(rowParameters["scalePartySubsets"] == 1){
# NOTE(review): `if (1 == 0)` permanently disables this branch; the
# party-subset scaling below (partySubsetScaler, numDupes/maxDupes) never
# runs -- presumably an intentional switch-off for this analysis.
if(1 == 0){
print("Scaling party subsets...")
partySubsetSummaries <- partySubsetScaler(iPoints = idealPoints,
rcMat = rollCallMatrix,
nDim = rowParameters["nDimensions"],
pUnity = partyUnityScores,
whichRow = whichRow, numDupes=numDupes,
maxDupes=maxDupes)
simulationSummary <- data.frame(rbind(simulationSummary, partySubsetSummaries))
} # End party-subset conditional
rownames(simulationSummary) <- NULL
# Aliases used below; note `=` assignment at statement level (style: `<-`).
iPoints = idealPoints
pUnity = partyUnityScores
# Party-specific NOMINATE: scale each party's legislators separately.
leftRollCalls <- rollCallMatrix[iPoints$Party == -1, ]
rightRollCalls <- rollCallMatrix[iPoints$Party == 1, ]
leftNominateObject <- dwNominateFunction(rcMat = leftRollCalls,
nDim = rowParameters["nDimensions"],
lop=rowParameters["lop"])
rightNominateObject <- dwNominateFunction(rcMat = rightRollCalls,
nDim = rowParameters["nDimensions"],
lop=rowParameters["lop"])
leftSimulationSummary <- summarizeSimulation(wNom = leftNominateObject,
pUnity = pUnity,
whichRow = whichRow,
label = "leftPartyOnly")
rightSimulationSummary <- summarizeSimulation(wNom = rightNominateObject,
pUnity = pUnity,
whichRow = whichRow,
label = "rightPartyOnly")
output <- data.frame(rbind(leftSimulationSummary, rightSimulationSummary))
############################### ROTATION HERE #################################
par(mfcol = c(1, 1))
# NOTE(review): bare expressions (head(), the three coordinate extractions,
# summary(), and the parenthesized test objects below) do NOT autoprint
# inside a for loop -- they are no-ops here; wrap in print() if output is
# wanted.
head(idealPoints)
nominateObject$legislators[, substr(colnames(nominateObject$legislators), 1, 5) == "coord"]
rightNominateObject$legislators[, substr(colnames(rightNominateObject$legislators), 1, 5) == "coord"]
leftNominateObject$legislators[, substr(colnames(leftNominateObject$legislators), 1, 5) == "coord"]
# First three recovered NOMINATE dimensions and the matching true points.
fullNOM3D <- nominateObject$legislators[, substr(colnames(nominateObject$legislators), 1, 5) == "coord"][, 1:3]
rightNOM3D <- rightNominateObject$legislators[, substr(colnames(rightNominateObject$legislators), 1, 5) == "coord"][, 1:3]
leftNOM3D <- leftNominateObject$legislators[, substr(colnames(leftNominateObject$legislators), 1, 5) == "coord"][, 1:3]
fullIP3D <- idealPoints[, 1:3]
rightIP3D <- idealPoints[iPoints$Party == 1, 1:3]
leftIP3D <- idealPoints[iPoints$Party == -1, 1:3]
# Procrustes-rotate each NOMINATE configuration onto the true points.
fullRotation <- procrustes(X = fullIP3D, Y = fullNOM3D)
leftRotation <- procrustes(X = leftIP3D, Y = leftNOM3D)
rightRotation <- procrustes(X = rightIP3D, Y = rightNOM3D)
plot(fullRotation)
plot(leftRotation)
plot(rightRotation)
summary(fullRotation)
summary(leftRotation)
summary(rightRotation)
# protest() adds a permutation test; per vegan, $ss is the Procrustes sum
# of squares, $t0 the Procrustes correlation, $signif the permutation
# p-value -- confirm against the installed vegan version.
fullTest <- protest(X = fullIP3D, Y = fullNOM3D)
leftTest <- protest(X = leftIP3D, Y = leftNOM3D)
rightTest <- protest(X = rightIP3D, Y = rightNOM3D)
(fullTest)
(leftTest)
(rightTest)
# One row of fit statistics for this parameter setting.
procFrame <- data.frame(fullSoS = fullTest$ss, fullCorr = fullTest$t0, fullSig = fullTest$signif,
leftSoS = leftTest$ss, leftCorr = leftTest$t0, leftSig = leftTest$signif,
rightSoS = rightTest$ss, rightCorr = rightTest$t0, rightSig = rightTest$signif)
outFrame <- data.frame(t(rowParameters), procFrame)
procrustesFitCollection[[rrr]] <- outFrame
}
# Stack the per-setting rows and build plotting columns.
procrustesFitFrame <- do.call(rbind, procrustesFitCollection)
head(procrustesFitFrame)
# Combined intra-party misfit: Euclidean combination of the two party SoS.
procrustesFitFrame$combinedSoS <- with(procrustesFitFrame, sqrt(leftSoS^2+rightSoS^2))
procrustesFitFrame$partySepLabel <- paste0("D = ", procrustesFitFrame$partySeparation)
#write.csv(procrustesFitFrame, "/Results of procrustes comparison.csv", row.names = F)
# Scatter of full-chamber vs intra-party misfit, faceted by party separation;
# the dotted 45-degree line marks equal fit.
# NOTE(review): axis labels say "RMSE" but the quantities plotted are the
# Procrustes sum-of-squares values above -- confirm the intended label.
zzp6 <- ggplot(procrustesFitFrame[is.element(procrustesFitFrame$partySeparation, c(0, 2, 4, 6)), ],
aes(x = fullSoS, y = combinedSoS))
zzp6 <- zzp6 + geom_abline(intercept = 0, slope = 1, lty = 3, colour = "GRAY")
zzp6 <- zzp6 + geom_point()
#zzp6 <- zzp6 + geom_smooth(method = "lm", alpha = 1/4)
zzp6 <- zzp6 + facet_wrap( ~ partySepLabel)
zzp6 <- zzp6 + theme_bw()
zzp6 <- zzp6 + coord_equal()
zzp6 <- zzp6 + xlab("RMSE for Full-Chamber NOMINATE")
zzp6 <- zzp6 + ylab("RMSE for Intra-Party NOMINATE")
#zzp6 <- zzp6 + ggtitle("Comparing fidelity of NOMINATE configurations to \"true\" ideal points")
print(zzp6)
ggsave(plot = zzp6, "PaperFiles/TexFiles/Procrustes_plot.ps", h = 4.5, w = 6.5)
9a40ed8a8b29ff2133ce2aff8d979bcecc3fc62e | 8b6b6b11a2e720daeeb7ca26309dea7110e67901 | /R/falling-limb.R | 711213daedd0b4213bebf468ac93d16e34f3dacf | [] | no_license | tbradley1013/dwqr | 2f10a3fdcac882aad32e8a83ece67af883aea4b2 | f94c1b4114a2197a6a898523a1003cb131999eea | refs/heads/master | 2023-01-13T00:07:25.981734 | 2023-01-08T03:29:29 | 2023-01-08T03:31:00 | 150,794,592 | 1 | 0 | null | 2023-01-08T03:27:40 | 2018-09-28T21:06:06 | R | UTF-8 | R | false | false | 5,081 | r | falling-limb.R | #' classify the falling limb of chlorine trend
#'
#' \code{falling_limb} use the first and second derivative of a
#' time series slope to classify portions of it as the falling limb
#' of the curve
#'
#' @param data a data frame with chlorine residual results
#' @param method the method used to classify the falling limb of the chlorine
#' curve. See Details.
#' @param value_col unqouted column name of column containing the chlorine
#' results for the time series
#' @param first_deriv unquoted column name of column containing first derivative
#' of chlorine time series
#' @param group_col vector of unqouted column names of any grouping columns
#'
#' @details
#' the method argument must be set to one of the following:
#' - "simple" - A simple classification method that classifies any negative
#' first derivative value as a part of the falling limb. Taking the first
#' derivative of the moving average of the chlorine values is likely to reduce
#' false classification rates when this model type is selected
#' - "hmm" - This method uses the depmixS4 package to fit a hidden markov model
#' using the time trend of the first derivative of the total chlorine trend
#' - "cp" - This method uses the strucchange package to identify change points
#' in the first derivative trned and classify values based on median first derivative
#' values between changepoints
#'
#'
#' @export
falling_limb <- function(data, method = c("simple", "hmm", "cp"), value_col, first_deriv, group_col){
  # Dispatch to one of the falling-limb classifiers.
  #
  # data        : data.frame/tibble of chlorine residual results
  # method      : "simple", "hmm" or "cp" (see roxygen details above)
  # value_col   : unquoted column of chlorine values
  # first_deriv : unquoted column of first-derivative values
  # group_col   : unquoted grouping column(s)
  if (!inherits(data, "data.frame")) stop("data must be a data.frame or a tibble")
  # match.arg() collapses the vector default to "simple" and validates
  # user-supplied values; without it the default makes
  # `method == "simple"` a length-3 condition (an error in R >= 4.2),
  # and an unrecognized method fails obscurely with
  # "object 'out' not found".
  method <- match.arg(method)
  if (method == "simple"){
    out <- fl_class_simple(data, value_col = {{value_col}}, first_deriv = {{first_deriv}}, group_col = {{group_col}})
  } else if (method == "hmm"){
    out <- fl_class_hmm(data, value_col = {{value_col}}, first_deriv = {{first_deriv}}, group_col = {{group_col}})
  } else if (method == "cp"){
    out <- fl_class_cp(data, value_col = {{value_col}}, first_deriv = {{first_deriv}}, group_col = {{group_col}})
  }
  return(out)
}
# Classify the falling limb with a two-state mixture model fit to the
# first-derivative values (depmixS4), separately within each group.
# The state with the lower mean derivative is labeled "Falling Limb".
fl_class_hmm <- function(data, value_col, first_deriv, group_col){
  # first_deriv <- rlang::enquo(first_deriv)
  # group_cols <- rlang::enquos(...)
  # nest one sub-data-frame per group so each group gets its own model fit
  data <- dplyr::group_nest(data, dplyr::across({{group_col}}))
  out <- data %>%
    dplyr::mutate(
      data = purrr::map(data, ~{
        # copy the first-derivative column to a fixed name for the
        # model formula below
        .x <- .x %>%
          dplyr::mutate(x = {{first_deriv}})
        # two-component Gaussian mixture on the first derivative
        mod <- depmixS4::mix(
          list(x ~ 1),
          data = .x,
          nstates = 2,
          family = list(stats::gaussian())
        )
        fm <- depmixS4::fit(mod)
        # posterior state assignment per observation
        states <- depmixS4::posterior(fm)
        out <- .x %>%
          dplyr::mutate(
            est_state = as.character(states$state)
          ) %>%
          dplyr::group_by(est_state) %>%
          dplyr::mutate(
            state_avg_deriv = mean(x, na.rm = TRUE)
          ) %>%
          dplyr::ungroup() %>%
          # the state whose average derivative is smallest (most
          # negative) is taken to be the falling limb
          dplyr::mutate(
            falling_limb = ifelse(state_avg_deriv == min(state_avg_deriv), "Falling Limb", "Other")
          ) %>%
          # drop the working columns before returning
          dplyr::select(-c(x, state_avg_deriv, est_state))
        return(out)
      })
    ) %>%
    tidyr::unnest(data)
  return(out)
}
fl_class_simple <- function(data, value_col, first_deriv, group_col){
  # Simple falling-limb classifier: within each group, label an
  # observation "Falling Limb" when its first derivative and the
  # previous observation's first derivative are both negative.
  #
  # Args mirror falling_limb(); first_deriv is the unquoted column of
  # first-derivative values, group_col the (optional) grouping columns.
  output <- data %>%
    dplyr::group_by(dplyr::across({{group_col}})) %>%
    dplyr::mutate(
      falling_limb = dplyr::case_when(
        # use dplyr::lag explicitly: bare lag() can resolve to
        # stats::lag, which returns the *unshifted* values for a plain
        # vector and would silently reduce the rule to a single
        # negative derivative
        {{first_deriv}} < 0 & dplyr::lag({{first_deriv}}) < 0 ~ "Falling Limb",
        TRUE ~ "Other"
      )
    )
  output <- dplyr::ungroup(output)
  return(output)
}
fl_class_cp <- function(data, value_col, first_deriv, group_col){
  # Change-point falling-limb classifier: within each group, find up to
  # 5 structural breaks in the first-derivative series (strucchange),
  # then label every segment whose median derivative is negative as
  # "Falling Limb".
  #
  # Args mirror falling_limb().
  data <- dplyr::group_nest(data, dplyr::across({{group_col}}))
  out <- data %>%
    dplyr::mutate(
      data = purrr::map(data, ~{
        .x <- .x %>%
          dplyr::mutate(x = {{first_deriv}})
        # intercept-only model: breaks are shifts in the mean derivative
        bps <- strucchange::breakpoints(.x$x ~ 1, breaks = 5)
        # lookup table: row index where each segment starts -> segment id
        # (namespace-qualified for consistency with the rest of the file)
        bp_tbl <- dplyr::tibble(
          bp = c(1, bps$breakpoints),
          bp_num = seq_along(c(1, bps$breakpoints))
        )
        out <- .x %>%
          dplyr::mutate(row = dplyr::row_number()) %>%
          dplyr::left_join(
            bp_tbl, by = c("row" = "bp")
          ) %>%
          # propagate the segment id forward to all rows in the segment
          tidyr::fill(bp_num, .direction = "down") %>%
          dplyr::group_by(bp_num) %>%
          dplyr::mutate(bp_group_median = stats::median(x, na.rm = TRUE)) %>%
          dplyr::ungroup() %>%
          dplyr::mutate(falling_limb = ifelse(bp_group_median < 0, "Falling Limb", "Other")) %>%
          # drop the working columns before returning
          dplyr::select(-c(x, row, bp_num, bp_group_median))
        return(out)
      })
    ) %>%
    tidyr::unnest(data)
  return(out)
}
|
0afc5d34780f754043a219f5c93c20a44727d856 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sharpshootR/examples/plotSoilRelationChordGraph.Rd.R | 4b2c54f85abd2c199798c07c653c9aac249f75fe | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 329 | r | plotSoilRelationChordGraph.Rd.R | library(sharpshootR)
### Name: plotSoilRelationChordGraph
### Title: Vizualize Soil Relationships via Chord Diagram.
### Aliases: plotSoilRelationChordGraph
### Keywords: hplots
### ** Examples
## Not run:
##D data(amador)
##D m <- component.adj.matrix(amador)
##D plotSoilRelationChordGraph(m, 'amador')
## End(Not run)
|
980e96dc70d2951046284cd89ee42b5aa8fa1048 | d123ea08fcb9291515e9180ea98ba4c897bbb835 | /man/std_in.Rd | af807d996b5881acbf17c33b77538e277aaf8096 | [] | no_license | cran/SSSR | 33e04f0e52c4e704f232563b4034979d0e2681b3 | dec2dec99f9deede4fa033d0cc025204ede7ccc5 | refs/heads/master | 2020-05-17T03:50:43.143096 | 2011-07-27T00:00:00 | 2011-07-27T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 159 | rd | std_in.Rd | \name{std_in}
\Rdversion{1.1}
\alias{std_in}
\docType{data}
\title{
std_in
}
\description{
A public variable that stores the data read from the STDIN device.
}
|
809ea3ba2de9bfc573cf3fe3d0a430a13a7e7797 | b0756c2b2fe8d84ebe08e6f6366302154631a16d | /R/kmciFunctions.R | 765b98dc4c105a8585a3349e7b8e5dd128c00a5e | [] | no_license | cran/bpcp | 3bd9ec222e7bfebef44657e7bf5a4fe332263efd | 8a4c3eee0e16a13c0af564e326dfd35f5a4469e4 | refs/heads/master | 2022-05-01T09:05:24.289735 | 2022-03-11T22:10:11 | 2022-03-11T22:10:11 | 17,694,872 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 57,873 | r | kmciFunctions.R | ### this file contains most of the functions
### for the kmci package
# uvab: take mean and var of Beta, get back beta parameters
# u,v may be vectors
uvab <- function(u, v) {
  ## Method-of-moments conversion: given the mean u and variance v of a
  ## Beta distribution, return its shape parameters a and b.  Inputs may
  ## be vectors.  Degenerate point masses are handled explicitly:
  ## (u=1, v=0) -> (a=1, b=0) and (u=0, v=0) -> (a=0, b=0).
  a <- u^2 * (1 - u)/v - u
  b <- u * (1 - u)^2/v - 1 + u
  at_one <- u == 1 & v == 0
  at_zero <- u == 0 & v == 0
  a[at_one] <- 1
  b[at_one] <- 0
  a[at_zero] <- 0
  b[at_zero] <- 0
  list(a = a, b = b)
}
# extend qbeta to allow a=0 or b=0 (but not both)
# not needed in R versions >= 3.1.1 (since limits of parameters are defined in qbeta, etc)
#
# Version-conditional definition of qqbeta(), a qbeta() that tolerates
# a == 0 or b == 0 (point masses at 0 and 1 respectively).  From R
# 3.1.1 on, qbeta itself handles these limits, so qqbeta is a plain
# wrapper there.
if (compareVersion(as.character(getRversion()),"3.1.1")<0){
qqbeta<-function(x,a,b){
  # b == 0 is a point mass at 1; a == 0 a point mass at 0
  out<-rep(0,length(a))
  out[b==0]<-1
  out[a==0]<-0
  # only call qbeta where both parameters are strictly positive
  I<-a>0 & b>0
  if (any(I)) out[I]<-qbeta(x,a[I],b[I])
  # added NA for a==0 and b==0
  # this is for thoroughness, it does not come up in 
  # calculations for bpcp
  out[a==0 & b==0]<-NA
  out
}
} else {
  # at or after R Version 3.1.1
  # note that qbeta(c(.2,.8),0,0) gives c(0,1)
  qqbeta<-function(x,a,b){ qbeta(x,a,b) }
}
# get bpcp MC from a and b Beta product parameters
betaprodcimc<-function(a,b,nmc=10^4,alpha=.05){
    ## Monte Carlo confidence limits for cumulative products of
    ## independent Beta(a[i], b[i]) random variables.
    ##   a, b  : vectors of Beta shape parameters (equal length)
    ##   nmc   : number of Monte Carlo replicates
    ##   alpha : two-sided error rate
    ## Value: 2 x length(a) matrix of the alpha/2 (row 1) and
    ## 1 - alpha/2 (row 2) sample quantiles of each partial product.
    np<-length(a)
    # create a matrix of beta distributions (one column per component)
    B<-matrix(rbeta(np*nmc,rep(a,each=nmc),
        rep(b,each=nmc)),nmc,np)
    # turn column j into the running product B[,1]*...*B[,j];
    # seq_len(np)[-1] (empty when np == 1) guards the single-component
    # case, where the original 2:np would index a nonexistent column
    # via c(2, 1)
    for (j in seq_len(np)[-1]){
        B[,j]<-B[,j-1]*B[,j]
    }
    quant<-function(x){ quantile(x,probs=c(alpha/2,1-alpha/2)) }
    apply(B,2,quant)
}
# get bpcp MM from a and b Beta product parameters
# a,b are vectors of parameters
# returns method of moments estimators for
# BP(a[1],b[1]), BP(a[1:2],b[1:2]), ...
betaprodcimm <- function(a, b, alpha = .05) {
  ## Method-of-moments confidence limits for cumulative products of
  ## independent Beta(a[i], b[i]) variables (notation follows Fan, 1991):
  ## the first two moments of each partial product are matched to a
  ## single Beta(cc, dd) distribution, whose quantiles give the limits.
  ## Returns a 2 x length(a) matrix (row 1 = lower, row 2 = upper).
  m1 <- cumprod(a/(a + b))                               # E[product]
  m2 <- cumprod((a * (a + 1))/((a + b) * (a + b + 1)))   # E[product^2]
  cc <- ((m1 - m2) * m1)/(m2 - m1^2)
  dd <- ((m1 - m2) * (1 - m1))/(m2 - m1^2)
  rbind(qqbeta(alpha/2, cc, dd), qqbeta(1 - alpha/2, cc, dd))
}
#betaprodcimc(c(30:20),rep(1,11),nmc=10^6)
#betaprodcimm(c(30:20),rep(1,11))
# Kaplan-Meier estimate with Greenwood variance from right-censored
# data.  time = observed times, status = 1 for death, 0 for censoring.
# keepCens = TRUE keeps a row for every distinct observed time (death
# or censoring); FALSE keeps only times with at least one death.
kmgw.calc<-function(time,status,keepCens=TRUE){
    ## calculate K-M estimator 
    ## if keepCens=TRUE then
    ## output y is all observed times (death or censoring)
    N<-length(time)
    # cross-tabulate: rows = distinct times, columns = status levels
    tab<-table(time,status)
    ## if all died or all censored then tab is 
    ## not right dimensions
    dstat<-dimnames(tab)$status
    if (length(dstat)==2){
        # both statuses present: column "0" = censored, "1" = deaths
        di<-tab[,2]
        ci<-tab[,1]
    } else if (length(dstat)==1){
        if (dstat=="1"){
            # all deaths: no censoring counts
            di<-tab[,1]
            ci<-rep(0,length(tab))
        } else if (dstat=="0"){
            # all censored: no death counts
            ci<-tab[,1]
            di<-rep(0,length(tab))
        }
    } else stop("status should be 0 or 1")
    y<-as.numeric(dimnames(tab)[[1]])
    k<-length(y)
    # ni = number at risk just before each distinct time
    ni<- c(N,N - cumsum(ci)[-k] - cumsum(di)[-k])
    names(ni)<-names(di)
    ## to avoid overflow problems, make ni, di numeric instead
    ## of interger
    ni<-as.numeric(ni)
    di<-as.numeric(di)
    ci<-as.numeric(ci)
    # product-limit estimate and Greenwood variance
    KM<-cumprod( (ni-di)/ni )
    gw<- KM^2 * cumsum( di/(ni*(ni-di)) )
    ## define variance estimator as 0 after data
    # (also avoids NaN from the 0/0 term when di == ni at the last time)
    gw[KM==0]<-0
    if (keepCens){
        I<-rep(TRUE,length(y))
    } else {
        I<-di>0
    }
    ## output
    ## time=vector of times
    ## ci[i]=number censored at time[i]
    ## di[i]=number failed at time[i]
    ## ni[i]=number at risk just before time[i]
    ## KM[i]=Kaplan-Meier at time[i]
    ## gw[i]=Greenwood variance at time[i]
    out<-list(time=y[I],ci=ci[I],di=di[I],ni=ni[I],
        KM=KM[I],gw=gw[I])
    out
}
kmcilog <- function(x, alpha = 0.05) {
    ## Pointwise normal-approximation CI for the Kaplan-Meier estimate
    ## on the log scale, exp(log(S) -/+ z * se(log S)), using the
    ## Greenwood variance.  `x` is the output of kmgw.calc()
    ## (components time, KM, gw).  Returns a list with time, surv,
    ## lower, upper and conf.level.
    z <- qnorm(1 - alpha/2)
    se_log <- sqrt(x$gw/x$KM^2)   # delta-method s.e. of log(KM)
    lo <- exp(log(x$KM) - se_log * z)
    lo[x$KM == 0] <- 0            # KM = 0 gives NaN: pin lower at 0
    hi <- pmin(1, exp(log(x$KM) + se_log * z))
    hi[is.na(hi)] <- 1            # NaN variance terms: pin upper at 1
    list(time = x$time, surv = x$KM,
        lower = lo, upper = hi, conf.level = 1 - alpha)
}
# intChar creates a character vector describing
# intervals based on whether L and R is included
# or not
intChar <- function(L, R, Lin = rep(FALSE, length(L)),
    Rin = rep(TRUE, length(L)), digits = NULL) {
    ## Build character labels such as "(2,3]" for intervals with left
    ## ends L and right ends R.  Lin/Rin say whether each endpoint is
    ## included ("[" / "]") or not ("(" / ")"); an infinite right end
    ## is always printed open.  When digits is NULL, a rounding
    ## precision is chosen so that adjacent distinct endpoints remain
    ## distinguishable after rounding.
    if (length(L) != length(R)) {
        stop("length of L and R must be the same")
    }
    if (any(R - L < 0)) stop("L must be less than R")
    left_bracket <- rep("(", length(L))
    left_bracket[Lin] <- "["
    right_bracket <- rep(")", length(L))
    right_bracket[Rin] <- "]"
    right_bracket[R == Inf] <- ")"
    if (is.null(digits)) {
        ## smallest gap between distinct endpoints determines how many
        ## digits are needed, e.g. a minimum gap of .03 -> 2 digits
        min_gap <- min(diff(sort(unique(c(L, R)))))
        if (min_gap < 1) {
            digits <- ceiling(-log10(min_gap))
        } else {
            digits <- 0
        }
    }
    paste(left_bracket, round(L, digits), ",",
        round(R, digits), right_bracket, sep = "")
}
#intChar(c(2,4,65),c(3,5,Inf))
# get marks for right censoring times
getmarks <- function(time, status) {
    ## Censoring-mark times for plotting: every observed time with at
    ## least one censored subject (including times that also had
    ## failures -- fix of July 29, 2015).
    fit <- kmgw.calc(time, status)
    fit$time[fit$ci > 0]
}
## save time if already have the results from kmgw.calc, use
getmarks.x <- function(x) {
    ## Same as getmarks(), but starting from a precomputed kmgw.calc()
    ## result `x`: the times with at least one censored observation
    ## (including times that also had failures).
    with(x, time[ci > 0])
}
# Borkowf hybrid-variance confidence intervals for the Kaplan-Meier
# curve, computed on the 2k+1 intervals induced by the k distinct
# observed times (each time contributes the point [t,t] and the open
# interval after it, plus the initial interval (0, t1)).
borkowf.calc<-function(x,type="log",alpha=.05){
    ## calculate the Borkowf CIs
    ## type="log" ... do log transformation
    ## type="logs"...log transformation with shrinkage
    ## type="norm" ... usual method, no log transformation
    ## type="norms"...usual method with shrinkage
    ## x is output from kmgw function
    ## take k values and expand for the 2k+1 intervals
    k<-length(x$time)
    # deaths counted only on the point intervals (odd positions after
    # the prepend below); censorings only on the open intervals
    d<-rep(x$di,each=2)
    d[2*(1:k)]<-0
    cens<-rep(x$ci,each=2)
    cens[2*(1:k)-1]<-0
    ## add values for interval (0, t1): d=0,n=N,S=1,gw=0
    d<-c(0,d)
    cens<-c(0,cens)
    n<-c(x$ni[1],rep(x$ni,each=2))
    S<-c(1,rep(x$KM,each=2))
    gw<-c(0,rep(x$gw,each=2))
    y<-x$time
    L<-c(0,rep(y,each=2))
    R<-c(rep(y,each=2),Inf)
    Lin<-Rin<-c(rep(c(FALSE,TRUE),k),FALSE)
    #Interval<-intChar(L,R,Lin,Rin)
    #Lint<-rep("(",2*k+1)
    #Lint[Lin]<-"["
    #Rint<-rep(")",2*k+1)
    #Rint[Rin]<-"]"
    #Interval<-paste(Lint,L,",",R,Rint,sep="")
    # me/mc: cumulative number of events/censorings up to each interval
    me<-cumsum(d)
    mc<-cumsum(cens)
    # n is redefined here as the total sample size
    n<-x$ni[1]
    # bmax: upper bound on S given me events have occurred
    bmax<- 1 - me/n
    # w: the "working" survival value used in the hybrid variance --
    # KM itself when KM >= .5, else .5, capped at bmax
    w<-rep(NA,length(d))
    KM<-S
    w[.5<=KM]<- KM[.5<=KM]
    w[KM<.5 & .5<=bmax]<- .5
    w[bmax<.5]<-bmax[bmax<.5]
    # hybrid variance: binomial-type variance among the n - mc subjects
    # not yet censored
    VarH<- w*(1-w)/(n - mc)
    ## shrunken KM
    # shrinkage pulls the estimate away from 0 and 1 by 1/(2n)
    KMs<- KM*(1- 1/n) + 1/(2*n)
    ws<- w*(1- 1/n) + 1/(2*n)
    VarHs<- ws*(1-ws)/(n - mc)
    Za<-qnorm(1-alpha/2)
    # the four interval types: plain normal and log-transformed, each
    # with and without shrinkage
    lowerNorm<- KM - Za*sqrt(VarH)
    upperNorm<- KM + Za*sqrt(VarH)
    lowerLog<- KM * exp(- Za*sqrt(VarH)/KM )
    upperLog<- KM * exp(+ Za*sqrt(VarH)/KM )
    lowerNorms<- KMs - Za*sqrt(VarHs)
    upperNorms<- KMs + Za*sqrt(VarHs)
    lowerLogs<- KMs * exp(- Za*sqrt(VarHs)/KMs )
    upperLogs<- KMs * exp(+ Za*sqrt(VarHs)/KMs )
    # standard errors (computed but not returned in the current output
    # list; kept from an earlier data.frame-style output)
    SEG<-sqrt(gw)
    SEH<-sqrt(VarH)
    SEHs<-sqrt(VarHs)
    n.minus.mc<-n-mc
    # clamp limits into [0, 1], mapping NA/NaN to the conservative end
    fixup<-function(x){ x[x>1 | is.na(x)]<-1; x }
    fixlo<-function(x){ x[x<0 | is.na(x)]<-0;x }
    lowerNorm<-fixlo(lowerNorm)
    lowerNorms<-fixlo(lowerNorms)
    lowerLog<- fixlo(lowerLog)
    lowerLogs<-fixlo(lowerLogs)
    upperNorm<-fixup(upperNorm)
    upperNorms<-fixup(upperNorms)
    upperLog<-fixup(upperLog)
    upperLogs<-fixup(upperLogs)
    #out<-data.frame(time,n.minus.mc,SEG,SEH,SEHs,
    #    ci,di,ni,me,mc,KM,bmax,w,KMs,ws,VarH,VarHs,
    #    lowerNorm,upperNorm,lowerLog,upperLog,
    #    lowerNorms,upperNorms,lowerLogs,upperLogs)
    # out<-list(y=x$time,d=d,n=n,S=S,gw=gw)
    # pick the requested interval type; the "s" variants report the
    # shrunken estimate as surv
    if (type=="log"){
        surv<-KM
        lower<-lowerLog
        upper<-upperLog
    } else if (type=="logs"){
        surv<-KMs
        lower<-lowerLogs
        upper<-upperLogs
    } else if (type=="norm"){
        surv<-KM
        lower<-lowerNorm
        upper<-upperNorm
    } else if (type=="norms"){
        surv<-KMs
        lower<-lowerNorms
        upper<-upperNorms
    }
    #out<-data.frame(L=L,Lin=Lin,R=R,Rin=Rin,
    #    SEG=SEG,SEH=SEH,SEHs=SEHs,
    #    d=d,cens=cens,n=n,surv=surv,KM=KM,
    #    me=me,mc=mc,bmax=bmax,w=w,KMs=KMs,
    #    ws=ws,VarH=VarH,VarHs=Vars,gw=gw,
    #    lowerNorm=lowerNorm,upperNorm=upperNorm,
    #    lowerLog=lowerLog,upperLog=upperLog,
    #    lowerNorms=lowerNorms,upperNorms=upperNorms,
    #    lowerLogs=lowerLogs,
    #    upperLogs=upperLogs,lower=lower,
    #    upper=upper,row.names=Interval)
    #
    # getmarks.x gets censoring marks for plotting
    out<-list(cens=getmarks.x(x),L=L,Lin=Lin,R=R,Rin=Rin,
        surv=surv,lower=lower,upper=upper,conf.level=1-alpha)
    out
}
kmciBorkowf <- function(time, status, type = "log", alpha = 0.05) {
    ## Borkowf-style confidence intervals for the Kaplan-Meier curve:
    ## compute the KM/Greenwood pieces, then the hybrid-variance CI of
    ## the requested type ("log", "logs", "norm", "norms"; the "s"
    ## variants use the shrunken KM estimate).  Returns a kmciLR object.
    km <- kmgw.calc(time, status, keepCens = TRUE)
    structure(borkowf.calc(km, type, alpha), class = "kmciLR")
}
# Thomas-Grunkemeier likelihood-ratio confidence interval for S(tstar)
# at a single time tstar: invert the empirical likelihood ratio test by
# solving -2 log R(lambda) = chisq(1 - alpha, 1) for the tilting
# parameter lambda on each side of 0.
kmci1TG<-function(time,status,tstar,alpha=.05){
    x<-kmgw.calc(time,status,keepCens=FALSE)
    # only death times at or before tstar where not everyone at risk
    # died (ni > di) contribute to the likelihood ratio
    I<- x$time <= tstar & (x$ni>x$di)
    if (any(I)){
        ni<-x$ni[I]
        di<-x$di[I]
        # log empirical likelihood ratio as a function of lambda
        R<-function(lambda){
            sum( (ni-di)*log(1+lambda/(ni-di)) - 
                ni*log(1+lambda/ni) )
        }
        Chisq<- qchisq(1-alpha,1)
        # vectorized root function for uniroot: -2 log R - chisq
        rootfunc<-function(lambda){
            nlam<-length(lambda)
            out<-rep(NA,nlam)
            for (i in 1:nlam){
                out[i]<- -2*R(lambda[i]) - Chisq
            }
            out
        }
        # lambda > 0 tilts S upward (upper limit); lambda in
        # (-min(ni - di), 0) tilts downward (lower limit)
        lam1<-uniroot(rootfunc,c(0,10^6))$root
        lam2<-uniroot(rootfunc,c(-min(ni-di),0))$root
        Supper<- prod( (ni+lam1-di)/(ni+lam1) )
        Slower<- prod( (ni+lam2-di)/(ni+lam2) )
        KM<-prod( (ni-di)/(ni) )
    } else {
        ### no time<tstar
        KM<-1
        Supper<-1
        Slower<-1
    }
    out<-list(time=tstar,surv=KM,upper=Supper,lower=Slower)
    class(out)<-"kmci"
    out
}
kmciTG <- function(time, status, alpha = .05) {
    ## Thomas-Grunkemeier likelihood-ratio confidence intervals
    ## evaluated at every distinct death time (one kmci1TG call each).
    ## Returns a kmci object with censoring marks for plotting.
    dtimes <- sort(unique(time[status == 1]))
    k <- length(dtimes)
    surv <- lower <- upper <- rep(NA, k)
    for (i in seq_len(k)) {
        one <- kmci1TG(time, status, dtimes[i], alpha)
        surv[i] <- one$surv
        lower[i] <- one$lower
        upper[i] <- one$upper
    }
    ## if the largest observation is censored, carry the last values
    ## forward so the curve extends to that censoring time
    if (max(time) > max(dtimes)) {
        dtimes <- c(dtimes, max(time))
        surv <- c(surv, surv[k])
        upper <- c(upper, upper[k])
        lower <- c(lower, lower[k])
    }
    out <- list(cens = getmarks(time, status),
        time = dtimes, surv = surv,
        upper = upper, lower = lower, conf.level = 1 - alpha)
    class(out) <- "kmci"
    out
}
# Constrained Kaplan-Meier estimate: the tilted product-limit curve
# over times <= tstar that satisfies the constraint S(tstar) = pstar.
# x is the output of kmgw.calc(); the tilting parameter lambda is found
# by root-finding.
kmConstrain<-function(tstar,pstar,x,alpha=.05){
    ## KM given that S(t)=pstar
    ## first find index such that 
    ## 
    I<- x$time <= tstar
    if (any(I)){
        nj<-x$ni[I]
        dj<-x$di[I]
        # tilted product-limit estimate minus the target pstar,
        # vectorized in lambda for uniroot
        rootfunc<-function(lambda){
            nl<-length(lambda)
            out<-rep(NA,nl)
            for (i in 1:nl){
                out[i]<- prod( (nj + lambda[i] - dj)/
                    (nj+lambda[i]) ) - pstar
            }
            out
        }
        # lower bracket keeps every factor positive; upper bracket is a
        # large multiple of the initial risk set size
        lambda<-uniroot(rootfunc,c(-min(nj-dj),
            10^3 * nj[1]))$root
        ## now calculate constrained variance
        pbar<-(nj + lambda -dj)/(nj+lambda)
        Sbar<- cumprod( pbar )
        out<-list(time=x$time[I],Sc=Sbar)
    } else {
        # no observed times before tstar: the constrained curve is the
        # single point (tstar, pstar)
        out<-list(time=tstar,Sc=pstar)
    }
    out
}
rejectFromInt <- function(theta, interval, thetaParm = FALSE) {
    ## One- and two-sided rejection indicators from a two-element
    ## interval.
    ##   thetaParm = TRUE : theta is the null parameter value and
    ##     `interval` is a confidence interval (reject when theta falls
    ##     outside it; theta below the CI means the estimate exceeded
    ##     the null value).
    ##   thetaParm = FALSE: theta is an estimate and `interval` holds
    ##     null-distribution quantiles (reject when the estimate falls
    ##     outside them).
    ## Returns c(estGTnull, estLTnull, two.sided) with 0/1 entries; the
    ## two-sided entry is the sum of the two one-sided ones.
    if (length(interval) != 2) stop("interval should be length 2")
    lo <- min(interval)
    hi <- max(interval)
    reject <- c(estGTnull = 0, estLTnull = 0, two.sided = 0)
    if (thetaParm) {
        reject["estGTnull"] <- as.numeric(theta < lo)
        reject["estLTnull"] <- as.numeric(theta > hi)
    } else {
        reject["estGTnull"] <- as.numeric(theta > hi)
        reject["estLTnull"] <- as.numeric(theta < lo)
    }
    reject["two.sided"] <- reject[["estGTnull"]] + reject[["estLTnull"]]
    reject
}
# Unconstrained nonparametric bootstrap test of H0: S(tstar) = pstar.
# Resamples (time, status) pairs M times, recomputes the KM estimate at
# tstar, and rejects when pstar falls outside the bootstrap percentile
# interval.
kmtestBoot<-function(time,status,tstar,pstar,M=1000,alpha=0.05){
    x<-kmgw.calc(time,status,keepCens=FALSE)
    ### pick out KM at tstar
    # step-function lookup: KM at the last death time <= tstar, or 1
    # if tstar precedes the first death
    Sx<-function(x,tstar){
        I<-x$time<=tstar
        if (!any(I)) out<-1
        else out<-x$KM[max((1:length(x$KM))[I])]
        out
    }
    # get observed value
    # NOTE(review): Sobs is computed but not used below -- the decision
    # compares pstar to the bootstrap quantiles directly
    Sobs<-Sx(x,tstar)
    n<-length(time)
    SB<-rep(NA,M)
    for (i in 1:M){
        # resample subjects with replacement and recompute KM(tstar)
        ii<-sample(n,replace=TRUE)
        temp<-kmgw.calc(time[ii],status[ii],keepCens=FALSE)
        SB[i]<-Sx(temp,tstar)
    }
    ### use type=4 quantile so that equals value defined in 
    ### Barber and Jennison S[M*0.025] = S[25] when M=1000
    quantilesNullDistribution<-quantile(SB,c(alpha/2,
        1-alpha/2),type=4)
    reject<-rejectFromInt(pstar,quantilesNullDistribution,
        thetaParm=TRUE)
    reject
}
# Constrained parametric-style bootstrap test of H0: S(tstar) = pstar.
# Simulates survival times from the constrained KM (which satisfies
# S(tstar) = pstar) and censoring times from the reverse-status KM,
# then compares the observed KM(tstar) to the simulated null
# distribution.
kmtestConstrainBoot<-function(time,status,tstar,pstar,M=1000,alpha=0.05){
    x<-kmgw.calc(time,status,keepCens=FALSE)
    ### pick out KM at tstar
    # step-function lookup: KM at the last death time <= tstar, or 1
    # if tstar precedes the first death
    Sx<-function(x,tstar){
        I<-x$time<=tstar
        if (!any(I)) out<-1
        else out<-x$KM[max((1:length(x$KM))[I])]
        out
    }
    Sobs<-Sx(x,tstar)
    # constrained KM under the null (S(tstar) = pstar)
    xcon<-kmConstrain(tstar,pstar,x)
    ## calculate censoring distribution by reversing status
    xcens<-kmgw.calc(time,1-status,keepCens=FALSE)
    n<-length(time)
    xSurv<-c(xcon$time,Inf)
    ## get density function, add extra element 
    ## for all survival times after tstar
    dSurv<- -diff(c(1,xcon$Sc))
    dSurv<- c(dSurv,1-sum(dSurv))
    ## in case last element is death, so KM of 
    ## censoring distribution does not 
    ## go to zero, add extra element at max(time)+1
    xCens<- c(xcens$time,max(time)+1)
    dCens<- -diff(c(1,xcens$KM))
    dCens<-c(dCens,1-sum(dCens))
    SB<-rep(NA,M)
    for (i in 1:M){
        # draw censoring times (degenerate when dCens has one atom)
        if (length(dCens)>1){
            Ci<-sample(xCens,n,prob=dCens,replace=TRUE)
        } else Ci<-rep(xCens,n)
        # draw survival times from the constrained null distribution
        Xi<-sample(xSurv,n,prob=dSurv,replace=TRUE)
        # observed time is the minimum; status 1 iff death observed
        Time<-Xi
        Time[Xi>Ci]<-Ci[Xi>Ci]
        StatusTF<- Xi==Time
        Status<-rep(0,n)
        Status[StatusTF]<-1
        temp<-kmgw.calc(Time,Status)
        ### pick out KM at tstar
        SB[i]<-Sx(temp,tstar)
    }
    ### use type=4 quantile so that equals value defined 
    ### in Barber and Jennison S[M*0.025] = S[25] when M=1000
    quantilesNullDistribution<-quantile(SB,c(alpha/2,
        1-alpha/2),type=4)
    reject<-rejectFromInt(Sobs,quantilesNullDistribution,
        thetaParm=FALSE)
    reject
}
# Constrained-beta test of H0: S(tstar) = pstar given kmgw.calc output
# x: compute the constrained KM variance under the null, approximate
# the null distribution of the KM estimate by a Beta with matching
# moments (via uvab), and reject when the observed KM(tstar) falls
# outside its alpha/2 and 1 - alpha/2 quantiles.
kmConstrainBeta.calc<-function(tstar,pstar,x,alpha=.05){
    ## KM given that S(t)=pstar
    ## first find index such that 
    ## 
    if (length(tstar)>1) stop("tstar must be a scalar")
    I<- x$time <= tstar
    nj<-x$ni[I]
    dj<-x$di[I]
    if (all(I==FALSE)){
        ### no x$time before tstar
        ### then dj=0, and it does not matter 
        ### what nj and lamba are since when dj=0 
        ### pbar=1 for all lambda 
        ### and qbar=0
        ### so vc=0 and just define Sobs=1 (KM before 
        ### first death), 
        ### lower=1 and upper=1
        Sobs<-1
        qlower<-1
        qupper<-1
    } else {
        # tilting parameter lambda such that the tilted product-limit
        # estimate equals pstar at tstar
        rootfunc<-function(lambda){
            pstar - prod( (nj + lambda - dj)/(nj+lambda) )
        }
        lambda<-uniroot(rootfunc,c(-min(nj-dj),
            10^3 * nj[1]))$root
        ## now calculate constrained variance
        pbar<-(nj + lambda -dj)/(nj+lambda)
        qbar<-1-pbar
        Sbar<- cumprod( pbar )
        Shat<-x$KM[I]
        ## use Sbar and Shat just before tj, so add 1 
        ## to beginning and delete last 
        ns<-length(Sbar)
        Sbar<-c(1,Sbar[-ns])
        Shat<-c(1,Shat[-ns])
        # Greenwood-type variance of the constrained estimate
        vc<- (pstar^2)*sum( (Shat*qbar)/(nj*Sbar*pbar) )
        # Beta parameters with mean pstar and variance vc
        abc<-uvab(pstar,vc)
        ### beta confidence interval 
        qlower<-qqbeta(alpha/2,abc$a,abc$b)
        qupper<-qqbeta(1-alpha/2,abc$a,abc$b)
        ### pick out KM at tstar
        # step-function lookup: KM at the last death time <= tstar
        Sx<-function(x,tstar){
            I<-x$time<=tstar
            if (!any(I)) out<-1
            else out<-x$KM[max((1:length(x$KM))[I])]
            out
        }
        Sobs<-Sx(x,tstar)
    }
    quantilesNullDistribution<-c(qlower,qupper)
    reject<-rejectFromInt(Sobs,quantilesNullDistribution,
        thetaParm=FALSE)
    reject
}
kmtestConstrainBeta <- function(time, status, tstar, pstar, alpha = .05) {
    ## Constrained-beta test of H0: S(tstar) = pstar from raw
    ## right-censored data (see kmConstrainBeta.calc for the details).
    kmConstrainBeta.calc(tstar, pstar,
        kmgw.calc(time, status, keepCens = FALSE), alpha)
}
#x<-kmgw.calc(leuk$time,leuk$status,keepCens=FALSE)
#kmtestConstrainBeta(leuk$time,leuk$status,10,.5)
#kmtestConstrainBoot(leuk$time,leuk$status,10,.5)
# Strawderman-Wells confidence intervals for the survival curve via an
# Edgeworth-type corrected interval for the Nelson-Aalen cumulative
# hazard, transformed through S = exp(-Lambda).
kmciSW<-function(time,status,alpha=.05){
    ## This function gives confidence intervals following
    ## Strawderman and Wells (1997, JASA, 1356-1374)
    ## notation follows 
    ## Strawderman, Parzen, and Wells (1997, 
    ## Biometrics 1399-1415
    ## 
    ## for this method we need the Nelson-Aalen estimator 
    x<-kmgw.calc(time,status,keepCens=FALSE)
    ## here we allow grouped survival data, and 
    ## use the usual N-A estimator for it
    Lambda<-cumsum(x$di/x$ni)
    ## eq 2: sig= sigmahat_A
    sig<- sqrt(cumsum(1/x$ni^2))
    ## eq 10: kappa
    kappa<- (1/sig^3) * cumsum(1/x$ni^3)
    ## eq 9: we need to use this twice, once for the 
    ## lower interval, once for upper
    ## first lower interval (upper for Lambda, lower for S)
    # Za = qnorm(alpha/2) is negative: the corrected Lambda limit is
    # larger, giving the lower S limit after exp(-Lambda)
    Za<- qnorm(alpha/2)
    LambdaSWlower<-Lambda - sig*(Za + 
        (sig/4 - kappa/3)*Za^2 - (sig/4+kappa/6))
    Slower<-exp(-LambdaSWlower)
    ## now upper (lower for Lambda, upper for S)
    Za<- qnorm(1-alpha/2)
    LambdaSWupper<-Lambda - sig*(Za + 
        (sig/4 - kappa/3)*Za^2 - (sig/4+kappa/6))
    Supper<-exp(-LambdaSWupper)
    ## if largest observation is censored, then need to add 
    ## on extra value at the end
    ## so that the KM plots all the way until the last 
    ## censored observation
    ## lower and upper also stay the same out to the 
    ## last censoring value
    if (max(time)>max(x$time)){
        tmax<-max(time)
        k<-length(x$time)
        x$time<-c(x$time,tmax)
        x$KM<-c(x$KM,x$KM[k])
        Slower<-c(Slower,Slower[k])
        Supper<-c(Supper,Supper[k])
    }
    # surv reports the KM estimate; the Nelson-Aalen pieces (Lambda,
    # SNA, corrected Lambda limits) are returned for reference
    out<-list(cens=getmarks(time,status),time=x$time,
        surv=x$KM,lower=Slower,upper=Supper,
        conf.level=1-alpha,
        ni=x$ni,di=x$di,Lambda=Lambda,SNA=exp(-Lambda),
        LambdaLower=LambdaSWlower,LambdaUpper=LambdaSWupper)
    class(out)<-"kmci"
    out
}
kmtestBinomial <- function(time, status, cens, t0, S0, alpha = 0.05) {
    ## Exact binomial test of H0: S(t0) = S0 using only subjects whose
    ## potential censoring time (`cens`) exceeds t0, so their status at
    ## t0 is fully observed.
    keep <- cens > t0
    ## with Delta = 0 and stype = "km", bpcp reduces to the exact
    ## binomial interval; slower than a direct binomial computation but
    ## easier to program
    fit <- bpcp(time[keep], status[keep], nmc = 0, alpha = alpha,
        Delta = 0, stype = "km")
    class(fit) <- "kmciLR"
    sci <- StCI(fit, t0)
    rejectFromInt(S0, sci[3:4], thetaParm = TRUE)
}
# Run every implemented test of H0: S(t0) = S0 on one data set and
# collect the rejection indicators in a 10 x 3 matrix (rows = methods,
# columns = estGTnull / estLTnull / two.sided from rejectFromInt).
# cens (potential censoring times) is needed only for the binomial
# method; M = 0 skips the constrained bootstrap, NMC = 0 skips the
# Monte Carlo beta-product method (those rows stay NA when skipped).
kmtestALL<-function(time,status,t0,S0,cens=NULL,M=1000,NMC=10^5,alpha=0.05){
    ### find reject for normal log transform method
    rnormlog<-function(){
        x<-kmcilog(kmgw.calc(time,status,keepCens=FALSE),alpha)
        sci<-StCI(x,tstar=t0)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ### Strawderman-Wells method
    rSW<-function(){
        x<-kmciSW(time,status,alpha)
        sci<-StCI(x,t0)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ### Borkowf method
    rBlog<-function(){
        x<-kmciBorkowf(time,status,type="log",alpha)
        sci<-StCI(x,t0)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ### Borkowf method shrinkage
    rBlogs<-function(){
        x<-kmciBorkowf(time,status,type="logs",alpha)
        sci<-StCI(x,t0)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ### constrained bootstrap method
    rcboot<-function(){
        kmtestConstrainBoot(time,status,t0,S0,M,alpha=alpha)
    }
    ### constrained beta method
    rcbeta<-function(){
        kmtestConstrainBeta(time,status,t0,S0,alpha)
    }
    ### binomial method
    # (local function shadows stats::rbinom; harmless here since the
    # random-number generator is never called inside kmtestALL)
    rbinom<-function(){
        kmtestBinomial(time,status,cens,t0,S0,alpha)
    }
    ### likelihood ratio (Thomas and Grunkemeier) method
    rTG<-function(){
        sci<-kmci1TG(time,status,t0,alpha)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ### Repeated Beta method with method of moments
    rRBmm<-function(){
        x<-bpcp(time,status,nmc=0,alpha)
        sci<-StCI(x,t0)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ### Repeated Beta method with Monte Carlo
    rRBmc<-function(){
        # forward alpha (previously hard-coded to .05, which silently
        # ignored the user-supplied level, unlike every other method)
        x<-bpcp(time,status,nmc=NMC,alpha=alpha)
        sci<-StCI(x,t0)
        reject<-rejectFromInt(S0,c(sci$lower,sci$upper),
            thetaParm=TRUE)
        reject
    }
    ## get names for rejection values from rnormlog()
    normlog<-rnormlog()
    Rejections<-matrix(NA,10,3,dimnames=list(
        c("normlog","SW","Blog","Blogs","cbeta",
        "TG","cboot","binom","RBmm","RBmc"),
        names(normlog)))
    Rejections["normlog",]<-normlog
    Rejections["SW",]<-rSW()
    Rejections["Blog",]<-rBlog()
    Rejections["Blogs",]<-rBlogs()
    Rejections["cbeta",]<-rcbeta()
    Rejections["TG",]<-rTG()
    Rejections["RBmm",]<-rRBmm()
    # optional methods: run only when their inputs are supplied
    if (!is.null(cens)) Rejections["binom",]<-rbinom()
    if (M>0) Rejections["cboot",]<-rcboot()
    if (NMC>0) Rejections["RBmc",]<-rRBmc()
    Rejections
}
#kmtestALL(1:20,rep(1,20),10,.9)
getDefault.mark.time <- function(inmt, inx) {
    ## Resolve a NULL mark.time argument from the fitted object `inx`:
    ## objects without an stype component (or with stype "km") get
    ## censoring marks; stype "mue" suppresses them.  A non-NULL inmt
    ## is returned unchanged.
    if (!is.null(inmt)) {
        return(inmt)
    }
    if (is.null(inx$stype)) TRUE else inx$stype == "km"
}
# plot() method for kmciLR objects: draw the survival step function
# over the intervals (L, R] with its pointwise confidence limits.
# linetype = "both"/"surv"/"ci" selects which curves are drawn;
# mark.time = NULL is resolved from the object's stype.
plot.kmciLR<-function(x,XLAB="time",YLAB="Survival",
    YLIM=c(0,1),ciLTY=2,
    ciCOL=gray(.8),mark.time=NULL,linetype="both",...){
    mark.time<-getDefault.mark.time(mark.time,x)
    ### if xlab, ylab, or ylim is NOT given explicitly, 
    ### then replace with default XLAB, YLAB or YLIM
    ### so we need to get the ... from the call
    ### if xlab, ylab or ylim are not in ... then add 
    ### the default
    ### then when we do the plot, we use the call function 
    ### so we can use anything 
    ### else that was in the ...
    md<-match.call(expand.dots=FALSE)
    dots<-as.list(md)[["..."]]
    class(dots)<-"list"
    if (is.null(dots[["xlab"]])) dots$xlab<-XLAB
    if (is.null(dots[["ylab"]])) dots$ylab<-YLAB
    if (is.null(dots[["ylim"]])) dots$ylim<-YLIM
    # set up an empty plot region covering all interval endpoints
    do.call("plot",c(list(
        x=c(x$L,x$R[x$R<Inf]),
        y=c(x$surv,x$surv[x$R<Inf]),
        type="n"), dots) )
    #plot(c(x$L,x$R[x$R<Inf]),c(x$surv,x$surv[x$R<Inf]),
    #    ylim=YLIM,type="n",xlab=XLAB,ylab=YLAB,...)
    n<-length(x$L)
    if (linetype=="both" | linetype=="surv"){
        ## horizontal KM
        segments(x$L,x$surv,x$R,x$surv,...)
        ## vertical lines connecting KM estimates
        segments(x$L[-1],x$surv[-1],x$R[-n],x$surv[-n],...)
        ### add crosses for censored objects
        if (mark.time){
            xcens<-x$cens
            if (is.null(xcens)){
                ## get censoring times when x$cens does 
                ## not exist
                # (survfit-style objects store n.censor/time instead)
                if (!is.null(x$n.censor) & !is.null(x$time)){
                    xcens<- x$time[x$n.censor>0]
                } else {
                    warning("censoring times not plotted")
                    xcens<-numeric(0)
                }
            }
            ## if no censoring times, then xcens=numeric(0)
            if (length(xcens)>0){
                out<-StCI(x,xcens)
                points(out$time,out$survival,pch=3)
            }
        }
    }
    if (linetype=="both" | linetype=="ci"){
        # lower/upper limits: horizontal pieces plus connecting
        # vertical pieces, in the CI line type and color
        segments(x$L,x$lower,x$R,
            x$lower,lty=ciLTY,col=ciCOL,...)
        segments(x$L[-1],x$lower[-1],x$R[-n],
            x$lower[-n],lty=ciLTY,col=ciCOL,...)
        segments(x$L,x$upper,x$R,
            x$upper,lty=ciLTY,col=ciCOL,...)
        segments(x$L[-1],x$upper[-1],x$R[-n],
            x$upper[-n],lty=ciLTY,col=ciCOL,...)
    }
}
# ggplot2-based plot() method for kmciLRtidy objects: survival curve
# with a shaded confidence ribbon.  A 5-column tidy frame indicates a
# grouped fit, which is drawn with one linetype per group.
# NOTE(review): ggplot2::aes_string() is soft-deprecated in current
# ggplot2; consider aes(.data[["time"]], ...) -- confirm the supported
# ggplot2 version before changing.
plot.kmciLRtidy <- function(x, ...) {
  tidyout <- tidykmciLR(x)
  if (ncol(tidyout) == 5) {
    # grouped: map linetype to group; legend titled with the original
    # grouping variable name stored on the first component
    ggplot(tidyout, aes_string(x = "time", y = "surv", ymin = "lower", ymax = "upper", linetype = "group")) +
      geom_line() + geom_ribbon(alpha = .2) + labs(linetype = x[[1]]$groupVarName) + xlab("Time") + ylab("Survival")
  }
  else {
    ggplot(tidyout, aes_string(x = "time", y = "surv", ymin = "lower", ymax = "upper")) +
      geom_line() + geom_ribbon(alpha = .2) + xlab("Time") + ylab("Survival")
  }
}
# Base-graphics plot() method for grouped kmciLR fits: one survival
# curve per group (distinguished by line width) with confidence limits.
plot.kmciLRgroup <- function(x,XLAB="Time",YLAB="Survival",
                        YLIM=c(0,1),ciLTY=2,
                        ciCOL=gray(.8), linetype="both",...) {
  tidyout <- tidykmciLR(x)
  # same dots-handling trick as plot.kmciLR: fill in xlab/ylab/ylim
  # defaults only when the caller did not supply them
  md<-match.call(expand.dots=FALSE)
  dots<-as.list(md)[["..."]]
  class(dots)<-"list"
  if (is.null(dots[["xlab"]])) dots$xlab<-XLAB
  if (is.null(dots[["ylab"]])) dots$ylab<-YLAB
  if (is.null(dots[["ylim"]])) dots$ylim<-YLIM
  do.call("plot",c(list(
    x=c(tidyout$time,tidyout$time[tidyout$time<Inf]),
    y=c(tidyout$surv,tidyout$surv[tidyout$time<Inf]),
    type="n"), dots) )
  #plot(c(x$L,x$R[x$R<Inf]),c(x$surv,x$surv[x$R<Inf]),
  #    ylim=YLIM,type="n",xlab=XLAB,ylab=YLAB,...)
  n<-length(tidyout$time)
  #If a group variable is provided, change the line width based on the level of the treatment variable
  #and provide a legend
  #If no group variable is provided, don't change the line width
  if (linetype=="both" | linetype=="surv"){
    # NOTE(review): these segments() calls use identical start and end
    # coordinates (x0 == x1, y0 == y1), so they draw degenerate
    # (zero-length) segments rather than horizontal steps as in
    # plot.kmciLR -- verify this is the intended rendering
    segments(tidyout$time,tidyout$surv,tidyout$time,tidyout$surv, lwd=as.numeric(factor(tidyout$group)), ...)
    segments(tidyout$time[-1],tidyout$surv[-1],tidyout$time[-n],tidyout$surv[-n], lwd=as.numeric(factor(tidyout$group)), ...)
    legend("topright", legend = unique(tidyout$group), lwd = unique(as.numeric(factor(tidyout$group))))
  }
  if (linetype=="both" | linetype=="ci"){
    segments(tidyout$time,tidyout$lower,tidyout$time,
             tidyout$lower,lty=ciLTY,col=ciCOL,...)
    segments(tidyout$time[-1],tidyout$lower[-1],tidyout$time[-n],
             tidyout$lower[-n],lty=ciLTY,col=ciCOL,...)
    segments(tidyout$time,tidyout$upper,tidyout$time,
             tidyout$upper,lty=ciLTY,col=ciCOL,...)
    segments(tidyout$time[-1],tidyout$upper[-1],tidyout$time[-n],
             tidyout$upper[-n],lty=ciLTY,col=ciCOL,...)
  }
}
citoLR <- function(x) {
    ## Convert a right-continuous kmci object (values defined at
    ## x$time) into interval (L, R] form: prepend the interval
    ## (0, time[1]] where survival is 1, and append an open interval
    ## (time[k], Inf) after the last time.
    k <- length(x$time)
    x$L <- c(0, x$time)
    x$R <- c(x$time, Inf)
    x$Lin <- rep(FALSE, k + 1)
    x$Rin <- c(rep(TRUE, k), FALSE)
    for (nm in c("surv", "lower", "upper")) {
        x[[nm]] <- c(1, x[[nm]])
    }
    x
}
plot.kmci <- function(x, ...) {
    ## plot() method for kmci objects: convert to interval form and
    ## reuse the kmciLR plotting code.  The class is set explicitly so
    ## the StCI() call inside plot.kmciLR dispatches to StCI.kmciLR
    ## rather than the default method.
    asLR <- citoLR(x)
    class(asLR) <- "kmciLR"
    plot.kmciLR(asLR, ...)
}
lines.kmci <- function(x, ...) {
    ## lines() method for kmci objects: convert to interval form, then
    ## truncate the trailing infinite interval at the last censoring
    ## mark (or the last stored time when no marks exist) so the drawn
    ## curve stops at the data.
    asLR <- citoLR(x)
    stop_at <- if (is.null(asLR$cens)) max(asLR$time) else max(asLR$cens)
    asLR$R[asLR$R == Inf] <- stop_at
    lines.kmciLR(asLR, ...)
}
lines.kmciLR<-function(x,lty=c(2,1),col=c(gray(.8),gray(0)),linetype="both",mark.time=NULL,...){
    ## Add a kmciLR fit to an existing plot.
    ##   lty, col  : c(confidence-limit style, survival-curve style);
    ##               scalars are recycled to both roles
    ##   linetype  : "both", "ci" (limits only) or "surv" (curve only)
    ##   mark.time : NULL is resolved via getDefault.mark.time()
    ## Recycle scalar lty/col once, up front (the original repeated the
    ## lty check before each branch; a single pass is sufficient).
    if (length(lty)==1) lty<-rep(lty,2)
    if (length(col)==1) col<-rep(col,2)
    mark.time<-getDefault.mark.time(mark.time,x)
    n<-length(x$L)
    if (linetype=="ci" | linetype=="both"){
        # lower/upper limits: horizontal pieces plus connecting
        # vertical pieces
        segments(x$L,x$lower,x$R,x$lower,
            lty=lty[1],col=col[1],...)
        segments(x$L[-1],x$lower[-1],x$R[-n],x$lower[-n],
            lty=lty[1],col=col[1],...)
        segments(x$L,x$upper,x$R,x$upper,
            lty=lty[1],col=col[1],...)
        segments(x$L[-1],x$upper[-1],x$R[-n],x$upper[-n],
            lty=lty[1],col=col[1],...)
    }
    if (linetype=="surv" | linetype=="both"){
        # survival step function
        segments(x$L,x$surv,x$R,x$surv,
            lty=lty[2],col=col[2],...)
        segments(x$L[-1],x$surv[-1],x$R[-n],x$surv[-n],
            lty=lty[2],col=col[2],...)
        if (mark.time & length(x$cens)>0){
            # censoring marks at the estimated survival heights
            out<-StCI(x,x$cens)
            points(out$time,out$survival,pch=3,col=col[2])
        }
    }
}
summary.kmci <- function(object, ...) {
    ## summary() method for kmci objects: a data frame with one row per
    ## time point giving the survival estimate and its confidence
    ## limits; the limit columns carry the confidence level in their
    ## labels (e.g. "lower 95% CL").
    pct <- 100 * object$conf.level
    out <- data.frame(object$time, object$surv, object$lower, object$upper)
    dimnames(out)[[2]] <- c("time", "survival",
        paste("lower ", pct, "% CL", sep = ""),
        paste("upper ", pct, "% CL", sep = ""))
    out
}
summary.kmciLR <- function(object, ...) {
  ## Tabulate a kmciLR object: one row per interval, with the interval
  ## rendered as text (via intChar) plus the estimate and confidence
  ## limits.  Missing Lin/Rin indicators default to intervals of the
  ## form (L, R].
  x <- object
  if (is.null(x$Lin)) x$Lin <- rep(FALSE, length(x$L))
  if (is.null(x$Rin)) x$Rin <- rep(TRUE, length(x$R))
  cl <- 100 * x$conf.level
  out <- data.frame(intChar(x$L, x$R, x$Lin, x$Rin),
                    x$surv, x$lower, x$upper)
  names(out) <- c("time interval", "survival",
                  sprintf("lower %s%% CL", cl),
                  sprintf("upper %s%% CL", cl))
  out
}
#summary(norm)
summary.kmciLRgroup <- function(object, ...) {
  ## Stack the per-group summaries of a list of kmciLR objects into a
  ## single data frame, adding a final column identifying the group
  ## (named after the original grouping variable).
  ##
  ## Fix: the original assign()ed each per-group data frame into the
  ## function environment and collected them with
  ## mget(ls(pattern = "ddat")).  ls() sorts names lexically, so with
  ## 10 or more groups the rows came back out of order ("ddat_10"
  ## sorts before "ddat_2").  A plain loop preserves group order and
  ## avoids the assign()/mget() indirection.
  pieces <- vector("list", length(object))
  for (i in seq_along(object)) {
    smry <- summary(object[[i]])
    ## append the group label as a 5th column (renamed below)
    smry[[5]] <- rep(names(object)[i], nrow(smry))
    pieces[[i]] <- smry
  }
  out <- do.call(rbind, pieces)
  names(out)[5] <- object[[1]]$groupVarName
  return(out)
}
summary.kmciLRtidy <- function(object, ...) {
  ## A one-group tidy object summarises like its single kmciLR
  ## element; with several groups, delegate to the grouped method
  ## (which expects class "kmciLRgroup").
  if (length(object) > 1) {
    class(object) <- "kmciLRgroup"
    return(summary.kmciLRgroup(object, ...))
  }
  summary(object[[1]])
}
## Generic: survival estimate and confidence interval at time(s) tstar.
## Methods exist for survfit/kmci-style objects (StCI.default) and for
## interval-based kmciLR objects (StCI.kmciLR); afterMax is only used
## by the default method.
StCI<-function(x,tstar,afterMax="continue",...){
UseMethod("StCI")
}
## Survival estimate and pointwise confidence interval at each time in
## tstar, for a survfit or kmci object (right-continuous step
## functions indexed by x$time).
##
##  x        : object with elements time, surv, lower, upper and either
##             conf.level (kmci) or conf.int (survfit)
##  tstar    : vector of evaluation times
##  afterMax : behavior for tstar beyond max(x$time): "continue",
##             "zero", "zeroNoNA", or "half" (detailed below)
##
## Returns data.frame(time, survival, lower, upper) with the
## confidence level attached as attribute "conf.level".
StCI.default<-function(x,tstar,afterMax="continue",...){
### get survival and confidence interval at t from a survfit or kmci object
#if (class(x)=="survfit"){
if (is(x,"survfit")){
## survfit objects store the level in conf.int; copy to the name used here
x$conf.level<-x$conf.int
}
if (length(x$strata)>1){
stop("does not work for objects with more than one strata")
}
time<-x$time
k<-length(time)
index<-1:k
nt<-length(tstar)
### important to set I to 0, if any tstar< min(time) stays
### equal to 0, then we will fix them at the end
I<-rep(0,nt)
## I[j] = index of the last event time at or before tstar[j]
for (j in 1:nt){
J<- time<=tstar[j]
if (any(J)){
I[j]<-max(index[J])
}
}
### afterMax determines what to do after the maximum time
### afterMax="continue"
### - surv, lower, and upper continue at value
### at time[nt]
### afterMax="zero"
### - surv, lower go to zero, upper continues at value
### at time[nt]
### afterMax="zeroNoNA"
### - surv, lower go to zero, upper continues at value
### at time[nt] (unless it is NA, then take the
### last non-missing value)
### afterMax="half"
### - surv goes to half value at time[nt]
### - lower goes to zero, upper continues at value
### at time[nt]
if (afterMax!="continue" && any(I==k)){
### default is to continue,
### no need to do anything if afterMax="continue"
if (afterMax=="zero"){
x$surv[k]<-0
x$lower[k]<-0
} else if (afterMax=="zeroNoNA"){
if (is.na(x$lower[k])) x$lower[k]<-0
if (is.na(x$upper[k])){
## carry forward the last non-missing upper limit
x$upper[k]<-x$upper[max(index[!is.na(x$upper)])]
}
} else if (afterMax=="half"){
x$surv[k]<- .5*x$surv[k]
x$lower[k]<-0
} else stop("afterMax must be 'continue',
'zero','zeroNoNA', or 'half' ")
}
### I==0 are when tstar[j]< min(time), set all to 1
S<-L<-U<-rep(1,nt)
## when I>0, i.e., tstar[j]>=min(time), plug in values
## note: I<-c(0,2,4), x<-1:4, then x[I] gives c(2,4),
## zeros ignored
S[I>0]<-x$surv[I]
L[I>0]<-x$lower[I]
U[I>0]<-x$upper[I]
out<-data.frame(time=tstar,survival=S,lower=L,upper=U)
## changing the name of the column in the data.frame
## after the conf.level, was a bad idea.
## It is cleaner to add conf.level as an attribute.
#dimnames(out)[[2]]<-c("time","survival",
# paste("lower ",100*x$conf.level,"% CL",sep=""),
# paste("upper ",100*x$conf.level,"% CL",sep=""))
attr(out,"conf.level")<- x$conf.level
#print(out,row.names=FALSE)
#invisible(out)
out
}
StCI.kmciLR <- function(x, tstar, ...) {
  ## Look up the survival estimate and confidence limits of a kmciLR
  ## object at each time in tstar.  Each tstar value is matched to the
  ## unique interval that contains it, honoring the open/closed
  ## endpoint indicators Lin and Rin.
  idx <- seq_along(x$surv)
  locate <- function(tval) {
    hit <- (x$L < tval & x$R > tval) |
      (x$L == tval & x$Lin) |
      (x$R == tval & x$Rin)
    idx[hit]
  }
  where <- vapply(tstar, locate, integer(1))
  out <- data.frame(time = tstar,
                    survival = x$surv[where],
                    lower = x$lower[where],
                    upper = x$upper[where])
  ## Report the confidence level as an attribute rather than embedding
  ## it in the column names.
  attr(out, "conf.level") <- x$conf.level
  out
}
quantile.kmciLR <- function(x, probs = c(.25, .5, .75), ...) {
  ## Survival quantiles with confidence limits for a kmciLR object.
  ## For each probability p: q is the earliest time at which the
  ## survival curve reaches p (NA if it never drops that low);
  ## lower/upper are read off the confidence-limit step functions in
  ## the same way.  Returns a matrix with columns S(q), q, lower,
  ## upper and one row per probability.
  one_prob <- function(p) {
    q <- if (any(x$surv == p)) {
      min(c(x$L[x$surv == p], x$R[x$surv == p]))
    } else if (any(x$surv < p)) {
      x$L[min(which(x$surv < p))]
    } else {
      NA
    }
    c(q,
      x$L[min(which(x$lower <= p))],
      x$R[max(which(x$upper >= p))])
  }
  out <- matrix(NA, length(probs), 4,
                dimnames = list(NULL, c("S(q)", "q", "lower", "upper")))
  for (i in seq_along(probs)) {
    out[i, 2:4] <- one_prob(probs[i])
  }
  out[, 1] <- probs
  out
}
quantile.kmci <- function(x, probs = c(.25, .5, .75), ...) {
  ## Quantiles for a kmci object: re-express it in interval (kmciLR)
  ## form -- prepending the S(0) = 1 point and converting event times
  ## to half-open intervals -- then delegate to quantile.kmciLR.
  k <- length(x$surv)
  lrform <- list(surv = c(1, x$surv),
                 L = c(0, x$time),
                 Lin = c(FALSE, rep(TRUE, k)),
                 R = c(x$time, Inf),
                 Rin = rep(FALSE, k + 1),
                 lower = c(x$lower[1], x$lower),
                 upper = c(1, x$upper))
  quantile.kmciLR(lrform, probs)
}
quantile.kmciLRgroup <- function(x, probs = c(.25, .5, .75), ...) {
  ## Apply quantile() to each group's kmciLR object, returning a named
  ## list of quantile matrices (one per group, in group order).
  out <- lapply(x, quantile, probs)
  names(out) <- names(x)
  return(out)
}
quantile.kmciLRtidy <- function(x, probs = c(.25, .5, .75), ...) {
  ## One group: quantiles of the single kmciLR element.  Several
  ## groups: delegate to the grouped method.
  if (length(x) > 1) {
    class(x) <- "kmciLRgroup"
    return(quantile.kmciLRgroup(x, probs))
  }
  quantile(x[[1]], probs)
}
## median.* methods: the median survival time is the 0.5 quantile, so
## each method simply forwards to the matching quantile.* method.
## For kmciLR/kmci the result is a 1-row matrix (S(q), q, lower,
## upper); the tidy/group variants return that per group.
median.kmciLR<-function(x,...){
quantile.kmciLR(x,probs=.5)
}
## Median for a kmci (right-continuous) object.
median.kmci<-function(x,...){
quantile.kmci(x,probs=.5)
}
## Median for a tidy (possibly multi-group) fit.
median.kmciLRtidy <- function(x, ...) {
  quantile.kmciLRtidy(x,probs=.5)
}
## Median for each group of a grouped fit; returns a named list.
median.kmciLRgroup <- function(x, ...) {
  quantile.kmciLRgroup(x,probs=.5)
}
abmm <- function(a1, b1, a2 = NULL, b2 = NULL) {
  ## Method-of-moments Beta approximation to a product of independent
  ## Beta random variables (notation follows Fan, 1991).
  ##
  ##  a1, b1 : vectors of Beta parameters for the factors
  ##  a2, b2 : optional extra factor(s), appended to a1/b1 (NULL
  ##           defaults added Jan 6, 2016 so they may be omitted)
  ##
  ## Returns list(a=, b=): parameters of a single Beta matching the
  ## first two moments of prod_i Beta(a_i, b_i).
  ##
  ## Fixes: scalar `||` instead of elementwise `|` in the if
  ## condition, and b is indexed by its own length (same value, since
  ## a and b always have equal length, but clearer).
  a <- c(a1, a2)
  b <- c(b1, b2)
  if (length(a) == 1 || all(b == 0)) {
    ## Single factor, or every factor is a point mass at 1
    ## (Beta(a, 0)): the product is that (possibly degenerate) Beta.
    ## Using the last element avoids NaN from the moment formulas,
    ## e.g. a = c(201, 200), b = c(0, 0)  (June 14, 2016 issue).
    out <- list(a = a[length(a)], b = b[length(b)])
  } else if (any(a == 0)) {
    ## Any factor that is a point mass at 0 makes the product a point
    ## mass at 0.
    out <- list(a = 0, b = b[1])
  } else {
    ## Match E[W] = S and E[W^2] = T, then solve for the Beta(a, b)
    ## with those moments.
    S <- prod(a / (a + b))
    T <- prod((a * (a + 1)) / ((a + b) * (a + b + 1)))
    newa <- ((S - T) * S) / (T - S^2)
    newb <- ((S - T) * (1 - S)) / (T - S^2)
    out <- list(a = newa, b = newb)
  }
  out
}
## Method-of-moments BPCP confidence limits.
##
##  x     : output of kmgw.calc(): ni (numbers at risk) and di (numbers
##          of deaths) at the h unique event times
##  alpha : 1 - confidence level
##
## Approximates the upper-limit process W^- and lower-limit process
## W^+ (products of Beta random variables) by single Beta
## distributions matched on their first two moments (via abmm), then
## takes the alpha/2 and 1-alpha/2 quantiles (via qqbeta) on the
## 2h+1 interval grid used by bpcp().
bpcp.mm<-function(x, alpha=0.05){
# Jan 6, 2016: totally rewrote function according to Discrete notes. New convention!
h<- length(x$ni)
A<- x$ni-x$di+1
B<- x$di
# Calculate A+,A- and B+,B- for all unique time points on the input data set
# (in notes: g2,g4,...,g2h)
# where W^+(g_{2j}) ~ Beta(A+[j],B+[j])
# and W^-(g_{2j}) ~ Beta(A-[j],B-[j])
Aplus<-Bplus<-Aminus<-Bminus<-rep(NA,h)
for (i in 1:h){
ab<-abmm(A[1:i],B[1:i])
Aminus[i]<-ab$a
Bminus[i]<-ab$b
if (i<h){
ab<-abmm(A[1:i],B[1:i],x$ni[i+1],1)
Aplus[i]<-ab$a
Bplus[i]<-ab$b
} else {
# after last time, Wplus is a point mass at 0
Aplus[i]<-0
Bplus[i]<-1
}
}
# There are 2h+1 intervals
# [g0,g1), [g1,g2),...,[g_{2h}, g_{2h+1})
#
# In [g_j, g_{j+1}), for the upper limit we use
# W^-(g_j).
# So for the W^-(g), we need to evaluate at
# g0,g1,g2,g3,g4,....,g2h
# But W^-(g3) = W^-(g2),
# because di=0 and ci=0 for (g2,g3]
# and similarly for all odd values
aupper<- c(1,1,rep(Aminus[-h],each=2),Aminus[h])
bupper<- c(0,0,rep(Bminus[-h],each=2),Bminus[h])
upper<- qqbeta(1-alpha/2,aupper,bupper)
# In [g_j, g_{j+1}), for the lower limit we use
# W^+(g_{j+1}).
# So for the W^+(g), we need to evaluate at
# g1,g2,g3,g4,....,g_{2h+1}
# But W^+(g3) = W^+(g2),
# because di=0 and ci=0 for (g2,g3]
# and similarly for all odd values
alower<- c(x$ni[1],rep(Aplus,each=2))
blower<- c(1,rep(Bplus,each=2))
lower<- qqbeta(alpha/2,alower,blower)
## Return the limits plus the Beta parameters they came from (the
## parameters are reused by the mid-p version, bpcpMidp.mm)
list(upper=upper,lower=lower,alower=alower,
blower=blower,aupper=aupper,bupper=bupper)
}
## Mid-p version of the method-of-moments BPCP limits.
##
## Represents the mid-p distribution as W = U*Bl + (1-U)*Bu with
## U ~ Bernoulli(1/2), where Bl and Bu are the Beta approximations to
## the lower and upper limit processes from bpcp.mm.  The mid-p
## quantiles are found by solving (uniroot) for the split
## alpha1 + alpha2 = alpha at which the Beta quantiles of Bl and Bu
## agree; degenerate cases (point masses at 0 or 1) are handled
## directly.  midptol is passed to uniroot as tol.
bpcpMidp.mm<-function(x,alpha=0.05,midptol=.Machine$double.eps^0.25){
## first calculate the usual bpcp.mm
z<- bpcp.mm(x,alpha=alpha)
## extract a and b Beta parameters for lower and upper
a1<-z$alower
b1<-z$blower
a2<-z$aupper
b2<-z$bupper
m<-length(a1)
lowerRootFunc<-function(x,i){
qqbeta(alpha/2-x,a1[i],b1[i]) -
qqbeta(alpha/2+x,a2[i],b2[i])
}
upperRootFunc<-function(x,i){
qqbeta(1-alpha/2-x,a1[i],b1[i]) -
qqbeta(1-alpha/2+x,a2[i],b2[i])
}
lower<-upper<-rep(NA,m)
for (i in 1:m){
if (b2[i]==0){
# Recall: W=U*Bl + (1-U)*Bu, where
# U ~ Bernoulli(.5)
# Bl~ Random Variable for lower
# Bu~ Random variable for upper
# if b2[i]==0, then Bu is a point mass at 1
# Let q(x,W) be the xth quantile of a RV W
# so the quantile of W at alpha/2 is q(alpha/2,W)
# and
# q(alpha/2,W) = q(alpha,Bl) for all 0<alpha<1
lower[i]<-qqbeta(alpha,a1[i],b1[i])
upper[i]<-1
} else if (a1[i]==0){
# if a1[i]==0, then Bl is a point mass at 0
# and
# q(1-alpha/2,W) = q(1-alpha,Bu)
# for all 0<alpha<1
lower[i]<-0
upper[i]<-qqbeta(1-alpha,a2[i],b2[i])
} else if (i>1 & (a1[i]==a1[i-1] & b1[i]==b1[i-1] &
a2[i]==a2[i-1] & b2[i]==b2[i-1])){
## this if condition is just for
## saving computation time
## (same Beta parameters as the previous interval, so the
## quantiles are unchanged)
lower[i]<-lower[i-1]
upper[i]<-upper[i-1]
} else {
lowerRoot<-uniroot(lowerRootFunc,interval=
c(-alpha/2,alpha/2),i=i,tol=midptol)$root
# see paper, we want
# Q(alpha1,a1,b1)=Q(alpha2,ab,b2),
# where alpha1+alpha2=alpha
# so we solve that using uniroot,
# then Pr[W<=Q]=alpha/2
lower[i]<-qqbeta(alpha/2-lowerRoot,a1[i],b1[i])
upperRoot<-uniroot(upperRootFunc,interval=
c(-alpha/2,alpha/2),i=i,tol=midptol)$root
upper[i]<-qqbeta(1-alpha/2-upperRoot,a1[i],b1[i])
}
}
out<-list(lower=lower,
upper=upper,
alower=a1,aupper=a2,blower=b1,bupper=b2)
out
}
#outmm<-bpcpMidp.mm(x)
#out<-bpcpMidp.mc(x,nmc=10^5)
#max( abs(outmm$lower-out$lower) )
#max( abs(outmm$upper-out$upper) )
bpcpControl <- function(midpMMTol = .Machine$double.eps^0.25,
    seed = 49911, tolerance = .Machine$double.eps^0.5) {
  ## Collect tuning constants for bpcp()/bpcpfit().
  ##
  ##  midpMMTol : uniroot() tolerance used by bpcpMidp.mm
  ##  seed      : RNG seed for the Monte Carlo method; NA or NULL means
  ##              "do not set a seed" (NULL is used in simulations)
  ##  tolerance : two numeric times are treated as equal when they
  ##              differ by less than this; must be strictly positive
  ##
  ## Fix: the original used the elementwise `&` in the seed test, so
  ## seed = NULL produced a zero-length `if` condition and an error;
  ## `&&` short-circuits and handles NULL correctly.
  if (!is.null(seed) && is.na(seed)) {
    seed <- NULL
  }
  if (tolerance <= 0) {
    stop("tolerance must be positive.
It is the lowest positive value such that if
abs(x-y) is less than tolerance,
then numerics x and y are treated as equal")
  }
  list(midpMMTol = midpMMTol, seed = seed, tolerance = tolerance)
}
# bpcp.mc is a function to calculate the bpcp CIs by Monte Carlo simulation
## Monte Carlo BPCP confidence limits (nmc replicates).
##
## Simulates the Beta-product representation of the survival
## distribution one event time at a time and takes empirical quantiles
## (stats::quantile) for the limits on the 2k+1 interval grid used by
## bpcp().  testtime/DELTA optionally capture the simulated values
## (returned as Smc) for the interval containing testtime.  Note:
## rbeta() is used without setting a seed here; bpcp() seeds the RNG
## before calling this function.
bpcp.mc<-function(x,nmc=100,alpha=.05, testtime=0, DELTA=0, midp=FALSE){
# Jan 6, 2016: totally rewrote function according to Discrete notes. New convention!
### for each time t_j there are 2 intervals
### representing
## [t_{j-1},t_j-Delta)
## [t_j-Delta, t_j)
##
## and at the end add [t_k, Inf)
##
## if Delta=0 then the second interval of the pair is
## not needed
## but we keep it in and delete in bpcp after the
## call to this .calc function
k<-length(x$time)
lower<-upper<-rep(1,2*k+1)
S<-rep(1,nmc)
## q(): empirical CI from the simulated lower (slo) and upper (shi)
## samples; the mid-p version pools the two samples first
q<-function(slo,shi,Midp=midp){
if (Midp){
S<-c(slo,shi)
quantile(S,probs=c(alpha/2,1-alpha/2))
} else {
c( quantile(slo, probs=alpha/2), quantile(shi,1-alpha/2) )
}
}
# function to pick out time points
# so t_(j-1) = t_{j-1}, for j=1,..k
t_<-function(j){
tt<-c(0,x$time)
tt[j+1]
}
Smc<-list(Slo=NA,Shi=NA)
for (j in 1:k){
## case 1: t_j is a death time (and perhaps censor
## time also)
if (x$di[j]>0){
Shi<-S
## Slo=look ahead one failure
Slo<-S*rbeta(nmc,x$ni[j],1)
lohi<-q(Slo,Shi)
## for (t_{j-1},t_j-Delta]
if (t_(j-1)< testtime &
testtime<=t_(j)-DELTA) Smc<-list(Slo=Slo,Shi=Shi)
lower[2*j-1]<-lohi[1]
upper[2*j-1]<-lohi[2]
Slo<-S*rbeta(nmc,x$ni[j] - x$di[j] + 1,x$di[j])
lohi<-q(Slo,Shi)
## for (t_j-Delta, t_j]
if (t_(j)-DELTA< testtime &
testtime<=t_(j)) Smc<-list(Slo=Slo,Shi=Shi)
lower[2*j]<-lohi[1]
upper[2*j]<-lohi[2]
S<-Slo
Shi<-Slo
lohi<-q(Slo,Shi)
} else {
## case 2: t_j is a censoring-only time; both intervals of the
## pair get the same limits
Shi<-S
## Slo=look ahead one failure
Slo<-S*rbeta(nmc,x$ni[j],1)
lohi<-q(Slo,Shi)
## for (t_{j-1},t_j]
if (t_(j-1)< testtime &
testtime<=t_(j)) Smc<-list(Slo=Slo,Shi=Shi)
lower[(2*j-1):(2*j)]<-lohi[1]
upper[(2*j-1):(2*j)]<-lohi[2]
}
}
## for (t_k, Inf)
if (testtime>t_(k)) Smc<-list(Slo=S,Shi=rep(0,nmc))
lohi<- q(rep(0,nmc),S)
upper[2*k+1]<-lohi[2]
lower[2*k+1]<-lohi[1]
list(upper=upper,lower=lower, Smc=Smc)
}
bpcp <- function(time, status, nmc = 0, alpha = .05, Delta = 0, stype = "km",
                 midp = FALSE, monotonic = NULL, control = bpcpControl()) {
  ## Beta Product Confidence Procedure: pointwise confidence intervals
  ## for the survival function from right-censored data.
  ## (Jan 6, 2016: rewritten according to the Discrete notes
  ## convention.)
  ##
  ##  time, status : observed times and status indicators
  ##  nmc       : 0 = method-of-moments Beta approximation; > 0 =
  ##              Monte Carlo with nmc replicates
  ##  alpha     : 1 - confidence level
  ##  Delta     : width of the "just before an event" interval; must
  ##              not exceed the smallest gap between times
  ##  stype     : survival estimate, "km" (Kaplan-Meier) or "mue"
  ##              (median unbiased estimate)
  ##  midp      : use mid-p intervals?
  ##  monotonic : force CI bounds non-increasing in time; defaults to
  ##              TRUE for the MM method, FALSE for Monte Carlo
  ##  control   : see bpcpControl()
  ##
  ## Returns a "kmciLR" object: estimates and limits on the intervals
  ## [L, R).
  ##
  ## Fix: when an unknown stype triggered the "assuming stype='mue'"
  ## warning, the original assigned the unused variable `esimate`
  ## (typo), so the returned object recorded the invalid stype; we now
  ## record "mue".  Scalar `&` in the seed test replaced by `&&`.
  if (is.null(monotonic)) {
    monotonic <- (nmc == 0)
  }
  midpMMTol <- control$midpMMTol
  ## Seed by default so repeated calls on the same data agree;
  ## control$seed = NULL is used for Monte Carlo simulations
  if (nmc > 0 && !is.null(control$seed)) set.seed(control$seed)
  ## Kaplan-Meier estimate and Greenwood variances
  x <- kmgw.calc(time, status, keepCens = TRUE)
  k <- length(x$time)
  ## Check Delta against the smallest gap between consecutive times,
  ## with a tolerance so machine error does not trigger the stop
  ## (e.g. 3.1 - 0.1 != 3 in floating point)
  if (Delta < 0) stop("Delta must be greater than 0")
  minTimeDiff <- min(diff(c(0, x$time)))
  tolerance <- control$tolerance
  if (minTimeDiff - Delta + tolerance < 0) {
    stop("Either negative times or
Delta is not less than or equal to
the minimum difference in times")
  }
  ## Each event time t_j contributes two intervals,
  ##   [t_{j-1}, t_j - Delta)  and  [t_j - Delta, t_j),
  ## plus a final [t_k, Inf).  KM at t = 0 (S = 1) covers the first
  ## pair: [0, t1 - Delta) and [t1 - Delta, t1).
  KM <- c(rep(1, 2), rep(x$KM[-k], each = 2), x$KM[k])
  L <- c(0, rep(x$time, each = 2)) - c(0, rep(c(Delta, 0), k))
  R <- c(rep(x$time, each = 2), Inf) - c(rep(c(Delta, 0), k), 0)
  Lin <- rep(TRUE, 2 * k + 1)
  Rin <- rep(FALSE, 2 * k + 1)
  ## Confidence limits: method of moments (nmc == 0) or Monte Carlo,
  ## each with an optional mid-p variant
  if (midp) {
    if (nmc == 0) {
      hilo <- bpcpMidp.mm(x, alpha = alpha, midptol = midpMMTol)
    } else {
      hilo <- bpcp.mc(x, nmc, alpha, midp = TRUE)
    }
  } else {
    if (nmc == 0) {
      hilo <- bpcp.mm(x, alpha = alpha)
    } else {
      hilo <- bpcp.mc(x, nmc, alpha)
    }
  }
  ## Drop intervals that are not needed:
  ## 1) Delta == 0: the second interval of each pair is empty
  ## 2) Delta > 0: drop the first interval of a pair whenever
  ##    t_j - Delta == t_{j-1} (compared with tolerance, not ==,
  ##    because of floating point error)
  if (Delta == 0) {
    keep <- c(rep(c(TRUE, FALSE), k), TRUE)
  } else {
    keepfirst <- abs(diff(c(0, x$time)) -
                       rep(Delta, length(x$time))) >= tolerance
    keep <- rep(TRUE, 2 * k + 1)
    first <- c(rep(c(TRUE, FALSE), k), FALSE)
    keep[first] <- keepfirst
  }
  if (stype == "km") {
    SURV <- KM[keep]
  } else {
    if (stype != "mue") {
      warning("assuming stype='mue' ")
      stype <- "mue"
    }
    ## MUE = midpoint of a 0% confidence interval; computed via a
    ## recursive call with alpha = 1 (could be made faster later)
    outtemp <- bpcp(time, status, nmc,
                    alpha = 1, Delta, stype = "km", midp = midp)
    SURV <- .5 * outtemp$lower + .5 * outtemp$upper
  }
  ## Beta parameters accompany the limits only for the MM method
  betaParms <- NULL
  if (nmc == 0) {
    betaParms <- list(alower = hilo$alower[keep],
                      blower = hilo$blower[keep],
                      aupper = hilo$aupper[keep],
                      bupper = hilo$bupper[keep])
  }
  lower <- hilo$lower[keep]
  upper <- hilo$upper[keep]
  if (monotonic) {
    lower <- cummin(lower)
    upper <- cummin(upper)
  }
  out <- list(cens = getmarks(time, status), surv = SURV,
              lower = lower,
              upper = upper,
              L = L[keep], Lin = Lin[keep], R = R[keep], Rin = Rin[keep],
              Interval = intChar(L, R, Lin, Rin)[keep], stype = stype,
              betaParms = betaParms, conf.level = 1 - alpha)
  class(out) <- "kmciLR"
  out
}
#Function to create "tidy" output from bpcp function
#Transform kmciLR object into dataframe
#Takes in a list of kmciLR objects
tidykmciLR <- function(x) {
  ## Flatten bpcp output into a "tidy" data frame for plotting: each
  ## interval [L, R) contributes two rows (at L and at R) so that a
  ## line plot draws proper step functions.  Accepts a single kmciLR
  ## object or a (possibly multi-group) kmciLRtidy/kmciLRgroup list;
  ## multi-group input gets an extra `group` column.
  ##
  ## Fix: the original tested attr(x, "class") %in% ... and
  ## attr(x, "class") == "kmciLR", which errors (zero-length `if`
  ## condition) when x has no class attribute; inherits() is safe.
  one <- function(obj) {
    with(obj, data.frame(time = sort(c(L, R)),
                         surv = rep(surv, each = 2),
                         lower = rep(lower, each = 2),
                         upper = rep(upper, each = 2)))
  }
  tidyout <- data.frame(NULL)
  if (inherits(x, c("kmciLRtidy", "kmciLRgroup"))) {
    num <- length(x)
    for (i in seq_len(num)) {
      new <- one(x[[i]])
      ## add the group label only when there is more than one group
      if (num != 1) {
        new$group <- names(x[i])
      }
      tidyout <- rbind(tidyout, new)
    }
  } else if (inherits(x, "kmciLR")) {
    tidyout <- one(x)
  }
  return(tidyout)
}
## Generic entry point for fitting BPCP survival curves; dispatches on
## the first argument (formula interface or default vector interface).
bpcpfit <- function(time, ...) {
  UseMethod("bpcpfit")
}
#Takes same inputs as bpcp function
## Formula interface for bpcpfit.  The response must be a
## right-censored Surv object; at most one right-hand-side term names
## a grouping variable, e.g.  Surv(t, s) ~ trt.  Extra arguments in
## ... are forwarded to bpcpfit.default.
## NOTE(review): is.Surv() and strata() are presumably provided by the
## survival package -- confirm it is imported.
bpcpfit.formula <- function(formula, data, subset, na.action, ...) {
#Borrowed from survfit
Call <- match.call()
Call[[1]] <- as.name('bpcpfit')
indx <- match(c('formula', 'data'), names(Call), nomatch=0)
#Make sure formula is given
if (indx[1]==0) {
stop("a formula argument is required")
}
if (missing(data))
data <- environment(formula)
#Change to model.frame format using data
temp <- model.frame(formula, data=data)
#Evaluate
mf <- eval.parent(temp)
#Get terms of formula
Terms <- terms(formula)
#Make sure there are no interaction terms present in the formula
ord <- attr(Terms, 'order')
if (length(ord) & any(ord !=1)) {
stop("Interaction terms are not valid for this function")
}
n <- nrow(mf)
#Get out Y variable (time and censoring)
Y <- model.extract(mf, 'response')
#Ensure a Surv object is provided to the formula
if (!is.Surv(Y)) {
stop("Response must be a survival (Surv) object")
}
#Get labels from formula (group variable)
ll <- attr(Terms, 'term.labels')
if (length(ll) > 1) {
stop("Only a treatment/grouping variable can be specified in this function. No other covariates should be included.")
}
#Determine if group variable was provided
if (length(ll) == 0) {
#No group: fit a single curve from the Surv columns (time, status)
output <- do.call("bpcpfit.default", list(Y[ ,1], Y[ ,2], ...))
}
else {
X <- strata(mf[ll], shortlabel = TRUE)
#Combine outcome and response variables
#NOTE(review): cbind() coerces Y and X to a common matrix type
#before data.frame(); verify the group coding survives this
Z <- data.frame(cbind(Y, X))
#Split by levels of group (treatment) variable
newZ <- split(Z, Z$X)
#Check each level of the treatment variable for interval censoring
#before fitting
for (i in 1:length(newZ)) {
if (length(unique(newZ[[i]]$status)) > 2) {
stop("Interval censoring is not supported by bpcp or bpcpfit.")
}
}
output <- do.call("bpcpfit.default", list(Z[ ,1], Z[ ,2], Z[ ,3], ...))
#Name each fit by its treatment level and record the group
#variable's name on each element
names(output) <- levels(factor(X))
for (i in 1:length(output)) {
output[[i]]$groupVarName <- ll
}
}
return(output)
}
bpcpfit.default <- function(time, status = NULL, group = NULL, formula = NULL,
    nmc = 0, alpha = NULL, conf.level = 0.95, Delta = 0, stype = "km",
    midp = FALSE, monotonic = NULL, control = bpcpControl(),
    plotstyle = "ggplot", data = NULL, subset = NULL, na.action = NULL, ...) {
  ## Fit BPCP confidence intervals, optionally by group.
  ##
  ##  time, status : survival times and event indicators
  ##  group     : optional grouping variable; one fit per level
  ##  alpha     : deprecated; use conf.level (alpha = 1 - conf.level)
  ##  plotstyle : "ggplot" returns a "kmciLRtidy" list; "standard"
  ##              returns a "kmciLR" (one group) or "kmciLRgroup"
  ##  nmc, Delta, stype, midp, monotonic, control : passed to bpcp()
  ##
  ## Fixes: the grouped branch built Z with as.data.frame(cbind(...)),
  ## which coerces every column to character when `group` is
  ## non-numeric, corrupting time/status -- data.frame() keeps column
  ## types.  The alpha deprecation notice is now a warning() rather
  ## than a print().
  if (is.null(time)) {
    stop("Time is required.")
  }
  if (!is.null(alpha)) {
    warning("alpha is out of date. Use conf.level instead. Setting conf.level to 1-alpha and ignoring conf.level.", call. = FALSE)
  } else {
    alpha <- 1 - conf.level
  }
  Call <- match.call()
  if (is.null(group)) {
    ## Single sample: one bpcp fit
    if (length(unique(status)) > 2) {
      stop("Interval censoring is not supported by bpcp or bpcpfit.")
    }
    out <- bpcp(time, status, nmc = nmc, alpha = alpha, Delta = Delta,
                stype = stype, midp = midp,
                monotonic = monotonic, control = control)
    out$num <- length(time)
    ## events = count of the largest status value (assumed event code)
    out$events <- length(which(status == max(status)))
    if (plotstyle == "ggplot") {
      results <- list(out)
      class(results) <- "kmciLRtidy"
    } else {
      results <- out
    }
  } else {
    ## Grouped: one bpcp fit per level of `group`, in level order
    Z <- data.frame(V1 = time, V2 = status, V3 = group)
    newZ <- split(Z, Z$V3)
    results <- list()
    for (i in 1:length(newZ)) {
      if (length(unique(newZ[[i]]$V2)) > 2) {
        stop("Interval censoring is not supported by bpcp or bpcpfit.")
      }
      results <- append(results, list(bpcp(newZ[[i]]$V1, newZ[[i]]$V2,
                        nmc = nmc, alpha = alpha, Delta = Delta,
                        stype = stype, midp = midp,
                        monotonic = monotonic, control = control)))
      results[[i]]$num <- length(newZ[[i]]$V1)
      results[[i]]$events <- length(which(newZ[[i]]$V2 == max(newZ[[i]]$V2)))
      ## Record the group variable's name (last piece of e.g. df$grp)
      results[[i]]$groupVarName <- rev(strsplit(as.character(Call["group"]),
                                                "$", fixed = TRUE)[[1]])[1]
    }
    ## Name each fit by its group level (same order as split())
    names(results) <- levels(factor(group))
    if (plotstyle == "ggplot") {
      class(results) <- "kmciLRtidy"
    } else if (plotstyle == "standard") {
      class(results) <- "kmciLRgroup"
    } else {
      stop('Plot style must be either "ggplot" or "standard"')
    }
  }
  return(results)
}
print.kmciLRgroup <- function(x, ...) {
  ## Print a survfit-style summary table for a grouped fit: one row
  ## per group with n, number of events, median survival time, and
  ## the confidence limits for the median.
  cl <- x[[1]]$conf.level
  out <- matrix(NA, length(x), 5,
                dimnames = list(names(x),
                                c("n", "events", "median",
                                  paste0(cl, "LCL"), paste0(cl, "UCL"))))
  ## Compute the medians once (the original called median(x) inside
  ## the loop, recomputing every group's quantiles on each iteration)
  med <- median(x)
  for (i in seq_along(x)) {
    out[i, 1] <- x[[i]]$num
    out[i, 2] <- x[[i]]$events
    out[i, 3:5] <- med[[i]][2:4]
  }
  print(out)
  ## Print methods return their argument invisibly; the original
  ## called invisible(x) before print(), which had no effect.
  invisible(x)
}
#Output mirrors output of survfit function
print.kmciLRtidy <- function(x, ...) {
  ## A one-group tidy object prints like its single kmciLR element;
  ## with several groups, print via the grouped method (which expects
  ## class "kmciLRgroup").  Returns the object invisibly.
  if (length(x) > 1) {
    class(x) <- "kmciLRgroup"
    print.kmciLRgroup(x)
  } else {
    print(x[[1]])
  }
  invisible(x)
}
print.kmciLR <- function(x, ...) {
  ## Print a survfit-style one-row summary for a single fit: n,
  ## number of events, median survival time, and the confidence
  ## limits for the median.
  out <- matrix(NA, 1, 5,
                dimnames = list(NULL,
                                c("n", "events", "median",
                                  paste0(x$conf.level, "LCL"),
                                  paste0(x$conf.level, "UCL"))))
  out[1, 1] <- x$num
  out[1, 2] <- x$events
  ## median() dispatches to median.kmciLR; columns 2:4 of its 1-row
  ## matrix are q, lower, upper
  out[1, 3:5] <- median(x)[2:4]
  print(out)
  ## Print methods return their argument invisibly; the original's
  ## invisible(x) before print() had no effect on the return value.
  invisible(x)
}
|
693ca1c43956c09c5e40e1bdcfdb786da86ab17c | 049b6e37472c3d460bb30911cd7d470d563c612d | /man/ti_phenopath.Rd | 1442eb3bc2d4bba8112dfe1516d2d557a451c125 | [] | no_license | ManuSetty/dynmethods | 9919f4b1dc30c8c75db325b4ddcd4e9ada5e488b | 337d13b7a6f8cac63efdeb0d06d80cd2710d173d | refs/heads/master | 2020-03-21T11:34:35.406210 | 2018-06-24T20:25:50 | 2018-06-24T20:25:50 | 138,512,485 | 1 | 0 | null | 2018-06-24T20:16:12 | 2018-06-24T20:16:11 | null | UTF-8 | R | false | true | 2,042 | rd | ti_phenopath.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ti_phenopath.R, R/ti_container.R
\name{ti_phenopath}
\alias{ti_phenopath}
\alias{ti_phenopath}
\title{Inferring trajectories with PhenoPath}
\usage{
ti_phenopath(docker = dynwrap::test_docker_installation(), thin = 40,
z_init = "1", model_mu = FALSE, scale_y = TRUE)
ti_phenopath(docker = dynwrap::test_docker_installation(), thin = 40,
z_init = "1", model_mu = FALSE, scale_y = TRUE)
}
\arguments{
\item{docker}{Whether to use the docker container or the R wrapper}
\item{thin}{The number of iterations to wait each time before
re-calculating the elbo}
\item{z_init}{The initialisation of the latent trajectory. Should be one of
\enumerate{
\item A positive integer describing which principal component of the data should
be used for initialisation (default 1), \emph{or}
\item A numeric vector of length number of samples to be used
directly for initialisation, \emph{or}
\item The text character \code{"random"}, for random initialisation
from a standard normal distribution.
}}
\item{model_mu}{Logical - should a gene-specific intercept term be modelled?}
\item{scale_y}{Logical - should the expression matrix be centre scaled?}
}
\value{
The trajectory model
}
\description{
Inferring trajectories with PhenoPath
Will generate a trajectory using \href{https://doi.org/10.1101/159913}{PhenoPath}. This method was wrapped inside a \href{https://github.com/dynverse/dynmethods/tree/master/containers/phenopath}{container}.
}
\details{
This methods was first wrapped inside R, see \link{ti_phenopath}
The original code of this method is available \href{https://github.com/kieranrcampbell/phenopath}{here}.
The method is described in: \href{https://doi.org/10.1101/159913}{Campbell, K., Yau, C., 2017. Uncovering genomic trajectories with heterogeneous genetic and environmental backgrounds across single-cells and populations.}
}
\seealso{
\code{\link[phenopath:phenopath]{phenopath::phenopath()}}, \code{\link[phenopath:clvm]{phenopath::clvm()}}
}
|
d82c3e9e1a78128113e96a91d4dac094f6846be0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/VLF/examples/vlfFun.Rd.R | 6e0fcf074a643a23622a5bd7e69e075b580b1523 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 202 | r | vlfFun.Rd.R | library(VLF)
### Name: vlfFun
### Title: Nucleotide VLF Assessment Function
### Aliases: vlfFun
### ** Examples
## Not run:
##D data(birds)
##D bird_vlfAnalysis <- vlfFun(birds)
## End(Not run)
|
b4f79c31c101a1acaacd2cdc43a39f79e68f2605 | 55b430f8b24f521dc5860413c1bc607cda807049 | /webapp/run.R | d61a898a1ffefdd3d30ab606562370b14c3139d4 | [
"MIT"
] | permissive | alan-turing-institute/DetectorCheckerWebApp | b4480ca58d1d37585c4931926200d1751ade31c7 | 68b802907d6b07ae5154c0979c8adfce65b56348 | refs/heads/master | 2021-06-26T16:42:36.017045 | 2020-11-30T09:35:16 | 2020-11-30T09:35:16 | 147,701,683 | 2 | 0 | MIT | 2020-11-30T09:35:18 | 2018-09-06T16:24:47 | R | UTF-8 | R | false | false | 170 | r | run.R |
source("global.R")
source("ui.R")
source("server.R")
source("gui_utils.R")
shiny::runApp(
appDir = ".",
port = 1111,
host = "0.0.0.0",
launch.browser = FALSE
)
|
fd8514396ccaa43f98943bd35903b4ee13754752 | 6fe9e93d478301e087b8af377f4ae39f1da75724 | /plot4.R | 538ae2f5858e7a21e133eb1a9cb335fa58078ee8 | [] | no_license | cshelley/ExData_Plotting1 | c5c8537e65c317fc282e560f7327c9dda0a327af | 3c10a2b7e9efe209f0c55009f4f4073a2c86e7e3 | refs/heads/master | 2021-05-29T01:00:05.118960 | 2014-09-04T23:10:27 | 2014-09-04T23:10:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,892 | r | plot4.R | #### Read in only relevant subset of data set
file<-"household_power_consumption.txt"
power <- read.table(file, header = TRUE, sep = ";", stringsAsFactors=FALSE)
power$Date <- as.Date(power$Date, format="%d/%m/%Y") # Convert $Date into searchable dates
power$Time <- strptime(power$Time, format="%T") # Convert $Time into searchable times
data <- power[power$Date >= "2007-02-01" & power$Date <= "2007-02-02", ] # shortened data set for analysis
#### PLOT 4: 4-Panel Plot
png(file = "plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
## Top left: original plot2
plot.ts(data$Global_active_power, xlab = NA, ylab = "Global Active Power",
axes = FALSE)
axis(1, at = 0, "Thu")
axis(1, at = 1450, "Fri")
axis(1, at = 2900, "Sat")
axis(2, c(0,2,4,6), labels = TRUE)
box()
## Top right: datatime v. Voltage
plot.ts(data$Voltage, xlab = "datetime", ylab = "Voltage", axes = FALSE)
axis(1, at = 0, "Thu")
axis(1, at = 1450, "Fri")
axis(1, at = 2900, "Sat")
axis(2, c(234,238,242,246), labels = TRUE)
axis(2, c(236,240,244), labels = FALSE)
box()
## Bottom left: original plot3
plot.ts(data$Sub_metering_1, col = "darkgrey", xlab = NA, ylab = "Energy sub metering", axes = FALSE)
lines(data$Sub_metering_2, col = "red")
lines(data$Sub_metering_3, col = "blue")
legend("topright", lty=c(1,1,1), bty = "n", lwd=c(2,2,2),col=c("darkgrey", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=0.7)
axis(1, at = 0, "Thu")
axis(1, at = 1450, "Fri")
axis(1, at = 2900, "Sat")
axis(2, c(0,10,20,30), labels = TRUE)
box()
## Bottom right: Global Reactive Power
plot.ts(data$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", axes = FALSE)
axis(1, at = 0, "Thu")
axis(1, at = 1450, "Fri")
axis(1, at = 2900, "Sat")
axis(2, c(0.0, 0.1, 0.2, 0.3, 0.4, 0.5), labels = TRUE)
box()
dev.off()
|
15c93596fee268b3c4fe94e71b29ed12f5324663 | b0b3634623fad77cde5f77b19d40db6eb048c7e8 | /man/post_Gauss.Rd | 493deb981893a119ae6bee8a92baf650e5965d23 | [] | no_license | sgolchi/sepsis | 4a7a3c2ce7606df23fe8d5e7840bf5e3571a0edb | 45259dd6ea2a33a49313f34e38bd5e52dbe54f7c | refs/heads/master | 2023-01-24T00:12:27.800592 | 2020-11-28T17:08:32 | 2020-11-28T17:08:32 | 291,101,741 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 718 | rd | post_Gauss.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_funs.R
\name{post_Gauss}
\alias{post_Gauss}
\title{Closed form Gaussian Posterior}
\usage{
post_Gauss(yb, xb, prior, ysd)
}
\arguments{
\item{yb}{vector of new responses, length equals number of patients in the most recent batch}
\item{xb}{design matrix for the most recent batch}
\item{prior}{matrix of Gaussian parameters up to the most recent batch, number of rows is the
number of arms, first column contains the means and second column the sd's.}
\item{ysd}{observation variance.}
}
\value{
matrix of means and sd's for the Gaussian posterior
}
\description{
Returns updated Gaussian posterior parameters for the effect sizes
}
|
8fdecfa512d22389d5a0d4cec865cad81368d424 | 96ef61c09bbccb62d8dda37d7b680c0f2e4f0322 | /R/mixing_distribution_predictive.R | 7043aef506486c6e9468a81185c8e377799a17f6 | [] | no_license | cran/dirichletprocess | c8115fa9572799c5cb585054e8ddbaa68c2296ce | 862545127839b7404b8de7082c1ffaba5a21028c | refs/heads/master | 2023-03-15T13:22:38.733647 | 2023-03-10T10:50:02 | 2023-03-10T10:50:02 | 119,422,815 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 245 | r | mixing_distribution_predictive.R | #' Calculate how well the prior predicts the data.
#'
#' @param mdObj The distribution
#' @param x The data
#' @return The probability of the data being from the prior.
#' @export
Predictive <- function(mdObj, x) UseMethod("Predictive", mdObj)
|
0f11fe7b8562204b6571b83a81128e5164182d28 | 9b469a38a8a26dce43e8d9eb820140bf90241210 | /Exploratory Data Analysis/Project 2/plot2.R | a463e08e2a2a35dfb008dab44acfa8b5ac5eea0b | [] | no_license | KDThinh/R-Programming | e4485756230f8fdb03a1824f8f4eb70601e129a1 | 7eabd4a4fba70037747e89c2819abe7a3b2ef9e0 | refs/heads/master | 2021-01-01T18:23:09.238087 | 2015-06-21T11:37:54 | 2015-06-21T11:37:54 | 26,592,418 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 777 | r | plot2.R | #This script is for Q2 in Exploratory Data Course - 2nd project
#Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
#(fips == "24510") from 1999 to 2008? Use the base plotting system to make
#a plot answering this question.
#Create a subset dataframe contains only data for fips == "24510" (Baltimore)
Baltimore_NEI <- subset(NEI,NEI$fips == "24510")
#Use tapply to calculate the total PM2.5 for each year in the subset dataframe
Q2 <- with(Baltimore_NEI,tapply(Emissions,year,sum))
#Making plot
png(file = "plot2.png", height = 480, width = 480)
barplot(Q2,
xlab = "Year",
ylab = "Total emission PM2.5 (Tons)",
ylim = c(0,3500),
main = "Total emission vs Year in Baltimore",
lwd = 1,
col = "black",
)
dev.off()
|
5df4c00954ce2d7004ec78b6e9d2cc3c0c61853c | c49197d22ea4788802e64b837e639a0085fcc4c6 | /man/sann_generate.Rd | 038d42d5bc23e7ba079a2c73827dd714c1ea3a49 | [
"MIT"
] | permissive | stevehoang/pbayes | 3559bc112fda5875f1fb7f4ae8495b43845ff1fd | 62611a3c2e60243e792bdf2043bbfab0f5b902a3 | refs/heads/master | 2021-09-06T05:10:25.443028 | 2018-02-02T16:33:49 | 2018-02-02T16:33:49 | 104,502,287 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 299 | rd | sann_generate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sann_generate.R
\name{sann_generate}
\alias{sann_generate}
\title{Generate a new point for simulated annealing}
\usage{
sann_generate(...)
}
\arguments{
\item{...}{The arguments to the negative log likelihood function}
}
|
e91a24c2bbc74bf73571f5c503ce81e3a0dc737b | d09c16a6dc4452160ef777cc3346eb08b8eb4caa | /man/Diversity.Rd | 59f61c46c45bfb53912800b549c8608310f7bd57 | [] | no_license | JohnsonHsieh/SpadeR | 978f72b52e9c46e0029f82e129dc07c7b9dc4763 | 7501ce163ef187450b7e907352ea91d79f920ae2 | refs/heads/master | 2021-01-18T15:08:32.482927 | 2015-07-07T09:24:52 | 2015-07-07T09:24:52 | 38,677,027 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | rd | Diversity.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/spader.R
\name{Diversity}
\alias{Diversity}
\title{Estimation of species diversity}
\usage{
Diversity(X, datatype = c("abundance", "incidence"))
}
\arguments{
\item{X}{a vector of species sample frequencies (for abundance data), or incidence-based sample frequencies (1st entry must be the number of sampling unit).}
\item{datatype}{a character of data type,"abundance" or "incidence".}
}
\value{
a list of species diversity estimator with order q and its confidence interval.
}
\description{
\code{Diversity} This part features various diversity indices including Shannon's index and its effective number of species (diversity of order 1, or Shannon diversity), Simpson's index and its effective number of species (diversity of order 2, or Simpson diversity), and species richness (diversity of order 0).
}
\examples{
\dontrun{
data(DivDemoAbu)
Diversity(DivDemoAbu,datatype="abundance")
}
}
\author{
Anne Chao, K. H. Ma and T. C. Hsieh
}
|
dd039c03f22b935d1d6f6529599d25247fc5a577 | d60a4a66919a8c54d29a4677574b418107b4131d | /man/symplot.Rd | de7cf40b33b82fb0ce0faa523a431593575cf322 | [] | no_license | cran/tsapp | 65203e21a255e832f0ad9471f9ee308793eb7983 | f2679a3d5ee0e3956a4ba013b7879324f77cf95f | refs/heads/master | 2021-11-12T21:18:18.835475 | 2021-10-30T10:30:02 | 2021-10-30T10:30:02 | 248,760,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 361 | rd | symplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smaniplot.r
\name{symplot}
\alias{symplot}
\title{\code{symplot} produces a symmetry plot}
\usage{
symplot(y)
}
\arguments{
\item{y}{the series, a vector or a time series}
}
\description{
\code{symplot} produces a symmetry plot
}
\examples{
\donttest{
data(LYNX)
symplot(LYNX) }
}
|
9ce9338f60fba7733c1a9742f58ac9d97cd2e7f6 | c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab | /man/gtkTableSetColSpacing.Rd | b96ca38ab0543179872e475b73645ce1c5137113 | [] | no_license | cran/RGtk2.10 | 3eb71086e637163c34e372c7c742922b079209e3 | 75aacd92d4b2db7d0942a3a6bc62105163b35c5e | refs/heads/master | 2021-01-22T23:26:26.975959 | 2007-05-05T00:00:00 | 2007-05-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 558 | rd | gtkTableSetColSpacing.Rd | \alias{gtkTableSetColSpacing}
\name{gtkTableSetColSpacing}
\title{gtkTableSetColSpacing}
\description{Alters the amount of space between a given table column and the following
column.}
\usage{gtkTableSetColSpacing(object, column, spacing)}
\arguments{
\item{\code{object}}{[\code{\link{GtkTable}}] a \code{\link{GtkTable}}.}
\item{\code{column}}{[numeric] the column whose spacing should be changed.}
\item{\code{spacing}}{[numeric] number of pixels that the spacing should take up.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
126b253a123de0e41c5a643f758cfa22e04a4422 | db603c24a01b17d275a117b2aa6fc0bc10a6615d | /covid_library_mine.R | fa8e120660a27a87ba2652d23878e413393948a4 | [] | no_license | briesser1/covid_reports | e56bda8d9f8e8303b2cc6a97029379fed9066841 | d995f986ab24e3ad1a201d0701e2dc7283657dd3 | refs/heads/master | 2023-02-15T12:31:37.053098 | 2021-01-01T13:24:16 | 2021-01-01T13:24:16 | 276,073,684 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 725 | r | covid_library_mine.R | library(COVID19)
library(tidyverse)
library(skimr)
cd_city_us <- covid19("US", level = 3)
states_df <- covid19("US", level = 2)
states_df <- states_df %>% ungroup()
skim(states_df)
names(cd_city_us)
df1 <- cd_city_us %>%
filter(administrative_area_level_2 == "South Carolina") %>%
filter(administrative_area_level_3 == "Greenville")
skim(df1)
View(names(df1))
states_df %>%
ggplot(aes(x = date, y = confirmed, colour = administrative_area_level_2)) +
geom_line() +
theme(legend.position = "none")
states_df %>%
filter(date == max(date)) %>%
select(administrative_area_level_2, confirmed) %>%
top_n(5) %>%
ggplot(aes(x = administrative_area_level_2, y = confirmed)) +
geom_col()
|
14f94d67ab5608b940ea664126dd9853aed6dc21 | 37580f0f76b251d1ba217fb30137f7cd7e024d07 | /person_clustering.R | 1c05682e64b4a29bbcd7505901291bf1d9f31a06 | [] | no_license | poletaev/russir-2015-hackathon-on-point-of-interest-recommendation | 6757b000f83961dfdf8280f1c0dc70ffa48188ec | 4c267718c8d21ddc67aca49f82a5c0272b6f2b2b | refs/heads/master | 2021-01-10T20:03:35.332402 | 2015-09-09T03:00:28 | 2015-09-09T03:00:28 | 42,153,788 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,700 | r | person_clustering.R | library(cluster)
genders <- tolower(data$body$person$gender)
genders[genders == "f"] = "female"
person <- data.frame(data$body$person$id)
person$location.name <- tolower(data$body$location[,5])
person$age <- as.integer(data$body$person$age)
person$normalized.age <- apply(person[3], 2,
function(x, na.rm){(x - mean(x, na.rm=na.rm))/mad(x, na.rm=na.rm)}, na.rm=TRUE)[,1]
person$gender <- as.factor(genders)
person$group <- as.factor(tolower(data$body$group))
person$season <- as.factor(tolower(data$body$season))
person$duration <- as.factor(tolower(data$body$duration))
person$trip.type <- as.factor(tolower(data$body$trip_type))
full.profiles <- person[complete.cases(person),]
num.profiles <- dim(person)[1]
num.full.profiles <- dim(full.profiles)[1]
## distances <- daisy(full.profiles[4:9])
distances <- daisy(full.profiles[names(person) %in%
c("genders", "group", "season", "duration", "trip.type")])
# Hierarchical clustering
clusterPersons <- hclust(distances, method = "ward.D")
# Plot the dendrogram
# plot(clusterPersons, main="User clustering")
full.profiles$cluster <- cutree(clusterPersons, k = 5)
colnames(full.profiles) <- c("id", "location.name", "age", "normalized.age",
"gender", "group", "season", "duration", "trip.type",
"cluster")
target.users.clusters <- full.profiles[192:194,
colnames(full.profiles) %in%
c("id", "age", "gender", "group",
"season", "duration", "trip.type",
"cluster")]
|
b8d2613a6d9f0929f309a1d5e26360239f6e29c8 | c262aa9d1819623e627386fd43e61e0d988d405a | /pipeline/scripts/peakFiltering.R | 44fe9f39c0ee3c52f58653a571ea074d12904b39 | [
"MIT"
] | permissive | UMCUGenetics/DIMS | bf818ebefd272f2b4726b9db26b6326a5070911f | dd98c1e4fb3cf8fbe0a08761b6583e7930696e21 | refs/heads/master | 2023-08-08T03:11:34.213700 | 2023-03-28T09:23:11 | 2023-03-28T09:23:11 | 175,600,531 | 1 | 3 | MIT | 2023-08-25T15:27:21 | 2019-03-14T10:34:21 | R | UTF-8 | R | false | false | 1,609 | r | peakFiltering.R | #!/usr/bin/Rscript
.libPaths(new="/hpc/local/CentOS7/dbg_mz/R_libs/3.2.2")
run <- function(indir, outdir, scanmode, thresh, resol, version, scripts) {
# scripts="./scripts"
# outdir="./results"
# version=2.0
# indir="./data"
# scanmode="positive"
# filtering moved to grouping! Only in 2.0
startcol=7
tmp=NULL
rdata = list.files(paste(outdir, "peak_grouping", sep="/"), full.names=TRUE, pattern=paste(scanmode, "*", sep="_"))
for (i in 1:length(rdata)){
load(rdata[i])
tmp=rbind(tmp, outpgrlist)
rm(outpgrlist)
}
outpgrlist=tmp[order(as.numeric(tmp[,"mzmed.pgrp"])),]
# filtering moved to grouping! Only in 2.0
source(paste(scripts, "AddOnFunctions/sourceDir.R", sep="/"))
sourceDir(paste(scripts, "AddOnFunctions", sep="/"))
outlist.single = remove.dupl(outpgrlist) # 4738 => 4735
save(outlist.single, file=paste(outdir, paste("filtered_", scanmode, ".RData", sep=""), sep="/"))
dir.create(paste(outdir, "samplePeaks", sep="/"))
for (i in startcol:ncol(outlist.single)) {
samplePeaks=outlist.single[,i]
names(samplePeaks)=outlist.single[,"mzmed.pgrp"]
save(samplePeaks, file=paste(outdir,"/samplePeaks/", colnames(outlist.single)[i],"_", scanmode, ".RData", sep=""))
}
}
# Script entry point: echo the command-line arguments, then run the
# peak-filtering step.  Expected arguments, in order:
#   indir outdir scanmode thresh resol version scripts
message("\nStart peakFiltering.R")
cat("==> reading arguments:\n", sep = "")
cmd_args <- commandArgs(trailingOnly = TRUE)
for (arg in cmd_args) cat(" ", arg, "\n", sep="")
run(cmd_args[1], cmd_args[2], cmd_args[3], as.numeric(cmd_args[4]), as.numeric(cmd_args[5]), as.numeric(cmd_args[6]), cmd_args[7])
# Fix: final log message previously read "Ready peakFIltering.R" (typo).
message("Ready peakFiltering.R")
|
ac1e30ef34a6440b15541d5807eee38acbdb5c25 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.security.identity/R/identitystore_operations.R | 9f3772c4f85fab9cc4287dd08115610033416815 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 32,514 | r | identitystore_operations.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include identitystore_service.R
NULL
#' Creates a group within the specified identity store
#'
#' @description
#' Creates a group within the specified identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_create_group/](https://www.paws-r-sdk.com/docs/identitystore_create_group/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param DisplayName A string containing the name of the group. This value is commonly
#' displayed when the group is referenced. "Administrator" and
#' "AWSAdministrators" are reserved names and can't be used for users or
#' groups.
#' @param Description A string containing the description of the group.
#'
#' @keywords internal
#'
#' @rdname identitystore_create_group
identitystore_create_group <- function(IdentityStoreId, DisplayName = NULL, Description = NULL) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "CreateGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$create_group_input(
    IdentityStoreId = IdentityStoreId, DisplayName = DisplayName,
    Description = Description
  )
  response_shape <- .identitystore$create_group_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$create_group <- identitystore_create_group
#' Creates a relationship between a member and a group
#'
#' @description
#' Creates a relationship between a member and a group. The following identifiers must be specified: `GroupId`, `IdentityStoreId`, and `MemberId`.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_create_group_membership/](https://www.paws-r-sdk.com/docs/identitystore_create_group_membership/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param GroupId [required] The identifier for a group in the identity store.
#' @param MemberId [required] An object that contains the identifier of a group member. Setting the
#' `UserID` field to the specific identifier for a user indicates that the
#' user is a member of the group.
#'
#' @keywords internal
#'
#' @rdname identitystore_create_group_membership
identitystore_create_group_membership <- function(IdentityStoreId, GroupId, MemberId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "CreateGroupMembership",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$create_group_membership_input(
    IdentityStoreId = IdentityStoreId, GroupId = GroupId, MemberId = MemberId
  )
  response_shape <- .identitystore$create_group_membership_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$create_group_membership <- identitystore_create_group_membership
#' Creates a user within the specified identity store
#'
#' @description
#' Creates a user within the specified identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_create_user/](https://www.paws-r-sdk.com/docs/identitystore_create_user/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param UserName A unique string used to identify the user. The length limit is 128
#' characters. This value can consist of letters, accented characters,
#' symbols, numbers, and punctuation. This value is specified at the time
#' the user is created and stored as an attribute of the user object in the
#' identity store. "Administrator" and "AWSAdministrators" are reserved
#' names and can't be used for users or groups.
#' @param Name An object containing the name of the user.
#' @param DisplayName A string containing the name of the user. This value is typically
#' formatted for display when the user is referenced. For example, "John
#' Doe."
#' @param NickName A string containing an alternate name for the user.
#' @param ProfileUrl A string containing a URL that might be associated with the user.
#' @param Emails A list of `Email` objects containing email addresses associated with the
#' user.
#' @param Addresses A list of `Address` objects containing addresses associated with the
#' user.
#' @param PhoneNumbers A list of `PhoneNumber` objects containing phone numbers associated with
#' the user.
#' @param UserType A string indicating the type of user. Possible values are left
#' unspecified. The value can vary based on your specific use case.
#' @param Title A string containing the title of the user. Possible values are left
#' unspecified. The value can vary based on your specific use case.
#' @param PreferredLanguage A string containing the preferred language of the user. For example,
#' "American English" or "en-us."
#' @param Locale A string containing the geographical region or location of the user.
#' @param Timezone A string containing the time zone of the user.
#'
#' @keywords internal
#'
#' @rdname identitystore_create_user
identitystore_create_user <- function(IdentityStoreId, UserName = NULL, Name = NULL, DisplayName = NULL, NickName = NULL, ProfileUrl = NULL, Emails = NULL, Addresses = NULL, PhoneNumbers = NULL, UserType = NULL, Title = NULL, PreferredLanguage = NULL, Locale = NULL, Timezone = NULL) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "CreateUser",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$create_user_input(
    IdentityStoreId = IdentityStoreId, UserName = UserName, Name = Name,
    DisplayName = DisplayName, NickName = NickName, ProfileUrl = ProfileUrl,
    Emails = Emails, Addresses = Addresses, PhoneNumbers = PhoneNumbers,
    UserType = UserType, Title = Title, PreferredLanguage = PreferredLanguage,
    Locale = Locale, Timezone = Timezone
  )
  response_shape <- .identitystore$create_user_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$create_user <- identitystore_create_user
#' Delete a group within an identity store given GroupId
#'
#' @description
#' Delete a group within an identity store given `GroupId`.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_delete_group/](https://www.paws-r-sdk.com/docs/identitystore_delete_group/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param GroupId [required] The identifier for a group in the identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_delete_group
identitystore_delete_group <- function(IdentityStoreId, GroupId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "DeleteGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$delete_group_input(
    IdentityStoreId = IdentityStoreId, GroupId = GroupId
  )
  response_shape <- .identitystore$delete_group_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$delete_group <- identitystore_delete_group
#' Delete a membership within a group given MembershipId
#'
#' @description
#' Delete a membership within a group given `MembershipId`.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_delete_group_membership/](https://www.paws-r-sdk.com/docs/identitystore_delete_group_membership/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param MembershipId [required] The identifier for a `GroupMembership` in an identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_delete_group_membership
identitystore_delete_group_membership <- function(IdentityStoreId, MembershipId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "DeleteGroupMembership",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$delete_group_membership_input(
    IdentityStoreId = IdentityStoreId, MembershipId = MembershipId
  )
  response_shape <- .identitystore$delete_group_membership_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$delete_group_membership <- identitystore_delete_group_membership
#' Deletes a user within an identity store given UserId
#'
#' @description
#' Deletes a user within an identity store given `UserId`.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_delete_user/](https://www.paws-r-sdk.com/docs/identitystore_delete_user/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param UserId [required] The identifier for a user in the identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_delete_user
identitystore_delete_user <- function(IdentityStoreId, UserId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "DeleteUser",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$delete_user_input(
    IdentityStoreId = IdentityStoreId, UserId = UserId
  )
  response_shape <- .identitystore$delete_user_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$delete_user <- identitystore_delete_user
#' Retrieves the group metadata and attributes from GroupId in an identity
#' store
#'
#' @description
#' Retrieves the group metadata and attributes from `GroupId` in an identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_describe_group/](https://www.paws-r-sdk.com/docs/identitystore_describe_group/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store, such as
#' `d-1234567890`. In this example, `d-` is a fixed prefix, and
#' `1234567890` is a randomly generated string that contains numbers and
#' lower case letters. This value is generated at the time that a new
#' identity store is created.
#' @param GroupId [required] The identifier for a group in the identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_describe_group
identitystore_describe_group <- function(IdentityStoreId, GroupId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "DescribeGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$describe_group_input(
    IdentityStoreId = IdentityStoreId, GroupId = GroupId
  )
  response_shape <- .identitystore$describe_group_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$describe_group <- identitystore_describe_group
#' Retrieves membership metadata and attributes from MembershipId in an
#' identity store
#'
#' @description
#' Retrieves membership metadata and attributes from `MembershipId` in an identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_describe_group_membership/](https://www.paws-r-sdk.com/docs/identitystore_describe_group_membership/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param MembershipId [required] The identifier for a `GroupMembership` in an identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_describe_group_membership
identitystore_describe_group_membership <- function(IdentityStoreId, MembershipId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "DescribeGroupMembership",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$describe_group_membership_input(
    IdentityStoreId = IdentityStoreId, MembershipId = MembershipId
  )
  response_shape <- .identitystore$describe_group_membership_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$describe_group_membership <- identitystore_describe_group_membership
#' Retrieves the user metadata and attributes from the UserId in an
#' identity store
#'
#' @description
#' Retrieves the user metadata and attributes from the `UserId` in an identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_describe_user/](https://www.paws-r-sdk.com/docs/identitystore_describe_user/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store, such as
#' `d-1234567890`. In this example, `d-` is a fixed prefix, and
#' `1234567890` is a randomly generated string that contains numbers and
#' lower case letters. This value is generated at the time that a new
#' identity store is created.
#' @param UserId [required] The identifier for a user in the identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_describe_user
identitystore_describe_user <- function(IdentityStoreId, UserId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "DescribeUser",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$describe_user_input(
    IdentityStoreId = IdentityStoreId, UserId = UserId
  )
  response_shape <- .identitystore$describe_user_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$describe_user <- identitystore_describe_user
#' Retrieves GroupId in an identity store
#'
#' @description
#' Retrieves `GroupId` in an identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_get_group_id/](https://www.paws-r-sdk.com/docs/identitystore_get_group_id/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param AlternateIdentifier [required] A unique identifier for a user or group that is not the primary
#' identifier. This value can be an identifier from an external identity
#' provider (IdP) that is associated with the user, the group, or a unique
#' attribute. For the unique attribute, the only valid path is
#' `displayName`.
#'
#' @keywords internal
#'
#' @rdname identitystore_get_group_id
identitystore_get_group_id <- function(IdentityStoreId, AlternateIdentifier) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "GetGroupId",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$get_group_id_input(
    IdentityStoreId = IdentityStoreId, AlternateIdentifier = AlternateIdentifier
  )
  response_shape <- .identitystore$get_group_id_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$get_group_id <- identitystore_get_group_id
#' Retrieves the MembershipId in an identity store
#'
#' @description
#' Retrieves the `MembershipId` in an identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_get_group_membership_id/](https://www.paws-r-sdk.com/docs/identitystore_get_group_membership_id/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param GroupId [required] The identifier for a group in the identity store.
#' @param MemberId [required] An object that contains the identifier of a group member. Setting the
#' `UserID` field to the specific identifier for a user indicates that the
#' user is a member of the group.
#'
#' @keywords internal
#'
#' @rdname identitystore_get_group_membership_id
identitystore_get_group_membership_id <- function(IdentityStoreId, GroupId, MemberId) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "GetGroupMembershipId",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$get_group_membership_id_input(
    IdentityStoreId = IdentityStoreId, GroupId = GroupId, MemberId = MemberId
  )
  response_shape <- .identitystore$get_group_membership_id_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$get_group_membership_id <- identitystore_get_group_membership_id
#' Retrieves the UserId in an identity store
#'
#' @description
#' Retrieves the `UserId` in an identity store.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_get_user_id/](https://www.paws-r-sdk.com/docs/identitystore_get_user_id/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param AlternateIdentifier [required] A unique identifier for a user or group that is not the primary
#' identifier. This value can be an identifier from an external identity
#' provider (IdP) that is associated with the user, the group, or a unique
#' attribute. For the unique attribute, the only valid paths are `userName`
#' and `emails.value`.
#'
#' @keywords internal
#'
#' @rdname identitystore_get_user_id
identitystore_get_user_id <- function(IdentityStoreId, AlternateIdentifier) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "GetUserId",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$get_user_id_input(
    IdentityStoreId = IdentityStoreId, AlternateIdentifier = AlternateIdentifier
  )
  response_shape <- .identitystore$get_user_id_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$get_user_id <- identitystore_get_user_id
#' Checks the user's membership in all requested groups and returns if the
#' member exists in all queried groups
#'
#' @description
#' Checks the user's membership in all requested groups and returns if the member exists in all queried groups.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_is_member_in_groups/](https://www.paws-r-sdk.com/docs/identitystore_is_member_in_groups/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param MemberId [required] An object containing the identifier of a group member.
#' @param GroupIds [required] A list of identifiers for groups in the identity store.
#'
#' @keywords internal
#'
#' @rdname identitystore_is_member_in_groups
identitystore_is_member_in_groups <- function(IdentityStoreId, MemberId, GroupIds) {
  # Describe the API operation, serialize the input shape, then dispatch the
  # call through the shared paws request pipeline.
  operation <- new_operation(
    name = "IsMemberInGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  request_body <- .identitystore$is_member_in_groups_input(
    IdentityStoreId = IdentityStoreId, MemberId = MemberId, GroupIds = GroupIds
  )
  response_shape <- .identitystore$is_member_in_groups_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$is_member_in_groups <- identitystore_is_member_in_groups
#' For the specified group in the specified identity store, returns the
#' list of all GroupMembership objects and returns results in paginated
#' form
#'
#' @description
#' For the specified group in the specified identity store, returns the list of all `GroupMembership` objects and returns results in paginated form.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_list_group_memberships/](https://www.paws-r-sdk.com/docs/identitystore_list_group_memberships/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param GroupId [required] The identifier for a group in the identity store.
#' @param MaxResults The maximum number of results to be returned per request. This parameter
#' is used in all `List` requests to specify how many results to return in
#' one page.
#' @param NextToken The pagination token used for the
#' [`list_users`][identitystore_list_users],
#' [`list_groups`][identitystore_list_groups] and
#' [`list_group_memberships`][identitystore_list_group_memberships] API
#' operations. This value is generated by the identity store service. It is
#' returned in the API response if the total results are more than the size
#' of one page. This token is also returned when it is used in the API
#' request to search for the next page.
#'
#' @keywords internal
#'
#' @rdname identitystore_list_group_memberships
identitystore_list_group_memberships <- function(IdentityStoreId, GroupId, MaxResults = NULL, NextToken = NULL) {
  # Describe the API operation (including its pagination contract), serialize
  # the input shape, then dispatch the call through the shared paws pipeline.
  operation <- new_operation(
    name = "ListGroupMemberships",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "GroupMemberships")
  )
  request_body <- .identitystore$list_group_memberships_input(
    IdentityStoreId = IdentityStoreId, GroupId = GroupId,
    MaxResults = MaxResults, NextToken = NextToken
  )
  response_shape <- .identitystore$list_group_memberships_output()
  client <- .identitystore$service(get_config())
  resp <- send_request(new_request(client, operation, request_body, response_shape))
  return(resp)
}
.identitystore$operations$list_group_memberships <- identitystore_list_group_memberships
#' For the specified member in the specified identity store, returns the
#' list of all GroupMembership objects and returns results in paginated
#' form
#'
#' @description
#' For the specified member in the specified identity store, returns the list of all `GroupMembership` objects and returns results in paginated form.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_list_group_memberships_for_member/](https://www.paws-r-sdk.com/docs/identitystore_list_group_memberships_for_member/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param MemberId [required] An object that contains the identifier of a group member. Setting the
#' `UserID` field to the specific identifier for a user indicates that the
#' user is a member of the group.
#' @param MaxResults The maximum number of results to be returned per request. This parameter
#' is used in the [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] requests to specify how many
#' results to return in one page. The length limit is 50 characters.
#' @param NextToken The pagination token used for the
#' [`list_users`][identitystore_list_users],
#' [`list_groups`][identitystore_list_groups], and
#' [`list_group_memberships`][identitystore_list_group_memberships] API
#' operations. This value is generated by the identity store service. It is
#' returned in the API response if the total results are more than the size
#' of one page. This token is also returned when it is used in the API
#' request to search for the next page.
#'
#' @keywords internal
#'
#' @rdname identitystore_list_group_memberships_for_member
identitystore_list_group_memberships_for_member <- function(IdentityStoreId, MemberId, MaxResults = NULL, NextToken = NULL) {
  # Wire-level description of ListGroupMembershipsForMember, paginated over
  # NextToken with GroupMemberships as the accumulated result key.
  op <- new_operation(
    name = "ListGroupMembershipsForMember",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "GroupMemberships")
  )
  # Marshal the request payload and the response template.
  input <- .identitystore$list_group_memberships_for_member_input(IdentityStoreId = IdentityStoreId, MemberId = MemberId, MaxResults = MaxResults, NextToken = NextToken)
  output <- .identitystore$list_group_memberships_for_member_output()
  # Build a service handle from the ambient config and issue the call.
  svc <- .identitystore$service(get_config())
  send_request(new_request(svc, op, input, output))
}
.identitystore$operations$list_group_memberships_for_member <- identitystore_list_group_memberships_for_member
#' Lists all groups in the identity store
#'
#' @description
#' Lists all groups in the identity store. Returns a paginated list of complete `Group` objects. Filtering for a `Group` by the `DisplayName` attribute is deprecated. Instead, use the [`get_group_id`][identitystore_get_group_id] API action.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_list_groups/](https://www.paws-r-sdk.com/docs/identitystore_list_groups/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store, such as
#' `d-1234567890`. In this example, `d-` is a fixed prefix, and
#' `1234567890` is a randomly generated string that contains numbers and
#' lower case letters. This value is generated at the time that a new
#' identity store is created.
#' @param MaxResults The maximum number of results to be returned per request. This parameter
#' is used in the [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] requests to specify how many
#' results to return in one page. The length limit is 50 characters.
#' @param NextToken The pagination token used for the
#' [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] API operations. This value is
#' generated by the identity store service. It is returned in the API
#' response if the total results are more than the size of one page. This
#' token is also returned when it is used in the API request to search for
#' the next page.
#' @param Filters A list of `Filter` objects, which is used in the
#' [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] requests.
#'
#' @keywords internal
#'
#' @rdname identitystore_list_groups
identitystore_list_groups <- function(IdentityStoreId, MaxResults = NULL, NextToken = NULL, Filters = NULL) {
  # Wire-level description of ListGroups, paginated over NextToken with
  # Groups as the accumulated result key.
  op <- new_operation(
    name = "ListGroups",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "Groups")
  )
  # Marshal the request payload and the response template.
  input <- .identitystore$list_groups_input(IdentityStoreId = IdentityStoreId, MaxResults = MaxResults, NextToken = NextToken, Filters = Filters)
  output <- .identitystore$list_groups_output()
  # Build a service handle from the ambient config and issue the call.
  svc <- .identitystore$service(get_config())
  send_request(new_request(svc, op, input, output))
}
.identitystore$operations$list_groups <- identitystore_list_groups
#' Lists all users in the identity store
#'
#' @description
#' Lists all users in the identity store. Returns a paginated list of complete `User` objects. Filtering for a `User` by the `UserName` attribute is deprecated. Instead, use the [`get_user_id`][identitystore_get_user_id] API action.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_list_users/](https://www.paws-r-sdk.com/docs/identitystore_list_users/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store, such as
#' `d-1234567890`. In this example, `d-` is a fixed prefix, and
#' `1234567890` is a randomly generated string that contains numbers and
#' lower case letters. This value is generated at the time that a new
#' identity store is created.
#' @param MaxResults The maximum number of results to be returned per request. This parameter
#' is used in the [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] requests to specify how many
#' results to return in one page. The length limit is 50 characters.
#' @param NextToken The pagination token used for the
#' [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] API operations. This value is
#' generated by the identity store service. It is returned in the API
#' response if the total results are more than the size of one page. This
#' token is also returned when it is used in the API request to search for
#' the next page.
#' @param Filters A list of `Filter` objects, which is used in the
#' [`list_users`][identitystore_list_users] and
#' [`list_groups`][identitystore_list_groups] requests.
#'
#' @keywords internal
#'
#' @rdname identitystore_list_users
identitystore_list_users <- function(IdentityStoreId, MaxResults = NULL, NextToken = NULL, Filters = NULL) {
  # Wire-level description of ListUsers, paginated over NextToken with
  # Users as the accumulated result key.
  op <- new_operation(
    name = "ListUsers",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "Users")
  )
  # Marshal the request payload and the response template.
  input <- .identitystore$list_users_input(IdentityStoreId = IdentityStoreId, MaxResults = MaxResults, NextToken = NextToken, Filters = Filters)
  output <- .identitystore$list_users_output()
  # Build a service handle from the ambient config and issue the call.
  svc <- .identitystore$service(get_config())
  send_request(new_request(svc, op, input, output))
}
.identitystore$operations$list_users <- identitystore_list_users
#' For the specified group in the specified identity store, updates the
#' group metadata and attributes
#'
#' @description
#' For the specified group in the specified identity store, updates the group metadata and attributes.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_update_group/](https://www.paws-r-sdk.com/docs/identitystore_update_group/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param GroupId [required] The identifier for a group in the identity store.
#' @param Operations [required] A list of `AttributeOperation` objects to apply to the requested group.
#' These operations might add, replace, or remove an attribute.
#'
#' @keywords internal
#'
#' @rdname identitystore_update_group
identitystore_update_group <- function(IdentityStoreId, GroupId, Operations) {
  # UpdateGroup is a plain (non-paginated) mutation, hence the empty
  # paginator list.
  op <- new_operation(
    name = "UpdateGroup",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request payload and the response template.
  input <- .identitystore$update_group_input(IdentityStoreId = IdentityStoreId, GroupId = GroupId, Operations = Operations)
  output <- .identitystore$update_group_output()
  # Build a service handle from the ambient config and issue the call.
  svc <- .identitystore$service(get_config())
  send_request(new_request(svc, op, input, output))
}
.identitystore$operations$update_group <- identitystore_update_group
#' For the specified user in the specified identity store, updates the user
#' metadata and attributes
#'
#' @description
#' For the specified user in the specified identity store, updates the user metadata and attributes.
#'
#' See [https://www.paws-r-sdk.com/docs/identitystore_update_user/](https://www.paws-r-sdk.com/docs/identitystore_update_user/) for full documentation.
#'
#' @param IdentityStoreId [required] The globally unique identifier for the identity store.
#' @param UserId [required] The identifier for a user in the identity store.
#' @param Operations [required] A list of `AttributeOperation` objects to apply to the requested user.
#' These operations might add, replace, or remove an attribute.
#'
#' @keywords internal
#'
#' @rdname identitystore_update_user
identitystore_update_user <- function(IdentityStoreId, UserId, Operations) {
  # UpdateUser is a plain (non-paginated) mutation, hence the empty
  # paginator list.
  op <- new_operation(
    name = "UpdateUser",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the request payload and the response template.
  input <- .identitystore$update_user_input(IdentityStoreId = IdentityStoreId, UserId = UserId, Operations = Operations)
  output <- .identitystore$update_user_output()
  # Build a service handle from the ambient config and issue the call.
  svc <- .identitystore$service(get_config())
  send_request(new_request(svc, op, input, output))
}
.identitystore$operations$update_user <- identitystore_update_user
|
fdf73d7fb95ce3fac05bb58be51d34bba1395b76 | 43c872cf62f47b010cb81761f8bb7e49b922b236 | /backend.R | 1c9ba0d2982a0bc0382668eadb0234883396d042 | [] | no_license | mmsee/PAT-seq-explorer | 4042d15b75dda169a24ec58b3ee972316661276d | 2b0e6356783f9498205f73dd88bca1959a2a0996 | refs/heads/master | 2021-01-17T07:36:31.218721 | 2015-12-22T03:54:12 | 2015-12-22T03:54:12 | 48,205,366 | 0 | 0 | null | 2015-12-18T00:23:27 | 2015-12-18T00:23:26 | null | UTF-8 | R | false | false | 17,696 | r | backend.R | # Returns a list of bam files from the nominated directory
# NOTE(review): hard-coded, machine-specific working directory; every relative
# path downstream depends on it. Prefer paths anchored to the app directory.
setwd("/data/home/apattison/ShinyApps/andrew/")
find_bam_files <- function(file_path) {
  # List the BAM samples for a project directory. A plotter-config.json in
  # the directory takes precedence over a raw *.bam scan.
  config_path <- paste0(file_path, "/", "plotter-config.json")
  if (file.exists(config_path)) {
    fromJSON(config_path)$samples
  } else {
    list.files(paste(file_path), pattern = '*.bam$')
  }
}
# Returns a list of gff files from the nominated directory
find_gff_files <- function(file_path) {
  # List the peak (GFF) files for a project directory. A plotter-config.json
  # in the directory takes precedence over a raw *.gff scan.
  config_path <- paste0(file_path, "/", "plotter-config.json")
  if (file.exists(config_path)) {
    fromJSON(config_path)$peaks
  } else {
    list.files(paste(file_path), pattern = '*.gff$')
  }
}
# Coerce the selected value(s) to character, element-wise.
selected_data <- function(data) {
  paste(data)
}
# Outpus the rows matching the input gene or peak name
filter_gff_for_rows <- function(gff, names) {
  # Return the GFF rows whose Information attribute mentions any of the
  # space-separated gene/peak names, tagging each row with the name that
  # matched (column input_gene_or_peak). Errors if a name has no match.
  matched <- data.frame()
  for (gene in strsplit(names, split = " ")[[1]]) {
    info <- gff[, "Information"]
    # A hit is either a delimited occurrence ("=NAME;" / "/NAME," style) or
    # "=NAME" at the very end of the attribute string.
    delimited <- grepl(paste0("[=/]{1}", gene, "[;/,]"), info, ignore.case = TRUE)
    terminal <- grepl(paste0("=", gene, "$"), info, ignore.case = TRUE)
    hits <- gff[delimited | terminal, ]
    if (nrow(hits) == 0) {
      stop('There are no reads this gene/peak in your selected samples')
    }
    hits$input_gene_or_peak <- gene
    matched <- rbind(matched, hits)
  }
  matched
}
# This function gets the poly (A) counts for all given gff rows
get_a_counts <- function(bam_file_path, gff_rows, bam_files, groups, names_from_json){
  # Collect per-read poly(A) statistics for every requested peak (one GFF
  # row each) across all selected BAM files, tagging each chunk with the
  # gene/peak name the user originally typed (gene_or_peak_name column).
  reads_report <- data.frame()
  # BUG FIX: 1:nrow(gff_rows) evaluates to c(1, 0) when gff_rows is empty;
  # seq_len() correctly yields an empty loop in that case.
  for (gff_row in seq_len(nrow(gff_rows))){
    counts_frame <- get_a_counts_gff_row(bam_file_path, gff_rows[gff_row, ],
                                         bam_files, groups, names_from_json)
    if (nrow(counts_frame) == 0){
      next
    }
    counts_frame$gene_or_peak_name <- gff_rows[gff_row, 'input_gene_or_peak']
    reads_report <- rbind(reads_report, counts_frame)
  }
  return(reads_report)
}
# Parses the BAM files for eah GFF file entry that we are given
# Extract all reads overlapping a single GFF peak from each selected BAM
# file, returning one data frame with columns qname/strand/pos/width/
# sequence/number_of_as/number_of_ad_bases plus sample and group labels.
# Stops with an error if any BAM file has no reads in the peak's range.
# NOTE(review): the AN/AD BAM tags presumably hold the poly(A)-base and
# adapter-base counts produced by the PAT-seq pipeline — confirm.
get_a_counts_gff_row <- function(bam_file_path,peak, bam_files, groups,names_from_json){
  # ori == TRUE means "select minus-strand reads" for scanBamFlag below.
  if (peak[,"Orientation"]== "-"){
    ori <- TRUE
  }
  else{
    ori <- FALSE
  }
  bam_frame <- data.frame()
  count <- 1
  for (bam_file in bam_files){
    # An absolute path (leading "/") is used verbatim; otherwise the file is
    # resolved relative to bam_file_path.
    if (substring(bam_file,1,1)=="/"){
      full_file_path <-bam_file
    }
    else{
      full_file_path <-paste(bam_file_path,"/", bam_file, sep ="")
    }
    param <- ScanBamParam(what=c('qname','pos','qwidth','strand', 'seq'),
                          tag=c('AN','AD'),flag=scanBamFlag(isMinusStrand=ori) ,
                          which=GRanges(peak [,'Chromosome'],IRanges(
                            peak[,'Peak_Start'], peak[,'Peak_End'] )))
    #Grabs reads overlapping the range specified by the gff row
    # NOTE(review): isMinusStrand is already encoded in the flag above; the
    # extra argument to scanBam() looks redundant — confirm it is harmless.
    result <- scanBam (full_file_path , param = param, isMinusStrand = ori)
    # A check to make sure the adapter bases column is present.
    #If not, I make a fake one of 0s.
    # result[[1]][[5]] is the 'seq' field (5th entry of `what`); [[6]] is the
    # tag list holding AN ([[1]]) and AD ([[2]]).
    if (length(result [[1]][[6]][[1]])!= length(result [[1]][[5]])){
      result [[1]][[6]][[1]] <- rep(0, length(result [[1]][[5]]))
    }
    if (length(result [[1]][[6]][[2]])!= length(result [[1]][[5]])){
      result [[1]][[6]][[2]] <- rep(0, length(result [[1]][[5]]))
    }
    # DNAStringSet -> plain character so data.frame() below behaves.
    result[[1]][["seq"]] <- as.character(result[[1]][["seq"]])
    if (length(result [[1]][[5]]) == 0){
      stop(paste('There are no reads for at least one peak in ', bam_file))
    }
    single_bam_frame <- data.frame(result)
    colnames(single_bam_frame)<- c("qname", "strand", "pos",
                                   "width", "sequence", "number_of_as", "number_of_ad_bases")
    #If the read is on the forward strand, add width to pos to obtain 3' end.
    if (ori == FALSE ){
      single_bam_frame$pos <- single_bam_frame$pos+ single_bam_frame$width
    }
    # Keep only reads whose 3' end falls inside the peak boundaries.
    single_bam_frame <- single_bam_frame[single_bam_frame$pos >=
                                           peak[,'Peak_Start']&
                                           single_bam_frame$pos <= peak[,'Peak_End'] ,]
    if (nrow(single_bam_frame) == 0){
      next
    }
    # Absolute-path BAMs are labelled via the JSON config's display name;
    # relative ones just use the file name.
    if (substring(bam_file,1,1)=="/"){
      single_bam_frame$sample <- names_from_json$name [names_from_json$bam ==paste(bam_file)]
    }
    else{
      single_bam_frame$sample <- paste(bam_file)
    }
    single_bam_frame$group<- paste("group", groups[count])
    bam_frame <- rbind(bam_frame,single_bam_frame)
    count <- count +1
  }
  return(bam_frame)
}
#Strips whitespace out of names
# Strip leading and trailing whitespace from each element of x (internal
# whitespace is preserved).
trim <- function(x) {
  gsub("^\\s+|\\s+$", "", x)
}
#
# Build the human-readable read-count summary lines shown in the app: one
# string per sample (or group) and gene/peak combination. `s_frame` is a
# list of data frames (one per sample/group), `groups` selects group vs
# sample wording, and `all_reads` selects whether the "with a poly (A)-tail"
# qualifier is included.
# NOTE(review): the groups branch says "The number of reads " (trailing
# space) while the sample branch omits it, so grouped output carries a
# double space before the qualifier — presumably unintentional, but the
# exact output strings are preserved here.
names_string <- function(s_frame, groups, all_reads){
  # The data frame is split by samples here
  to_print <- character()
  for (frame in s_frame){
    #The data frame is split into genes here
    split_peaks <- split(frame ,frame$gene_or_peak_name, drop =T)
    for (peak_frame in split_peaks){
      if (all_reads == T){
        tail_reads <- " "
      }
      else{
        tail_reads <- " with a poly (A)-tail "
      }
      if (groups == T){
        str <- paste("The number of reads ",tail_reads,"for ", peak_frame$group[1]," ",
                     peak_frame$gene_or_peak_name[1], " is: ",nrow(peak_frame),".", "\n", sep ="")
      }
      else{
        str <- paste("The number of reads",tail_reads,"for ", peak_frame$sample[1]," ",
                     peak_frame$gene_or_peak_name[1], " is: ",nrow(peak_frame),".", "\n", sep ="")
      }
      to_print <- c(to_print, str)
    }
  }
  return(to_print)
}
# Handles overlapping peaks in the gff file
# Read a GFF peak file, shift peaks 12 bp toward the 3' end (+12 on the
# plus strand, -12 on the minus strand — presumably to account for primer
# trimming; confirm against the PAT-seq pipeline), then trim overlapping
# neighbours on the same chromosome/strand so peaks never overlap. The
# result is cached to a hard-coded dump directory keyed by `name` and
# reused on subsequent calls. Despite the name, nothing is modified in
# place — a new data frame is returned.
# NOTE(review): the cache path is machine-specific; see the "will new to
# become getwd" comment below — that migration was never finished.
modify_gff_inplace <- function (gff_file, name) {
  saved_gff <- paste0("/data/home/apattison/ShinyApps/dev/PAT-seq-explorer/gff_dump/",
                      name, ".gff")
  # Fast path: a previously computed (shifted + de-overlapped) GFF exists.
  if (file.exists(saved_gff)){
    gff <- read.delim(saved_gff, header=T,
                      comment.char="",stringsAsFactors=F)
    return(gff)
  }
  start_gff_file <- read.delim(gff_file, header=FALSE,
                               comment.char="",stringsAsFactors=F)
  colnames(start_gff_file)<- c('Chromosome', 'Generated_By', 'Feature_Type',
                               'Peak_Start','Peak_End','-',
                               'Orientation', '--','Information')
  # Plus strand: shift both coordinates +12, then sort so neighbours are
  # adjacent for the overlap pass below.
  plus_frame <- start_gff_file[start_gff_file[,'Orientation'] == '+',]
  plus_frame [,c('Peak_Start', 'Peak_End')] <- plus_frame [,c('Peak_Start', 'Peak_End')]+12
  plus_reads <- plus_frame[
    with(plus_frame,order(
      Chromosome,Orientation,Peak_Start)
    ),
    ]
  # Minus strand: shift both coordinates -12 and sort likewise.
  minus_frame <- start_gff_file[start_gff_file[,'Orientation'] == '-',]
  minus_frame [,c('Peak_Start', 'Peak_End')] <- minus_frame [,c('Peak_Start', 'Peak_End')]-12
  minus_reads<- minus_frame[
    with(minus_frame,order(
      Chromosome,Peak_Start)
    ),
    ]
  # Plus strand: if a peak starts inside its predecessor, move its start to
  # one base past the predecessor's end.
  for (row in 1:nrow(plus_reads)){
    if (row == 1){
      next
    }
    if (plus_reads[row, 'Chromosome'] != plus_reads[row-1,'Chromosome']){
      next
    }
    if (plus_reads[row,'Peak_Start'] <= plus_reads[row-1,'Peak_End']){
      plus_reads[row,'Peak_Start'] <-
        plus_reads[row-1,'Peak_End']+1
    }
  }
  # Minus strand: symmetric pass — trim a peak's end back to one base before
  # the next peak's start.
  for (row in 1:nrow(minus_reads)){
    if (row==nrow(minus_reads)){
      next
    }
    if (minus_reads[row, 'Chromosome'] != minus_reads[row+1,'Chromosome']){
      next
    }
    if (minus_reads[row,'Peak_End'] >= minus_reads[row+1,'Peak_Start']){
      minus_reads[row,'Peak_End'] <- minus_reads[row+1,'Peak_Start']-1
    }
  }
  new_frame <- rbind(plus_reads, minus_reads)
  #will new to become getwd
  # Persist the processed frame so later calls hit the cache above.
  write.table(x= new_frame, file =saved_gff , append = F,
              quote = F, sep = "\t", row.names = F, col.names = T)
  return(new_frame)
}
# Makes the menas and medians frame shown in info tab of the app
make_means_and_meds_frame <- function(poly_a_counts) {
  # Summarise poly(A)-tail lengths as a mean/median table for the app's
  # info tab. When no grouping was requested upstream the "group" column
  # holds the literal sentinel "group NULL"; in that case we summarise per
  # sample x gene/peak, otherwise per group. (Values end up as character
  # columns because each row is assembled via cbind(); the original layout
  # is preserved.)
  ungrouped <- poly_a_counts[1, "group"] == "group NULL"
  if (ungrouped) {
    chunks <- split(poly_a_counts,
                    list(poly_a_counts$sample, poly_a_counts$gene_or_peak_name))
  } else {
    chunks <- split(poly_a_counts, poly_a_counts$group)
  }
  mm_frame <- data.frame()
  for (chunk in chunks) {
    avg <- mean(chunk$number_of_as, na.rm = TRUE)
    med <- median(chunk$number_of_as, na.rm = TRUE)
    if (ungrouped) {
      label <- paste(chunk[1, "sample"], chunk[1, "gene_or_peak_name"])
    } else {
      label <- chunk[1, "group"]
    }
    mm_frame <- rbind(mm_frame, cbind(label, avg, med))
  }
  colnames(mm_frame) <- c("Sample Name", "Mean Poly (A)-Tail Length", "Median Poly (A)-Tail Length")
  mm_frame
}
# Draw reverse-cumulative-distribution curves of poly(A)-tail length, one
# curve per sample (or group) x gene/peak. The first white curve only sets
# up the axes; real curves are overlaid with add = TRUE.
# NOTE(review): `split_peak` and `colours` leak out of the loops, so the
# optional legend reflects the LAST sample's peak set and palette — fine
# when all samples share the same peaks, otherwise misleading. Confirm.
poly_a_plot <- function (processed_frame, ranges,names, leg = F,group = F){
  new_frame <- processed_frame
  if (group == T){
    samples <- split(new_frame, new_frame$group, drop =T)
  }
  else {
    samples <- split(new_frame, new_frame$sample, drop =T)
  }
  # Dummy ECDF just to establish the plotting region and axes.
  dummy_ecdf <- ecdf(1:10)
  curve((-1*dummy_ecdf(x)*100)+100, from=ranges[1], to=ranges[2],
        col="white", xlim=ranges, main= paste(names),
        axes=F, xlab= 'Poly (A) tail length', ylab = 'Percent population (%)', ylim =c(0,100))
  axis(1, pos=0, tick = 25)
  axis(2, pos= 0, at= c(0,25,50,75,100), tick = 25)
  count <- 1
  for (df in samples){
    split_peak <- split(df,df$gene_or_peak_name, drop =T)
    for(gene_or_peak in split_peak){
      colours <- rainbow(length(samples)*length(split_peak))
      # Reverse ECDF: % of reads with a tail at least this long.
      ecdf_a <- ecdf(gene_or_peak[,"number_of_as"])
      curve((-1*ecdf_a(x)*100)+100, from=ranges[1], to=ranges[2],
            col=colours[count], xlim=ranges, main= paste(names),
            add=T)
      count <- count +1
    }
  }
  # This loop makes a list for the legend.
  leg_names <- list()
  for (name in names(samples)){
    leg_names <- c(leg_names, paste(name, names(split_peak)))
  }
  if (leg ==T){
    # x_offset is computed but no longer used now that the legend is
    # anchored at "topright".
    x_offset <- length(strsplit(paste(leg_names), "")[[1]])
    legend("topright",
           legend = leg_names, fill = colours, bty ="n")
  }
}
get_genomic_seq <- function(chr, start, end) {
  # getSeq()/Hsapiens come from the BSgenome human genome package; coerce
  # the DNAString result to a plain character string for downstream use.
  as.character(getSeq(Hsapiens, chr, start, end))
}
igv_plot <- function (processed_frame, ranges, names, leg, group = F,
                      order_alt = T, alt_cumu_dis, show_poly_a = F, poly_a_pileup = T, gffin){
  # IGV-style read pile-up built with ggplot2: one horizontal segment per
  # aligned read, faceted by sample (or by group), with optional poly(A)
  # tail extensions and a red bar marking the peak itself.
  #
  # NOTE(review): `ranges`, `leg`, `order_alt`, `alt_cumu_dis` and
  # `poly_a_pileup` are accepted but unused here; kept for call-site
  # compatibility with the other plotting functions.
  start <- gffin[1, "Peak_Start"]
  end <- gffin[1, "Peak_End"] + 300   # pad the window 300 bp past the peak
  chr <- gffin[1, "Chromosome"]
  new_frame <- processed_frame
  # Sort reads within each facet by strand-aware 3' end and, as a tie-break,
  # by descending poly(A) length, so the stack looks ordered.
  if (gffin[1, "Orientation"] == "-"){
    new_frame <- new_frame[
      with(new_frame, order(group, sample, -pos + width, -number_of_as)),
      ]
  }
  else{
    new_frame <- new_frame[
      with(new_frame, order(group, sample, pos + width, -number_of_as)),
      ]
  }
  if (group == T){
    group_status <- "group"
    samples <- split(new_frame, new_frame$group, drop = T)
  }
  else {
    group_status <- "sample"
    samples <- split(new_frame, new_frame$sample, drop = T)
  }
  # Each read gets its own y position (1..n within its facet).
  count <- list()
  for (sample in samples){
    count <- c(count, 1:nrow(sample))
  }
  new_frame$count <- as.numeric(unlist(count))
  # Derive segment endpoints. On the minus strand the 3' end is pos and the
  # poly(A) tail extends leftwards from the alignment start.
  if (gffin[1, "Orientation"] == "-"){
    new_frame$pos <- new_frame$pos + new_frame$width
    new_frame$bam_read_ends <- new_frame$pos - new_frame$width
    new_frame$poly_a_extension <- new_frame$bam_read_ends - new_frame$number_of_as
  }
  else{
    new_frame$bam_read_ends <- new_frame[, "pos"] - new_frame[, "width"]
    new_frame$poly_a_extension <- new_frame$pos + new_frame$number_of_as
  }
  rt <- ggplot(data = new_frame, aes(x = pos, y = count)) +
    facet_wrap(as.formula(paste("~", group_status)), ncol = 2) +
    geom_segment(aes(x = pos, xend = bam_read_ends, y = count,
                     yend = count, colour = "Aligned reads")) +
    xlab(paste(names, "\n", "chr", chr, "\n", start, "to", end)) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_blank(), axis.line = element_line(colour = "black"),
          strip.background = element_blank()) +
    ylab("Number of reads") +
    theme(axis.text.x = element_text(colour = "black"),
          axis.text.y = element_text(colour = "black")) +
    # BUG FIX: the legend key was misspelled "Alligned reads"; it is a
    # user-visible label, so it is corrected in both places it occurs.
    scale_colour_manual(values = c("Aligned reads" = "green", "Poly (A) tail" = "blue"))
  if (show_poly_a == T){
    if (gffin[1, "Orientation"] == "+"){
      rt = rt + geom_segment(aes(x = pos, xend = poly_a_extension, y = count,
                                 yend = count, colour = "Poly (A) tail"))
    }
    else{
      rt = rt + geom_segment(aes(x = bam_read_ends, xend = poly_a_extension, y = count,
                                 yend = count, colour = "Poly (A) tail"))
    }
    # BUG FIX: this statement was missing its closing parenthesis (a hard
    # parse error for the whole file) and read from an undefined object
    # `gff`; it now uses the `gffin` argument.
    rm <- regmatches(gffin[, 9], regexpr("Name=[^;\\s]+", gffin[, 9], perl = T))
    # NOTE(review): names_list is computed but never drawn — the geom_text
    # layer that would render peak names was never finished.
    names_list <- gsub(x = rm, pattern = "(id=)", replacement = "", perl = T)
    rt <- rt + geom_segment(data = gffin, aes_string(x = "Peak_Start", xend = "Peak_End", y = -1, yend = -1), colour = "RED")
  }
  return(rt)
}
# Base-graphics plotting for reads: either a per-sample pile-up of read
# bodies (purple) with their poly(A) tails (pink), or — when poly_a_pileup
# is FALSE and alt_cumu_dis is TRUE — reverse-cumulative curves of aligned
# read *width* per sample x gene/peak.
# NOTE(review): in the cumulative branch the legend block sits inside the
# per-sample loop, so with leg = TRUE a legend is redrawn once per sample;
# `title`/`ymax` from the earlier loop are computed but unused there.
pileup_plot <- function (processed_frame, ranges,names, leg,group = F,
                         order_alt = T, alt_cumu_dis,show_poly_a =F, poly_a_pileup=T ){
  # Optionally sort reads longest-first (by width, then poly(A) length).
  if(order_alt==T){
    new_frame <- processed_frame[
      with(processed_frame,order(
        -width, -number_of_as)
      ),
      ]
    ylab <- "Sorted Read Number"
  }
  else{
    new_frame <- processed_frame
    ylab <- "Read Number"
  }
  if (group == T){
    samples <- split(new_frame, new_frame$group, drop =T)
  }
  else {
    samples <- split(new_frame, new_frame$sample, drop =T)
  }
  # Global device state: L-shaped box, wide right margin, clipping off.
  par(bty="l", ps = 10, mar=c(5.1,4.1,4.1,8.1), xpd =T)
  if (poly_a_pileup == T ){
    # Lay the samples out on a 2-column grid (one panel per sample).
    if (length(samples) == 1){
      par(mfrow= c(1,1))
    }
    else if ((length(samples)/2)%%1 == 0){
      par(mfrow= c(as.integer(length(samples)/2),2))
    }
    else{
      par(mfrow= c(as.integer(length(samples)/2)+1,2))
    }
    for (sample in samples) {
      points <- data.frame(sample$width, sample$number_of_as)
      ymax <- nrow(points)
      count <- 1:ymax
      plot(NA,xlim=ranges, ylim = c(0, ymax), xlab= "Number of Bases", ylab = ylab,
           main= paste(sample[1,'sample']))
      # One horizontal line per read: aligned body (purple) then the
      # poly(A) tail appended to its 3' end (pink).
      for (i in 1:ymax){
        segments(x0= 0, y0= i,x1= points[i,1], col="purple")
        segments(x0= points[i,1], y0= i,x1= points[i,1] +points[i,2] , col="pink")
      }
    }
    return()
  }
  # Tallest sample determines ymax (currently unused below — see NOTE).
  ymax <- 0
  for (sample in samples){
    title <- sample[1, 'gene_or_peak_name']
    if (nrow (sample) > ymax){
      ymax <- nrow(sample)
    }
  }
  if (alt_cumu_dis ==T) {
    # Dummy ECDF just to establish the plotting region and axes.
    dummy_ecdf <- ecdf(1:10)
    curve((-1*dummy_ecdf(x)*100)+100, from=ranges[1], to=ranges[2],
          col="white", xlim=ranges, main= paste(names),
          axes=F, xlab= "Number of Bases", ylab = 'Percent Population (%)', ylim =c(0,100))
    axis(1, pos=0, tick = 25)
    axis(2, pos= 0, at= c(0,25,50,75,100), tick = 25)
    count <- 1
    for (df in samples){
      split_peak <- split(df,df$gene_or_peak_name, drop =T)
      for(gene_or_peak in split_peak){
        colours <- rainbow(length(samples)*length(split_peak))
        # Reverse ECDF of aligned read width: % of reads at least this long.
        ecdf_a <- ecdf(gene_or_peak[,"width"])
        curve((-1*ecdf_a(x)*100)+100, from=ranges[1], to=ranges[2],
              col=colours[count], xlim=ranges, main= paste(names),
              add=T)
        count <- count +1
      }
      # This loop makes a list for the legend.
      leg_names <- list()
      for (name in names(samples)){
        leg_names <- c(leg_names, paste(name, names(split_peak)))
      }
      if (leg ==T){
        # Hand-tuned legend placement: shifted left by the label width.
        x_offset <- length(strsplit(paste(leg_names), "")[[1]])
        legend(ranges[2]-30-(x_offset)*2,110 +(length(samples)*-0.8),
               legend = leg_names, fill = colours, bty ="n")
      }
    }
  }
}
gene_expression_plot <- function(processed_bame_frame){
  # Bar chart of raw read counts per sample, or per group when grouping was
  # requested upstream ("group NULL" is the sentinel for "no groups").
  if (processed_bame_frame[1, "group"] == "group NULL"){
    samples <- split(processed_bame_frame, processed_bame_frame$sample, drop = T)
    xlab <- "Sample"
  }
  else{
    samples <- split(processed_bame_frame, processed_bame_frame$group, drop = T)
    xlab <- "Group"
  }
  # One row per sample/group: its name and its raw read count.
  df <- data.frame("Sample" = character(), "Count" = numeric())
  for (i in seq_along(samples)){
    row <- data.frame(names(samples[i]), nrow(samples[i][[1]]))
    df <- rbind(df, row)
  }
  colnames(df) <- c("Sample", "Count")
  gplot <- ggplot(data = df, aes(x = factor(Sample), y = Count)) +
    # BUG FIX: the original call contained a stray empty argument
    # (`colour = "green",, fill = "blue"`), which errors when the layer's
    # arguments are evaluated.
    geom_bar(stat = "identity", colour = "green", fill = "blue", width = 0.5) +
    xlab(xlab) +
    ylab("Raw number of reads")
  return(gplot)
}
#help_text (filters out reads that
#were not sequenced completely to the end of the poly (A)-tail
# gets reads that had a
# genomic aligment within the selected length
|
81e5b5080865c9323bb73f7a011afaf4add14a2d | 975e84bbd9861fa51a9ca9f5a321460435ef19c6 | /AllGraphics.R | 699737e59a336343ab545b8d92413725fa4bc469 | [] | no_license | srinithish/Location-Matching-Engine | bbd3b4c0f2edc699c3f21ada06254a1d5ff2863b | 0ee0b461821dd69ebce28b4143f497c80f6e0583 | refs/heads/master | 2020-04-15T19:26:07.371786 | 2019-01-18T02:09:29 | 2019-01-18T02:09:29 | 164,949,556 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,794 | r | AllGraphics.R |
theme_update(plot.title = element_text(hjust = 0.5))

## Bar graph comparing AIG participation percentage pre vs post renewal.
ParticipationPre = PolicyLevelTermsCompDF$`AIG Participation %_Pre`
ParticipationPost = PolicyLevelTermsCompDF$`AIG Participation %_Post`

AIGParticipationPlotExp = ggplot() +
  geom_col(mapping = aes(x = factor(c("Pre","Post"),levels = c("Pre","Post")),
                         y = c(ParticipationPre,ParticipationPost),
                         fill = factor(c("Pre","Post"),levels = c("Pre","Post"))),
           width = 0.25,position = position_dodge(width = 2)
  )+
  guides(fill = FALSE)+
  # BUG FIX: geom_text() had a bare `label =` argument with no x/y mapping,
  # which fails when the plot is rendered; give the labels the same
  # positions as the columns (same pattern as NoOfLocationsPlotExp).
  geom_text(aes(x = factor(c("Pre","Post"),levels = c("Pre","Post")),
                y = c(ParticipationPre,ParticipationPost),
                label = c(ParticipationPre,ParticipationPost)))+
  labs( x = "",y = "Participation %" ,title = "AIG Participation")

AIGParticipationPlot_Out = renderPlot({AIGParticipationPlotExp})
#### Bar graph for Limits
## to do display value at the Bar top
LimitPre = PolicyLevelTermsCompDF$Limit_Pre
LimitPost = PolicyLevelTermsCompDF$Limit_Post

PolicyLimitPlotExp = ggplot() +
  geom_col(mapping = aes(x = factor(c("Pre","Post"),levels = c("Pre","Post")),
                         y = c(LimitPre,LimitPost),
                         fill = factor(c("Pre","Post"),levels = c("Pre","Post"))),
           width = 0.25,position = position_dodge(width = 0.01)
  )+
  guides(fill = FALSE)+
  # BUG FIX: the labels previously read c(LimitPost, LimitPost), so the
  # "Pre" bar showed the post-renewal limit; also supply the x/y mapping
  # geom_text() needs to render at all.
  geom_text(aes(x = factor(c("Pre","Post"),levels = c("Pre","Post")),
                y = c(LimitPre,LimitPost),
                label = c(LimitPre,LimitPost)))+
  labs( x = "",y = paste0("Limit in ", AccountDetailsPreDF$X__1[12]) ,title = "Limit")

PolicyLimitPlot_Out = renderPlot(PolicyLimitPlotExp)
######Bar graph for TCP
# TCPCompDFForGraph = rbind(cbind(PrePost = "Pre",TCPPreDF),
# cbind(PrePost = "Post",TCPPostDF))
#
# TCPCompDFForGraph = TCPCompDFForGraph %>%
# mutate(`Layer Name` = as.factor(`Layer Name`))
#
# for(i in colnames(TCPCompDFForGraph)){
# if(i %in% c("EQ TCP","FL TCP","WS TCP")){
# TCPCompDFForGraph[,i] = as.numeric(TCPCompDFForGraph[,i])
# tempGraph = ggplot()+
# assign(paste0(i,"Graph"),)
# }
# }
#
# TCPCompDFForGraph = gather(TCPCompDFForGraph,c(`EQ TCP`,`FL TCP`,`WS TCP`),
# key = "Peril",value = "TCP",factor_key = TRUE)
#
# ggplot(data = TCPCompDFForGraph) +
#
# class(TCPCompDFForGraph$`EQ TCP`)
####### Bar Graph for Locations
# Pre/post location counts, pulled out once so the aes() calls stay short.
loc_counts <- c(nrow(LocationDetailsPreDF), nrow(LocationDetailsPostDF))
pre_post <- factor(c("Pre", "Post"), levels = c("Pre", "Post"))

NoOfLocationsPlotExp = ggplot() +
  geom_col(mapping = aes(x = pre_post, y = loc_counts, fill = pre_post),
           width = 0.25, position = position_dodge(width = 0.01)) +
  guides(fill = FALSE) +
  geom_text(aes(x = pre_post, y = loc_counts, label = loc_counts),
            nudge_y = 15) +
  labs(x = "", y = "Location Count", title = "No of Locations")

NoOfLocationsPlot_Out = renderPlot(NoOfLocationsPlotExp)
|
dda85078f5e7119772fa22cc33f710b4ae2e6d97 | d38e3540c42dc8019d83b410599166db3acd9de2 | /data-raw/creando_mis_datasets.R | 3eb1b4cd69d2119eb32c68945f276d9ed0d6ada2 | [
"MIT"
] | permissive | perezp44/pjpv.datos.01 | e55c2b8177d7794a8baf2583dd2ab9e05c74e3aa | 90ad8f4b99b6af25ab35117c6599237399acd7ea | refs/heads/master | 2023-04-09T11:06:53.157945 | 2021-04-24T15:55:07 | 2021-04-24T15:55:07 | 355,845,724 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,341 | r | creando_mis_datasets.R | #- creando los datasets q voy alojando en el pkg ----------------
library(tidyverse)
#- municipal population -----------------
# Raw INE municipal population file: long format, one row per
# municipality/year/population category.
pob_1996_2020 <- readr::read_rds("/home/pjpv/Escritorio/my_datos_2021/datos/INE/pob_mun/ine_pob_mun_1996_2020.rds")
# Widen to one row per municipality/year with total/male/female columns.
pob_muni_1996_2020 <- pob_1996_2020 %>%
  pivot_wider(names_from = poblacion, values_from = values) %>%
  dplyr::rename(pob_total = Total) %>%
  dplyr::rename(pob_hombres = Hombres) %>%
  dplyr::rename(pob_mujeres = Mujeres) %>%
  select(1:4, pob_total, pob_hombres, pob_mujeres, everything())
# Quick sanity checks: row counts per year and the resulting structure.
zz <- pob_muni_1996_2020 %>% group_by(year) %>% count()
str(pob_muni_1996_2020)
#- usethis::use_data(pob_muni_1996_2020, overwrite = TRUE) #- 2021-04-08
#- provincial population -----------------
# Aggregate the packaged municipal table up to province level: sum the
# pob_* columns within year/province, then collapse the duplicated rows.
pob_muni <- pjpv.datos.01::pob_muni_1996_2020
pob_prov_1996_2020 <- pob_muni %>%
  select(ine_prov, ine_prov.n, year, pob_total, pob_hombres, pob_mujeres, ine_ccaa, ine_ccaa.n) %>%
  group_by(year, ine_prov) %>%
  #mutate(across(where(is.numeric), sum)) %>% #- sums the municipal population (using where())
  mutate(across(starts_with("pob_"), sum)) %>% #- sums the municipal population
  distinct() %>%
  ungroup()
zz <- pob_prov_1996_2020 %>% group_by(year) %>% count()
str(pob_prov_1996_2020)
#- usethis::use_data(pob_prov_1996_2020, overwrite = TRUE) #- 2021-04-08
#- population by autonomous community (CC.AA) -----------------
# Same aggregation as above, but up to the autonomous-community level.
pob_muni <- pjpv.datos.01::pob_muni_1996_2020
pob_ccaa_1996_2020 <- pob_muni %>%
  select(ine_ccaa, ine_ccaa.n, year, pob_total, pob_hombres, pob_mujeres) %>%
  group_by(year, ine_ccaa) %>%
  #mutate(across(where(is.numeric), sum)) %>% #- sums the municipal population (using where())
  mutate(across(starts_with("pob_"), sum)) %>% #- sums the municipal population
  distinct() %>%
  ungroup()
zz <- pob_ccaa_1996_2020 %>% group_by(year) %>% count()
str(pob_ccaa_1996_2020)
#- usethis::use_data(pob_ccaa_1996_2020, overwrite = TRUE) #- 2021-04-08
#- geometrias LAU2 ------------------------------------------
#- las geometrias de LAU2 pero con Canarias desplazada
#- geometrias municipales
library(sf)
LAU2_muni_2020_canarias <- readr::read_rds("/home/pjpv/Escritorio/my_datos_2021/datos/geo_datos_mios/geo_muni_2020_LAU2_canarias.rds")
zz <- LAU2_muni_2020_canarias %>% sf::st_set_geometry(NULL)
names(LAU2_muni_2020_canarias)
str(LAU2_muni_2020_canarias)
#- usethis::use_data(LAU2_muni_2020_canarias, overwrite = TRUE) #- 2021-04-08
#- geometrias provinciales
LAU2_prov_2020_canarias <- readr::read_rds("/home/pjpv/Escritorio/my_datos_2021/datos/geo_datos_mios/geo_prov_2020_LAU2_canarias.rds")
zz <- LAU2_prov_2020_canarias %>% sf::st_set_geometry(NULL)
names(LAU2_prov_2020_canarias)
str(LAU2_prov_2020_canarias)
#- usethis::use_data(LAU2_prov_2020_canarias, overwrite = TRUE) #- 2021-04-08
#- geometrias CC.AA
LAU2_ccaa_2020_canarias <- readr::read_rds("/home/pjpv/Escritorio/my_datos_2021/datos/geo_datos_mios/geo_ccaa_2020_LAU2_canarias.rds")
zz <- LAU2_ccaa_2020_canarias %>% sf::st_set_geometry(NULL)
names(LAU2_ccaa_2020_canarias)
str(LAU2_ccaa_2020_canarias)
#- usethis::use_data(LAU2_ccaa_2020_canarias, overwrite = TRUE) #- 2021-04-08
#- Poblacion EUROSTAT ---------------------------------
eurostat_poblacion_NUTS_3 <- rio::import("/home/pjpv/Escritorio/my_datos_2021/datos/EUROSTAT/poblacion/eurostat_poblacion_NUTS-3.rds")
#- usethis::use_data(eurostat_poblacion_NUTS_3, overwrite = TRUE) #- 2021-04-24
|
f0d011865593083e6935a613fddea6f6a3e5e959 | c8d782557b9e25775936dd9a9436203d2abb0fdc | /R Assignment Nilesh.R | 96529873cb1c34cf16e18b100851b6b2cf96e172 | [] | no_license | ndixit2862/R-Assignment | bfc6f9fa218b76f542704ee63e377b60539cb4a2 | 55e2fe9b8f68e309b80b7e4e5abd22b6a6358a21 | refs/heads/main | 2023-04-30T15:56:34.991664 | 2021-05-16T13:25:43 | 2021-05-16T13:25:43 | 367,887,196 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,764 | r | R Assignment Nilesh.R | 1.#Try to write a code for printing sequence of numbers from 1 to 50 with the differences of 3, 5, 10
# NOTE(review): this is a homework script of independent snippets. The bare
# `N.#...` lines are question headings (`N.` parses as a numeric literal and
# autoprints) and lines such as `a.vector` below are bare symbols that error
# at runtime -- presumably sub-headings, not code.
# Q1: sequences 1..50 with steps 3, 5 and 10
s1<-seq(1,50,3)
s1
s2<-seq(1,50,5)
s2
s3<-seq(1,50,10)
s3
2.#What are the different data objects in R? and write syntax and example for each and every object
# Q2a: atomic vector
a.vector
x<-c(1,2,3)
# Q2b: matrix (5 rows, one column)
b.Matrix
x<-matrix(c(1,2,3,4,5),nrow=5)
x
# Q2c: three-dimensional array (1:6 recycled to fill 2*4*3 = 24 cells)
c.Array
x<-array(c(1:6),dim=c(2,4,3))
x
# Q2d: list (elements may differ in length and type)
d.List
List<-list(id=c(1,2,3,4),name=c("Nilesh","Jyotsna"),marks=c(98,97))
List
# Q2e: data frame (all columns share the same length)
e.Dataframe
dataframe<-data.frame(id =c(1,2,3),name=c("Tom","Jerry","John"), marks=c(98,97,99))
dataframe
# Q2f: factor built from a character vector
f.Factor
x<-c("5","4","5","6","6","4","4")
class(x)
factor<-factor(x)
factor(x)
3.#Create Data frame with 3 columns and 5 rows and write a code to fetch and delete row and a column using index and add new column and row to the existed data frame
x<-data.frame(id =c(1,2,3,4,5),name=c("Kathy","Chris","Albert","John","Jacky"), marks=c(98,97,99,87,85))
x
# to fetch the 4th row
x[c(4),]
# To fetch 2 and 4th column
# NOTE(review): this actually selects columns 2 and 3, not 2 and 4
x[,c(2,3)]
# to delete 1st row
x<-x[-c(1),]
x
#to delete 3rd column
x<-x[,-c(3)]
x
# To add new row 1
# NOTE(review): rbind() of a character vector onto the remaining rows works
# because the marks column was just deleted; row values are coerced
x1<-rbind(c(1,"sasha"),x)
x1
# To add new column
x1[,"marks"]<-c(98,97,99,87,85)
x1
4.#Write nested if else statements to print whether the given number is negative, positive or Zero
x <- 4
if (x < 0) {
  print("Negative number")
} else if (x > 0) {
  print("Positive number")
} else
  print("Zero")
5. #write a program to input any value and check whether it is character, numeric or special character
# Classifies the string by lexicographic range comparison on its first bytes
input<-'&##'
if((input>='a'&input<='z')|(input>='A'&input<='Z')){
  # NOTE(review): "charcter" is a typo for "character" in the printed string
  print("charcter")
}else if((input>='0'&input<='9')){
  print("numeric")
}else {
  print("spl")
}
6.#write difference between break and next also write examples for both
#Break - Terminates the loop statement
x<-"Hello World"
cnt<-1
repeat {
  print (x)
  cnt<-cnt +1
  if (cnt>5){
    break
  }
}
#Next - Skips the current iteration of a loop without terminating it.
a<-LETTERS[1:6]
for (i in a){
  if (i=="D"){
    next
  }
  print(i)
}
7.#write a program to print a given vector in reverse format
x= c(1,5.6,3,10,3.5,5)
rev(x)
#without using function
length(x)
i=length(x)
# NOTE(review): growing z with append() in a loop is O(n^2); fine for a
# demo, but x[length(x):1] would do the same in one vectorized step
z<-c()
for( i in length(x):1)
{
  y<-x[i];
  z<-append(z,y);
}
print(z)
8.#write a program to get the mode value of the given vector ('a','b','c','t','a','c','r','a','c','t','z','r','v','t','u','e','t')
v= c("a","b","c","t","a","c","r","a","c","t","z","r","v","t","u","e","t")
# Most frequent element of v. Ties are resolved in favour of the value that
# appears first in v (the same rule as the original match/tabulate one-liner).
getmode <- function(v) {
  distinct_vals <- unique(v)
  counts <- tabulate(match(v, distinct_vals))
  distinct_vals[which.max(counts)]
}
# Q8 (continued): apply getmode() to the given vector; "t" occurs most often
v= c("a","b","c","t","a","c","r","a","c","t","z","r","v","t","u","e","t")
result <- getmode(v)
print(result)
9.#write a function to filter only data belongs to 'setosa' in species of Iris dataset.( using dplyr package)
data("iris")
View(iris)
# NOTE(review): install.packages() inside a script re-installs on every run;
# it is normally done once, interactively
install.packages('dplyr')
library(dplyr)
filter(select(iris,Sepal.Length,Sepal.Width,Petal.Length,Petal.Width,Species),Species=="setosa")
10.#Create a new column for iris dataset with the name of Means_of_obs, which contains mean value of each row.( using dplyr package)
# Row-wise mean of the four numeric measurement columns
iris$Means_of_obs<-apply(iris[,1:4],1,mean)
11.#Filter data for the "versicolor" and get only 'sepel_length' and Sepel _width' columns.( using dplyr package)
install.packages('dplyr')
library(dplyr)
filter(select(iris,Sepal.Length,Sepal.Width,Species),Species=="versicolor")
12.#create below plots for the mtcars also write your inferences for each and every plot (use ggplot package) Use Different ( Size , Colour )
# NOTE(review): the package is called 'ggplot2', not 'ggplot'; these two
# lines fail, and all plots below actually use base graphics anyway
install.packages('ggplot')
library(ggplot)
data("mtcars")
View(mtcars)
#a)scatterplot
#syntax;plot(x, y, main, xlab, ylab, xlim, ylim, axes)
input <- mtcars[,c('wt','hp')]
input
plot(x = input$wt,y = input$hp,
     xlab = "Weight",
     ylab = "horsepower",
     xlim = c(2.5,5),
     ylim = c(90,200),
     main = "Weight vs HP"
)
dev.off()
#b)Boxplots
#syntax:boxplot(x, data, notch, varwidth, names, main)
input <- mtcars[,c('mpg','cyl')]
boxplot(mpg ~ cyl, data = mtcars, xlab = "Number of Cylinders",
        ylab = "Miles Per Gallon", main = "Mileage Data")
#dev.off()
#c)Histogram
#syntax:hist(v,main,xlab,xlim,ylim,breaks,col,border)
str(mtcars)
# mtcars[[1]] is already numeric (mpg); the as.character round-trip is a no-op
vec <- as.numeric(as.character(unlist(mtcars[[1]])))
class(vec)
hist(vec,main="Miles per gallon",xlab="mpg",col = "yellow",border = "blue")
#d)Line graph
#syntax:plot(v,type,col,xlab,ylab)
vec <- as.numeric(as.character(unlist(mtcars[[4]])))
plot(vec,type="o",xlab="hourse power",main="Hourse power chart",col="blue")
#e)Bar graph
#syntax:barplot(H,xlab,ylab,main, names.arg,col)
vec <- as.numeric(as.character(unlist(mtcars[[5]])))
barplot(vec,xlab="weight",main="WEIGHTS CHART")
|
8f04cc4eb64a472a071a441172bb498b44dff10f | 08a2a7468e3f09e803afb74616b9c37fd4f05335 | /R/utils_page.R | cc9961a2ba8d1af385df4f12ae16ec739358bd3d | [
"MIT"
] | permissive | ginberg/brochure | 8b2e9fb6551d045730fb3e14f6950ebafb583d2e | 33a1c2fe59e5ec43cb800bc0864eb388638eefd9 | refs/heads/main | 2023-03-05T00:43:56.760095 | 2021-02-23T07:11:48 | 2021-02-23T07:11:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 615 | r | utils_page.R | with_class <- function(res, pop_class){
class(res) <- c(pop_class, class(res))
res
}
# Abort (via attempt::stop_if) when `code` is not a valid HTTP redirect
# status: 301-308 or 310.
check_redirect_code <- function(code){
  allowed <- c(301:308, 310)
  attempt::stop_if(
    code,
    ~ !.x %in% allowed,
    sprintf(
      "Redirect code should be one of %s.",
      paste(allowed, collapse = " ")
    )
  )
}
# One logical per element of `content`: does the element inherit from the
# S3 class `class`? Names of `content` are preserved on the result.
extract <- function(content, class){
  vapply(content, inherits, logical(1), what = class)
}
# Flatten a list of redirect specs (each a list with $from, $to and $code)
# into one data.frame with a row per redirect.
build_redirect <- function(redirect){
  rows <- lapply(redirect, function(entry) {
    data.frame(
      from = entry$from,
      to = entry$to,
      code = entry$code
    )
  })
  do.call(rbind, rows)
}
|
87cf8330f4b953744363ea50138cf6d23422ae21 | 8a659785a0346a35671e71bbe0cf4ae9940e46f1 | /test-nlar-model/test.R | 14af0388398f7c903638cc2cd75f5c615e2ac71b | [] | no_license | ashtonbaker/beetle-model | 56b4f244d5650ba20e73dea84ace46f6ba40ddf6 | 50337898aab105971ce39d010f7880d1fd87008e | refs/heads/master | 2021-01-12T17:31:22.601637 | 2017-06-14T18:17:14 | 2017-06-14T18:17:14 | 71,592,813 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 518 | r | test.R | library(pomp)
library(magrittr)
library(reshape2)
# Defines nlar.model() used in the likelihood loop below
source('./models/nlar-model/nlar-model.R')
# Observation data, first 40 weeks only; L_obs/P_obs/A_obs presumably are
# larva/pupa/adult beetle counts per replicate -- TODO confirm upstream
read.csv("../data/data.csv") %>%subset(weeks <= 40, select=c(weeks,rep,L_obs,P_obs,A_obs)) -> dat
# Fixed parameter vector evaluated by the particle filter
cls_params <- c(b=10.45, mu_L=0.2000, mu_A=0.007629, cel=0.01731, cea=0.1310, cpa=0.004619,
                sigma_1=1.621, sigma_2=0.7375, sigma_3=0.01212)
# Running total of the log-likelihood accumulated over the replicates
ll <- 0
for (i in 1:24) {
print(i)
model <- nlar.model(data=subset(dat, rep == i), params = cls_params)
pf <- pfilter(model, Np = 100000)
ll = ll + logLik(pf)
} |
841639d0d587df61672c8bf0609e3c90cf14c01e | f1971a5cbf1829ce6fab9f5144db008d8d9a23e1 | /packrat/lib/x86_64-pc-linux-gnu/3.2.5/rprojroot/tests/testthat/test-root.R | df694e5e8e493bad5a78fe9e3a89d7ad907ab420 | [] | no_license | harryprince/seamonster | cc334c87fda44d1c87a0436139d34dab310acec6 | ddfd738999cd302c71a11aad20b3af2f4538624f | refs/heads/master | 2021-01-12T03:44:33.452985 | 2016-12-22T19:17:01 | 2016-12-22T19:17:01 | 78,260,652 | 1 | 0 | null | 2017-01-07T05:30:42 | 2017-01-07T05:30:42 | null | UTF-8 | R | false | false | 3,390 | r | test-root.R | context("root")
# has_file() criteria: walk upward from `path` until a directory containing
# the given file (optionally with matching contents, optionally only within
# the first N lines) is found. `rprojroot:::is_root` is mocked so the walk
# stops at hierarchy(1L) rather than the real filesystem root, keeping the
# test hermetic. Which level matches depends on the fixture tree shipped in
# tests/testthat/hierarchy/.
# NOTE(review): with_mock() is deprecated in testthat 3e; left as-is in this
# vendored snapshot.
test_that("has_file", {
  wd <- normalizePath(getwd(), winslash = "/")
  # hierarchy(n) = wd/hierarchy with the first n of "a", "b", "c" appended
  hierarchy <- function(n = 0L) {
    do.call(file.path, list(wd, "hierarchy", "a", "b", "c")[seq_len(n + 1L)])
  }
  stop_path <- hierarchy(1L)
  path <- hierarchy(4L)
  with_mock(
    `rprojroot:::is_root` = function(x) x == stop_path,
    expect_equal(find_root("a", path = path), hierarchy(3L)),
    expect_equal(find_root("b", path = path), hierarchy(3L)),
    expect_equal(find_root("b/a", path = path), hierarchy(2L)),
    expect_equal(find_root_file("c", criterion = "b/a", path = path),
                 file.path(hierarchy(2L), "c")),
    expect_equal(find_root("c", path = path), hierarchy(1L)),
    expect_equal(find_root("d", path = path), hierarchy(4L)),
    expect_equal(find_root(has_file("DESCRIPTION", "^Package: ", 1), path = path), hierarchy(1L)),
    expect_equal(find_root(has_file("DESCRIPTION", "^Package: "), path = path), hierarchy(1L)),
    # no match anywhere below the mocked root -> informative errors
    expect_error(find_root("test-root.R", path = path),
                 "No root directory found.* file '.*'"),
    expect_error(find_root("rprojroot.Rproj", path = path),
                 "No root directory found.* file '.*'"),
    expect_error(find_root(has_file("e", "f"), path = path),
                 "No root directory found.* file '.*' with contents"),
    expect_error(find_root(has_file("e", "f", 1), path = path),
                 "No root directory found.* file '.*' with contents .* in the first .* lines")
  )
})
# Same fixture-walking tests for has_file_pattern(): the criterion matches
# file NAMES by regular expression (glob2rx() converts a glob such as "a"
# into the regex "^a$") instead of by exact file name.
test_that("has_file_pattern", {
  wd <- normalizePath(getwd(), winslash = "/")
  # hierarchy(n) = wd/hierarchy with the first n of "a", "b", "c" appended
  hierarchy <- function(n = 0L) {
    do.call(file.path, list(wd, "hierarchy", "a", "b", "c")[seq_len(n + 1L)])
  }
  stop_path <- hierarchy(1L)
  path <- hierarchy(4L)
  with_mock(
    `rprojroot:::is_root` = function(x) x == stop_path,
    expect_equal(find_root(has_file_pattern(glob2rx("a")), path = path), hierarchy(3L)),
    expect_equal(find_root(has_file_pattern(glob2rx("b")), path = path), hierarchy(3L)),
    # character-class patterns may also be constrained by file contents
    expect_equal(find_root(has_file_pattern("[ab]", "File b"), path = path),
                 hierarchy(3L)),
    expect_equal(find_root(has_file_pattern("[ab]", "File b in root"), path = path),
                 hierarchy(1L)),
    expect_equal(find_root(has_file_pattern(glob2rx("c")), path = path), hierarchy(1L)),
    expect_equal(find_root(has_file_pattern(glob2rx("d")), path = path), hierarchy(4L)),
    expect_equal(find_root(has_file_pattern(glob2rx("DESCRIPTION"), "^Package: ", 1), path = path), hierarchy(1L)),
    expect_equal(find_root(has_file_pattern(glob2rx("DESCRIPTION"), "^Package: "), path = path), hierarchy(1L)),
    # no matching file name / contents below the mocked root -> errors
    expect_error(find_root(has_file_pattern(glob2rx("test-root.R")), path = path),
                 "No root directory found.* file matching "),
    expect_error(find_root(has_file_pattern(glob2rx("rprojroot.Rproj")), path = path),
                 "No root directory found.* file matching "),
    expect_error(find_root(has_file_pattern(glob2rx("e"), "f"), path = path),
                 "No root directory found.* with contents"),
    expect_error(find_root(has_file_pattern(glob2rx("e"), "f", 1), path = path),
                 "No root directory found.* with contents .* in the first .* lines")
  )
})
# No mocking here: starting from "/" must reach the real filesystem root and
# error out, proving the upward search terminates rather than looping.
test_that("finds root", {
  skip_on_cran()
  # Checks that search for root actually terminates
  expect_error(find_root("/"), "No root directory found.* file '.*'")
})
|
5c5175266d8169cf0f8ef1f5eacd7a73e356f0b1 | 90fd7aea435253900e6f376162e702191bca0756 | /plot2.R | 92f0ba800a06ce82d62dba1b87015c23715028f4 | [] | no_license | qhong/ExData_Plotting1 | 30b3106b80086234f475397e94d80e68f226d77e | 3cd255b4a204ca9911113179edd3d44011a10511 | refs/heads/master | 2021-01-18T16:16:58.982088 | 2016-05-24T21:34:43 | 2016-05-24T21:34:43 | 59,598,577 | 0 | 0 | null | 2016-05-24T18:32:40 | 2016-05-24T18:32:39 | null | UTF-8 | R | false | false | 972 | r | plot2.R | rm(list = ls())
library(rCharts)
setwd('C:\\Users\\qhong-10\\coursera\\Exploratory data analysis\\project\\assignment1\\ExData_Plotting1')
# Read a few rows first so the full read can reuse the detected column
# classes (much faster for this ~2M-row file)
top <- read.table('../../exdata-data-household_power_consumption/household_power_consumption.txt',
                  header = T, sep = ';', nrow = 5)
classes <- sapply(top, class)
data <- read.table('../../exdata-data-household_power_consumption/household_power_consumption.txt',
                   header = T, sep = ';', colClasses = classes, na.strings = '?')
# Parse dates once, then keep only the two target days; the original
# re-ran as.Date() after subsetting, which was a no-op and has been removed
data$Date <- as.Date(data$Date, format = '%d/%m/%Y')
data <- data['2007-02-01' <= data$Date & data$Date <= '2007-02-02',]
# Combine date and time into a POSIXlt timestamp for the x axis
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
head(data)
class(data$datetime)
# Line plot of global active power over the two days, written to PNG
png('plot2.png', width = 480, height = 480, units = "px")
plot(data$datetime, data$Global_active_power, type = 'l',
     ylab = 'Global Active Power(kilowatts)', xlab = '')
dev.off()
|
07162455fea7474d5c6d615bb32eaaf04fbe2f59 | 46a9bf820a271c574149329e627afa2e511d42b0 | /man/arl_aewma.Rd | ae0ad962e5ff4214865197a1493e7b1cc66494f0 | [] | no_license | samuelanyaso/DyAEWMA | 5dfaf7788a3b2249b2fe1c178700fc1bad13be9a | 0e8f9ab26daa8030ac279f4762725d427a846c91 | refs/heads/master | 2020-09-29T08:22:03.523641 | 2020-02-28T22:38:43 | 2020-02-28T22:38:43 | 216,295,837 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 813 | rd | arl_aewma.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arl_aewma.R
\name{arl_aewma}
\alias{arl_aewma}
\title{ARL value of the Adaptive EWMA control chart (with p-values)}
\usage{
arl_aewma(alpha, w = 50, nsimul, shift = 0)
}
\arguments{
\item{alpha}{a real number; the level of significance}
\item{w}{an integer; the sample size needed to reach steady-state, default value is 50.}
\item{nsimul}{an integer; the number of replications}
\item{shift}{a real number; shift size. If shift=0, IC ARL is returned, else OC ARL is returned. Default value is 0.0.}
}
\value{
ARL
}
\description{
ARL value of the Adaptive EWMA control chart (with p-values)
}
\details{
Estimates the ARL for the AEWMA chart (with p-values)
}
\examples{
arl_aewma(0.025,50,100,0.0)
}
\author{
Samuel Anyaso-Samuel
}
|
1d09e74a2486e8b7beb54da381093f03fffb28e9 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610387773-test.R | fa796b158866362f8a0097feb4f9154d3c6fe15e | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 380 | r | 1610387773-test.R | testlist <- list(a = 751965952L, b = 71319807L, x = c(16777215L, -183L, 1229539657L, 1229539657L, 1229539657L, 1229539657L, 1229539657L, 1229539657L, 1229539657L, 1229539657L, 1229539657L, -768856879L, -768856879L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -234L, 370546198L, 370546198L, 0L, 771L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
8bf96b6ded5663b3f3ef396dfbbfd4f1dcac932a | d52c2f7fcc59530a3aebf56b099810f12cc82660 | /R/sem_tables.R | ef7335c4570d1455d0494612f43ea9a706bcf378 | [] | no_license | admahood/dissertation | 61f1a74a23ec3b9a94bf6562a837ec483aa17687 | 50981750702b146b1c523a00fd4412759877541d | refs/heads/main | 2023-04-14T15:07:45.741154 | 2021-08-27T16:48:18 | 2021-08-27T16:48:18 | 354,950,697 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,529 | r | sem_tables.R | # creating sem tables
library(tidyverse)
# First letter of an effect name -> printable effect-type label
# (used below as lut_et[str_sub(var, 1, 1)])
lut_et<- c("c"="Contrast", "i"= "Indirect (single pathway)","d" = "Direct",
           "I"= "Indirect (multiple pathways)","T"="Total")
# scn_boot<-readRDS("data/scn_boot.RDS")
# Loads the bootstrap draws used below (scn_1_boot, scn_3_boot, bcn_boot,
# pcn_boot, ocn_boot); presumably columns are parameter/effect names and
# rows are bootstrap replicates -- confirm against the SEM fitting script.
load("data/bootstrapped_sems.Rda")
# Tables
# soil C and N, stages 1 & 2 ===================================================
# Bootstrap summary: one row per named quantity with its 95% highest-density
# interval (HDInterval::hdi) and median. `sig` flags intervals that exclude
# zero (lower and upper bound share the same sign). Names containing "~" are
# regression coefficients; everything else is treated as a user-defined effect.
scn_1_cis <- scn_1_boot %>%
  as.data.frame() %>%
  pivot_longer(cols = names(.),names_to = "var", values_to = "vals") %>%
  group_by(var) %>%
  dplyr::summarise(ci_025 = HDInterval::hdi(vals)%>% pluck(1),
                   ci_975 = HDInterval::hdi(vals) %>% pluck(2),
                   median = quantile(vals, probs = 0.5)) %>%
  ungroup() %>%
  mutate(type = ifelse(str_detect(var, "~"), "regression", "user"),
         sig = ifelse(ci_025 *ci_975 > 0, "*", ""))
# whatever direct effects are needed in the table we can add if necessary (pre-bootstrap)
# Soil-carbon table. The effect name is decomposed mechanically:
#   * effect_type: first character, mapped through lut_et
#   * var: text up to the first ":", then stripped of its first two
#     characters and the trailing ":"
#   * exogenous_var: text before the first "_" of the stripped name
#   * resp: last character ("c" = soil carbon, "n" = soil nitrogen)
#   * mediator: text captured between the exogenous prefix and the "_c"
#     suffix, with one "_" turned into a space
# NOTE(review): naming scheme inferred from these regexes -- confirm against
# the effect labels defined in the lavaan model syntax.
scn_c_1_tab <- scn_1_cis %>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         resp = str_sub(var, nchar(var), nchar(var)),
         mediator = str_match(var, "_(.*?)_c")[,2] %>% str_replace("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  filter(resp == "c") %>%
  dplyr::select(exogenous_var, effect_type, "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);scn_c_1_tab
scn_c_1_tab %>%
  write_csv("data/booted_sc_1_table.csv")
# Soil-nitrogen version of the same table ("_n" suffix / resp == "n")
scn_n_1_tab <- scn_1_cis %>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         resp = str_sub(var, nchar(var), nchar(var)),
         mediator = str_match(var, "_(.*?)_n")[,2] %>% str_replace("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  filter(resp == "n") %>%
  dplyr::select(exogenous_var, effect_type, "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);scn_n_1_tab
scn_n_1_tab %>%
  write_csv("data/booted_sn_1_table.csv")
# soil C and N, stages 3 & 4 ===================================================
# Identical pipeline applied to the stage 3 & 4 bootstrap draws
scn_3_cis <- scn_3_boot %>%
  as.data.frame() %>%
  pivot_longer(cols = names(.),names_to = "var", values_to = "vals") %>%
  group_by(var) %>%
  dplyr::summarise(ci_025 = HDInterval::hdi(vals)%>% pluck(1),
                   ci_975 = HDInterval::hdi(vals) %>% pluck(2),
                   median = quantile(vals, probs = 0.5)) %>%
  ungroup() %>%
  mutate(type = ifelse(str_detect(var, "~"), "regression", "user"),
         sig = ifelse(ci_025 *ci_975 > 0, "*", ""))
# whatever direct effects are needed in the table we can add if necessary (pre-bootstrap)
scn_c_3_tab <- scn_3_cis %>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         resp = str_sub(var, nchar(var), nchar(var)),
         mediator = str_match(var, "_(.*?)_c")[,2] %>% str_replace("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  filter(resp == "c") %>%
  dplyr::select(exogenous_var, effect_type,
                "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);scn_c_3_tab
scn_c_3_tab %>%
  write_csv("data/booted_sc_3_table.csv")
scn_n_3_tab <- scn_3_cis %>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         resp = str_sub(var, nchar(var), nchar(var)),
         mediator = str_match(var, "_(.*?)_n")[,2] %>% str_replace("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  filter(resp == "n") %>%
  dplyr::select(exogenous_var, effect_type, "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);scn_n_3_tab
scn_n_3_tab %>%
  write_csv("data/booted_sn_3_table.csv")
# bromus c:n ===================================================================
# Same pattern as the soil tables, but with plain percentile intervals
# (quantile 0.025/0.975) instead of HDIs, no response filter, and the
# effect-name suffix "_bcn" (Bromus C:N). All "_" in the mediator text are
# replaced, not just the first.
# NOTE(review): the soil blocks above use HDInterval::hdi for the bounds;
# confirm the switch to percentile intervals here is intentional.
bcn_cis <- bcn_boot %>%
  as.data.frame() %>%
  pivot_longer(cols = names(.),names_to = "var", values_to = "vals") %>%
  group_by(var) %>%
  dplyr::summarise(ci_025 = quantile(vals, probs = 0.025),
                   ci_975 = quantile(vals, probs = 0.975),
                   median = quantile(vals, probs = 0.5)) %>%
  ungroup() %>%
  mutate(type = ifelse(str_detect(var, "~"), "regression", "user"),
         sig = ifelse(ci_025 *ci_975 > 0, "*", ""))
bcn_tab<-bcn_cis %>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         mediator = str_match(var, "_(.*?)_bcn")[,2] %>% str_replace_all("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  dplyr::select(exogenous_var, effect_type, "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);bcn_tab
bcn_tab%>%write_csv("data/booted_bcn_table.csv")
# poa c:n ======================================================================
# Identical pipeline for Poa C:N ("_pcn" suffix)
pcn_cis <- pcn_boot %>%
  as.data.frame() %>%
  pivot_longer(cols = names(.),names_to = "var", values_to = "vals") %>%
  group_by(var) %>%
  dplyr::summarise(ci_025 = quantile(vals, probs = 0.025),
                   ci_975 = quantile(vals, probs = 0.975),
                   median = quantile(vals, probs = 0.5)) %>%
  ungroup() %>%
  mutate(type = ifelse(str_detect(var, "~"), "regression", "user"),
         sig = ifelse(ci_025 *ci_975 > 0, "*", ""))
pcn_tab<- pcn_cis%>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         mediator = str_match(var, "_(.*?)_pcn")[,2] %>% str_replace_all("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  dplyr::select(exogenous_var, effect_type, "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);pcn_tab
pcn_tab%>%write_csv("data/booted_pcn_table.csv")
# other c:n ====================================================================
# Identical pipeline for the "other species" C:N ("_ocn" suffix)
ocn_cis <- ocn_boot %>%
  as.data.frame() %>%
  pivot_longer(cols = names(.),names_to = "var", values_to = "vals") %>%
  group_by(var) %>%
  dplyr::summarise(ci_025 = quantile(vals, probs = 0.025),
                   ci_975 = quantile(vals, probs = 0.975),
                   median = quantile(vals, probs = 0.5)) %>%
  ungroup() %>%
  mutate(type = ifelse(str_detect(var, "~"), "regression", "user"),
         sig = ifelse(ci_025 *ci_975 > 0, "*", ""))
ocn_tab<-ocn_cis %>%
  filter(type == "user") %>%
  mutate(effect_type = lut_et[str_sub(var, 1,1)],
         var = str_extract(var,"^(.*?):"),
         var = str_sub(var, 3, nchar(var)-1),
         exogenous_var = str_extract(var,"^(.*?)_")%>%
           str_sub(1,nchar(.)-1),
         mediator = str_match(var, "_(.*?)_ocn")[,2] %>% str_replace_all("_"," ")) %>%
  filter(effect_type != "Contrast") %>%
  dplyr::select(exogenous_var, effect_type, "mediator(s)" = mediator, median, ci_025, ci_975, sig) %>%
  arrange(exogenous_var,effect_type);ocn_tab
ocn_tab%>%write_csv("data/booted_ocn_table.csv")
# figures ======================================================================
# scn_boot %>%
# as.data.frame() %>%
# pivot_longer(cols = names(.),names_to = "var", values_to = "vals") %>%
# mutate(type = ifelse(str_detect(var, "~"), "regression", "user"))%>%
# filter(type == "user")%>%
# mutate(var = str_extract(var,"^(.*?):"))%>%
# ggplot(aes(x=vals)) +
# geom_vline(xintercept = 0, lty=2)+
# geom_density() +
# facet_wrap(~var, scales="free") +
# theme(axis.title = element_blank(),
# axis.ticks = element_blank(),
# axis.text = element_blank())
# covariance matrices =====================
# Export each fitted model's implied covariance matrix with the implied
# means appended as a final row; the appended row has an empty rowname, so
# it is relabelled "mean" before writing.
load("data/sem_fits.Rda")
library(lavaan)
# fitted(scn_fit)$cov %>%
#   rbind(fitted(scn_fit)$mean)%>%
#   round(3) %>%
#   as_tibble(rownames = "x") %>%
#   mutate(x = replace(x, x == "", "mean")) %>%
#   write_csv("data/cm_scn.csv")
fitted(scn_12_fit)$cov %>%
  rbind(fitted(scn_12_fit)$mean)%>%
  round(3) %>%
  as_tibble(rownames = "x") %>%
  mutate(x = replace(x, x == "", "mean")) %>%
  write_csv("data/cm_scn12.csv")
fitted(scn_34_fit)$cov %>%
  rbind(fitted(scn_34_fit)$mean)%>%
  round(3) %>%
  as_tibble(rownames = "x") %>%
  mutate(x = replace(x, x == "", "mean")) %>%
  write_csv("data/cm_scn34.csv")
fitted(bcn_fit)$cov %>%
  rbind(fitted(bcn_fit)$mean)%>%
  round(3) %>%
  as_tibble(rownames = "x") %>%
  mutate(x = replace(x, x == "", "mean"))%>%
  write_csv("data/cm_bcn.csv")
fitted(ocn_fit)$cov %>%
  rbind(fitted(ocn_fit)$mean)%>%
  round(3) %>%
  as_tibble(rownames = "x") %>%
  mutate(x = replace(x, x == "", "mean"))%>%
  write_csv("data/cm_ocn.csv")
fitted(pcn_fit)$cov %>%
  rbind(fitted(pcn_fit)$mean)%>%
  round(3) %>%
  as_tibble(rownames = "x") %>%
  mutate(x = replace(x, x == "", "mean"))%>%
  write_csv("data/cm_pcn.csv")
|
4ea88b79fa9374e506a57913751784c5b237149a | c3035443d312d175595aac17220607b0b9009351 | /Ill-coinflip_02.R | 1a5f2b4122cb4057afaac628e246f9387945daab | [] | no_license | Oleg-Krivosheev/Statistical_Inference | 2015883b346e4708a1e63a84987dd5893ddbaefa | 0ec636b3096dbfce2f91cb6e18ca28be1537181a | refs/heads/master | 2021-01-13T14:30:20.800412 | 2016-12-01T19:45:04 | 2016-12-01T19:45:04 | 72,881,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,040 | r | Ill-coinflip_02.R | library(ggplot2)
set.seed(22345)
## parameters of the distributions
n <- 100
p <- 0.5
## number of simulation
nosim <- 10000
## expected binomial mean/variance
mu <- p
s2 <- p * (1.0 - p)
## sampling: nosim rows of n Bernoulli(p) flips each
r <- matrix(rbinom(nosim * n, size=1, prob = p), nosim)
## per-simulation totals, sample means and sample standard deviations
u <- apply(r, 1, sum)
m <- apply(r, 1, mean)
s <- apply(r, 1, sd)
## plotting: histogram of the simulated sample means, overlaid with the
## CLT normal approximation N(mu, s2/n)
g <- ggplot()
g <- g + aes(m)+ geom_histogram(binwidth=0.01, aes(y = ..density..), colour="black", fill="blue")
g <- g + labs(title = paste('Nsim = ', nosim, ', N = ', n))
g <- g + stat_function(fun = function(x) dnorm(x, mean = mu, sd = sqrt(s2/n)), colour="salmon", size=1)
## theoretical 95% band (kept for reference):
#g <- g + geom_vline(xintercept = mu + 1.96*sqrt(s2/n), colour="pink", size=1)
#g <- g + geom_vline(xintercept = mu - 1.96*sqrt(s2/n), colour="pink", size=1)
## empirical 95% band estimated from the simulated means
g <- g + geom_vline(xintercept = mean(m) + 1.96*sd(m), colour="pink", size=1)
g <- g + geom_vline(xintercept = mean(m) - 1.96*sd(m), colour="pink", size=1)
## reference value 0.34 -- presumably an observed sample mean; TODO confirm
g <- g + geom_vline(xintercept = 0.34, colour="red", size=1)
print(g)
## probability of a mean <= 0.34 under the fitted normal for the means
pnorm(0.34, mean = mean(m), sd=sd(m))
|
35702cfad87a9649be4b4f1081ec92e22f3c708b | 4abf320205d58409cd2dc5b667d6767a24411ab3 | /cachematrix.R | 17dbaf9bddb2aaeecdb2b9e85c8141e8a336abaf | [] | no_license | caramirezal/ProgrammingAssignment2 | f4f0349f9e687a30862355681b72bf08cedd54ee | ef87b62392e80d0093746deede018ce3bde073b2 | refs/heads/master | 2021-01-22T10:13:38.265364 | 2017-02-15T11:43:10 | 2017-02-15T11:43:10 | 81,994,618 | 0 | 0 | null | 2017-02-14T22:16:14 | 2017-02-14T22:16:14 | null | UTF-8 | R | false | false | 1,452 | r | cachematrix.R | ## Definition of a matrix like object which stores both
## makeCacheMatrix constructs a special "matrix" object: a list of
## getter/setter closures wrapping a data matrix `x` together with a cached
## copy of its inverse (`inv`), both living in this call's environment.
##
## Args:
##   x: the matrix to wrap (defaults to a 1x1 NA matrix).
## Returns:
##   A list with elements set, get, setinv and getinv.
makeCacheMatrix <- function(x = matrix()) {
        ## no inverse cached yet
        inv <- NULL
        ## replace the stored matrix; the cached inverse is invalidated here,
        ## otherwise cacheSolve() could return the inverse of the OLD matrix
        setx <- function(y) {
                x <<- y
                inv <<- NULL
        }
        ## return the stored matrix
        getx <- function() x
        ## store a freshly computed inverse in the cache
        setinv <- function(val) inv <<- val
        ## return the cached inverse, or NULL when not yet computed
        getinv <- function() inv
        ## expose the closures as a named list
        list(set = setx, get = getx,
             setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix wrapped by a makeCacheMatrix
## object, computing it at most once: the first call computes it with
## solve() and stores it; subsequent calls return the cached copy.
##
## Args:
##   x:   a list produced by makeCacheMatrix (get/getinv/setinv closures).
##   ...: extra arguments forwarded to solve().
## Returns:
##   A matrix that is the inverse of x$get().
cacheSolve <- function(x, ...) {
        inv <- x$getinv()
        ## cache hit: return immediately; nothing to recompute or re-store
        if (!is.null(inv)) {
                message("Getting inverse from cache")
                return(inv)
        }
        ## cache miss: compute the inverse, store it for next time
        res <- solve(x$get(), ...)
        x$setinv(res)
        res
}
|
249383b1f48ba244da18ef5cc6cb7fe7d7957ef9 | 7c1d52fa5ffae4b03301603c5ff84022851a4b2e | /experiment3/timepoint1/data/read.R | 7518c66fc887e28253e51bd0858de8a8a56a734b | [] | no_license | WhiteJP/AmbiguityAversionProject | eff01981918443cee4e48f31557227b5262fda87 | 6e91803f3bb1bc1e64166de1a55c85360a2d8507 | refs/heads/master | 2022-06-22T22:10:31.673041 | 2022-05-26T16:06:56 | 2022-05-26T16:06:56 | 167,326,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,121 | r | read.R | library(here)
library(tidyverse)
library(jsonlite)
location <- here("experiment3", "timepoint1", "data")
setwd(location)
# read data
rawData <- read.csv(file="rawresults-exp3a.csv", header=TRUE)
# convert json data to dataframe: each row's `content` field holds a JSON
# blob of per-trial responses; parse it and expand into columns
d <- rawData %>%
  mutate(json = map(content, ~ fromJSON(.) %>% as.data.frame())) %>%
  unnest(json) %>%
  select(-content, -exp)
# make the data long; steps:
#   1. coerce the trial columns to character so they can share one column
#   2. pivot to long format (one row per participant x trial column)
#   3. split each column name into its type prefix and trial number
#   4. widen back out so each type becomes its own column
# step 1
d <- d %>% mutate(
  across(V1:yprob10, as.character)
)
d <- d %>%
  # step 2
  pivot_longer(cols = c(V1:yprob10),
               names_to = "trialn",
               values_to = "condition") %>%
  # step 3: leading letters = type (e.g. "V", "yprob"), trailing digits = trial
  extract(trialn,
          c("type", "trialn"),
          "([a-zA-Z]+)([0-9]*$)"
  ) %>%
  # step 4
  pivot_wider(names_from = "type",
              values_from = "condition")
# rename columns and order columns nicely
d <- d %>%
  rename(vignette = "V",
         datetime = "date") %>%
  relocate(datetime, .after = last_col())
# write to csv
write_csv(d, "exp3a-data.csv")
|
3ca7672397341135bd8d386885a7da1c3fef1d16 | 15f8232b8a574ae94266927e4df5182cfc99f517 | /man/path_to_root.Rd | fb688f6f874f82ab6843a83ab5fab7798b4e60a8 | [] | no_license | cran/autoharp | a2c6d51ad22354a276145098200e92aecd7bc3fd | d2efbb0a76285ba95ed6950a61e05f1398b5e656 | refs/heads/master | 2023-09-06T05:29:22.580401 | 2021-11-12T21:50:02 | 2021-11-12T21:50:02 | 334,082,346 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 733 | rd | path_to_root.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_routines.R
\name{path_to_root}
\alias{path_to_root}
\title{Extract a path from node to root.}
\usage{
path_to_root(th, node_num)
}
\arguments{
\item{th}{A TreeHarp object.}
\item{node_num}{A node number to start tracking upwards from.}
}
\value{
A vector of 1's and 0's that can be used to carve out the branch
alone, using \code{\link{carve_subtree}}.
}
\description{
Identifies the nodes on the path from a node up to the root of a TreeHarp
object.
}
\details{
This function allows the user to identify the branch from a node
up to the root of a tree.
}
\examples{
ex1 <- quote(x <- f(y, g(5)))
th1 <- TreeHarp(ex1, TRUE)
path_to_root(th1, 5)
}
|
4cb24e7f9d9c3cec4b435f1bf9dca21d4acff127 | 1e820fe644a039a60bfbee354e50c775af675f6b | /ProbStatsR/Chapter 6 Point Estimation.R | 735c3e9df5b83c922910429c585ac4374f583b5e | [] | no_license | PyRPy/stats_r | a334a58fca0e335b9b8b30720f91919b7b43d7bc | 26a3f47977773044d39f6d8ad0ac8dafb01cce3f | refs/heads/master | 2023-08-17T00:07:38.819861 | 2023-08-16T14:27:16 | 2023-08-16T14:27:16 | 171,056,838 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 779 | r | Chapter 6 Point Estimation.R | # Chapter 6 Point Estimation ----------------------------------------------
# 6.1 DESCRIPTIVE STATISTICS ----------------------------------------------
# Ex6.1-1 -----------------------------------------------------------------
dat <- scan("Data-R/E6_1-01.txt")
dat
# find sample mean
xbar <- mean(dat)
xbar
# sample variance
xvar <- var(dat)
xvar
# find sample standard deviation
xsd <- sd(dat)
xsd
# Ex6.1-3 -----------------------------------------------------------------
plungers <- scan("Data-R/E6_1-03.txt")
# a) sample mean and sd
mean(plungers)
sd(plungers)
# b) histograms
range(plungers)
length(plungers)
hist(plungers)
class_boundaries <- 10:22 + 0.95
class_boundaries
hist_obj <- hist(plungers, breaks = class_boundaries)
hist_obj$breaks
hist_obj$counts
|
8266e7bdadc3405ab3b168b06a887b30f5d666f7 | b3bc093f56f7a211e54cda4a289688d2bb545500 | /man/sdemodel_display.Rd | a7799395aeea22ab0bcd3e3ebabd4a387e9dcc80 | [] | no_license | ogarciav/resde | 730499e376a6a66a7f87e1047cc554324a70f50f | 776449729e833359250d13c83a1cb81b18a070e8 | refs/heads/master | 2023-06-10T13:28:04.436257 | 2023-05-20T01:27:59 | 2023-05-20T01:29:13 | 296,492,518 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 436 | rd | sdemodel_display.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sdemodel.R
\name{sdemodel_display}
\alias{sdemodel_display}
\title{Display the model specification}
\usage{
sdemodel_display(model)
}
\arguments{
\item{model}{SDE model specification, as produced by sdemodel()}
}
\value{
Invisibly returns its argument
}
\description{
Display the model specification
}
\examples{
mod <- sdemodel(); sdemodel_display(mod)
}
|
d5fb3ff4b6a12a3b919949d9bdc71dac99e755e8 | 0d5568144297e43517ee75e40daf5c034e9fd454 | /R/Mapping_functions.R | 1b56a9fa71ae2f7989d115b6bfec9a1abf51ca1b | [] | no_license | DanielAyllon/EarthQuakeAnalyzer | ef118152bb2144269ae1caa8dfd54a5b69359cd7 | cbcd11f210fc2b2e7d5b5fb8c89f77913db1d8b5 | refs/heads/master | 2021-01-20T00:01:09.807430 | 2017-07-04T08:38:38 | 2017-07-04T08:38:38 | 89,068,802 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,212 | r | Mapping_functions.R | #' Function to visualize the epicenters of earthquakes with annotation in a leaflet interactive map
#'
#' This function reads a filtered data frame with earthquakes to visualize. The function maps the epicenters (LATITUDE/LONGITUDE) and annotates each point in a pop-up window containing annotation data stored in a column of the data frame. The user is able to choose which column is used for the annotation in the pop-up with the function argument named annot_col.
#' Each earthquake is shown with a circle, and the radius of the circle is proportional to the earthquake's magnitude.
#'
#' @param dataframe A filtered dataframe with the earthquakes to visualize.
#' @param annot_col A character string indicating the column used for the annotation in the pop-up window.
#'
#' @return A leaflet fully interactive map plotting the epicenters of selected earthquakes.
#'
#' @importFrom leaflet leaflet
#' @importFrom leaflet addTiles
#' @importFrom leaflet addCircleMarkers
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' eq_clean_data("signif.txt") %>% dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(DATETIME) >= 2000) %>% eq_map(annot_col = "DATETIME")
#' }
#'
#' @export
eq_map<-function(dataframe,annot_col){
leaflet::leaflet() %>%
leaflet::addTiles() %>%
leaflet::addCircleMarkers(
data = dataframe,
radius = ~ ifelse(!is.na(EQ_PRIMARY),EQ_PRIMARY,2),
lng = ~ LONGITUDE,
lat = ~ LATITUDE,
popup = ~ dataframe[[annot_col]])
}
#' Function to create annotations to be displayed in the maps created by eq_map()
#'
#' This function takes the filtered and cleaned dataframe as an argument and creates an HTML label that can be used as the annotation text in a leaflet map created by eq_map().
#' This function puts together a character string for each earthquake that shows the cleaned location (as cleaned by the eq_location_clean() function), the magnitude, and the total number of deaths, with boldface labels for each. If an earthquake is missing values for any of these, both the label and the value are skipped for that element of the tag.
#'
#' @param dataframe A filtered and cleaned (as cleaned by the eq_location_clean() function) dataframe with the earthquakes to visualize.
#'
#' @return A dataframe containing a column named "popup_text" with the character string to be used as input in the eq_map() function.
#'
#' @importFrom dplyr select
#' @importFrom dplyr mutate
#' @importFrom dplyr %>%
#'
#' @examples
#' \dontrun{
#' eq_clean_data("signif.txt") %>% dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(DATETIME) >= 2000) %>% eq_create_label() %>% eq_map(annot_col = "popup_text")
#' }
#'
#' @export
eq_create_label<-function(dataframe){
mydataframe<-dplyr::select(dataframe,LATITUDE,LONGITUDE,LOCATION_NAME,EQ_PRIMARY,TOTAL_DEATHS) %>%
dplyr::mutate(popup_text=paste("<b>Location:</b>", LOCATION_NAME, "<br />"),popup_text=ifelse(!is.na(EQ_PRIMARY),paste(popup_text,"<b>Magnitude:</b>", EQ_PRIMARY, "<br />"),popup_text),
popup_text=ifelse(!is.na(TOTAL_DEATHS),paste(popup_text, "<b>Total deaths:</b>", TOTAL_DEATHS, "<br />"),popup_text))
}
|
9f8b0b06025b6f47f9bc70a6c51cf766d91f6920 | 1b637d140ccad852fc1431ce48ae4da50629d0e6 | /R/sumstat_seg_sites.R | b5f9c151a5de2585ec05b4d0618f23b038ddec4b | [] | no_license | ijwilson/coala | aaa671daf9763057ea5a34efc90c98ba8dfbacf5 | 909e6f237d42e3972e3e54384473efde9d8841e7 | refs/heads/master | 2021-03-16T06:04:40.515782 | 2017-04-01T09:49:28 | 2017-04-01T09:49:28 | 91,571,290 | 0 | 0 | null | 2017-05-17T11:57:55 | 2017-05-17T11:57:55 | null | UTF-8 | R | false | false | 2,250 | r | sumstat_seg_sites.R | #' @importFrom R6 R6Class
stat_segsites_class <- R6Class("stat_segsites", inherit = sumstat_class,
private = list(req_segsites = TRUE),
public = list(
calculate = function(seg_sites, trees, files, model, sim_tasks = NULL) {
seg_sites
}
)
)
#' Summary Statistic: Segregating Sites
#'
#' This summary statistics generates a matrix of segregating sites.
#' This is useful for calculating summary statistics that \pkg{coala}
#' does not support..
#'
#' @inheritParams sumstat_four_gamete
#' @return A list of \code{\link[=create_segsites]{segsites}} objects.
#' These can be treated as a matrix for
#' most applications. Rows are individuals, columns are SNPs.
#' @export
#' @seealso For a description of the segregating sites class: \code{\link{create_segsites}}
#' @template summary_statistics
#' @examples
#' model <- coal_model(5, 1) +
#' feat_mutation(5) +
#' sumstat_seg_sites("segsites")
#' stats <- simulate(model)
#' print(stats$segsites)
sumstat_seg_sites <- function(name = "seg_sites", transformation = identity) {
stat_segsites_class$new(name, transformation)
}
conv_for_trios <- function(seg_sites, model) {
for (i in seq(along = seg_sites)) {
locus_length <- get_locus_length(model, i, total = FALSE)
if (length(locus_length) == 1) next
total_length <- sum(locus_length)
borders <- cumsum(locus_length[1:4] / total_length)
pos <- get_positions(seg_sites[[i]])
left <- pos < borders[1]
middle <- pos >= borders[2] & pos < borders[3]
right <- pos >= borders[4]
pos[left] <- pos[left] * total_length / locus_length[1]
pos[middle] <- (pos[middle] - borders[2]) * total_length / locus_length[3]
pos[right] <- (pos[right] - borders[4]) * total_length / locus_length[5]
trio_segsites <- seg_sites[[i]][, left | middle | right]
seg_sites[[i]] <- create_segsites(as.matrix(trio_segsites),
pos[left | middle | right],
c(rep(-1, sum(left)),
rep(0, sum(middle)),
rep(1, sum(right))),
FALSE)
assert_that(nrow(seg_sites[[i]]) > 0)
}
seg_sites
}
|
4bbaaa165a5f8fb3a817f849fb7215f28641eaa1 | 4359d75816ac645b6b80e72b75068f1d4ffc5164 | /R/plot_lollipop.R | 9f22e900a5f23931ab79103080da61106b94e003 | [] | no_license | Changing-Cities-Research-Lab/seattleViz | 04e5e3cfad30a57b632614fed310729ebc2b0a7b | fbcb42776e3dbf74153f24d509801d7b5cfb288d | refs/heads/main | 2023-04-13T15:55:07.183707 | 2021-04-12T23:06:48 | 2021-04-12T23:06:48 | 337,885,525 | 0 | 2 | null | 2021-02-25T06:54:14 | 2021-02-10T23:48:54 | null | UTF-8 | R | false | false | 3,428 | r | plot_lollipop.R | #' Produce lollipop plot by King County HRA.
#'
#' This function takes in data and produces a horizontal lollipop plot by
#' King County HRA The order of these categories can be adjusted by changing
#' the factor levels of the facet variable. Input data needs columns for
#' variable of interest (titled "var") and HRA.
#'
#' @param data Data with column for variable of interest with "facet" and "facet_col"
#' @param var Column name of variable of interest.
#' @param limits Y-axis limits.
#' @param title Plot title
#' @param x_title Title to display along x-axis
#' @param scale_type Y-axis scale type: "numeric" or "percent"
#' @param save T if user would like to return plot object and save file, F (default) to just return object.
#' @param savename File name of map for saving.
#' @param caption Figure caption
#' @return Lollipop plot of variable by HRA and SES.
#' @export
# Lollipop Plot
plot_lollipop <- function(
data,
var,
limits,
title = NULL,
x_title = NULL,
scale_type = "numeric",
save = F,
savename = "plot.png",
caption = paste0(frb_acs_caption_splitline, ses_caption)
) {
library('tidyverse')
#labels = c("Overall", gent_cat, race_short, inc_cat, ses_lollipop_cat)
#colors = c("white", gent_cat_colors, race_short_colors, inc_cat_colors, ses_lollipop_colors)
#names(colors) = labels
if (scale_type == "percent") {
label_type = scales::percent
} else if (scale_type == "numeric") {
label_type = scales::comma
} else if (scale_type == "dollar") {
label_type = scales::dollar
} else if (scale_type == "dollark") {
label_type = function(x) {scales::dollar(x, scale = 0.001, suffix = "k")}
} else {
return("Please select percent or numeric")
}
# Have line segment start at 0
ystart = 0
if (limits[1] > 0) {
ystart = limits[1]
}
plot <-
ggplot(data, aes(x = cat, y = {{var}}, fill = facet_col)) +
ggtitle(title) +
geom_segment(aes(x=cat, xend=cat,
y=ystart, yend={{var}}), size=0.25,
show.legend = FALSE) +
geom_point(aes(color = factor(cat)), size = 3.25, shape = 21,
colour = "black", show.legend = TRUE) +
geom_hline(yintercept=0, linetype="dashed") +
scale_y_continuous(limits = limits,
expand = c(0, 0),
labels = label_type) +
#scale_color_manual(values = colors,
# labels = labels) +
scale_fill_manual(values = ses_cat_colors) +
facet_grid(rows = vars(facet),
cols = vars(facet_col),
scale = "free",
space = "free"
#space = "fixed"
) +
theme_bw() +
theme(
# Panel
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.spacing.x = unit(1, "lines"),
# Axis
axis.line = element_line(colour = "black"),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title = element_blank(),
axis.title.x = element_text(size = 9),
# Legend
legend.position = "none",
# Caption
plot.caption = element_text(size = 7, hjust = 0)) +
labs(y = x_title, caption = caption) +
coord_flip()
final_plot = plot
height = 8
width = 9
if (save) {
ggsave(savename, final_plot, height = height, width = width)
}
return(final_plot)
}
|
588af74ab6c661b500f64dec9fdf99e88061ecb4 | 7a4d8e8be2fc97eb81e71f58a43849a3f806eb5d | /man/gglasso_forstab.Rd | 409aa3289602b51309038dc3e55b773324bce77c | [] | no_license | Marie-PerrotDockes/Fus2mod | eb1033ea78e903e9f0184825e53197531b857786 | 889ba6df691260f18708833ac2cba636a5b16529 | refs/heads/master | 2020-03-11T04:14:08.265434 | 2018-08-17T11:28:49 | 2018-08-17T11:28:49 | 129,771,020 | 0 | 2 | null | 2018-08-17T12:27:22 | 2018-04-16T16:07:03 | R | UTF-8 | R | false | true | 1,132 | rd | gglasso_forstab.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grp_lasso.R
\name{gglasso_forstab}
\alias{gglasso_forstab}
\title{Description of the function}
\usage{
gglasso_forstab(x, y, group, ord, q)
}
\arguments{
\item{group}{a vector with two levels. (The group of the ANCOVA)}
\item{response}{a vector response variable}
\item{regressors}{a quantitative matrix of regressor}
\item{a}{the parameters that indicate how much the coefficients will be fused}
\item{lambda}{if the user wants to use it owns values of lambdas}
}
\value{
The coefficients of the fused lasso ANCOVA for the different value of lambda
}
\description{
Description of the function
}
\examples{
B <- c(1, -1, 1.5, 1.5, rep(0, 6), 2, 0, 2, 0)
group <- c(rep('M1', 10), rep('M2', 10))
regressors <- matrix(rnorm(6*20), ncol = 6)
X <- model.matrix(~group + group:regressors - 1)
y <- X\%*\%B + rnorm(20)
y <- scale(y)
mod <- fl2(y, regressors, group)
colors <- c(rep("grey",2), rep('green',2),rep('black', 6), rep(c("orange","blue"), 2), 'darkgreen', rep('yellow',3), rep('purple',2))
matplot(mod$lambda ,t(mod$beta),type='l',col=colors)
}
|
9477a5698ad5aac4fbb18d255563f3dc991cc346 | de6f15325cc65450595fe52e5d03f0a8b79fc3da | /cachematrix.R | 885b36a215bedf8b1295a19f8e8944c0b048bb37 | [] | no_license | keyurkulkarni/rprogramming | 4ee3678e142081c857b042aafc2e7f1c9e3282c8 | b7aa9d8bc02bb7cc9e2877cbebf9a4f84f0b005d | refs/heads/master | 2021-01-13T13:11:28.300431 | 2016-11-03T06:58:14 | 2016-11-03T06:58:14 | 72,717,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 771 | r | cachematrix.R | # This function gets called first and generates a list with 3 functions, getting i , setting and getting inverse
makeCacheMatrix <- function(x = matrix()) {
i = NULL
get = function() x
setinv = function(inverse) i <<- inverse
getinv = function() i
list(get=get, setinv=setinv, getinv=getinv)
}
# This function actually calculates the inverse , but first checks if getinv object is not NULL. If not NULL , it calls setinv first before returing.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i = x$getinv()
if (!is.null(i)){
message("getting cached data")
return(i)
}
m = x$get()
i = solve(m, ...)
x$setinv(i)
i
}
|
6816d3170e9b8c79998041542d68ea0247a7bdbb | a2f5e5bbad23093de07d356032573a53e880b6bf | /R/arrayIndex.R | 4dde209124f103726a0a451be5e439a655dc67b6 | [] | no_license | AntonKrasikov/R.utils | 4bfdba6e3f27b20c723e354d3231adcea5264fd9 | 5d6f194b30356fb7118e0d290bce2b5c39b8fb06 | refs/heads/master | 2020-05-29T12:33:06.471354 | 2019-02-14T21:51:03 | 2019-02-14T21:51:03 | 189,131,159 | 0 | 0 | null | 2019-05-29T15:47:16 | 2019-05-29T01:48:47 | R | UTF-8 | R | false | false | 1,410 | r | arrayIndex.R | ###########################################################################/**
# @RdocDefault arrayIndex
#
# @title "Converts vector indices to array indices"
#
# @synopsis
#
# \description{
# @get "title" assuming last array dimension to "move fastest", e.g.
# matrices are stored column by column.
# }
#
# \arguments{
# \item{i}{A @vector of vector indices to be converted to array
# indices.}
# \item{dim}{A non-empty @numeric @vector specifying the dimension of
# the array.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns an @integer @matrix of \code{length(i)} rows and
# \code{length(dim)} columns.
# }
#
# \examples{\donttest{
# @include "../incl/arrayIndex.Rex"
# }}
#
# \references{
# [1] H. Bengtsson, \emph{Bayesian Networks - a self-contained
# introduction with implementation remarks}, Master's Thesis in
# Computer Science, Mathematical Statistics, Lund Institute of
# Technology, 1999.\cr
# }
#
# \seealso{
# From R v2.11.0 there is @see "base::arrayInd", which does
# the same thing as this method.
# @see "base::which" with argument \code{arr.ind=TRUE}.
# }
#
# @keyword manip
# @keyword utilities
# @keyword internal
#*/###########################################################################
setMethodS3("arrayIndex", "default", function(i, dim, ...) {
.Defunct(msg = "R.utils::arrayIndex() is defunct. Instead, use base::arrayInd().")
})
|
225b88303677b6b3a90499d8ed3f392b366541c2 | 36ae6e4e2e726125534f74044101f4a57c13c223 | /plot1.R | c0488a75447735ff9b5096c4b851e926b3803150 | [] | no_license | LadyHema/ExData_Plotting1 | dce4048933b3c322902dcc57bf7848fa68b86508 | 9d7cbae89979395c7273d4a5feb3ac70f00fe678 | refs/heads/master | 2021-04-27T00:27:26.383647 | 2018-03-11T18:25:22 | 2018-03-11T18:25:22 | 123,819,283 | 0 | 0 | null | 2018-03-04T19:02:16 | 2018-03-04T19:02:16 | null | UTF-8 | R | false | false | 857 | r | plot1.R | #read data
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
method = "curl", destfile = "household_power_consumption.zip")
unzip("household_power_consumption.zip")
household <- read.table("household_power_consumption.txt",
sep = ";", dec = ".", na.strings = "?", skip = 66637, nrows = 2880)
names(household) <- names(read.table("household_power_consumption.txt", header = TRUE,
sep = ";", dec = ".", na.strings = "?", nrows = 1))
household$Date <- as.Date(strptime(household$Date, "%d/%m/%Y"))
household$Time <- strptime(paste(household$Date, household$Time), "%Y-%m-%d %H:%M:%S")
hist(household$Global_active_power, col = "red",
xlab = "Global active power (kilowatts)", main = "Global active power")
dev.copy(png,'plot1.png')
dev.off()
|
7c4e06733db863161534318a3f11f64639ab3fdf | 9c3dbd18c38c5316549079b17620d945da9f6393 | /R/ChurnReductionEDA.R | cc8103f1c6b966f31c76d79e3f0528a532070819 | [] | no_license | snowdj/CustomerChurnReduction | 3536394e3bcc7513ded1a7f32cc0c516e7902beb | 714658b6d678426ec5206e6af439fdceb759f1fb | refs/heads/master | 2020-03-22T07:04:42.645291 | 2018-05-31T11:33:34 | 2018-05-31T11:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,202 | r | ChurnReductionEDA.R | # Customer churn reduction EDA
rm(list=ls())
#loading requried libraries
library(dplyr)
library(ggplot2)
library(stringr)
library(corrplot)
fillColor = "#FFA07A"
fillColorRed = "#56B4E9"
train_data <-
read.csv("Train_data.csv",
sep = ',',
na.strings = c(" ", "NA"))
#looking at dimensions
dim(train_data)
#Train data set consist of 3333 observations and 21 varaiables
#checking structure of dataset
str(train_data)
# Visualizing target class freqquencies
train_data %>%
count(Churn) %>%
ggplot(aes(x = Churn,
y = n)) +
geom_bar(stat = 'identity',
colour = "white",
fill = fillColor) +
labs(x = 'Churn rate', y = 'count ', title = 'Customer churn count') +
theme_bw()
table(train_data$Churn)
#Looking at the frequencies of churn , it is not looking like highly imbalance problem.
# Now looking for any missing values
sapply(train_data, function(x) {
sum(is.na(x))
}) # There are no missing values in dataset
#selecting numeric variables
numCols <- unlist(lapply(train_data,is.numeric))
numVarDataset <- train_data[,numCols]
# Visualizing correlation
par(mfrow = c(1, 1))
corr <- cor(numVarDataset)
corrplot(
corr,
method = "color",
outline = TRUE,
cl.pos = 'n',
rect.col = "black",
tl.col = "indianred4",
addCoef.col = "black",
number.digits = 2,
number.cex = 0.60,
tl.cex = 0.70,
cl.cex = 1,
col = colorRampPalette(c("green4", "white", "red"))(100)
)
# From corrplot we can see that dataset consist of multicollinearity
# total.day.minutes and total.day.charge are highly collinear
# total.eve.minutes and total.eve.charge are highly collinear
# total.night.minutes and total.night.charge are highly collinear
# total.intl.minutes and total.intl are highly collinear
# we can exclude one of these predictors later during modeling
############## Generic EDA function for continous variables
plot_continous <- function(dataset, variable,targetVariable) {
var_name = eval(substitute(variable), eval(dataset))
target_var = eval(substitute(targetVariable), eval(dataset))
par(mfrow = c(1, 2))
print(summary(var_name))
print(summary(target_var))
possible_outliers <- (boxplot.stats(var_name)$out)
print(possible_outliers)
print(paste("Total possible outliers", length(possible_outliers)))
table(possible_outliers)
ggplot(train_data, aes(target_var, var_name, fill = target_var)) +
geom_boxplot(alpha = 0.8) + theme(legend.position = "null")
}
############################### looking at 'state' variable. It is a factor variable
train_data %>%
count(state) %>%
ggplot(mapping = aes(x = state, y = n)) +
geom_bar(stat = 'identity',
colour = 'white',
fill = fillColor) +
labs(x = "states", y = "count", "Customers per state") +
coord_flip()
# Fom the plot we can that maximum customers are from west vergenia and lowest are from California
# looking at each variable
plot_continous(train_data, account.length,Churn)
# As we can see, that there are some possible outliers but they are not very extreme. Ignoring them
##################################### analysing international.plan #################################
str(train_data$international.plan) # it is a categorical variable
table(train_data$international.plan)
train_data %>%
count(international.plan) %>%
ggplot(mapping = aes(x = international.plan, y = n)) +
geom_bar(stat = 'identity',
colour = 'white',
fill = fillColor)
# From the plot we can see that most customers dont have international plan.
# next examining for the churn rate percentage of customers with national and internation plan
national_cust_churnRate <- train_data %>%
select(international.plan, Churn) %>%
filter(str_detect(international.plan, "no")) %>%
group_by(Churn) %>%
summarise (n = n()) %>%
mutate(percantage = (n / sum(n)) * 100)
#Only 11.49 % customer with national plan churn out.
international_cust_churnRate <- train_data %>%
select(international.plan, Churn) %>%
filter(str_detect(international.plan, "yes")) %>%
group_by(Churn) %>%
summarise (n = n()) %>%
mutate(percantage = (n / sum(n)) * 100)
# 42.42 % customers with international plan had churn out. It means that the telecom company
# is mainly loosing customers with internation plans.
#####################################Analysing voice.mail.plan ####################################
table(train_data$voice.mail.plan)
# customers with voice plan and their churn rate
voice_plan_churnRate <- train_data %>%
select(voice.mail.plan, Churn) %>%
filter(str_detect(voice.mail.plan, "yes")) %>%
group_by(Churn) %>%
summarise(n = n()) %>%
mutate(churnRatePercentage = (n / sum(n)) * 100)
ggplot(data = voice_plan_churnRate,
mapping = aes(x = Churn, y = churnRatePercentage)) +
geom_bar(stat = 'identity',
colour = 'white',
fill = fillColorRed) +
labs(title = 'Voice main plan customers churn rate')
# 922 customers have voice mail plan and 80 (8.68 %) customers out of 922 churn out.
#customers without voice plan and their churn rate
non_voice_plan_churnRate <- train_data %>%
select(voice.mail.plan, Churn) %>%
filter(str_detect(voice.mail.plan, "no")) %>%
group_by(Churn) %>%
summarise(n = n()) %>%
mutate(churnRatePercentage = (n / sum(n)) * 100)
ggplot(data = non_voice_plan_churnRate,
mapping = aes(x = Churn, y = churnRatePercentage)) +
geom_bar(stat = 'identity',
colour = 'white',
fill = fillColor) +
labs(title = 'Non voice plan Customer churn rate')
# 2411 customers dont have voice mail plan and 403 (16.7 %) out of 2411 churn out
#So customers without voice plan have higher churn rate
# removing parameters that dosn't seem to be logical parameter for customer churn.
#So removing state, area code and phone number
train_data$state <- NULL
train_data$area.code <- NULL
train_data$phone.number <- NULL
############################ Analysing number.vmail.messages ################################
str(train_data$number.vmail.messages)
plot_continous(train_data, number.vmail.messages,Churn)
# no extreme outliers detected.
############################ Analysing total.day.minutes ################################
str(train_data$total.day.minutes)
plot_continous(train_data, total.day.minutes,Churn)
# no extreme outliers detected
############################ Analysing total.day.calls ################################
str(train_data$total.day.calls)
plot_continous(train_data, total.day.calls,Churn)
# no extreme outliers detected
############################ Analysing total.day.charge ################################
str(train_data$total.day.charge)
plot_continous(train_data, total.day.charge, Churn)
# no extreme outliers detected
############################ Analysing total.eve.minutes ################################
str(train_data$total.eve.minutes)
plot_continous(train_data, total.eve.minutes, Churn)
# no extreme outliers detected
############################ Analysing total.eve.calls ################################
str(train_data$total.eve.calls)
plot_continous(train_data, total.eve.calls, Churn)
# no extreme outliers detected
############################ Analysing total.eve.charge ################################
str(train_data$total.eve.charge)
plot_continous(train_data, total.eve.charge, Churn)
# no extreme outliers detected
############################ Analysing total.night.minutes ################################
str(train_data$total.night.minutes)
plot_continous(train_data, total.night.minutes, Churn)
# no extreme outliers detected
############################ Analysing total.night.calls ################################
str(train_data$total.night.calls)
plot_continous(train_data, total.night.calls, Churn)
# no extreme outliers detected
############################ Analysing total.night.charge ################################
str(train_data$total.night.charge)
plot_continous(train_data, total.night.charge, Churn)
# no extreme outliers detected
############################ Analysing total.intl.minutes ################################
str(train_data$total.intl.minutes)
plot_continous(train_data, total.intl.minutes, Churn)
# no extreme outliers detected
############################ Analysing total.intl.calls ################################
str(train_data$total.intl.calls)
plot_continous(train_data, total.intl.calls, Churn)
# no extreme outliers detected
############################ Analysing total.intl.charge ################################
str(train_data$total.intl.charge)
plot_continous(train_data, total.intl.charge, Churn)
# no extreme outliers detected
######################## Analysing number.customer.service.calls #############################
str(train_data$number.customer.service.calls)
plot_continous(train_data , number.customer.service.calls, Churn)
table(train_data$number.customer.service.calls)
|
7bc63bbcf69d6f44824091e3974fe7b1a68eeb63 | 82e6137d5d2a0e9114f76c7e427514bba62aaaf3 | /ercel/app.R | 15ef32977eeb43df10bc62e13dcdf0a7980ecaa5 | [] | no_license | lukuiR/Rpublic | 3a0c25519d10457bc08d6d3a8510865212943a37 | daa067ca078ddce54bb4d822666d4e9f9335c6a5 | refs/heads/master | 2022-06-28T16:10:50.713523 | 2022-06-21T14:48:48 | 2022-06-21T14:48:48 | 119,683,495 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,265 | r | app.R | #dt ext https://rstudio.github.io/DT/extensions.html
#DT ino: https://yihui.shinyapps.io/DT-info/
###
library(shinydashboard)
library(shiny)
library(DT)
library(plotly)
library(shinyalert)
library(readxl)
library(tidyverse)
# Use purrr's split() and map() function to create the list
# needed to display the name of the airline but pass its
# Carrier code as the value
#select only required columns
# UI definition (shinydashboard): a green-skinned dashboard page with
#  - a header titled "Mercel",
#  - a sidebar carrying navigation plus global filter inputs
#    (chef/not, gender, month) and a tab-clearing action link,
#  - a body with two tab items: "tab1" (start page: upload, filters,
#    plot, data table, plot/filter sub-tab) and "dashboard" (supply view).
# All input IDs defined here (typ, kod, month, remove, Gender, chef, slage,
# addd, uploadFile, radio, radio2, text1..text4) are read by the server.
ui <- dashboardPage(skin = "green",
                    # Header; the titleWidth override is intentionally left disabled.
                    dashboardHeader(title = "Mercel" #,titleWidth = 200
                    ),
                    dashboardSidebar(
                      # Primary navigation between the two tabItems in the body.
                      # NOTE(review): id = "tabs" is also used by the tabsetPanel
                      # inside tabItem "tab1" below. Duplicate input IDs are invalid
                      # in Shiny; confirm which widget the server's input$tabs /
                      # appendTab/removeTab calls are meant to target before renaming.
                      sidebarMenu(
                        id = "tabs",
                        menuItem("Start", icon = icon("info"), tabName = "tab1"),
                        menuItem("Supply", icon = icon("line-chart"), tabName = "dashboard")
                      ),
                      # Global multi-select filters applied across tabs.
                      selectInput(
                        inputId = "typ",
                        label = "Cherf/Not:",
                        choices = c('Yes','No','NA'),
                        multiple = TRUE),
                      selectInput(
                        inputId = "kod",
                        label = "Gender:",
                        choices = c('Male','Female', 'NA'),
                        multiple = TRUE),
                      sidebarMenu(
                        # Month filter: 99 encodes "All Year"; months are zero-padded
                        # string codes ('01'..'12') for matching against date fields.
                        selectInput(
                          "month",
                          "Month:",
                          list(
                            "All Year" = 99,
                            "January" = '01',
                            "February" = '02',
                            "March" = '03',
                            "April" = '04',
                            "May" = '05',
                            "June" = '06',
                            "July" = '07',
                            "August" = '08',
                            "September" = '09',
                            "October" = '10',
                            "November" = '11',
                            "December" = '12'
                          ) ,
                          selected = "All Year",
                          selectize = FALSE),
                        # NOTE(review): label text appears truncated/garbled Polish
                        # (presumably "Wyczyść dodatkowe karty" = "Clear extra tabs");
                        # left unchanged here as it is user-facing runtime text.
                        actionLink("remove", "Wyczy dodatkowe karty")
                      )
                    ),
                    dashboardBody(
                      tabItems(
                        # Start page: data upload, per-tab filters, plot and table.
                        tabItem(tabName = "tab1",
                                tabsetPanel(id = "tabs",
                                            tabPanel(
                                              title = "ZZJD Dashboard",icon = icon("glyphicon glyphicon-saved",lib ="glyphicon"),
                                              value = "page1",
                                              #useShinyalert(),
                                              # Row of filters local to this tab.
                                              fluidRow(
                                                column(3,
                                                       selectInput("Gender",label="Choose the Gender",choices=c('Male','Female', 'NA'), multiple = TRUE)),
                                                column(3,
                                                       selectInput("chef",label="Choose the chef/not",choices=c('Yes','No','NA'), multiple = TRUE)),
                                                column(3,
                                                       sliderInput("slage", label="Choose the age",
                                                                   min =34, max = 81, value = c(34, 81))),
                                                column(3,
                                                       # was `value = F`: use the literal FALSE,
                                                       # since F is a reassignable alias.
                                                       checkboxInput("addd", "Add data series", value = FALSE))
                                              ),
                                              # Excel upload consumed by the server's
                                              # reactive dataset (input$uploadFile).
                                              fluidRow(
                                                fileInput("uploadFile", "XLSX file")
                                              ),
                                              br(),
                                              # Plot shown only once a file has been
                                              # uploaded (server exposes output.fileUploaded).
                                              fluidRow(
                                                conditionalPanel(
                                                  condition = "output.fileUploaded",
                                                  plotlyOutput("plot2")
                                                )
                                              ),
                                              fluidRow(
                                                dataTableOutput(outputId = "dt2")
                                              )
                                            ),
                                            # Secondary sub-tab: grouping choice + two plots.
                                            tabPanel("Plot/Filter tab", icon = icon("line-chart"),
                                                     radioButtons("radio2", h3("filter "),
                                                                  choices = list("Waga" = "WAGA", "Supergrupa" = "SUPERGRUPA",
                                                                                 "Memonik" = "MNEMONIK_ODBIORCY"),inline = TRUE,selected = "WAGA"),
                                                     plotlyOutput("plot1"),plotlyOutput("plot11")
                                            )
                                )
                        ),
                        # Supply view: same grouping choice plus free-text inputs per column.
                        tabItem(tabName = "dashboard",
                                h4('Filter tab'),
                                radioButtons("radio", h3("filte"),
                                             choices = list("Waga" = "WAGA", "Supergrupa" = "SUPERGRUPA",
                                                            "Memonik" = "MNEMONIK_ODBIORCY"),inline = TRUE,selected = "WAGA"),
                                fluidRow(
                                  column(1,br(),h3(2018)),
                                  column(2,
                                         textInput("text1", h5("Text input"),
                                                   value = "Enter text...")) ,
                                  column(2,
                                         textInput("text2", h5("Text input"),
                                                   value = "Enter text...")) ,
                                  column(2,
                                         textInput("text3", h5("Text input"),
                                                   value = "Enter text...")) ,
                                  column(2,
                                         textInput("text4", h5("Text input"),
                                                   value = "Enter text..."))
                                )
                        )
                      )
                    )
)
server <- function(input, output, session) {
# shinyalert(
# title = "Witaj",
# text = "Wprowadz: uzytkownik/haslo",
# closeOnEsc = TRUE,
# closeOnClickOutside = FALSE,
# html = FALSE,
# type = "input",
# inputType = "text",
# inputValue = "",
# inputPlaceholder = "",
# showConfirmButton = TRUE,
# showCancelButton = FALSE,
# confirmButtonText = "OK",
# confirmButtonCol = "#AEDEF4",
# timer = 0,
# imageUrl = "",
# animation = TRUE
# )
dataset<-reactive({
inFile <- input$uploadFile
if (is.null(inFile))
return(NULL)
datraw<-read_excel(inFile$datapath, col_names = TRUE,skip=1)
colnames(datraw)[9] <- "Job Family"
colnames(datraw)[55] <- "Employment__1"
dat <- select(datraw, Gender, Employment__1 , `Career Level` ,`Chief/Not` , `Company Name`, `SBU/MFG/CF`, Subfamily , Family , RegionCode, `Level 1`, `Year of retirement`, `Job Family`, `Encrypted Ids`)
#calculate field
dat <- mutate(dat,
`SBU/MFG/CF Name`= case_when(`SBU/MFG/CF` == "SBU" ~ `Level 1`, `SBU/MFG/CF` == "MFG Sites" ~ `Company Name`, TRUE ~ `SBU/MFG/CF`)
)
#Remove not needed job Family
dat$`Job Family`[!(dat$`Job Family` %in% c('Professionals', 'Technicians', 'Executives', 'Advisor & Consultant', 'Managers', 'Supervisor',
'Administrators', 'Operators', 'Superintendent & Section Head',
'Security & Safety', 'Para Professional'))] <- 'Other'
#Aggregate, count id
ag_dat <- group_by(dat, Employment__1 , `Career Level` ,`Chief/Not` , `Company Name`, `SBU/MFG/CF`, Subfamily , Family , RegionCode, `Level 1`, `Year of retirement`, `Job Family`, `SBU/MFG/CF Name`) %>%
summarise(
n = n()
)
#spread
#BC
ag_dat1 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, Employment__1 )%>%
summarise(
n = n()
) %>%
spread(key = Employment__1, value = n) #, fill =0)
ag_dat2 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, `Chief/Not` )%>%
summarise(
n = n()
) %>%
spread(key = `Chief/Not`, value = n) #, fill =0)
colnames(ag_dat2)[dim(ag_dat2)[2]] <- "`Chief/Not NA`"
ag_dat3 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, `Career Level` )%>%
summarise(
n = n()
) %>%
spread(key = `Career Level`, value = n) #, fill =0)
colnames(ag_dat3)[dim(ag_dat3)[2]] <- "`Career Level NA`"
ag_dat4 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, Gender )%>%
summarise(
n = n()
) %>%
spread(key = Gender, value = n) #, fill =0)
colnames(ag_dat4)[dim(ag_dat4)[2]] <- "`Gender NA`"
ag_dat5 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, `Job Family` )%>%
summarise(
n = n()
) %>%
spread(key = `Job Family`, value = n) #, fill =0)
ag_dat6 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, `Year of retirement` )%>%
summarise(
n = n()
) %>%
spread(key = `Year of retirement`, value = n) #, fill =0)
#remove not needed years of retirement
#ag_dat66=ag_dat6[, c('Subfamily' , 'Family' ,'SBU/MFG/CF', 'SBU/MFG/CF Name', 'RegionCode', '2018', '2019', '2020','2021','2022', '2023')] #1:5,17:22)]
ag_dat66 <- select( ag_dat6, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode, '2018',`2019`,`2020`,`2021`,`2022`,`2023`)
ag_dat6 <- group_by(dat, Subfamily , Family ,`SBU/MFG/CF`, `SBU/MFG/CF Name`, RegionCode )%>%
summarise(
n = n()
)
z <- ag_dat1 %>%
inner_join(ag_dat2)%>%
inner_join(ag_dat3)%>%
inner_join(ag_dat4)%>%
inner_join(ag_dat5)%>%
inner_join(ag_dat66)%>%
inner_join(ag_dat6)
head(z,10)
return(head(z,10))
})
output$fileUploaded <- reactive({
return(!is.null(dataset()))
})
outputOptions(output, 'fileUploaded', suspendWhenHidden=FALSE)
output$dt2<- renderDataTable({
datatable(dataset(),
extensions = c('FixedHeader', 'Buttons', 'ColReorder', 'Scroller'),
options = list(
dom = 'Bfrti',
autoWidth = FALSE,
colReorder = TRUE,
deferRender = TRUE,
scrollX = TRUE,
scrollY = "51vh",
scroller = TRUE,
scollCollapse = TRUE,
fixedHeader = TRUE,
columnDefs = list(
list(orderable = FALSE, className = 'details-control', targets = 0)
)
))
})
####plot
output$plot2 <- renderPlotly({
plot_ly(x=dataset()$RegionCode, y=dataset()$n, mode = "bar"
)
})
zz=data.frame(
cbind(c('actual', 'future'), c(220,140), c('grey', 'yellow'))
)
colnames(zz) <- c('Categorie', 'values','color')
output$plot1 <- renderPlotly({
plot_ly(zz, labels = ~Categorie, values = ~values,
textposition = 'inside',
textinfo = 'label',
insidetextfont = list(color = '#000000'),
hoverinfo = 'text',
marker = list(colors = ~color), type = 'pie', rotation=160)
})
zz1=data.frame(
cbind(c('actual', 'future'), c(140,220), c('grey', 'red'))
)
colnames(zz1) <- c('Categorie', 'values','color')
output$plot11 <- renderPlotly({
plot_ly(zz1, labels = ~Categorie, values = ~values,
textposition = 'inside',
textinfo = 'label',
insidetextfont = list(color = '#000000'),
hoverinfo = 'text',
marker = list(colors = ~color), type = 'pie', rotation=-20)
})
#####
#####NHR
{
}
#####end event
}
shinyApp(ui, server)
|
e5ea32cfe87111218b4d2ca13aee2461829e8bb3 | 4fe0de9dd230e8d4b5a01032c0bb2aa72459aa8c | /DivvyMapping.R | 9eb39db40c428dbc05fcff51df1a74399ea283c6 | [] | no_license | dan-peters/EvanstonDivvy | de3dee447031fb728eb561661907cc89a66018b4 | c1d35bf1c234306d4cc868c33cc51d28e466e674 | refs/heads/master | 2020-04-25T23:18:45.918349 | 2019-04-02T02:17:18 | 2019-04-02T02:17:18 | 173,141,519 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,858 | r | DivvyMapping.R | library(sp);library(dplyr);library(ggplot2);library(osmplotr);library(tidyr);library(readr)
#load data tables from DivvyMappingData.R output
divvystations <- read_csv('divvystations_mapping.csv')
evanstontrips <- read_csv('evanstontrips_mapping.csv')
evanstonstations <- read_csv('evanstonstations_mapping.csv')
#create vectors with coordinate start and end points for all bike trips in evanstontrips (trips to, from, and in evanston)
x1 <- divvystations$longitude [match (evanstontrips$start_station_id, divvystations$stn_id)]
y1 <- divvystations$latitude [match (evanstontrips$start_station_id, divvystations$stn_id)]
x2 <- divvystations$longitude [match (evanstontrips$end_station_id, divvystations$stn_id)]
y2 <- divvystations$latitude [match (evanstontrips$end_station_id, divvystations$stn_id)]
#create dataframe for ggplot mapping
ggdf <- data.frame(x1=x1,x2=x2,y1=y1,y2=y2,ntrips=evanstontrips$numtrips,
before1990=evanstontrips$before1990,later1989=evanstontrips$later1989)
ggdflong <- gather(ggdf, demographic, ntrips, before1990:later1989)
#rename records to proper facet titles
ggdflong$demographic[ggdflong$demographic=='before1990'] <- 'Riders Born Before 1990'
ggdflong$demographic[ggdflong$demographic=='later1989'] <- 'Riders Born 1990 or Later'
##EVANSTON MAP of JUST TRIPS WITHIN EVANSTON
#determine bounding boxes from coordinates
evanstonbbox <- get_bbox(c(-87.66, 42.02, -87.71, 42.07))
#create basemap
mapevanston <- osm_basemap (bbox = evanstonbbox, bg = 'gray40')
#extract OSM street data and overlay on map
evanstonhighway <- extract_osm_objects (key = 'highway', bbox = evanstonbbox)
mapevanston <- add_osm_objects (mapevanston, evanstonhighway, col = 'gray60')
#Map 1: Within Evanston Total Trips
#subset ggdf for trips that start AND end in evanston, plot lines with color to show trip frequency
evanstontotalplot <- mapevanston +
geom_segment(data = ggdf %>%
filter(x1 %in% evanstonstations$longitude &
x2 %in% evanstonstations$longitude),
aes(x = x1, y = y1, xend = x2, yend = y2,col=ntrips),
size=1.8,alpha=0.7,show.legend=TRUE) +
scale_color_gradient(low='#3399FF',high='#FF0099',trans='log10') +
geom_point(data=evanstonstations,aes(x=longitude,y=latitude),
shape = 19, colour = "blue", size = 3, stroke = 3,show.legend = TRUE)+
theme(legend.position="bottom",plot.title = element_text(hjust=0.5),
plot.margin=unit(c(5.5,5.5,5.5,5.5),"points")) +
ggtitle('Total Evanston Divvy Trip Frequency Heatmap')+
labs(color='Number of Divvy Trips') + scale_size(guide = "none") +
geom_text(data=evanstonstations,size=2,color='white',
aes(x=longitude,y=latitude,label=name)) +
guides(color = guide_colorbar(title.position="top"))
#Map 2: Within Evanston Trips by Birth Year of Rider
evanstonageplot <- mapevanston +
geom_segment(data = ggdflong %>%
filter(ntrips>0) %>%
filter(x1 %in% evanstonstations$longitude & x2 %in%
evanstonstations$longitude),
aes(x = x1, y = y1, xend = x2, yend = y2,col=ntrips),
size=1.4,alpha=0.7,show.legend=TRUE) +
scale_color_gradient(low='#3399FF', high='#FF0099',trans='log10') +
geom_point(data=evanstonstations,aes(x=longitude,y=latitude),
shape = 21, colour = "blue", fill = "lightblue",
size = 1, stroke = 3,show.legend = TRUE)+
theme(legend.position="right",plot.title =
element_text(hjust=0.5),plot.margin=unit(c(5.5,5.5,5.5,5.5),"points")) +
ggtitle('Evanston Divvy Trips By Age') +
labs(color='Number of \nDivvy Trips') + scale_size(guide = "none") +
facet_wrap(~demographic) +
guides(color = guide_colorbar(title.position="top", title.hjust = 0.5))
#Now do Chicago
#determine bounding boxes from coordinates
chibbox <- get_bbox(c(-87.75,41.84,-87.6,42.07))
#create basemap
mapchi <- osm_basemap (bbox = chibbox, bg = 'gray10')
#extract OSM street data and overlay on map
chihighway <- extract_osm_objects (key = 'highway', bbox = chibbox)
mapchi <- add_osm_objects (mapchi, chihighway, col = 'gray20')
smallevanstonbbox <- get_bbox(c(-87.66, 42.03, -87.7, 42.07))
dat_B <- extract_osm_objects (key = 'highway', bbox = smallevanstonbbox)
mapchi<- add_osm_objects(mapchi,dat_B,col='grey40')
# Map 3: Leaving Evanston Total Trips
leavingevanstonplot <- mapchi +
geom_segment(data=ggdf %>% filter(ntrips>0)
#filter to start in evanston
%>% filter(x1 %in% evanstonstations$longitude) %>%
#filter out trips that start and end in evanston
filter(!(x1 %in% evanstonstations$longitude & x2 %in%
evanstonstations$longitude)),
aes(x = x1, y = y1, xend = x2, yend = y2,col=ntrips),
size=0.08,alpha=1,show.legend=TRUE,
arrow = arrow(length = unit(0.25,"cm"))) +
scale_color_gradient(trans= "log10", low='#3399FF', high='#FF0099') +
geom_point(data=divvystations %>%
filter(longitude %in% (ggdf$x1) | longitude %in% (ggdf$x2)),
aes(x=longitude,y=latitude),
shape = 21, colour = "blue", fill = "lightblue",
size = 0.1,show.legend = TRUE)+
theme(legend.position="right",plot.margin=unit(c(5.5,5.5,5.5,5.5),"points")) +
ggtitle('Divvy Trips Leaving Evanston') +
labs(color='Number of \nDivvy Trips') + scale_size(guide = "none")
# Map 4: Entering Evanston Total Trips
enteringevanstonplot <- mapchi +
geom_segment(data=ggdf %>% filter(ntrips>0) %>%
#filter to end in evanston
filter(x2 %in% evanstonstations$longitude) %>%
#filter out trips that start and end in evanston
filter(!(x1 %in% evanstonstations$longitude & x2 %in%
evanstonstations$longitude)),
aes(x = x1, y = y1, xend = x2, yend = y2,col=ntrips),
size=.08,alpha=1,show.legend=TRUE,
arrow = arrow(length = unit(0.25,"cm"))) +
scale_color_gradient(trans= "log10", low='#3399FF', high='#FF0099') +
geom_point(data=divvystations %>%
filter(longitude %in% (ggdf$x1) | longitude %in% (ggdf$x2)),
aes(x=longitude,y=latitude),
shape = 21, colour = "blue", fill = "lightblue",
size = 0.1,show.legend = TRUE)+
theme(legend.position="right",plot.margin=unit(c(5.5,5.5,5.5,5.5),"points")) +
ggtitle('Divvy Trips Entering Evanston') +
labs(color='Number of \nDivvy Trips') + scale_size(guide = "none")
#save all plots to pdf
ggsave('Map1.1.pdf',evanstontotalplot,dpi=400)
ggsave('Map1.2.pdf',evanstonageplot,dpi=400)
ggsave('Map2.1.pdf',leavingevanstonplot,dpi=400)
ggsave('Map2.2.pdf',enteringevanstonplot,dpi=400) |
d0f2a15b42e921f65ca8404c27d8ce7436b5dc1b | 419378b1a49d1b19fbf84f1673c7bfa77fe0bd9d | /Zajecia3/Zajecia3.R | 9b1c86e8f1a72b87d869f9ed2553059e17a0daf2 | [] | no_license | mkazber/AI | a6d7a42fe903360b5526aae18d7507f72c9209f7 | 87b31a8e63f6bea12f1aecbe165caf0394312c00 | refs/heads/main | 2023-07-15T12:01:56.982462 | 2021-08-25T20:38:44 | 2021-08-25T20:38:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 485 | r | Zajecia3.R | library(class)
library(tree)
library(mlbench)
dane1 <-read.table("/home/student/Pobrane/Zajecia3/iris_train.csv",header =T ,sep =",")
dane2 <-read.table("/home/student/Pobrane/Zajecia3/iris_test.csv",header =T ,sep =",")
tree.iris <- tree(klasa ~., dane1)
plot(tree.iris)
text(tree.iris)
tree2.iris <-prune.tree(tree.iris,best=2)
plot(tree2.iris)
text(tree2.iris)
result <-predict(tree.iris,dane2[-5],type = 'class')
tablica <-dane2[,5]
blad<-1-sum(result==tablica)/length(tablica)
|
4224396e9a7c6d51e2f96a7565ee7727d3ca51ed | c4dd2b2a2416a62dd5219bee0191077bce8600f5 | /AlexaMarketingShiny/app.R | f29fc561d45ca6bfa57b82fa0b022605e98b5a15 | [] | no_license | william-hatzar/Shiny-app | 0f552230772cfebce3b2f250c3fec658e153b382 | 1644347b90e7285f91b3d09701547cc2b7447a35 | refs/heads/master | 2020-04-14T20:40:13.710175 | 2019-01-04T12:07:45 | 2019-01-04T12:07:45 | 164,102,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,125 | r | app.R | library(shiny)
library(tm)
library(wordcloud)
library(memoise)
library(dplyr)
setwd("C:/Users/Admin/Documents/WordCloudWork")
review <- read.csv("amazon_alexa2.csv", stringsAsFactors = FALSE)
reviewsum <- read.csv("summary2.csv")
reviewQuote <- review
getTermMatrix <- memoise(function(sorter){
myCorpus = Corpus(VectorSource(sorter))
myCorpus = tm_map(myCorpus, content_transformer(tolower))
myCorpus = tm_map(myCorpus, removeWords,
c(stopwords("SMART"), "I", "the", "and", "but", "alexa", "echo", "amazon"))
myCorpus = tm_map(myCorpus, removePunctuation)
myCorpus = tm_map(myCorpus, removeNumbers)
return(myCorpus)
})
review$verified_reviews <- getTermMatrix(review$verified_reviews)
variantsorter <- function(vreview){
specificvariantsubset <- subset(review,variation == vreview)
variantreviews <- specificvariantsubset$verified_reviews
return(variantreviews)
}
ratingsorter <- function(freview){
specificratingsubset <- subset(review,rating == freview)
ratingreviews <- specificratingsubset$verified_reviews
return(ratingreviews)
}
shinyApp(ui = ui, server = server)
|
67fe47d5e9e04ec3338f617d98ad9313031d0577 | 069bb009df38d9adc4cfcc6dcd20437c476f3077 | /GlobalRed.r | 2a06fa44fef05d1b90508784baf6f57b548f82c6 | [] | no_license | kazitoufiq/conversion-prediction-model-in-R | 210420717761087c56e872df887e681a70c4a57f | 3333361784a24cdbb7b96489609b3ba036e0eb9d | refs/heads/master | 2022-09-02T11:03:56.627235 | 2020-05-21T02:33:25 | 2020-05-21T02:33:25 | 265,735,470 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,263 | r | GlobalRed.r | library("caret")
#data collection
setwd("E:/final_data")
data <- read.csv("final_data.csv", header=F)
#data preparation - adding column header
header_name<-c("year", "month", "day", "atime", "placement_id", "exchange_id", "hour",
"name", "exchange_name", "site_id", "site_name", "size", "target")
colnames(data) <- header_name
colnames(data)
#near zero variance checking
nzv <- nearZeroVar(data)
nzv
#just removed year
selected_header <- c("month", "day", "atime", "placement_id", "exchange_id",
"hour", "site_id", "size", "target")
data1 <- (data[,selected_header])
#View(data1)
dim(data1)
#to derive weekday
x <- as.character((data$atime))
y <-strptime(x,"%Y-%m-%d %H:%M:%S", tz = "")
data1$weekday <- y$wday
data1$size <- as.character(data1$size)
selected_header_2 <- c("month", "day", "placement_id", "exchange_id",
"hour", "site_id", "size", "weekday", "target")
data2 <- (data1[,selected_header_2])
feature.names <- names(data2)[1:(ncol(data2)-1)]
#to change every char type to numeric equivalent
for (f in feature.names) {
if (class(data2[[f]])=="character") {
levels <- unique(data2[[f]])
data2[[f]] <- as.integer(factor(data2[[f]], levels=levels))
}
}
data2 <- data.frame(lapply(data2, as.numeric))
summary(data2)
#missing value - impute by -1 [ site id]
data2[is.na(data2)] <- -1
#CONVERT TARGET AS FACTOR WITH - YES - NO LABEL
data2$target <- as.factor(data2$target)
data2$target <- ifelse(data2$target==1,"YES", "NO")
data2$target <- as.factor(data2$target)
###data preparation completed
##data split - training and test
set.seed(1234)
train <- data2[sample(nrow(data2)),]
split <- floor(nrow(train)/2)
trainData <- train[0:split,]
testData <- train[(split+1):(split*2),]
str(trainData)
labelName <- 'target'
predictors <- names(train)[1:(ncol(train)-1)]
#checking the stats for target variable distribution for each set
table(trainData$target)
table(testData$target)
#sampling down - training set
EN_DATA_YES <- trainData[which(trainData$target=="YES"),]
EN_DATA_NO <- trainData[sample(nrow(trainData[which(trainData$target=="NO"),]),nrow(EN_DATA_YES)),]
balanced_train <- rbind(EN_DATA_YES, EN_DATA_NO)
balanced_train <- balanced_train[sample(nrow(balanced_train)),]
######
library("e1071")
m <- naiveBayes(target ~ ., data = balanced_train)
t1 <- table(predict(m, testData[,predictors]), testData[,9])
t1 <- as.data.frame.matrix(t1)
#t1[1,1]
accu_naive <-sum(t1[1,1] + t1[2,2])/sum(t1)
precision_naive <- t1[2,2]/sum(t1$YES)
accu_naive
precision_naive
library("randomForest")
rfm <- randomForest(target ~ ., data = balanced_train, ntry=3, ntree=25)
t <- table(predict(rfm, testData[,predictors]), testData[,9])
t <- as.data.frame.matrix(t)
#t[1,1]
accu_rf <-sum(t[1,1] + t[2,2])/sum(t)
precision_rf <- t[2,2]/sum(t$YES)
accu_rf
precision_rf
varImpPlot(rfm)
#####to provide probability of conversion of the data set####3
prob_score <- predict(rfm, data2[,predictors], "prob")
######################################################
library("caret")
myControl <- trainControl(method='cv', number=10, returnResamp='none')
#benchmark model - gbm
test_model <- train(balanced_train[,predictors], balanced_train[,labelName], method='gbm', trControl=myControl)
preds <- predict(object=test_model, testData[,predictors])
head(preds)
t <- table(preds, testData[,9])
t_gbm <- as.data.frame.matrix(t)
#t[1,1]
accu_gbm <-sum(t[1,1] + t[2,2])/sum(t)
precision_gbm <- t[2,2]/sum(t_gbm$YES)
accu_gbm
precision_gbm
set.seed(1234)
train <- data2[sample(nrow(data2)),]
split <- floor(nrow(train)/3)
ensembleData <- train[0:split,]
blenderData <- train[(split+1):(split*2),]
testingData <- train[(split*2+1):nrow(train),]
# train 3 the models with balanced_train data
model_gbm <- train(balanced_train[,predictors], balanced_train[,labelName], method='gbm', trControl=myControl)
model_rf <- train(balanced_train[,predictors], balanced_train[,labelName], method='rf', ntree=50)
model_rpart <- train(balanced_train[,predictors], balanced_train[,labelName], method='rpart', trControl=myControl)
# get predictions for each ensemble model for two last data sets
# and add them back to themselves
blenderData$gbm_PROB <- predict(object=model_gbm, blenderData[,predictors])
blenderData$rf_PROB <- predict(object=model_rf, blenderData[,predictors])
blenderData$rpart_PROB <- predict(object=model_rpart, blenderData[,predictors])
testingData$gbm_PROB <- predict(object=model_gbm, testingData[,predictors])
testingData$rf_PROB <- predict(object=model_rf, testingData[,predictors])
testingData$rpart_PROB <- predict(object=model_rpart, testingData[,predictors])
# see how each individual model performed on its own
## GBM performance
t <- table(testingData$gbm_PROB, testingData[,9])
t_gbm <- as.data.frame.matrix(t)
accu_gbm <-sum(t[1,1] + t[2,2])/sum(t)
precision_gbm <- t[2,2]/sum(t_gbm$YES)
accu_gbm
precision_gbm
#RF -performance
t <- table(testingData$rf_PROB, testingData[,9])
t_rf <- as.data.frame.matrix(t)
accu_rf <-sum(t[1,1] + t[2,2])/sum(t)
precision_rf <- t[2,2]/sum(t_gbm$YES)
accu_rf
precision_rf
#Rpart -performance
t <- table(testingData$rpart_PROB, testingData[,9])
t_rpart <- as.data.frame.matrix(t)
accu_rpart <-sum(t[1,1] + t[2,2])/sum(t)
precision_rpart <- t[2,2]/sum(t_rpart$YES)
accu_rpart
precision_rpart
predictors <- names(blenderData)[names(blenderData) != labelName]
BL_DATA_YES <- blenderData[which(blenderData$target=="YES"),]
head(BL_DATA_YES)
BL_DATA_NO <- blenderData[sample(nrow(blenderData[which(blenderData$target=="NO"),]),nrow(BL_DATA_YES)),]
balanced_blender <- rbind(BL_DATA_YES, BL_DATA_NO)
head(balanced_blender)
final_blender_model <- train(balanced_blender[,predictors], balanced_blender[,labelName], method='rf', ntree=25)
# See final prediction and performance of blended ensemble
preds <- predict(object=final_blender_model, testingData[,predictors])
t <- table(preds, testingData[,9])
t_rf <- as.data.frame.matrix(t)
accu_rf <-sum(t[1,1] + t[2,2])/sum(t)
precision_rf <- t[2,2]/sum(t_gbm$YES)
accu_rf
precision_rf
|
fbb0a2e52e6ff5cae4ae9706718cc84f5830159d | 3d2aa9f86c2b169a662a2fa0c685c7aa052172e0 | /R/corr2t2d.R | 55c947cf86de3b6c83714fe68e0e3fcff5b2c5a0 | [] | no_license | clacor/corr2D | 970f367e04ce4a704c28cde0a645c7eac7f3a143 | 5d68ee2a6ccfff62823c612a2206c3c89b83cbe2 | refs/heads/master | 2022-07-21T15:49:10.519634 | 2022-07-14T08:59:10 | 2022-07-14T08:59:10 | 60,271,071 | 0 | 1 | null | 2016-06-03T09:53:38 | 2016-06-02T14:31:10 | HTML | UTF-8 | R | false | false | 3,705 | r | corr2t2d.R | #' Two-trace two-dimensional (2T2D) correlation spectroscopy
#'
#' \code{corr2t2d} compares a pair of spectra in the form of a cross
#' correlation analysis.
#'
#' \code{corr2t2d} implements the Two-trace two-dimensional (2T2D) approach
#' as described by I. Noda (2018) <DOI:10.1016/j.molstruc.2018.01.091>.
#' The idea is to compare two spectra in a 2D correlation-like
#' approach which was previously not possible as 2D correlation analysis
#' usually needs at least three spectra.
#'
#' @param Sam Numeric vector containing the sample spectrum to be correlated.
#' Can contain the spectral variable of the sample and reference spectrum
#' as \code{names}.
#' @param Ref Numeric vector containing the sample spectrum to be correlated.
#' Can contain the spectral variable of the sample and reference spectrum
#' as \code{names}.
#' @param Wave Numeric vector containing the spectral variable. Needs to be
#' specified if names of \code{Sam} and \code{Ref} are undefined.
#' @param preview Logical: Should a 3D preview of the asynchronous codistribution
#' spectrum be drawn at the end? Uses \code{\link[rgl]{persp3d}} from \pkg{rgl}
#' package.
#'
#' @return \code{corr2t2d} returns a list of class "corr2d" containing the
#' complex correlation matrix (\code{$FT}), the correlation and
#' disrelation coefficient as a complex matrix ($coef), the sample
#' \code{$Ref1} and reference spectrum \code{$Ref2} as well as the
#' spectral variable \code{$Wave1} and \code{$Wave2}.
#'
#' @references
#' I. Noda (2018) <DOI:10.1016/j.molstruc.2018.01.091>
#'
#' @seealso For plotting of the resulting list containing the 2D correlation
#' spectra or correlation coefficient see \code{\link{plot_corr2d}} and
#' \code{\link{plot_corr2din3d}}.
#'
#' @examples
#' testdata <- sim2ddata()
#'
#' twodtest <- corr2t2d(testdata[4, ], testdata[5, ])
#'
#' plot_corr2d(twodtest, Im(twodtest$FT))
#'
#' @export
corr2t2d <- function(Sam, Ref, Wave = NULL, preview = FALSE)
{
if (!identical(length(Sam), length(Ref))) {
stop("length(Sam) and length(Ref) must be equal")
}
if (is.null(names(Sam)) && is.null(names(Ref)) &&
is.null(Wave)) {
stop("Spectral variable must be specified at Wave,
names(Sam) or names(Ref)")
}
if(is.null(Wave) && is.null(names(Sam))) {
Wave1 <- as.numeric(names(Ref))
Wave2 <- as.numeric(names(Ref))
} else if(is.null(Wave)) {
Wave1 <- as.numeric(names(Sam))
Wave2 <- as.numeric(names(Sam))
} else {
Wave1 <- Wave
Wave2 <- Wave
}
# Calculate 2T2D spectra
syn2t2d <- Sam %o% Sam + Ref %o% Ref
asyn2t2d <- Sam %o% Ref - Ref %o% Sam
# Caluclate 2T2D correlation and disrelation coefficient
corrcoef <- syn2t2d / sqrt(diag(syn2t2d) %o% diag(syn2t2d))
disrcoef <- asyn2t2d / sqrt(diag(syn2t2d) %o% diag(syn2t2d))
Obj <- list(FT = syn2t2d + asyn2t2d*1i, coef = corrcoef + disrcoef*1i,
Ref1 = Sam, Ref2 = Ref, Wave1 = Wave1, Wave2 = Wave2)
# 3d preview of the asynchronous 2T2D spectrum ----------------------------
if (preview == TRUE) {
if (dim(asyn2t2d)[1] > 700) {
tmp1 <- round(seq(1, dim(asyn2t2d)[1], length = 700))
} else {
tmp1 <- seq(1, dim(asyn2t2d)[1], 1)
}
if (dim(asyn2t2d)[2] > 700) {
tmp2 <- round(seq(1, dim(asyn2t2d)[2], length = 700))
} else {
tmp2 <- seq(1, dim(asyn2t2d)[2], 1)
}
rgl::persp3d(Wave1[tmp1], Wave2[tmp2], asyn2t2d[tmp1, tmp2], col = "grey")
}
class(Obj) <- "corr2d"
return(Obj)
}
|
fd2b75c53995c4d9fbfaeb67d591ee8344d3338f | 97fce78c83f0c42fc930d957053d20cef71a6f74 | /BetterUniversities.R | 4ce97130e9f7f86f40545431234be10fa8de9197 | [] | no_license | jarechalde/FinalProject2 | 95cbc88ea7ae3096a51ffd9b5a52925888371c24 | 0edabee8f85265f3ae3073d7fa19f466f70f5a33 | refs/heads/master | 2020-03-18T02:14:53.509021 | 2018-05-20T19:50:30 | 2018-05-20T19:50:30 | 134,181,477 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,851 | r | BetterUniversities.R | #Loading libraries
library(stringr)
library(gmapsdistance)
#Setting the working directory
#setwd('C:/Users/Javier/Documents/GitHub/FinalProject')
setwd('/home/javier/Work/FinalProject')
getwd()
#We read the data
data = read.csv('UniversitiesScorecard.csv', header = TRUE, sep = ",")
#datafeat = read.csv('DataFeatures.csv', header = TRUE, sep = ",")
#Select universities from MA
data<-data[data$STABBR=='MA',]
#Getting the different school names
#Schools<-unique(data$INSTNM)
Schools<-data$INSTNM
#Getting only unique schools
#data<-data[data$INSTNM %in% names]
#Creating the new dataframe with the schools from MA
newdata<-data.frame(Schools)
newdata$Latitude<-NA
newdata$Longitude<-NA
newdata$City<-NA
for (i in 1:length(Schools)){
name<-as.character(Schools[i])
#Getting the data from that school
datasch<-data[data$INSTNM==name,]
latitude<-as.character(datasch$LATITUDE)
longitude<-as.character(datasch$LONGITUDE)
city<-as.character(datasch$CITY)
newdata$Longitude[newdata$Schools == name]<-longitude
newdata$Latitude[newdata$Schools == name]<-latitude
newdata$City[newdata$Schools == name]<-city
}
newdata$Schools<-factor(newdata$Schools)
options(digits = 12)
data$PCIP01[data$PCIP01=='NULL']<-0
data$PCIP01<-as.numeric(as.character(data$PCIP01))
data$PCIP01[data$PCIP01!=0]<-1
factor(data$PCIP01)
newdata$PCIP01<-data$PCIP01
data$PCIP03[data$PCIP03=='NULL']<-0
data$PCIP03<-as.numeric(as.character(data$PCIP03))
data$PCIP03[data$PCIP03!=0]<-1
factor(data$PCIP03)
newdata$PCIP03<-data$PCIP03
data$PCIP05[data$PCIP05=='NULL']<-0
data$PCIP05<-as.numeric(as.character(data$PCIP05))
data$PCIP05[data$PCIP05!=0]<-1
factor(data$PCIP05)
newdata$PCIP05<-data$PCIP05
data$PCIP09[data$PCIP09=='NULL']<-0
data$PCIP09<-as.numeric(as.character(data$PCIP09))
data$PCIP09[data$PCIP09!=0]<-1
factor(data$PCIP09)
newdata$PCIP09<-data$PCIP09
data$PCIP11[data$PCIP11=='NULL']<-0
data$PCIP11<-as.numeric(as.character(data$PCIP11))
data$PCIP11[data$PCIP11!=0]<-1
factor(data$PCIP11)
newdata$PCIP11<-data$PCIP11
data$PCIP13[data$PCIP13=='NULL']<-0
data$PCIP13<-as.numeric(as.character(data$PCIP13))
data$PCIP13[data$PCIP13!=0]<-1
factor(data$PCIP13)
newdata$PCIP13<-data$PCIP13
data$PCIP14[data$PCIP14=='NULL']<-0
data$PCIP14<-as.numeric(as.character(data$PCIP14))
data$PCIP14[data$PCIP14!=0]<-1
factor(data$PCIP14)
newdata$PCIP14<-data$PCIP14
data$PCIP15[data$PCIP15=='NULL']<-0
data$PCIP15<-as.numeric(as.character(data$PCIP15))
data$PCIP15[data$PCIP15!=0]<-1
factor(data$PCIP15)
newdata$PCIP15<-data$PCIP15
data$PCIP16[data$PCIP16=='NULL']<-0
data$PCIP16<-as.numeric(as.character(data$PCIP16))
data$PCIP16[data$PCIP16!=0]<-1
factor(data$PCIP16)
newdata$PCIP16<-data$PCIP16
data$PCIP23[data$PCIP23=='NULL']<-0
data$PCIP23<-as.numeric(as.character(data$PCIP23))
data$PCIP23[data$PCIP23!=0]<-1
factor(data$PCIP23)
newdata$PCIP23<-data$PCIP23
data$PCIP26[data$PCIP26=='NULL']<-0
data$PCIP26<-as.numeric(as.character(data$PCIP26))
data$PCIP26[data$PCIP26!=0]<-1
factor(data$PCIP26)
newdata$PCIP26<-data$PCIP26
data$PCIP27[data$PCIP27=='NULL']<-0
data$PCIP27<-as.numeric(as.character(data$PCIP27))
data$PCIP27[data$PCIP27!=0]<-1
factor(data$PCIP27)
newdata$PCIP27<-data$PCIP27
data$PCIP30[data$PCIP30=='NULL']<-0
data$PCIP30<-as.numeric(as.character(data$PCIP30))
data$PCIP30[data$PCIP30!=0]<-1
factor(data$PCIP30)
newdata$PCIP30<-data$PCIP30
data$PCIP38[data$PCIP38=='NULL']<-0
data$PCIP38<-as.numeric(as.character(data$PCIP38))
data$PCIP38[data$PCIP38!=0]<-1
factor(data$PCIP38)
newdata$PCIP38<-data$PCIP38
data$PCIP40[data$PCIP40=='NULL']<-0
data$PCIP40<-as.numeric(as.character(data$PCIP40))
data$PCIP40[data$PCIP40!=0]<-1
factor(data$PCIP40)
newdata$PCIP40<-data$PCIP40
data$PCIP42[data$PCIP42=='NULL']<-0
data$PCIP42<-as.numeric(as.character(data$PCIP42))
data$PCIP42[data$PCIP42!=0]<-1
factor(data$PCIP42)
newdata$PCIP42<-data$PCIP42
data$PCIP45[data$PCIP45=='NULL']<-0
data$PCIP45<-as.numeric(as.character(data$PCIP45))
data$PCIP45[data$PCIP45!=0]<-1
factor(data$PCIP45)
newdata$PCIP45<-data$PCIP45
data$PCIP50[data$PCIP50=='NULL']<-0
data$PCIP50<-as.numeric(as.character(data$PCIP50))
data$PCIP50[data$PCIP50!=0]<-1
factor(data$PCIP50)
newdata$PCIP50<-data$PCIP50
data$PCIP51[data$PCIP51=='NULL']<-0
data$PCIP51<-as.numeric(as.character(data$PCIP51))
data$PCIP51[data$PCIP51!=0]<-1
factor(data$PCIP51)
newdata$PCIP51<-data$PCIP51
data$PCIP52[data$PCIP52=='NULL']<-0
data$PCIP52<-as.numeric(as.character(data$PCIP52))
data$PCIP52[data$PCIP52!=0]<-1
factor(data$PCIP52)
newdata$PCIP52<-data$PCIP52
data$PCIP54[data$PCIP54=='NULL']<-0
data$PCIP54<-as.numeric(as.character(data$PCIP54))
data$PCIP54[data$PCIP54!=0]<-1
factor(data$PCIP54)
newdata$PCIP54<-data$PCIP54
hola<-data[data$INSTNM=='University of Massachusetts-Dartmouth',]
##SAVING RESULTS##
write.csv(newdata, file = "UniversitiesMA.csv")
|
6072bd8bb4fad08bcc88f7d712ce47bb68d6494c | fcc6f2050afa406a13c3746bffe287b890a3ee12 | /plot4.R | 6739820da2ce2de079ce0abe313026f4f62eb866 | [] | no_license | james08/ExData_Plotting1 | 64a7b9d7e5b4e6be474184ecb72fe030920dbc95 | 07c64e5a5b70721a3a24f55e767dae3863e5d83b | refs/heads/master | 2020-12-31T02:22:20.758273 | 2016-08-02T19:50:07 | 2016-08-02T19:50:07 | 64,592,986 | 0 | 0 | null | 2016-07-31T12:35:26 | 2016-07-31T12:35:26 | null | UTF-8 | R | false | false | 1,449 | r | plot4.R | ## This script assumes dataset file is in the current working directory.
library(lubridate)
library(dplyr)
## Read in data from file
consump <- read.csv("household_power_consumption.txt", sep = ";",
stringsAsFactors = FALSE, as.is = c(3:9), na.strings = "?")
## Convert to tbl_df for dplyr and remove original
consum <- tbl_df(consump)
rm(consump)
## Add date_time column
consum1 <- mutate(consum, date_time = dmy_hms(paste(consum$Date, consum$Time)))
consum1 <- mutate(consum, date = dmy(consum$Date))
consum2 <- consum1[consum1$date == ymd("2007-02-01"),]
consum2 <- rbind(consum2, consum1[consum1$date == ymd("2007-02-02"),])
consum2 <- mutate(consum2, date_time = dmy_hms(paste(consum2$Date, consum2$Time)))
#plot4
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
with(consum2, {
plot(date_time, Global_active_power, type = "l",
xlab = "", ylab = "Global Active Power (kilowatts)")
plot(date_time, Voltage, type = "l", xlab = "datetime")
plot(date_time, Sub_metering_1, type = "l",
xlab = "", ylab = "Energy sub metering")
lines(date_time, Sub_metering_2, col="red")
lines(date_time, Sub_metering_3, col="blue")
legend("topright", lwd = 2, bty = "n", col = c("black", "blue", "red"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
with(consum2, plot(date_time, Global_reactive_power, type = "l", xlab = "datetime"))})
dev.off()
|
6d3105428dc46832eb4faa881fc50cbed580bc14 | c7c07988b39a67180ba22c20a78a8fc26b5f7907 | /generate_copynumberheatmap1.R | 20142bb3e81b1f03e8ea00fae3fa6645863add20 | [] | no_license | xwang234/copynumber | 0919c2ce3489e4972ee84f9559948f499108cd54 | d2ba998f4e5362a1b85661e1f9fe6fe105747c5b | refs/heads/master | 2021-01-17T15:57:38.339124 | 2017-04-18T23:41:18 | 2017-04-18T23:41:18 | 84,119,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,189 | r | generate_copynumberheatmap1.R | #!/usr/bin/env Rscript
getwholetable=function(alllessiontable,wgstumors)
{
alllessiontable$Descriptor=gsub(" ","",alllessiontable$Descriptor,fixed=T)
uniq_cytoband=unique(alllessiontable$Descriptor)
cytobandtable=data.frame(matrix(" ",nrow=length(uniq_cytoband),ncol=length(wgstumors)))
rownames(cytobandtable)=uniq_cytoband
colnames(cytobandtable)=wgstumors
for (i in 1:ncol(cytobandtable)) cytobandtable[,i]=as.character(cytobandtable[,i])
c_gain=0.1
c_amp=0.9
c_loss=-0.1
c_del=-1.2
for (i in 1:nrow(cytobandtable))
{
cytoband=rownames(cytobandtable)[i]
idx=which(alllessiontable$Descriptor==cytoband)
if (length(idx)>1) #multiple peaks
{
idx=idx[which.min(alllessiontable$q.values[idx])]
}
for (j in 10:(9+length(wgstumors)))
{
if (alllessiontable[idx,j]<c_del)
{
cytobandtable[i,j-9]="Deletion"
}else
{
if (alllessiontable[idx,j]<c_loss)
{
cytobandtable[i,j-9]="Loss"
}else
{
if (alllessiontable[idx,j]>c_amp)
{
cytobandtable[i,j-9]="Amplification"
}else
{
if (alllessiontable[idx,j]>c_gain)
{
cytobandtable[i,j-9]="Gain"
}
}
}
}
}
}
return(cytobandtable)
}
getampdeltable=function(alllessiontable,wgstumors)
{
amplessiontable=alllessiontable[grepl("Amplification",alllessiontable$Unique.Name),]
ampcytotable=getwholetable(amplessiontable,wgstumors)
for (i in 1:nrow(ampcytotable))
{
for (j in 1:ncol(ampcytotable))
{
ampcytotable[i,j]=gsub("Loss"," ",ampcytotable[i,j])
ampcytotable[i,j]=gsub("Deletion"," ",ampcytotable[i,j])
}
}
dellessiontable=alllessiontable[! grepl("Amplification",alllessiontable$Unique.Name),]
delcytotable=getwholetable(dellessiontable,wgstumors)
for (i in 1:nrow(delcytotable))
{
for (j in 1:ncol(delcytotable))
{
delcytotable[i,j]=gsub("Gain"," ",delcytotable[i,j])
delcytotable[i,j]=gsub("Amplification"," ",delcytotable[i,j])
}
}
result=list(ampcytotable=ampcytotable,delcytotable=delcytotable)
return(result)
}
library(ComplexHeatmap)
# Cohort configuration. The three blocks below (US-EA, CH-EA, CH-ESCC) each
# overwrite wgstumors / gisticdir / name / allsegfile, so when the whole
# script is sourced only the LAST block (CH-ESCC) takes effect.
# Comment/uncomment blocks to switch cohorts.
wgstumors=c("SRR1001842","SRR1002713","SRR999423","SRR1001466","SRR1002670","SRR1001823","SRR999489","SRR1002343","SRR1002722","SRR1002656",
"SRR1002929","SRR999438","SRR1001915","SRR999594","SRR1001868","SRR1001635")
#gisticdir="/fh/fast/dai_j/CancerGenomics/Tools/GISTIC/nwgs_rx1_conf0.95_armpeel0_brlen0.98_broad1"
gisticdir="/fh/scratch/delete30/dai_j/gistic/dulak_ploid2degree3force0_cnv_rx1_conf0.95_armpeel0_brlen0.98_broad1"
name="US-EA"
allsegfile="/fh/scratch/delete30/dai_j/gistic/dulak_ploid2degree3force0_cnv.combinedfreecseg.txt"
# CH-EA cohort: sample ids are <number>A.
wgstumors=paste0(c(3,11,13,15,17,25,29,33,37,41),"A")
gisticdir="/fh/scratch/delete30/dai_j/henan/gistic/henan_ploid2degree3force0_cnv_rx1_conf0.95_armpeel0_brlen0.98_broad1"
name="CH-EA"
allsegfile="/fh/scratch/delete30/dai_j/henan/gistic/henan_ploid2degree3force0_cnv.combinedfreecseg.txt"
# CH-ESCC cohort.
wgstumors=paste0("T",c(1:6,8:18))
# NOTE(review): the next line immediately overwrites the previous one and
# additionally drops sample T5 -- presumably a deliberate exclusion; confirm.
wgstumors=paste0("T",c(1:4,6,8:18))
gisticdir="/fh/scratch/delete30/dai_j/escc/gistic/escc_ploid2degree3force0_cnv_rx1_conf0.95_armpeel0_brlen0.98_broad1"
name="CH-ESCC"
allsegfile="/fh/scratch/delete30/dai_j/escc/gistic/escc_ploid2degree3force0_cnv.combinedfreecseg.txt"
# Read the GISTIC all-lesions table for the selected cohort.
alllessionsfile=paste0(gisticdir,"/all_lesions.conf_95.txt")
alllessiontable=read.table(alllessionsfile,header=T,sep="\t",stringsAsFactors=F)
# The all_lesions file stacks each lesion twice: the first half carries
# 0/1/2 amplitude-threshold codes per sample, the second half the actual
# values (columns 10 onward are the per-sample columns) -- TODO confirm
# against the GISTIC output format for this version.
alllessiontable1=alllessiontable[1:(nrow(alllessiontable)/2),]
# Keep lesions whose threshold codes sum to more than 1 across samples,
# i.e. altered in more than one sample (or one high-level event).
idxkeep=which(rowSums(alllessiontable1[,10:(10+length(wgstumors)-1)],na.rm = T)>1)
# Switch to the second (actual-value) half and apply the same filter.
alllessiontable=alllessiontable[(nrow(alllessiontable)/2+1):nrow(alllessiontable),]
alllessiontable=alllessiontable[idxkeep,]
#cytobandtable=getwholetable(alllessiontable)
# Split into amplification- and deletion-peak call tables.
twotables=getampdeltable(alllessiontable,wgstumors)
#count number of alterations
# Count copy-number-altered segments per tumor from the combined segmentation
# file: column 1 holds the sample id, column 6 presumably the segment
# copy-number log-ratio. A segment counts as amplified above 0.1 and as
# deleted below -0.1; numalt is the total of the two (the cutoffs are
# disjoint, so the sum equals the original combined condition).
allseg <- read.table(file = allsegfile, header = FALSE, sep = "\t")
n_tumors <- length(wgstumors)
numalt <- rep(0, n_tumors)
numdelalt <- rep(0, n_tumors)
numampalt <- rep(0, n_tumors)
for (k in seq_len(n_tumors)) {
  ratios <- allseg[allseg[, 1] == wgstumors[k], 6]
  numdelalt[k] <- sum(ratios < -0.1)
  numampalt[k] <- sum(ratios > 0.1)
  numalt[k] <- numdelalt[k] + numampalt[k]
}
# Drawing callbacks for oncoPrint()'s alter_fun: one rectangle style per
# copy-number call, plus a grey background for cells with no call.
copynumber_fun = list(
  background = function(x, y, w, h) {
    grid.rect(x, y, w-unit(1, "mm"), h-unit(1, "mm"), gp = gpar(fill = "#CCCCCC", col = NA))
  },
  Amplification=function(x, y, w, h) {
    grid.rect(x, y, w-unit(1, "mm"), h-unit(1, "mm"), gp = gpar(fill = "red", col = NA))
  },
  Gain=function(x, y, w, h) {
    grid.rect(x, y, w-unit(1, "mm"), h-unit(1, "mm"), gp = gpar(fill = "orange", col = NA))
  },
  Loss=function(x, y, w, h) {
    grid.rect(x, y, w-unit(1, "mm"), h-unit(1, "mm"), gp = gpar(fill = "skyblue", col = NA))
  },
  # Deletion is drawn at one third of the cell height (a thin bar) --
  # presumably intentional so homozygous deletions stand out; confirm.
  Deletion = function(x, y, w, h) {
    grid.rect(x, y, w-unit(1, "mm"), h*0.33, gp = gpar(fill = "blue", col = NA))
  }
)
# Legend colors matching the alter_fun fills above.
col = c("Amplification" = "red", "Gain"="orange",
"Loss"="skyblue", "Deletion" = "blue")
# oncoPrint(as.matrix(cytobandtable), get_type = function(x) strsplit(x, ";")[[1]],
# row_order = NULL,column_order = NULL,
# remove_empty_columns = TRUE,
# alter_fun = copynumber_fun, col = col,
# column_title = name,
# column_title_gp = gpar(fontsize = 22),
# show_column_names = FALSE,
# show_pct = FALSE,
# axis_gp = gpar(fontsize = 16),# size of axis
# row_names_gp = gpar(fontsize = 16), # set size for row names
# pct_gp = gpar(fontsize = 16), # set size for percentage labels
# row_barplot_width = unit(4, "cm"), #size barplot
# heatmap_legend_param = list(title = "Copynumber", at = c("Amplification", "Gain", "Loss","Deletion"),
# labels = c("Amplification", "Gain", "Loss","Deletion")))
# Bottom annotation for the amplification plot.
# NOTE(review): the track is labelled "total_alterations" but plots
# numampalt, i.e. amplification counts only -- confirm the label is intended.
ha = HeatmapAnnotation(total_alterations= anno_barplot(numampalt,axis=T,axis_gp = gpar(fontsize = 12),axis_side="right",border=F),
                       show_annotation_name = T,annotation_name_offset = unit(2, "cm"),gap = unit(3, "mm"))
# OncoPrint of amplification-peak calls (cytobands x tumors); cells may hold
# several ';'-separated call types, hence the strsplit in get_type.
oncoPrint(as.matrix(twotables$ampcytotable), get_type = function(x) strsplit(x, ";")[[1]],
          row_order = NULL,column_order = NULL,
          remove_empty_columns = TRUE,
          alter_fun = copynumber_fun, col = col,
          column_title =name,
          column_title_gp = gpar(fontsize = 18),
          show_column_names = FALSE,
          show_pct = FALSE,
          axis_gp = gpar(fontsize = 16),# size of axis
          row_names_gp = gpar(fontsize = 16), # set size for row names
          pct_gp = gpar(fontsize = 16), # set size for percentage labels
          row_barplot_width = unit(4, "cm"), #size barplot
          bottom_annotation=ha,
          bottom_annotation_height=unit(3,"cm"),
          heatmap_legend_param = list(title = "Copynumber", at = c("Amplification", "Gain", "Loss","Deletion"),
                                      labels = c("Amplification", "Gain", "Loss","Deletion")))
# Bottom annotation for the deletion plot: per-tumor count of deleted
# segments from the segmentation file.
ha = HeatmapAnnotation(total_alterations= anno_barplot(numdelalt,axis=T,axis_gp = gpar(fontsize = 12),axis_side="right",border=F),
                       show_annotation_name = T,annotation_name_offset = unit(2, "cm"),gap = unit(3, "mm"))
# OncoPrint of deletion-peak calls (cytobands x tumors).
# FIX: removed a stray trailing '|' character after the final ')))' which
# made the expression unparseable (likely a copy/concatenation artifact).
oncoPrint(as.matrix(twotables$delcytotable), get_type = function(x) strsplit(x, ";")[[1]],
          row_order = NULL,column_order = NULL,
          remove_empty_columns = TRUE,
          alter_fun = copynumber_fun, col = col,
          column_title = name,
          column_title_gp = gpar(fontsize = 18),
          show_column_names = FALSE,
          show_pct = FALSE,
          axis_gp = gpar(fontsize = 16),# size of axis
          # Row names smaller here (10 vs 16 in the amplification plot) --
          # presumably because the deletion table has more rows; confirm.
          row_names_gp = gpar(fontsize = 10), # set size for row names
          pct_gp = gpar(fontsize = 16), # set size for percentage labels
          row_barplot_width = unit(4, "cm"), #size barplot
          bottom_annotation=ha,
          bottom_annotation_height=unit(3,"cm"),
          heatmap_legend_param = list(title = "Copynumber", at = c("Amplification", "Gain", "Loss","Deletion"),
                                      labels = c("Amplification", "Gain", "Loss","Deletion")))
c9db8ad57dde4e5025b3a8ba4302a2c0a784f641 | f2bb9dc756f74ccfd1aa7bb4e1aa9d682d93e628 | /R/javaFunctions.R | a5748cbe84c448abf32b619238cb31cd0c3cd3c5 | [] | no_license | HenrikBengtsson/R.io | 660437c62f692db4fecfbae08648eb56237e9ca2 | 7ff13117d31299027e9029c2ec9d92ec5079273b | refs/heads/master | 2021-01-01T19:43:25.136246 | 2014-06-19T04:11:28 | 2014-06-19T04:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,255 | r | javaFunctions.R | #########################################################################/**
# @RdocDefault writeJavaByte
#
# @title "Deprecated. Writes a byte (8 bits) to a connection in Java format"
#
# \description{
# Writes one or several byte's (8 bits) to a connection in Java
# format so they will be readable by Java.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be written to.}
# \item{b}{Vector of bytes to be written.}
# }
#
# \details{
# This method is included for consistency reasons only.
# }
#
# @author
#
# \seealso{
# @see "base::writeBin".
# @see "writeJavaShort", @see "writeJavaInt",
# @see "writeJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("writeJavaByte", "ANY", function(con, b, ...) {
  # Deprecated shim kept for backward compatibility; the warning text is
  # preserved verbatim.
  warning("writeJavaByte() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$writeByte() instead.");
  # A Java byte is a single octet: coerce to integer and emit one byte each.
  bytes <- as.integer(b);
  writeBin(bytes, con=con, size=1);
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault writeJavaShort
#
# @title "Deprecated. Writes a short (16 bits) to a connection in Java format"
#
# \description{
# Writes one or several short's (16 bits) to a connection in Java
# format so they will be readable by Java.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be written to.}
# \item{s}{Vector of shorts to be written.}
# }
#
# @author
#
# \seealso{
# @see "base::writeBin".
# @see "writeJavaShort", @see "writeJavaInt",
# @see "writeJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("writeJavaShort", "ANY", function(con, s, ...) {
  # Deprecated shim; emits the historical warning verbatim.
  warning("writeJavaShort() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$writeShort() instead.");
  # Java shorts are 16-bit big-endian; writeBin() handles the byte order.
  values <- as.integer(s);
  writeBin(values, con=con, size=2, endian="big");
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault writeJavaInt
#
# @title "Deprecated. Writes an integer (32 bits) to a connection in Java format"
#
# \description{
# Writes one or several integer's (32 bits) to a connection in Java
# format so they will be readable by Java.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be written to.}
# \item{i}{Vector of integers to be written.}
# }
#
# @author
#
# \seealso{
# @see "base::writeBin".
# @see "writeJavaShort", @see "writeJavaInt",
# @see "writeJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("writeJavaInt", "ANY", function(con, i, ...) {
  # Deprecated shim; emits the historical warning verbatim.
  warning("writeJavaInt() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$writeInt() instead.");
  # Decompose every value into its four big-endian bytes, most significant
  # first. No explicit masking is done: writeBin() with size=1 keeps only
  # the low-order byte of each quotient, which yields the correct octets.
  bfr <- unlist(lapply(i, function(x) {
    c(x %/% 256^3, x %/% 256^2, x %/% 256, x %% 256);
  }));
  writeBin(as.integer(bfr), con=con, size=1);
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault writeJavaUTF
#
# @title "Deprecated. Writes a string to a connection in Java format (UTF-8)"
#
# \description{
# Writes a string to a connection in Java format (UTF-8)
# so it will be readable by Java.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be written to.}
# \item{str}{String to be written.}
# }
#
# @author
#
# \seealso{
# @see "base::writeBin".
# @see "writeJavaShort", @see "writeJavaInt",
# @see "writeJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("writeJavaUTF", "ANY", function(con, str, ...) {
  # Deprecated shim; emits the historical warning verbatim.
  warning("writeJavaUTF() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$writeUTF() instead.");
  s <- as.character(str);
  # Java's UTF format prefixes the text with a 16-bit length.
  # NOTE(review): nchar() counts characters, not bytes, so only 8-bit
  # (ASCII) strings round-trip correctly -- consistent with the documented
  # limitation of readJavaUTF().
  writeJavaShort(con, nchar(s));
  writeChar(s, con=con, eos=NULL);
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault readJavaByte
#
# @title "Deprecated. Reads a Java formatted byte (8 bits) from a connection"
#
# \description{
# Reads one or several Java formatted byte's (8 bits) from a connection.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be read from.}
# \item{n}{Number of byte's to be read.}
# }
#
# \value{
# Returns a @vector of @integers.
# }
#
# @author
#
# \seealso{
# @see "base::readBin".
# @see "readJavaShort", @see "readJavaInt",
# @see "readJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("readJavaByte", "ANY", function(con, n=1, ...) {
  # Reads n signed 8-bit values written by Java.
  # BUG FIX: the deprecation warning previously named writeJavaByte() and
  # Java$writeByte() by copy-paste mistake; it now names the read methods.
  warning("readJavaByte() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$readByte() instead.");
  as.integer(readBin(con=con, what=integer(), size=1, n=n));
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault readJavaShort
#
# @title "Deprecated. Reads a Java formatted short (16 bits) from a connection"
#
# \description{
# Reads one or several Java formatted short's (16 bits) from a connection.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be read from.}
# \item{n}{Number of short's to be read.}
# }
#
# \value{
# Returns a @vector of @integers.
# }
#
# @author
#
# \seealso{
# @see "base::readBin".
# @see "readJavaShort", @see "readJavaInt",
# @see "readJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("readJavaShort", "ANY", function(con, n=1, ...) {
  # Reads n big-endian signed 16-bit values written by Java.
  # BUG FIX: the deprecation warning previously named writeJavaShort() and
  # Java$writeShort() by copy-paste mistake.
  warning("readJavaShort() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$readShort() instead.");
  # From java.io.DataOutput.writeShort():
  # The byte values to be written, in the order shown, are:
  #    (byte)(0xff & (v >> 8))
  #    (byte)(0xff & v)
  #  readBin(con=con, what=integer(), size=2, n=n, endian="big");
  # Read the raw octets unsigned and reassemble each big-endian 16-bit value.
  bfr <- readBin(con=con, what=integer(), size=1, n=2*n, signed=FALSE);
  bfr <- matrix(bfr, ncol=2, byrow=TRUE);
  bfr[,1] <- bfr[,1]*256;
  bfr <- rowSums(bfr);
  # Two's-complement sign fix-up. BUG FIX: the boundary 0x8000 (== 2^15)
  # encodes -32768 and must be included, so use >= rather than >.
  neg <- (bfr >= 2^15)
  bfr[neg] <- bfr[neg] - 2^16;
  bfr;
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault readJavaInt
#
# @title "Deprecated. Reads a Java formatted int (32 bits) from a connection"
#
# \description{
# Reads one or several Java formatted int's (32 bits) from a connection.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be read from.}
# \item{n}{Number of int's to be read.}
# }
#
# \value{
# Returns a @vector of @integers.
# }
#
# @author
#
# \seealso{
# @see "base::readBin".
# @see "readJavaShort", @see "readJavaInt",
# @see "readJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("readJavaInt", "ANY", function(con, n=1, ...) {
  # Reads n big-endian signed 32-bit values written by Java.
  # BUG FIX: the deprecation warning previously named writeJavaInt() and
  # Java$writeInt() by copy-paste mistake.
  warning("readJavaInt() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$readInt() instead.");
  # readBin(con=con, what=integer(), size=4, n=n, endian="big");
  # Read the raw octets unsigned and reassemble each big-endian 32-bit value.
  bfr <- readBin(con=con, what=integer(), size=1, n=4*n, signed=FALSE);
  bfr <- matrix(bfr, ncol=4, byrow=TRUE);
  bfr[,1] <- bfr[,1] * 256^3;
  bfr[,2] <- bfr[,2] * 256^2;
  bfr[,3] <- bfr[,3] * 256;
  bfr <- rowSums(bfr);
  # Two's-complement sign fix-up. BUG FIX: the boundary 0x80000000 (== 2^31)
  # encodes -2147483648 and must be included, so use >= rather than >.
  neg <- (bfr >= 2^31)
  bfr[neg] <- bfr[neg] - 2^32;
  bfr;
}, deprecated=TRUE)
#########################################################################/**
# @RdocDefault readJavaUTF
#
# @title "Deprecated. Reads a Java (UTF-8) formatted string from a connection"
#
# \description{
# Reads a Java (UTF-8) formatted string from a connection.
# }
#
# @synopsis
#
# \arguments{
# \item{con}{Binary connection to be read from.}
# \item{as.character}{If @TRUE, the read string converted,
# i.e. translated, into an \R character string before
# returned, otherwise an integer vector representation of
# the Unicode string is returned.}
# }
#
# \value{
# Returns a @character string or an @integer @vector.
# }
#
# \details{
# Currently only 8-bit UTF-8 byte sequences are supported, i.e. plain
# ASCII sequences.
# }
#
# @author
#
# \seealso{
# @see "base::readBin".
# @see "readJavaShort", @see "readJavaInt",
# @see "readJavaUTF".
# }
#
# \keyword{internal}
#*/#########################################################################
setMethodS3("readJavaUTF", "ANY", function(con, as.character=TRUE, ...) {
  # Reads a Java UTF-formatted string (16-bit length prefix, then bytes).
  # BUG FIX: the deprecation warning previously named writeJavaUTF() and
  # Java$writeUTF() by copy-paste mistake.
  warning("readJavaUTF() is deprecated since R.oo v0.49 [2002/12/15]. Please use Java$readUTF() instead.");
  # BUG (pre-existing, behavior kept): the prefix counts UTF-8 *bytes*, but
  # readChar() below is handed it as a *character* count, so only 8-bit
  # (ASCII) sequences decode correctly; lengths >= 2^15 also come back
  # negative from readJavaShort().
  nbrOfBytes <- readJavaShort(con);
  if (as.character) {
    readChar(con=con, nchars=nbrOfBytes);
  } else {
    readBin(con=con, what=integer(), size=1, n=nbrOfBytes);
  }
}, deprecated=TRUE)
############################################################################
# HISTORY:
# 2003-04-21
# o Made all the methods deprecated by adding warnings to them.
# 2003-04-16 [deprecated?]
# o Updated the Rdocs.
# 2002-09-03
# o Cleaned up the code and wrote up the Rdoc comments.
# 2002-08-26
# o Created.
############################################################################
|
a9ca48b7c55e50c7c801fa9b093ee198d4ea188f | 8a6c123dce13e5a0a96f258cd0fd78f32f166014 | /man/FLTable.Rd | 4f945f999b7c64aa2196441050473d82e8df4dba | [] | no_license | mcetraro/AdapteR | c707cd5bd4918cee093e5f151248e0ed047eacda | 4ea6ba9226ad187b0a61ab95286043c1eb5fbec3 | refs/heads/master | 2021-01-18T12:45:14.416907 | 2015-07-30T14:41:48 | 2015-07-30T14:41:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 842 | rd | FLTable.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/FLTable.R
\name{FLTable}
\alias{FLTable}
\title{Constructor function for FLTable}
\usage{
FLTable(connection, database, table)
}
\arguments{
\item{connection}{ODBC connection handle as returned by \code{\link[RODBC]{odbcConnect}}}
\item{database}{name of the database in \code{Teradata} which contains the table}
\item{table}{name of the table}
}
\value{
\code{FLTable} returns an object of class FLTable mapped to a table
in Teradata.
}
\description{
\code{FLTable} constructs an object of class \code{FLTable}. This object is
used as the input for analytic functions
}
\examples{
\dontrun{
connection <- odbcConnect("Gandalf")
database <- "FL_R_WRAP"
table_name <- "tblAutoMpg"
tbl <- FLTable(connection, database, table_name)
}
}
|
1d78abfabb207528851cad34ac0fb2410e0bb73f | 0795ee23a87aa1a0339848e3a7c682e58ce3f398 | /SimulateData.R | 484dc72bfd7adabf0d32ddd5e15e9f453284ddc5 | [] | no_license | alessandro992/CodeMetaAfterDataCollection | fb05aea963749ec149b4734f5a5062736bda7faa | ac579c33ca4795a74c3c5d8191391b64cbfdd6a7 | refs/heads/main | 2023-08-21T14:29:11.972916 | 2021-09-16T15:35:02 | 2021-09-16T15:35:02 | 407,217,086 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,876 | r | SimulateData.R | library(dplyr)
#simulate a dataset for the stress meta-analysis
#General info of a study
# 346 simulated studies throughout; fixed seed for reproducibility.
# NOTE(review): replace=T uses the reassignable alias T; prefer TRUE.
set.seed(123)
publication_year <- NA
researchDesign <-sample(c(1,2,3,4,5),346, replace=T)
# 1 = RCT, between-subjects design 2 = between-subjects design, comparing pre-existing groups (no randomization into groups) 3 = within-subjects design
#4 = correlational study 5 = mixed-effects (multilevel) model.
journal <-sample(c("Psychological Bulletin","Trends in Cognitive Sciences","American Psychologist","Child Development"),346, replace=T)
source <-sample(c(1,2,3,4),346, replace=T)
# 1=proquest, 2=pubmed, 3=scopus, 4=hand search
country <-sample(c(NA,"italy","france","singapore","US","slovakia","netherlands"),346, replace=T)
nMale <-as.integer(runif(346,min=1, max=60))
nFemale <-as.integer(runif(346,min=1, max=60))
journalH5 <-as.integer(runif(346,min=0, max=10))
predictedDirection <-sample(c(-1,1,0),346, replace=T)
items <-as.integer(runif(346,min=0, max=10))
# for items= (0 if the DV is not a discrete/likert variable,
#otherwise an integer count of, e.g., Likert scale items comprising the DV)
inLabAdministration <-sample(c(1,2,3),346, replace=T)
#1 = lab study; 2 = field study; 3 = online data collection
doi <-sample(c(NA,"http://dx.doi.org/10.1037/a0028242","http://dx.doi.org/10.1037/a02328240"),346, replace=T)
citations <-NA
#citations by Google Scholar
mean1 <- round(rnorm(346,5,1), 2)
mean2 <- round(rnorm(346,5,2), 2)
#mean of group1 (experimental) and of group2 (control)
# NOTE(review): rnorm(346, 2, 1) can produce negative values, so some
# simulated sd1/sd2 (and se1/se2) may be negative -- TODO confirm whether
# truncation (e.g. abs()) is intended for a realistic simulation.
sd1 <- round(rnorm(346,2,1), 2)
sd2 <- round(rnorm(346,2,1), 2)
#sd of group1 (experimental) and of group2 (control)
# NOTE(review): se1/se2 are not rounded, unlike sd1/sd2 -- confirm intent.
se1 <- rnorm(346,2,1)
se2 <- rnorm(346,2,1)
#se of group1 (experimental) and of group2 (control)
p.reported <- NA
mean_age <-as.integer(runif(346,min=18, max=60))
n1 <- as.integer(runif(346,min=1, max=100))
n2 <- as.integer(runif(346,min=1, max=100))
n3 <- as.integer(runif(346,min=1, max=100))
#sample size per cells, n1,n2,n3
############################################################################################
#Intervention characteristics
presence_of_individual_differences <-sample(c(1,0),346, replace=T)
# 1=if there are individual differences in the study, 0= If there aren't individual differences in the study
focal_variable <-sample(c(0,1),346, replace=T)
# Added the focal variable; 0= not a focal variable, 1= it is a focal variable
presence_of_stressTest <-sample(c(1,2),346, replace=T)
# 1=present, 2=absent
MASdata<-as.data.frame(presence_of_stressTest)
# NOTE(review): ifelse() recycles the 4-element "yes" vector positionally,
# so each row's code depends on its position (row index mod 4), not on any
# random draw -- TODO confirm this is intended; sample() inside ifelse()
# would give random codes instead.
Type_of_StressTest<-ifelse(MASdata$presence_of_stressTest==1,
                           c(1,2,3,NA), NA)
MASdata$Type_of_StressTest <-Type_of_StressTest
#If the stressTest is present I code: 1= TSST, 2=stroop task, 3=others 999= no stress test
typePopulation <-sample(c(1,2,3),346, replace=T)
#1=student non-clinical 2= non-student non-clinical 3= clinical
typeComparisonGroup <-sample(c(1,2),346, replace=T)
# 1=passive control group, 2= active control group
typeStressComponent <-sample(c(1,2,3,4,5,6),346, replace=T)
#1= Affect: low arousal, negative valence, 2= Affect: high arousal, negative valence, 3= Affect: low arousal, positive valence 4= Affect: high arousal, positive valence,
#5= cognitive component 6=physiological component
Affective_consequences_of_stress <-sample(c(1,2,3,4),346, replace=T)
#1= Low arousal, negative valence, 2= High arousal, negative valence, 3= Low arousal, positive valence 4= High arousal, positive valence.
exact_type_of_population <- sample(c(NA,"nursersy students","employers","psychology students","patients with depression","COPD patients","managers"),346, replace=T)
frequency_of_intervention <- as.integer(runif(346,min=0, max=5))
#how many times each week the participants receive the intervention
duration_of_intervention <- as.integer(runif(346,min=0, max=80))
#total duration of the intervention in hours
number_of_intervention <- as.integer(runif(346,min=0, max=90))
#total number of SEM session, biofeedback trainings, exposure to nature, social support received
Instrument <-sample(c("PSS","STAI","cortisol","heart-rate","RSS","HADS"),346, replace=T)
#Type of scale used in the experiment (e.g. PSS)
nationality <-sample(c(1,2),346, replace=T)
#is it coded? 1= yes 2= no
timing_of_effect <-sample(c(1,2),346, replace=T)
#whether the effect size is measured 1= after intervention, 2=after last follow up
######################################################################
#Specific coding for categories
# NOTE(review): only categories 1 and 2 are sampled here although the
# comment below describes four strategies -- confirm whether 3 (nature)
# and 4 (social support) should be included in the sample() pool.
strategy <-sample(c(1,2),346, replace=T)
#Divide each article by categories for the meta-analysis: 1= self-administered mindfulness, 2=biofeedback 3= being in nature 4=social support
data1<-as.data.frame(strategy)
# NOTE(review): same positional-recycling caveat as Type_of_StressTest
# above -- the 5-element "yes" vector is recycled by row position.
Type_of_Sam<-ifelse(data1$strategy==1,
                    c(1,2,3,4,5), NA)
#If the article is on self-administered mindfulness, I code 1 = internet 2= smartphone app 3=book 4= audio 5= mixed
data1$Type_of_Sam<-Type_of_Sam
#type_of_environmet <-ifelse(data1$strategy==2,
# c(1,2), NA)
#If the article is on being in nature, I code 1 = nature env, 2= built environment
#data1$type_of_environmet <-type_of_environmet
#type_of_exposure <-ifelse(data1$strategy==2,
#c(1,2,3), NA)
#If the article is on being in nature, I code 1 = outdoor walk 2 = nature viewing 3= outdoor gardening
#data1$type_of_exposure<-type_of_exposure
#type_of_SocialSupport <-ifelse(data1$strategy==4,
# c(1,2,3,4), NA)
#data1$type_of_SocialSupport<-type_of_SocialSupport
#If the article is on social support I code 1 =no support 2= physical contact 3= verbal social support 4= mixed
#source_of_SocialSupport <-ifelse(data1$strategy==4,
#c(1,2,3), NA)
#data1$source_of_SocialSupport <- source_of_SocialSupport
#If the article is on social support I code 1 =partner 2= friends 3=stranger
#Code the source of SocialSupport
# Assemble all simulated columns; MASdata and data1 are data frames, so
# cbind() expands their columns alongside the plain vectors.
MetaData <-cbind(publication_year,nMale,nFemale,mean_age,doi,citations,inLabAdministration,journalH5,source,predictedDirection,items,mean1,mean2,sd1,sd2,se1,se2,p.reported,n1,n2,n3,researchDesign,focal_variable,journal,country,number_of_intervention,Instrument,presence_of_individual_differences,MASdata,typePopulation,typeComparisonGroup,typeStressComponent,Affective_consequences_of_stress,exact_type_of_population,frequency_of_intervention,duration_of_intervention,nationality,timing_of_effect,data1)
#Create the first simulated dataset with the info encoded until now
# Identifier columns (one study per paper in this simulation).
paperID <- 1:nrow(MetaData)
studyID <-1:nrow(MetaData)
published <-sample(c(0,1),346, replace=T)
#1=published, 0=unpublished
StressData <- cbind(MetaData,studyID,paperID,published)
#Merging a simulated dataset on stress, with the real dataset of social thermoregulation (in order to take the effect sizes from that and other statistics)
# NOTE(review): relative path -- assumes Rob_2.csv sits in the working
# directory; the file must also have 346 rows for the cbind below to align.
rob2 <- read.csv("Rob_2.csv", sep = ";")
#bringing-in the Rob2 for each study, after having used the Rob2 excel sheet for each study
rob2 <- rob2[ , which(names(rob2) %in% c("Domain.1.risk.of.bias","Domain.2.risk.of.bias","Domain.3.risk.of.bias","Domain.4.risk.of.bias","Domain.5.risk.of.bias","Overall.risk.of.bias"))]
#taking from the raw Rob2 dataset just the columns we need (Rob for all domains and overall)
#rob2 <- rob2 %>%
#select("Domain.1.risk.of.bias","Domain.2.risk.of.bias","Domain.3.risk.of.bias","Domain.4.risk.of.bias","Domain.5.risk.of.bias","Overall.risk.of.bias")
#View(rob2)
#Here I tried to do the same with dplyr, it was just and exercise
StressData <-cbind(StressData, rob2)
#View(rob2)
#Merging the simulated dataset with the Rob2 simulated dataset
# NOTE(review): the next two lines are a scratch demo of nested ifelse();
# the result is discarded and `gender` is never used again.
gender <- c("MALE","FEMALE","FEMALE","UNKNOWN","MALE")
ifelse(gender == "MALE", 1, ifelse(gender == "FEMALE", 2, 3))
# Recode the RoB-2 risk-of-bias labels to integers:
# "Low" = 1, anything else ("Some concerns") = 2, "High" = 3.
recode_rob <- function(x) ifelse(x == "Low", 1, ifelse(x == "High", 3, 2))
StressData$Domain.1.risk.of.bias <- recode_rob(StressData$Domain.1.risk.of.bias)
StressData$Domain.2.risk.of.bias <- recode_rob(StressData$Domain.2.risk.of.bias)
StressData$Domain.3.risk.of.bias <- recode_rob(StressData$Domain.3.risk.of.bias)
StressData$Domain.4.risk.of.bias <- recode_rob(StressData$Domain.4.risk.of.bias)
StressData$Domain.5.risk.of.bias <- recode_rob(StressData$Domain.5.risk.of.bias)
StressData$overallRiskOfBias <- recode_rob(StressData$Overall.risk.of.bias)
data <- StressData
# BUG FIX: write.csv("StressData") serialized the literal string
# "StressData", not the data frame; write the data frame to a file instead.
write.csv(StressData, file = "StressData.csv")
#StressData <- StressData %>% mutate (Domain.1.risk.of.bias = ifelse(Domain.1.risk.of.bias == "Low", 1,
#ifelse(Domain.1.risk.of.bias == "High",3,2))) #altro modo di fare ifelse
|
af1dcea979666591a65e6c5d72fec3330833f678 | cb4809896e29fc708a38c25a8b1c0b568c96bd6f | /inst/experiments/wvs/wvs_csv_to_country_data_frames.R | 4b5e11d4cd1adf5c30206a30398cc41725ef675b | [] | no_license | matloff/polyanNA | de45bb422cfd2dd9d28447e1ab34018bde1ed280 | be403cfbc9b8be1cd216ab715b5d710c7b71b793 | refs/heads/master | 2020-03-23T06:46:50.410088 | 2019-05-04T23:03:29 | 2019-05-04T23:03:29 | 141,229,142 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,067 | r | wvs_csv_to_country_data_frames.R | # wvs_make_data
# returns nested listed with X_train, y_train, X_test, y_test, by country
# after perfoming a series of tests to ensure crossvalidation is possible
# in spite of the large number of categorical variables and other nuissances.
# if W is provided, PCA is run on W (at the country level, not the world level)
# and the scores corresponding to 90\% variance are combined with X.
# (W should not contain missing data but is treated as an 'other' category where it occurs.)
# N_min minimum observations per level within country. Observed levels observed less
# frequently than N_min (in that country) are converted to 'other' for cv and/or pca.
# returns complete cases...
# examples:
# X <- wvs %>% select(country, birthyear, edu,
# income_bracket,
# contains("econ_attitudes"))
# W <- wvs %>% select(region, religion)
# wv <- wvs_make_data(wvs$rightist, X, W)
# colMeans(wv$Argentina$Xy_train)
# colMeans(wv$Argentina$X_test)
# mean(wv$Argetina$y_test, na.rm=TRUE)
# out <- lm(y ~ ., wv$Argentina$Xy_train)
# y_hat_arg <- predict(out, wv$Argentina$X_test)
# MAPE_arg <- mean(abs(unlist(y_hat_arg - wv$Argentina$y_test)))
# # 1.37 MAPE for Argentina on 10 point scale
#
# loop over each country
# MAPE <- data.frame(country = wv$countries)
# MAPE$MAPE <- NA
#for(i in 1:nrow(MAPE)){
# out <- lm(y ~ ., wv[[wv$countries[i]]][["Xy_train"]])
# y_hat <- predict(out, wv[[wv$countries[i]]][["X_test"]])
# MAPE$MAPE[i] <- mean(abs(unlist(y_hat - wv[[wv$countries[i]]][["y_test"]])))
#}
#barplot(MAPE$MAPE, names.arg = MAPE$country, las=2)
# wvs_make_data: split survey data into per-country train/test sets, with
# per-country PCA scores of W (covering <90% cumulative variance) bound to X.
#
# Args:
#   y      outcome vector, one entry per respondent.
#   X      data frame of predictors; must contain a "country" column.
#   W      optional data frame of high-cardinality categoricals; NAs become
#          an explicit "other" level.
#   noisy  print progress and summaries while building.
#   N_min  within-country levels seen fewer than N_min times -> "_other".
#
# Returns: a list with $countries plus, per country, $Xy_train (model-matrix
# columns + y), $X_test, and $y_test.
wvs_make_data <- function(y, X, W=NULL, noisy=TRUE, N_min=50){
  # NOTE(review): require() inside a function is discouraged; prefer
  # library() at script level or a requireNamespace() check.
  require(dplyr)
  # Missing W entries become an explicit "other" category.
  W[is.na(W)] <- "other"
  # NOTE(review): all_equal() is dplyr's data-frame comparator, not an
  # equality test on lengths, and this line (like ncol(W) below) fails when
  # W is NULL despite the NULL default -- confirm the intended check is
  # nrow(X) == nrow(W) == length(y).
  stopifnot(all_equal(nrow(X), nrow(W), length(y)))
  X <- as.data.frame(X)
  stopifnot("country" %in% colnames(X))
  # Normalize character predictors: factors -> character, and replace
  # spaces/colons with underscores so model.matrix() column names are clean.
  for(i in 1:ncol(X)){
    if(is.factor(X[,i]))
      X[,i] <- as.character(X[,i])
    if(is.character(X[,i])){
      X[,i] <- gsub(" ", "_", X[,i])
      X[,i] <- gsub(":", "_", X[,i])
    }
  }
  # NOTE(review): `wvs` is a global not passed in, and N is never used
  # afterwards -- likely leftover; consider removing.
  N <- nrow(wvs)
  N_countries <- n_distinct(X$country)
  countries <- unique(X$country)
  all_data <- cbind(X, y)
  if(!is.null(W))
    all_data <- cbind(W, X, y)
  # drop countries which were not asked or which only had one level on a particular variable ethnicity
  var_names <- colnames(all_data)
  var_names <- var_names[-which(var_names == "country")]
  # Count of distinct non-missing values of column x within dat.
  non_missing_unique <- function(x, dat){
    u <- unique(dat[[x]])
    return(length(u) - (NA %in% u))
  }
  # Remove any country where some variable has fewer than 2 observed levels
  # (such columns would break model.matrix()/prcomp() later).
  for(i in 1:N_countries){
    country_data <- filter(all_data, country == countries[i])
    drop_country <- (var_names %>% lapply(non_missing_unique, country_data) %>% unlist %>% min) < 2
    if(drop_country)
      all_data <- filter(all_data, country != countries[i])
  }
  # Collapse levels of a non-numeric column observed fewer than N_min times
  # into the single level "_other"; numeric columns pass through unchanged.
  rare2other <- function(x, N_min = 50){
    if(is.numeric(x)){
      return(x)
    }else{
      tallies <- table(x)
      rare <- names(tallies)[which(tallies < N_min)]
      return(ifelse(x %in% rare, "_other", x))
    }
  }
  # Split all_data back into the W part (first ncol(W) columns) and the
  # X+y part. NOTE(review): assumes W was non-NULL (see above).
  newW <- all_data[,1:ncol(W)]
  newW <- as.data.frame(lapply(newW, rare2other, N_min), stringsAsFactors = FALSE)
  # pca could be done about here if it made sense too...
  # pca <- prcomp(model.matrix(~., newW))
  Xy <- all_data[,-c(1:ncol(W))]
  #Xy <- data.frame(lapply(Xy, rare2other, N_min), stringsAsFactors = FALSE)
  if(noisy) print(summary(Xy))
  countries <- unique(Xy$country)
  N_countries <- length(countries)
  out <- list()
  out[["countries"]] <- countries
  # Per-country processing: PCA on the country's W rows, ~90/10 random
  # train/test split, rare-level collapsing on the training set only, and
  # model-matrix expansion with NAs passed through.
  for(i in 1:N_countries){
    if(noisy) cat("preparing data for ", countries[i], "\n")
    W_country <- filter(newW, Xy$country == countries[i])
    pca <- prcomp(model.matrix(~., W_country)[,-1], center = TRUE, scale. = TRUE)
    # Keep components while cumulative explained variance is below 90%.
    keepers <- which(summary(pca)[["importance"]][3,] < 0.9)
    N_country <- nrow(W_country)
    # Random assignment: ~10% of rows go to the test set.
    test <- sample(c(FALSE, TRUE), N_country,
                   replace = TRUE, prob = c(.9, .1))
    Xy_country <- filter(Xy, country == countries[i]) %>% select(-country)
    Xy_country <- cbind(pca$x[,keepers], Xy_country)
    Xy_test <- Xy_country[test,]
    Xy_train <- Xy_country[!test,]
    Xy_train <- data.frame(lapply(Xy_train, rare2other, N_min), stringsAsFactors = FALSE)
    out[[countries[i]]][["Xy_train"]] <- model.matrix(~ ., Xy_train, na.action=na.pass)[,-1] %>% as.data.frame(stringsAsFactors=FALSE)
    X_test <- model.matrix(y ~ ., Xy_test, na.action=na.pass)[,-1] %>% as.data.frame(stringsAsFactors=FALSE)
    #browser()
    # Pad the test design matrix with zero columns for any training dummy
    # level that never occurs in the test rows, so predict() sees the same
    # feature set.
    training_features <- colnames(out[[countries[i]]][["Xy_train"]])[-ncol(out[[countries[i]]][["Xy_train"]])]
    MIA <- setdiff(training_features, colnames(X_test))
    X_test_MIA <- matrix(0, nrow(X_test), length(MIA))
    colnames(X_test_MIA) <- MIA
    X_test <- cbind(X_test, X_test_MIA)
    out[[countries[i]]][["X_test"]] <- X_test
    out[[countries[i]]][["y_test"]] <- Xy_test %>% filter(!is.na(y)) %>% select(y)
    # NOTE(review): numeric truthiness -- mean(is.na(...)) > 0 would be
    # clearer; also X_test keeps rows with NA y while y_test drops them,
    # so the two can have different row counts.
    if(mean(is.na(out[[countries[i]]][["y_test"]])))
      cat("y_test missing for", countries[[i]])
  }
  return(out)
}
|
a91d6d17acec30183a42a5104d0a2ae0a94585c9 | e4caa843c38e349a7c8312276f1d94de1925fb9f | /simpleExpo.R | a542305213fcc453600b2077e3c56ca3cda19c7b | [] | no_license | jfgilman/ST740 | f29342e8ec4bd2053a86fdf38a8633f959cf53a5 | 91efc8d4bf142b0c21d5908fa840ba470639cd99 | refs/heads/master | 2021-05-15T16:54:13.212415 | 2018-01-16T15:26:32 | 2018-01-16T15:26:32 | 107,549,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,674 | r | simpleExpo.R |
# Gibbs sampler for a three-phase exponential reliability model.
#
# Failure times (MBF) are exponential with rate `lam` in phase 1,
# `lam * theta1` in phase 2 and `lam * theta1 * theta2` in phase 3, pooled
# across the systems in `data`. Gamma priors are conjugate for all three
# parameters, so every update below is a closed-form gamma draw.
#
# Args:
#   data: list of data frames, one per system, each with columns MBF
#     (time between failures), Phase (1/2/3), Censor (1 = right-censored,
#     0 = observed) and trun (TRUE = truncated observation with
#     Lower/Upper bounds).
#   samples: total number of MCMC iterations.
#   lamPriorA/lamPriorB, theta1PriorA/theta1PriorB, theta2PriorA/theta2PriorB:
#     gamma prior shape/rate hyperparameters.
#   theta1Start, theta2Start: starting values for the multiplier chains.
#   burnin, thin: initial draws discarded and thinning interval.
#
# NOTE(review): shapePriorA, shapePriorB and tuning are accepted but never
# used in this function -- presumably leftovers from another model variant
# (e.g. Weibull / Metropolis); confirm before relying on them.
#
# Returns a list with thinned posterior draws (lam_draws, theta1_draws,
# theta2_draws), the per-draw deviance trace (Deviance), the effective
# number of parameters (PD) and the model DIC.
simpExpoMCMC <- function(data, samples = 50000, shapePriorA = .001,
                         shapePriorB = .001, lamPriorA = .001, lamPriorB = .001,
                         theta1PriorA = .001, theta1PriorB = .001, theta2PriorA = .001,
                         theta2PriorB = .001, theta1Start = 1, theta2Start = 1, tuning = 1,
                         burnin = 35000, thin = 10){
  # matrix for keeping MCMC draws for each parameter
  lam_draws <- rep(0, samples)
  lam_draws[1] <- 1
  theta1_draws <- rep(0, samples)
  theta1_draws[1] <- theta1Start
  theta2_draws <- rep(0, samples)
  theta2_draws[1] <- theta2Start
  # phase obs counts: uncensored observations in phases 2 and 3 become the
  # gamma shape contributions for the theta full conditionals.
  phase2Count <- 0
  phase3Count <- 0
  for(i in 1:length(data)){
    phase2Count <- phase2Count + length(which(data[[i]]$Phase == 2 & data[[i]]$Censor == 0))
    phase3Count <- phase3Count + length(which(data[[i]]$Phase == 3 & data[[i]]$Censor == 0))
  }
  # Total uncensored observations (gamma shape contribution for lam).
  noCenCount <- 0
  for(i in 1:length(data)){
    noCenCount <- noCenCount + sum(data[[i]]$Censor == 0)
  }
  # MCMC draws
  for (i in 2:samples) {
    # Total exposure for lam: phase 2/3 times are scaled by the current
    # theta multipliers so they contribute on the phase-1 rate scale.
    datSum <- 0
    for(j in 1:length(data)){
      datSum <- datSum + sum(data[[j]]$MBF[which(data[[j]]$Phase == 1)],
                             (theta1_draws[i-1] * data[[j]]$MBF[which(data[[j]]$Phase == 2)]),
                             (theta1_draws[i-1] * theta2_draws[i-1] * data[[j]]$MBF[which(data[[j]]$Phase == 3)]))
    }
    # Conjugate gamma update for the baseline rate lam.
    lam_draws[i] <- rgamma(1, noCenCount + lamPriorA, datSum + lamPriorB)
    # Phase sums: rate-side terms of the theta full conditionals.
    phase2Sum <- 0
    phase3Sum4 <- 0
    phase3Sum5 <- 0
    for(k in 1:length(data)){
      phase2Sum <- phase2Sum + lam_draws[i] * sum(data[[k]]$MBF[which(data[[k]]$Phase == 2)])
      # for the first theta
      phase3Sum4 <- phase3Sum4 + theta2_draws[i-1] * lam_draws[i] * sum(data[[k]]$MBF[which(data[[k]]$Phase == 3)])
      # for second theta
      phase3Sum5 <- phase3Sum5 + theta1_draws[i-1] * lam_draws[i] * sum(data[[k]]$MBF[which(data[[k]]$Phase == 3)])
    }
    theta1_draws[i] <- rgamma(1, phase2Count + phase3Count + theta1PriorA, phase2Sum + phase3Sum4 + theta1PriorB)
    theta2_draws[i] <- rgamma(1, phase3Count + theta2PriorA, phase3Sum5 + theta2PriorB)
  }
  # Discard the burn-in and thin the retained draws.
  lam_finalDraws <- lam_draws[seq(from = burnin + 1, to = samples, by = thin)]
  theta1_finalDraws <- theta1_draws[seq(from = burnin + 1, to = samples, by = thin)]
  theta2_finalDraws <- theta2_draws[seq(from = burnin + 1, to = samples, by = thin)]
  # DIC: deviance (-2 log-likelihood) at every retained draw. Uncensored,
  # non-truncated points contribute exponential log-densities; censored
  # points contribute log-survival terms (-rate * time); truncated points
  # contribute rate * (Lower - Upper) exponent terms.
  d <- rep(0, length(lam_finalDraws))
  for(i in 1:length(lam_finalDraws)){
    for(j in 1:length(data)){
      d[i] <- d[i] -
        2*(sum(dexp(data[[j]]$MBF[which(data[[j]]$Censor == 0 & data[[j]]$Phase == 1 & data[[j]]$trun == F)], lam_finalDraws[i], log = T)) +
             sum(dexp(data[[j]]$MBF[which(data[[j]]$Censor == 0 & data[[j]]$Phase == 2 & data[[j]]$trun == F)], lam_finalDraws[i]*theta1_finalDraws[i], log = T)) +
             sum(dexp(data[[j]]$MBF[which(data[[j]]$Censor == 0 & data[[j]]$Phase == 3 & data[[j]]$trun == F)], lam_finalDraws[i]*theta1_finalDraws[i]*theta2_finalDraws[i], log = T)) -
             sum(lam_finalDraws[i]*(data[[j]]$MBF[which(data[[j]]$Censor == 1 & data[[j]]$Phase == 1 & data[[j]]$trun == F)])) -
             sum(lam_finalDraws[i]*theta1_finalDraws[i]*(data[[j]]$MBF[which(data[[j]]$Censor == 1 & data[[j]]$Phase == 2 & data[[j]]$trun == F)])) -
             sum(lam_finalDraws[i]*theta1_finalDraws[i]*theta2_finalDraws[i]*(data[[j]]$MBF[which(data[[j]]$Censor == 1 & data[[j]]$Phase == 3 & data[[j]]$trun == F)])) +
             sum(lam_finalDraws[i]*(data[[j]]$Lower[which(data[[j]]$Phase == 1 & data[[j]]$trun == T)]) -
                   lam_finalDraws[i]*(data[[j]]$Upper[which(data[[j]]$Phase == 1 & data[[j]]$trun == T)])) +
             sum(lam_finalDraws[i]*theta1_finalDraws[i]*(data[[j]]$Lower[which(data[[j]]$Phase == 2 & data[[j]]$trun == T)]) -
                   lam_finalDraws[i]*theta1_finalDraws[i]*(data[[j]]$Upper[which(data[[j]]$Phase == 2 & data[[j]]$trun == T)])) +
             sum(lam_finalDraws[i]*theta1_finalDraws[i]*theta2_finalDraws[i]*(data[[j]]$Lower[which(data[[j]]$Phase == 3 & data[[j]]$trun == T)]) -
                   lam_finalDraws[i]*theta1_finalDraws[i]*theta2_finalDraws[i]*(data[[j]]$Upper[which(data[[j]]$Phase == 3 & data[[j]]$trun == T)])))
    }
  }
  # Posterior mean deviance ...
  davg <- mean(d)
  # ... and the plug-in deviance at the posterior means of the parameters.
  dthetahat <- 0
  for(j in 1:length(data)){
    dthetahat <- dthetahat - 2*(sum(dexp(data[[j]]$MBF[which(data[[j]]$Censor == 0 & data[[j]]$Phase == 1 & data[[j]]$trun == F)], mean(lam_finalDraws), log = T)) +
                                  sum(dexp(data[[j]]$MBF[which(data[[j]]$Censor == 0 & data[[j]]$Phase == 2 & data[[j]]$trun == F)], mean(lam_finalDraws)*mean(theta1_finalDraws), log = T)) +
                                  sum(dexp(data[[j]]$MBF[which(data[[j]]$Censor == 0 & data[[j]]$Phase == 3 & data[[j]]$trun == F)], mean(lam_finalDraws)*mean(theta1_finalDraws)*mean(theta2_finalDraws), log = T)) -
                                  sum(mean(lam_finalDraws)*(data[[j]]$MBF[which(data[[j]]$Censor == 1 & data[[j]]$Phase == 1 & data[[j]]$trun == F)])) -
                                  sum(mean(lam_finalDraws)*mean(theta1_finalDraws)*(data[[j]]$MBF[which(data[[j]]$Censor == 1 & data[[j]]$Phase == 2 & data[[j]]$trun == F)])) -
                                  sum(mean(lam_finalDraws)*mean(theta1_finalDraws)*mean(theta2_finalDraws)*(data[[j]]$MBF[which(data[[j]]$Censor == 1 & data[[j]]$Phase == 3 & data[[j]]$trun == F)])) +
                                  sum(mean(lam_finalDraws)*(data[[j]]$Lower[which(data[[j]]$Phase == 1 & data[[j]]$trun == T)]) -
                                        mean(lam_finalDraws)*(data[[j]]$Upper[which(data[[j]]$Phase == 1 & data[[j]]$trun == T)])) +
                                  sum(mean(lam_finalDraws)*mean(theta1_finalDraws)*(data[[j]]$Lower[which(data[[j]]$Phase == 2 & data[[j]]$trun == T)]) -
                                        mean(lam_finalDraws)*mean(theta1_finalDraws)*(data[[j]]$Upper[which(data[[j]]$Phase == 2 & data[[j]]$trun == T)])) +
                                  sum(mean(lam_finalDraws)*mean(theta1_finalDraws)*mean(theta2_finalDraws)*(data[[j]]$Lower[which(data[[j]]$Phase == 3 & data[[j]]$trun == T)]) -
                                        mean(lam_finalDraws)*mean(theta1_finalDraws)*mean(theta2_finalDraws)*(data[[j]]$Upper[which(data[[j]]$Phase == 3 & data[[j]]$trun == T)])))
  }
  # Effective number of parameters and DIC (Spiegelhalter et al. 2002).
  pd <- davg - dthetahat
  dic <- davg + pd
  return(list(lam_draws = lam_finalDraws,
              theta1_draws = theta1_finalDraws,
              theta2_draws = theta2_finalDraws,
              DIC = dic,
              PD = pd,
              Deviance = d))
}
76409811ba287b29dee7f3dd115538e2f763c6a2 | c9034013cb33396a94a1614bce890a1dc472a3d0 | /scripts/Half_done_ideas.R | b65da114ef6bebef4d970d30785c8edca66fa2f9 | [] | no_license | goldenberg-lab/LFS_fibroblasts | 0fe02ed6aebeaafd1b82abd3f5b8a866c83ce0c9 | d772ef38c49e1c82799d80398dba2eac23cffa2f | refs/heads/master | 2020-06-10T01:06:23.509583 | 2020-04-07T14:27:54 | 2020-04-07T14:27:54 | 193,541,030 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,534 | r | Half_done_ideas.R | # Half finished, might be useful functions that part way through implementing I
# realised will not help in the way I want them to.
##### This idea was that if I separate the rows into different training and
##### testing sets then I could compare the performance where the same rows are
##### not in the training set that are in the test set and when the same rows
##### are in both, but now I'm thinking there isn't any reason
##### to assume that separating by different rows wouldn't perform as well if it
##### is a row effect, since the row difference could still hold even if I split
##### the sets in this way so that it would still learn this difference in the
##### test set. i.e. the row effect would generalize to all instances of
##### different rows I have sample in so it could still be a well performing
##### model.
#' Split the LFS Fibroblast data
#'
#' Split the data into n bins for ten fold cross validation. If grps is not
#' NULL then the columns in grps will be kept together in each fold, and
#' splitting the bins to have their number of observations as even as possible.
#' If grps is NULL then it is assumed each row is it's own group.
#'
#' @param dat a tibble of all data to be split into 10 bins.
#' @param bincol a sym to be the column name for the bin indicator column
#' @param groups a list of quos which are the columns that define the groups.
#' @param response a sym which is the column name that contains the classes
#' @param n an integer indicating the number of bins to split the data into.
#'
#' @return a tibble with an additional column indicating bin.
#'
#' @export
# Split data into `n` bins for cross validation, keeping whole groups
# together and balancing bin sizes greedily (largest group first, into the
# bin with the fewest rows so far).
#
# NOTE(review): the file header says these are half-finished ideas, and the
# function does not run as written:
#   * browser() is a leftover debugging breakpoint;
#   * sep_by_class is computed but never used;
#   * the final left_join() references `groups`, which is never defined
#     (presumably `grps` was meant), and `by = !!!...` is not a valid
#     left_join() key specification -- it expects a character vector.
split_dat <- function(dat, bincol, grps, response, n = 10){
  stopifnot(require(tidyverse))
  browser()  # leftover debug breakpoint -- halts execution interactively
  # Default grouping: every row is its own group.
  if(is.null(grps)){
    dat <- dat %>% mutate(grp = 1:nrow(dat))
    grps <- list(quo(grp))
  }
  # Capture the bin-indicator and response columns for tidy evaluation.
  bincol <- enquo(bincol); response <- enquo(response)
  classes <- unique(dat %>% select(!!response) %>% .[[1]])
  # Per-class subsets (currently unused -- see note above).
  sep_by_class <- lapply(classes, function(class, x){x %>% filter(!!response == class)}, dat)
  # Row count of each group, sorted largest first for the greedy packing.
  grpsizes <- dat %>%
    group_by(!!! grps) %>%
    mutate(grp_id = group_indices()) %>%
    group_by(grp_id) %>%
    summarize(n = n()) %>%
    arrange(desc(n))
  # Greedy balancing: each group goes to the currently smallest bin
  # (ties broken by the lowest bin index).
  bincounts <- rep(0, n)
  binassignments <- rep(0, nrow(grpsizes))
  for(i in 1:nrow(grpsizes)){
    bin <- min(which(bincounts == min(bincounts)))
    bincounts[bin] <- bincounts[[bin]] + grpsizes$n[[i]]
    binassignments[i] <- bin
  }
  # Attach the bin assignment back onto the input rows.
  grpsizes %>% mutate(!!bincol := binassignments) %>%
    left_join(dat, ., by = !!!groups)
}
|
beb859d89e926aa751ed6de222fc17787d966f27 | 20fb140c414c9d20b12643f074f336f6d22d1432 | /man/NISTpointComputerTOmeter.Rd | cce86d438bb9cb512eee077270bf6d4ef1386789 | [] | no_license | cran/NISTunits | cb9dda97bafb8a1a6a198f41016eb36a30dda046 | 4a4f4fa5b39546f5af5dd123c09377d3053d27cf | refs/heads/master | 2021-03-13T00:01:12.221467 | 2016-08-11T13:47:23 | 2016-08-11T13:47:23 | 27,615,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 802 | rd | NISTpointComputerTOmeter.Rd | \name{NISTpointComputerTOmeter}
\alias{NISTpointComputerTOmeter}
\title{Convert point to meter }
\usage{NISTpointComputerTOmeter(pointComputer)}
\description{\code{NISTpointComputerTOmeter} converts from point (computer) (1/72 in) to meter (m) }
\arguments{
\item{pointComputer}{point (computer) (1/72 in) }
}
\value{meter (m) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTpointComputerTOmeter(10)
}
\keyword{programming} |
0589a94ee0e61115c05e988415503d706087ba8d | c5235bf92b10b622d67ceb30b48835f2f8293f6f | /single_cell_workflow/workflow/scripts/infercnv_analysis.R | c433aba4f294e6ea7710ff5cb0f0a1268880486e | [] | no_license | wlchin/IFNsignalling | 2f601533466cadf7d5b8e72fced40ed00666bbc2 | 4b94c464e685224157e951e22daa6585e647caae | refs/heads/master | 2023-04-09T09:32:50.099304 | 2022-09-12T03:34:00 | 2022-09-12T03:34:00 | 282,825,263 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,156 | r | infercnv_analysis.R |
library(infercnv)
library(Seurat)

# Seurat object for this sample, supplied by the upstream Snakemake rule.
x <- readRDS(snakemake@input[[1]])

# Gene coordinates (columns 2-4 of the mm10 table) keyed by gene name:
# the gene-order input infercnv requires.
genes_ <- read.table("resources/mm10.data")
genes <- data.frame(genes_[,2:4], row.names = genes_[,1])

# Per-cell labels used to split cells into reference vs. observation groups.
# (A previous assignment built labels from Idents()/seurat_clusters but was
# immediately overwritten on the next line; that dead statement is removed.)
annots <- data.frame(label = x$analysis_ident, row.names = colnames(x))

# Raw counts with columns ordered to match the annotation table.
data <- x@assays$RNA@counts[,colnames(x)]

# Every label except the first is passed as a CNV-free reference group.
testvec <- as.character(unique(annots$label))[-1]

infercnv_obj <- infercnv::CreateInfercnvObject(raw_counts_matrix=data,
                                               gene_order_file=genes,
                                               annotations_file=annots,
                                               ref_group_names=testvec)

infercnv_obj <- infercnv::run(infercnv_obj,
                              cutoff=0.1,
                              out_dir=snakemake@output[[1]],
                              cluster_by_groups=TRUE,
                              denoise=TRUE,
                              HMM=FALSE,
                              num_threads=12,
                              no_plot=FALSE)

# Persist the full infercnv result object for downstream rules.
saveRDS(infercnv_obj, file = snakemake@output[[2]])
|
e4a23e5a47926e601867354a7c56843d45939535 | 13674bb67f76e685de0dbf1f245d2eee614d397c | /bonemarow_asapseq/code/34_TF_pseudotime.R | 19275c836855511fa4a0a43224696ffa095de001 | [] | no_license | liuxiaoping2020/asap_reproducibility | 38101126d37563a4a5f593ee5b7cf7a3ab2690d2 | 9968e6151fb1e3d0d98ee6ad6e30e729838f06d2 | refs/heads/master | 2023-05-14T08:13:22.943175 | 2021-06-11T02:30:24 | 2021-06-11T02:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,728 | r | 34_TF_pseudotime.R | library(Seurat)
library(viridis)
library(scales)
library(Matrix)
library(ComplexHeatmap)
library(BuenColors)
library(dplyr)
library(data.table)
"%ni%" <- Negate("%in%")
# Import ATAC processed data
mdf <- readRDS("../output/ArchR_main_metadata.rds")
mdf$barcode <- gsub("ASAP_marrow_hg38#", "", rownames(mdf))
# Order the cells and the gene / tags
mat <- readRDS("../output/TF_cell_matrix_bagged.rds")
colnames(mat) <- gsub("ASAP_marrow_hg38#", "", colnames(mat))
ordered_cells <- mdf[!is.na(mdf$monocyte_PS),] %>% arrange((monocyte_PS))
tf_mat_ordered <- mat[,ordered_cells$barcode]
# Now group/order/smooth the cell states and make ArchR's nice heatmaps
# see: https://github.com/GreenleafLab/ArchR/blob/5f855d7b7ff3f57eb0d28f312c5ea5d373d27ebd/R/Trajectory.R
n_bins <- 100
groups_tf <- sapply(1:n_bins, function(idx){
multiplier <- 100/n_bins
bcs <- ordered_cells %>% dplyr::filter(monocyte_PS >= (idx-1)*multiplier & monocyte_PS < idx*multiplier) %>% pull(barcode)
rowMeans(tf_mat_ordered[,bcs], na.rm = TRUE)
})
# Filter based on indices
smoothWindow = 11
smooth_groups_tf <- data.matrix((apply((groups_tf), 1, function(x) ArchR:::.centerRollMean((x), k = smoothWindow))))
smooth_groups_minmax_tf <- t(apply(groups_tf, 2, function(x)(x-min(x))/(max(x)-min(x))))
pdf("../plots/monocytic_heatmap_tf.pdf", width = 4, height = 2)
Heatmap(t(smooth_groups_minmax_tf)[c("CEBPB", "GATA1","JUND","SPIB", "KLF1"),],
col=as.character(jdb_palette("solar_rojos",type="continuous")),
show_row_names = TRUE,
cluster_columns = FALSE,
cluster_rows = FALSE,
row_names_gp = gpar(fontsize = 4),
column_names_gp = gpar(fontsize = 0),
show_column_names = FALSE)
dev.off()
|
7c1d7826d251805b996b99143158108ebc787e2d | 691b2f580c409e4d257b4934c566083cbd14d4d1 | /Listings/Chapter 2/2-4-eikon.R | 372624dfa619f148845b4d1f3a48b722f34816d3 | [] | no_license | momokeith1123/automated_trading_with_R | d18c9679314cc360b384be634ae6e5eaffe01362 | 93340bf1c71d71d6ffecbf739c1918c88492c6d1 | refs/heads/master | 2021-05-02T17:08:00.076023 | 2021-03-31T15:58:51 | 2021-03-31T15:58:51 | 120,684,366 | 0 | 0 | null | 2018-02-07T23:20:01 | 2018-02-07T23:20:01 | null | UTF-8 | R | false | false | 805 | r | 2-4-eikon.R | require(tidyverse)
# Load "invalid.R" file if available: it defines `invalid`, the vector of
# symbols that failed to download on earlier runs (it is dumped back to
# disk at the bottom of this script). rootdir/datadir/functiondir and the
# symbol vector S are expected to be defined by the calling environment.
invalid <- character(0)
setwd(rootdir)
if("invalid.R" %in% list.files()) source("invalid.R")
# Find all symbols not in directory and not missing: anything without a
# saved .csv that is not already known to be invalid.
setwd(datadir)
toload <- setdiff(S[!paste0(S, ".csv") %in% list.files()], invalid)
# Fetch each remaining symbol with eikon_get() (defined in eikon.R) and
# keep the results in a list named by symbol. (The old comment said
# "yahoo function", but the fetcher sourced here is Eikon-based.)
source(paste0(functiondir, "eikon.R"))
histdata <- map(toload, ~eikon_get(.x)) %>% set_names(toload)
# Write one symbol's history to "<RIC>.csv" in the current directory; on a
# failed fetch (NULL data) record the failure instead of writing.
#
# Args:
#   stockdata: data frame returned by eikon_get(), carrying the symbol in
#              its RIC column, or NULL when the fetch failed.
#   invalid:   character vector of symbols that have failed so far.
#
# Returns (invisibly) the possibly-extended `invalid` vector so callers can
# accumulate failures. The previous version read `stockname` in the else
# branch before it was ever defined (a runtime error) and silently dropped
# its update to the local copy of `invalid`.
savestockdata <-
  function (stockdata, invalid) {
    if(!is.null(stockdata)) {
      stockname <- paste0(stockdata$RIC[1], ".csv")
      write_csv(stockdata, stockname)
    } else {
      # The symbol name cannot be recovered from a NULL result; record the
      # failure as NA so it is still visible downstream.
      invalid <- c(invalid, NA_character_)
    }
    invisible(invalid)
  }
# Write each fetched symbol to disk.
# NOTE(review): the formula lambda `~ savestockdata(.x)` never forwards the
# extra `invalid` argument that map() passes along, and savestockdata()
# only modifies a local copy of `invalid`, so failed symbols are never
# actually recorded before the dump below -- confirm intent.
map(histdata, ~ savestockdata (.x), invalid)
setwd(rootdir)
# Persist the failure list (unchanged -- see note above) for the next run.
dump(list = c("invalid"), "invalid.R")
de4703011f2f2a5976dcd9a477981354c442ff02 | 1e78df69ad033f9229d7d668cfd355190356db7e | /cachematrix.R | 23a0181f7b19f55e9d6aad6c2fea541f2f0cea41 | [] | no_license | tdailey80/ProgrammingAssignment2 | 254f091d2065d5e66febd5c35357fa2ee5b82143 | aece48f3ce81e4e1858b49543ee9ed0d33c37dbe | refs/heads/master | 2021-01-20T21:49:04.310508 | 2015-03-22T03:20:12 | 2015-03-22T03:20:12 | 32,651,601 | 0 | 0 | null | 2015-03-21T21:54:37 | 2015-03-21T21:54:37 | null | UTF-8 | R | false | false | 1,932 | r | cachematrix.R | ## The below functions calculate the inverse of a provided matrix using
##the solve function. When called, the cached version of the matrix is
##retrieved using lexical scoping.
## This function creates a special "matrix" object that can cache its inverse
# Build a "cache-aware matrix": a list of closures wrapping a matrix `x`
# together with a memo slot for its inverse. Replacing the matrix via
# set() clears the memo, so a stale inverse can never be served.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  # Swap in a new matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }

  # Read back the stored matrix.
  get <- function() {
    x
  }

  # Store / retrieve the memoised inverse.
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function computes the inverse of the special "matrix"
##returned by makeCacheMatrix above. If the inverse has already
##been calculated (and the matrix has not changed), then
##cacheSolve should retrieve the inverse from the cache.
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# If the inverse has already been computed it is served from the cache;
# otherwise it is computed with solve(), stored, and returned.
#
# Args:
#   x:   object returned by makeCacheMatrix().
#   ...: extra arguments forwarded to solve().
#
# A non-NULL cache is always current, because makeCacheMatrix()$set()
# clears the cache whenever the underlying matrix changes. The previous
# version tried an extra "has the matrix changed?" check against the
# non-existent fields x$setmatrix / x$getmatrix (both NULL, so the check
# was vacuous) and its else branch used `break` outside any loop, which
# is an error in R if ever reached. Both have been removed.
cacheSolve <- function(x, ...) {
  # Retrieve the cached inverse, if any.
  m <- x$getinverse()
  if(!is.null(m)) {
    message("retrieving cached inverse")
    return(m)
  }
  # Cache miss: compute, memoise, and return the inverse.
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
191bb00210825b2172e3abc94920987572423737 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mistr/examples/nbinomdist.Rd.R | f0eb5e63fc4d850311191e2995abb6ee553d1b16 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 210 | r | nbinomdist.Rd.R | library(mistr)
### Name: nbinomdist
### Title: Creates an Object Representing Negative Binomial Distribution
### Aliases: nbinomdist
### ** Examples
N <- nbinomdist(10, 0.5)
d(N, c(2, 3, 4, NA))
r(N, 5)
|
2d0101f690fa46f45c6132955b4d19b6434b597c | 4814eeccf3b50778214fbee7b0eac1ddbd4d50c1 | /man/table_names.Rd | 5784e289430b22e640fb95cfa09fdcf9d91f8732 | [
"BSD-3-Clause"
] | permissive | marcpaterno/h5tables | 61abd6bea82794653435c28035daf11f33c08422 | 9b2af22b875e4af7407963926516b3bbdc6aa576 | refs/heads/master | 2022-11-21T01:40:25.115608 | 2020-07-14T02:27:50 | 2020-07-14T02:27:50 | 278,502,859 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 638 | rd | table_names.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcs.R
\name{table_names}
\alias{table_names}
\title{Return the names of all tables in a tabular HDF5 file.}
\usage{
table_names(file, pattern = NULL)
}
\arguments{
\item{file}{an H5File object representing an HDF5 "tabular data" file, or the name of such a file.}
\item{pattern}{a regular expression to match against table names; NULL if none is to be used}
}
\value{
a vector (mode character) containing the names of the tables in the file.
}
\description{
If a regular expression \code{pattern} is supplied, only the names matching that pattern
are returned.
}
|
919b41cfb636c8531772f4c271b8c934cc8782c1 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /paws/R/acmpca_service.R | 52051c213f536418c9e68d15274956242a12722b | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 9,691 | r | acmpca_service.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' AWS Certificate Manager Private Certificate Authority
#'
#' @description
#' This is the *Amazon Web Services Private Certificate Authority API
#' Reference*. It provides descriptions, syntax, and usage examples for
#' each of the actions and data types involved in creating and managing a
#' private certificate authority (CA) for your organization.
#'
#' The documentation for each action shows the API request parameters and
#' the JSON response. Alternatively, you can use one of the Amazon Web
#' Services SDKs to access an API that is tailored to the programming
#' language or platform that you prefer. For more information, see [Amazon
#' Web Services SDKs](https://aws.amazon.com/developer/tools/#SDKs).
#'
#' Each Amazon Web Services Private CA API operation has a quota that
#' determines the number of times the operation can be called per second.
#' Amazon Web Services Private CA throttles API requests at different rates
#' depending on the operation. Throttling means that Amazon Web Services
#' Private CA rejects an otherwise valid request because the request
#' exceeds the operation's quota for the number of requests per second.
#' When a request is throttled, Amazon Web Services Private CA returns a
#' ThrottlingException error. Amazon Web Services Private CA does not
#' guarantee a minimum request rate for APIs.
#'
#' To see an up-to-date list of your Amazon Web Services Private CA quotas,
#' or to request a quota increase, log into your Amazon Web Services
#' account and visit the Service Quotas console.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- acmpca(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- acmpca()
#' svc$create_certificate_authority(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=acmpca_create_certificate_authority]{create_certificate_authority} \tab Creates a root or subordinate private certificate authority (CA)\cr
#' \link[=acmpca_create_certificate_authority_audit_report]{create_certificate_authority_audit_report} \tab Creates an audit report that lists every time that your CA private key is used\cr
#' \link[=acmpca_create_permission]{create_permission} \tab Grants one or more permissions on a private CA to the Certificate Manager (ACM) service principal (acm\cr
#' \link[=acmpca_delete_certificate_authority]{delete_certificate_authority} \tab Deletes a private certificate authority (CA)\cr
#' \link[=acmpca_delete_permission]{delete_permission} \tab Revokes permissions on a private CA granted to the Certificate Manager (ACM) service principal (acm\cr
#' \link[=acmpca_delete_policy]{delete_policy} \tab Deletes the resource-based policy attached to a private CA\cr
#' \link[=acmpca_describe_certificate_authority]{describe_certificate_authority} \tab Lists information about your private certificate authority (CA) or one that has been shared with you\cr
#' \link[=acmpca_describe_certificate_authority_audit_report]{describe_certificate_authority_audit_report} \tab Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport action\cr
#' \link[=acmpca_get_certificate]{get_certificate} \tab Retrieves a certificate from your private CA or one that has been shared with you\cr
#' \link[=acmpca_get_certificate_authority_certificate]{get_certificate_authority_certificate} \tab Retrieves the certificate and certificate chain for your private certificate authority (CA) or one that has been shared with you\cr
#' \link[=acmpca_get_certificate_authority_csr]{get_certificate_authority_csr} \tab Retrieves the certificate signing request (CSR) for your private certificate authority (CA)\cr
#' \link[=acmpca_get_policy]{get_policy} \tab Retrieves the resource-based policy attached to a private CA\cr
#' \link[=acmpca_import_certificate_authority_certificate]{import_certificate_authority_certificate} \tab Imports a signed private CA certificate into Amazon Web Services Private CA\cr
#' \link[=acmpca_issue_certificate]{issue_certificate} \tab Uses your private certificate authority (CA), or one that has been shared with you, to issue a client certificate\cr
#' \link[=acmpca_list_certificate_authorities]{list_certificate_authorities} \tab Lists the private certificate authorities that you created by using the CreateCertificateAuthority action\cr
#' \link[=acmpca_list_permissions]{list_permissions} \tab List all permissions on a private CA, if any, granted to the Certificate Manager (ACM) service principal (acm\cr
#' \link[=acmpca_list_tags]{list_tags} \tab Lists the tags, if any, that are associated with your private CA or one that has been shared with you\cr
#' \link[=acmpca_put_policy]{put_policy} \tab Attaches a resource-based policy to a private CA\cr
#' \link[=acmpca_restore_certificate_authority]{restore_certificate_authority} \tab Restores a certificate authority (CA) that is in the DELETED state\cr
#' \link[=acmpca_revoke_certificate]{revoke_certificate} \tab Revokes a certificate that was issued inside Amazon Web Services Private CA\cr
#' \link[=acmpca_tag_certificate_authority]{tag_certificate_authority} \tab Adds one or more tags to your private CA\cr
#' \link[=acmpca_untag_certificate_authority]{untag_certificate_authority} \tab Remove one or more tags from your private CA\cr
#' \link[=acmpca_update_certificate_authority]{update_certificate_authority} \tab Updates the status or configuration of a private certificate authority (CA)
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname acmpca
#' @export
# Construct the ACM PCA service client (generated code -- see file header).
# Combines the shorthand credentials/endpoint/region arguments with the
# full `config` list, then binds the resolved configuration to the
# service's operation table and returns it as the callable client.
acmpca <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Merge the shorthand arguments into the config list via paws.common.
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  # Copy the generated operation table and attach the configuration,
  # producing the client whose operations the caller invokes.
  svc <- .acmpca$operations
  svc <- set_config(svc, config)
  return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
.acmpca <- list()
# Operation functions are registered onto this table elsewhere in the
# generated package code.
.acmpca$operations <- list()
# Static service metadata: per-region endpoint templates, service and
# signing identifiers, API version, and the JSON-RPC target prefix.
.acmpca$metadata <- list(
  service_name = "acm-pca",
  endpoints = list("*" = list(endpoint = "acm-pca.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "acm-pca.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "acm-pca.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "acm-pca.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "ACM PCA",
  api_version = "2017-08-22",
  signing_name = "acm-pca",
  json_version = "1.1",
  target_prefix = "ACMPrivateCA"
)
# Build the low-level service object: "jsonrpc" protocol handlers with
# "v4" signature-version request signing, bound to the metadata above.
.acmpca$service <- function(config = list()) {
  handlers <- new_handlers("jsonrpc", "v4")
  new_service(.acmpca$metadata, handlers, config)
}
|
f7f7604672f51021a08fe9b8ba1529456c232ef2 | 33d91334004f2b8db94430ac5778bd977c99e0ac | /R/denom_batter.R | 102858e7d791e040d787af22d47ae5de01aaae24 | [] | no_license | BillPetti/fgrapher | fa78fad07af6344c79f5352773b17b3079229392 | 58446ef6f3b293724fb39598e45ef63f485cc02e | refs/heads/master | 2020-07-04T20:55:13.045018 | 2017-08-15T18:13:10 | 2017-08-15T18:13:10 | 74,143,798 | 2 | 2 | null | 2016-12-06T16:21:03 | 2016-11-18T15:50:21 | R | UTF-8 | R | false | false | 83 | r | denom_batter.R | #' denom_batter
#'
#' A data set
#'
#' @docType data
#' @usage data(denom_batter) |
1a1d0ac45e6a12b0aaa2f0691aeeb06fa6ef9b5d | 8ff63b3ecfcc72e98faff13578dfab9a6f2b79a5 | /R/json_list.R | dad259be47b9667455303580ef4b4de332653aa3 | [
"MIT"
] | permissive | lstmemery/rmangal | 8c767619d20deea1ba7877940697ed1dcb6df898 | 902697c123e0a7000b2cca465a4460c561d83278 | refs/heads/master | 2020-04-10T11:07:47.355941 | 2018-04-27T15:40:31 | 2018-04-27T15:40:31 | 160,984,354 | 0 | 0 | null | 2018-12-08T22:33:39 | 2018-12-08T22:33:39 | null | UTF-8 | R | false | false | 857 | r | json_list.R | #' @title Dataframe into json list
#'
#' @description Convert a dataframe into a list of json data
#'
#' @param df A dataframe, header must be the names of attributes
#'
#' @return A named list of JSON strings, one per row of \code{df} (names
#'   taken from its row names), each ready for injection in Mangal; if
#'   \code{df} is not a dataframe, a one-element list holding its JSON
#'   representation.
#'
#' @author Gabriel Bergeron
#'
#' @keywords manip
#'
#' @importFrom jsonlite toJSON
#'
#' @export
json_list <- function(df){
  if (inherits(df, "data.frame")) {
    # One list element per row, named after the row names.
    rows <- setNames(split(df, seq_len(nrow(df))), rownames(df))
    # seq_len()/lapply() are safe for zero-row inputs, unlike the former
    # `for (i in 1:length(df))`, which errored when df had no rows.
    lapply(rows, function(row) jsonlite::toJSON(row, auto_unbox = TRUE, digits = 12))
  } else {
    # Not a dataframe: wrap its single JSON representation in a list.
    list(jsonlite::toJSON(df, auto_unbox = TRUE, digits = 12))
  }
}
|
0d04b2e95373a2dd812dc3716f504ec32b2c9a4c | 3e6bea01f47934e55bda80a6e796da59eacb8ba7 | /R/loading_gen.R | 381a32049002603f21ff9fad4a4a0192621ea3a8 | [] | no_license | nathansam/MSc-diss-code | b581c411b773d435d7f00bc65624712fb77d7c4d | 1f3f4af207bc840e0056fa3027567874621b7aed | refs/heads/master | 2021-05-21T01:01:15.706062 | 2020-04-02T14:27:34 | 2020-04-02T14:27:34 | 252,478,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 838 | r | loading_gen.R | #' @export
#' Compute the 20 checkpoint iteration numbers for a console progress bar.
#'
#' @param genenumber Total number of iterations (e.g. number of genes).
#' @return Numeric vector of length 20: the iteration number at which each
#'   5\% progress increment is reached (rounded to the nearest integer).
#' @export
LoadingGen <- function(genenumber) {
  # Vectorized replacement for the original preallocate-and-loop version;
  # result is identical: round(genenumber / 20 * i) for i = 1..20.
  round(genenumber / 20 * seq_len(20))
}
#' Print or advance a 20-segment console progress bar.
#'
#' On the first iteration a warning plus an empty bar is printed; whenever
#' the current iteration matches one of the checkpoints produced by
#' LoadingGen(), the bar is redrawn with the completed segments filled in.
#'
#' @param iteration Current iteration number.
#' @param loading_values Checkpoint iterations, as returned by LoadingGen().
#' @export
LoadingPrint <- function(iteration, loading_values) {
  # First call: print the one-off warning and an empty 20-segment bar.
  if (iteration == 1) {
    cat(crayon::red(noquote("This may take a while if using a large dataset! \n")))
    cat(noquote("Progress: \n"))
    cat(noquote(paste(replicate(20, "□"), collapse = "")))
    cat(noquote("\n"))
  }
  # Checkpoint reached: redraw the bar in place ("\r") with n_done filled
  # segments followed by the remaining empty ones.
  if (iteration %in% loading_values) {
    n_done <- match(iteration, loading_values)
    filled <- paste(replicate(n_done, "■"), collapse = "")
    empty <- paste(replicate(20 - n_done, "□"), collapse = "")
    cat(noquote(paste("\r", filled, empty, "\n", sep = "")))
    flush.console()
  }
}
|
3ad3e4991c9a2c8b4fb9fbe24929641d71145174 | b65f2ec075f7b95009a8b7998f44a50fc814f0b7 | /man/data_prep_func.Rd | 169cf9e6e634c305db6f9943bba759ee472e769f | [] | no_license | ssh352/sumots | 6eb5e887aa18f4eb7fc1855dab1990b3efccc0ce | ce517ce9a37290837bc4808db3f82508c03f3349 | refs/heads/master | 2023-03-30T17:18:51.145725 | 2021-04-05T20:50:13 | 2021-04-05T20:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,318 | rd | data_prep_func.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_ml_data.R
\name{data_prep_func}
\alias{data_prep_func}
\title{Prepares data for modeling}
\usage{
data_prep_func(
data,
outcome_var,
negative_to_zero = FALSE,
fix_gap_size = FALSE,
max_gap_size,
remove_one_obs = FALSE,
trailing_zero = FALSE,
transformation = "none",
use_holidays = FALSE,
holidays_to_use_1,
holidays_to_use_2,
use_seasonal_lag = TRUE,
seasonal_frequency,
horizon,
clean = FALSE,
drop_na = TRUE,
use_holiday_to_clean = FALSE,
holiday_for_clean,
use_abc_category = FALSE,
pacf_threshold = 0.2,
no_fourier_terms = 5,
fourier_k = 5,
slidify_period = c(4, 8),
use_own_fourier = FALSE,
fourier_terms,
recursive_data = FALSE,
no_recursive_lag,
xreg = NULL,
fill_na_with_zero = TRUE
)
}
\arguments{
\item{data}{Data frame with data on the right date format e.g. daily, weekly, monthly and with column named 'id'}
\item{outcome_var}{Name of the outcome variable. The function will change the outcome variable name to 'outcome'}
\item{negative_to_zero}{Recodes negative values as zero, defaults to TRUE}
\item{max_gap_size}{The maximum length that the outcome can be zero. If the interval is larger than max_gap_size then only use data after the interval}
\item{trailing_zero}{Extends all time series, back and forward, so they will be the same length. Defaults to FALSE}
\item{transformation}{Should the series be transformed, e.g. log or log1p. Defaults to none}
\item{use_holidays}{Should national holidays be included. As of now this has to be a dataframe supplied by the user to the holidays_to_use argument}
\item{holidays_to_use_1}{Data frame of dummy holidays. Outcome of the create_holiday() function: fridagar_tbl}
\item{holidays_to_use_2}{Data frame of holidays, one variable.}
\item{use_seasonal_lag}{Should lag of outcome variable, equal to the seasonality, be used. Defaults to TRUE.}
\item{seasonal_frequency}{The frequency of the data. E.g. 52 for weekly data}
\item{horizon}{The forecast horizon}
\item{clean}{Should the data be cleaned for outliers. Defaults to FALSE}
\item{drop_na}{When creating data_prepared_tbl, should NA's be dropped. Defaults to TRUE}
\item{use_holiday_to_clean}{Uses fridagar_one_var from the create_holiday() function to revert series to their original values if cleaned}
\item{pacf_threshold}{Threshold for where to cut the PACF to choose terms for the fourier calculation}
\item{no_fourier_terms}{Number of fourier terms, defaults to 5}
\item{fourier_k}{The fourier term order, defaults to 5}
\item{slidify_period}{The window size, defaults to c(4, 8)}
\item{use_own_fourier}{Should you use your own fourier terms? Defaults to FALSE}
\item{fourier_terms}{The fourier terms to include.}
\item{recursive_data}{Should the data be prepared for a recursive forecasting. Defaults to FALSE.}
\item{no_recursive_lag}{The number of lags to use.}
\item{xreg}{External regressors to add}
\item{fill_na_with_zero}{Used when drop_na = FALSE to fill missing values with zero instead of dropping them.}
}
\value{
List with data_prepared, future_data, train_data, splits and horizon
}
\description{
Prepares data for modeling
}
|
96daa89ec151a54930e5731a1fefdefcb1943656 | 8d85451aa39c842322ae4eb08c0be46c64dbaf72 | /man/add_gnomAD_AF.Rd | ef3e722dd62cb20979ecb3557c920e2377baaaf4 | [
"MIT"
] | permissive | KalinNonchev/tMAE | e9b1d13ae2c3054bce66a73311d3faf0216cce34 | ef0e45ceb2997550485464b534b9dcf24ae2a372 | refs/heads/master | 2023-05-06T12:35:01.111897 | 2020-12-31T15:44:43 | 2020-12-31T15:44:43 | 371,254,091 | 1 | 0 | MIT | 2021-05-27T05:13:56 | 2021-05-27T05:13:55 | null | UTF-8 | R | false | true | 1,231 | rd | add_gnomAD_AF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_gnomAD_AF.R
\name{add_gnomAD_AF}
\alias{add_gnomAD_AF}
\title{Add allele frequencies from gnomAD}
\usage{
add_gnomAD_AF(
data,
genome_assembly = c("hg19", "hs37d5", "hg38", "GRCh38"),
max_af_cutoff = 0.001,
populations = c("AF", "AF_afr", "AF_amr", "AF_eas", "AF_nfe", "AF_popmax"),
...
)
}
\arguments{
\item{data}{A data.frame containing allelic counts.}
\item{genome_assembly}{either 'hg19/hs37d5' or 'hg38/GRCh38' indicating the genome assembly of the variants.}
\item{max_af_cutoff}{cutoff for a variant to be considered rare. Default is .001.}
\item{populations}{The population to be annotated.}
\item{...}{Used for backwards compatibility (gene_assembly -> genome_assembly)}
}
\value{
A data.table with the original contents plus columns containing allele frequencies from different gnomAD populations.
}
\description{
Add allele frequency information from gnomAD.
}
\examples{
file <- system.file("extdata", "allelic_counts_HG00187.csv", package = "tMAE", mustWork = TRUE)
maeCounts <- fread(file)
maeRes <- DESeq4MAE(maeCounts)
maeRes <- add_gnomAD_AF(maeCounts, genome_assembly = 'hg19', pop="AF")
}
\author{
Vicente Yepez
}
|
5ed014c625db8edf0349b7fe555b6c318bacc01d | 21b37e4bc292073d4347931b40bb2afcc511f8c0 | /run_cces_cumulative.R | 88a02fca449c3b17f108e6d90f00d5d4c9b8ba95 | [] | no_license | stevenworthington/cces | b8d515085cf09481e659f97fd1f174471695ee73 | 013336dc6f3c19d72ecd554460c76d8fabd34bb5 | refs/heads/master | 2022-02-24T20:14:44.076336 | 2019-10-08T16:14:29 | 2019-10-08T16:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,958 | r | run_cces_cumulative.R | library(tidyverse)
library(brms)
library(mapdata)
library(parallel)
# vote 1 means voted for trump, vote 0 means hillary
# male = +0.5 codes male, -0.5 codes female (recoded from sex in preprocess below)
# Recode covariates for modelling: centered male indicator (+0.5 male,
# -0.5 female), explicit factor level order for education and age bands,
# then drop the raw `sex` column.
preprocess <- function(df) {
  educ_levels <- c("noHS", "HS", "some_or_assoc", "bach", "bachplus")
  age_levels <- c("18-29", "30-44", "45-64", "65+")
  df %>%
    mutate(
      male = ifelse(sex == "Male", 0.5, -0.5),
      educ = factor(educ, levels = educ_levels),
      age = factor(age, levels = age_levels)
    ) %>%
    select(-sex)
}
# Load survey (CCES) and poststratification (PUMS) tables; both files are
# space-delimited, get lower-cased state names, and share preprocess().
cces = read_delim("cces_regression_cumulative.delim", delim = " ") %>%
  mutate(state = str_to_lower(state)) %>%
  preprocess

pums = read_delim("poststrat_table.delim", delim = " ") %>%
  mutate(state = str_to_lower(state)) %>%
  preprocess

# Sanity check: every level of each demographic variable seen in the survey
# must also exist in the poststratification table, otherwise poststratified
# prediction below would be ill-defined.
for(name in c("male", "race", "educ", "state", "age", "marstat")) {
  unique_cces = unique(cces %>% pull(name))
  unique_pums = unique(pums %>% pull(name))
  if(length(setdiff(unique_cces, unique_pums)) > 0) {
    stop(paste("Column", name, "does not have the same elements in the cces and pums data"))
  }
}
#vote ~ sex +
#(1 | state) + (1 | race) + (1 | educ) + (1 | age) + (1 | marstat) +
# (1 | marstat:age) + (1 | marstat:educ) + (1 | marstat:race) + (1 | marstat:state) + (1 | marstat:sex) +
# (1 | state:age) + (1 | state:educ) +(1 | state:race) + (1 | state:sex) +
# (1 | race:age) + (1 | race:educ) + (1 | race:sex) +
# (1 | educ:age) + (1 | educ:sex) +
# (1 | age:sex)
#vote ~ sex +
# (1 | state) + (1 | race) + (1 | educ) + (1 | age) +
# (1 | race:state) + (1 | educ:state) + (1 | age:state)
# Collapse individual responses to binomial counts per demographic cell and
# year: N trials per cell, with econ_betters / approve_pres success counts.
cces_binomial = cces %>%
  group_by(male, state, race, educ, age, marstat, year) %>%
  summarise(N = n(),
            econ_betters = sum(econ_better),
            approve_pres = sum(approve_pres)) %>%
  arrange(-N) %>%
  ungroup()

# Survey years present in the data; one pair of models is fit per year below.
years = cces_binomial %>%
  pull(year) %>%
  unique
# Fit one pair of binomial multilevel (MRP) models per survey year: one for
# economic optimism ("econ_betters") and one for presidential approval
# ("approve_pres").  Both models share exactly the same right-hand side, so
# it is built once and the outcome column is swapped in.
mrp_rhs <- paste(
  "male +",
  "(1 | race) + (1 | educ) + (1 | age) + (1 | marstat) +",
  "(1 + male | state) + (male - 1 | race) + (male - 1 | educ) +",
  "(male - 1 | age) + (male - 1 | marstat) +",
  "(1 | race:educ) + (1 | race:age) + (1 | race:marstat) +",
  "(1 | educ:age) + (1 | educ:marstat) +",
  "(1 | age:marstat)"
)

# Weakly informative N(0, 1) priors on fixed effects, intercept and SDs.
mrp_priors <- set_prior("normal(0, 1)", class = "b") +
  set_prior("normal(0, 1)", class = "Intercept") +
  set_prior("normal(0, 1)", class = "sd")

# Fit the shared model specification for a given binomial outcome column.
fit_mrp <- function(outcome, data_year) {
  brm(as.formula(paste0(outcome, " | trials(N) ~ ", mrp_rhs)),
      data = data_year,
      family = "binomial",
      cores = 8,
      init = 0,
      iter = 500,
      prior = mrp_priors,
      control = list(adapt_delta = 0.9))
}

# Preallocate the per-year fit lists instead of growing empty lists.
efits <- vector("list", length(years))
afits <- vector("list", length(years))

for (i in seq_along(years)) {
  cces_binomial_year <- cces_binomial %>%
    filter(year == years[i])
  efits[[i]] <- fit_mrp("econ_betters", cces_binomial_year)
  afits[[i]] <- fit_mrp("approve_pres", cces_binomial_year)
}
# For each year, draw poststratified predictions: expected success counts
# for every PUMS cell from both models, keep 20 posterior draws, convert
# counts to proportions (divide by the cell size N), and reshape to long
# form tagged with year and outcome.
predictdf = mclapply(1:length(years), function(i) {
  # fitted() with summary = FALSE returns a draws x cells matrix.
  epredicted_d = fitted(efits[[i]], newdata = pums, allow_new_levels = TRUE, summary = FALSE)
  apredicted_d = fitted(afits[[i]], newdata = pums, allow_new_levels = TRUE, summary = FALSE)

  # Sample 20 posterior draws, transpose so cells are rows, label the draw
  # columns predicted_1..predicted_20, attach the PUMS cell covariates, and
  # pivot the draws long (one row per cell x draw).
  bind_rows(epredicted_d[sample(1:nrow(epredicted_d), 20), ] %>% t %>%
              as_tibble(.name_repair = function(cols) { paste0("predicted_", 1:length(cols)) }) %>%
              bind_cols(pums) %>%
              pivot_longer(starts_with("predicted_"), names_to = c("rep"), names_pattern = "predicted_([0-9]+)", values_to = "predicted") %>%
              mutate(predicted = predicted / N) %>%
              mutate(year = years[i],
                     outcome = "econ_better"),
            apredicted_d[sample(1:nrow(apredicted_d), 20), ] %>% t %>%
              as_tibble(.name_repair = function(cols) { paste0("predicted_", 1:length(cols)) }) %>%
              bind_cols(pums) %>%
              pivot_longer(starts_with("predicted_"), names_to = c("rep"), names_pattern = "predicted_([0-9]+)", values_to = "predicted") %>%
              mutate(predicted = predicted / N) %>%
              mutate(year = years[i],
                     outcome = "approve_pres"))
}, mc.cores = 2) %>% bind_rows

# Persist both as RDS (consumed by the Shiny app) and as plain text.
saveRDS(predictdf, "shiny/basic.Rds")
write_delim(predictdf, "shiny/basic.delim")
06a860fe8c0291b53034902e7f32e7fa7e13c70e | a16e440a2f04998e7a2d2c56b0d61070d5d75422 | /lib/regression.R | f2241378b6646a981b62e86bd48e0872bf04115b | [] | no_license | wesleytao/BigDataElectricityAnalysis | 530a56408aaa7d1f8e36a588333d67e9fee0bd70 | c510e1943acc490737b39c43c2d0d14b93c5a848 | refs/heads/master | 2020-03-18T22:38:37.225325 | 2018-07-24T15:01:23 | 2018-07-24T15:01:23 | 135,356,105 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,576 | r | regression.R |
#regression
#input 18foodprice_unit.csv
#
# encoding UTF-8
library(foreign)
# NOTE(review): rm(list=ls()) and setwd() inside a script are discouraged --
# they clobber the user's workspace and working directory.
rm(list=ls())
setwd("C:/Users/Wesle/Documents/R")
# Unit-level food price data; columns come in X<item>.av_P (average price)
# and X<item>.daily_sold pairs per item id.
mydata<-read.csv("18foodprice_unit.csv")
all_names<-names(mydata)
# Drop the item_id column(s), then also drop the first remaining column,
# keeping only the price / quantity columns.
drop<-grep("item_id",all_names,ignore.case=F,fixed=T)
all_numbers<-1:length(all_names)
final<-setdiff(all_numbers,drop)
mydata<-mydata[final[-1]]
# 分析5977 产品
# myvar_5977<-c("5977","5992","5969","5976","6034","5984","6040")
#
# temp<-myvar_5977
# fetch<-as.numeric(apply(as.data.frame(temp),1,function(x){grep(x,names(mydata),ignore.case=F,fixed=T)}))#从中去取元素下标
#
# regression<-log(mydata[fetch])#需要取对数
# length(names(regression))
#回归
# log(1)#0 有些天数只卖出去1个 取对数后就是0 啊
# summary(mydata)
#
#
# # regression<-log(mydata[-1])
# class(regression)
# is.infinite(regression)
#
# summary(regression)
# # 5976 应该删除
# drop<-c("X5976.daily_sold","X5976.av_P")
# bool<-names(regression)%in%drop
# regression<-regression[!bool]
#
# # inf NA omit
# regression
#
#
# na.omit(regression)
#
# result<-lm(X5977.daily_sold~X5977.av_P+X5992.av_P+X5969.av_P+X6034.av_P+X5984.av_P+X6040.daily_sold,data=na.omit(regression))
# summary(result)
# Batch-process the demand regressions (comment translated from Chinese).
# Log-transform all price/quantity columns for log-log demand models.
regression<-log(mydata[-1])
# Zero observations become -Inf under log; recode them as missing.
regression[sapply(regression,is.infinite)]<-NA
# Regressions: each item's log daily sales on its own log average price plus
# selected competitor prices.  vif() (package car) checks multicollinearity.
library(car)
result_5977<-lm(X5977.daily_sold~X5977.av_P+X5992.av_P+X11399.av_P+X5984.av_P+X5991.av_P+X6040.av_P,data=(regression))
summary(result_5977)
vif(result_5977)

result_6106<-lm(X6106.daily_sold~X6106.av_P+X5992.av_P+X5969.av_P+X5968.av_P+X5991.av_P,data=regression)
summary(result_6106)
vif(result_6106)

result_5992<-lm(X5992.daily_sold~X5992.av_P+X11399.av_P+X6107.av_P+X5968.av_P+X6186.av_P,data=regression)
summary(result_5992)
vif(result_5992)

result_11399<-lm(X11399.daily_sold~X11399.av_P+X5977.av_P+X5968.av_P+X6186.av_P+X5383.av_P+X11149.av_P,data=regression)
summary(result_11399)
vif(result_11399)

# NOTE(review): X5969.av_P appears twice in this formula -- lm() drops the
# duplicate term, but the duplication looks unintentional; confirm.
result_5969<-lm(X5969.daily_sold~X5969.av_P+X5969.av_P+X5306.av_P+X12324.av_P+X11537.av_P+X5991.av_P,data=regression)
summary(result_5969)
vif(result_5969)

result_5306<-lm(X5306.daily_sold~X5306.av_P+X5969.av_P+X5991.av_P,data=regression)
vif(result_5306)
# NOTE(review): the outcome below is X5306.daily_sold although the object is
# named result_6107 -- likely a copy/paste slip; confirm the intended model.
result_6107<-lm(X5306.daily_sold~X5306.av_P+X5992.av_P+X5969.av_P+X5991.av_P,data=regression)
vif(result_6107)
result_5968<-lm(X5968.daily_sold~X5968.av_P+X5992.av_P+X5969.av_P+X12324.av_P,data=regression)
vif(result_5968)
result_12324<-lm(X12324.daily_sold~X12324.av_P+X5992.av_P+X5969.av_P+X6107.av_P+X5968.av_P,data=regression)
vif(result_12324)
summary(result_12324)
|
980961813b1e0c67d574fe8d9c1b88de7723d543 | e4c1422348ae1cd4aa316aad156fefe59670e776 | /pkgs/debias/man/MLEw2p_cpp.Rd | 45700b0836503fc12ae96aa2a071ba34e008faa2 | [] | no_license | thomas4912/pkgs | 161170c78068340a82ddde7535293f812cc77a73 | 8f060661cca439e13b16990bcd2268cc96aac4b3 | refs/heads/master | 2021-01-22T07:27:29.068120 | 2017-02-13T12:17:52 | 2017-02-13T12:17:52 | 81,818,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,505 | rd | MLEw2p_cpp.Rd | \name{MLEw2p_cpp}
\alias{MLEw2p_cpp}
\title{Weibull 2-parameter MLE calculation.}
\description{
\code{MLEw2p_cpp} is a wrapper function to a fast C++ implementation optimizing parameters of the 2-parameter
Weibull distribution for a set of data consisting of failures, or alternatively failures and suspensions.
}
\usage{
MLEw2p_cpp(x, s=NULL, MRRfit=NULL)
}
\arguments{
\item{x}{A vector of failure data.}
\item{s}{An optional vector of suspension data.}
\item{MRRfit}{An optional vector such as produced by MRRw2pxy having parameter order [1] Eta, [2] Beta.
If not provided, this function will calculate a suitable estimate of Beta to initiate the optimization.}
}
\value{
A vector containing results in the following order: Eta (scale), Beta (shape), Log-Likelihood.
}
\details{
This function calls a C++ function that identifies the root of the derivative of the likelihood function
with respect to Beta; then, given the optimal Beta, Eta is calculated as the root of the derivative of the
likelihood function with respect to Eta. The optimization algorithm employed is a discrete Newton, or secant, method
as demonstrated in a FORTRAN program published by Tao Pang.
}
\references{
Dr. Robert B. Abernethy, (2008) "The New Weibull Handbook, Fifth Edition"
Tao Pang,(1997) "An Introduction to Computational Physics"
}
\examples{
failures<-c(90,96,30,49,82)
suspensions<-c(100,45,10)
fit_result<-MLEw2p_cpp(failures,suspensions)
}
\keyword{ likelihood } |
760f5205fb5259d4d0ed76b91eb9bcdc8312b11f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mmpf/examples/makePermutedDesign.Rd.R | 9c5336a7731b2af39bd24b5159fea5f1bff7c358 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | makePermutedDesign.Rd.R | library(mmpf)
### Name: makePermutedDesign
### Title: creates a 'data.frame' with some columns permuted
### Aliases: makePermutedDesign
### ** Examples
data = data.frame(x = 1:3, y = letters[1:3])
makePermutedDesign(data, "x", 3)
|
3673dcae8c1c531f947e0ed765334b8ac13e5b01 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/bibliometrix/examples/lotka.Rd.R | f6ead8fff3052dda2cd69e2df6b0c5054674ae40 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | lotka.Rd.R | library(bibliometrix)
### Name: lotka
### Title: Lotka's law coefficient estimation
### Aliases: lotka
### ** Examples
data(scientometrics)
results <- biblioAnalysis(scientometrics)
L=lotka(results)
L
|
066e569fa829c1ace85502234b0fdea38873dbdc | a176626eb55b6525d5a41e2079537f2ef51d4dc7 | /Uni/Projects/code/P006.NAS/cn009_SA.r | 3c442595f32e19e4840d1cbc7b0baa6616850c28 | [] | no_license | zeltak/org | 82d696b30c7013e95262ad55f839998d0280b72b | d279a80198a1dbf7758c9dd56339e8a5b5555ff2 | refs/heads/master | 2021-01-21T04:27:34.752197 | 2016-04-16T04:27:57 | 2016-04-16T04:27:57 | 18,008,592 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,248 | r | cn009_SA.r | library(foreign)
library(stats)
library(mgcv)
library(splines)
library(MASS)
library(nlme)
# IMPORTS
#import mod1 DB (PM-AOD)
mb1 <- read.csv("c:/Users/ekloog/Documents/$Doc/3.PostDoc/3.1.Projetcs/3.1.6.NAS/3.1.6.4.Work/3.Analysis/MB_analysis/mb1_met.csv", header=T)
names(mb1)
# Outcomes: Fibrinogen, CRP, ICAM-1, VCAM-1
#########################################################################################################################
##############################################################################a###########################################
# ICAM
# Lag exposures to evaluate; each lag name has matching moving-average
# temperature ("cwtemp<lag>") and humidity ("cwhum<lag>") covariates in mb1.
icam_lags <- c("lag24h", "lag3day", "lagweek", "lag2week", "lag3week",
               "lagmonth", "lag2month", "lag3month", "lagyear")

# Results table: one row per lag window.
ICAM_restable <- data.frame(
  lag  = icam_lags,
  beta = numeric(length(icam_lags)),
  se   = numeric(length(icam_lags)),
  pc   = numeric(length(icam_lags)),
  L_CI = numeric(length(icam_lags)),
  H_CI = numeric(length(icam_lags)),
  sig  = numeric(length(icam_lags)),
  ciw  = numeric(length(icam_lags)),
  stringsAsFactors = FALSE
)

# Fit one mixed model of log(ICAM) on a single PM lag, adjusting for the
# matching-window temperature/humidity plus the fixed covariate set used
# throughout this script.  Random intercept per subject (id).
fit_icam_lag <- function(lag_name) {
  fml <- as.formula(paste0(
    "logicam ~ ", lag_name,
    " + PREDICTED + age + cwtemp", lag_name,
    " + bmi + as.factor(smk2) + cwhum", lag_name,
    " + diabete + statin + cos + sin"
  ))
  glmmPQL(fml, random = ~1 | id, family = gaussian, data = mb1, na = na.omit)
}

icam_fits <- vector("list", length(icam_lags))  # keep the fits for inspection
names(icam_fits) <- icam_lags

for (k in seq_along(icam_lags)) {
  lag_name <- icam_lags[k]
  lag_iqr  <- IQR(mb1[[lag_name]])

  fit <- fit_icam_lag(lag_name)
  icam_fits[[k]] <- fit
  ttab <- summary(fit)$tTable
  print(ttab)  # the original printed each model's tTable at top level

  beta <- fit$coef$fixed[2]  # coefficient of the lag exposure
  se   <- ttab[2, 2]         # its standard error
  sig  <- ttab[2, 5]         # its p-value

  ICAM_restable$beta[k] <- beta
  ICAM_restable$se[k]   <- se
  ICAM_restable$sig[k]  <- sig

  # Percent change (and 95% CI) in ICAM for a 1-IQR increase in the lag
  # exposure; the outcome is log-transformed, hence the exp() back-transform.
  # (Fixes the original copy/paste bug where pc for several lags was computed
  # from beta[1] instead of the lag's own beta.)
  ICAM_restable$pc[k]   <- (exp(beta * lag_iqr) - 1) * 100
  ICAM_restable$L_CI[k] <- (exp((beta - 1.96 * se) * lag_iqr) - 1) * 100
  ICAM_restable$H_CI[k] <- (exp((beta + 1.96 * se) * lag_iqr) - 1) * 100

  # NOTE(review): the original computed ciw as L_CI + H_CI; preserved here,
  # but a CI *width* would normally be H_CI - L_CI -- confirm the intent.
  ICAM_restable$ciw[k]  <- ICAM_restable$L_CI[k] + ICAM_restable$H_CI[k]
}
##################################################################################################################################################################################################################################################
|
0d4763c8ced9423fec85708cd18194801f4fecbf | bc536251f89d76d70f702647fc63a0b8df50032c | /Estructuras-Programacion-R/f12.R | d994a32b2bc0cfb17f2a89a9b8db80f3ef6f1b78 | [] | no_license | Louiso/Curso-R | 50ff8439ff42e6cb987bee9ead88a795e04e51ed | 0c0880196ca086b51eb718983fc45cfdab1c3ad1 | refs/heads/master | 2021-01-24T00:03:55.885946 | 2016-06-11T03:13:49 | 2016-06-11T03:13:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 161 | r | f12.R | # Distribucion Geometrica, en ejemplo
n <- 1000
p <- 0.25
u <- runif(n)
k <- ceiling(log(1-u) / log(1-p)) - 1
# Es mas eficiente
k <- floor(log(u) / log(1-p))
|
fc497659da837513e3e99a503735fe744357b720 | 8b1e43bc1809dba3d7def4500cd096550c04228e | /SMU_phylogeny.R | 306a11fb6fa56260d7c2f722879c8fe6ba226b3a | [] | no_license | garlandxie/phylo-traits-BEF-green-roofs | 217f477e1edd0320bcc9ca1938d2be34cd1d2a75 | 0c2b6536ae02e7eef0505a21db191c11878882cd | refs/heads/master | 2021-10-08T04:08:15.868087 | 2018-12-07T15:39:04 | 2018-12-07T15:39:04 | 103,181,691 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,874 | r | SMU_phylogeny.R | ############################################################################
################# METADATA ##############################################
############################################################################
# mega.phy: time-calibrated molecular phylogeny of 13 000+ plant species
# (Zane et al. 2014. Nature;
# updated by Qian and Jin. 2015. Journal of PLant Ecology)
# comm1: pseudo community matrix that contains spp of interest
# sample: list of binomial latin names of all spp of interest
# phylo: pruned subtree that contains spp of interest
############################################################################
#### Load Libraries ####
library(ape)
library(picante)
#### Load megaphylogeny.new ####
# Load the time-calibrated megaphylogeny (interactive file picker).
mega.phy <- read.tree(file.choose())

### Create pruned phylogeny from species of interest ###

# Species of interest.
# NOTE: Rhodiola_rhodantha acts as a congeneric relative (proxy) for
# Rhodiola_rosea and is renamed after pruning below.
sample <- c("Empetrum_nigrum",
            "Gaultheria_procumbens",
            "Vaccinium_vitis-idaea",
            "Danthonia_spicata",
            "Deschampsia_flexuosa",
            "Poa_compressa",
            "Campanula_rotundifolia",
            "Plantago_maritima",
            "Sedum_acre",
            "Solidago_bicolor",
            "Sagina_procumbens",
            "Rhodiola_rhodantha",
            "Sedum_spurium")

# One-row pseudo community matrix (all species "present"), as required by
# picante::prune.sample().
comm1 <- rbind(sample)
colnames(comm1) <- comm1

# Prune the megaphylogeny down to the sampled taxa.
phylo <- prune.sample(comm1, mega.phy)

# Rename the proxy tip to "Sedum_rosea".  Match by name instead of the
# original hard-coded phylo$tip.label[5], which renamed whatever species
# happened to sit at index 5 after pruning (the original comment itself
# warned about this).
phylo$tip.label[phylo$tip.label == "Rhodiola_rhodantha"] <- "Sedum_rosea"

############################################################################

# FINAL RESULT: plot the pruned phylogeny with a time axis.
plot.phylo(phylo, label.offset = 2)
axisPhylo()
|
a3538d3fd699d88c964d345f8d0ca53aa96b3af7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NISTunits/examples/NISTdegCtOdegCentigrade.Rd.R | a6c39d069a50c4e8ed42839b3748c43f4626e540 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 225 | r | NISTdegCtOdegCentigrade.Rd.R | library(NISTunits)
### Name: NISTdegCtOdegCentigrade
### Title: Convert degree Celsius to degree centigrade 15
### Aliases: NISTdegCtOdegCentigrade
### Keywords: programming
### ** Examples
NISTdegCtOdegCentigrade(10)
|
7d8f402fe4f1118d81fb72a83dc8a57a7cc6498c | e7febfbbb225fab212d0032457d1e27f6fffccfc | /cachematrix.R | 1c2cd0496213c5c1117f73a1dc8b4c52b952d8f6 | [] | no_license | Jules1980/ProgrammingAssignment2 | 13c57ecc6a654b56391236ab313f4bec0bddfe51 | b05ed326c132c9b3d9400e6b43fc064db72f1be3 | refs/heads/master | 2020-03-27T19:05:01.857120 | 2018-09-04T02:20:37 | 2018-09-04T02:20:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | r | cachematrix.R | ## makeCacheMatric - Julia Hamilton - R Programming - Week 3 Assignment
#1. set the value of the maxtrix
#2. get the value of the matrix
#3. set the inverse of the matrix
#4. get the get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Create a "cache-aware matrix": a list of closures that hold a matrix `x`
  # and a memoised copy of its inverse (`invs`).
  #
  # @param x A matrix (assumed invertible by the companion cacheSolve()).
  # @return A list with four functions:
  #   set(y)            replace the matrix and invalidate the cached inverse
  #   get()             return the current matrix
  #   setinverse(inv)   store a computed inverse in the cache
  #   getinverse()      return the cached inverse, or NULL if not yet set
  invs <- NULL
  set <- function(y) {
    x <<- y
    # Any previously cached inverse belongs to the old matrix -- drop it.
    invs <<- NULL
  }
  get <- function() x
  # BUG FIX: the original assigned to `inv` (an undefined name, leaking to an
  # enclosing environment) instead of the cache variable `invs`, so
  # getinverse() always returned NULL and caching never worked.
  setinverse <- function(inverse) invs <<- inverse
  getinverse <- function() invs
  list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## computes the inverse of the matric returned by makeCacheMatrix
## if the inverse has already been calculated then the cachesolve should retrieve the inverse
## from the cache
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by a makeCacheMatrix() object.
  # If the inverse was computed before, the cached copy is returned (with a
  # message); otherwise it is computed with solve(), stored, and returned.
  #
  # @param x   A cache-aware matrix created by makeCacheMatrix().
  # @param ... Unused; accepted for call compatibility.
  # @return The matrix inverse of x$get().
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, then remember for next time.
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("getting cache data.")
  }
  cached
}
|
3bf357761a595d2ce24d15ced2dbebfe99fbc7e7 | 221072e790a97e05eea0debadbe87955b81ae2e1 | /R/Squiggle-methods.R | 794c9cab41d280881ba949a49bd1fffb70aa43e4 | [
"Apache-2.0"
] | permissive | Shians/PorexploreR | 2ca3d93956ba4592564c8019f1a76252a0696283 | 5086f3e704c6c6036a80d103752d9bce984a0e15 | refs/heads/master | 2020-06-06T05:51:25.714267 | 2019-06-20T07:44:16 | 2019-06-20T07:44:16 | 192,655,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,979 | r | Squiggle-methods.R | #' @include Squiggle.R
#' @import ggplot2
#' @importFrom tibble tibble
#' @importFrom dplyr filter
NULL
#' @rdname Squiggle-class
#' @export
setGeneric("raw_signal", function(object) {
standardGeneric("raw_signal")
})
#' Extract raw signal
#'
#' @param object the Squiggle object
#'
#' @return numeric vector of raw signal values
#'
#' @export
setMethod("raw_signal", signature(object = "Squiggle"), function(object) {
object@raw_signal
})
#' @rdname Squiggle-class
#'
#' @export
setGeneric("signal", function(object) {
standardGeneric("signal")
})
#' Extract signal
#'
#' @param object the Squiggle object
#'
#' @return numeric vector of signal values transformed to pA scale
#' @export
setMethod("signal", signature(object = "Squiggle"), function(object) {
if (length(object@signal) != 0) {
as.numeric(object@signal)
} else {
as.numeric(object@scaling * (object@raw_signal + object@offset))
}
})
#' @rdname Squiggle-class
#'
#' @export
setGeneric("meta", function(object) {
standardGeneric("meta")
})
#' Extract signal metadata
#'
#' @param object the Squiggle object
#'
#' @return list containing various metadata for transforming the signal to pA scale
#' @export
setMethod("meta", signature(object = "Squiggle"), function(object) {
list(
range = object@range,
digitisation = object@digitisation,
offset = object@offset,
sampling_rate = object@sampling_rate,
scaling = object@scaling
)
})
#' Plot nanopore signal
#'
#' @param object the Squiggle object to plot
#' @param time_span the time span in seconds to plot (default: c(0, 0.5))
#'
#' @export
setGeneric("plot_squiggle", function(object, time_span = c(0, 0.5)) {
standardGeneric("plot_squiggle")
})
.plot_squiggle <- function(object, time_span) {
  # Shared implementation behind both plot_squiggle() S4 methods: draws the
  # pA-scaled nanopore signal as a step plot over the requested time window.
  #
  # @param object    A Squiggle object.
  # @param time_span Numeric length-2 vector c(start, end) in seconds; only
  #                  samples strictly inside this window are plotted.
  # @return A ggplot object.
  signal <- signal(object)  # current trace in pA (see signal() accessor)
  meta <- meta(object)      # metadata list; sampling_rate used below
  # Time axis: sample index divided by the sampling rate gives seconds.
  plot_data <- tibble::tibble(
    time = seq_along(signal) / meta$sampling_rate,
    signal = signal
  ) %>%
    dplyr::filter(.data$time > time_span[1] & .data$time < time_span[2])
  # Pad the y-axis by 5% of the observed signal range on each side.
  ylim_offset <- 0.05 * (max(plot_data$signal) - min(plot_data$signal))
  ylim_expanded <- c(
    min(plot_data$signal) - ylim_offset,
    max(plot_data$signal) + ylim_offset
  )
  # NOTE(review): ggplot2::aes_string() is deprecated in current ggplot2;
  # consider aes(.data$time, .data$signal) when next touching this code.
  plot_data %>%
    ggplot2::ggplot(ggplot2::aes_string(x = "time", y = "signal")) +
    ggplot2::geom_step() +
    ggplot2::coord_cartesian(
      ylim = ylim_expanded,
      expand = FALSE  # no extra expansion beyond the manual 5% padding above
    ) +
    ggplot2::theme_classic() +
    ggplot2::theme(panel.background = ggplot2::element_rect(colour = "black", size = 1.5)) +
    ggplot2::xlab("time (s)") +
    ggplot2::ylab("current (pA)")
}
#' @describeIn plot_squiggle
#'
#' @export
setMethod("plot_squiggle", c(object = "Squiggle", time_span = "numeric"),
.plot_squiggle
)
#' @describeIn plot_squiggle
#'
#' @export
setMethod("plot_squiggle", c(object = "Squiggle", time_span = "missing"),
.plot_squiggle
)
|
f6d5d45a12d5142588f4f2ba1dacfcc0401824a9 | 0c93e201530984c1dfd41db112457c017fd55a83 | /gyokaikyor/R/handle_str.R | 684754be3070aab7bdf161f5f8b4c23a54274f83 | [] | no_license | Rindrics/reformr | a763e4a0885f2913d24e687cc2e5ab8ceb7b14db | 5f1994f9febe8d9adeb3f0003e93af6bb444b9c3 | refs/heads/master | 2021-10-20T00:11:22.350559 | 2019-02-25T01:05:50 | 2019-02-25T01:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 145 | r | handle_str.R | get_col2load <- function(target, regex, offset) {
match <- stringr::str_detect(target, regex)
out <- which(match == TRUE) + offset
out
}
|
b5ad2d4c45051afbb3c0f4fb02f20f26c245d263 | eb6c9070f548b6e21b7439b247997665c951b6f7 | /man/get_sub.Rd | d99164746395f3a5ffbba6e2376216c0519c26f2 | [
"MIT"
] | permissive | billspat/azrunr | 48b7330d003a1247c198de4bf76d2c59d564b815 | 256e6cc253e76d6b5b6faf6e2c7c9dc1411f2c77 | refs/heads/master | 2023-03-23T00:18:57.121185 | 2021-03-17T18:52:12 | 2021-03-17T18:52:12 | 276,467,750 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 750 | rd | get_sub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/azure_setup.R
\name{get_sub}
\alias{get_sub}
\title{get the azure subscription object for the given sub id value
can also be used to test if the azuresub is valid (will return NULL)
requires Azure login and this function will initiate that}
\usage{
get_sub(azuresub = getOption("azuresub"))
}
\arguments{
\item{azuresub}{optional string of subscription id, e.g. xxxxxxxx-xxx-xxx-xxxx-xxxxxxxxxxxx}
}
\value{
AzureRMR subscription object, or NULL if invalid sub id
}
\description{
get the azure subscription object for the given sub id value
can also be used to test if the azuresub is valid (will return NULL)
requires Azure login and this function will initiate that
}
|
45de4b33919bc7aa8a0894f782df0215a807b8d8 | 97a10d4612014d6f1b3cae279d70983f876b7586 | /inst/expt/topography3d.R | 9ee6a7b8bfde7f9f815b88f08369d8946ccbd25f | [
"MIT"
] | permissive | mdsumner/pproj | c3dfe25404bb1c0063dd0c8960e5d48ff8559d93 | 9aa2c45c8efd71c5bc19ad1d6b58a93834cbbebb | refs/heads/main | 2023-02-06T08:47:59.650271 | 2023-02-05T22:55:54 | 2023-02-05T22:55:54 | 24,184,076 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,017 | r | topography3d.R | library(marmap)
library(geosphere)
library(raster)
library(geometry)
## one part of this is upside down
llh2xyz <- function(lonlatheight, rad = 500) {
  # Convert lon/lat/height rows to Cartesian x/y/z on a sphere of radius
  # `rad`.  Note the sign convention: z decreases with latitude
  # (z = height - rad * sin(lat)), which is why the original author remarked
  # that "one part of this is upside down".
  #
  # @param lonlatheight Matrix-like with columns: longitude (deg),
  #                     latitude (deg), height (same units as rad).
  # @param rad          Sphere radius (default 500).
  # @return A matrix with columns named x, y, z.
  lat_rad <- lonlatheight[, 2] * pi / 180
  lon_rad <- lonlatheight[, 1] * pi / 180
  x <- rad * cos(lat_rad) * cos(lon_rad)
  y <- rad * cos(lat_rad) * sin(lon_rad)
  z <- lonlatheight[, 3] - rad * sin(lat_rad)
  cbind(x, y, z)
}
b <- getNOAA.bathy(-180, 180, -90, 0, resolution = 60, keep=TRUE, antimeridian=FALSE)
r <- raster(list(x = seq(-180 + 360/nrow(b)/2, 180 - 360/nrow(b)/2, length = nrow(b)),
y = seq(-90 + 90/ncol(b)/2 , 90 - 90/ncol(b)/2, length = ncol(b)), z = b))
xyz <- randomCoordinates(76000)
xyz <- xyz[xyz[,2] <= 0, ]
xyz <- cbind(xyz, extract(r, xyz[,1:2], method = "bilinear"))
#xyz[,1:2] <- project(xyz[,1:2], "+proj=laea +lat_0=-90")
f <- 50
xyz <- llh2xyz(xyz, rad = 637800)
ind <- t(delaunayn(xyz[,1:2]))
aspect3d("iso")
rgl.triangles(xyz[ind, 1], xyz[ind, 2], xyz[ind, 3] )
|
00c5e6ddb00da671ed0bd5e3bbee9a03df09535e | 304fe6596b84657320497f82c73bd319497faf9a | /man/ManualAlignImages.Rd | 6c236af02d739b1f4f3b4299665c27b808fe930f | [] | no_license | diegosainzg/STUtility | 30e76fd1c323b3d0806e43c85ca0b05922ed2ff4 | 1817a5dab56589459e0442216cbd9ee219842be3 | refs/heads/master | 2023-07-18T12:37:37.321338 | 2021-09-08T14:06:37 | 2021-09-08T14:06:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,010 | rd | ManualAlignImages.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/image_processing.R
\name{ManualAlignImages}
\alias{ManualAlignImages}
\alias{ManualAlignImages.Staffli}
\alias{ManualAlignImages.Seurat}
\title{Manual alignment of images}
\usage{
ManualAlignImages(
object,
type = NULL,
reference.index = 1,
edges = TRUE,
verbose = FALSE,
limit = 0.3,
maxnum = 1000,
fix.axes = FALSE,
custom.edge.detector = NULL
)
\method{ManualAlignImages}{Staffli}(
object,
type = "masked.masks",
reference.index = 1,
edges = TRUE,
verbose = FALSE,
limit = 0.3,
maxnum = 1000,
fix.axes = FALSE,
custom.edge.detector = NULL
)
\method{ManualAlignImages}{Seurat}(
object,
type = "masked.masks",
reference.index = 1,
edges = TRUE,
verbose = FALSE,
limit = 0.3,
maxnum = 1000,
fix.axes = FALSE,
custom.edge.detector = NULL
)
}
\arguments{
\item{object}{Seurat object}
\item{type}{Image type to use as input for alignment [default: 'masked.masks']}
\item{reference.index}{Specifies reference sample image for alignment [default: 1]}
\item{edges}{Uses the tissue edges for alignment}
\item{verbose}{Print messages}
\item{limit}{Pixel intensity limit for thresholding}
\item{maxnum}{Maximum number of points to display in the app}
\item{custom.edge.detector}{Custom function used to detect edges in tissue image. If a function is provided, the
edges option will be overridden.}
}
\value{
A Staffli object
A Seurat object
}
\description{
Creates an interactive shiny application to align images manually
}
\examples{
# Create a new Staffli object, mask, align and plot images (will start an interactive shiny session)
st.obj <- CreateStaffliObject(imgs, meta.data)
st.obj <- LoadImages(st.obj, verbose = TRUE) \%>\% MaskImages() \%>\% ManualAlignImages()
plot(st.obj)
# Load, mask, align and plot images (will start an interactive shiny session)
se <- LoadImages(se, verbose = TRUE) \%>\% MaskImages() \%>\% ManualAlignImages()
ImagePlot(se)
}
|
b6b2eeff8ab6c8f9ed5099514b6a8ff6b9eb5da3 | eb159b0bc342c6b7e698ecce39001cbd0361b240 | /man/findoneBP.Rd | 44ed776951cbcf591372d0b9df425b1d6b8abf87 | [] | no_license | tudou2015/pwlPackage | a1a51a5a6d1fbf2c20a81dc1932635781a6b0261 | 98da97128e0626750e490f9b20859021fb53e230 | refs/heads/master | 2021-01-12T10:21:15.033645 | 2016-12-10T21:09:32 | 2016-12-10T21:09:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 513 | rd | findoneBP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findoneBP.R
\name{findoneBP}
\alias{findoneBP}
\title{A function that is used to find one breakpoint only, without creating the SSR matrix.}
\usage{
findoneBP(data, l)
}
\arguments{
\item{data}{The dataset that wants to be approximated}
\item{l}{The minimum size of a segment}
}
\value{
The breakpoint that gives the minimum ssr
}
\description{
A function that is used to find one breakpoint only, without creating the SSR matrix.
}
|
d38a1a952c4ecf9615d1b925775b027e6af9a730 | e0c2333f3d040ca611556ed4fdf922ee40048a40 | /R/mongrel_sim.R | 524e691341b84f03f4423896d9869121b91a0f70 | [] | no_license | jimhester/mongrel | 3d174970ec1cadb3fd73bce9c2ac5dd0245c4f4a | d21bbcf10e6b9c94cf4a8e3d1fff67dc80c6340f | refs/heads/master | 2020-03-22T18:54:56.060946 | 2018-07-10T21:54:46 | 2018-07-10T21:54:46 | 140,491,115 | 0 | 0 | null | 2018-07-10T21:55:28 | 2018-07-10T21:55:28 | null | UTF-8 | R | false | false | 1,603 | r | mongrel_sim.R | #' Simulate simple mongrel dataset and priors (for testing)
#'
#' @param D number of multinomial categories
#' @param N number of samples
#' @param Q number of covariates (first one is an intercept, must be > 1)
#' @param use_names should samples, covariates, and categories be named
#' @param true_priors should Xi and upsilon be chosen to have mean at true
#' simulated value
#' @return list
#' @export
#' @importFrom driver alrInv
#' @importFrom stats rnorm rmultinom
#' @examples
#' sim <- mongrel_sim()
mongrel_sim <- function(D=10, N=30, Q=2, use_names=TRUE, true_priors=FALSE){
  # Simulates a multinomial/ALR regression dataset plus matching priors.
  # The exact sequence of sample()/rnorm()/rmultinom() calls determines the
  # result for a given RNG seed, so statement order must not be changed.
  # Simulate Data
  # Covariance with random integer variances on the diagonal...
  Sigma <- diag(sample(1:8, D-1, replace=TRUE))
  # ...and one fixed negative off-diagonal pair; implicitly assumes D >= 4
  # (indices 2 and 3 must exist) -- TODO confirm callers never pass D < 4.
  Sigma[2, 3] <- Sigma[3,2] <- -1
  Gamma <- diag(sqrt(rnorm(Q)^2))  # i.e. diag(abs(rnorm(Q)))
  Theta <- matrix(0, D-1, Q)       # prior mean for regression coefficients
  # Draw coefficients Phi ~ MN(Theta, Sigma, Gamma) via the Cholesky factors.
  Phi <- Theta + t(chol(Sigma))%*%matrix(rnorm(Q*(D-1)), nrow=D-1)%*%chol(Gamma)
  X <- matrix(rnorm(N*(Q-1)), Q-1, N)
  X <- rbind(1, X)                 # first covariate row is the intercept
  # Linear predictor plus matrix-normal noise with covariance Sigma.
  Eta <- Phi%*%X + t(chol(Sigma))%*%matrix(rnorm(N*(D-1)), nrow=D-1)
  # Inverse additive log-ratio transform: Eta (D-1 x N) -> proportions (D x N).
  Pi <- t(driver::alrInv(t(Eta)))
  Y <- matrix(0, D, N)
  # One multinomial draw per sample, with a random sequencing depth.
  for (i in 1:N) Y[,i] <- rmultinom(1, sample(5000:10000), prob = Pi[,i])
  if (use_names){
    colnames(X) <- colnames(Y) <- paste0("s", 1:N)  # samples
    rownames(Y) <- paste0("c", 1:D)                 # categories
    rownames(X) <- paste0("x", 1:Q)                 # covariates
  }
  # Priors
  if (true_priors){
    # Center the inverse-Wishart prior at the simulated Sigma.
    upsilon <- D+10
    Xi <- Sigma*(upsilon-D-2)
  } else {
    # Weak default prior.
    upsilon <- D
    Xi <- diag(D-1)
  }
  # Precompute
  # Quantities reused by downstream fitting code.
  K <- solve(Xi)
  A <- solve(diag(N)+ t(X)%*%Gamma%*%X)
  return(list(Sigma=Sigma, Gamma=Gamma, D=D, N=N, Q=Q, Theta=Theta, Phi=Phi,
              X=X, Y=Y, Eta=Eta, upsilon=upsilon, Xi=Xi, K=K, A=A))
}
caf5df427a250d4ecd9f44bca89b6be751fab906 | 754868022840f77a82c56e9c1f2632dd6412c004 | /man/loadModels.Rd | 581972d74facc16fdcf42d07219503637371b035 | [] | no_license | tetratech/baytrends | d0ef7f96aff8be56193f76d6fdb8f6148d3bf6b2 | f5227d506b88db28da73f015ce81ef98fa0fe622 | refs/heads/main | 2023-06-09T18:41:34.007828 | 2023-05-25T12:09:29 | 2023-05-25T12:09:29 | 113,074,527 | 12 | 4 | null | 2023-04-05T00:53:05 | 2017-12-04T17:40:27 | R | UTF-8 | R | false | true | 993 | rd | loadModels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadModels.R
\name{loadModels}
\alias{loadModels}
\title{Load Built-in GAM formulas}
\usage{
loadModels(gamSelect = "gam4")
}
\arguments{
\item{gamSelect}{character vector of models (Current options include gam0,
gam1, gam2, gam3, gam4, gam5)}
}
\value{
Returns a list with GAM formulas
}
\description{
Returns built-in GAM formulas
}
\details{
By default, the function analysisOrganizeData will store the formulas for
gam0-gam4 in the variable analySpec$gamModels as a list. The user can
customize this list with the function loadModels (see example).
}
\examples{
# run analysisOrganizeData function to create the list analySpec
dfr <- analysisOrganizeData (dataCensored, report=NA)
df <- dfr[["df"]]
analySpec <- dfr[["analySpec"]]
# current models in analySpec
analySpec$gamModels
# set models in analySpec to gam0, gam1, and gam2 only
analySpec$gamModels <- loadModels(c('gam0','gam1','gam2'))
}
|
9d0823dca3245af7de31b87111608b125917e295 | 2c38fc71287efd16e70eb69cf44127a5f5604a81 | /man/tar_assert.Rd | 9203cead7d0a6f74092fd60c444cca685e810fbc | [
"MIT",
"Apache-2.0"
] | permissive | ropensci/targets | 4ceef4b2a3cf7305972c171227852338dd4f7a09 | a906886874bc891cfb71700397eb9c29a2e1859c | refs/heads/main | 2023-09-04T02:27:37.366455 | 2023-09-01T15:18:21 | 2023-09-01T15:18:21 | 200,093,430 | 612 | 57 | NOASSERTION | 2023-08-28T16:24:07 | 2019-08-01T17:33:25 | R | UTF-8 | R | false | true | 3,998 | rd | tar_assert.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_assert.R
\name{tar_assert}
\alias{tar_assert}
\alias{tar_assert_chr}
\alias{tar_assert_dbl}
\alias{tar_assert_df}
\alias{tar_assert_equal_lengths}
\alias{tar_assert_envir}
\alias{tar_assert_expr}
\alias{tar_assert_flag}
\alias{tar_assert_file}
\alias{tar_assert_finite}
\alias{tar_assert_function}
\alias{tar_assert_function_arguments}
\alias{tar_assert_ge}
\alias{tar_assert_identical}
\alias{tar_assert_in}
\alias{tar_assert_not_dirs}
\alias{tar_assert_not_dir}
\alias{tar_assert_not_in}
\alias{tar_assert_inherits}
\alias{tar_assert_int}
\alias{tar_assert_internet}
\alias{tar_assert_lang}
\alias{tar_assert_le}
\alias{tar_assert_list}
\alias{tar_assert_lgl}
\alias{tar_assert_name}
\alias{tar_assert_named}
\alias{tar_assert_names}
\alias{tar_assert_nonempty}
\alias{tar_assert_not_expr}
\alias{tar_assert_nzchar}
\alias{tar_assert_package}
\alias{tar_assert_path}
\alias{tar_assert_match}
\alias{tar_assert_nonmissing}
\alias{tar_assert_positive}
\alias{tar_assert_scalar}
\alias{tar_assert_store}
\alias{tar_assert_target}
\alias{tar_assert_target_list}
\alias{tar_assert_true}
\alias{tar_assert_unique}
\alias{tar_assert_unique_targets}
\title{Assertions}
\usage{
tar_assert_chr(x, msg = NULL)
tar_assert_dbl(x, msg = NULL)
tar_assert_df(x, msg = NULL)
tar_assert_equal_lengths(x, msg = NULL)
tar_assert_envir(x, msg = NULL)
tar_assert_expr(x, msg = NULL)
tar_assert_flag(x, choices, msg = NULL)
tar_assert_file(x)
tar_assert_finite(x, msg = NULL)
tar_assert_function(x, msg = NULL)
tar_assert_function_arguments(x, args, msg = NULL)
tar_assert_ge(x, threshold, msg = NULL)
tar_assert_identical(x, y, msg = NULL)
tar_assert_in(x, choices, msg = NULL)
tar_assert_not_dirs(x, msg = NULL)
tar_assert_not_dir(x, msg = NULL)
tar_assert_not_in(x, choices, msg = NULL)
tar_assert_inherits(x, class, msg = NULL)
tar_assert_int(x, msg = NULL)
tar_assert_internet(msg = NULL)
tar_assert_lang(x, msg = NULL)
tar_assert_le(x, threshold, msg = NULL)
tar_assert_list(x, msg = NULL)
tar_assert_lgl(x, msg = NULL)
tar_assert_name(x)
tar_assert_named(x, msg = NULL)
tar_assert_names(x, msg = NULL)
tar_assert_nonempty(x, msg = NULL)
tar_assert_not_expr(x, msg = NULL)
tar_assert_nzchar(x, msg = NULL)
tar_assert_package(package)
tar_assert_path(path, msg = NULL)
tar_assert_match(x, pattern, msg = NULL)
tar_assert_nonmissing(x, msg = NULL)
tar_assert_positive(x, msg = NULL)
tar_assert_scalar(x, msg = NULL)
tar_assert_store(store)
tar_assert_target(x, msg = NULL)
tar_assert_target_list(x)
tar_assert_true(x, msg = NULL)
tar_assert_unique(x, msg = NULL)
tar_assert_unique_targets(x)
}
\arguments{
\item{x}{R object, input to be validated. The kind of object depends on the
specific assertion function called.}
\item{msg}{Character of length 1, a message to be printed to the console
if \code{x} is invalid.}
\item{choices}{Character vector of choices of \code{x} for certain assertions.}
\item{args}{Character vector of expected function argument names.
Order matters.}
\item{threshold}{Numeric of length 1, lower/upper bound for
assertions like \code{tar_assert_le()}/\code{tar_assert_ge()}.}
\item{y}{R object, value to compare against \code{x}.}
\item{class}{Character vector of expected class names.}
\item{package}{Character of length 1, name of an R package.}
\item{path}{Character, file path.}
\item{pattern}{Character of length 1, a \code{grep} pattern for certain
assertions.}
\item{store}{Character of length 1, path to the data store of the pipeline.}
}
\description{
These functions assert the correctness of user inputs
and generate custom error conditions as needed. Useful
for writing packages built on top of \code{targets}.
}
\examples{
tar_assert_chr("123")
try(tar_assert_chr(123))
}
\seealso{
Other utilities to extend targets:
\code{\link{tar_condition}},
\code{\link{tar_language}},
\code{\link{tar_test}()}
}
\concept{utilities to extend targets}
|
e31bbe83eb650cf27835335a5fa5e70733c1b700 | dc78e2e767d9a9b817f8915420f62cffba8bd390 | /app.R | 20324c419809c8315cc9062fdfe2f7073462d1ad | [] | no_license | enjieli/BAILA_ShinyR | 03da0647adacaa7964ef710952d8a295f0791f4b | 46ec7367d2413b8378e05a5e888921863195a630 | refs/heads/master | 2021-08-08T08:35:05.851209 | 2020-08-06T01:25:56 | 2020-08-06T01:25:56 | 208,141,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,865 | r | app.R | rm(list=ls())
library(leaflet)
library(geojsonsf)
library(sf)
library(tidyverse)
library(shiny)
library(ggmap)
library(classInt)
library(RColorBrewer)
rsconnect::setAccountInfo(name='janeli',
token='YOUR API TOKEN',
secret='YOUR API secret')
typology <- geojson_sf("bg.geojson")
typology$urban_types <- as.factor(typology$urban_types)
levels(typology$urban_types)
levels(typology$urban_types) <- c("Low development with natural vegetation",
"Dams, reservoirs, and wetlands",
"Foothill areas",
"Urban parks and open space",
"Valley arterial areas",
"Valley less developed areas",
"Basin less developed areas",
"Most developed areas",
"Furthest from regional parks with natural vegetation")
####ggmap to display the sites#####
library(ggmap)
register_google(key = api_key)
#######################################
#######################################
#######################################
#######################################
server <- function(input, output) {
  # Shiny server: geocodes the user-entered address, intersects the point
  # with the urban-typology polygons, and drives three outputs -- the type
  # label (output$type), a histogram (output$histgram), and a leaflet map
  # (output$map) updated with a marker on each button press.
  # Get latitude and longitude
  # Re-runs only when the "Enter" button (input$go) is pressed.
  geocode_origin <- eventReactive(input$go,{
    geocode(input$Location)
  })
  # Returns the urban-type label (character) of the polygon containing the
  # geocoded point.  NOTE(review): the `x` argument is ignored -- it is
  # immediately overwritten by geocode_origin(); confirm this is intentional.
  return_urban_type<-
    function(x) {
      x <- geocode_origin()
      pt_sf <- st_as_sf(x, coords = c("lon", "lat"), crs = 4326, agr = "constant")
      result_types<- st_intersection(pt_sf, typology)
      return(as.character(result_types$urban_types))
    }
  # Returns the full attribute row for the containing polygon with the
  # point's X/Y coordinates bound on.  Same ignored-argument caveat as above.
  get_urban_type<-
    function(x) {
      x <- geocode_origin()
      pt_sf <- st_as_sf(x, coords = c("lon", "lat"), crs = 4326, agr = "constant")
      intersect_df<- st_intersection(pt_sf, typology)
      coords <- st_coordinates(intersect_df)
      intersection_result <- cbind(intersect_df, coords)
    }
  output$type <- renderPrint({return_urban_type(geocode_origin)})
  # Histogram of the selected variable across all polygons, with a dashed red
  # line at the entered location's value.
  output$histgram <- renderPlot({
    pt_df <-
      get_urban_type(geocode_origin) %>%
      st_set_geometry(NULL) %>%
      select(input$Variable)
    # Rename the single selected column so ggplot can refer to it generically.
    names(pt_df)[1] <- "variable"
    hist_df <-
      typology %>%
      st_set_geometry(NULL) %>%
      select(input$Variable)
    names(hist_df)[1] <- "variable"
    hist_df %>%
      ggplot(aes(x=variable)) +
      geom_histogram(bins= input$bins, fill = "lightblue", color= "white") +
      geom_vline(aes(xintercept = pt_df$variable), color="red", linetype="dashed", size=1 )+
      theme_classic() +
      xlab(str_replace_all(paste0(input$Variable), "_", " ") ) +
      ylab("frequency")
  })
  # Base choropleth of the typology polygons with selectable basemaps.
  output$map <- renderLeaflet({
    # generate base leaflet map
    # 9-class spectral palette, reversed so it maps onto the 9 urban types.
    newcol <- colorRampPalette(brewer.pal(9,"Spectral"))
    newcolor <- rev (newcol(9))
    newcolor
    pal <- colorFactor(newcolor, levels = levels(typology$urban_types))
    # HTML popup listing all attributes for the clicked polygon.
    popup <- paste0( "<br><strong> urban type: </strong>", typology$urban_types,
                     "<br><strong> population: </strong>", typology$population,
                     "<br><strong> annual mean precipitation: </strong>", typology$annual_precipitation,
                     "<br><strong> annual mean temperature: </strong>", typology$average_temperature,
                     "<br><strong> elevation: </strong>", typology$elevation,
                     "<br><strong> percentage of impervious surface: </strong>", typology$percentage_of_impervious_surface,
                     "<br><strong> percentage of tree canopy coverage: </strong>", typology$percentage_of_tree_canopy_coverage,
                     "<br><strong> percentage of water and wetlands: </strong>", typology$percentage_of_water_and_wetlands,
                     "<br><strong> distance to natural areas: </strong>", typology$distance_to_natural_areas,
                     "<br><strong> traffic density: </strong>", typology$traffic_density,
                     "<br><strong> traffic noise: </strong>", typology$traffic_noise)
    leaflet(typology) %>%
      addProviderTiles(providers$Stamen.Toner,group="Stamen Toner") %>%
      addProviderTiles(providers$OpenStreetMap.Mapnik,group="Open Street Map") %>%
      addProviderTiles(providers$Esri.WorldImagery,group="Esri WorldImagery") %>%
      addProviderTiles(providers$Stamen.Terrain,group="Stamen Terrain") %>%
      addPolygons(stroke = FALSE, smoothFactor = 0.3, fillOpacity = 0.8,
                  fillColor = ~pal(urban_types), popup = popup,
                  highlightOptions = highlightOptions(fillColor = "yellow", weight = 2,
                                                      bringToFront = TRUE)) %>%
      addLegend("bottomleft", pal = pal, opacity = 1,
                values = ~urban_types,
                title = "Urban Type") %>%
      addLayersControl(
        baseGroups = c("Stamen Toner", "Open Street Map", "Esri WorldImagery", "Stamen Terrain")) %>%
      clearPopups()
  })
  # On each button press: zoom the existing map to the geocoded point and
  # drop a marker with the polygon's attributes (via leafletProxy, so the
  # base map is not rebuilt).
  observeEvent(input$go,{
    df<-get_urban_type(geocode_origin)
    leafletProxy("map", data = df) %>%
      setView( lng = df$X, lat=df$Y,zoom=15) %>%
      clearMarkers() %>%
      addMarkers(lng = ~X, lat = ~Y,
                 popup = paste0( "<br><strong> urban type: </strong>", df$urban_types,
                                 "<br><strong> population: </strong>", df$population,
                                 "<br><strong> annual mean precipitation: </strong>", df$annual_precipitation,
                                 "<br><strong> annual mean temperature: </strong>", df$average_temperature,
                                 "<br><strong> elevation: </strong>", df$elevation,
                                 "<br><strong> percentage of impervious surface: </strong>", df$percentage_of_impervious_surface,
                                 "<br><strong> percentage of tree canopy coverage: </strong>", df$percentage_of_tree_canopy_coverage,
                                 "<br><strong> percentage of water and wetlands: </strong>", df$percentage_of_water_and_wetlands,
                                 "<br><strong> distance to natural areas: </strong>", df$distance_to_natural_areas,
                                 "<br><strong> traffic density: </strong>", df$traffic_density,
                                 "<br><strong> traffic noise: </strong>", df$traffic_noise)
      )
  })
}
# UI: address input + "Enter" button, the matched urban-type label, a
# variable selector with histogram, and the full-height leaflet map.
# NOTE(review): "adress" in the text-input label is a typo for "address"
# (user-facing string; left unchanged here).
ui <- fluidPage(
  titlePanel("LA urban habitat classification"),
  sidebarLayout(
    sidebarPanel( textInput("Location", "Type your adress here:", ""),
                  actionButton("go", "Enter"),
                  br(),
                  h4("This location is in type:"),
                  textOutput("type"),
                  br(),
                  br(),
                  # Histogram variable choices: every typology column except
                  # the type label and the block-group id.
                  selectInput(inputId = "Variable",
                              label = "Display histogram of:",
                              choices = sort(unique(names(typology %>%
                                                            st_set_geometry(NULL) %>%
                                                            select(-c(urban_types, bg_id)))))),
                  sliderInput(inputId = "bins",
                              label = "Number of bins:",
                              min = 1,
                              max = 50,
                              value = 30),
                  plotOutput(outputId = "histgram", height="320px"),
                  h6("Blue histogram is the distribution of
                     selected variable for the whole study area;
                     red line represents the data of the entered location")),
    mainPanel(leafletOutput("map", width="100%",height="800px"))
  ))
# Launch the app with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
1d8cc762a0a112e3f4ecb6d123ab56230118cdd4 | 07c1415fbd3bd67accf767aa5df03d4ec8361258 | /create_data/create_crosswalk.R | b6954550d1d8ab158dea149392225d254f3952eb | [] | no_license | Meeris/kuntar | ab4df7c175b96b765dda4f3cd4bb39b0ac1fa54b | 382c0b6f804abe0d9f8d7e94c7439ce140752151 | refs/heads/main | 2020-07-25T01:53:49.982674 | 2019-11-10T22:01:36 | 2019-11-10T22:01:36 | 208,121,156 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,352 | r | create_crosswalk.R | ## ---------------------------
##
## Script name: create_croswalk
##
## Purpose of script: create crosswalk files
##
## Author: Meeri Seppa
##
## Date Created: 2019-07-11
##
## Copyright (c) Meeri Seppa, 2019
## Email: meeri.seppa@helsinki.fi
##
## ---------------------------
##
## Notes:
##
##
## ---------------------------
# packages functions and data ---------------------------------------------
## packages
library(tidyverse)
## functions
# get_intersection() used below is defined in this sourced file.
source("./functions/functions.R")
## map data
# These load() calls are expected to populate the objects used below
# (in particular `files`); exact contents come from the .RData files -- TODO
# confirm which file defines `files`.
load("./mapdata/mapfiles_fixed_no_overlaps_or_errors")
load("./mapdata/mapfiles_00")
load("./mapdata/mapfiles_all_years")
# Create crosswalks -------------------------------------------------------
# Build a crosswalk from each year layer (elements 2:12 of `files`) back to 1860.
cross <- map(names(files)[2:12],
             ~ get_intersection(files = files, to = "1860", from = as.character(.x)))
# Drop negligible overlap coefficients (kerroin <= 0.01).
cross <- map(cross,~ .x %>% filter(kerroin > 0.01))
# NOTE(review): head(files) inside map() ignores the mapped element;
# head(.x) was probably intended -- confirm.
map(files, ~ head(files))
# NOTE(review): the first `temp` is immediately overwritten by the second;
# these look like leftover exploratory calls.
temp <- get_intersection(files, from = 2013, to = 2019)
temp <- get_intersection(files, from = 1970, to = 2019)
temp <- temp %>% filter(kerroin > 0.01)
# Quick visual sanity checks of two map layers.
files[[10]] %>% ggplot() + geom_sf()
files[[1]] %>% ggplot() + geom_sf()
# save --------------------------------------------------------------------
dsn <- "./crosswalk_files/crosswalk_1860_"
# NOTE(review): `cross` was built from names(files)[2:12] (11 elements) but
# only names(files)[2:5] (4 elements) are paired here; map2() errors on
# unequal lengths -- verify the intended range.
map2(cross, names(files)[2:5], ~ write_csv(.x, path = paste0(dsn, .y )))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.