blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6946c0a18c98bfbf89aaf7bdc0a2096de27af091 | f2a7091926c21c42b6daf8cfc2eef7682a4cc338 | /scripts/03-load-data.R | b6aacfbdfcd6c62582ac61ffc641c0ee915054e3 | [] | no_license | mneunhoe/wahlkreis_vorhersage | b17d7d4d6da6054714bd86e6d61a94eb09c7f6aa | 4b7e50c426544446043e80dbd45f7b0d97024340 | refs/heads/master | 2023-01-14T21:49:47.482057 | 2020-11-17T12:44:18 | 2020-11-17T12:44:18 | 180,787,479 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 579 | r | 03-load-data.R | # Handcoded Election Results for 09, 13 and 17
c("CDU", "SPD", "LINKE", "GRUENE", "FDP", "AFD", "And")
res13 <- c(41.5, 25.7, 8.6, 8.4, 4.8, 4.7, 6.3) / 100
res09 <- c(33.8, 23, 11.9, 10.7, 14.6, 0.1, 6) / 100
res05 <- c(35.2, 34.2, 8.7, 8.1, 9.8, 0.1, 3.9) / 100
btw_bund_res <- rbind(res13, res09, res05)
colnames(btw_bund_res) <-
c("CDU", "SPD", "LINKE", "GRUENE", "FDP", "AFD", "And")
rownames(btw_bund_res) <- c("2013", "2009", "2005")
# Read in the candidate data.
full_df <-
read.csv2("../raw-data/btw_candidates_1983-2017.csv",
stringsAsFactors = F) |
6635d559d4663e7179d4f572d29e3bbc0cde7dc8 | 331234b7eabbe4daf51ee8143c6dcbc768da0595 | /Composite Bar Graph in GGPlot.R | dbdcdda7a941944d3cde2c3bb11e49f1597cfc0a | [] | no_license | IanGBrennan/Convenient_Scripts | 6e3959ad69e594b21a5b4b2ca3dd6b30d63170be | 71f0e64af8c08754e9e3d85fe3fb4770a12ec2d4 | refs/heads/master | 2021-05-12T03:57:32.339140 | 2020-01-30T01:58:19 | 2020-01-30T01:58:19 | 117,630,023 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 343 | r | Composite Bar Graph in GGPlot.R | library(ggplot2)
data("mtcars")
mtcars
data("diamonds")
tail(diamonds)
data<-read.csv("Pygopodoidea.inward.outward.Transitions.csv")
freq.out<-data$freq.out
freq.in<-data$freq.in
biome.out<-data$biome.out
biome.in<-data$biome.in
ggplot(data, aes(biome.out, fill=freq.out)) + geom_bar()
ggplot(data, aes(biome.in, fill=freq.in)) + geom_bar()
|
eae4e37223fc183614259ad75158b8477c9da7ef | 1fdc4cdd4d22eb15e37a7d5248a5ab7466f913e4 | /man/coef.glinternet.Rd | fefcb8a3babae6b3f151527226548f9dee26dedc | [] | no_license | YaohuiZeng/glinternet | 028a2d69d58de7751cec94d50b788df3f283fffc | 650bda683b2a46955468ce452a7cae26db2c3181 | refs/heads/master | 2020-12-03T02:29:55.498913 | 2015-03-27T00:00:00 | 2015-03-27T00:00:00 | 53,518,422 | 1 | 0 | null | 2016-03-09T17:44:39 | 2016-03-09T17:44:39 | null | UTF-8 | R | false | false | 2,560 | rd | coef.glinternet.Rd | \name{coef.glinternet}
\alias{coef.glinternet}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Return main effect and interaction coefficients.
}
\description{Returns the actual main effect and interaction coefficients
that satisfy the sum constraints in a linear interaction model. See
the paper below for details.
}
\usage{
\method{coef}{glinternet}(object, lambdaIndex = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{Fitted \code{"glinternet"} model object.}
\item{lambdaIndex}{Index of \code{lambda} value at which to extract
coefficients. If NULL, return the coefficients for all values of
\code{lambda} in the path.}
\item{\dots}{Not used.}
}
\details{Returns the actual main effect and interaction
coefficients. These satisfy the sum constraints in the original linear
interaction model.}
\value{A list with one component per \code{lambda} value in the path if
\code{lambdaIndex} is not provided; otherwise, one component per entry of
\code{lambdaIndex}. Each component (for
each lambda) is
itself a list, with components
\item{mainEffects}{A list with components \code{cat} and \code{cont},
each an index vector of the
categorical and continuous (resp) main-effect
variables. Just as in \code{activeSet}, the indexing is separate for
each type of variable. See ?glinternet for details.}
\item{mainEffectsCoef}{List of coefficients for the main effects in
\code{mainEffects}, also with names \code{cat} and \code{cont}}
\item{interactions}{List of interactions, with components
\code{contcont}, \code{catcont} and \code{catcat}, each 2-column
matrices of variable indices.}
\item{interactionsCoef}{List of interaction coefficients for
\code{interactions}, also with names \code{contcont}, \code{catcont}
and \code{catcat}. For categorical-categorical interactions, each
is provided as a L1 x L2 matrix.}
}
\references{
"Learning interactions via hierarchical group-lasso regularization"
}
\author{Michael Lim and Trevor Hastie\cr Maintainer: Michael Lim \email{michael626@gmail.com}}
\seealso{
\code{glinternet.cv}, \code{predict.glinternet},
\code{predict.glinternet.cv}, \code{plot.glinternet.cv}, \code{glinternet}
}
\examples{
Y = rnorm(100)
X = matrix(rnorm(100*10), nrow=100)
numLevels = rep(1, 10)
fit = glinternet(X, Y, numLevels)
coeffs = coef(fit)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }
\keyword{ interactions }% __ONLY ONE__ keyword per line
\keyword{group-lasso} |
c84975fbdfffe258847b55790688ffb213022a84 | 338e3636bb81414f81a0ede83210d84cda41d9ce | /Tab&Graf.R | b0758d6d0625dafa41ebbf17f55e5948759cd9cd | [] | no_license | romulo82/descritiva_R | 827fed3d3d041bf7e3ff8c2c81d8832204044ab2 | 09d7a71c204c4b89bfb9cea151dda047d06269f4 | refs/heads/master | 2020-05-30T00:59:22.662275 | 2017-02-21T03:02:58 | 2017-02-21T03:02:58 | 82,622,618 | 0 | 0 | null | 2017-02-21T01:53:50 | 2017-02-21T01:40:52 | null | ISO-8859-1 | R | false | false | 3,378 | r | Tab&Graf.R | # Estatística Básica - Bussab/Morettin
# 2.1 - Frequency distributions
# (Exercises from "Estatistica Basica", Bussab & Morettin. Comments translated
# to English; the code itself is unchanged.)

# Frequency table of assaults for the first six US states, with proportion
# and percentage columns and an appended totals row.
head(USArrests)
ex1 <- data.frame(head(USArrests)$Assault, row.names=row.names(head(USArrests)))
names(ex1) <- "Assaltos";ex1
ex1$Cidades <- row.names(ex1);ex1
# Drop the row names and move the city column to the front.
row.names(ex1)=NULL;ex1 <- ex1[, c(2,1)]; ex1
ex1 <- cbind(ex1, Proporcao=round(prop.table(ex1$Assaltos),2));ex1
ex1$Porcentagem <- ex1$Proporcao*100;ex1
Tot <- data.frame(Cidades="Total", Assaltos=sum(ex1$Assaltos),
Proporcao=sum(ex1$Proporcao), Porcentagem=sum(ex1$Porcentagem));Tot
ex1 <- rbind(ex1,Tot);ex1

# Table with the data grouped into classes (bins).
head(women)
ex2 <- as.data.frame(table(cut(women$height, breaks=6, right=F)));ex2
names(ex2) <- c("Classes", "Frequencia");ex2
ex2 <- cbind(ex2, Porcentagem=round(100*ex2$Frequencia/(sum(ex2$Frequencia)),2));ex2
Tot2 <- data.frame("Total", sum(ex2$Frequencia), ceiling(sum(ex2$Porcentagem)));Tot2
names(Tot2) <- names(ex2)
ex2 <- rbind(ex2, Tot2);ex2

# Table 2.1: personal data, read interactively.
# NOTE(review): choose.files() is Windows-only; file.choose() is the portable
# equivalent. The denominator 36 used below is a hard-coded sample size
# (presumably nrow(dadosp)) -- confirm against the data set.
dadosp <- read.csv(choose.files(), sep=";", header=T); dadosp
# Marital-status frequency table with proportions and totals row.
civil <- as.data.frame(table(dadosp$Civil)); civil
names(civil) <- c("Civil", "Frequência"); civil
civil$Proporção <- round(civil$Frequência/36, 2)
civil$Percentual <- round(civil$Frequência/36, 2)*100
Tot3 <- data.frame("Total", sum(civil$Frequência), sum(civil$Proporção), sum(civil$Percentual))
names(Tot3) <- names(civil)
civil <- rbind(civil, Tot3); civil
# Region frequency table.
regiao <- as.data.frame(table(dadosp$Região));regiao
names(regiao) <- c("Região", "Frequência")
regiao$Proporção <- round(regiao$Frequência/36,2)
regiao$Porcentagem <- regiao$Proporção*100; regiao
Tot4 <- data.frame("Total", sum(regiao$Frequência), sum(regiao$Proporção), sum(regiao$Porcentagem))
names(Tot4) <- names(regiao)
regiao <- rbind(regiao, Tot4); regiao
# Number of children among married respondents (column 4 filtered on
# column 2 == "casado"; 20 is the hard-coded count of married respondents).
filhos <- as.data.frame(table(subset(dadosp[, 4], dadosp[, 2]=="casado"))); filhos
names(filhos) <- c("Filhos", "Frequência")
filhos$Proporção <- round(filhos$Frequência/20, 2)
filhos$Porcentagem <- filhos$Proporção*100
Tot5 <- data.frame("Total", sum(filhos$Frequência), sum(filhos$Proporção), sum(filhos$Porcentagem))
names(Tot5) <- names(filhos)
filhos <- rbind(filhos, Tot5); filhos
# Age grouped into classes.
k <- ceiling(1+3.3*log10(36)); k # Number of classes (Sturges' rule, n = 36)
amp <- (max(dadosp$Anos)-min(dadosp$Anos))/k; amp # Class width (amplitude)
idade <- as.data.frame(table(cut(dadosp$Anos, breaks=7, right=F))); idade
names(idade) <- c("Classes", "Frequência")
idade$Proporção <- round(idade$Frequência/36, 2)
idade$Porcentagem <- idade$Proporção*100
Tot6 <- data.frame("Total", sum(idade$Frequência), sum(idade$Proporção), sum(idade$Porcentagem))
names(Tot6) <- names(idade)
idade <- rbind(idade, Tot6); idade

# 2.3 Charts
# Bar chart of education level (1 = elementary, 2 = secondary, 3 = higher).
dadosp
dadosp$Instrução
inst <- as.data.frame(table(dadosp$Instrução));inst
names(inst) <- c("Grau", "Frequência")
barplot(inst$Frequência, ylim=c(0,20), ylab="Frequência",
col="light blue", names=c("Fundamental", "Médio", "Superior"), space=0.2)
# Pie (sector) chart of the same distribution.
pie(inst$Frequência, labels=c("1 (12; 33,3%)", "2 (18; 50,0%)", "3 (6; 16,7%)"),
col=c("light green", "light blue", "grey"), sub=c("1 = Fundamental, 2 = Médio e 3 = Superior"))
# Scatter plot of the number-of-children frequencies (married respondents).
filhos <- as.data.frame(table(subset(dadosp[, 4], dadosp[, 2]=="casado"))); filhos
plot(filhos, type="p")
|
40ec2a6f1c2f8d2284872c26afc2c7661c75bf5c | 6c54b425d221cd838353a49da646465f40155bb4 | /plot4.R | 54dfd3e0375959edbe43ea0190bbdd2d9ae94963 | [] | no_license | Andrewwaldert/ExData_Plotting1 | 911563c43e09cdc7345493cc74e508926ad24a71 | 3f4adfe9a351fede60996f459042a9ad3cdf2069 | refs/heads/master | 2021-01-22T20:59:51.246274 | 2016-03-31T11:46:41 | 2016-03-31T11:46:41 | 55,077,086 | 0 | 0 | null | 2016-03-30T16:00:46 | 2016-03-30T16:00:46 | null | UTF-8 | R | false | false | 3,099 | r | plot4.R |
## Reading in the data
> household_power_consumption <- read.csv("~/Desktop/household_power_consumption.txt", sep=";")
## Viewing the Data
> View(household_power_consumption)
## Changing "?" to NA
> household_power_consumption[household_power_consumption == "?"] <- NA
## Selecting the appropriate Data
> household_power_consumption$Date <- as.Date(household_power_consumption$Date, format = "%d/%m/%Y")
> Usable_Data <- household_power_consumption[household_power_consumption$Date >= as.Date("2007-02-01") & household_power_consumption$Date <= as.Date("2007-02-02"),]
## Checking the head of the data
> head(Usable_Data)
Date Time Global_active_power Global_reactive_power
66637 2007-02-01 00:00:00 0.326 0.128
66638 2007-02-01 00:01:00 0.326 0.130
66639 2007-02-01 00:02:00 0.324 0.132
66640 2007-02-01 00:03:00 0.324 0.134
66641 2007-02-01 00:04:00 0.322 0.130
66642 2007-02-01 00:05:00 0.320 0.126
Voltage Global_intensity Sub_metering_1 Sub_metering_2
66637 243.150 1.400 0.000 0.000
66638 243.320 1.400 0.000 0.000
66639 243.510 1.400 0.000 0.000
66640 243.900 1.400 0.000 0.000
66641 243.160 1.400 0.000 0.000
66642 242.290 1.400 0.000 0.000
Sub_metering_3
66637 0
66638 0
66639 0
66640 0
66641 0
66642 0
## Creating a new posix date
> Usable_Data$posix <- as.POSIXct(strptime(paste(Usable_Data$Date, Usable_Data$Time, sep = " "), format = "%Y-%m-%d %H:%M:%S"))
## Checking the Class of Usable_Data$Global_active_power
> class(Usable_Data$Global_active_power)
[1] "factor"
##as.numeric
> Usable_Data$Global_active_power <- as.numeric(Usable_Data$Global_active_power)
## Open Quartz
> quartz()
## Creating png file
png(file = "plot4.png", width = 480, height = 480, units = "px")
##arranging the plots
> par(mfrow = c(2, 2))
## Making Graph 1
> with(Usable_Data, plot(posix, Global_active_power, type = "l", xlab = "", ylab = "Global_Active_Power"))
## Making Graph 2
> with(Usable_Data, plot(posix, Voltage, type = "l", xlab = "datetime", ylab = "voltage"))
## Making Graph 3
> with(Usable_Data, plot(posix, Sub_metering_1, type = "l", xlab = "", ylab = "Energy Sub Metering"))
>
> with(Usable_Data, plot(posix, type = "l", Sub_metering_2, col = "red"))
>
> with(Usable_Data, points(posix, type = "l", Sub_metering_2, col = "red"))
>
> with(Usable_Data, points(posix, type = "l", Sub_metering_3, col = "Blue"))
## Creating the Legend
> legend("topright", col = c("black", "blue", "red"), legend = c("Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3"), lty = 1)
## Making Graph 4
> with(Usable_Data, plot(posix, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power"))
|
e0edff0c8b7eda93606525762d2dbc8d012ceb37 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ColorPalette/examples/generateMonochromaticColors.Rd.R | d2b66479aaec778ef8dbbd05eaf7c87571676cd5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 225 | r | generateMonochromaticColors.Rd.R | library(ColorPalette)
### Name: generateMonochromaticColors
### Title: Monochromatic
### Aliases: generateMonochromaticColors
### Keywords: color monochromatic
### ** Examples
generateMonochromaticColors("#121314", 5)
|
e06da6bee73be2c88104ea0c7ed1e1fa941ab460 | 851527c2a9663b2aa83462409e66f3da90583f5a | /man/offense.Rd | 5931afa5bc59ec5e5a616a602dcc557ebf4793ba | [] | no_license | gastonstat/plspm | 36511de2c95df73a010fb23847ef84f7ab680b56 | bd21cb153021aed6ac6ea51ecbd0b856495b2a16 | refs/heads/master | 2022-05-05T19:36:11.853523 | 2022-03-27T00:25:31 | 2022-03-27T00:25:31 | 13,868,484 | 48 | 33 | null | 2016-12-13T07:37:39 | 2013-10-25T18:17:15 | R | UTF-8 | R | false | false | 2,357 | rd | offense.Rd | \name{offense}
\alias{offense}
\docType{data}
\title{Offense dataset}
\description{
Dataset with offense statistics of American football teams from the NFL (2010-2011 season).
}
\usage{data(offense)}
\format{
A data frame with 32 teams on the following 17 variables. The variables may be used to construct five suggested latent concepts: 1) \code{RUSH}=Rushing Quality, 2) \code{PASS}=Passing Quality, 3) \code{SPEC}=Special Teams and Other, 4) \code{SCORING}=Scoring Success, 5)\code{OFFENSE}=Offense Performance
\tabular{llll}{
\emph{Num} \tab \emph{Variable} \tab \emph{Description} \tab \emph{Concept} \cr
1 \tab \code{YardsRushAtt} \tab Yards per Rush Attempt \tab \code{RUSH} \cr
2 \tab \code{RushYards} \tab Rush Yards per game \tab \code{RUSH} \cr
3 \tab \code{RushFirstDown} \tab Rush First Downs per game \tab \code{RUSH} \cr
4 \tab \code{YardsPassComp} \tab Yards Pass Completion \tab \code{PASS} \cr
5 \tab \code{PassYards} \tab Passed Yards per game \tab \code{PASS} \cr
6 \tab \code{PassFirstDown} \tab Pass First Downs per game \tab \code{PASS} \cr
7 \tab \code{FieldGoals} \tab Field Goals per game \tab \code{SPEC} \cr
8 \tab \code{OtherTDs} \tab Other Touchdowns (non-offense) per game \tab \code{SPEC} \cr
9 \tab \code{PointsGame} \tab Points per game \tab \code{SCORING} \cr
10 \tab \code{OffensTD} \tab Offense Touchdowns per game \tab \code{SCORING} \cr
11 \tab \code{TDGame} \tab Touchdowns per game \tab \code{SCORING} \cr
12 \tab \code{PassTDG} \tab Passing Touchdowns per game \tab \code{OFFENSE} \cr
13 \tab \code{RushTDG} \tab Rushing Touchdowns per game \tab \code{OFFENSE} \cr
14 \tab \code{PlaysGame} \tab Plays per game \tab \code{OFFENSE} \cr
15 \tab \code{YardsPlay} \tab Yards per Play \tab \code{OFFENSE} \cr
16 \tab \code{FirstDownPlay} \tab First Downs per Play \tab \code{OFFENSE} \cr
17 \tab \code{OffTimePossPerc}\tab Offense Time Possession Percentage \tab \code{OFFENSE} \cr
}
}
\source{
\url{http://www.teamrankings.com/nfl/stats/}
}
\examples{
# load data
data(offense)
# take a peek
head(offense)
}
\keyword{datasets}
|
8d479816710cf944cd7f6946db848e3192a323f4 | 39dd176988177f1b4d0f5c8659042fd2b09232c9 | /Quality Control Charts/T2 and Ellipse chart/R Code/T2 and Ellipse chart.R | 211e77fd8bc79925f42db5292b90db712d7f01c1 | [] | no_license | rsalaza4/R-for-Industrial-Engineering | 922719bc17a03a6c146b9393c6a9766e65dc3be4 | b22524e9e6811041fa8da4d3bb331d07e3bd447c | refs/heads/master | 2023-01-14T09:57:33.450167 | 2022-12-22T01:15:17 | 2022-12-22T01:15:17 | 235,593,780 | 49 | 24 | null | null | null | null | UTF-8 | R | false | false | 1,682 | r | T2 and Ellipse chart.R | ### T2 CHART AND ELLIPSE CHART ###
# Import qcc package
library(qcc)

# --- Fill-in template -----------------------------------------------------
# The assignments below are a template: replace `...` with your own data and
# set `ncol` before un-commenting. They are commented out because the former
# placeholders `matrix(c(), byrow = FALSE, ncol = )` were not valid R syntax
# (`ncol = )` is a parse error), which prevented this entire file from being
# sourced; the worked EXAMPLE further down remains runnable.

# Create matrix corresponding to variable x1 with 20 groups of 4 observations each
# x1 <- matrix(c(...), byrow = FALSE, ncol = 4)
# Create matrix corresponding to variable x2 with 20 groups of 4 observations each
# x2 <- matrix(c(...), byrow = FALSE, ncol = 4)
# Create list with both matrices
# X <- list(x1 = x1, x2 = x2)
# Create T2 chart
# t2 <- mqcc(X, type = "T2")
# Get the summary for the chart
# summary(t2)
# Create Ellipse chart
# ellipseChart(t2)
# Create Ellipse chart with group id
# ellipseChart(t2, show.id = TRUE)
# EXAMPLE -------------------------------------------------------------------
# Worked example with hard-coded data: two quality characteristics (x1, x2),
# laid out with byrow = FALSE so each *row* of the resulting 20 x 4 matrix is
# one rational subgroup of 4 observations.
library(qcc)
x1 <- matrix(c(72, 56, 55, 44, 97, 83, 47, 88, 57, 26,
46, 49, 71, 71, 67, 55, 49, 72, 61, 35,
84, 87, 73, 80, 26, 89, 66, 50, 47, 39,
27, 62, 63, 58, 69, 63, 51, 80, 74, 38,
79, 33, 22, 54, 48, 91, 53, 84, 41, 52,
63, 78, 82, 69, 70, 72, 55, 61, 62, 41,
49, 42, 60, 74, 58, 62, 58, 69, 46, 48,
34, 87, 55, 70, 94, 49, 76, 59, 57, 46), byrow = FALSE, ncol = 4)
x2 <- matrix(c(23, 14, 13, 09, 36, 30, 12, 31, 14, 07,
10, 11, 22, 21, 18, 15, 13, 22, 19, 10,
30, 31, 22, 28, 10, 35, 18, 11, 10, 11,
08, 20, 16, 19, 19, 16, 14, 28, 20, 11,
28, 08, 06, 15, 14, 36, 14, 30, 08, 35,
19, 27, 31, 17, 18, 20, 16, 18, 16, 13,
10, 09, 16, 25, 15, 18, 16, 19, 10, 30,
09, 31, 15, 20, 35, 12, 26, 17, 14, 16), byrow = FALSE, ncol = 4)
# mqcc() expects a named list with one matrix per quality characteristic.
X <- list(x1 = x1, x2 = x2)
# Hotelling T^2 multivariate control chart over the subgroups.
t2 <- mqcc(X, type = "T2")
summary(t2)
# Joint confidence ellipse of the two characteristics; show.id labels each
# point with its subgroup number.
ellipseChart(t2)
ellipseChart(t2, show.id = TRUE)
|
d46e734607be59a617974a6a9b66b3d6a082400a | 15f708e3e693044f4f9ef1e1700fae3eecc30f12 | /viz.R | b965cd0f9d69c75488eb452516f7b4b6763fcad8 | [] | no_license | LennertZonneveld/testrepo2 | 6f4be151be3eb4561751ac104c9522c7b1029767 | f2945b0913a2e2d0bf88f42f8a4c4dec2b3f9678 | refs/heads/master | 2023-04-17T15:05:03.282540 | 2021-04-27T12:14:35 | 2021-04-27T12:14:35 | 362,098,774 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 158 | r | viz.R | library(tidyverse)
install.packages("palmerpenguins")
library(palmerpenguins)
penguins %>%
ggplot(aes(x=bill_depth_mm)) +
geom_histogram()
# hi there!
|
3908f5c3a72d34986d1a6e45ada23bbce37732f4 | a2593441f650a2daa5585618b4d02ba7be9787fc | /ui.R | 848bab4fc6fa596229e4bcf8b7e559f1ad28270f | [] | no_license | tchovanec/Data-Products | 82904aab1c585bbb5d43b103d8f2385fc61cfd1a | a20f261d8456178b03a27ed2a3159838c168dc9b | refs/heads/master | 2021-01-20T00:46:06.054745 | 2017-04-24T02:05:52 | 2017-04-24T02:05:52 | 89,185,511 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,818 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(markdown)

# UI definition: a two-tab navbar app.
#  * "Table"  -- a sidebar of trip/car inputs plus a data table of matching
#                cars (rendered by the server as output$table).
#  * "How To" -- static usage instructions included from HowTo.md.
shinyUI(navbarPage("Car Selection For Your Vacation Trip",
tabPanel("Table",
# Sidebar
sidebarLayout(
sidebarPanel(
helpText("Please provide information about your trip and any specs in a car you would like."),
# Starts the default value at 40 miles and allows a max of 2000 and a min of 10
numericInput('dist', 'Distance (in miles):', 40, min = 10, max = 2000),
# Starts the default gas price at 2 with a minimum of 1.90 and max of 7. Increments the price by 0.01
numericInput('gas_cost', 'Price of Gas (per gallon):', 2.00, min = 1.90, max = 7, step=0.01),
# Max money spent on gas defaults to 25 with a min of 1 and a max of 1000
numericInput('max_gas_cost', 'Max money spent on gas for trip:', 25, min=1, max=1000),
# Creates a checkbox group to allow selection of automatic (0) and/or manual (1) transmission
checkboxGroupInput('am', 'Transmission:', c("Automatic"=0, "Manual"=1), selected = c(0,1)),
# Creates a checkbox group to allow the selection of 4, 6 and/or 8 cylinder vehicles
checkboxGroupInput('cyl', 'Number of cylinders:', c("Four"=4, "Six"=6, "Eight"=8), selected = c(4,6,8)),
# Creates a range slider for horsepower between a minimum of 50 and a max of 340
sliderInput('hp', 'Horsepower', min=50, max=340, value=c(50,340), step=10)
),
mainPanel(
dataTableOutput('table')
)
)
),
tabPanel("How To",
mainPanel(
includeMarkdown("HowTo.md")
)
)
)
)
|
e15fe4c3acd6f35c91de28d6c06f392f2540eac0 | 0ce488111b3b887131dee263dfda3dc6ba1fdb0f | /5_Basic_Demographic_function.R | 27b98d0c6922a4d36eb87cdbd47834a745c95f4d | [] | no_license | ktorresSD/PipelinePsycMeasures | f5e54bd94e353d6f49ebbbe20ef4ac915c95d438 | 9a1d7dcd1c13972bce0a5004db1a0ecc74ca0abf | refs/heads/master | 2020-03-22T16:28:58.571445 | 2020-03-17T21:22:02 | 2020-03-17T21:22:02 | 140,329,869 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,912 | r | 5_Basic_Demographic_function.R | #########################################################################################
# Last Date modified: 11/05/2018
# Author: Katy Torres
# Description: Subset of question 5, Basic Demographic and scoring functions
##########################################################################################
#' Score the "Basic Demographics" questionnaire (Biobank question 5).
#'
#' @param dat0       Raw assessment export containing the identifier columns
#'                   (assessment_id, vista_lastname, visit_number,
#'                   date_created) and the demo_* items selected below.
#' @param exportdate NOTE(review): not referenced anywhere in the body --
#'                   the output file names are hard-coded; confirm whether
#'                   this was meant to be appended to the export paths.
#' @return The string "5_Basic_Demographic_done" (via print()); the scored
#'         data sets are written to CSV files as a side effect.
basicdemo<- function(dat0, exportdate)
{
#Only retain relevant variables
datdemo1 <- subset(dat0,
select= c(assessment_id,vista_lastname,visit_number,
date_created,
demo_gender_r,
demo_YOB_r,
demo_weight_r,
demo_heightft_r,
demo_heightinch_r,
demo_ethnic_r,
demo_racewhite,
demo_race_black,
demo_race_amind,
demo_race_pacisl,
demo_race_asian,
demo_race_decline,
demo_race_oth,
demo_race_oth_spec,
demo_relationship_r
))
# Derive the assessment year from date_created (month/day/year strings) and
# an approximate age as assessment year minus year of birth.
datdemo1$year_assessed<- format(as.Date(datdemo1$date_created, format="%m/%d/%Y"),"%Y")
datdemo1$approx_age<- as.numeric(datdemo1$year_assessed) - datdemo1$demo_YOB_r
#________________________________________________________________________________________
# SCORING Functions Defined
#----------------------------------------------------------------------------------------
#Scoring function defined
# Score self-reported race for a single assessment row.
#
# x: a one-row data frame (as supplied by plyr::adply) containing the seven
#    0/1 race indicator columns: demo_racewhite, demo_race_black,
#    demo_race_amind, demo_race_pacisl, demo_race_asian, demo_race_decline,
#    demo_race_oth.
#
# Returns a one-row data.frame with:
#   completeness_race - "complete" (no indicator missing), "not attempted"
#                       (all seven missing), or "partially completed".
#   race              - the single selected race label; "Multiple" when more
#                       than one substantive indicator is 1; "Decline To
#                       Answer" when only the decline flag is set; NA when
#                       nothing can be determined.
#
# This rewrite fixes two defects in the original implementation:
#   * the indicator checks ran in sequence and every NA branch reset `race`
#     to NA, so a later missing indicator silently wiped out an already
#     determined label;
#   * a row with all indicators equal to 0 never assigned `race` at all and
#     the function crashed with "object 'race' not found".
demorace<- function(x)
{
  race_labels <- c(demo_racewhite   = "White",
                   demo_race_black  = "Black",
                   demo_race_amind  = "American Indian",
                   demo_race_pacisl = "Pacific Islander",
                   demo_race_asian  = "Asian",
                   demo_race_oth    = "Other")
  # Pull out the six substantive indicators plus the decline flag.
  flags   <- vapply(names(race_labels), function(v) as.numeric(x[[v]]), numeric(1))
  decline <- as.numeric(x[["demo_race_decline"]])

  # Completeness over all seven indicators (six substantive + decline).
  n_missing <- sum(is.na(c(flags, decline)))
  if (n_missing == 0) {
    completeness_race <- "complete"
  } else if (n_missing == 7) {
    completeness_race <- "not attempted"
  } else {
    completeness_race <- "partially completed"
  }

  # Determine the race label from whichever indicators are observed; missing
  # indicators are simply ignored rather than overriding a found label.
  selected <- which(!is.na(flags) & flags == 1)
  if (length(selected) > 1) {
    race <- "Multiple"
  } else if (length(selected) == 1) {
    race <- unname(race_labels[selected])
  } else if (isTRUE(decline == 1)) {
    race <- "Decline To Answer"
  } else {
    race <- NA_character_
  }

  data.frame(completeness_race = completeness_race, race = race)
}
# Row-wise apply: score race + race-completeness for every assessment row
# (plyr::adply binds the two returned columns onto datdemo1).
datdemorace <- adply(datdemo1, 1, demorace)
# Classify how completely the core demographic items were answered for one
# assessment row (x, as supplied by plyr::adply).
#
# Returns a one-row data.frame whose completeness_demo column is:
#   "complete"            - none of the 14 tracked items is missing
#   "not attempted"       - all 14 tracked items are missing
#   "partially completed" - anything in between
democomplete<- function(x)
{
  # The 14 demographic items that count toward completeness.
  tracked <- c("demo_gender_r", "demo_YOB_r", "demo_weight_r",
               "demo_heightft_r", "demo_heightinch_r", "demo_ethnic_r",
               "demo_racewhite", "demo_race_black", "demo_race_amind",
               "demo_race_pacisl", "demo_race_asian", "demo_race_decline",
               "demo_race_oth", "demo_relationship_r")
  n_missing <- sum(vapply(tracked, function(v) is.na(x[[v]]), logical(1)))

  if (n_missing == length(tracked)) {
    completeness_demo <- "not attempted"
  } else if (n_missing == 0) {
    completeness_demo <- "complete"
  } else {
    completeness_demo <- "partially completed"
  }

  data.frame(completeness_demo = completeness_demo)
}
# Row-wise apply: attach the overall demographics completeness flag to the
# race-scored data.
datdemo_final <- adply(datdemorace, 1, democomplete)
# To anonymize data: drop the direct identifiers before the de-identified
# export at the bottom of the function.
datdemo_final1<- within(datdemo_final,
{
assessment_id <- NULL
vista_lastname <- NULL
})
#________________________________________________________________________________________
# Checking consistency
#----------------------------------------------------------------------------------------
# NOTE(review): attach() is never paired with detach(), so these columns stay
# on the search path after the function returns.
attach(datdemo_final)
# NOTE(review): sum() over the attached full-length columns produces a single
# grand total that is recycled into every row of sum_multiple; a row-wise
# count (e.g. rowSums over the indicator columns) was presumably intended --
# confirm before relying on this column.
datdemo_final$sum_multiple <- sum(c(demo_racewhite, demo_race_black, demo_race_amind, demo_race_pacisl, demo_race_asian,
demo_race_decline, demo_race_oth),na.rm=T)
#________________________________________________________________________________________
# Descriptive Stats and plots
#----------------------------------------------------------------------------------------
# Subset by visit to get report information.
# NOTE(review): v1/v2/v3 and the table() calls below are for interactive
# inspection only; their values are discarded when the function is sourced.
v1 <- datdemo_final[ which(datdemo_final$visit_number==1), ]
v2 <- datdemo_final[ which(datdemo_final$visit_number==2), ]
v3 <- datdemo_final[ which(datdemo_final$visit_number==3), ]
# Completeness by visit, and the gender frequency table.
table(datdemo_final$completeness_demo, datdemo_final$visit_number)
table(datdemo_final$demo_gender_r)
#------------------------------------------------------------------------
# DATA TRANSFORMATION to see concordance between visits
#------------------------------------------------------------------------
# Reorder columns (hard-coded positions 1-25; datdemo_final0 is unused after
# this), sort by participant, then reshape long -> wide so each participant
# is one row with .1/.2/.3 visit-numbered column suffixes.
datdemo_final0<- datdemo_final[,c(2,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25)]
l.sort <- datdemo_final[order(datdemo_final$vista_lastname),]
demo_wide <- reshape(l.sort,
timevar = "visit_number",
idvar = c( "vista_lastname"),
direction = "wide")
# Concordance flags between visits 1 and 2, coded as character "1"/"0".
# (Gender coding: 1 and 2 -- presumably male/female per the flag names;
# confirm against the codebook.)
demo_wide$gen_F_TO_F<- ifelse(demo_wide$demo_gender_r.1 == 2 & demo_wide$demo_gender_r.2 == 2 , "1", "0")
demo_wide$gen_M_TO_M<- ifelse(demo_wide$demo_gender_r.1 == 1 & demo_wide$demo_gender_r.2 == 1 , "1", "0")
demo_wide$gen_match<- ifelse(demo_wide$demo_gender_r.1 == demo_wide$demo_gender_r.2, "1", "0")
demo_wide$eth_yes_to_yes<- ifelse(demo_wide$demo_ethnic_r.1 == 1 & demo_wide$demo_ethnic_r.2== 1 , "1", "0")
demo_wide$eth_no_to_no<- ifelse(demo_wide$demo_ethnic_r.1 == 0 & demo_wide$demo_ethnic_r.2== 0 , "1", "0")
demo_wide$eth_decline_twice<- ifelse(demo_wide$demo_ethnic_r.1 == 2 & demo_wide$demo_ethnic_r.2== 2 , "1", "0")
demo_wide$eth_match<- ifelse(demo_wide$demo_ethnic_r.1 == demo_wide$demo_ethnic_r.2, "1", "0")
demo_wide$race_match<- ifelse(demo_wide$race.1 == demo_wide$race.2, "1", "0")
# GENDER: concordance counts (printed interactively).
sum(as.numeric(demo_wide$gen_F_TO_F), na.rm=TRUE)
sum(as.numeric(demo_wide$gen_M_TO_M), na.rm=TRUE)
sum(as.numeric(demo_wide$gen_match), na.rm=TRUE)
# ETHNICITY: concordance counts.
sum(as.numeric(demo_wide$eth_yes_to_yes), na.rm=TRUE)
sum(as.numeric(demo_wide$eth_no_to_no), na.rm=TRUE)
sum(as.numeric(demo_wide$eth_decline_twice), na.rm=TRUE)
sum(as.numeric(demo_wide$eth_match), na.rm=TRUE)
# RACE: concordance count.
sum(as.numeric(demo_wide$race_match), na.rm=TRUE)
# BETWEEN VISITS 2 AND 3: same concordance flags for the later visit pair.
demo_wide$gen_match_23<- ifelse(demo_wide$demo_gender_r.2 == demo_wide$demo_gender_r.3, "1", "0")
demo_wide$eth_match_23<- ifelse(demo_wide$demo_ethnic_r.2 == demo_wide$demo_ethnic_r.3, "1", "0")
demo_wide$race_match_23<- ifelse(demo_wide$race.2 == demo_wide$race.3, "1", "0")
#________________________________________________________________________________________
# Export data
#----------------------------------------------------------------------------------------
# NOTE(review): the output paths are hard-coded absolute locations and the
# exportdate argument is never used here -- confirm intended file naming.
filename <- paste("~/Biobank/5_Basic_Demographic/Basic_Demographic_scored_data_export.csv", sep="")
write.csv(datdemo_final, filename,quote=T,row.names=F,na="NA")
filename <- paste("~/Biobank/5_Basic_Demographic/Basic_Demographic_wide.csv", sep="")
write.csv(demo_wide , filename,quote=T,row.names=F,na="NA")
filename <- paste("~/Biobank/5_Basic_Demographic/Basic_Demographic_scored_data_export_DEIDENTIFIED.csv", sep="")
write.csv(datdemo_final1, filename,quote=T,row.names=F,na="NA")
return(print("5_Basic_Demographic_done"))
}
|
f16a4d621666406df9a1e3f2f27da9c813f6e285 | 853e70cb8f22e7d32edd1311bff4168cdcca4791 | /Scripts/scripts-old/Analysis-STAR-Discrete.R | b7597b2fa0505addeff17bc32849e749b2389f2d | [] | no_license | loganstundal/EventData-SpaceTime-Iraq | 1cec19d00f0ba8d4ffc0126c83aa8b98a0f2880f | 83f34a306edfe581ec126e2f214d351a7d08a336 | refs/heads/main | 2023-08-10T18:30:54.994608 | 2021-09-29T02:52:29 | 2021-09-29T02:52:29 | 341,699,693 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,547 | r | Analysis-STAR-Discrete.R | #-----------------------------------------------------------------------------#
#
# Author: Logan Stundal
# Date: January 31, 2021
# Purpose: Analysis - Initial discrete space-time analysis
#
#
# Copyright (c): Logan Stundal, 2021
# Email: stund005@umn.edu
#
#-----------------------------------------------------------------------------#
#
# Notes:
#
#
#-----------------------------------------------------------------------------#
# ADMINISTRATIVE --------------------------------------------------------------
#---------------------------#
# Clear working environment
#---------------------------#
# NOTE(review): rm(list = ls()) also wipes the session of anyone who sources
# this script interactively; prefer running the script in a fresh R session.
rm(list = ls())
#---------------------------#
# Load required packages
#---------------------------#
# sandwich supplies vcovCL() (cluster-robust SEs); spatialreg supplies
# lagsarlm(); texreg supplies screenreg() used below.
library(tidyverse)
library(spatialreg)
library(sf)
library(texreg)
library(sandwich)
#---------------------------#
# Set working directory
#---------------------------#
# NOTE(review): machine-specific absolute path — breaks on any other machine;
# consider an RStudio project / here::here() instead.
setwd("c:/users/logan/googledrive/umn/research/ra_john/2021 - Time Series - RINLA")
#---------------------------#
# Load data
#---------------------------#
# Loads d_pn_mo (district-month panel) plus W_list / W / W_eigen spatial
# weights into the global environment; unused panels are dropped immediately.
load("Data/Data-All-Tidy-20210131.Rdata")
load("Data/Data-Spatial-Weights-20210131.Rdata")
rm(d_cs, d_ts, d_pn_yr)
#-----------------------------------------------------------------------------#
# MODEL FORMATTING ------------------------------------------------------------
# ----------------------------------- #
# Static formulas
# ----------------------------------- #
# One RHS-identical specification per event-data source (SIGACT, ICEWS, GED);
# commented-out covariates were deliberately excluded from this run.
fs <- list("SIGACT" = formula(SIGACT_pc ~ SIGACT_pc_lag +
                                Pop_den +
                                # Pop_urban_pct + Unemp_rate +
                                CD_Coalition + CD_Insurgent + CD_Sectarian +
                                # Troop_Strgth + PRT + CMOC +
                                p_spentruzicka + p_spentcptotal),
           "ICEWS" = formula(ICEWS_pc ~ ICEWS_pc_lag +
                               Pop_den +
                               # Pop_urban_pct + Unemp_rate +
                               CD_Coalition + CD_Insurgent + CD_Sectarian +
                               # Troop_Strgth + PRT + CMOC +
                               p_spentruzicka + p_spentcptotal),
           "GED"   = formula(GED_pc ~ GED_pc_lag +
                               Pop_den +
                               # Pop_urban_pct + Unemp_rate +
                               CD_Coalition + CD_Insurgent + CD_Sectarian +
                               # Troop_Strgth + PRT + CMOC +
                               p_spentruzicka + p_spentcptotal))
# ----------------------------------- #
# ----------------------------------- #
# Dynamic formulas - TO DO
# ----------------------------------- #
# To do
# ----------------------------------- #
# ----------------------------------- #
# Model Name formatting
# ----------------------------------- #
# Pretty coefficient labels for texreg::screenreg tables.
# (Fixed: was assigned with `=`; use `<-` for top-level assignment.)
m_vnames <- list("SIGACT_pc_lag"  = "DV - Time Lag",
                 "ICEWS_pc_lag"   = "DV - Time Lag",
                 "GED_pc_lag"     = "DV - Time Lag",
                 "Pop_den"        = "Pop. Density",
                 "Pop_urban_pct"  = "Pop. Urban",
                 "Unemp_rate"     = "Unemployment",
                 "CD_Coalition"   = "CD Coalition",
                 "CD_Insurgent"   = "CD Insurgent",
                 "CD_Sectarian"   = "CD Sectarian",
                 "Troop_Strgth"   = "Troop Strength",
                 "PRT"            = "PRT",
                 "CMOC"           = "CMOC",
                 "p_spentruzicka" = "PC Ruzicka",
                 "p_spentcptotal" = "PC CERP",
                 "(Intercept)"    = "Intercept")
#-----------------------------------------------------------------------------#
# MODELS: NON-SPATIAL ---------------------------------------------------------
# ----------------------------------- #
# Linear models (no dynamics, no FEs)
# ----------------------------------- #
# Fit each formula in `fs` by OLS on the district-month panel, then compute
# district-clustered (HC1) standard errors and the corresponding normal-
# approximation p-values.
m1 <- lapply(fs, function(x){lm(formula = x,
                                data    = d_pn_mo)})
m1_ses <- lapply(m1, function(x){sqrt(diag(vcovCL(x, cluster = ~ District, type = "HC1")))})
# seq_along() instead of 1:length() — safe if the model list is ever empty.
m1_pvs <- lapply(seq_along(m1), function(x){
  2 * (1 - pnorm(abs(coef(m1[[x]]))/m1_ses[[x]]))
})
# ----------------------------------- #
# Linear models (no dynamics, time, and fixed effects)
# ----------------------------------- #
# Same models augmented with month and district fixed effects.
m2 <- lapply(fs, function(x){lm(formula = update(x, . ~ . + Time_FE + District),
                                data    = d_pn_mo)})
m2_ses <- lapply(m2, function(x){sqrt(diag(vcovCL(x, cluster = ~ District, type = "HC1")))})
m2_pvs <- lapply(seq_along(m2), function(x){
  2 * (1 - pnorm(abs(coef(m2[[x]]))/m2_ses[[x]]))
})
# Side-by-side console table, substituting the clustered SEs / p-values.
screenreg(l = c(m1,m2),
          custom.model.names = rep(c("SIGACT","ICEWS","GED"),2),
          custom.header = list("No FEs" = 1:3, "FEs" = 4:6),
          custom.coef.map = m_vnames,
          override.se = c(m1_ses, m2_ses),
          override.pvalues = c(m1_pvs, m2_pvs))
#-----------------------------------------------------------------------------#
# SPATIAL DIAGNOSTICS ---------------------------------------------------------
# ----------------------------------- #
# Spatial diagnostic tests
# ----------------------------------- #
# Lagrange-multiplier tests (plain + robust, lag and error forms) on the
# fixed-effects residuals to decide which spatial specification is warranted.
m2_diag <- lapply(m2, function(x){
  spdep::lm.LMtests(model = x,
                    listw = W_list,
                    test  = c('LMerr','LMlag','RLMerr','RLMlag'))
})
# The warning about row-standardization is a damn lie.
m2_diag$SIGACT # Spatial lag - robust spatial lag highly significant
m2_diag$ICEWS  # Spatial lag - robust spatial lag significant, error more significant
m2_diag$GED    # Does not support spatial lag or spatial error
#-----------------------------------------------------------------------------#
# SPATIAL LAG MODELS ----------------------------------------------------------
# ----------------------------------- #
# Spatial linear model (no FEs)
# ----------------------------------- #
# use eigenvalue method (Ord, 1975) for computational boost
# Pre-computed eigenvalues (W_eigen, from the loaded weights file) are passed
# in so the log-determinant term does not have to be re-derived each iteration.
m3 <- lagsarlm(formula = fs$SIGACT,
               data    = d_pn_mo,
               listw   = W_list,
               method  = "eigen",
               control = list(pre_eig = W_eigen,
                              OrdVsign = -1))
# This ^ works! Convergence is slow, but produces parameter estimates with no
# errors or warning messages. Therefore, this is a good baseline to compare
# the custom NLS model against.
#-----------------------------------------------------------------------------#
# Marginal Effects: STAR-Lag Model --------------------------------------------
# ----------------------------------- #
# NB - I have modified this to account for the fact that I forgot the
#      temporal lag the first time I executed m3
# ----------------------------------- #
n <- nrow(d_pn_mo)
rho <- coef(m3)['rho'] # Extract spatial lag parameter from model
# phi <- coef(m3)['ben95t1'] # Extract temporal lag parameter from model
beta <- coef(m3)["p_spentcptotal"] # Variable of interest
Id <- diag(1,n,n) # nxn Identity matrix (NOTE - HERE n SET one cross-section!
# Estimate Long-Run-Steady-State Multiplier Matrix
# M <- solve(Id - rho*W - phi*Id) # LRSS_multiplier
# NOTE(review): solve() on an n x n dense matrix is O(n^3); fine once, but
# infeasible inside a Monte Carlo loop as the comment below already warns.
# W here is assumed to be the dense n x n weights matrix loaded from the
# weights .Rdata — TODO confirm it matches W_list's ordering.
M <- solve(Id - rho*W) # Ah yes, this solve() is computationally intense. Doable. But likely not in an monte carlo.
bM <- beta * M
# Like with our SAR model, we could looks at the LRSS for one state:
# Note - this is the long-run steady state after the system reachs an
# equilibrium from a one-unit change in X.
# Here is an example using California again and our SAR map function
# sar_eff_map(unit = "CA",
#             map  = us,
#             bM   = bM,
#             join_var = 'STUSPS',
#             breaks   = c(0.001,0.01,0.05,0.1,0.6))
# round(sort(bM[,'CA'], decreasing = T), 3)
# Thus, relative to the regression coefficient on the variable of interest
# you can see that the response in y to a unit change in x is much larger
# than the coefficient alone would imply. This response accounts for the
# temporal and spatial dynamics implied by our model.
# Effects:
# Direct = average own-unit effect (diagonal); Indirect = average spillover
# (off-diagonal); Total = their sum, all per LeSage-Pace summary measures.
dir.eff <- mean(diag(bM))
ind.eff <- (sum(bM) - sum(diag(bM))) / n
total <- sum(bM) / n
cbind('Direct' = dir.eff, 'Indirect' = ind.eff, 'Total' = total)
#-----------------------------------------------------------------------------#
# SAVE ------------------------------------------------------------------------
# save.image()
# rm(list = ls())
|
3109ebafdbdde872c57f2b78d76a666922f6f79a | d14bcd4679f0ffa43df5267a82544f098095f1d1 | /R/levels.groupm.out.R | dc9487e50f5716533cd006f8180d7f195c41dbda | [] | no_license | anhnguyendepocen/SMRD | 9e52aa72a5abe5274f9a8546475639d11f058c0d | c54fa017afca7f20255291c6363194673bc2435a | refs/heads/master | 2022-12-15T12:29:11.165234 | 2020-09-10T13:23:59 | 2020-09-10T13:23:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 286 | r | levels.groupm.out.R | levels.groupm.out <-
function (x)
{
    # Normalize the input: `x` may be the groupm.out object itself, or a list
    # whose first element carries the "life.data" class — unwrap accordingly.
    # (is.onlist / get.x.markers are package-internal helpers; their exact
    # semantics are assumed from usage — confirm against the package source.)
    if (!is.onlist("life.data", oldClass(x[[1]]))) {
        groupm.out <- x[[1]]
    } else {
        groupm.out <- x
    }
    # Return the stress levels (x markers) for the grouping variable.
    stresses <- get.x.markers(groupm.out$data.ld, groupm.out$group.var)
    return(stresses)
}
|
c5866ca2c4148ee9039728dfa74afaa93deabd65 | 37c79103a2e06f47089284620aa712c9ab4da03d | /Code/FL103 Microbiome 9-2-20.R | f8a67c983958341a10a19c897d449da97faa04f4 | [] | no_license | rlh13/Resistant-Starch | 72c9d4ad4939a67413f98b12f331aadb610c24a9 | 744b5bdec6b4e882843d4a00b5a891b0c7a5e2d2 | refs/heads/main | 2023-02-26T04:08:24.865926 | 2021-02-01T19:07:29 | 2021-02-01T19:07:29 | 333,491,114 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,194 | r | FL103 Microbiome 9-2-20.R | #Load metadata and otu/tax tables from the above at genus, family, and phylum levels (relative abundance)
# Load sample metadata and relative-abundance OTU/taxonomy tables (genus,
# family, phylum level output of the DADA2 pipeline).
# NOTE(review): hard-coded absolute paths — portable only on this machine.
metadata <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/FL103_Mapfile.csv",sep=",",header=T,stringsAsFactors=F)
otu_w_tax_genusrel <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_tax_genusrel.csv",sep=",",header = T)
otu_w_tax_genusrel$X <- NULL #remove X column
otu_w_tax_famrel <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_tax_famrel.csv",sep=",",header = T)
otu_w_tax_famrel$X <- NULL
otu_w_tax_phyrel <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_tax_phyrel.csv",sep=",",header = T)
otu_w_tax_phyrel$X <- NULL
#Define taxa of interest at genus, family, and phylum level
gen_taxa <- c("g__Bifidobacterium","g__Bacteroides","g__Prevotella","g__Faecalibacterium","g__Ruminococcus","g__[Eubacterium]","g__Gemmiger","g__Roseburia")
fam_taxa <- c("f__Bifidobacteriaceae", "f__Bacteroidaceae","f__Prevotellaceae", "f__Ruminococcaceae", "f__Eubacteriaceae")
phy_taxa <- c("p__Firmicutes","p__Bacteroidetes","p__Actinobacteria")
tax_names <- c("Kingdom","Phylum","Class","Order","Family","Genus","Species")
#Get taxa of interest and add to metadata
genus_table <- otu_w_tax_genusrel[which(otu_w_tax_genusrel$Genus %in% gen_taxa), ]
#Get rid of g__Ruminococcus belonging to f__Lachnospiraceae
# (keeps only the Ruminococcaceae-family Ruminococcus rows)
genus_table <- genus_table[-which(genus_table$Genus %in% c("g__Ruminococcus") & genus_table$Family %in% c("f__Lachnospiraceae")), ]
fam_table <- otu_w_tax_famrel[which(otu_w_tax_famrel$Family %in% fam_taxa), ]
phy_table <- otu_w_tax_phyrel[which(otu_w_tax_phyrel$Phylum %in% phy_taxa), ]
# Reshape each table to samples-as-rows (taxa become columns after t()).
rownames(genus_table) <- genus_table$Genus
genus_table <- genus_table[ ,-which(names(genus_table) %in% tax_names)] #remove tax columns
genus_table <- t(genus_table)
rownames(fam_table) <- fam_table$Family
fam_table <- fam_table[ ,-which(names(fam_table) %in% tax_names)] #remove tax columns
fam_table <- t(fam_table)
rownames(phy_table) <- phy_table$Phylum
phy_table <- phy_table[ ,-which(names(phy_table) %in% tax_names)] #remove tax columns
phy_table <- t(phy_table)
# Bind taxa abundances onto the sample metadata (assumes identical row order
# between metadata and the transposed tables — TODO confirm).
metadata_w_tax <- cbind(metadata,genus_table,fam_table,phy_table)
#Remove the mock community
metadata_w_tax <- metadata_w_tax[-which(metadata_w_tax$X.SampleID %in% "MockComm"), ]
#Calculate Shannon diversity (use relative abundance) and Chao1 richness (use counts) and add to metadata
#use non-glommed data to calculate
library(vegan)
library(fossil)
otu_w_noglomtax_rel <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_noglomtax_rel.csv",sep=",",header = T)
otu_w_noglomtax_rel$X <- NULL
otu_w_noglomtax <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_noglomtax.csv",sep=",",header = T)
otu_w_noglomtax$X <- NULL
# Columns 1:121 are assumed to be the 121 sample columns (taxonomy columns
# follow) — TODO confirm against the CSV layout.
shannon <- diversity(t(otu_w_noglomtax_rel[,1:121]), index = "shannon")
metadata_w_tax$Shannon_div <- head(shannon,-1) #removes mock community sample
chao <- as.matrix(apply(otu_w_noglomtax[,1:121],2,chao1))#exclude columns with taxonomy
colnames(chao) <- "Chao1"
metadata_w_tax$Chao1 <- head(chao,-1) #removes mock community sample
#Violin plots
library(ggplot2)
library(reshape2)
# Genus-of-interest relative abundances, melted to long format
# (id vars: sample ID + treatment phase) for faceted violin plots.
meta_gen_melt <- metadata_w_tax[,c("X.SampleID","Trt3",gen_taxa)]
meta_gen_melt <- melt(meta_gen_melt)
# One violin per genus, phases ordered Pre_C -> C -> Pre_RS -> RS; black
# point marks the mean. Fixes: "postition" typo (the dodge was silently
# ignored) and fun.y, which was renamed to fun in ggplot2 3.3.0.
ggplot(meta_gen_melt,aes(x=factor(Trt3,level=c("Pre_C","C","Pre_RS","RS")),y=value,fill=variable))+
  geom_violin(position=position_dodge(1)) +
  stat_summary(fun=mean, geom="point",position=position_dodge(1)) +
  labs(x="Treatment",y="Relative Abundance") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black")) +
  facet_wrap(~variable)
#only genera that showed up in DESeq2
deseq_genus <- c("g__Bifidobacterium","g__Bacteroides","g__Faecalibacterium","g__Ruminococcus","g__Gemmiger","g__Roseburia")
meta_dsqgen_melt <- metadata_w_tax[,c("X.SampleID","Trt3",deseq_genus)]
meta_dsqgen_melt <- melt(meta_dsqgen_melt)
# Same violin layout as above, restricted to DESeq2-significant genera.
# Fixes: "postition" typo and deprecated fun.y (-> fun, ggplot2 >= 3.3.0).
ggplot(meta_dsqgen_melt,aes(x=factor(Trt3,level=c("Pre_C","C","Pre_RS","RS")),y=value,fill=variable))+
  geom_violin(position=position_dodge(1)) +
  stat_summary(fun=mean, geom="point",position=position_dodge(1)) +
  labs(x="Treatment",y="Relative Abundance") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black")) +
  facet_wrap(~variable)
meta_fam_melt <- metadata_w_tax[,c("X.SampleID","Trt3",fam_taxa)]
meta_fam_melt <- melt(meta_fam_melt)
# Family-level violins. Fixes: the x aesthetic referenced Trt2, but
# meta_fam_melt only carries Trt3 (plot errored with "object 'Trt2' not
# found"); also the "postition" typo and deprecated fun.y (-> fun).
ggplot(meta_fam_melt,aes(x=factor(Trt3,level=c("Pre_C","C","Pre_RS","RS")),y=value,fill=variable))+
  geom_violin(position=position_dodge(1)) +
  stat_summary(fun=mean, geom="point",position=position_dodge(1)) +
  labs(x="Treatment",y="Relative Abundance") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black")) +
  facet_wrap(~variable)
meta_phy_melt <- metadata_w_tax[,c("X.SampleID","Trt3",phy_taxa)]
meta_phy_melt <- melt(meta_phy_melt)
# Phylum-level violins by treatment phase; black point = mean.
# Fixes: "postition" typo and deprecated fun.y (-> fun, ggplot2 >= 3.3.0).
ggplot(meta_phy_melt,aes(x=factor(Trt3,level=c("Pre_C","C","Pre_RS","RS")),y=value,fill=variable))+
  geom_violin(position=position_dodge(1)) +
  stat_summary(fun=mean, geom="point",position=position_dodge(1)) +
  labs(x="Treatment",y="Relative Abundance") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black")) +
  facet_wrap(~variable)
meta_div_melt <- metadata_w_tax[,c("X.SampleID","Trt3",c("Shannon_div","Chao1"))]
meta_div_melt <- melt(meta_div_melt)
div.labs <- c("Shannon Diversity", "Chao1 Richness")
names(div.labs) <- c("Shannon_div", "Chao1")
# Write the Shannon/Chao1 violin figure to PDF. Fixes: "postition" typo and
# deprecated fun.y (-> fun). NOTE(review): the ggplot object is wrapped in
# print() so the page is rendered even when this script is run via source(),
# where top-level auto-printing does not happen.
pdf("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/Figures/Violin_shannon_chao.pdf", width=24, height=10)
print(
ggplot(meta_div_melt,aes(x=factor(Trt3,level=c("Pre_C","C","Pre_RS","RS")),y=value,fill=variable))+
  geom_violin(position=position_dodge(1)) +
  stat_summary(fun=mean, geom="point",position=position_dodge(1)) +
  labs(x="Treatment",y="") +
  theme(plot.title = element_text(size=70, hjust = 0.5),
        axis.text=element_text(size=50), axis.title=element_text(size=60),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black"),
        strip.text = element_text(size=50)) +
  facet_wrap(~variable,scales = "free_y",labeller = labeller(variable=div.labs))
)
dev.off()
#P/B ratio calculation
#using log2 transformed values according to Roager et al (2014)
PB_ratio <- log(metadata_w_tax$g__Prevotella/metadata_w_tax$g__Bacteroides,2)
# Drop samples where either genus is absent (log of 0 or 0/0 -> Inf/NaN).
PB_ratio <- data.frame(PB_ratio=PB_ratio[!is.infinite(PB_ratio)&!is.na(PB_ratio)])
mean(PB_ratio$PB_ratio) #-2.620479
# Histogram of the finite P/B ratios; dashed blue line marks the mean.
ggplot(PB_ratio, aes(x=PB_ratio)) +
  geom_histogram(colour="black", fill="white",binwidth = 1.75) +
  #geom_density(alpha=.2, fill="#FF6666") +
  geom_vline(aes(xintercept=mean(PB_ratio)), color="blue", linetype="dashed", size=1) +
  labs(title="P/B ratio histogram",x="P/B ratio", y = "Count") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black"))
# NOTE(review): PB_ratio was filtered above, so it has fewer rows than
# metadata_w_tax — this cbind relies on recycling, and the next line reuses
# PB_ratio (now a data frame) as a logical mask of mismatched length.
# Verify these two lines actually select the intended samples.
PB_table <- cbind(X.SampleID=metadata_w_tax$X.SampleID,PB_ratio)
samps <- data.frame(PB_table[!is.infinite(PB_ratio) & !is.na(PB_ratio),])$X.SampleID
#Too many values are NA or infinite (only 16 numeric values), try at family level
metadata_w_tax$PB_ratio_fam <- log(metadata_w_tax$f__Prevotellaceae/metadata_w_tax$f__Bacteroidaceae,2)
#Values are the same, double checked otu tables and found that all Bacteroidaceae and Prevotellaceae were classified at genus level (no additional family level only classification)
# Scatter of log2 abundances for the samples with finite P/B ratios,
# colored by subject and shaped by treatment arm.
ggplot(metadata_w_tax[metadata_w_tax$X.SampleID %in% samps,],aes(x=log(g__Bacteroides,2),y=log(g__Prevotella,2),color=SubID))+
  geom_point(size=5,aes(shape=Trt)) +
  labs(x="log(Bacteroides)",y="log(Prevotella)") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black"))
#F/B ratio
# Firmicutes/Bacteroidetes ratio per sample (phylum-level relative abundance).
metadata_w_tax$FB_ratio <- metadata_w_tax$p__Firmicutes/metadata_w_tax$p__Bacteroidetes
# Scatter of the two phyla against each other (unchanged).
ggplot(metadata_w_tax,aes(x=p__Bacteroidetes,y=p__Firmicutes))+
  geom_point() +
  labs(x="Bacteroidetes (%)",y="Firmicutes (%)") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black")) 
# F/B ratio violins by treatment phase. Fixes: "postition" typo (the dodge
# was silently ignored) and deprecated fun.y (-> fun, ggplot2 >= 3.3.0).
ggplot(metadata_w_tax,aes(x=Trt3,y=FB_ratio))+
  geom_violin(position=position_dodge(1),fill="#F1C40F") +
  stat_summary(fun=mean, geom="point",position=position_dodge(1)) +
  labs(x="Treatment",y="Firmicutes/Bacteroidetes\nRatio") +
  theme(plot.title = element_text(size=25, hjust = 0.5),
        axis.text=element_text(size=15), axis.title=element_text(size=20),
        legend.position="none",panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"),
        axis.text.x = element_text(colour="black"),axis.text.y = element_text(colour="black"))
#Do abundances of taxa correlate with relevant health outcomes?
# Hmisc::rcorr does the correlation math; ggpubr/grid/gridExtra render the
# scatter panels; purrr/dplyr drive the outlier-masking helper below.
library(Hmisc)
library(dplyr)
library(corrplot)
library(Deducer)
library(ggpubr)
library(grid)
library(gridExtra)
library(purrr)
#FB ratio correlation with BMI at baseline?
metadata_base <- metadata_w_tax[(metadata_w_tax$Trt=="base"),]
rcorr(metadata_base$FB_ratio,metadata_base$BMI,type="pearson")$P #0.8737361, no correlation
#Get all taxa (not just those from DESeq)
#Get last filled tax level and make row names
otu_w_tax_id <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_tax_genusrel_OTUID.csv",sep=",",header = T)
otu_w_tax_id$X <- NULL #remove X column
tax_names <- c("Kingdom","Phylum","Class","Order","Family","Genus","Species")
# Deepest non-NA rank per OTU becomes its display name (e.g. genus if known).
tax_level <- apply(otu_w_tax_id[,tax_names], 1, function(x) tail(na.omit(x), 1))
otu_w_tax_id <- cbind(otu_w_tax_id, LastTax = tax_level)
rownames(otu_w_tax_id) <- paste(otu_w_tax_id$LastTax,otu_w_tax_id$Row.names,sep="_")
otu_w_tax_id[,c(tax_names,"Row.names","LastTax","MockComm")] <- NULL #remove non-numeric columns and Mock Community sample
all_otu_w_tax_id <- t(otu_w_tax_id)
#instances of [ or - mess up the function so have to remove these
# (brackets/hyphens are not syntactic column names, which breaks ggscatter)
colnames(all_otu_w_tax_id) <- gsub("\\[|\\]",replacement="",colnames(all_otu_w_tax_id))
colnames(all_otu_w_tax_id) <- gsub("-",replacement="",colnames(all_otu_w_tax_id))
# "_per" suffix tags these as relative (percent) abundances to distinguish
# them from the absolute counts bound in below.
colnames(all_otu_w_tax_id) <- paste(colnames(all_otu_w_tax_id),"per",sep="_")
metadata_w_alltax <- cbind(head(metadata,-1),all_otu_w_tax_id)
#with absolute abundance
otu_w_abstax_id <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/otu_w_tax_genus_OTUID.csv",sep=",",header = T)
otu_w_abstax_id$X <- NULL #remove X column
tax_names <- c("Kingdom","Phylum","Class","Order","Family","Genus","Species")
tax_level <- apply(otu_w_abstax_id[,tax_names], 1, function(x) tail(na.omit(x), 1))
otu_w_abstax_id <- cbind(otu_w_abstax_id, LastTax = tax_level)
rownames(otu_w_abstax_id) <- paste(otu_w_abstax_id$LastTax,otu_w_abstax_id$Row.names,sep="_")
otu_w_abstax_id[,c(tax_names,"Row.names","LastTax","MockComm")] <- NULL #remove non-numeric columns and Mock Community sample
all_otu_w_abstax_id <- t(otu_w_abstax_id)
#instances of [ or - mess up the function so have to remove these
colnames(all_otu_w_abstax_id) <- gsub("\\[|\\]",replacement="",colnames(all_otu_w_abstax_id))
colnames(all_otu_w_abstax_id) <- gsub("-",replacement="",colnames(all_otu_w_abstax_id))
metadata_w_alltax <- cbind(metadata_w_alltax,all_otu_w_abstax_id)
write.csv(metadata_w_alltax,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/metadata_w_alltax_absrel.csv")
#get top 20 most abundant OTUs
# Columns are sorted ascending by mean abundance, then all but the last 20
# (i.e. the 20 most abundant) are dropped.
means <- colMeans(all_otu_w_tax_id,na.rm=T)
top20_otu_w_tax_id <- all_otu_w_tax_id[,order(means)]
top20_otu_w_tax_id <- subset(top20_otu_w_tax_id,select = -c(1:(ncol(top20_otu_w_tax_id)-20)))
top20_tax <- colnames(top20_otu_w_tax_id)
metadata_w_top20tax <- cbind(head(metadata,-1),top20_otu_w_tax_id)
write.csv(metadata_w_top20tax,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/metadata_w_top20tax.csv")
#subset to only baseline taxa and replicate each value
base_samps <- metadata[which(metadata_w_alltax$Trt=="base"),]$X.SampleID
otu_w_tax_base <- all_otu_w_tax_id[which(rownames(all_otu_w_tax_id) %in% base_samps),]
#Subset to only top 20 taxa
base_means <- colMeans(otu_w_tax_base,na.rm=T)
top20_otu_w_tax_base <- otu_w_tax_base[,order(base_means)]
top20_otu_w_tax_base <- subset(top20_otu_w_tax_base,select = -c(1:(ncol(top20_otu_w_tax_base)-20)))
#instances of [ or - mess up the function so have to remove these
colnames(top20_otu_w_tax_base) <- gsub("\\[|\\]",replacement="",colnames(top20_otu_w_tax_base))
colnames(top20_otu_w_tax_base) <- gsub("-",replacement="",colnames(top20_otu_w_tax_base))
top20_tax_base <- colnames(top20_otu_w_tax_base)
# Each baseline row is repeated 4x — presumably one copy per study visit per
# subject; confirm the per-subject sample count really is 4.
top20_otu_w_tax_repbase <- as.data.frame(apply(top20_otu_w_tax_base,2,rep,each=4))
metadata_w_top20tax_base <- cbind(head(metadata,-1),top20_otu_w_tax_repbase)
write.csv(metadata_w_top20tax_base,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/metadata_w_top20tax_base.csv")
#subset to Pre-RS taxa and replicate each value
preRS_samps <- metadata[which(metadata_w_alltax$Trt3=="Pre_RS"),]$X.SampleID
otu_w_tax_preRS <- all_otu_w_tax_id[which(rownames(all_otu_w_tax_id) %in% preRS_samps),]
#Subset to only top 20 taxa
preRS_means <- colMeans(otu_w_tax_preRS,na.rm=T)
top20_otu_w_tax_preRS <- otu_w_tax_preRS[,order(preRS_means)]
top20_otu_w_tax_preRS <- subset(top20_otu_w_tax_preRS,select = -c(1:(ncol(top20_otu_w_tax_preRS)-20)))
#instances of [ or - mess up the function so have to remove these
colnames(top20_otu_w_tax_preRS) <- gsub("\\[|\\]",replacement="",colnames(top20_otu_w_tax_preRS))
colnames(top20_otu_w_tax_preRS) <- gsub("-",replacement="",colnames(top20_otu_w_tax_preRS))
top20_tax_preRS <- colnames(top20_otu_w_tax_preRS)
top20_otu_w_tax_rep_preRS <- as.data.frame(apply(top20_otu_w_tax_preRS,2,rep,each=4))
metadata_w_top20tax_preRS <- cbind(head(metadata,-1),top20_otu_w_tax_rep_preRS)
write.csv(metadata_w_top20tax_preRS,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/metadata_w_top20tax_preRS.csv")
#Health outcomes: glucose and insulin (fasting and iAUC), HOMA-IR, stool pH, hydrogen and methane (AUC), SCFA, BA
out <- c("GluciAUC","InsiAUC","Gluc0","Ins0","HOMAIR","stoolPH","CH4AUC","H2AUC",
         "total_SCFA", "acetate", "propionate", "butyrate", "acetate_per", "propionate_per", "butyrate_per",
         "total_BA", "total_primary_BA", "total_secondary_BA", "total_conj_BA", "CA", "CDCA", "TCDCA", "GCA",
         "GCDCA", "DCA", "LCA", "TDCA", "GDCA", "UDCA")
# Outcome subsets: glycemic, fermentation, short-chain fatty acid, bile acid.
glyc <- c("GluciAUC","InsiAUC","Gluc0","Ins0","HOMAIR")
ferm <- c("stoolPH","CH4AUC","H2AUC")
scfa <- c("total_SCFA", "acetate", "propionate", "butyrate", "acetate_per", "propionate_per", "butyrate_per")
bile <- c("total_BA", "total_primary_BA", "total_secondary_BA", "total_conj_BA", "CA", "CDCA", "TCDCA", "GCA",
          "GCDCA", "DCA", "LCA", "TDCA", "GDCA", "UDCA")
flattenCorrMatrix <- function(cormat, pmat) {
  # Convert square correlation (cormat) and p-value (pmat) matrices into a
  # long-format data frame with one row per unique variable pair, taking
  # only the upper triangle so each pair appears once.
  upper <- upper.tri(cormat)
  idx_row <- row(cormat)[upper]
  idx_col <- col(cormat)[upper]
  data.frame(
    row    = rownames(cormat)[idx_row],
    column = rownames(cormat)[idx_col],
    cor    = cormat[upper],
    p      = pmat[upper]
  )
}
outlierreplacement <- function(dataframe){
  # Mask boxplot-defined outliers: in every numeric column, values flagged by
  # boxplot.stats() are replaced with NA; non-numeric columns pass through
  # unchanged. Columns are re-bound with bind_cols (yielding a tibble).
  mask_col <- function(v) replace(v, v %in% boxplot.stats(v)$out, NA)
  bind_cols(map_if(dataframe, is.numeric, mask_col))
}
correltest <- function(data,vars,taxon,method){
  # Pairwise correlations (Hmisc::rcorr) among the outcome and taxa columns.
  #
  # Args:
  #   data:   data frame containing all `vars` and `taxon` columns
  #   vars:   character vector of outcome column names
  #   taxon:  character vector of taxa column names
  #   method: correlation type passed to rcorr ("pearson" or "spearman")
  # Returns a list with:
  #   corrtable: flattened upper-triangle pairs (row, column, cor, p), rounded
  #   rtable / ptable: full rounded correlation and p-value matrices
  # (Removed: commented-out browser()/data.matrix/outlier-replacement debug
  # residue that was never executed.)
  dat <- data[,c(vars,taxon)]
  corrtable <- rcorr(as.matrix(dat),type=method)
  rtable <- round(corrtable$r,2)
  ptable <- round(corrtable$P,2)
  corrtable <- flattenCorrMatrix(corrtable$r,corrtable$P)
  corrtable <- corrtable %>% mutate_if(is.numeric, ~round(., 2))
  list("corrtable"=corrtable,"rtable"=rtable,"ptable"=ptable)
}
corrscat <- function(data,vars,taxon,subset=NULL,method){
  # Build one ggpubr scatter plot (regression line + correlation coefficient)
  # for every significant (p <= 0.05) outcome-taxon pair found by correltest.
  #
  # Args:
  #   data:   data frame containing the outcome and taxon columns
  #   vars:   character vector of outcome column names (y axes)
  #   taxon:  character vector of taxon column names (x axes)
  #   subset: optional character vector restricting which outcomes to plot
  #   method: correlation method forwarded to correltest() and ggscatter()
  # Returns: a list of ggplot objects (empty if nothing is significant).
  dat <- correltest(data,vars,taxon,method)
  corr <- dat$corrtable
  sig <- corr[corr$p<=0.05,]
  sig <- sig[!is.na(sig$row),] #remove rows with NA
  sig <- sig[(sig$row %in% vars) & (sig$column %in% taxon),] #get rows with outcomes and taxa
  if(!is.null(subset)){
    sig <- sig[(sig$row %in% subset),]
  }
  sig <- sig %>% mutate_if(is.factor, as.character)
  sig <- sig[order(sig$row),]
  # Strip the trailing "_<OTU id>" / "_per" suffix for cleaner x-axis labels.
  x_labels <- sub("_[^_]+$", "", sig[,"column"])
  # Mask outliers so extreme points do not drive the fitted line.
  data <- outlierreplacement(data)
  plots <- list()
  # seq_len() instead of 1:nrow() — when no pair is significant, nrow(sig)
  # is 0 and 1:0 would iterate over c(1, 0) and error inside ggscatter.
  for(i in seq_len(nrow(sig))){
    yvar <- sig[i,1]
    xvar <- sig[i,2]
    plots[[i]] <- ggscatter(data, x = xvar, y = yvar,
                            add = "reg.line", conf.int = TRUE,
                            cor.coef = TRUE, cor.method = method,
                            xlab = x_labels[i], ylab = yvar)
  }
  return(plots)
}
# Correlate every taxon with every health outcome across all samples.
# NOTE(review): `all_tax` is not defined anywhere in this script — presumably
# colnames(all_otu_w_tax_id) (or the rel+abs union); define it before running.
alltax_out <- correltest(metadata_w_alltax,out,all_tax,method="pearson")
alltax_out_corr <- alltax_out$corrtable
alltax_out_sig <- alltax_out_corr[alltax_out_corr$p<=0.05,]
alltax_out_sig <- alltax_out_sig[!is.na(alltax_out_sig$row),] #remove rows with NA
alltax_out_sig <- alltax_out_sig[(alltax_out_sig$row %in% out) & (alltax_out_sig$column %in% all_tax),] #get rows with outcomes and taxa
#Plots for glycemic response
alltax_glyc_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=glyc,method="pearson")
grid.arrange(grobs=alltax_glyc_plots,ncol=4) #1000x700
#Plots for fermentation response
alltax_ferm_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=ferm,method="pearson")
grid.arrange(grobs=alltax_ferm_plots,ncol=3) #1015x600
#Plots for SCFA
alltax_scfa_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=scfa,method="pearson")
grid.arrange(grobs=alltax_scfa_plots,ncol=5)
#Plots for BA
alltax_bile_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=bile,method="pearson")
grid.arrange(grobs=alltax_bile_plots,ncol=6)
#Separate into categories: primary, secondary, conjugated, and totals
p_BA <- c("CA","CDCA")
s_BA <- c("DCA","LCA")
c_BA <- c("TCDCA", "GCA", "GCDCA", "TDCA", "GDCA", "UDCA")
tot_BA <- c("total_BA","total_conj_BA","total_primary_BA","total_secondary_BA")
alltax_pBA_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=p_BA,method="pearson")
grid.arrange(grobs=alltax_pBA_plots,ncol=3)
alltax_sBA_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=s_BA,method="pearson")
grid.arrange(grobs=alltax_sBA_plots,ncol=2)
alltax_cBA_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=c_BA,method="pearson")
grid.arrange(grobs=alltax_cBA_plots,ncol=6)
alltax_totBA_plots <- corrscat(metadata_w_alltax,out,all_tax,subset=tot_BA,method="pearson")
grid.arrange(grobs=alltax_totBA_plots,ncol=6)
#repeat for all baseline taxa and difference in outcomes
# For each outcome, add a diff_<outcome> column: RS-arm value minus C-arm
# value per subject, repeated 4x per subject to match the 4 samples/subject
# layout (assumed from the rep(each = 4) pattern used elsewhere — confirm).
# NOTE(review): metadata_w_alltax_base is not created in this script;
# presumably built like metadata_w_top20tax_base but with all baseline taxa.
# Fixes: seq_along() instead of 1:length(); the C/RS row subsets are
# loop-invariant (they only read the original outcome columns), so they are
# hoisted out of the loop.
metadata_C <- metadata_w_alltax_base[metadata_w_alltax_base$Trt=="C",]
metadata_RS <- metadata_w_alltax_base[metadata_w_alltax_base$Trt=="RS",]
for(i in seq_along(out)){
  diff_var <- paste("diff",out[i],sep="_")
  diff_val <- rep(metadata_RS[,out[i]] - metadata_C[,out[i]],each=4)
  metadata_w_alltax_base[,diff_var] <- diff_val
}
diff_var <- paste("diff",out,sep="_")
metadata_RS <- metadata_w_alltax_base[(metadata_w_alltax_base$Trt3=="RS"),] #Subset to only RS samples
#correlation
allbasetax_diffout <- correltest(metadata_RS,diff_var,all_tax_base,method="pearson")
allbasetax_diffout_corr <- allbasetax_diffout$corrtable
allbasetax_diffout_sig <- allbasetax_diffout_corr[allbasetax_diffout_corr$p<=0.05,]
allbasetax_diffout_sig <- allbasetax_diffout_sig[!is.na(allbasetax_diffout_sig$row),] #remove rows with NA
allbasetax_diffout_sig <- allbasetax_diffout_sig[(allbasetax_diffout_sig$row %in% diff_var) & (allbasetax_diffout_sig$column %in% all_tax_base),] #get rows with outcomes and taxa
allbasetax_diffout_plots <- corrscat(metadata_RS,diff_var,all_tax_base,method="pearson")
grid.arrange(grobs=allbasetax_diffout_plots,ncol=8)
#Get subsets of plots
# Select the diff-column names belonging to each outcome category by regex-matching
# the category's outcome names (glyc, ferm, scfa, bile vectors defined earlier).
diff_glyc <- unique (grep(paste(glyc,collapse="|"),
                          diff_var, value=TRUE))
diff_ferm <- unique (grep(paste(ferm,collapse="|"),
                          diff_var, value=TRUE))
diff_scfa <- unique (grep(paste(scfa,collapse="|"),
                          diff_var, value=TRUE))
diff_ba <- unique (grep(paste(bile,collapse="|"),
                        diff_var, value=TRUE))
#Glycemic outcomes
allbasetax_diffglyc_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_glyc,method="pearson")
grid.arrange(grobs=allbasetax_diffglyc_plots,ncol=2)
#Fermentation outcomes
allbasetax_diffferm_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_ferm,method="pearson")
grid.arrange(grobs=allbasetax_diffferm_plots,ncol=2)
#SCFA outcomes
allbasetax_diffscfa_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_scfa,method="pearson")
grid.arrange(grobs=allbasetax_diffscfa_plots,ncol=2)
#Bile acid outcomes
allbasetax_diffba_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_ba,method="pearson")
grid.arrange(grobs=allbasetax_diffba_plots,ncol=6)
#Subset by category
# Bile-acid subcategories: primary (p_BA), secondary (s_BA), conjugated (c_BA), total (tot_BA).
diff_pBA <- paste("diff",p_BA,sep="_")
diff_sBA <- paste("diff",s_BA,sep="_")
diff_cBA <- paste("diff",c_BA,sep="_")
diff_totBA <- paste("diff",tot_BA,sep="_")
allbasetax_diffpBA_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_pBA,method="pearson")
grid.arrange(grobs=allbasetax_diffpBA_plots,ncol=2)
allbasetax_diffsBA_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_sBA,method="pearson")
grid.arrange(grobs=allbasetax_diffsBA_plots,ncol=3)
allbasetax_diffcBA_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_cBA,method="pearson")
grid.arrange(grobs=allbasetax_diffcBA_plots,ncol=4)
allbasetax_difftotBA_plots <- corrscat(metadata_RS,diff_var,all_tax_base,subset=diff_totBA,method="pearson")
grid.arrange(grobs=allbasetax_difftotBA_plots,ncol=3)
#Repeat with top 20 and top 20 baseline correlation with fermentation and SCFAs
# Load metadata joined with the 20 most abundant taxa (all timepoints, and baseline only).
# NOTE(review): absolute user-specific paths; consider relative paths for portability.
metadata_w_top20tax <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/metadata_w_top20tax.csv",sep = ",",row.names = 1,stringsAsFactors = FALSE)
metadata_w_top20tax_base <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/metadata_w_top20tax_base.csv",sep = ",",row.names = 1,stringsAsFactors = FALSE)
# Pearson correlations: top-20 taxa vs. fermentation + SCFA outcomes (all samples).
top20tax_fermscfa <- correltest(metadata_w_top20tax,c(ferm,scfa),top20_tax,method="pearson")
top20tax_fermscfa_corr <- top20tax_fermscfa$corrtable
top20tax_fermscfa_corr <- top20tax_fermscfa_corr[!is.na(top20tax_fermscfa_corr$row),] #remove rows with NA
top20tax_fermscfa_corr <- top20tax_fermscfa_corr[(top20tax_fermscfa_corr$row %in% c(ferm,scfa)) & (top20tax_fermscfa_corr$column %in% top20_tax),] #get rows with outcomes and taxa
# Persist the filtered correlation table; it is re-read later when building heatmaps.
write.csv(top20tax_fermscfa_corr,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_fermscfa_corr.csv")
top20tax_fermscfa_sig <- top20tax_fermscfa_corr[top20tax_fermscfa_corr$p<=0.05,]
# Same analysis using the top-20 taxa measured at baseline.
top20tax_base_fermscfa <- correltest(metadata_w_top20tax_base,c(ferm,scfa),top20_tax_base,method="pearson")
top20tax_base_fermscfa_corr <- top20tax_base_fermscfa$corrtable
top20tax_base_fermscfa_corr <- top20tax_base_fermscfa_corr[!is.na(top20tax_base_fermscfa_corr$row),] #remove rows with NA
top20tax_base_fermscfa_corr <- top20tax_base_fermscfa_corr[(top20tax_base_fermscfa_corr$row %in% c(ferm,scfa)) & (top20tax_base_fermscfa_corr$column %in% top20_tax_base),] #get rows with outcomes and taxa
write.csv(top20tax_base_fermscfa_corr,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_base_fermscfa_corr.csv")
top20tax_base_fermscfa_sig <- top20tax_base_fermscfa_corr[top20tax_base_fermscfa_corr$p<=0.05,]
#Repeat with top 20 baseline and top 20 pre-RS correlation with fermentation and SCFAs after RS treatment only
# Restrict to samples collected after the resistant-starch (RS) treatment.
metadata_w_top20tax_base_RS <- subset(metadata_w_top20tax_base,metadata_w_top20tax_base$Trt3=="RS")
top20tax_base_fermscfa_RS <- correltest(metadata_w_top20tax_base_RS,c(ferm,scfa),top20_tax_base,method="pearson")
top20tax_base_fermscfa_RS_corr <- top20tax_base_fermscfa_RS$corrtable
top20tax_base_fermscfa_RS_corr <- top20tax_base_fermscfa_RS_corr[!is.na(top20tax_base_fermscfa_RS_corr$row),] #remove rows with NA
top20tax_base_fermscfa_RS_corr <- top20tax_base_fermscfa_RS_corr[(top20tax_base_fermscfa_RS_corr$row %in% c(ferm,scfa)) & (top20tax_base_fermscfa_RS_corr$column %in% top20_tax_base),] #get rows with outcomes and taxa
write.csv(top20tax_base_fermscfa_RS_corr,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_base_fermscfa_RS_corr.csv")
top20tax_base_fermscfa_RS_sig <- top20tax_base_fermscfa_RS_corr[top20tax_base_fermscfa_RS_corr$p<=0.05,]
# NOTE(review): metadata_w_top20tax_preRS and top20_tax_preRS are not created in the
# visible portion of this script -- confirm they are loaded/defined earlier.
metadata_w_top20tax_preRS_RS <- subset(metadata_w_top20tax_preRS,metadata_w_top20tax_preRS$Trt3=="RS")
top20tax_preRS_fermscfa_RS <- correltest(metadata_w_top20tax_preRS_RS,c(ferm,scfa),top20_tax_preRS,method="pearson")
top20tax_preRS_fermscfa_RS_corr <- top20tax_preRS_fermscfa_RS$corrtable
top20tax_preRS_fermscfa_RS_corr <- top20tax_preRS_fermscfa_RS_corr[!is.na(top20tax_preRS_fermscfa_RS_corr$row),] #remove rows with NA
top20tax_preRS_fermscfa_RS_corr <- top20tax_preRS_fermscfa_RS_corr[(top20tax_preRS_fermscfa_RS_corr$row %in% c(ferm,scfa)) & (top20tax_preRS_fermscfa_RS_corr$column %in% top20_tax_preRS),] #get rows with outcomes and taxa
write.csv(top20tax_preRS_fermscfa_RS_corr,"/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_preRS_fermscfa_RS_corr.csv")
top20tax_preRS_fermscfa_RS_sig <- top20tax_preRS_fermscfa_RS_corr[top20tax_preRS_fermscfa_RS_corr$p<=0.05,]
#Create heatmaps for top 20 taxa correlations
# Re-load each saved correlation table and reshape its $cor column into an
# outcomes (rows) x taxa (columns) matrix.
# NOTE(review): matrix() fills column-major, so this is correct only if the
# correltest() output cycles outcomes fastest within each taxon -- TODO confirm.
# NOTE(review): the four blocks below are identical apart from names; a small
# helper function would remove the duplication.
top20tax_fermscfa_corr <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_fermscfa_corr.csv",sep = ",",row.names = 1,stringsAsFactors = FALSE)
top20_matrix <- matrix(data = top20tax_fermscfa_corr$cor, nrow = length(unique(top20tax_fermscfa_corr$row)), ncol = length(unique(top20tax_fermscfa_corr$column)))
rownames(top20_matrix) <- unique(top20tax_fermscfa_corr$row)
colnames(top20_matrix) <- unique(top20tax_fermscfa_corr$column)
# Shorten taxon labels: keep text up to the captured "_token" and drop the rest.
colnames(top20_matrix) <- gsub("(_[^_]+)_.*", "\\1", colnames(top20_matrix))
top20tax_base_fermscfa_corr <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_base_fermscfa_corr.csv",sep = ",",row.names = 1,stringsAsFactors = FALSE)
top20_base_matrix <- matrix(data = top20tax_base_fermscfa_corr$cor, nrow = length(unique(top20tax_base_fermscfa_corr$row)), ncol = length(unique(top20tax_base_fermscfa_corr$column)))
rownames(top20_base_matrix) <- unique(top20tax_base_fermscfa_corr$row)
colnames(top20_base_matrix) <- unique(top20tax_base_fermscfa_corr$column)
colnames(top20_base_matrix) <- gsub("(_[^_]+)_.*", "\\1", colnames(top20_base_matrix))
top20tax_base_RS_fermscfa_corr <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_base_fermscfa_RS_corr.csv",sep = ",",row.names = 1,stringsAsFactors = FALSE)
top20_base_RS_matrix <- matrix(data = top20tax_base_RS_fermscfa_corr$cor, nrow = length(unique(top20tax_base_RS_fermscfa_corr$row)), ncol = length(unique(top20tax_base_RS_fermscfa_corr$column)))
rownames(top20_base_RS_matrix) <- unique(top20tax_base_RS_fermscfa_corr$row)
colnames(top20_base_RS_matrix) <- unique(top20tax_base_RS_fermscfa_corr$column)
colnames(top20_base_RS_matrix) <- gsub("(_[^_]+)_.*", "\\1", colnames(top20_base_RS_matrix))
top20tax_preRS_RS_fermscfa_corr <- read.csv("/Users/Riley/Box Sync/Riley's Documents/Keim Lab/Resistant Starch/RS Data/Microbiome/DADA2_350_unfiltered/top20tax_preRS_fermscfa_RS_corr.csv",sep = ",",row.names = 1,stringsAsFactors = FALSE)
top20_preRS_RS_matrix <- matrix(data = top20tax_preRS_RS_fermscfa_corr$cor, nrow = length(unique(top20tax_preRS_RS_fermscfa_corr$row)), ncol = length(unique(top20tax_preRS_RS_fermscfa_corr$column)))
rownames(top20_preRS_RS_matrix) <- unique(top20tax_preRS_RS_fermscfa_corr$row)
colnames(top20_preRS_RS_matrix) <- unique(top20tax_preRS_RS_fermscfa_corr$column)
colnames(top20_preRS_RS_matrix) <- gsub("(_[^_]+)_.*", "\\1", colnames(top20_preRS_RS_matrix))
library("RColorBrewer")
library("pheatmap")
# 256-step diverging palette interpolated from the 9-class RdBu Brewer palette.
col <- colorRampPalette(brewer.pal(9, "RdBu"))(256)
newnames_20 <- lapply(colnames(top20_matrix),function(x) bquote(italic(.(x)))) #make taxa names italic
# Heatmaps of taxa (rows, via t()) vs. outcomes (columns); no clustering, no scaling.
# NOTE(review): 'margins' is not a documented pheatmap() argument (it belongs to
# heatmap()/heatmap.2()) and is likely ignored -- confirm intent.
top20_heatmap <- pheatmap(t(top20_matrix), cluster_rows = FALSE, cluster_cols = FALSE,
                          col = col, scale="none", margins=c(5,10),angle_col = "45",
                          labels_row = as.expression(newnames_20))
newnames_20base <- lapply(colnames(top20_base_matrix),function(x) bquote(italic(.(x))))
top20_base_heatmap <- pheatmap(t(top20_base_matrix), cluster_rows = FALSE, cluster_cols = FALSE,
                               col = col, scale="none", margins=c(5,10),angle_col = "45",
                               labels_row = as.expression(newnames_20base))
newnames_20baseRS <- lapply(colnames(top20_base_RS_matrix),function(x) bquote(italic(.(x))))
top20_base_RS_heatmap <- pheatmap(t(top20_base_RS_matrix), cluster_rows = FALSE, cluster_cols = FALSE,
                                  col = col, scale="none", margins=c(5,10),angle_col = "45",
                                  labels_row = as.expression(newnames_20baseRS))
newnames_20preRSRS <- lapply(colnames(top20_preRS_RS_matrix),function(x) bquote(italic(.(x))))
top20_preRS_RS_heatmap <- pheatmap(t(top20_preRS_RS_matrix), cluster_rows = FALSE, cluster_cols = FALSE,
                                   col = col, scale="none", margins=c(5,10),angle_col = "45",
                                   labels_row = as.expression(newnames_20preRSRS))
|
7e0f75ab3e0af0e9b3e3466016b36bdb0d79268c | 4c58470c06204ed5a4ec2130f3ab233b3e58f006 | /E-Enrichment/runCactus.R | d8d3f621fd440601ee45b11cef390d078618f532 | [] | no_license | kdauria/IandI | e05cec6d3b7480883cad66bc0399f40da54764a4 | 059d41bd60df1e250f6e052ec1cf00964c1bd779 | refs/heads/master | 2016-09-05T20:34:44.276807 | 2014-03-10T22:19:55 | 2014-03-10T22:19:55 | 17,610,748 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,436 | r | runCactus.R | # A script that runs cyberT for all the different two class comparisons
# Load preprocessed expression sets plus gene-set and ID-mapping annotation objects,
# and source the cactus() enrichment wrapper.
load("./data/RData/esets.RData")
load("./data/annotation.RData/MsAnnGO.RData")
load("./data/annotation.RData/MsAnn.RData")
load("./data/annotation.RData/GSAnn.RData")
source("./E-Enrichment/cactus.R")
# Arrays were preprocessed in groups designated by time point (e.g. 2h, 6h or 16h)
# Here combine them only for the sake of simpler code
# Combine the three time-point ExpressionSets into one by binding the expression
# matrices column-wise and stacking the phenotype data.
# NOTE(review): requires Biobase (exprs/pData/ExpressionSet), presumably attached
# by one of the sourced/loaded dependencies -- confirm.
eset = new("ExpressionSet")
e1 = esets[["2hr"]]
e2 = esets[["6hr"]]
e3 = esets[["16hr"]]
exprs(eset) = cbind(exprs(e1),exprs(e2),exprs(e3))
pData(eset) = rbind(pData(e1),pData(e2),pData(e3))
# Two-class comparisons: each treatment group vs. its time-matched sham control.
comparisons = list( c("A2","Sham2"), c("B2","Sham2"), c("AB2","Sham2"),
                    c("A6","Sham6"), c("B6","Sham6"),
                    c("A16","Sham16"), c("B16","Sham16") )
# Probe set to gene mapping
mapping = MsAnn$MGI$Affy
# Run camera on different groups of gene sets
result = list()
result$mf = cactus(eset, MsAnnGO$MF$MGI, comparisons, mapping )
result$bp = cactus(eset, MsAnnGO$BP$MGI, comparisons, mapping )
result$cc = cactus(eset, MsAnnGO$CC$MGI, comparisons, mapping )
# Combined GO run across all three ontologies (CC + BP + MF).
result$go = cactus(eset, c(MsAnnGO$CC$MGI,MsAnnGO$BP$MGI,MsAnnGO$MF$MGI),
                   comparisons, mapping )
# Reactome gene sets are keyed by human Entrez IDs, so use the human-Entrez-to-Affy map.
result$reactome = cactus(eset, GSAnn$REACTOME$HumanENTREZ, comparisons,
                         MsAnn$HumanENTREZ$Affy )
cactus.result = result
save(cactus.result, file="./data/RData/cactus.RData")
|
5f65a24871cf30a878b3ab8a798fb49dbd98aea5 | 98711956527e728a301fb6f66e00d199be877871 | /man/make_small_netsim.Rd | 3b7de709e46cf8623179fb7a208e1f2512481db6 | [] | no_license | EmilyPo/ddaf | 534d6201e094fecc9a86c6d9157c76b45951be07 | b766216e1bee7556c8e4fb627b97af29fecd9381 | refs/heads/master | 2023-02-11T01:17:58.104864 | 2021-01-08T17:00:58 | 2021-01-08T17:00:58 | 289,988,230 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 354 | rd | make_small_netsim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_small_netsim.R
\name{make_small_netsim}
\alias{make_small_netsim}
\title{Netsim Size Reduction}
\usage{
make_small_netsim(x)
}
\arguments{
\item{x}{The netsim simulation output object.}
}
\description{
Takes netsim output objects and removes the cel.complete element and miscellaneous variables that are not currently needed, reducing object size.
}
|
53ce1b7ed6293ae858741223ebac395fc71cb7d6 | d5fce963d5b6daf690ef499760b3a1fd43a00f65 | /chap05/exam0512.R | 09e80f24482f704847501b6d397cc8587948e5d0 | [] | no_license | KSDeng/Mathematical-Modeling-with-R | b4ab63952e0fb16bf647d46a01b3a90c532de497 | 8803a60744d6fa07a77f749cb19da2699e0f631b | refs/heads/master | 2020-04-17T14:34:03.813817 | 2019-01-20T13:08:05 | 2019-01-20T13:08:05 | 166,661,782 | 0 | 0 | null | null | null | null | ISO-8859-7 | R | false | false | 730 | r | exam0512.R | E<-c( 0,1, 0,4, 0,6, 0,5, 0,2, 1,4, 2,3, 2,5,
      2,6,  3,4,  3,7,  4,9,  4,6,  5,8,  5,11, 6,7,
      6,10, 6,5,  7,9,  7,10, 7,8,  8,10, 8,15, 8,11,
      9,12, 9,15, 9,10, 10,13, 10,11, 11,13, 11,15, 11,14,
      12,15, 13,12, 13,15, 13,14, 14,15) + 1
# E is a flat edge list of 0-based vertex pairs; "+ 1" above converts to igraph's
# 1-based vertex ids. Vertex 1 is labeled "D" (source) and vertex 16 "Y" (sink).
g<-graph(E)
# Minimum number of vertices / edges whose removal disconnects D (1) from Y (16).
vertex.connectivity(g, source = 1, target = 16)
edge.connectivity(g, source = 1, target = 16)
# Global connectivity of the undirected version of the graph.
vertex.connectivity(as.undirected(g))
edge.connectivity(as.undirected(g))
# Maximum numbers of vertex-/edge-disjoint D-to-Y paths (equal to the two
# source-target connectivities above, by Menger's theorem).
vertex.disjoint.paths(g, source=1, target=16)
edge.disjoint.paths(g, source=1, target=16)
## Plot the graph (translated; the original comment text was mis-encoded)
par(mai=c(0,0,0,0) )
V(g)$label<-c("D", 1:14, "Y")
plot(g, layout = layout.fruchterman.reingold)
plot(g, layout = layout.kamada.kawai)
|
2eaf1d34c45faef0af3e9ca0b38474f0df4431b1 | f7a830d854fe7fa46f328f5586292f1bff8c2d68 | /man/Npct.Rd | b7924a75c955087a4e6ebd36868d49a48d96d76f | [] | no_license | cran/cursory | ef81c7f1090296b565d0d1fb6bd81b42fe05bf5d | 3980e6182ab9da4670976f0e2eb596ce20aab521 | refs/heads/master | 2020-12-21T22:10:39.145401 | 2019-08-22T07:40:02 | 2019-08-22T07:40:02 | 236,578,830 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,515 | rd | Npct.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Npct.R
\docType{methods}
\name{Npct}
\alias{Npct}
\alias{Npct,logical,missing-method}
\alias{Npct,logical,logical-method}
\alias{Npct,integer,integer-method}
\alias{Npct,numeric,numeric-method}
\title{Combine count with a percent}
\usage{
Npct(x, n, ...)
\S4method{Npct}{logical,missing}(x, n, ...)
\S4method{Npct}{logical,logical}(x, n, ...)
\S4method{Npct}{integer,integer}(x, n, ...)
\S4method{Npct}{numeric,numeric}(x, n, ...)
}
\arguments{
\item{x}{count or object to count}
\item{n}{see methods.}
\item{...}{Arguments controlling how the percent is formatted. See format.percent.}
}
\value{
A character vector formatted with number and percent of successes.
}
\description{
Combine count with a percent
}
\section{Methods (by class)}{
\itemize{
\item \code{x = logical,n = missing}: Count and give percent of \code{TRUE} from a logical vector.
\item \code{x = logical,n = logical}: Count and percent of a logical filtered by a second logical.
\item \code{x = integer,n = integer}: Provided with count(s) of cases and total(s)
\item \code{x = numeric,n = numeric}: Provided the actual count and the percent.
}}
\examples{
# Vector of cases only.
Npct(c(TRUE, FALSE, TRUE))
# Cases with indices
Npct( c(TRUE,FALSE,TRUE,FALSE,TRUE), c(TRUE,TRUE,TRUE,FALSE,FALSE))
# Successes/Total
Npct(2L, 3L)
# Count + percent directly, count must be integerish.
Npct(2, 2/3)
}
|
ec0042da54684821085d15a8da051adb83813012 | ad94d4f6d78af4dfe28187ec5d2d6a30ce699d22 | /tests/testthat/test_followup_priorities.R | eee90297e21dbe1a65f23f94a54528e91a4e31bf | [
"MIT"
] | permissive | ffinger/followup | e6e4456cc719ce105eaae567583f9e6587217bd0 | 7081a6cf24574ea0fa485c35026281dbc1cc44da | refs/heads/master | 2021-06-19T09:20:45.419451 | 2021-01-27T14:56:59 | 2021-01-27T15:02:22 | 170,578,436 | 3 | 1 | NOASSERTION | 2019-03-01T17:37:18 | 2019-02-13T20:57:00 | R | UTF-8 | R | false | false | 5,252 | r | test_followup_priorities.R | context("Testing followup_priorities function")
# Rank values in decreasing order: the largest element gets rank 1.
# Ties are broken by first occurrence (order() is stable).
#
# Fixes vs. original: the result vector is preallocated as an integer vector of
# the right length instead of growing a scalar NA via indexed assignment, so an
# empty input now returns integer(0) rather than a single NA.
myrank <- function(x) {
  res <- integer(length(x))
  res[order(x, decreasing = TRUE)] <- seq_along(x)
  res
}
# Discrete incubation-period pmf over days 1..5 after exposure (sums to 1).
inc <- c(0.1, 0.15, 0.2, 0.25, 0.3)
date_analysis <- as.Date("2019-02-15")
# Four contacts: no follow-up yet, followed up 4 days / 2 days / 0 days before analysis.
contact_list <- data.frame(
  date_last_followup = as.Date(c(NA, "2019-02-11", "2019-02-13", "2019-02-15"))
)
# Each contact has the same three possible exposure dates (averaged over below).
contact_list$dates_exposure <- list(
  as.Date(c("2019-02-11", "2019-02-12", "2019-02-13")),
  as.Date(c("2019-02-11", "2019-02-12", "2019-02-13")),
  as.Date(c("2019-02-11", "2019-02-12", "2019-02-13")),
  as.Date(c("2019-02-11", "2019-02-12", "2019-02-13"))
)
# Constant probability that a contact actually develops disease.
p_d <- 0.3
# Variant contact list with per-contact disease probabilities.
contact_list_2 <- contact_list
contact_list_2$p_disease <- c(0.2, 0.3, 0.1, 0.4)
# Hand-computed expected onset probabilities: for each contact, average the
# cumulative incubation pmf over the 3 exposure dates, conditioning on no onset
# before the last follow-up (hence the division for contact 3; contact 4 was
# seen on the analysis date, so 0).
expected_p_onset <- c(
  (0.1/3 + (0.1+0.15)/3 + (0.1+0.15+0.2)/3 + (0.15+0.2+0.25)/3),
  (0.1/3 + (0.1+0.15)/3 + (0.1+0.15+0.2)/3 + (0.15+0.2+0.25)/3),
  ((0.1+0.15+0.2)/3 + (0.15+0.2+0.25)/3) / (1 - (0.1/3 + (0.1+0.15)/3)),
  0
)
# Symptom probability = onset probability scaled by the disease probability.
expected_p_symptoms <- expected_p_onset * p_d
expected_p_symptoms_2 <- expected_p_onset * contact_list_2$p_disease
# Expected values when follow-up dates are ignored (last_followup = NULL).
expected_p_onset_3 <- rep((0.1/3 + (0.1+0.15)/3 + (0.1+0.15+0.2)/3 + (0.15+0.2+0.25)/3),4)
expected_p_symptoms_3 <- expected_p_onset_3 * contact_list_2$p_disease
# Expected values for the first 3 contacts when the last follow-up day itself
# is excluded (include_last_follow_up = FALSE).
expected_p_onset_4 <- c(
  (0.1/3 + (0.1+0.15)/3 + (0.1+0.15+0.2)/3 + (0.15+0.2+0.25)/3),
  ((0.1+0.15)/3 + (0.1+0.15+0.2)/3 + (0.15+0.2+0.25)/3) / (1 - 0.1/3),
  ((0.15+0.2+0.25)/3) / (1 - (0.1/3 + (0.1+0.15)/3 + (0.1+0.15+0.2)/3))
)
expected_p_symptoms_4 <- expected_p_onset_4 * contact_list_2$p_disease[1:3]
# Core behavior: computed probabilities and the derived priority ranking match
# the hand-computed fixtures for constant and per-contact disease probabilities.
test_that("output is right for constant p_disease", {
  fp <- followup_priorities(contact_list, dates_exposure, last_followup = date_last_followup, p_disease = p_d, incubation_period = inc, date_analysis = date_analysis, include_last_follow_up = TRUE, sort = FALSE)
  expect_equal(fp$p_onset, expected_p_onset)
  expect_equal(fp$p_symptoms, expected_p_symptoms)
  expect_equal(fp$followup_priority, myrank(expected_p_symptoms))
})
test_that("output is right for varying p_disease", {
  fp <- followup_priorities(contact_list_2, dates_exposure, last_followup = date_last_followup, p_disease = p_disease, incubation_period = inc, date_analysis = date_analysis, include_last_follow_up = TRUE, sort = FALSE)
  expect_equal(fp$p_onset, expected_p_onset)
  expect_equal(fp$p_symptoms, expected_p_symptoms_2)
  expect_equal(fp$followup_priority, myrank(expected_p_symptoms_2))
})
# With sort = TRUE the rows come back ordered by descending symptom probability.
test_that("output is right for varying p_disease with sort = TRUE", {
  fp <- followup_priorities(contact_list_2, dates_exposure, last_followup = date_last_followup, p_disease = p_disease, incubation_period = inc, date_analysis = date_analysis, include_last_follow_up = TRUE, sort = TRUE)
  expect_equal(fp$p_onset, expected_p_onset[order(expected_p_symptoms_2, decreasing = TRUE)])
  expect_equal(fp$p_symptoms, expected_p_symptoms_2[order(expected_p_symptoms_2, decreasing = TRUE)])
  expect_equal(fp$followup_priority, 1:4)
})
# last_followup = NULL: no conditioning on previous follow-ups.
test_that("output is right for varying p_disease with date last follow up null", {
  fp <- followup_priorities(contact_list_2, dates_exposure, last_followup = NULL, p_disease = p_disease, incubation_period = inc, date_analysis = date_analysis, include_last_follow_up = TRUE, sort = FALSE)
  expect_equal(fp$p_onset, expected_p_onset_3)
  expect_equal(fp$p_symptoms, expected_p_symptoms_3)
  expect_equal(fp$followup_priority, myrank(expected_p_symptoms_3))
})
# include_last_follow_up = FALSE: the follow-up day itself is excluded (contacts 1-3 only).
test_that("output is right for varying p_disease with date last follow up null and include_last_follow_up = FALSE", {
  fp <- followup_priorities(contact_list_2[1:3,], dates_exposure, last_followup = date_last_followup, p_disease = p_disease, incubation_period = inc, date_analysis = date_analysis, include_last_follow_up = FALSE, sort = FALSE)
  expect_equal(fp$p_onset, expected_p_onset_4)
  expect_equal(fp$p_symptoms, expected_p_symptoms_4)
  expect_equal(fp$followup_priority, myrank(expected_p_symptoms_4))
})
# Edge cases around date_analysis relative to the exposure/follow-up dates.
test_that("error if analysis date before first exposure date past", {
  expect_error(
    followup_priorities(contact_list_2, dates_exposure, last_followup = NULL, p_disease = p_disease, incubation_period = inc, date_analysis = as.Date("2019-01-01"), include_last_follow_up = TRUE, sort = FALSE),
    "date_analysis before first exposure date."
  )
})
# Follow-up dates after the analysis date should be warned about and ignored,
# leaving zero probabilities on the first exposure day.
test_that("no error if analysis date >= first exposure date and before all followup dates", {
  expect_warning(
    fp <- followup_priorities(contact_list_2, dates_exposure, last_followup = date_last_followup, p_disease = p_disease, incubation_period = inc, date_analysis = as.Date("2019-02-11"), include_last_follow_up = TRUE, sort = FALSE),
    "Some followup dates are after the analysis date. Ignoring them."
  )
  expect_equal(fp$p_onset, rep(0,4))
  expect_equal(fp$p_symptoms, rep(0,4))
})
# Far-future analysis date: onset is certain, so p_symptoms equals p_disease.
test_that("output is right for varying p_disease and analysis date far in future", {
  fp <- followup_priorities(contact_list_2, dates_exposure, last_followup = NULL, p_disease = p_disease, incubation_period = inc, date_analysis = as.Date("2019-04-01"), include_last_follow_up = TRUE, sort = FALSE)
  expect_equal(fp$p_onset, rep(1,4))
  expect_equal(fp$p_symptoms, c(0.2, 0.3, 0.1, 0.4))
})
#TODO
#testing argument types and presence
#include_last_follow_up = FALSE
|
718cd590a1c8a6fe285a3f50ac8e28e847ea5d24 | 7513d0e80bd56a2f39cc73ad8c079aa78e13ac16 | /man/runSelectedCellsWithReplication.Rd | a69880473dccef725c0ff540ef8e4627a04892ac | [
"MIT"
] | permissive | mihaiconstantin/simulation-response-shift | 4fdc445947c076e9de231953d142a0952a9e63b2 | b94582b4ce612153eb5abdcdee3a2edf89af79e6 | refs/heads/master | 2022-01-12T02:47:47.495414 | 2019-06-25T16:26:48 | 2019-06-25T16:26:48 | 91,359,532 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 719 | rd | runSelectedCellsWithReplication.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{runSelectedCellsWithReplication}
\alias{runSelectedCellsWithReplication}
\title{Applies and replicates the procedure for a selected number of cells}
\usage{
runSelectedCellsWithReplication(selected_cells, design_replications)
}
\arguments{
\item{selected_cells}{(numeric matrix) The selected cell configurations to be run, where
the columns respect the order indicated above.}
\item{design_replications}{(int) The number of times the selected cells will be replicated.}
}
\description{
Very similar to the runSelectedCells() function; the only difference
is that it also replicates the selected cells \code{n} times.
}
|
faf1b2c688b7ae98c2f1e5ca651ee3843f265371 | f3e487f20747e2debe11b32559582b6af04a24be | /complete.R | 282f9f09dbb2f8c2acc37baafb6131f461ade7f1 | [] | no_license | pcooman/Coursera_RProgramming_Project1 | 0db1de1fb2da5e517174776f358c8427fc09c048 | dec075db5c062992d5430f18ba0489bfa677cb39 | refs/heads/master | 2021-01-19T07:38:41.338110 | 2015-06-09T22:57:14 | 2015-06-09T22:57:14 | 37,163,098 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,021 | r | complete.R | work_dir <- getwd()
data_dir <- paste(work_dir,"/specdata",sep="")
complete <- function(directory, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
## x <-data.frame()
index<-c()
nobs<-c()
for (i in id) {
file_name <- paste(formatC(i, width=3, flag="0"),".csv",sep="")
file_path <- paste(directory,"/",file_name,sep="")
data_raw <- read.csv(file_name)
index <- rbind(index,i)
cumSum <- 0
# check for any NA's
for (j in 1:nrow(data_raw)) {
cumSum = cumSum + !any(is.na(data_raw[j,]))
}
nobs <- rbind(nobs,cumSum)
}
x <- cbind(index,nobs)
colnames(x) <- c("id","nobs")
rownames(x) <- 1:length(id)
x <- as.data.frame(x)
} |
8a4672823ef78de257b1549c429c8bed348b57c4 | 5a5032cdcbfde767e7e351475de1d7a6f55b9519 | /server.R | b22b1e325e7bb6dbceb949b2f6e8eaffec58efee | [] | no_license | maryxhuang/Project_1_ShinyApp | 8041babd86f554f9a52f96f74eafc221974aa2b1 | f2c9baa4dc8528a66ab8ba3922d66995bc5c1368 | refs/heads/master | 2020-03-23T22:18:54.698219 | 2018-07-25T02:35:29 | 2018-07-25T02:35:29 | 142,168,003 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,121 | r | server.R | shinyServer(function(input, output){
# MenuItem: "Overview"
output$overall <- renderPlot({
ushr %>%
group_by(., Year) %>% summarise(., incidence = n()) %>%
ggplot(aes(x = Year)) +
geom_line(aes(y = incidence), color = "#9933FF", size = 1) + xlab('Year') + ylab('Incidences') +
ggtitle('Total Number of Incidents') + theme_bw() + scale_y_continuous(labels = scales::comma)
}, width = 500, height = 300)
output$state_top10 <- renderPlot({
ushr %>%
group_by(., State) %>% summarise(., incidence = n(), solved = sum(Crime.Solved=="Yes"), unsolved = sum(Crime.Solved=="No")) %>%
top_n(10) %>%
ggplot(aes(x = reorder(State, incidence))) +
geom_bar(aes(x = reorder(State, incidence), y = incidence), fill = "#00BFC4", color = "lightblue4", stat = "identity", alpha = 0.2) +
geom_bar(aes(x = reorder(State, incidence), y = unsolved), fill = "#F8766D", color = "red", stat = "identity", alpha = 0.6) +
ggtitle("Top 10 States: Total Incidences and Unsolved Cases") + coord_flip() +
xlab("Number of Incidence") + scale_y_continuous(labels = scales::comma, limits = c(0, 110000))
}, width = 500, height = 300)
map_yearly_info <- reactive({
ushr %>%
filter(., Year == input$year_selected) %>%
group_by(., State) %>% summarise(., Unsolved.Rate = sum(Crime.Solved=='No')/n())
})
output$map <- renderGvis({
gvisGeoChart(map_yearly_info(), "State", "Unsolved.Rate",
options = list(region = "US", displayMode = "regions", resolution = "provinces",
width = "auto", height = "auto", title = 'Yearly Unsolved Rate',
colorAxis = "{colors:['#99CCFF', '#FFFFCC', '#FF3333']}"))
})
unsolved_ratio <- ushr %>% summarise(., unsolved_rate = sum(Crime.Solved == 'No')/n())
output$avgBoxAall <- renderInfoBox({
infoBox("AVG. 1980-2014 Unsolved Rate", paste0(round(unsolved_ratio$unsolved_rate*100, digits = 2), "%"), icon = icon("calculator"))
})
output$maxBox <- renderInfoBox({
max_value <- max(map_yearly_info()$Unsolved.Rate)
max_state <- map_yearly_info()$State[map_yearly_info()$Unsolved.Rate == max_value]
infoBox(max_state, paste0(round(max_value*100, digits = 2), "%"), icon = icon("arrow-circle-up"))
})
output$minBox <- renderInfoBox({
min_value <- min(map_yearly_info()$Unsolved.Rate)
min_state <- map_yearly_info()$State[map_yearly_info()$Unsolved.Rate == min_value]
infoBox(min_state, paste0(round(min_value*100, digits = 2), "%"), icon = icon("arrow-circle-down"))
})
output$avgBox <- renderInfoBox({
mean_value <- mean(map_yearly_info()$Unsolved.Rate)
infoBox(paste0("AVG.", input$year_selected), paste0(round(mean_value*100, digits = 2), "%"), icon = icon("balance-scale"))
})
### MenuItem: "Details", details
## Age
age_per_data <- ushr %>%
filter(., Crime.Solved == "Yes") %>% select(., Perpetrator.Age) %>% filter(., Perpetrator.Age > 5) %>%
group_by(., Age = Perpetrator.Age) %>% summarise(., age_per_count = n()) %>% arrange(., Age)
age_vic_data <- ushr %>%
filter(., Crime.Solved == "Yes") %>% select(., Victim.Age) %>% filter(., Victim.Age < 99) %>%
group_by(., Age = Victim.Age) %>% summarise(., age_vic_count = n()) %>% arrange(., Age)
age_all_data <- full_join(age_per_data, age_vic_data, by='Age')
age_all_data <- na.omit(age_all_data)
output$age_all_line <- renderPlot({
ggplot(data = age_all_data, aes(x = Age)) + theme_bw() +
geom_line(aes(y = age_per_count, color = 'age_per_count'), size = 1) +
geom_line(aes(y = age_vic_count, color = 'age_vic_count'), size = 1) +
ggtitle('Age Distributions (all years)') + theme(legend.title = element_blank()) + xlab('Age') + ylab('Total Numbers') + scale_y_continuous(labels = scales::comma) +
scale_colour_manual("", values=c("#F8766D","#00BFC4"), breaks=c('age_per_count', 'age_vic_count'), labels=c('Perpetrator', 'Victim'))
}, width = 500, height = 300)
output$age_all_stat1 <- renderTable({
age_all_data %>%
summarise(., Mean.Per.Age = weighted.mean(Age, age_per_count),
Sd.Per.Age = sqrt(sum(age_per_count*(Age - weighted.mean(Age, age_per_count))^2)/sum(age_per_count)),
Mean.Vic.Age = weighted.mean(Age, age_vic_count),
Sd.Vic.Age = sqrt(sum(age_vic_count*(Age - weighted.mean(Age, age_vic_count))^2)/sum(age_vic_count)))
})
output$age_all_stat2 <- renderTable({
age_all_data %>%
summarise(., Percent.Per.under18 = 100*sum(subset(age_per_count, Age < 18))/sum(age_per_count),
Percent.Vic.under18 = 100*sum(subset(age_vic_count, Age < 18))/sum(age_vic_count))
})
solved_cases <- reactive({
ushr %>% filter(., Crime.Solved == "Yes")
})
age_year_info_per <- reactive({
solved_cases() %>%
filter(., Year == input$year_selected) %>% select(., Perpetrator.Age) %>% filter(., Perpetrator.Age > 5) %>%
group_by(., Age = Perpetrator.Age) %>% summarise(., age_per_count = n()) %>% arrange(., Age)
})
age_year_info_vic <- reactive({
solved_cases() %>%
filter(., Year == input$year_selected) %>% select(., Victim.Age) %>% filter(., Victim.Age < 99) %>%
group_by(., Age = Victim.Age) %>% summarise(., age_vic_count = n()) %>% arrange(., Age)
})
age_year_info <-reactive({
full_join(age_year_info_per(), age_year_info_vic(), by='Age') %>% na.omit()
})
output$age_spe_line <- renderPlot({
age_year_info() %>%
ggplot(aes(x = Age)) + theme_bw() +
geom_line(aes(y = age_per_count, color = 'age_per_count'), size = 1) +
geom_line(aes(y = age_vic_count, color = 'age_vic_count'), size = 1) +
ggtitle('Age Distributions') + theme(legend.title = element_blank()) +
xlab('Age') + ylab('Total Numbers') + scale_y_continuous(labels = scales::comma) +
scale_colour_manual("", values=c("#F8766D","#00BFC4"), breaks=c('age_per_count', 'age_vic_count'), labels=c('Perpetrator', 'Victim'))
}, width = 500, height = 300)
output$age_spe_stat1 <- renderTable({
age_year_info() %>%
summarise(., Mean.Per.Age = weighted.mean(Age, age_per_count),
Sd.Per.Age = sqrt(sum(age_per_count*(Age - weighted.mean(Age, age_per_count))^2)/sum(age_per_count)),
Mean.Vic.Age = weighted.mean(Age, age_vic_count),
Sd.Vic.Age = sqrt(sum(age_vic_count*(Age - weighted.mean(Age, age_vic_count))^2)/sum(age_vic_count)))
})
output$age_spe_stat2 <- renderTable({
age_year_info() %>%
summarise(., Percent.Per.under18 = 100*sum(subset(age_per_count, Age < 18))/sum(age_per_count),
Percent.Vic.under18 = 100*sum(subset(age_vic_count, Age < 18))/sum(age_vic_count))
})
## Sex
sex_all_info <- reactive({
solved_cases() %>%
select(., Perpetrator.Sex, Victim.Sex) %>% filter(., Perpetrator.Sex != "Unknown" & Victim.Sex != "Unknown") %>%
mutate(., Incident = ifelse(Perpetrator.Sex == "Male" & Victim.Sex == "Male", 'A. Male.killed.Male',
ifelse(Perpetrator.Sex == "Male" & Victim.Sex == "Female", 'B. Male.killed.Female',
ifelse(Perpetrator.Sex == "Female" & Victim.Sex == "Male", 'C. Female.killed.Male',
'D. Female.killed.Female')))) %>%
group_by(., Incident) %>% summarise(., Count = n()) %>%
mutate(., Percentage = Count/sum(Count)) %>% arrange(., desc(Percentage))
})
output$sex_all_pie <- renderPlot({
sex_all_info() %>%
ggplot(aes(x = 1, y = Percentage)) +
geom_bar(aes(fill = Incident), position = "fill", stat = "identity") +
coord_polar(theta = 'y') + ggtitle("Overall Gender Info of Perpetrators and Victims") +
geom_text(aes(label = round(Percentage*100, digits = 1))) + scale_fill_brewer(palette = 'Set2')
}, width = 500, height = 300)
output$sex_all_tab <- renderTable({
sex_all_info() %>% mutate(., Percentage = Percentage*100)
})
sex_spe_info <- reactive({
solved_cases() %>%
filter(., Year == input$year_selected) %>% select(., Perpetrator.Sex, Victim.Sex) %>%
filter(., Perpetrator.Sex != "Unknown" & Victim.Sex != "Unknown") %>%
mutate(., Incident = ifelse(Perpetrator.Sex == "Male" & Victim.Sex == "Male", 'A. Male.killed.Male',
ifelse(Perpetrator.Sex == "Male" & Victim.Sex == "Female", 'B. Male.killed.Female',
ifelse(Perpetrator.Sex == "Female" & Victim.Sex == "Male", 'C. Female.killed.Male',
'D. Female.killed.Female')))) %>%
group_by(., Incident) %>% summarise(., Count = n()) %>%
mutate(., Percentage = Count/sum(Count)) %>% arrange(., desc(Percentage))
})
output$sex_spe_pie <- renderPlot({
sex_spe_info() %>%
ggplot(aes(x = 1, y = Percentage)) +
geom_bar(aes(fill = Incident), position = 'fill', stat = "identity") +
coord_polar(theta = 'y') + ggtitle("Specific Year Gender Info of Perpetrators and Victims") +
geom_text(aes(label = round(Percentage*100, digits = 1))) + scale_fill_brewer(palette = 'Set2')
}, width = 500, height = 300)
# Companion table for the year-specific pie: the same reactive summary,
# with Percentage rescaled from a proportion to the 0-100 range.
output$sex_spe_tab <- renderTable({
  year_info <- sex_spe_info()
  mutate(year_info, Percentage = Percentage * 100)
})
## Weapon
# Collapse the raw Weapon categories of solved cases into seven lettered
# buckets (A.-G.); anything not matched by an earlier branch falls into
# "A. Firearm" (the nested-ifelse default), which covers the gun types.
weapon_all_data <- reactive({
  solved_cases() %>%
    filter(., Weapon != "Unknown") %>% select(., Year, Perpetrator.Sex, Perpetrator.Age, Weapon) %>%
    mutate(., Weapon.Type = ifelse(Weapon=="Fall", "G. Fall",
                                   ifelse(Weapon=="Knife", "B. Knife",
                                          ifelse(Weapon=="Blunt Object", "C. Blunt.Obj",
                                                 ifelse(Weapon=="Poison"|Weapon=="Drug", "F. Poison.Drug",
                                                        ifelse(Weapon=="Explosives"|Weapon=="Fire", "E. Explo.Fire",
                                                               ifelse(Weapon=="Strangulation"|Weapon=="Suffocation"|Weapon=="Drowning", "D. Suffocation", "A. Firearm")))))))
})
# Stacked 100%-area chart: yearly share of each weapon bucket, all groups.
output$wp_all_fillperc <- renderPlot({
  weapon_all_data() %>%
    group_by(., Year, Weapon.Type) %>% summarise(., Weapon.Portion = n()) %>%
    ggplot(aes(x = Year, y = Weapon.Portion, group = Weapon.Type, fill = Weapon.Type)) +
    geom_area(position = 'fill') + ggtitle("Overall Weapon Use") + scale_fill_brewer(palette = 'Spectral')
}, width = 500)
# Subset the weapon data by the age/sex groups selected in the UI.
# Selection codes: age 1 = adult (>= 18), 2 = minor; sex 1 = Male, 2 = Female.
# NOTE(review): the final else is a fall-through -- any selection other than
# the three explicit combinations is treated as minor/female; confirm the
# UI can only produce codes 1 and 2.
weapon_spe_data <- reactive({
  if (input$wp_age_selected == 1 & input$wp_sex_selected == 1) {
    weapon_all_data() %>%
      filter(., Perpetrator.Age >= 18 & Perpetrator.Sex == "Male" )
  } else if (input$wp_age_selected == 1 & input$wp_sex_selected == 2) {
    weapon_all_data() %>%
      filter(., Perpetrator.Age >= 18 & Perpetrator.Sex == "Female")
  } else if (input$wp_age_selected == 2 & input$wp_sex_selected == 1) {
    weapon_all_data() %>%
      filter(., Perpetrator.Age < 18 & Perpetrator.Sex == "Male")
  } else {
    weapon_all_data() %>%
      filter(., Perpetrator.Age < 18 & Perpetrator.Sex == "Female")
  }
})
# Same stacked 100%-area chart as wp_all_fillperc, for the chosen subgroup.
output$wp_spe_fillperc <- renderPlot({
  weapon_spe_data() %>%
    group_by(., Year, Weapon.Type) %>% summarise(., Weapon.Portion = n()) %>%
    ggplot(aes(x = Year, y = Weapon.Portion, group = Weapon.Type, fill = Weapon.Type)) +
    geom_area(position = 'fill') + ggtitle("Weapon Use of Specific Age and Sex Groups") +
    scale_fill_brewer(palette = 'Spectral')
}, width = 500)
# MenuItem: "Relationships", relationships
# Build a compact "P.k.V.Relationship" code per solved case, e.g.
# "M.k.F.Wife" = male perpetrator killed female victim who was his wife.
# Computed once at startup from the full data set (ushr), not reactively.
overall_relation_data <- ushr %>%
  filter(., Crime.Solved == "Yes" &Perpetrator.Sex != "Unknown" & Victim.Sex != "Unknown") %>%
  select(., Perpetrator.Sex, Victim.Sex, Relationship) %>%
  mutate(., in_short = paste0(substr(Perpetrator.Sex, 1, 1), ".k.", substr(Victim.Sex, 1, 1), ".", Relationship))
# Normalise ambiguous labels so the victim role matches the victim's sex
# (e.g. a male victim in "Boyfriend/Girlfriend" becomes "Boyfriend").
overall_relation_data$in_short[overall_relation_data$in_short ==
                                 "M.k.M.Boyfriend/Girlfriend"] <- "M.k.M.Boyfriend"
overall_relation_data$in_short[overall_relation_data$in_short ==
                                 "F.k.F.Boyfriend/Girlfriend"] <- "F.k.F.Girlfriend"
overall_relation_data$in_short[overall_relation_data$in_short ==
                                 "F.k.M.Common-Law Husband"] <- "F.k.M.Husband"
overall_relation_data$in_short[overall_relation_data$in_short ==
                                 "M.k.F.Common-Law Wife"] <- "M.k.F.Wife"
# Clean the relationship codes: drop combinations that are contradictory
# or presumably data-entry errors (e.g. "M.k.F.Husband" -- a female victim
# labelled as a husband), keep codes occurring at least 15 times, then
# derive perpetrator sex (ps), victim sex (vs) and the victim's role, and
# bucket each code into a coarse relationship Type.
relation_drop_edit <- overall_relation_data %>%
  filter(., in_short != "M.k.F.Ex-Husband" & in_short != "F.k.F.Father" & in_short != "M.k.M.Ex-Husband" & in_short != "F.k.M.Girlfriend" &
           in_short != "M.k.M.Stepmother" & in_short != "F.k.F.Husband" & in_short != "M.k.M.Stepdaughter" & in_short != "F.k.M.Mother" &
           in_short != "M.k.M.Common-Law Wife" & in_short != "M.k.F.Stepfather" & in_short != "F.k.F.Wife" & in_short != "M.k.M.Common-Law Husband" &
           in_short != "F.k.M.Wife" & in_short != "M.k.F.Boyfriend" & in_short != "M.k.F.Stepson" & in_short != "M.k.F.Husband" &
           in_short != "M.k.M.Sister" & in_short != "M.k.M.Husband" & in_short != "M.k.M.Ex-Wife" & in_short != "M.k.F.Brother" &
           in_short != "F.k.F.Son" & in_short != "M.k.M.Daughter" & in_short != "M.k.F.Father" & in_short != "M.k.M.Mother" &
           in_short != "M.k.M.Girlfriend" & in_short != "M.k.F.Son" & in_short != "M.k.M.Wife") %>%
  group_by(., in_short) %>% summarise(., tot = n()) %>% filter(., tot >= 15) %>%
  # in_short is fixed-width up to position 6 ("X.k.Y."); the rest is the role.
  mutate(., ps = substr(in_short, 1, 1), vs = substr(in_short, 5, 5),
         Victim = substr(in_short, 7, nchar(in_short))) %>%
  mutate(., Type = ifelse(Victim == "Stranger" | Victim == "Unknown", "Stranger",
                          ifelse(Victim == "Employee" | Victim == "Employer", "Work",
                                 ifelse(Victim == "Acquaintance" | Victim == "Neighbor", "Acquaintance",
                                        ifelse(Victim == "Friend", "Friend",
                                               ifelse(Victim == "Boyfriend" | Victim == "Girlfriend", "Close", "Family")))))) %>%
  # Infer the perpetrator's role from the victim's; roles without a fixed
  # counterpart get "placeholder" and are patched up right below.
  mutate(., Perpetrator = ifelse(Type == "Stranger", "Stranger",
                                 ifelse(Victim == "Acquaintance", "Acquaintance",
                                        ifelse(Victim == "Neighbor", "Neighbor",
                                               ifelse(Victim == "Employee", "Employer",
                                                      ifelse(Victim == "Employer", "Employee",
                                                             ifelse(Victim == "Friend", "Friend",
                                                                    ifelse(Victim == "Family", "Family",
                                                                           ifelse(Victim == "In-Law", "In-Law", "placeholder")))))))))
# Resolve the remaining "placeholder" perpetrator roles by pairing each
# victim role with its counterpart, using the perpetrator's sex (ps)
# to disambiguate (e.g. victim Son/Daughter + male perpetrator = Father).
relation_drop_edit$Victim[relation_drop_edit$Victim ==
                            "Unknown"] <- "Stranger"
relation_drop_edit$Perpetrator[relation_drop_edit$Type == "Close" &
                                 relation_drop_edit$ps == "M"] <- "Boyfriend"
relation_drop_edit$Perpetrator[relation_drop_edit$Type == "Close" &
                                 relation_drop_edit$ps == "F"] <- "Girlfriend"
relation_drop_edit$Perpetrator[relation_drop_edit$Victim == "Wife"] <- "Husband"
relation_drop_edit$Perpetrator[relation_drop_edit$Victim == "Husband"] <- "Wife"
relation_drop_edit$Perpetrator[relation_drop_edit$Victim == "Ex-Wife"] <- "Ex-Husband"
relation_drop_edit$Perpetrator[relation_drop_edit$Victim == "Ex-Husband"] <- "Ex-Wife"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Son" |
                                  relation_drop_edit$Victim == "Daughter") & relation_drop_edit$ps == "M"] <- "Father"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Son" |
                                  relation_drop_edit$Victim == "Daughter") & relation_drop_edit$ps == "F"] <- "Mother"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Stepson" |
                                  relation_drop_edit$Victim == "Stepdaughter") & relation_drop_edit$ps == "M"] <- "Stepfather"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Stepson" |
                                  relation_drop_edit$Victim == "Stepdaughter") & relation_drop_edit$ps == "F"] <- "Stepmother"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Father" |
                                  relation_drop_edit$Victim == "Mother") & relation_drop_edit$ps == "M"] <- "Son"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Father" |
                                  relation_drop_edit$Victim == "Mother") & relation_drop_edit$ps == "F"] <- "Daughter"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Stepfather" |
                                  relation_drop_edit$Victim == "Stepmother") & relation_drop_edit$ps == "M"] <- "Stepson"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Stepfather" |
                                  relation_drop_edit$Victim == "Stepmother") & relation_drop_edit$ps == "F"] <- "Stepdaughter"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Sister" |
                                  relation_drop_edit$Victim == "Brother") & relation_drop_edit$ps == "M"] <- "Brother"
relation_drop_edit$Perpetrator[(relation_drop_edit$Victim == "Sister" |
                                  relation_drop_edit$Victim == "Brother") & relation_drop_edit$ps == "F"] <- "Sister"
# Final relationship table: a human-readable Detail label per code
# (family/close pairs as "Perp.k.Victim", others prefixed with sexes),
# totalled per (Type, Detail).
relation_optimize <- reactive({
  relation_drop_edit %>%
    mutate(., Detail = ifelse(Type == "Family" | Type == "Close", paste0(Perpetrator, ".k.", Victim),
                              paste0(ps, ".", Perpetrator, ".k.", vs, ".", Victim))) %>%
    group_by(., Type, Detail) %>% summarise(., Total = sum(tot))
})
# Pie of the six coarse relationship types.
# NOTE(review): the lettered Relationship labels are hard-coded against an
# assumed descending-count order (Stranger > Acquaintance > Family > Close >
# Friend > Work); if the data ever sorts differently, labels will mismatch.
output$rl_all_pie <- renderPlot({
  relation_optimize() %>%
    group_by(Type) %>% summarise(Total = sum(Total)) %>% arrange(desc(Total)) %>%
    mutate(Percentage = Total/sum(Total)) %>%
    mutate(Relationship = c("A. Stranger", "B. Acquaintance", "C. Family", "D. Close", "E. Friend", "F. Work")) %>%
    ggplot(aes(x = 1, y = Percentage)) +
    geom_bar(aes(fill = Relationship), position = 'fill', stat = "identity") +
    coord_polar(theta = 'y') + ggtitle('Types of Relationships between Perpetrators and Victims') +
    scale_fill_brewer(palette = 'Accent')
})
# Map the UI selection code (1..5) to its relationship Type and filter the
# optimized table to it; any other code falls back to "Stranger", exactly
# as the original if/else ladder did.
rl_spe_select <- reactive({
  type_for_code <- c("1" = "Family", "2" = "Close", "3" = "Friend",
                     "4" = "Work", "5" = "Acquaintance")
  chosen <- unname(type_for_code[as.character(input$rl_selected)])
  if (is.na(chosen)) {
    chosen <- "Stranger"
  }
  relation_optimize() %>% filter(., Type == chosen)
})
# Collapse the selected subset to one Total per Detail label.
rl_spe_info <- reactive({
  rl_spe_select() %>% group_by(., Detail) %>% summarise(., Total = sum(Total))
})
# Horizontal bar chart of the (up to) 12 most frequent Detail labels,
# ordered by count; top_n(12) uses Total as the implicit ranking column.
output$rl_spe_bar <- renderPlot({
  rl_spe_info() %>% top_n(12) %>%
    ggplot(aes(x = reorder(Detail, Total), y = Total)) + xlab('Total Number of Incidence') +
    geom_bar(aes(fill = Detail), stat = "identity", color = "#666666") + coord_flip() +
    scale_fill_brewer(palette = 'Set3') + ggtitle('Details of Relationship Type') +
    scale_y_continuous(labels = scales::comma)
})
# Full table of the same data, largest counts first.
output$rl_spe_tab <- renderTable({
  rl_spe_info() %>% arrange(., desc(Total))
})
}) |
95ac6a736b2cec8edf33b7c17a35ade919e44b27 | 29585dff702209dd446c0ab52ceea046c58e384e | /Rz/R/RzPlotElementLine.R | f29b1744d964ae4fe746d99a7575dd949aec603f | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,715 | r | RzPlotElementLine.R | rzplot.element.line <-
setRefClass("RzPlotElementLine",
            # GTK+ editor widget for one ggplot2 element_line() theme entry:
            # fields hold the line properties (colour/size/linetype/lineend),
            # per-property "inherit" flags, the generated theme script, and
            # the RGtk2 widgets that edit them.
            fields = c("main", "name", "parent", "colour", "size", "size.rel", "linetype", "lineend", "blank", "script",
                       "color.widget", "size.button", "size.rel.button", "linetype.combo", "lineend.combo",
                       "inherit.from", "colour.inherit", "size.inherit",
                       "linetype.inherit", "lineend.inherit",
                       "inherit.colour.button", "inherit.size.button", "inherit.linetype.button",
                       "inherit.lineend.button", "button.blank", "linetypes", "lineends"),
            methods = list(
              # Build the widget tree and wire all change handlers. Fields not
              # supplied by the caller arrive as "uninitializedField"; those get
              # a default value and their inherit flag set (so they are omitted
              # from the generated script).
              initialize = function(...) {
                initFields(...)
                colour.inherit <<- FALSE
                size.inherit <<- FALSE
                linetype.inherit <<- FALSE
                lineend.inherit <<- FALSE
                if (class(colour) == "uninitializedField") {
                  colour <<- "#000000"
                  colour.inherit <<- TRUE
                }
                if (class(size) == "uninitializedField") {
                  size <<- 1
                  size.rel <<- FALSE
                  size.inherit <<- TRUE
                }
                if (class(linetype) == "uninitializedField"){
                  linetype <<- "solid"
                  linetype.inherit <<- TRUE
                }
                if (class(lineend) == "uninitializedField") {
                  lineend <<- "round"
                  lineend.inherit <<- TRUE
                }
                if (class(blank) == "uninitializedField") {
                  blank <<- FALSE
                }
                # Choice lists for the two combo boxes (ggplot2 vocabulary).
                linetypes <<- c("blank", "solid", "dashed", "dotted",
                                "dotdash", "longdash", "twodash")
                lineends <<- c("round", "butt", "square")
                # --- property editors: colour picker, size spinner (+ "rel"
                # checkbox for relative sizing), linetype and lineend combos.
                color.widget <<- gtkColorSelectionWidgetNew(spacing=2, parent=parent)
                color.widget$setColor(colour)
                size.adj <- gtkAdjustmentNew(size, 0, 99, 0.1)
                size.button <<- gtkSpinButtonNew(size.adj, climb.rate=0.1, digits=1)
                size.button$setValue(size)
                size.rel.button <<- gtkCheckButtonNewWithLabel("rel")
                size.rel.button$setActive(size.rel)
                size.rel.button["tooltip-text"] <<- gettext("Relative sizing")
                size.hbox <- gtkHBoxNew(spacing=2)
                size.hbox$packStart(size.button)
                size.hbox$packStart(size.rel.button, expand=FALSE)
                # Combos are 0-indexed, hence the "- 1" on the which() result.
                linetype.combo <<- .self$buildCombo(linetypes, which(linetypes == linetype) - 1)
                lineend.combo <<- .self$buildCombo(lineends , which(lineends == lineend ) - 1)
                # --- lay the editors out in a labelled 2-column table.
                table <- gtkTableNew()
                table$setBorderWidth(5)
                table$attach(gtkLabelNew(gettext("Colour")) , 0, 1, 0, 1, "shrink", "shrink")
                table$attach(color.widget , 1, 2, 0, 1, 5 , "shrink")
                table$attach(gtkLabelNew(gettext("Size")) , 0, 1, 1, 2, "shrink", "shrink")
                table$attach(size.hbox , 1, 2, 1, 2, 5 , "shrink")
                table$attach(gtkLabelNew(gettext("Line\nType")) , 0, 1, 2, 3, "shrink", "shrink")
                table$attach(linetype.combo , 1, 2, 2, 3, 5 , "shrink")
                table$attach(gtkLabelNew(gettext("Line\nEnd")) , 0, 1, 3, 4, "shrink", "shrink")
                table$attach(lineend.combo , 1, 2, 3, 4, 5 , "shrink")
                table$setRowSpacings(5)
                table$setColSpacings(2)
                # Toggle in the frame label: when switched OFF the element
                # becomes element_blank() and the editors are greyed out.
                button.blank <<- gtkToggleButtonNewWithLabel(name)
                button.blank["tooltip-markup"] <<- gettext("<span font_style='italic' font_weight='bold'>theme_blank()</span> when turn <span font_style='italic' font_weight='bold'>OFF</span> the button")
                button.blank$setActive(!blank)
                gSignalConnect(button.blank, "toggled", function(...){
                  blank <<- ! button.blank$getActive()
                  if(blank) {
                    table$setSensitive(FALSE)
                  } else {
                    table$setSensitive(TRUE)
                  }
                  .self$generateScript()
                })
                hbox.blank <- gtkHBoxNew(spacing=2)
                hbox.blank$packStart(button.blank, expand=FALSE)
                main <<- gtkFrameNew()
                main$setLabelWidget(hbox.blank)
                main$setShadowType(GtkShadowType["etched-in"])
                main$add(table)
                # Every editor change updates the matching field and regenerates
                # the theme script.
                gSignalConnect(color.widget$getEntry(), "changed", function(obj){
                  colour <<- obj$getText()
                  .self$generateScript()
                })
                gSignalConnect(size.button, "value-changed", function(obj){
                  size <<- obj$getValue()
                  .self$generateScript()
                })
                gSignalConnect(size.rel.button, "toggled", function(obj){
                  size.rel <<- obj$getActive()
                  .self$generateScript()
                })
                gSignalConnect(linetype.combo, "changed", function(obj){
                  linetype <<- obj$getActiveText()
                  .self$generateScript()
                })
                gSignalConnect(lineend.combo , "changed", function(obj){
                  lineend <<- obj$getActiveText()
                  .self$generateScript()
                })
                # --- per-property "inherit" toggle buttons (down-arrow icons).
                # Activating one greys out its editor and drops the property
                # from the generated script.
                tooltip.text <- character()
                if (is.null(inherit.from)) {
                  tooltip.text <- gettext("Inherit from the current theme")
                } else {
                  tooltip.text <- gettextf("Inherit from <span font_style='italic' font_weight='bold' font_size='large'>%s</span>", inherit.from)
                }
                inherit.colour.button <<- gtkToggleButtonNew()
                inherit.size.button <<- gtkToggleButtonNew()
                inherit.linetype.button <<- gtkToggleButtonNew()
                inherit.lineend.button <<- gtkToggleButtonNew()
                inherit.colour.button$setImage(gtkImageNewFromStock(GTK_STOCK_GO_DOWN, GtkIconSize["button"]))
                inherit.size.button$setImage(gtkImageNewFromStock(GTK_STOCK_GO_DOWN, GtkIconSize["button"]))
                inherit.linetype.button$setImage(gtkImageNewFromStock(GTK_STOCK_GO_DOWN, GtkIconSize["button"]))
                inherit.lineend.button$setImage(gtkImageNewFromStock(GTK_STOCK_GO_DOWN, GtkIconSize["button"]))
                inherit.colour.button["tooltip-markup"] <<- tooltip.text
                inherit.size.button["tooltip-markup"] <<- tooltip.text
                inherit.linetype.button["tooltip-markup"] <<- tooltip.text
                inherit.lineend.button["tooltip-markup"] <<- tooltip.text
                table$attach(inherit.colour.button , 2, 3, 0, 1, "shrink", "shrink")
                table$attach(inherit.size.button , 2, 3, 1, 2, "shrink", "shrink")
                table$attach(inherit.linetype.button, 2, 3, 2, 3, "shrink", "shrink")
                table$attach(inherit.lineend.button , 2, 3, 3, 4, "shrink", "shrink")
                gSignalConnect(inherit.colour.button, "toggled", function(obj){
                  colour.inherit <<- obj$getActive()
                  color.widget$setSensitive( !colour.inherit )
                  .self$generateScript()
                })
                gSignalConnect(inherit.size.button, "toggled", function(obj){
                  size.inherit <<- obj$getActive()
                  size.hbox$setSensitive( !size.inherit )
                  .self$generateScript()
                })
                gSignalConnect(inherit.linetype.button, "toggled", function(obj){
                  linetype.inherit <<- obj$getActive()
                  linetype.combo$setSensitive( !linetype.inherit )
                  .self$generateScript()
                })
                gSignalConnect(inherit.lineend.button, "toggled", function(obj){
                  lineend.inherit <<- obj$getActive()
                  lineend.combo$setSensitive( !lineend.inherit )
                  .self$generateScript()
                })
                # Apply the initial inherit states (fires the handlers above)
                # and produce the initial script.
                inherit.colour.button$setActive(colour.inherit)
                inherit.size.button$setActive(size.inherit)
                inherit.linetype.button$setActive(linetype.inherit)
                inherit.lineend.button$setActive(lineend.inherit)
                .self$generateScript()
              },
              # Build a text combo box from a character vector of choices;
              # `active` is the 0-based index to preselect (NULL = none).
              buildCombo = function(contents, active=NULL){
                combo <- gtkComboBoxNewText()
                for(i in contents) combo$appendText(i)
                if (!is.null(active)) combo$setActive(active)
                return(combo)
              },
              # Regenerate the `script` field: an element_line(...) (or
              # element_blank()) snippet reflecting the current widget state.
              # Trick: sprintf(fmt, NULL) yields character(0), which vanishes
              # inside c(), so inherited properties are simply omitted.
              generateScript = function(){
                # The literal string "NA" is emitted unquoted (colour = NA).
                if (colour == "NA") {
                  colour.script <- sprintf("colour = %s", if (colour.inherit) NULL else colour)
                } else {
                  colour.script <- sprintf("colour = \"%s\"", if (colour.inherit) NULL else colour)
                }
                if (size.rel) {
                  size.script <- sprintf("size = rel(%s)", if (size.inherit) NULL else size)
                } else {
                  size.script <- sprintf("size = %s", if (size.inherit) NULL else size)
                }
                linetype.script <- sprintf("linetype = \"%s\"", if (linetype.inherit) NULL else linetype)
                lineend.script <- sprintf("lineend = \"%s\"" , if (lineend.inherit ) NULL else lineend )
                script <<- paste(c(colour.script, size.script, linetype.script, lineend.script), collapse=", ")
                if (blank) {
                  script <<- sprintf("%s = element_blank()", name)
                } else if(nzchar(script)) {
                  script <<- sprintf("%s = element_line(%s)", name, script)
                } else {
                  # Everything inherited: emit nothing for this element.
                  script <<- NULL
                }
              },
              # Load an existing theme element into the widgets. `value` is a
              # ggplot2 element object; element_blank turns the element off,
              # otherwise each NULL property activates its inherit toggle and
              # each concrete property is pushed into the matching editor.
              setValue = function(value){
                if ("element_blank" %in% class(value)) {
                  button.blank$setActive(FALSE)
                } else {
                  button.blank$setActive(TRUE)
                  if (is.null(value$colour)) {
                    inherit.colour.button$setActive(TRUE)
                  } else {
                    color.widget$setColor(value$colour)
                    inherit.colour.button$setActive(FALSE)
                  }
                  if (is.null(value$size)) {
                    inherit.size.button$setActive(TRUE)
                  } else {
                    size.button$setValue(value$size)
                    # rel() sizes keep the "rel" checkbox in sync.
                    if (class(value$size) == "rel") {
                      size.rel.button$setActive(TRUE)
                    } else {
                      size.rel.button$setActive(FALSE)
                    }
                    inherit.size.button$setActive(FALSE)
                  }
                  if (is.null(value$linetype)) {
                    inherit.linetype.button$setActive(TRUE)
                  } else {
                    # Linetype may be stored numerically (combo index) or by name.
                    if (is.numeric(value$linetype)) {
                      linetype.combo$setActive(value$linetype)
                    } else {
                      linetype.combo$setActive( which(linetypes == value$linetype) - 1)
                    }
                    inherit.linetype.button$setActive(FALSE)
                  }
                  if (is.null(value$lineend)) {
                    inherit.lineend.button$setActive(TRUE)
                  } else {
                    lineend.combo$setActive( which(lineends == value$lineend) - 1)
                    inherit.lineend.button$setActive(FALSE)
                  }
                }
              },
              # Restore the default state: every property inherited and the
              # element turned back on (not blank).
              reset = function(){
                colour.inherit <<- TRUE
                size.inherit <<- TRUE
                linetype.inherit <<- TRUE
                lineend.inherit <<- TRUE
                inherit.colour.button$setActive(colour.inherit)
                inherit.size.button$setActive(size.inherit)
                inherit.linetype.button$setActive(linetype.inherit)
                inherit.lineend.button$setActive(lineend.inherit)
                button.blank$setActive(TRUE)
              }
            )
)
# Auto-generate get*/set* accessor methods for the listed fields.
rzplot.element.line$accessors(c("main", "colour", "size", "linetype", "lineend", "script"))
|
078e40213fa9876b532e7c4fd362d29600f0bf84 | c2e440d65f3101c0e64fccd7df3bb63ce427f7b6 | /6. Predicting Sentiments.R | 0e2716acf81d4dd5ff7ae42bcf83283f8f54f33a | [] | no_license | AlicanTanacan/Big-Data-Sentiment-Analysis | fb44e39ad63a1bd6a31ae02f1215b11b3f2ba333 | 8d8dda7965db9c8e92619e77b9880cbb55f3d8ad | refs/heads/master | 2020-05-18T06:25:20.512563 | 2019-05-27T10:21:49 | 2019-05-27T10:21:49 | 184,233,884 | 3 | 0 | null | null | null | null | IBM852 | R | false | false | 2,976 | r | 6. Predicting Sentiments.R | ### ------- Ubiqum Module 4: Big Data ------- ###
### ---------- Sentiment Analysis ----------- ###
### ------------ Alican Tanašan ------------- ###
### - Version 6: Prediction on Large Matrix - ###
### ---- Libraries & Source ----
## Ensure pacman is available, then load all required packages with p_load.
## Fixes two defects: require() returns a logical, so comparing it against
## the *string* "FALSE" only worked via implicit coercion; and after a fresh
## install.packages("pacman") the package was never loaded, so p_load()
## below would fail on a first run.
if (!require("pacman", quietly = TRUE)) {
  install.packages("pacman")
  library(pacman)
}
p_load(dplyr, ggplot2, plotly, caret, corrplot, GGally,
       doParallel, tidyverse, e1071, randomForest, caTools,
       plyr, ROSE, kknn)
## Take models from previous versions with source
## NOTE(review): absolute Windows path ties this script to one machine.
source(file = "D:/RStudio/R Studio Working Directory/M4-BigData/Sentiment Analysis v5.R")
### ---- Import Large Matrix ----
# Load the unlabeled large matrix plus the two labeled training matrices.
largematrix <- read.csv("LargeMatrix.csv")
iphone.small.matrix <- read.csv("iphone_smallmatrix_labeled_8d.csv")
galaxy.small.matrix <- read.csv("galaxy_smallmatrix_labeled_9d.csv")
### ---- Preprocessing ----
## Large Matrix
# Keep only rows that mention the device at all, then keep its feature
# columns (dropping the raw mention-count column itself).
largematrix %>%
  filter(iphone > 0) %>%
  select(starts_with("iphone"),
         -iphone) -> iphone.large.matrix
largematrix %>%
  filter(samsunggalaxy > 0) %>%
  select(starts_with("samsung"),
         starts_with("galaxy"),
         -samsunggalaxy) -> galaxy.large.matrix
## Recode sentiment to combine factor levels: 0-2 -> "N" (negative),
## 3-5 -> "P" (positive), turning the 6-level target into a binary one.
iphone.small.matrix$iphonesentiment <- recode(iphone.small.matrix$iphonesentiment,
                                              "0" = "N",
                                              "1" = "N",
                                              "2" = "N",
                                              "3" = "P",
                                              "4" = "P",
                                              "5" = "P")
galaxy.small.matrix$galaxysentiment <- recode(galaxy.small.matrix$galaxysentiment,
                                              "0" = "N",
                                              "1" = "N",
                                              "2" = "N",
                                              "3" = "P",
                                              "4" = "P",
                                              "5" = "P")
# Apply the same row/column filtering to the labeled matrices so their
# schema matches the large-matrix inputs used for prediction below.
iphone.small.matrix %>%
  filter(iphone > 0) %>%
  select(starts_with("iphone"),
         -iphone) -> iphone.small.matrix
galaxy.small.matrix %>%
  filter(samsunggalaxy > 0) %>%
  select(starts_with("samsung"),
         starts_with("galaxy"),
         -samsunggalaxy) -> galaxy.small.matrix
### ---- Predictions ----
## Best performing models for both data is random forest
## NOTE(review): RFmodel1/RFmodel2 are not defined in this file -- they are
## expected to come from the script loaded via source() above; confirm.
# iphone predictions
iphone.predictions <- predict(RFmodel1, newdata = iphone.large.matrix)
summary(iphone.predictions)
# probability comparison: predicted class shares on the large matrix vs
# the observed label shares in the labeled training data
prop.table(table(iphone.predictions))
prop.table(table(iphone.small.matrix$iphonesentiment))
# galaxy predictions
galaxy.predictions <- predict(RFmodel2, newdata = galaxy.large.matrix)
summary(galaxy.predictions)
# probability comparison
prop.table(table(galaxy.predictions))
prop.table(table(galaxy.small.matrix$galaxysentiment))
|
2efcd545fce13879302c71714b7efbfc5a16e5e6 | e573bc7fd968068a52a5144a3854d184bbe4cda8 | /Recommended/boot/man/smooth.f.Rd | c8f6e1af37b8646f812130f2059f1595ae9a036d | [] | no_license | lukaszdaniel/ivory | ef2a0f5fe2bc87952bf4471aa79f1bca193d56f9 | 0a50f94ce645c17cb1caa6aa1ecdd493e9195ca0 | refs/heads/master | 2021-11-18T17:15:11.773836 | 2021-10-13T21:07:24 | 2021-10-13T21:07:24 | 32,650,353 | 5 | 1 | null | 2018-03-26T14:59:37 | 2015-03-21T21:18:11 | R | UTF-8 | R | false | false | 4,629 | rd | smooth.f.Rd | \name{smooth.f}
\alias{smooth.f}
\title{
Smooth Distributions on Data Points
}
\description{
This function uses the method of frequency smoothing to find a distribution
on a data set which has a required value, \code{theta}, of the statistic of
interest. The method results in distributions which vary smoothly with
\code{theta}.
}
\usage{
smooth.f(theta, boot.out, index = 1, t = boot.out$t[, index],
width = 0.5)
}
\arguments{
\item{theta}{
The required value for the statistic of interest. If \code{theta} is a vector,
a separate distribution will be found for each element of \code{theta}.
}
\item{boot.out}{
A bootstrap output object returned by a call to \code{boot}.
}
\item{index}{
The index of the variable of interest in the output of \code{boot.out$statistic}.
This argument is ignored if \code{t} is supplied. \code{index} must be a scalar.
}
\item{t}{
The bootstrap values of the statistic of interest. This must be a vector of
length \code{boot.out$R} and the values must be in the same order as the bootstrap
replicates in \code{boot.out}.
}
\item{width}{
The standardized width for the kernel smoothing. The smoothing uses a
value of \code{width*s} for epsilon, where \code{s} is the bootstrap estimate of the
standard error of the statistic of interest. \code{width} should take a value in
the range (0.2, 1) to produce a reasonable
smoothed distribution. If \code{width} is too large then the distribution becomes
closer to uniform.
}}
\value{
If \code{length(theta)} is 1 then a vector with the same length as the data set
\code{boot.out$data} is returned. The value in position \code{i} is the probability
to be given to the data point in position \code{i} so that the distribution has
parameter value approximately equal to \code{theta}.
If \code{length(theta)} is bigger than 1 then the returned value is a matrix with
\code{length(theta)} rows each of which corresponds to a distribution with the
parameter value approximately equal to the corresponding value of \code{theta}.
}
\details{
The new distributional weights are found by applying a normal kernel smoother
to the observed values of \code{t} weighted by the observed frequencies in the
bootstrap simulation. The resulting distribution may not have
parameter value exactly equal to the required value \code{theta} but it will
typically have a value which is close to \code{theta}. The details of how this
method works can be found in Davison, Hinkley and Worton (1995) and Section
3.9.2 of Davison and Hinkley (1997).
}
\references{
Davison, A.C. and Hinkley, D.V. (1997) \emph{Bootstrap Methods and Their Application}. Cambridge University Press.
Davison, A.C., Hinkley, D.V. and Worton, B.J. (1995) Accurate and efficient
construction of bootstrap likelihoods. \emph{Statistics and Computing},
\bold{5}, 257--264.
}
\seealso{
\code{\link{boot}}, \code{\link{exp.tilt}}, \code{\link{tilt.boot}}
}
\examples{
# Example 9.8 of Davison and Hinkley (1997) requires tilting the resampling
# distribution of the studentized statistic to be centred at the observed
# value of the test statistic 1.84. In the book exponential tilting was used
# but it is also possible to use smooth.f.
grav1 <- gravity[as.numeric(gravity[, 2]) >= 7, ]
grav.fun <- function(dat, w, orig) {
strata <- tapply(dat[, 2], as.numeric(dat[, 2]))
d <- dat[, 1]
ns <- tabulate(strata)
w <- w/tapply(w, strata, sum)[strata]
mns <- as.vector(tapply(d * w, strata, sum)) # drop names
mn2 <- tapply(d * d * w, strata, sum)
s2hat <- sum((mn2 - mns^2)/ns)
c(mns[2] - mns[1], s2hat, (mns[2]-mns[1]-orig)/sqrt(s2hat))
}
grav.z0 <- grav.fun(grav1, rep(1, 26), 0)
grav.boot <- boot(grav1, grav.fun, R = 499, stype = "w",
strata = grav1[, 2], orig = grav.z0[1])
grav.sm <- smooth.f(grav.z0[3], grav.boot, index = 3)
# Now we can run another bootstrap using these weights
grav.boot2 <- boot(grav1, grav.fun, R = 499, stype = "w",
strata = grav1[, 2], orig = grav.z0[1],
weights = grav.sm)
# Estimated p-values can be found from these as follows
mean(grav.boot$t[, 3] >= grav.z0[3])
imp.prob(grav.boot2, t0 = -grav.z0[3], t = -grav.boot2$t[, 3])
# Note that for the importance sampling probability we must
# multiply everything by -1 to ensure that we find the correct
# probability. Raw resampling is not reliable for probabilities
# greater than 0.5. Thus
1 - imp.prob(grav.boot2, index = 3, t0 = grav.z0[3])$raw
# can give very strange results (negative probabilities).
}
\keyword{smooth}
\keyword{nonparametric}
% Converted by Sd2Rd version 1.15.
|
59560ca431e6c02fa185d46daead913068676bf6 | ce828d49e40d96aa975b792e23d0ed4172828ef7 | /Newton Raphson e Calculo de TIR/R/lib/RaizesQuadratica.r | bc9dfd56211cd5608e81095f49aa45773b5bf54b | [] | no_license | btebaldi/MetodosQuantitativos | bd5ee54a088707f0408636c3955f3087ee3f7ae2 | c38ae095064e0b021b3ebdd0427bd3d383742b80 | refs/heads/master | 2021-01-20T06:26:20.750079 | 2020-07-12T16:54:38 | 2020-07-12T16:54:38 | 89,879,517 | 0 | 0 | null | null | null | null | ISO-8859-2 | R | false | false | 521 | r | RaizesQuadratica.r | RaizesQuadratica = function(a, b, c){
## Estrutura de retorno
x = array(NaN,c(length(a),2));
## Looping principal
for (i in 1:length(a)) {
## Calcula o delta
delta = b[i]^2 - 4*a[i]*c[i];
## Calcula as raízes
if (delta==0){
x[i,1] = -b[i]/(2*a[i]);
} else{
if (delta > 0) {
x[i,1] = (-b[i]-delta^(1/2))/(2*a[i]);
x[i,2] = (-b[i]+delta^(1/2))/(2*a[i]);
}
}
}
return(x);
} |
6d0185f3d38dfb5b3952fde15e0dca88751a8d85 | 1b494e164bf619370655eea371727371f5aaff2f | /man/generateDescInfo.Rd | 48915d524d982b3fd55546fb083e2750e7d8dce6 | [] | no_license | gmbecker/gRAN | d4d1886a7b490de5f9b3bf53f2b2d7976ce6a177 | d243dc799759d491a3d0fe8eb35295e4848132e3 | refs/heads/master | 2023-06-29T19:23:31.405858 | 2023-06-13T15:40:52 | 2023-06-13T15:40:52 | 20,597,782 | 17 | 13 | null | 2023-06-13T15:40:54 | 2014-06-07T16:52:25 | R | UTF-8 | R | false | true | 829 | rd | generateDescInfo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkgHTML.R
\name{generateDescInfo}
\alias{generateDescInfo}
\title{Converts a DESCRIPTION file to a data.frame}
\usage{
generateDescInfo(pkg_path, encoding = "")
}
\arguments{
\item{pkg_path}{The path preceding the location of the DESCRIPTION file}
\item{encoding}{If there is an Encoding field, to what encoding should
re-encoding be attempted? The other values are as used by iconv, so the
default "" indicates the encoding of the current locale}
}
\value{
If a DESCRIPTION file for the given package is found and can
successfully be read, this function returns a data.frame containing
the fields as headers and the tags as rows
}
\description{
Converts a DESCRIPTION file to a data.frame
}
\author{
Dinakar Kulkarni <kulkard2@gene.com>
}
|
42b3f65dc214d516acea26aef584d0a5e7107c8a | 368525a842bc7cd4f5ab44c409c343fc3f28f4d7 | /rankhospital.R | c208c542face255f6f9b47038fb9a457495ff681 | [] | no_license | anaflrs/SoftwareActuarial_III | 868f474259278a96ac382902c4d2c3ca76e48e8a | db838f5bab13bfc39f0a529fdda3caff3306064f | refs/heads/master | 2021-05-09T08:59:31.046680 | 2018-05-22T04:39:05 | 2018-05-22T04:39:05 | 119,414,474 | 0 | 0 | null | null | null | null | ISO-8859-2 | R | false | false | 1,658 | r | rankhospital.R | setwd("C:/Users/anav_/OneDrive/Escritorio/calhospitales")
direc <- "C:/Users/anav_/OneDrive/Escritorio/calhospitales"
rankhospital <-function(estado, resultado, ranking){
ldt <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
f<-nrow(ldt)
if (resultado == "ataque") {
cl<-11
}
else if (resultado == "falla"){
cl<-17
}
else if (resultado == "neumonia"){
cl<-23
}
else {
stop("resultado inválido")
}
if(ranking=="mejor" || ranking== 1 ){
md <- ldt[ldt$State == estado,]
md2 <- md[,c(2,cl)]
if (sum(md2[,2]=="Not Available") < 1) {
elec <- md2[order(as.numeric(md2[,2])),]
elec2 <- elec[which(elec[,2] == elec[1,2]),]
es <- elec2[order(elec2[,1]),]
es[1,1]
}
else {
final <- md2[- grep("Not", md2[,2]),]
elec <- final[order(as.numeric(final[,2])),]
elec2 <- elec[which(elec[,2] == elec[1,2]),]
es <- elec2[order(elec2[,1]),]
es[1,1]
}
}
else if(ranking=="peor"){
a <- matrix(ldt[,cl],f,1)
ldt[,cl] <- suppressWarnings(as.numeric(a))
ldt[,2] <- as.character(ldt[,2])
b <- ldt[grep(estado,ldt$State),]
or <- b[order(b[,cl], b[,2], na.last=NA),]
tail(or[,2],1)
}
else if(ranking>f){
print("NA")
}
else {
a <- matrix(ldt[,cl],f,1)
ldt[,cl] <- suppressWarnings(as.numeric(a))
ldt[,2] <- as.character(ldt[,2])
b <- ldt[grep(estado,ldt$State),]
or <- b[order(b[,cl], b[,2], na.last=NA),]
}
}
# Example invocations: best in TX for heart failure, worst in MD for heart
# attack, then two numeric ranks passed as character strings ("5000" is
# intentionally out of range, "7" is a valid rank).
rankhospital("TX", "falla", "mejor" )
rankhospital("MD", "ataque", "peor")
rankhospital("MD", "ataque", "5000")
rankhospital("MD", "ataque", "7")
|
28a0d7d69e64bc48337e8ec8d773d25d6dd8d119 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /valgrind_test_dir/ordersetdiff-test.R | 7577162f3b5aeda147b37c0df97fd498465d0159 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 265 | r | ordersetdiff-test.R | function (vector, remove)
{
e <- get("data.env", .GlobalEnv)
e[["ordersetdiff"]][[length(e[["ordersetdiff"]]) + 1]] <- list(vector = vector,
remove = remove)
.Call("_bnclassify_ordersetdiff", PACKAGE = "bnclassify",
vector, remove)
}
|
af301fd7dbe1507573fb5b6ce0f24744be313cf1 | 5032968fceb435ad9d47df6885782cbf71439564 | /fantloons r.R | 14748cf9a8db9e607577aaf7bfe608e8555c7c23 | [] | no_license | surjithcm7/EXCEL-R-ASSIGNMENTS | 5063f3f8434246de939a288163724067a95c3856 | c948bba5f35c31f39bfdbcf229d963654fc4afa8 | refs/heads/master | 2021-03-19T00:47:02.103035 | 2020-03-13T16:46:50 | 2020-03-13T16:46:50 | 247,115,828 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 320 | r | fantloons r.R | fant<- read.csv(file.choose())
View(fant)
summary(fant)
attach(fant)
?prop.test
table1<-table(Weekdays,Weekend)
table1
prop.test(x=c(66,167),n=c(233,167),conf.level = 0.95,correct = FALSE,alternative = "two.sided")
prop.test(x=c(66,167),n=c(233,167),conf.level = 0.95,correct = FALSE,alternative = "greater")
|
2ecb23e2a37f5bb333814b7ae9ea659351f89146 | e9c19721fc6c4b30ca8b0d2e5851401299daa729 | /run_analysis.R | 3be89da91b658d40b7a9e6df5a5a0d00e3f8bad0 | [] | no_license | hkyagnes/Getting_and_cleaning_data_project | 604ad656fbc1a6ac6924466749f4c872b4ac484a | 2eed1ea6d8c6e86b641e704bd8aa9c3a7a843746 | refs/heads/master | 2021-01-23T17:18:56.424383 | 2015-04-23T11:52:48 | 2015-04-23T11:52:48 | 34,411,584 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,661 | r | run_analysis.R | #You should create one R script called run_analysis.R that does the following.
# Getting & Cleaning Data course project. Steps implemented below:
#   1. Merge the training and the test sets to create one data set.
#   2. Extract only the mean and standard-deviation measurements.
#   3. Use descriptive activity names to name the activities.
#   4. Appropriately label the data set with descriptive variable names.
#   5. Create a second, independent tidy data set with the average of each
#      variable for each activity and each subject.
library(dplyr)

# NOTE(review): setwd() in a script is discouraged, but the relative
# read.table() calls below depend on this course-specific layout.
setwd("~/Desktop/assignment")

## Read all the raw UCI HAR files.
X_test <- read.table("X_test.txt")              # test-set measurements
y_test <- read.table("y_test.txt")              # test-set activity codes
X_train <- read.table("X_train.txt")            # training-set measurements
y_train <- read.table("y_train.txt")            # training-set activity codes
subject_test <- read.table("subject_test.txt")  # test-set subject ids
subject_train <- read.table("subject_train.txt")# training-set subject ids
features <- read.table("features.txt")          # measurement (column) names

## Clean the feature names so they can serve as descriptive column names.
features[,2] <- gsub("[()]","", features[,2])   # remove "()"
features[,2] <- gsub("[-]","", features[,2])    # remove "-"
features[,2] <- tolower(features[,2])           # lower-case everything
features[,2] <- gsub("^t", "time", features[,2])# leading t -> "time"
features[,2] <- gsub("^f", "freq", features[,2])# leading f -> "freq"
features[,2] <- gsub("[,]","", features[,2])    # remove ","

## Pair each subject with its activity code, per group.
test <- cbind(subject_test, y_test)
names(test) <- c("subject", "activities")
train <- cbind(subject_train, y_train)
names(train) <- c("subject", "activities")

## Label the measurement tables with the cleaned feature names.
names(X_test) <- features[,2]
names(X_train) <- features[,2]

## Keep only the mean and standard-deviation measurements.
Mean_std <- grepl("mean|std", features[,2])
test_meansd <- X_test[,Mean_std]
train_meansd <- X_train[,Mean_std]

## Combine ids with measurements, then stack test and train sets.
test_combined <- cbind(test, test_meansd)
train_combined <- cbind(train, train_meansd)
test_train <- rbind(test_combined, train_combined)

## Replace the activity codes (1-6) with descriptive names.
test_train[,2] <- gsub("1", "walking", test_train[,2])
test_train[,2] <- gsub("2", "walking_upstairs", test_train[,2])
test_train[,2] <- gsub("3", "walking_downstairs", test_train[,2])
test_train[,2] <- gsub("4", "sitting", test_train[,2])
test_train[,2] <- gsub("5", "standing", test_train[,2])
test_train[,2] <- gsub("6", "laying", test_train[,2])

## Average each measurement for every subject/activity combination.
## Columns 3:88 are the 86 mean/std measurement columns (2 id columns first).
Final <- aggregate(test_train[, 3:88], by= list(subject = test_train$subject, activities = test_train$activities), mean)
Final <- arrange(Final, subject, activities) # sort by subject, then activity

## Bug fix: write.file() does not exist in base R (the script previously
## failed here); write.table() was intended.
write.table(Final, file = "tidy_data.txt", row.names = FALSE)
|
0f34991e7d360ae43f96b3511fbf73d3fb714430 | cb2e9f97913785f7d79bf1c6f6fea93298b19cf2 | /R/buildData.R | 104ab27c80e4bb7d3f8219c9d98abc11349f361f | [] | no_license | brian-bot/rCGH | 5aa6d08ea708f94c8e70e90a6549b919637819d8 | 4c27a21d4d84a5a45168369281ab6d5fe85df705 | refs/heads/master | 2016-09-07T01:52:14.147611 | 2015-02-03T19:50:43 | 2015-02-03T19:50:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,701 | r | buildData.R |
################################
# Build a Agilent object
################################
buildAgilent <- function(filePath, Ref="cy3", sampleName=NA, labName=NA, supFlags=TRUE){
    # Build a cghObj from an Agilent Feature Extraction export:
    # parse the header, read the probe matrix, optionally smooth away
    # QC-flagged probes, collapse duplicated probes and attach presettings.
    # NOTE: the `Ref` argument is currently unused (kept for compatibility).
    base_name <- gsub("(.*)/", "", filePath)   # strip directory part
    obj <- new("cghObj",
               info = c(fileName = base_name,
                        sampleName = sampleName,
                        labName = labName,
                        platform = 'Agilent'))
    obj@info <- c(obj@info, .readAgilentInfo(filePath))
    obj@cnSet <- .readAgilentMatrix(filePath)
    if (supFlags) {
        obj <- .suppressFlags(obj)
    }
    obj <- .suppressDuplic(obj)
    .preset(obj)
}
.readAgilentInfo <- function(filePath){
    # Parse the key/value header block of an Agilent Feature Extraction file
    # (rows 2-9 of the file: one row of field names, one row of values) into
    # a named character vector of array metadata.
    cat('Reading information...')
    arrayInfo <- read.csv(filePath, header = F, fill = T, skip = 1, nrows = 8, sep = "\t")
    # First row of this mini-table holds the field names, second row the values.
    tmpNames <- as.vector(arrayInfo[1,])
    barCode = as.character(arrayInfo[2, which(tmpNames == "FeatureExtractor_Barcode")])
    gridName = as.character(arrayInfo[2, which(tmpNames == "Grid_Name")])
    scanDate = as.character(arrayInfo[2, which(tmpNames == "Scan_Date")])
    # Reorder the scan date into "YY-MM-DD"; assumes "MM-DD-YY hh:mm"-style
    # input -- TODO confirm against real Feature Extraction exports.
    scanDate <- gsub("(.*)-(.*)-(.*) (.*)+", "\\3-\\1-\\2", scanDate)
    programVersion = as.character(arrayInfo[2, which(tmpNames == "Protocol_Name")])
    gridGenomicBuild = as.character(arrayInfo[2, which(tmpNames == "Grid_GenomicBuild")])
    # Agilent arrays are two-colour; 'reference' is a fixed descriptive label.
    ref = 'Dual color hybridization'
    cat('\tDone.\n')
    return(c(barCode = barCode,
            gridName = gridName,
            scanDate = as.character(scanDate),
            programVersion = programVersion,
            gridGenomicBuild = gridGenomicBuild,
            reference = ref,
            analyseDate = format(Sys.Date(), "%Y-%m-%d")
            )
        )
}
.readAgilentMatrix <- function(filePath){
    # Read the probe-level table of an Agilent file. The table begins at the
    # line starting with "FEATURES" (located within the first 15 lines); the
    # result is filtered/annotated by .curateAgilentCnSet().
    cat('Reading values...')
    head_lines <- readLines(filePath, n = 15)
    feature_row <- grep("^FEATURES", head_lines)
    raw_set <- read.csv(filePath, header = TRUE, skip = feature_row - 1,
                        sep = "\t", stringsAsFactors = FALSE)
    cat('\tDone.\n')
    .curateAgilentCnSet(raw_set)
}
.getRFlags <- function(cnSet){
    # Row indices of probes whose red channel fails QC: saturated,
    # non-uniform outlier, or not well above background.
    bad <- cnSet$rIsSaturated == 1 |
        cnSet$rIsFeatNonUnifOL == 1 |
        cnSet$rIsWellAboveBG == 0
    flagged <- which(bad)
    cat(length(flagged), 'flagged probes on chromosome', unique(cnSet$ChrNum), '\n')
    flagged
}
.getGFlags <- function(cnSet){
    # Row indices of probes whose green channel fails QC: saturated,
    # non-uniform outlier, or not well above background.
    bad <- cnSet$gIsSaturated == 1 |
        cnSet$gIsFeatNonUnifOL == 1 |
        cnSet$gIsWellAboveBG == 0
    flagged <- which(bad)
    cat(length(flagged), 'flagged probes on chromosome', unique(cnSet$ChrNum), '\n')
    flagged
}
.medFlag <- function(values, flagged, minpos, maxpos){
    # For each flagged position, return the median of `values` over a window
    # of up to 8 neighbours on each side, clipped to [minpos, maxpos].
    # Fix: vapply replaces sapply so an empty `flagged` yields numeric(0)
    # instead of a type-unstable list().
    vapply(flagged, function(f){
        lo <- max(minpos, f - 8)
        hi <- min(maxpos, f + 8)
        as.numeric(median(values[lo:hi], na.rm = TRUE))
    }, numeric(1))
}
.replaceFlags <- function(cnSet){
    # Replace the median signal of QC-flagged probes with a local (windowed)
    # median, per chromosome and per colour channel.
    # NOTE(review): this function carries two latent assumptions --
    #   * sapply() below is relied on to return a LIST of flag vectors; if all
    #     chromosomes happened to have the same number of flags it would
    #     simplify to a matrix and rflags[[chr]] would index single elements;
    #   * S[[chr]] uses chr as a POSITIONAL index after as.numeric(), which is
    #     only correct when chromosome numbers are exactly 1..length(S).
    # Confirm both hold for the inputs before refactoring.
    S <- split(cnSet, cnSet$ChrNum)
    cat("\nRed channel:\n")
    rflags <- sapply(S, function(subset) .getRFlags(subset))
    cat("\nGreen channel:\n")
    gflags <- sapply(S, function(subset) .getGFlags(subset))
    # Recompute the red signal chromosome by chromosome.
    newR <- lapply(names(rflags), function(chr){
        chr <- as.numeric(chr)
        flagged <- rflags[[chr]]
        tmp <- S[[chr]]
        tmp$rMedianSignal[flagged] <- .medFlag(tmp$rMedianSignal, flagged, 1, nrow(tmp))
        as.numeric(tmp$rMedianSignal)
    })
    # Same for the green channel.
    newG <- lapply(names(gflags), function(chr){
        chr <- as.numeric(chr)
        flagged <- gflags[[chr]]
        tmp <- S[[chr]]
        tmp$gMedianSignal[flagged] <- .medFlag(tmp$gMedianSignal, flagged, 1, nrow(tmp))
        as.numeric(tmp$gMedianSignal)
    })
    # Reassembly by concatenation assumes cnSet rows are already ordered by
    # ChrNum (callers sort beforehand) so split order matches row order.
    cnSet$rMedianSignal <- do.call(c, newR)
    cnSet$gMedianSignal <- do.call(c, newG)
    return(cnSet)
}
.suppressFlags <- function(object){
    # Replace QC-flagged probe signals by local medians, then drop the QC
    # flag columns. Only applies to Agilent arrays; other platforms are
    # returned unchanged.
    if(grepl("Agilent", getInfo(object, 'platform'))){
        cat('Suppressing flagged probes...\n')
        cnSet <- getCNset(object)
        cnSet <- .replaceFlags(cnSet)
        flagNames <- c('gIsSaturated', 'rIsSaturated', 'gIsFeatNonUnifOL',
                       'rIsFeatNonUnifOL', 'gIsWellAboveBG', 'rIsWellAboveBG')
        # Bug fix: if no flag column is present, cnSet[, -integer(0)] would
        # drop EVERY column; only subset when there is something to remove.
        dropIdx <- which(colnames(cnSet) %in% flagNames)
        if (length(dropIdx) > 0L) {
            cnSet <- cnSet[, -dropIdx, drop = FALSE]
        }
        object@cnSet <- cnSet
    }
    cat("\n")
    return(object)
}
.suppressDuplic <- function(object){
    # Collapse probes that appear more than once (same ProbeName) into a
    # single row whose red/green signals are the medians across duplicates,
    # then re-sort the probe set by genomic position.
    cnSet <- getCNset(object)
    cnSet <- cnSet[order(cnSet$ProbeName),]
    # NOTE(review): this sanity check runs AFTER ProbeName has already been
    # used in order() above; if the column were missing, the error would
    # surface there first.
    if (!any(colnames(cnSet) == 'ProbeName')) stop('None of the columns can be identifed as ProbeNames')
    dup <- duplicated(cnSet$ProbeName)
    if(any(dup)){
        cat('Suppressing duplicated probes...')
        duplicProbes <- as.character(unique(cnSet$ProbeName[dup]))
        duplicSet <- subset(cnSet, cnSet$ProbeName %in% duplicProbes)
        # plyr::ddply with non-standard evaluation: group duplicates by the
        # full probe identity and take per-channel medians. Assumes the
        # Agilent column set (r/gMedianSignal) -- confirm before reusing for
        # other platforms.
        medianSet <- ddply(.data = duplicSet, .variables=.(ProbeName, SystematicName, ChrNum, ChrStart, ChrEnd), summarize,
                           rMedianSignal = median(rMedianSignal, na.rm=TRUE), gMedianSignal = median(gMedianSignal, na.rm=TRUE))
        cnSet <- rbind.data.frame(cnSet[-which(cnSet$ProbeName %in% duplicProbes),], medianSet)
    }
    # Restore genomic order and renumber rows 1..n.
    cnSet <- cnSet[order(cnSet$ChrNum, cnSet$ChrStart),]
    rownames(cnSet) <- seq(1, nrow(cnSet))
    object@cnSet <- cnSet[order(cnSet$ChrNum, cnSet$ChrStart),]
    cat('Done.\n')
    return(object)
}
.preset <- function(object, Ksmooth=1000, Kmax=20, Nmin=Kmax*8, Mwidth=2, UndoSD=0.75, Alpha=1e-10){
    # Attach default segmentation parameters to the object. Affymetrix
    # arrays are denser, so they get heavier smoothing (Ksmooth) and a
    # larger undo threshold (UndoSD).
    cat('Adding presettings...')
    if (grepl("Affymetrix", getInfo(object, 'platform'))) {
        Ksmooth <- 7500
        UndoSD <- 1
    }
    object@param <- list(Ksmooth = Ksmooth, Kmax = Kmax, Nmin = Nmin,
                         Mwidth = Mwidth, UndoSD = UndoSD, Alpha = Alpha)
    cat('Done.\n')
    object
}
.curateAgilentCnSet <- function(cnSet){
    # Keep only the columns rCGH needs downstream (ids, signals, QC flags),
    # drop control/chrM/random probes, and expand "chrN:start-end" into
    # numeric ChrNum/ChrStart/ChrEnd columns. Rows come back ordered by
    # genomic position.
    wanted <- c("ProbeName", "SystematicName",
                "gMedianSignal", "rMedianSignal",
                "gIsSaturated", "rIsSaturated",
                "gIsFeatNonUnifOL", "rIsFeatNonUnifOL",
                "gIsWellAboveBG", "rIsWellAboveBG")
    keepCol <- which(as.character(colnames(cnSet)) %in% wanted)
    cat('Filtering control probes...')
    # Genuine genomic probes look like "chrN:start-end"; this pattern drops
    # controls, chrM and "random" contigs.
    isChr <- grep('^chr[^Mrandom]*$', cnSet$SystematicName)
    cnSet <- cnSet[isChr, keepCol]
    cat('Done.\n')
    cat('Checking chr nums...')
    locus <- cnSet$SystematicName
    chrLabel <- gsub("(chr)(.*):(\\d+)-(\\d+)", "\\2", locus)
    chrLabel[chrLabel == "X"] <- 23
    chrLabel[chrLabel == "Y"] <- 24
    cnSet <- cbind.data.frame(
        ProbeName = cnSet$ProbeName,
        SystematicName = cnSet$SystematicName,
        ChrNum = as.numeric(chrLabel),
        ChrStart = as.numeric(gsub("(chr)(.*):(\\d+)-(\\d+)", "\\3", locus)),
        ChrEnd = as.numeric(gsub("(chr)(.*):(\\d+)-(\\d+)", "\\4", locus)),
        cnSet[, -c(1:2)]
    )
    cnSet <- cnSet[order(cnSet$ChrNum, cnSet$ChrStart), ]
    cat('Done.\n')
    return(cnSet)
}
################################
# Build a AffyCytoScan object
################################
buildAffyCytoScan <- function(filePath, sampleName=NA, labName=NA, useSNP=FALSE){
    # Build a cghObj from an Affymetrix CytoScan HD export.
    # useSNP = TRUE keeps SNP probes ("S-..."), FALSE keeps copy-number
    # probes ("C-...").
    base_name <- gsub("(.*)/", "", filePath)   # strip directory part
    obj <- new("cghObj",
               info = c(fileName = base_name,
                        sampleName = sampleName,
                        labName = labName,
                        platform = 'Affymetrix_CytoScanHD'))
    cyto <- .readCytoScan(filePath, useSNP)
    obj@info <- c(obj@info, cyto$infos)
    obj@cnSet <- cyto$values
    .preset(obj)
}
.getTagValue <- function(arrayInfos, tag){
    # Return the value part of the first header line matching `tag` (lines
    # are "key=value"). Returns NULL when no line matches, NA_character_
    # when the matching line has no "=".
    # Fix: the previous strsplit/[2] approach truncated values that
    # themselves contain "=" (e.g. file paths); everything after the first
    # "=" is now preserved.
    hits <- grep(tag, arrayInfos, value = TRUE)
    if (length(hits) == 0L) {
        return(NULL)
    }
    parts <- strsplit(hits[1], "=", fixed = TRUE)[[1]]
    if (length(parts) < 2L) {
        return(NA_character_)
    }
    paste(parts[-1], collapse = "=")
}
.readCytoScan <- function(filePath, useSNP){
    # Parse a CytoScan ChAS export: extract header metadata (old-format
    # files only) and the probe-level matrix. Assumes the header fits in
    # the first 1000 lines -- TODO confirm for very large headers.
    cat('Reading information...')
    # NOTE(review): fileName is computed but never used in this function.
    fileName <- gsub("(.*)/", "", filePath)
    arrayInfos <- readLines(filePath, n = 1000)
    # Old ChAS exports carry "#%affymetrix-..." metadata tags; newer ones
    # start directly with the data table.
    oldVersion <- any(grepl("#%affymetrix-array-type", arrayInfos))
    if(!oldVersion){
        # New format: no metadata available, table starts at line 1.
        startAt=1
        arrayType <- barCode <- gridName <- scanDate <- programVersion <- ucsc <- ensembl <- gridGenomicBuild <- ref <- NA
    } else{
        # NOTE(review): arrayType is extracted but never returned.
        arrayType <- .getTagValue(arrayInfos, "#%affymetrix-array-type")
        barCode <- .getTagValue(arrayInfos, "#%affymetrix-array-barcode")
        gridName <- .getTagValue(arrayInfos, "#%affymetrix-algorithm-param-state-annotation-file")
        scanDate <- .getTagValue(arrayInfos, "#%affymetrix-scan-date")
        programVersion <- .getTagValue(arrayInfos, "#%affymetrix-algorithm-version")
        ucsc <- .getTagValue(arrayInfos, "genome-version-ucsc")
        ensembl <- .getTagValue(arrayInfos, "genome-version-ensembl")
        gridGenomicBuild <- paste(ucsc, ensembl, sep = '/')
        ref <- .getTagValue(arrayInfos, "#%affymetrix-algorithm-param-state-reference-file")
        # Data table begins at the column-header line.
        startAt <- grep("ProbeSetName", arrayInfos)
    }
    cat('\tDone.\n')
    infos <- c(barCode=barCode, gridName=gridName,
               scanDate=format(as.Date(scanDate), "%Y-%m-%d"), programVersion=programVersion,
               gridGenomicBuild=gridGenomicBuild, reference=ref,
               analyseDate=format(Sys.Date(), "%Y-%m-%d")
    )
    values <- .readCytoScanMatrix(filePath, oldVersion, startAt, useSNP)
    return(list(infos=infos, values=values))
}
.readCytoScanMatrix <- function(filePath, oldVersion, startAt, useSNP){
    # Read the CytoScan probe-level table starting at `startAt`, keep SNP
    # ("S-...") or copy-number ("C-...") probes, normalise chromosome codes
    # and drop rows with missing position or uninformative signal.
    # NOTE: `oldVersion` is kept in the signature for compatibility; the
    # old/new chromosome recoding now lives in .renameChr().
    cat('Reading values...')
    cnSet <- read.csv(filePath, header=TRUE, skip=startAt-1, sep="\t", stringsAsFactors=FALSE)
    # ChAS appends suffixes to column names; strip everything after the dot.
    colnames(cnSet) <- gsub("\\..*", "", colnames(cnSet))
    colnames(cnSet)[1:3] <- c("ProbeName", "ChrNum", "ChrStart")
    if(useSNP){
        cnSet <- cnSet[grep("S-\\d", cnSet$ProbeName),]
    } else{
        cnSet <- cnSet[grep("C-\\d", cnSet$ProbeName),]
    }
    cnSet$ChrNum <- .renameChr(cnSet$ChrNum)
    # Coerce every non-id column to numeric (exported values may be text).
    for(i in 2:ncol(cnSet)) cnSet[,i] <- as.numeric(cnSet[,i])
    cnSet <- cnSet[order(cnSet$ChrNum, cnSet$ChrStart), ]
    # Drop rows with missing position or uninformative signal.
    # Bug fix: when no row matched, cnSet[-integer(0), ] silently deleted
    # EVERY row; only subset when there is something to remove.
    idx <- which(is.na(cnSet$ChrNum) | is.na(cnSet$ChrStart) | cnSet$WeightedLog2Ratio==0 | is.na(cnSet$SmoothSignal))
    if (length(idx) > 0L) {
        cnSet <- cnSet[-idx, ]
    }
    cnSet <- cnSet[order(cnSet$ChrNum, cnSet$ChrStart), ]
    cat('\tDone.\n')
    return(cnSet)
}
.renameChr <- function(ChrNum){
    # Normalise chromosome labels to numbers: sex chromosomes "X"/"Y"
    # become 23/24. Newer Affymetrix ChAS exports encode chr23/chr24 as
    # 24/25 instead, so when a 25 is present everything is shifted down.
    chr <- as.character(ChrNum)
    chr[chr == "X"] <- "23"
    chr[chr == "Y"] <- "24"
    if (any(chr == "25")) {
        chr[chr == "24"] <- "23"
        chr[chr == "25"] <- "24"
    }
    as.numeric(chr)
}
############################
# Read SNP6 from local dir
############################
buildAffySNP6 <- function(filePath, sampleName=NA, labName=NA, useSNP=FALSE){
    # Build a cghObj from an Affymetrix SNP6 probe-level export.
    # useSNP = TRUE keeps SNP probes ("SNP_A-..."), FALSE keeps
    # copy-number probes ("CN_...").
    base_name <- gsub("(.*)/", "", filePath)   # strip directory part
    obj <- new("cghObj",
               info = c(fileName = base_name,
                        sampleName = sampleName,
                        labName = labName,
                        # NOTE: synapseId is only set by this builder, not
                        # by the Agilent/CytoScan ones; kept for
                        # backward compatibility.
                        synapseId = NA,
                        platform = 'Affymetrix_snp6'))
    snp6 <- .readSNP6(filePath, useSNP)
    obj@info <- c(obj@info, snp6$infos)
    obj@cnSet <- snp6$values
    .preset(obj)
}
.readSNP6 <- function(filePath, useSNP){
    # Parse an Affymetrix SNP6 export: header metadata ("#key=value" lines)
    # plus the probe-level matrix. Assumes the header fits in the first
    # 750 lines -- TODO confirm.
    cat('Reading information...')
    # Dead code kept from an earlier bz2-decompression feature:
    # if(any(grepl("bz2", filePath))){
    #   destname <- gsub("[.]bz2$", "", filePath, ignore.case=TRUE)
    #   arrayInfos <- readLines(gunzip(filePath, destname=destname, overwrite=TRUE, remove=FALSE), n = 750)
    # } else{
    # }
    arrayInfos <- readLines(filePath, n = 750)
    # NOTE(review): arrayType is extracted but never returned.
    arrayType <- .getTagValue(arrayInfos, "#ArraySet")
    barCode = NA
    gridName <- .getTagValue(arrayInfos, "#state-annotation-file")
    Date <- .getTagValue(arrayInfos, "#state-time-start")
    # Reassemble "Year Month Day" from the whitespace-split timestamp;
    # presumably ctime-style ("Wkd Mon DD hh:mm:ss Year") -- verify.
    Date <- unlist(strsplit(Date, ' '))
    scanDate = paste(Date[5], Date[2], Date[3])#, sep = '-')
    programVersion <- .getTagValue(arrayInfos, "#option-program-version")
    ucsc <- .getTagValue(arrayInfos, "#genome-version-ucsc")
    # NOTE(review): this variable holds the NCBI build despite its name.
    ensembl <- .getTagValue(arrayInfos, "#genome-version-ncbi")
    gridGenomicBuild <- paste(ucsc, ensembl, sep = '/')
    ref <- .getTagValue(arrayInfos, "#state-reference-file")
    cat('\tDone.\n')
    infos <- c(barCode=barCode, gridName=gridName,
               scanDate=scanDate, programVersion=programVersion,
               gridGenomicBuild=gridGenomicBuild, reference=ref,
               analyseDate=format(Sys.Date(), "%Y-%m-%d")
    )
    # Data table begins at the column-header line containing "ProbeSet".
    startAt <- grep("ProbeSet", arrayInfos)
    values <- .readSNP6Matrix(filePath, startAt, useSNP)
    return(list(infos=infos, values=values))
}
.readSNP6Matrix <- function(filePath, startAt, useSNP){
    # Read the SNP6 probe-level table starting at `startAt`, keep SNP
    # ("SNP_A-...") or copy-number ("CN_...") probes, normalise chromosome
    # codes and sort by genomic position.
    cat('Reading values...')
    probes <- read.csv(filePath, header = TRUE, skip = startAt - 1,
                       sep = "\t", stringsAsFactors = FALSE)
    colnames(probes)[1:3] <- c("ProbeName", "ChrNum", "ChrStart")
    pattern <- if (useSNP) "^SNP_A-\\d+" else "CN_\\d+"
    probes <- probes[grep(pattern, probes$ProbeName), ]
    probes$ChrNum <- .renameChr(probes$ChrNum)
    probes <- probes[order(probes$ChrNum, probes$ChrStart), ]
    cat('\tDone.\n')
    probes
}
|
aabe964f093aae52e7aa23f916fcb7815cda0e85 | 47ff67dc83cb318d684afb58296ba9f2c4ec4a07 | /newspaper_group/code/topic_modeling.r | 836387f44ee50e97fb20a02004d69cd0f8d592f1 | [] | no_license | Islamicate-DH/hw | 378f0e8738a8c6b7bb438bea540af6d5af4c602a | 4af2cc99b8f75a57f32254a488df4e519ea81f49 | refs/heads/master | 2021-01-11T02:11:19.477315 | 2020-05-27T16:12:13 | 2020-05-27T16:12:13 | 70,803,965 | 2 | 12 | null | 2020-05-27T15:30:46 | 2016-10-13T12:26:32 | JavaScript | UTF-8 | R | false | false | 6,018 | r | topic_modeling.r | # Copyright Matthew Miller and Thomas Koentges
# Adapted by Tobias Wenzel
# In Course: Islamicate World 2.0
# University of Maryland, University Leipzig
#
# File description:
# This script is used to apply topic modelling to the corpora
# downloaded and cleaned in scrapeR and cleanR and save the results
# in csv format as well as a graphical interpretation by LDAvis.
# Start from a clean workspace so reruns are reproducible.
# NOTE(review): rm(list = ls()) in a shared script is discouraged (it wipes
# the user's session); kept because the original workflow relies on it.
rm(list=ls())
## libraries needed
libs <- c("tm", "XML", "RCurl", "plyr", "curl", "lda", "LDAvis", "compiler")
for (lib in libs) {
  suppressPackageStartupMessages(library(lib, character.only = TRUE))
}
enableJIT(3) # Enable JIT-compiling

## User settings:
K <- 15            # number of topics
G <- 5000          # num.iterations for the Gibbs sampler
alpha <- 0.02      # document-topic Dirichlet prior
eta <- 0.02        # topic-term Dirichlet prior
seed <- 37         # RNG seed for reproducible fits
terms_shown <- 40  # terms shown per topic in LDAvis

source.folder <- "~/Dropbox/Dokumente/islamicate2.0/reduced/"
corpus.file <- paste0(source.folder, "almasralyoum_clean.csv")
base_corpus.almarsi <- read.table(
  corpus.file, sep = ",", header = FALSE,
  encoding = "UTF-8", stringsAsFactors = F
)

# Bug fix: the original rbind() referenced base_corpus.ahram / .alwatan /
# .hespress / .thawra and a misspelled base_corpus.almari, none of which are
# defined in this script, so it always failed. Only the corpus actually
# loaded above is appended; add the corresponding read.table() calls before
# rbinding further corpora here.
base_corpus <- NULL
base_corpus <- rbind(base_corpus, base_corpus.almarsi)

research_corpus <- as.character(base_corpus$V2)  # article text
output_names <- as.character(base_corpus$V1)     # used to identify articles
# Removing remaining control characters, whitespace and numbers.
research_corpus <- gsub("[[:punct:]]", " ", research_corpus) # replace punctuation with space
research_corpus <- gsub("[[:cntrl:]]", " ", research_corpus) # replace control characters with space
research_corpus <- gsub("^[[:space:]]+", "", research_corpus) # remove whitespace at beginning of documents
research_corpus <- gsub("[[:space:]]+$", "", research_corpus) # remove whitespace at end of documents
research_corpus <- gsub("[0-9]", "", research_corpus) #remove numbers
# tokenize on space and output as a list (one character vector per document):
doc.list <- strsplit(research_corpus, "[[:space:]]+")
# Stemming is left out. See paper.
all_words <- unlist(doc.list)
all_words<-all_words[all_words!=""]
# NOTE(review): corpus_words is computed here but never used below.
corpus_words <- unique(all_words)
corpus_words <- sort(corpus_words)
# compute the table of terms (term frequencies, most frequent first):
term.table <- table(all_words)
term.table <- sort(term.table, decreasing = TRUE)
# remove terms that are stop words or occur fewer than "occurenses" times:
# NOTE(review): hard-coded absolute path -- this only runs on the original
# author's machine; parameterise before reuse.
stop_words<-scan(file="/home/tobias/Dokumente/islamicate2.0/hw/newspaper_group/stopwords_ar.txt",what = "", sep="\n",encoding = "UTF-8")
occurences <- 10
del <- names(term.table) %in% stop_words | term.table < occurences
term.table <- term.table[!del]
vocab <- names(term.table)
# now put the documents into the format required by the lda package:
# a 2 x n integer matrix per document (0-based vocab index, count of 1).
# Tokens not in `vocab` (rare/stop words) are silently dropped.
get.terms <- function(x) {
  index <- match(x, vocab)
  index <- index[!is.na(index)]
  rbind(as.integer(index - 1), as.integer(rep(1, length(index))))
}
documents <- lapply(doc.list, get.terms)
#save(documents, file = "/home/tobias/Dropbox/Dokumente/islamicate2.0/reduced/documents.RData")
# Compute some statistics related to the data set:
D <- length(documents) # number of documents (2,000)
W <- length(vocab) # number of terms in the vocab (14,568)
doc.length <- sapply(documents, function(x) sum(x[2, ])) # number of tokens per document [312, 288, 170, 436, 291, ...]
N <- sum(doc.length) # total number of tokens in the data (546,827)
term.frequency <- as.integer(term.table) # frequencies of terms in the corpus [8939, 5544, 2411, 2410, 2143, ...]
# Fit the LDA model with collapsed Gibbs sampling (seeded for
# reproducibility; priors alpha/eta and iteration count G set above):
set.seed(seed)
fit <- lda.collapsed.gibbs.sampler(documents = documents, K = K, vocab = vocab,
                                   num.iterations = G, alpha = alpha,
                                   eta = eta, initial = NULL, burnin = 0,
                                   compute.log.likelihood = TRUE)
# Normalise the raw counts into probability distributions:
# theta = per-document topic proportions, phi = per-topic term proportions.
theta <- t(apply(fit$document_sums + alpha, 2, function(x) x/sum(x)))
phi <- t(apply(t(fit$topics) + eta, 2, function(x) x/sum(x)))
research_corpusAbstracts <- list(phi = phi,
                                 theta = theta,
                                 doc.length = doc.length,
                                 vocab = vocab,
                                 term.frequency = term.frequency)
# create the JSON object to feed the visualization:
json <- createJSON(phi = research_corpusAbstracts$phi,
                   theta = research_corpusAbstracts$theta,
                   doc.length = research_corpusAbstracts$doc.length,
                   vocab = research_corpusAbstracts$vocab,
                   term.frequency = research_corpusAbstracts$term.frequency,
                   R=terms_shown)
# Write the LDAvis pages to disk (browser deliberately not opened).
vis.folder <- paste(source.folder, "visTotal", sep = "")
serVis(json, out.dir = vis.folder, open.browser = FALSE)
## get topic-term distributions and export as csv
## (one row per vocabulary term, one column per topic; each topic column is
## headed by its 7 highest-probability terms joined with "_")
phi.t <- t(phi)
phi.t.df <- data.frame(matrix(nrow = length(phi.t[, 1]), ncol = K + 1))
phi.t.df[, 1] <- names(phi.t[, 1])
for (i in 1:K) {
  phi.t.df[, i + 1] <- phi.t[, i]
}
phicolnames <- vector(mode = "character", length = K + 1)
phicolnames[1] <- "term"
for (i in 1:K) {
  phicolnames[i + 1] <- paste(head(phi.t.df[order(phi.t.df[, i + 1], decreasing = TRUE), ], n =
                                     7)[, 1], sep = "", collapse = "_")
}
colnames(phi.t.df) <- phicolnames
phi.filename <- paste(source.folder, "visTotal/phi.csv", sep = "")
write.table(
  phi.t.df, file = phi.filename,
  append = FALSE, quote = FALSE,
  sep = ",", eol = "\n", na = "NA",
  dec = ".", row.names = FALSE, col.names = TRUE
)
## get document-topic distributions and export as csv
## (one row per document, identified by output_names; reuses the topic
## column headers built above)
theta.frame <-
  data.frame(matrix(nrow = length(theta[, 1]), ncol = K + 1))
theta.frame[, 1] <- output_names
for (i in 1:K) {
  theta.frame[, i + 1] <- theta[, i]
}
thetacolnames <- phicolnames
thetacolnames[1] <- "identifier"
colnames(theta.frame) <- thetacolnames
theta.filename <- paste(source.folder, "visTotal/theta.csv", sep = "")
write.table(
theta.frame, file = theta.filename,
append = FALSE, quote = FALSE,
sep = ",", eol = "\n", na = "NA",
dec = ".", row.names = FALSE,
col.names = TRUE
) |
c62b48f82ca756a3d9eac023264e2286c794358c | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802285-test.R | 0190bdc237fe31bde14ed7d64e3a191ab799071d | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 489 | r | 1612802285-test.R | testlist <- list(bytes1 = c(-8519681L, 738197503L, -50342946L, -536870913L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -41635L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L, 1566399837L), pmutation = 5.59504565543767e+141)
# Invoke the unexported ByteCodeMutation routine from the mcga package
# (::: reaches internal functions) with the fuzzing inputs built above.
result <- do.call(mcga:::ByteCodeMutation,testlist)
# Dump the structure of the returned value for the fuzzing log.
str(result)
86c24c9e1bcb648952da706ffa2f7c5ee2f54990 | cde4585868ceab23990d85707e04dd48b8bb9274 | /R/SampPost2.R | 6491267fe8bb71ae3406f41bc05ae0bc2b8bc0d5 | [] | no_license | JHHatfield/TrendSummaries | 4e2789fef653e03583aa4f08912eb06a7f6f6f24 | 06f93c8048c19842dde22281899f1303cb58a776 | refs/heads/master | 2022-03-03T21:26:43.423107 | 2019-10-15T08:30:30 | 2019-10-15T08:30:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,846 | r | SampPost2.R | #' SampPost - Sample posterior function
#'
#' @description This function is used to extract a number of annual
#' occupancy estimates from any given posterior. The function loops
#' through Bugs output files within a given location.
#'
#' @param indata The file path to a location containing .rdata files
#' of the bugs outputs, one for each species
#' @param output_path The location where the output will be saved.
#' @param REGION_IN_Q The region for which occupancy estimates will be
#' extracted. UK based occupancy model examples include,
#' 'psi.fs', 'psi.fs.r_GB', 'psi.fs.r_ENGLAND'.
#' @param sample_n The number of values extracted from the posterior.
#' @param combined_output This specifies whether the output should be a
#' single .csv, containing all species (TRUE) or a single .csv file
#' per species (FALSE). Default is TRUE.
#'
#' @return A .csv file for each species containing annual occupancy
#' estiamtes for each year as columns, and an iteration and species
#' name column.
#' @export
SampPost2 <- function(indata = "../data/model_runs/",
                      output_path = "../data/sampled_posterior_1000/",
                      REGION_IN_Q = "psi.fs",
                      years = minmaxyear,
                      sample_n = 2,
                      combined_output = TRUE){
  # Extract `sample_n` posterior draws of annual occupancy for every species
  # model found in `indata`, and write them to `output_path`.
  # NOTE(review): the default for `years` refers to a global `minmaxyear`
  # (named vector with 'minyear'/'maxyear') that must exist in the calling
  # environment -- confirm before relying on the default.

  ### set up species list we want to loop through ###
  spp.list <- list.files(indata)[grepl(".rdata", list.files(indata))]

  # Pre-allocate the result: one row per species x posterior draw, one
  # column per year. Bug fix: ncol previously used the global `minmaxyear`
  # directly, silently ignoring the `years` argument.
  samp_post <- data.frame(matrix(NA,
                                 nrow = length(spp.list) * sample_n,
                                 ncol = (years["maxyear"] - years["minyear"]) + 1))
  colnames(samp_post) <- years["minyear"]:years["maxyear"]
  samp_post$iteration <- rep(1:sample_n, length(spp.list))
  samp_post$species <- rep(spp.list, rep(sample_n, length(spp.list)))

  # loop through species
  for (i in spp.list){
    print(i)
    out <- NULL
    raw_occ <- NULL
    # load() is expected to create `out` (a BUGS output object exposing
    # $BUGSoutput$sims.list, $min_year and $max_year) in this scope.
    load(paste(indata, i, sep = ""))
    raw_occ <- data.frame(out$BUGSoutput$sims.list[REGION_IN_Q])
    # NOTE(review): sample() errors if the posterior holds fewer than
    # `sample_n` draws.
    raw_occ <- raw_occ[sample(1:nrow(raw_occ), sample_n),]

    # put the draws into samp_post, aligned on the model's own year range
    samp_post[samp_post$species == i, as.character(c(out$min_year:out$max_year))] <- raw_occ

    if(combined_output == FALSE) {
      write.csv(raw_occ, file = paste(output_path, gsub(".rdata", "" ,i), "_sample_", sample_n, "_post_", REGION_IN_Q, ".csv", sep = ""), row.names = FALSE)
    }
  }
  if(combined_output == TRUE){
    # Combined output is saved as a single .rdata with descriptive names.
    colnames(samp_post) <- c(paste('year_', years["minyear"]:years["maxyear"], sep = ''), 'iteration', 'species')
    save(samp_post, file = paste(output_path, "all_spp_sample_", sample_n, "_post_", REGION_IN_Q, ".rdata", sep = ""))
  }
}
|
dac5cb28ab9527907e04980fdf0b847144c1529c | 2fb65d442efadbc3a1db41fcf25fed8958c4e04f | /R/read_dti_tck.R | 258c657fad4f0157a2e8f14f73709bab5889c440 | [
"MIT"
] | permissive | dfsp-spirit/freesurferformats | 8f507d8b82aff7c34b12e9182893007064e373b9 | 6cf9572f46608b7bb53887edd10dfed10e16e13d | refs/heads/master | 2023-07-25T00:28:09.021237 | 2023-07-19T07:29:07 | 2023-07-19T07:29:07 | 203,574,524 | 22 | 3 | NOASSERTION | 2023-07-19T07:29:09 | 2019-08-21T11:57:16 | R | UTF-8 | R | false | false | 7,708 | r | read_dti_tck.R | # Functions for reading DTI tracking data from files in MRtrix TCK format.
# See http://mrtrix.readthedocs.io/en/latest/getting_started/image_data.html?highlight=format#tracks-file-format-tck for a spec.
#' @title Read DTI tracking data from file in MRtrix 'TCK' format.
#'
#' @param filepath character string, path to the \code{TCK} file to read.
#'
#' @examples
#' \dontrun{
#' tckf = "~/simple.tck";
#' tck = read.dti.tck(tckf);
#' }
#'
#' @return named list with entries 'header' and 'tracks'. The tracks are organized into a list of matrices. Each n x 3 matrix represents the coordinates for the n points of one track, the values in each row are the xyz coords.
#'
#' @export
read.dti.tck <- function(filepath) {
    tck = list('header' = list('derived' = list()));
    # The header is plain text: "mrtrix tracks", then "key: value" lines,
    # then "END". readLines also hits the binary tail, hence the
    # suppressed warnings (embedded nuls).
    all_lines = suppressWarnings(readLines(filepath));
    if(length(all_lines) < 4L) {
        stop("File not in TCK format: too few lines.");
    }
    tck$header$id = all_lines[1];
    if(tck$header$id != "mrtrix tracks") {
        stop("File not in TCK format: Invalid first line.");
    }
    for(line_idx in 2L:length(all_lines)) {
        current_line = all_lines[line_idx];
        if(current_line == "END") {
            break;
        } else {
            # Each header line is "key: value"; store it verbatim.
            line_parts = unlist(strsplit(current_line, ':'));
            lkey = trimws(line_parts[1]);
            lvalue = trimws(line_parts[2]);
            tck$header[[lkey]] = lvalue;
            if(lkey == "file") {
                # "file: <name> <offset>"; name "." means data in this file.
                file_parts = unlist(strsplit(lvalue, ' '));
                tck$header$derived$filename_part = trimws(file_parts[1]); # for future multi-file support, currently always '.'
                if(tck$header$derived$filename_part != ".") {
                    stop("Multi-file TCK files not supported.");
                }
                tck$header$derived$data_offset = as.integer(trimws(file_parts[2]));
            }
        }
    }
    all_lines = NULL; # free, no longer needed.
    valid_datatypes = c('Float32BE', 'Float32LE', 'Float64BE', 'Float64LE');
    if(! tck$header$datatype %in% valid_datatypes) {
        stop("Invalid datatype in TCK file header");
    }
    if(is.null(tck$header$derived$data_offset)) {
        stop("Invalid TCK file, missing file offset header entry.");
    }
    # Determine endianness of the binary data from the datatype suffix.
    tck$header$derived$endian = "little";
    if(endsWith(tck$header$datatype, 'BE')) {
        tck$header$derived$endian = "big";
    }
    # Determine size of entries in bytes (32- vs 64-bit floats).
    tck$header$derived$dsize = 4L; # default to 32bit
    if(startsWith(tck$header$datatype, 'Float64')) {
        tck$header$derived$dsize = 8L;
    }
    # Read the binary track data that follows the header.
    fs = file.size(filepath);
    num_to_read = (fs - tck$header$derived$data_offset) / tck$header$derived$dsize;
    fh = file(filepath, "rb");
    on.exit({ close(fh) }, add=TRUE);
    seek(fh, where = tck$header$derived$data_offset, origin = "start");
    tracks_rawdata = readBin(fh, numeric(), n = num_to_read, size = tck$header$derived$dsize, endian = tck$header$derived$endian);
    # Rows consisting of NaNs are track separators; the final all-Inf row
    # marks end-of-file.
    tracks_raw_matrix = matrix(tracks_rawdata, ncol = 3, byrow = TRUE);
    # Split the point rows into per-track n x 3 matrices.
    # Fixes vs. the previous loop-based splitter:
    #  * single-point tracks are now 1 x 3 matrices (drop = FALSE), matching
    #    the documented "n x 3 matrix" contract instead of bare vectors;
    #  * an empty data section no longer errors (1:nrow on 0 rows);
    #  * vectorised split replaces the O(n^2) rbind-in-a-loop build.
    is_sep = rowSums(is.nan(tracks_raw_matrix) | is.infinite(tracks_raw_matrix)) > 0L;
    track_id = cumsum(is_sep) + 1L;  # each separator closes the current track
    keep = !is_sep;
    tck$tracks = unname(lapply(
        split(which(keep), track_id[keep]),
        function(rows) tracks_raw_matrix[rows, , drop = FALSE]
    ));
    return(tck);
}
#' @title Read DTI tracking per-coord data from file in MRtrix 'TSF' format.
#'
#' @param filepath character string, path to the \code{TSF} file to read.
#'
#' @note The data in such a file is one value per track point, the tracks are not part of the file but come in the matching TCK file.
#'
#' @seealso \code{read.dti.tck}
#'
#' @examples
#' \dontrun{
#' tsff = "~/simple.tsf";
#' tsf = read.dti.tsf(tsff);
#' }
#'
#' @return named list with entries 'header' and 'scalars'. The scala data are available in 2 representations: 'merged': a vector of all values (requires external knowledge on track borders), and 'scalar_list': organized into a list of vectors. Each vector represents the values for the points of one track.
#'
#' @export
read.dti.tsf <- function(filepath) {
    tsf = list('header' = list('derived' = list()), 'scalars' = list());
    # The header mirrors the TCK layout: "mrtrix track scalars", then
    # "key: value" lines, then "END"; binary scalars follow at the offset
    # given by the "file" entry. Warnings from the binary tail suppressed.
    all_lines = suppressWarnings(readLines(filepath));
    if(length(all_lines) < 4L) {
        stop("File not in TSF format: too few lines.");
    }
    tsf$header$id = all_lines[1];
    if(tsf$header$id != "mrtrix track scalars") {
        stop("File not in TSF format: Invalid first line.");
    }
    for(line_idx in 2L:length(all_lines)) {
        current_line = all_lines[line_idx];
        if(current_line == "END") {
            break;
        } else {
            line_parts = unlist(strsplit(current_line, ':'));
            lkey = trimws(line_parts[1]);
            lvalue = trimws(line_parts[2]);
            tsf$header[[lkey]] = lvalue;
            if(lkey == "file") {
                # "file: <name> <offset>"; name "." means data in this file.
                file_parts = unlist(strsplit(lvalue, ' '));
                tsf$header$derived$filename_part = trimws(file_parts[1]); # for future multi-file support, currently always '.'
                if(tsf$header$derived$filename_part != ".") {
                    stop("Multi-file TSF files not supported.");
                }
                tsf$header$derived$data_offset = as.integer(trimws(file_parts[2]));
            }
        }
    }
    all_lines = NULL; # free, no longer needed.
    valid_datatypes = c('Float32BE', 'Float32LE', 'Float64BE', 'Float64LE');
    if(! tsf$header$datatype %in% valid_datatypes) {
        stop("Invalid datatype in TSF file header");
    }
    if(is.null(tsf$header$derived$data_offset)) {
        stop("Invalid TSF file, missing file offset header entry.");
    }
    # Determine endianness of the binary data from the datatype suffix.
    tsf$header$derived$endian = "little";
    if(endsWith(tsf$header$datatype, 'BE')) {
        tsf$header$derived$endian = "big";
    }
    # Determine size of entries in bytes (32- vs 64-bit floats).
    tsf$header$derived$dsize = 4L; # default to 32bit
    if(startsWith(tsf$header$datatype, 'Float64')) {
        tsf$header$derived$dsize = 8L;
    }
    # Read the binary scalar data that follows the header.
    fs = file.size(filepath);
    num_to_read = (fs - tsf$header$derived$data_offset) / tsf$header$derived$dsize;
    fh = file(filepath, "rb");
    on.exit({ close(fh) }, add=TRUE);
    seek(fh, where = tsf$header$derived$data_offset, origin = "start");
    scalar_rawdata = readBin(fh, numeric(), n = num_to_read, size = tsf$header$derived$dsize, endian = tsf$header$derived$endian);
    # NaN and Inf values are track separators.
    is_sep = is.nan(scalar_rawdata) | is.infinite(scalar_rawdata);
    # 'merged': all per-point values concatenated; track boundaries must
    # come from the matching TCK file.
    tsf$scalars$merged = scalar_rawdata[!is_sep];
    # 'scalar_list': one numeric vector per track, split at the separators.
    # (Vectorised split replaces the previous O(n^2) c()-in-a-loop build.)
    track_id = cumsum(is_sep) + 1L;  # each separator closes the current track
    tsf$scalars$scalar_list = unname(split(scalar_rawdata[!is_sep], track_id[!is_sep]));
    return(tsf);
}
}
|
ad8de22ff01698c99a625bd69f5587ec14a5f4b9 | 8be30af40f07aa5d529caa67d3b7e5090ec98dfd | /shiny/server.R | 81365506a3ee7665fa7daa7a89b41eabd0a06aca | [] | no_license | nchin212/Coursera_Data_Science_Capstone_Project | eeb3ed205a18ebc414035862d01c7db94aae58ab | 59fc9cafe76a73777adc6bda825326dd3d76b25a | refs/heads/main | 2023-02-11T01:21:33.675639 | 2020-11-02T04:58:39 | 2020-11-02T04:58:39 | 307,921,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,443 | r | server.R | # Load functions from other file
source('functions.R')
library(shiny)
# Define application
# Define the server: reacts to the user's text input and renders a table of
# predicted next words plus a word cloud of the candidates.
shinyServer(function(input, output) {
  # Reactive prediction: re-runs whenever the text input or the requested
  # number of predictions changes.
  prediction = reactive({
    # Parse the user input once and reuse both derived rows (the original
    # called fun.input() twice on the same text for rows 1 and 2).
    input_text <- input$text
    parsed <- fun.input(input_text)
    input1 <- parsed[1, ]
    input2 <- parsed[2, ]
    number_pred <- input$slider
    # Predict (last expression is the reactive's value)
    fun.predict(input1, input2, n = number_pred)
  })

  # Prediction table: column 1 is hidden (used only internally).
  output$table <- renderDataTable(prediction(),
                                  option = list(pageLength = 5,
                                                lengthMenu = list(c(5, 10, 50), c('5', '10', '50')),
                                                columnDefs = list(list(visible = FALSE, targets = 1)),
                                                searching = FALSE
                                  )
  )

  # Word cloud; repeatable() keeps the layout stable across re-renders.
  wordcloud_rep <- repeatable(wordcloud)

  output$wordcloud <- renderPlot(
    wordcloud_rep(
      prediction()$nextword,
      prediction()$score,
      rot.per = 0.25,
      colors = brewer.pal(8, 'Dark2'),
      scale = c(4, 0.5),
      max.words = 200
    )
  )
})
|
eb3a7bb10060ca68112913a912e8b32ea2f6ed8c | cef58f02be386e631ef9ceae43c0f3adc7c4de97 | /Codes/Figure_3/Figure_3_0_data_rearrange1.R | 5d0a4611970b7383f72c918e42188abc16c84bb0 | [
"MIT"
] | permissive | tzuliu/Towards-a-General-Methodology-of-Bridging-Ideological-Spaces | 5ff8239504bc042c82e65aec91ccb1e5fb6a5a3b | b01970d714f851cae0298957cc57e98d053267d4 | refs/heads/master | 2022-11-30T00:05:29.250348 | 2020-08-08T07:44:14 | 2020-08-08T07:44:14 | 281,132,075 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,688 | r | Figure_3_0_data_rearrange1.R | ## Rearranging UTAS 2012 (1)
## Author: Tzu-Ping Liu & Gento Kato
## Date: 07/25/2020
## Environment: R 4.0.2 on Ubuntu 20.04
## Clear Workspace
rm(list = ls())
## Set Working Directory (Automatically) ##
require(rprojroot); require(rstudioapi)
if (rstudioapi::isAvailable()==TRUE) {
setwd(dirname(rstudioapi::getActiveDocumentContext()$path));
}
projdir <- find_root(has_file("thisishome.txt"))
cat(paste("Working Directory Set to:\n",projdir))
setwd(projdir)
## Packages
require(haven)
# Variable List Data
lsv <- read.csv(paste0(projdir,"/Data/utas_data/utas_variable_list_utf-8.csv"),
stringsAsFactors = FALSE, fileEncoding = "UTF-8")[-c(1,2),]
# 2012 Data
# candidates
c12 <- read.csv(paste0(projdir,"/Data/utas_data/2012UTASP20150910.csv"),
stringsAsFactors = FALSE, fileEncoding="CP932")
# voters
v12 <- read_sav(paste0(projdir,"/Data/utas_data/2012-2013UTASV131129.sav"),
encoding="CP932")
#'
#' # Prepare 2012 Data
#'
## ID Data
id12 <- data.frame(id = c(c12$ID, v12$ID),
cv = c(rep("candidate",nrow(c12)),
rep("voter", nrow(v12))))
id12$psup <- NA
id12$psup_short <- NA
psuplab <- c("Liberal Democratic Party",
"Democratic Party of Japan",
"Tomorrow Party of Japan",
"Komei-to (Clean Government Party)",
"Japan Restoration Party",
"Japanese Communist Party",
"Your Party",
"Social Democratic Party",
"Other Parties/NA","Abstained")
psuplab_short <- c("LDP",
"DPJ",
"TPJ",
"CGP (Komei)",
"JRP",
"JCP",
"YP",
"SDP",
"Other/NA","Abstained")
id12$psup[id12$cv=="candidate"] <-
ifelse(c12$PARTY%in%1, psuplab[2],
ifelse(c12$PARTY%in%2, psuplab[1],
ifelse(c12$PARTY%in%3, psuplab[3],
ifelse(c12$PARTY%in%4, psuplab[4],
ifelse(c12$PARTY%in%5, psuplab[5],
ifelse(c12$PARTY%in%6, psuplab[6],
ifelse(c12$PARTY%in%7, psuplab[7],
ifelse(c12$PARTY%in%8, psuplab[8],
psuplab[9]))))))))
id12$psup_short[id12$cv=="candidate"] <-
ifelse(c12$PARTY%in%1, psuplab_short[2],
ifelse(c12$PARTY%in%2, psuplab_short[1],
ifelse(c12$PARTY%in%3, psuplab_short[3],
ifelse(c12$PARTY%in%4, psuplab_short[4],
ifelse(c12$PARTY%in%5, psuplab_short[5],
ifelse(c12$PARTY%in%6, psuplab_short[6],
ifelse(c12$PARTY%in%7, psuplab_short[7],
ifelse(c12$PARTY%in%8, psuplab_short[8],
psuplab_short[9]))))))))
# For Party Membership for Politicians (PARTY)
# 1. 民主党
# 2. 自民党
# 3. 未来の党
# 4. 公明党
# 5. 日本維新の会
# 6. 共産党
# 7. みんなの党
# 8. 社民党
# 9. 新党大地
# 10. 国民新党
# 11. 新党日本
# 12. 新党改革
# 13. 諸派
# 14. 無所属
## Abstained as 66
v12$Q010200[which(!v12$Q010100%in%2)] <- 66
id12$psup[id12$cv=="voter"] <-
ifelse(v12$Q010200%in%1, psuplab[2],
ifelse(v12$Q010200%in%2, psuplab[1],
ifelse(v12$Q010200%in%3, psuplab[3],
ifelse(v12$Q010200%in%4, psuplab[4],
ifelse(v12$Q010200%in%5, psuplab[5],
ifelse(v12$Q010200%in%6, psuplab[6],
ifelse(v12$Q010200%in%7, psuplab[7],
ifelse(v12$Q010200%in%8, psuplab[8],
ifelse(v12$Q010200%in%c(66,90),psuplab[10],
psuplab[9])))))))))
id12$psup_short[id12$cv=="voter"] <-
ifelse(v12$Q010200%in%1, psuplab_short[2],
ifelse(v12$Q010200%in%2, psuplab_short[1],
ifelse(v12$Q010200%in%3, psuplab_short[3],
ifelse(v12$Q010200%in%4, psuplab_short[4],
ifelse(v12$Q010200%in%5, psuplab_short[5],
ifelse(v12$Q010200%in%6, psuplab_short[6],
ifelse(v12$Q010200%in%7, psuplab_short[7],
ifelse(v12$Q010200%in%8, psuplab_short[8],
ifelse(v12$Q010200%in%c(66,90),psuplab_short[10],
psuplab_short[9])))))))))
# For PR Vote for Voters (Q010200)
# 238 1. 民主党
# 482 2. 自民党
# 79 3. 日本未来の党
# 163 4. 公明党
# 280 5. 日本維新の会
# 68 6. 共産党
# 119 7. みんなの党
# 30 8. 社民党
# 10 9. 新党大地
# 0 10. 国民新党
# 2 11. 新党改革
# 1 12. その他の政党
# 401 66. 非該当(無投票)
# 21 90. 白票・無効票など(投票所で棄権した)
# 6 99. 無回答
# Make Party Support Variables Factor
id12$psup <- factor(id12$psup, levels=psuplab)
table(id12$psup, useNA="always")
id12$psup_short <- factor(id12$psup_short, levels=psuplab_short)
table(id12$psup_short, useNA="always")
# Long-Term Party Leaning (not voted party) Variable for Voters
# Same label set as psup, but "Abstained" becomes "Independent" since this
# measures partisanship (Q013700) rather than vote choice.
id12$pltsup <- NA
id12$pltsup_short <- NA
pltsuplab <- ifelse(psuplab=="Abstained","Independent (Muto-Ha)",
psuplab)
pltsuplab_short <- ifelse(psuplab_short=="Abstained","Independent",
psuplab_short)
id12$pltsup[id12$cv=="voter"] <-
ifelse(v12$Q013700%in%1, pltsuplab[2],
ifelse(v12$Q013700%in%2, pltsuplab[1],
ifelse(v12$Q013700%in%3, pltsuplab[3],
ifelse(v12$Q013700%in%4, pltsuplab[4],
ifelse(v12$Q013700%in%5, pltsuplab[5],
ifelse(v12$Q013700%in%6, pltsuplab[6],
ifelse(v12$Q013700%in%7, pltsuplab[7],
ifelse(v12$Q013700%in%8, pltsuplab[8],
ifelse(v12$Q013700%in%14,pltsuplab[10],
pltsuplab[9])))))))))
id12$pltsup_short[id12$cv=="voter"] <-
ifelse(v12$Q013700%in%1, pltsuplab_short[2],
ifelse(v12$Q013700%in%2, pltsuplab_short[1],
ifelse(v12$Q013700%in%3, pltsuplab_short[3],
ifelse(v12$Q013700%in%4, pltsuplab_short[4],
ifelse(v12$Q013700%in%5, pltsuplab_short[5],
ifelse(v12$Q013700%in%6, pltsuplab_short[6],
ifelse(v12$Q013700%in%7, pltsuplab_short[7],
ifelse(v12$Q013700%in%8, pltsuplab_short[8],
ifelse(v12$Q013700%in%14,pltsuplab_short[10],
pltsuplab_short[9])))))))))
# Make Long-Term Party Leaning Variables Factor
id12$pltsup <- factor(id12$pltsup, levels=pltsuplab)
table(id12$pltsup, useNA="always")
id12$pltsup_short <- factor(id12$pltsup_short, levels=pltsuplab_short)
table(id12$pltsup_short, useNA="always")
## Party/Koen-kai Membership
# 1 = voter reports belonging to a party or candidate support group (Q014401).
id12$pmem <- NA
id12$pmem[which(id12$cv=="voter")] <- ifelse(v12$Q014401%in%1,1,0)
table(id12$pmem, useNA="always")
## Demographic variables
# Gender (Female=1, Male=0)
id12$female <- NA
id12$female[which(id12$cv=="candidate")] <- ifelse(c12$SEX%in%2,1,
ifelse(c12$SEX%in%1, 0, NA))
id12$female[which(id12$cv=="voter")] <- ifelse(!v12$Q014100%in%c(1,2),NA,
ifelse(v12$Q014100==2,1,0))
table(id12$female, useNA="always")
# Age Cohort (Ordered Factor, No Raw Age Variable)
# Voters only; candidates keep NA.
id12$agecat <- NA
id12$agecat[which(id12$cv=="voter")] <-
ifelse(v12$Q014200%in%1, "20s",
ifelse(v12$Q014200%in%2, "30s",
ifelse(v12$Q014200%in%3, "40s",
ifelse(v12$Q014200%in%4, "50s",
ifelse(v12$Q014200%in%5, "60s",
ifelse(v12$Q014200%in%6, "70s/over",NA))))))
id12$agecat <- as.factor(id12$agecat)
table(id12$agecat, useNA="always")
# Education
# Raw Categories (Not necessarily in the order of level)
id12$edu <- NA
id12$edu[which(id12$cv=="voter")] <-
ifelse(v12$Q014300%in%1, "Elementary/JHS",
ifelse(v12$Q014300%in%2, "Senior High School",
ifelse(v12$Q014300%in%3, "Vocational School",
ifelse(v12$Q014300%in%4, "Junior College",
ifelse(v12$Q014300%in%5, "University",
ifelse(v12$Q014300%in%6, "Graduate School",
ifelse(v12$Q014300%in%7, "Others", NA)))))))
id12$edu <- factor(id12$edu, levels=c("Elementary/JHS","Senior High School",
"Vocational School","Junior College",
"University", "Graduate School", "Others"))
table(id12$edu, useNA="always")
# 4 Categories, Ordered (Others to NA)
id12$edu4 <- NA
id12$edu4 <-
ifelse(id12$edu%in%c("Elementary/JHS"), "<=JHS",
ifelse(id12$edu%in%c("Senior High School"), "SHS",
ifelse(id12$edu%in%c("Vocational School", "Junior College"), ">SHS & <University",
ifelse(id12$edu%in%c("University","Graduate School"), ">=University", NA))))
id12$edu4 <- factor(id12$edu4, levels=c("<=JHS","SHS",">SHS & <University",">=University"))
table(id12$edu4, useNA="always")
# Jobs (nominal categories)
id12$job <- NA
id12$job[which(id12$cv=="voter")] <-
ifelse(v12$Q014500%in%1, "Company Employee",
ifelse(v12$Q014500%in%2, "Public Servant",
ifelse(v12$Q014500%in%3, "Self-Employed",
ifelse(v12$Q014500%in%4, "Agriculture/Fishery",
ifelse(v12$Q014500%in%5, "Part-Timer",
ifelse(v12$Q014500%in%6, "Homemaker",
ifelse(v12$Q014500%in%7, "Student",
ifelse(v12$Q014500%in%8, "Unemployed",
ifelse(v12$Q014500%in%9, "Others", NA)))))))))
id12$job <- factor(id12$job,
levels=c("Company Employee",
"Public Servant",
"Self-Employed",
"Agriculture/Fishery",
"Part-Timer",
"Homemaker",
"Student",
"Unemployed",
"Others"))
table(id12$job)
# Residential Prefecture (See code book for prefecture names)
id12$pref <- NA
id12$pref[which(id12$cv=="candidate")] <- as.numeric(ifelse(c12$PREFEC%in%seq(1,47),c12$PREFEC,NA)) # PR only candidates have missing values
id12$pref[which(id12$cv=="voter")] <- as.numeric(v12$PREFEC)
# House Electoral District (Numbering within prefecture)
id12$hdist <- NA
id12$hdist[which(id12$cv=="candidate")] <- as.numeric(ifelse(c12$DISTRICT%in%seq(1,25),c12$DISTRICT,NA)) # PR only candidates have missing values
id12$hdist[which(id12$cv=="voter")] <- as.numeric(v12$HRDIST)
## Policy Data
# Variables
# Keep only questions asked of both candidates and voters in 2012.
lsv12 <- lsv[complete.cases(lsv[,c("cand12","voter12")]),]
nrow(lsv12) # 23 Variables
# Candidate
# Values outside each question's documented min..max range are set to NA.
c12tr <- as.data.frame(c12[,lsv12$cand12])
c12tr <- sapply(1:nrow(lsv12), function(k) ifelse(c12tr[,k]%in%c(lsv12$min[k]:lsv12$max[k]),
c12tr[,k],NA))
colnames(c12tr) <- lsv12$qid
# Voter
v12tr <- as.data.frame(v12[,lsv12$voter12])
v12tr <- sapply(1:nrow(lsv12), function(k) ifelse(v12tr[,k]%in%c(lsv12$min[k]:lsv12$max[k]),
v12tr[,k],NA))
colnames(v12tr) <- lsv12$qid
## Combine Everything
d12 <- cbind(id12, rbind(c12tr,v12tr))
head(d12)
## Save Data
saveRDS(d12, paste0(projdir,"/Outputs/application/utas12_ooc.rds"))
f9570b0c898e99a81f1983f405bf17103ad10e51 | d6fefc7986e9e912bc20a216381952c7c2dd56d4 | /functions/5'siteAS_HF_analyze.r | 5f71407fa6af27501d4ba755451aff20b0bf9120 | [] | no_license | YalanBi/AA | 74674ebfc778eedfd9f9221f9177f5f7d8b6b4fc | b9be902c90e4d86b5f0152d67479145051741db8 | refs/heads/master | 2021-01-10T20:44:20.030509 | 2013-10-03T14:25:22 | 2013-10-03T14:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,491 | r | 5'siteAS_HF_analyze.r | #
# Functions for analysing A. Thaliana Tiling Arrays
# last modified: 27-08-2013
# first written: 22-08-2013
# (c) 2013 GBIC Yalan Bi, Danny Arends, R.C. Jansen
#
#************************************************ this is the final version for analyzing results of testing 5'3'AS ! ^_^ ************************************************#
#**************************************************************** testing algorithm: Wilcox.test! ****************************************************************#
# NOTE(review): hard-coded Windows path; adjust to the local data location.
setwd("D:/Arabidopsis Arrays")
#calculate the threshold for 5'AS
# Stack the per-chromosome Wilcoxon p-value tables (column 1 = gene ID,
# columns 3-6 = -log10(P) for the 4 environments).
asMatrix <- NULL
for(chr in 1:5){
asMatrix <- rbind(asMatrix, read.table(paste0("Data/AS/splicing5'site_chr", chr, "_wt_p2.txt"), row.names=NULL))
}
#Bonferroni correction
nrow(asMatrix)# = 16648 exons were tested in 5'AS
length(unique(asMatrix[ ,1]))# = 16648 genes were tested in 5'AS
-log10(0.05/nrow(asMatrix)/4)# 16648 exons were tested * 4 Env; => asThre=6.12
#calculate the numbers of sig exons and genes
# Threshold uses 30528 (the genome-wide number of tests) rather than the
# 16648 tested here, giving the stricter cutoff of 6.39.
asThre=round(-log10(0.05/30528/4), digits=2)# =6.39
matrixTU <- NULL #a matrix for numbers of exons that -log10(P) are higher than or equal to asThre in each env and across envs from chr1-chr5,
#NOTE: nExon=nGene(one first exon in one gene)
for(chr in 1:5){
aschr <- read.table(paste0("Data/AS/splicing5'site_chr", chr, "_wt_p2.txt"), row.names=NULL)
asGeneList <- list()
nTU <- NULL #number of exons that -log10(P) are higher than or equal to asThre in each env
for(env in 1:4){
nTU <- c(nTU, length(which(aschr[ ,env+2] >= asThre))) #now, the matrix has no rowname, so env+2
asGeneList[[paste0("env", env)]] <- aschr[aschr[ ,env+2] >= asThre, 1] #genes that its first exon is spliced out in each env separately
}
#cnt_mixEnv <- number of exons that -log10(P) are higher than or equal to asThre in ANY env
cnt_mixEnv <- 0
#gn_mixEnv <- genes having one or more exons that -log10(P) are higher than or equal to asThre in ANY env
gn_mixEnv <- NULL
for(e in 1:nrow(aschr)){
if(any(aschr[e, 3:6] >= asThre)){
cnt_mixEnv <- cnt_mixEnv + 1
gn_mixEnv <- c(gn_mixEnv, aschr[e, 1])
}
}
asGeneList$mixEnv <- gn_mixEnv
matrixTU <- rbind(matrixTU, c(nTU, cnt_mixEnv))
# Persist the per-chromosome gene lists only when something was significant.
if(length(gn_mixEnv) > 0) save(asGeneList, file = paste0("Data/AS/splicing5'site_chr", chr, "_wt_p2.Rdata"))
else cat("chr", chr, "NO genes, no file saved!\n")
}
rownames(matrixTU) <- paste0("chr", 1:5)
colnames(matrixTU) <- c(paste0("Env", 1:4), "mixEnv")
matrixTU
7af8874659bebf71531916d7e03a7a26f4025935 | 37a1fc40671ce4eb1b888d918f891456429bc4bb | /R/sugm.plot.R | 97650c74c3bbe052703e3210710e83b4d391880e | [] | no_license | cran/flare | 8877ab049f1b74df41da3bc2d4911655f70a2b44 | c76c10285bf3c19c0a3bf637feaeac10cef9405b | refs/heads/master | 2022-06-06T00:55:27.579503 | 2022-05-23T07:02:21 | 2022-05-23T07:02:21 | 17,696,072 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,296 | r | sugm.plot.R | #-----------------------------------------------------------------------------------#
# Package: flare #
# sugm.plot(): graph visualization #
# Author: Xingguo Li #
# Email: <xingguo.leo@gmail.com> #
# Date: Dec 2nd 2013 #
# Version: 1.1.0 #
#-----------------------------------------------------------------------------------#
sugm.plot <- function(G, epsflag = FALSE, graph.name = "default", cur.num = 1, location = NULL){
  # Visualize the graph encoded by (the nonzero pattern of) adjacency matrix G.
  #
  # Args:
  #   G          adjacency matrix; any nonzero entry becomes an undirected edge.
  #   epsflag    if TRUE, additionally write the plot to "<graph.name><cur.num>.eps".
  #   graph.name base name for the EPS file.
  #   cur.num    numeric suffix for the EPS file name.
  #   location   directory for the EPS output; defaults to the current directory.
  gcinfo(FALSE)
  # Accept both a missing argument and an explicit NULL (the original only
  # checked missing(), so sugm.plot(G, location = NULL) crashed in setwd()).
  if(missing(location) || is.null(location)) location = getwd()
  # Write into 'location' without permanently changing the caller's working
  # directory: restore it on exit.
  oldwd = setwd(location)
  on.exit(setwd(oldwd), add = TRUE)
  g = graph.adjacency(as.matrix(G != 0), mode = "undirected", diag = FALSE)
  layout.grid = layout.fruchterman.reingold(g)
  if(epsflag == TRUE){
    postscript(paste(paste(graph.name, cur.num, sep = ""), "eps", sep = "."), width = 8.0, height = 8.0)
    on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
  }
  par(mfrow = c(1, 1))
  plot(g, layout = layout.grid, edge.color = 'gray50', vertex.color = "red", vertex.size = 2, vertex.label = NA)
  invisible(NULL)
}
|
3489f548df782ab74052e2cd8cd88f969f6c7f5e | 41cff625d6d1352aac02d1f206279d16e86685a1 | /tmp/tmp_demo.R | f7da32029806ca450c1450dc3d1201b2e164b0e5 | [] | no_license | PeteHaitch/MethylationTuples | dae3cf80085d58f57ac633d99f3be44e6fb84daa | 4e127d2ad1ff90dbe8371e8eeba4babcb96e86f2 | refs/heads/master | 2020-12-11T22:52:16.651509 | 2015-04-24T13:26:56 | 2015-04-24T13:27:12 | 24,593,259 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,912 | r | tmp_demo.R | #methpat <- mp1
methpat_order <- order(methpat)
methpat_rd_sorted <- rowRanges(methpat)[methpat_order]
meth_level <- methLevel(methpat, min_cov = 5L)
# ipd <- 1:2000
ipd <- sort(unique(diff(start(methpat_rd_sorted))))
ipd <- ipd[ipd > 0]
in_feature <- rep(NA, nrow(methpat))
in_feature_levels <- unique(in_feature)
pair_feature_status <- sort(unique(rowSums(expand.grid(in_feature_levels,
in_feature_levels))),
na.last = FALSE)
id_dt <- setDT(expand.grid(IPD = ipd,
strand = levels(strand(methpat)),
pair_feature_status = pair_feature_status))
id_dt[, c("KEY", "ID") := list(paste(IPD, strand, pair_feature_status,
sep = ''),
seq_len(nrow(id_dt)))]
setkey(id_dt, ID)
seqnames <- as.character(seqnames(methpat_rd_sorted))
strand <- as.character(strand(methpat_rd_sorted))
pos <- start(methpat_rd_sorted)
method <- 'pearson'
system.time({pairs_idx <- .makeAllPairsCpp(
methpat_order, seqnames, strand, pos, in_feature, ipd, id_dt)})
system.time({pairs_idx <- .makeAdjacentPairsCpp(
methpat_order, seqnames, strand, pos, in_feature, id_dt)})
cors_list <- lapply(colnames(methpat), function(sample_name,
pairs_idx, betas) {
beta_pairs <- data.table(ID = pairs_idx[["ID"]],
sample = sample_name,
beta1 = betas[pairs_idx[["i"]], sample_name],
beta2 = betas[pairs_idx[["j"]], sample_name])
beta_pairs[, .myCor(beta1, beta2, method = method,
conf.level = conf.level), by = list(ID, sample)]
}, pairs_idx = pairs_idx, betas = betas)
cors <- setkey(rbindlist(cors_list), ID, sample)
val <- id_dt[cors]
val[, c("ID", "KEY") := list(NULL, NULL)] |
3237f2f816dd6dc9eb9cf76d34a4b0bae74c1f73 | 7d7bf5398ce600efbcf7e0dc74fd132ca02d496b | /R/detect.CP.state.R | 3edf13cfdf9c69446ed0290723f275cafdda2e8d | [] | no_license | rexdouglass/cdcar | 2e6a77829831e732a6530aae30aec5274e9a2236 | b7b0ff0a6706d3e068e3484dc2a9ab0a70cec5ab | refs/heads/master | 2023-03-20T09:34:44.468108 | 2021-01-01T17:05:31 | 2021-01-01T17:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,412 | r | detect.CP.state.R | #' COVID-19 Data Checking and Reparing (CDCAR)
#'
#' This function detects the abnormal data point and/or data period of state level covid-19 data (or similar type of data including the count time series and epidemic data).
#'
detect.CP.state <- function(dat = list(), state.show = NULL, plot.show = TRUE, cp.sd = 2, slope.r = 5, manual.decision = FALSE){
# Extract subset of the data based on state.show and county.show
if(length(state.show) > 0){
dat.sub <- dat %>% filter(State %in% state.show) ; # dim(dat.sub)
}else{
dat.sub <- dat
}
states = dat.sub$State
dates0 = names(dat.sub)[-1]
dates = as.Date(dates0, "X%Y.%m.%d")
dat.new = dat.sub
# CP detection
n.day = length(dates)
n.loc = nrow(dat.sub)
dates.CP = matrix(0, nrow = n.loc, ncol = n.day)
for(i in 1:nrow(dat.sub)){
y = dat.sub [i, -c(1)]
if(sum(!is.na(y)) == 0| sum(y >0) < 4 | length(unique(unlist(y)) )==2){
dat.new[i, -c(1)] = y
}else{
ind = (min(which(y[1,]>0))):ncol(y)
y.sub = y[1, ind]
y.sub = as.vector(unlist(y.sub))
x = 1:length(y.sub)
df = data.frame("x" = x , "y" = y.sub ); colnames(df) <- c("x", "y")
fit_lm = lm(y ~ 1 + x, data = df)
fit_segmented= try(segmented(fit_lm, seg.Z = ~x))
est =(fit_segmented)$psi
slope = slope(fit_segmented)$x[,1]
slope.inc = slope[-1] / slope[-length(slope)]
if(!is.null(est) & max(slope.inc)> slope.r & min(est[,3]) <cp.sd) {
cp.est = est[which(slope.inc> slope.r & est[,3]<cp.sd) ,2]
cp.x.pos = sapply(cp.est, ceiling)
cp = ind[cp.x.pos]
print(paste0("An abnormal point detected at " , states[i], " on ", dates[cp] ))
if(plot.show == TRUE){
plot(fit_segmented$fitted.values, type = "l", main = paste(states[i]), ylab = "Cumulative Counts", xlab = "Date", axes = FALSE)
points(df)
lines.segmented(fit_segmented)
points.segmented(fit_segmented)
axis(1, x, dates[ind])
axis(2)
legend("topleft", paste("Slope: ", as.vector(unlist(slope)) ))
box()
}
fitted = fit_segmented$fitted.values
res = fit_segmented$residuals
check = which(res[cp.x.pos[1] : min(cp.x.pos[1]+ 10, length(res))] * res[cp.x.pos[1]] <0)
if(length(check) == 0){
n.repair = 0
}else{
n.repair = min(check) - 1
}
if(manual.decision == TRUE){
decide <- readline(prompt="Do you want to adjust this change point? [y/n]: ")
if(decide =="y"){
dat.new [i, cp:(cp+n.repair)] = fitted[cp.x.pos[1]:(cp.x.pos[1]+ n.repair)]
dates.CP [i, cp:(cp+n.repair)] = 1
# Plot the repaired points
plot(fit_segmented$fitted.values, type = "l", main = paste(states[i], "CP repaired"), ylab = "Cumulative Counts", xlab = "Date", axes = FALSE)
points(df)
points(cp.x.pos[1]:(cp.x.pos[1]+ n.repair), fitted[cp.x.pos[1]:(cp.x.pos[1]+ n.repair)], col = "red")
lines.segmented(fit_segmented)
points.segmented(fit_segmented)
axis(1, x, dates[ind])
axis(2)
box()
}
}else{
dat.new [i, cp:(cp+n.repair)] = NA
dates.CP [i, cp:(cp+n.repair)] = 1
}
}
}
}
list(dat.sub = dat.sub, dat.new = dat.new,
dates.CP = dates.CP)
}
|
d735e420204093948308ca0848274488c9f472d3 | 3ed5bdb1598c238492d5279c7544cacc334b68bf | /Appendix_B_R/example06_data_frame.r | 18b5c2a5b8b4db7686c9af210c14f4f20cb6519f | [
"MIT"
] | permissive | itanaskovic/Data-Science-Algorithms-in-a-Week | 385975fcc91f03d9dd5bb297c4579567f70cc301 | 879cb4c96b35d57e593a85b54dcda41f91d27533 | refs/heads/master | 2020-04-16T13:49:24.075418 | 2019-01-15T08:49:16 | 2019-01-15T08:49:16 | 165,644,067 | 0 | 0 | MIT | 2019-01-14T10:43:48 | 2019-01-14T10:43:48 | null | UTF-8 | R | false | false | 122 | r | example06_data_frame.r | temperatures = data.frame(
fahrenheit = c(5,14,23,32,41,50),
celsius = c(-15,-10,-5,0,5,10)
)
print(temperatures)
|
6134c0fc052018517f318adf3dcfb3ba30d69d0c | 29585dff702209dd446c0ab52ceea046c58e384e | /plsdof/R/normalize.R | 60a85dda073ba40aecebca91a8d0042e722e3165 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 227 | r | normalize.R | normalize <-
function(v,w=NULL){
norm.v<-sqrt(sum(v^2))
v<-v/norm.v
if (is.null(w)==TRUE){
return(v)
}
if (is.null(w)==FALSE){
w<-w/norm.v
return(list(v=v,w=w))
}
}
|
7375a38a52d0ec2d67c0f673953abc396a51e56f | 3eea7b91250904903a2c618db45aacf4d8dbdbe4 | /lezione6/lezione6.R | e7284d5e74e293f716557dead395f7f20a585d29 | [] | no_license | andreaardenti/R | afb885030c6994ab8173898caa56878b34316b26 | fdc203a2cd7dfc66233d28d0ace4d94496d9d40b | refs/heads/master | 2020-05-14T10:43:51.952064 | 2019-04-16T21:01:17 | 2019-04-16T21:01:17 | 181,766,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,610 | r | lezione6.R | #Studiamo la funzione di Gauss
#rimuove la memoria di tutte le variabili presenti in memoria per partire "puliti"
rm(list = ls())
grades <- read.csv("~/Desktop/Esercitazioni/R/lezione6/grades.csv")
#esporiamo il dataset
summary(grades)
a <- grades$Test1
#indicatori centrali
min(a)
m <- mean(a)
median(a)
summary(a)
#indicatori di dispersione
#interquartile - distanza tra il 3° quartile i il 1° quartile
IQR(a)
#varianza - In statistica, con riferimento a una serie di valori,
#la media dei quadrati degli scarti dei singoli valori dalla loro media aritmetica.
#varianza è una misura di quanto i dati si allontanano dalla media
var(a)
hist(a,8)
plot(density(a))
#deviazione standard = la radice quadrata della varianza
s <- sd(a)
#skewness- indica se i dati sono bilanciati rispetto alla media
#bilanciamento del dataset
#cerchiamo la funzione skewness tra le librerie di R e la carichiamo...
#in packages si trovano i pacchetti disponibili - se non è presente lo installiamo cliccando nel bottone install
#install.packages("fBasics")
#library("fBasics", lib.loc="/Library/Frameworks/R.framework/Versions/3.5/Resources/library")
skewness(a)
kurtosis(a)
#approssimiamo i dati "test1" con una funzione gaussiana di media e deviazione standard uguale a quella calcolata
x <- seq(25,55,0.1)
gaussiana <- (1/sqrt(2*pi*s^2))*exp(-(1/2)*(x-m)^2/s^2)
points(x, gaussiana, type="l", col="red")
#facciamo il plot della densita di tutti i test
plot(density(grades$Test1, bw=5), xlim=c(0,100))
d2 <- density(grades$Test2, bw=5)
points(d2$x, d2$y, type="l", col="red")
d3 <- density(grades$Test3, bw=5)
points(d3$x, d3$y, type="l", col="green")
d4 <- density(grades$Test4, bw=5)
points(d4$x, d4$y, type="l", col="blue")
#dobbiamo normalizzare i dati che provengono da test diversi perchè hanno range/scale diverse,
#li dobbiamo riporta ad una scala unica
#normalizzazione lineare e z-scoring
#primo metodo - NORMALIZZAZIONE LINEARE
#x_normalizzato = x originale meno il minimo, diviso il massimo meno il minimo
#(x_originale - min) / (max-min)
tn1 <- (grades$Test1-min(grades$Test1))/(max(grades$Test1-min(grades$Test1)))
tn2 <- (grades$Test2-min(grades$Test2))/(max(grades$Test2-min(grades$Test2)))
tn3 <- (grades$Test3-min(grades$Test3))/(max(grades$Test3-min(grades$Test3)))
tn4 <- (grades$Test4-min(grades$Test4))/(max(grades$Test4-min(grades$Test4)))
plot(density(tn1))
d2 <- density(tn2)
points(d2$x, d2$y, type="l", col="red")
d3 <- density(tn3)
points(d3$x, d3$y, type="l", col="green")
d4 <- density(tn4)
points(d4$x, d4$y, type="l", col="blue")
#secondo metodo - NORMALIZZAZIONE Z-SCORE
#quanto si allontanano in nostri dati dalla media se la distribuzione fosse gaussiano
#z-score(x) = x-media/s dove s è la deviazione standard
tz1 <- scale(grades$Test1)
plot(density(tz1, bw=3), ylim=c(0,0.2)) #lo zero stavolta è nella media
tz2 <- (scale(grades$Test2))
tz3 <- (scale(grades$Test3))
tz4 <- (scale(grades$Test4))
d2 <- (density(tz2, bw=3))
points(d2$x, d2$y, type="l", col="red")
d3 <- (density(tz3, bw=3))
points(d3$x, d3$y, type="l", col="green")
d4 <- (density(tz4, bw=3))
points(d4$x, d4$y, type="l", col="blue")
#effetti dellla normalizzazione
par(mfrow=c(1,2))
plot(grades$Test1, grades$Test4, xlim=c(0,100), ylim=c(0,100))
plot(tz1, tz4, pch=6, col="red", xlim=c(-2,2), ylim=c(-2,2))
Z <- data.frame(tz1,tz2,tz3,tz4)
plot(Z)
#studiamo le relazione tra diverse variabili
#correlazioni - indice di pearson (covarianza diviso le varianze)
cor(tz1,tz2)
cor(Z)
#per essere importante una correlazione deve essere superiore a 0.5 (la scala va da -1 a 1)
par(mfrow=c(1,1))
plot(tz3,tz4)
#regressione lineare - formula di primo grado che mi fa prevedere y dato x
# Y = A *x + B .....A volte x più B
modello <- lm(tz4 ~ tz3) #tz4 in funzione di tz3
modello$coefficients #B è intercepet
points(tz3, modello$fitted.values, pch=18, col="red")
plot(tz3, modello$residuals) #mostra gli errori del nostro modello
abline(0,0) #plotta la linea orizzontale sullo zero
summary(modello)
#call - il modello è stato creato da tz4 ~ tz3
#residuals - gli errori del modello
#coefficents - restituisce i punti B e A
#Signif. codes - 3 stelle importante
#Adjusted R-squared - più vicino è a 1 piu il modello è affidabile (range da 0 a 1) è una misura di quanto il modello è vicino alla realtà
#p-value - vicino allo zero indica che il modello è buono
modello2 <- lm(tz4 ~ tz2)
plot(tz2,tz4)
abline(modello2$coefficients)
#correlazione bassa, modello scadente
summary(modello2)
|
154eaffb3e3fed9862dd0238fe437597f58d072c | 9308cd4898d94b9e86919e4b9680887fb703a595 | /src/1.eda.R | 22fffee09e254776999c6279198c591ea1e27e3f | [] | no_license | R-ladies/Titanic | 3057c8cb77b3bbf373da00f6c0849f8052c2b4e5 | f7dcdd468ab13e5867ddfb9016f61f044f5431b9 | refs/heads/master | 2016-08-04T06:47:32.955818 | 2014-10-02T00:49:39 | 2014-10-02T00:49:39 | 22,821,996 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,745 | r | 1.eda.R | setwd("/Users/gqueiroz/Dropbox/Rladies/Meetups/Kaggle.Competition/Titanic/")
# Exploratory data analysis of the Kaggle Titanic training set.
library('ProjectTemplate')
load.project()
# Raw copy of the training data read directly from disk.
train2 <- read.csv("./data/train.csv", header = TRUE, stringsAsFactors = FALSE)
str(train2)
# NOTE(review): everything below uses `train`, not `train2`; `train` is
# presumably loaded by load.project() from the project's data/ directory --
# confirm the two objects agree.
#' Look at the first 10 rows
head(train, 10)
#' Look at the last rows
tail(train)
#' See the structure of the data
str(train)
#' Variables
names(train)
#'Summary Stats
summary(train)
#' How many people survived?
table(train$Survived) # 342 passengers survived, while 549 died
#' How about a proportion?
prop.table(table(train$Survived))
# Mean age, ignoring missing values.
mean(train$Age, na.rm = TRUE)
# How many passengers were younger than 10?
dim(subset(x = train, subset = train$Age < 10))
#' How many men and women?
summary(train$Sex) # Majority of passengers were male.
# Now let's expand the proportion table command we used last time to do a two-way comparison
# on the number of males and females that survived:
table(train$Sex, train$Survived)
prop.table(table(train$Sex, train$Survived), 1) # row-wise
# Ages?
summary(train$Age)
table(train$Survived)
# Making basic visualizations
barplot(table(train$Survived),
        names.arg = c("Died", "Survived"),
        main="Survived (passenger fate)", col="red")
barplot(table(train$Pclass),
        names.arg = c("first", "second", "third"),
        main="Pclass (passenger traveling class)", col="firebrick")
hist(train$Fare, main="Fare (fee paid for ticket[s])",
     xlab = NULL,
     col="darkgreen")
mosaicplot(train$Pclass ~ train$Survived,
           main="Passenger Fate by Traveling Class", shade=FALSE,
           color=TRUE, xlab="Pclass", ylab="Survived")
mosaicplot(train$Sex ~ train$Survived,
           main="Passenger Fate by Gender", shade=FALSE,
           color=TRUE,
           xlab="Sex", ylab="Survived")
|
7debba2ecfdc58e4ceded0f15afba2c9ce5e3327 | 26c7c66568565e3e958a0350e633daf669861807 | /R/imports.R | e09bd26dd0e4b28dd077ea46f7e05ffea67f1c01 | [
"MIT"
] | permissive | Pflegermeister/wizirt | 3c08509e7ce94a63ad3ec3819c1a7731733edfc6 | 2a52d02369ca15fb2cf312e5e7530b4b4322bbcb | refs/heads/main | 2023-01-23T18:00:59.507321 | 2020-11-21T01:17:03 | 2020-11-21T01:17:03 | 290,293,374 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 572 | r | imports.R | #' @import mirt
#' @import PerFit
#' @import ltm
#' @import parsnip
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
#' parsnip::set_engine
#'
#' @param engine for set_engine(). Character. Currently supported engines are 'mirt' and 'ltm' for Rasch, 1PL, 2PL, and 3PL models. 'eRm' is supported for Rasch models only.
#' @importFrom parsnip set_engine
#' @export
parsnip::set_engine
#' @importFrom parsnip set_mode
#' @export
parsnip::set_mode
# Package load hook, run once when the namespace is loaded.
.onLoad <- function(libname, pkgname) {
  # NOTE(review): calls make_irt() once, discarding its printed output
  # (capture.output) and swallowing any error (tryCatch with an empty
  # handler) -- presumably a model-registration/warm-up step; confirm.
  a <- capture.output(tryCatch(make_irt(), error = function(e) {}))
  rm(a)
}
|
3a0ef4db32e37d05c1a0c55e815bfc25269df64c | f911fd924482e0d3ddaec777187370f5d5e54b19 | /R/pd_taxa.R | 2e468516da9215bd0fea47fdb3ea030ed3bf5e6c | [
"MIT"
] | permissive | ropensci/phylodiv | c1a78ce50a8cca26df205364e26ce8ff22822afa | 8c8f974af4ba1ad7dcc599bbeca87715b802ec0e | refs/heads/master | 2021-04-15T08:40:02.810757 | 2020-03-07T00:06:49 | 2020-03-07T00:06:49 | 126,215,982 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,071 | r | pd_taxa.R | #' Gather taxonomic hierarchies
#'
#' @export
#' @param x an object of class `phylodiv`
#' @param db (character) the database to use to get
#' taxonomic information. only option is "ncbi" for now
#' @param drop_no_data (logical) drop tips w/o taxonomic
#' hierarchy data. default: `TRUE`
#' @param ... ignored
#' @return an object of class `phylodiv`, a named list with slots:
#'
#' - tree
#' - tips
#' - nodes
#' - hierarchies
#'
#' @section taxonomic data:
#' Uses \pkg{taxizedb} internally
#'
#' @examples \dontrun{
#' library(ape)
#' data(chiroptera)
#' st <- ape::subtrees(chiroptera)[[393]]
#' (x <- pd_read(st))
#' (res <- pd_taxa(x))
#' res$trees
#' res$trees[[1]]$hierarchies
#' res$fetch_hierarchies()
#' }
pd_taxa <- function(x, db = "ncbi", drop_no_data = TRUE, ...) {
  # Validate the input class before doing any lookups.
  assert(x, "PhyloDiv")
  # if (!is.character(x$tips)) stop("tip labels must be of class character")
  # res <- taxize::classification(x$tips, db = db)
  # res <- taxizedb::classification(x$tips, db = db)
  # For each tree: look up the taxonomic classification of its tip names,
  # optionally drop tips that returned no hierarchy (pruning them from the
  # tree), and store the hierarchies on the tree object.
  # NOTE(review): the assignments to `z$tree` and `z$hierarchies` inside
  # lapply() only persist in `x` if the elements of x$trees have reference
  # semantics (e.g. R6 objects / environments) -- confirm; for plain lists
  # these modifications would be lost.
  invisible(lapply(x$trees, function(z) {
    w <- taxizedb::classification(z$unique_names, db = db)
    if (drop_no_data) {
      # A classification with <= 1 row means the lookup found no hierarchy.
      keep <- w[vapply(w, NROW, 1) > 1]
      throw <- w[vapply(w, NROW, 1) <= 1]
      if (length(throw) > 0) {
        message("dropping ", length(throw), " tips w/o hierarchies")
        # prune tree with dropped taxa (tip labels use "_" instead of spaces)
        z$tree <- ape::drop.tip(z$tree, gsub("\\s", "_", names(throw)))
      }
    }
    z$hierarchies <- w
  }))
  return(x)
  # # assign hierarchies
  # x$hierarchies <- keep
  # x
  # structure(x, class = "phylodiv")
}
# using taxa::parse_tax_data
# pd_taxa1 <- function(x, db = "ncbi", ...) {
# assert(x, "phylodiv")
# if (!is.character(x$tips)) stop("tip labels must be of class character")
# # res <- taxize::classification(x$tips, db = db)
# res <- taxizedb::classification(x$tips, db = db)
# # drop those with no hierarchy data
# keep <- res[vapply(res, NROW, 1) > 1]
# throw <- res[vapply(res, NROW, 1) <= 1]
# message("dropping ", length(throw), " tips w/o hierarchies")
# # prune tree with dropped taxa
# x$tree <- ape::drop.tip(x$tree, gsub("\\s", "_", names(throw)))
# # assign hierarchies
# x$hierarchies <- keep
# # make tax_map object
# tax <- dt2df(tc(lapply(keep, function(z) {
# if (is.na(z)) return(NULL)
# tt <- z[z$rank != "no rank", ]
# ff <- tt$name
# names(ff) <- tt$rank
# data.frame(t(ff), stringsAsFactors = FALSE)
# })), idcol = FALSE)
# # tax$id <- seq_len(NROW(tax))
# tax$id <- gsub("\\s", "_", tax$species)
# ttree <- tidytree::as_tibble(x$tree)
# txmap <- taxa::parse_tax_data(tax,
# class_cols = seq_along(tax),
# datasets = list(tree = ttree),
# mappings = c("id" = "label")
# )
# x$taxmap <- txmap
# structure(x, class = "phylodiv")
# }
# using metacoder::parse_phylo
# pd_taxa2 <- function(x, db = "ncbi", ...) {
# assert(x, "phylodiv")
# if (!is.character(x$tips)) stop("tip labels must be of class character")
# x$taxmap <- metacoder::parse_phylo(x$tree)
# structure(x, class = "phylodiv")
# }
|
cd68aeba3e2e98c6f2f780c89c9d761deb6e2cb3 | 42827afff219530b5dda9856c6acda656e044afe | /R/stations.R | e5f3ee401ba62eae82b1c9c474dcec121fc83304 | [] | no_license | oranzani/inmetdown | 987465407c57aa63ba8b20ed3195ed5e9e8a57ff | 201213beb5dcc29264ee5361edce3d1d1989dfea | refs/heads/master | 2023-01-03T03:03:48.119344 | 2020-10-28T14:20:22 | 2020-10-28T14:20:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,137 | r | stations.R | #' INMET Weather Stations
#'
#' @export
#'
inmet_station <- function() {
  # Download the station catalog, a JS file of the form "var estacao = <json>".
  response <- httr::GET("https://mapas.inmet.gov.br/assets/js/estacao.js")
  raw_text <- httr::content(response, type = "text", encoding = "UTF-8")
  # Strip the JS assignment prefix so only the JSON payload remains.
  json_text <- stringr::str_remove(raw_text, "var estacao = \n")
  parsed <- jsonlite::fromJSON(json_text)
  # The station attributes live under features$properties (GeoJSON layout).
  stations <- dplyr::as_tibble(parsed[["features"]][["properties"]])
  stations <- dplyr::mutate(
    stations,
    dplyr::across(c(VL_LATITUDE, VL_LONGITUDE, VL_ALTITUDE), as.numeric),
    DT_INICIO_OPERACAO = as.Date(lubridate::ymd_hms(DT_INICIO_OPERACAO)),
    # "Description - CODE": keep the descriptive part in its own column ...
    CD_ESTACAO_DESC = stringr::str_extract(CD_ESTACAO, "^.+(?= -)"),
    # ... and the trailing 5-character station code in CD_ESTACAO.
    CD_ESTACAO = stringr::str_trim(stringr::str_sub(CD_ESTACAO, -5, -1))
  )
  stations <- dplyr::select(
    stations,
    CD_ESTACAO,
    CD_ESTACAO_DESC,
    CD_WMO,
    CD_CATEGORIA = categoria,
    VL_LATITUDE,
    VL_LONGITUDE,
    VL_ALTITUDE,
    DT_INICIO_OPERACAO
  )
  # Drop duplicated station rows, as the original pipeline did.
  unique(stations)
}
# API antiga
# inmet_estacoes <- function(proxy = ".", estacoes.br = TRUE) {
#
# sonabra_auto_raw <- "http://www.inmet.gov.br/sonabra/maps/pg_mapa.php" %>%
# rvest::html_session(proxy) %>%
# xml2::read_html() %>%
# rvest::html_nodes(xpath = "/html/head/script[2]/text()") %>%
# rvest::html_text() %>%
# stringr::str_split("\\/\\/\\*", simplify = TRUE) %>%
# '['(-1)
#
# sonabra_auto_df <- dplyr::data_frame(sonabra_auto_raw = list(sonabra_auto_raw)) %>%
# dplyr::mutate(
# id = purrr::map(sonabra_auto_raw, ~stringr::str_match(.x, "\\*\\* ESTACÃO (.*?) \\*\\*")[,2]),
# estado = purrr::map(sonabra_auto_raw, ~stringr::str_sub(gsub(".*label = '|';.*", "", .x), 1, 2)),
# municipio = purrr::map(sonabra_auto_raw, ~stringr::str_extract(gsub(".*<b>Estação: |<br>.*", "", .x), ".*(?=-)")),
# lat = purrr::map(sonabra_auto_raw, ~readr::parse_number(gsub(",", ".", gsub(".*Latitude: |º<br>.*", "", .x)))),
# lon = purrr::map(sonabra_auto_raw, ~readr::parse_number(gsub(",", ".", gsub(".*Longitude: |º<br>.*", "", .x)))),
# altitude = purrr::map(sonabra_auto_raw, ~readr::parse_number(gsub(",", ".", gsub(".*Altitude: | metros.*", "", .x)))),
# inicio = purrr::map(sonabra_auto_raw, ~lubridate::dmy(gsub(".*Aberta em: |<br>.*", "", .x))),
# url = purrr::map(sonabra_auto_raw, ~gsub(".*width=50><a href=| target=_new>.*", "", .x))
# ) %>%
# tidyr::unnest(id, estado, municipio, lon, lat, altitude, inicio, url) %>%
# dplyr::mutate(tipo = "Automática") %>%
# dplyr::select(id, tipo, dplyr::everything()) %>%
# {if (isTRUE(estacoes.br)) dplyr::filter(., !stringr::str_detect(id, "[UC]")) else .}
#
# sonabra_conv_raw <- "http://www.inmet.gov.br/sim/sonabra/index.php" %>%
# rvest::html_session(proxy) %>%
# xml2::read_html() %>%
# rvest::html_nodes(xpath = "/html/head/script[2]/text()") %>%
# rvest::html_text() %>%
# stringr::str_split("\\/\\/\\*", simplify = TRUE) %>%
# '['(-1)
#
# sonabra_conv_df <- dplyr::data_frame(sonabra_conv_raw = list(sonabra_conv_raw)) %>%
# dplyr::mutate(
# id = purrr::map(sonabra_conv_raw, ~gsub(".*OMM: |<br>.*", "", .x)),
# estado = purrr::map(sonabra_conv_raw, ~stringr::str_sub(gsub(".*label = '|';.*", "", .x), 1, 2)),
# municipio = purrr::map(sonabra_conv_raw, ~stringr::str_extract(gsub(".*<b>Estação: |<br>.*", "", .x), ".*(?=-)")),
# lat = purrr::map(sonabra_conv_raw, ~readr::parse_number(gsub(",", ".", gsub(".*Latitude: |º<br>.*", "", .x)))),
# lon = purrr::map(sonabra_conv_raw, ~readr::parse_number(gsub(",", ".", gsub(".*Longitude: |º<br>.*", "", .x)))),
# altitude = purrr::map(sonabra_conv_raw, ~readr::parse_number(gsub(",", ".", gsub(".*Altitude: | metros.*", "", .x)))),
# inicio = purrr::map(sonabra_conv_raw, ~lubridate::dmy(gsub(".*Aberta em: |<br>.*", "", .x))),
# url = purrr::map(sonabra_conv_raw, ~gsub(".*<br><a href=|= target=_new>.*", "", .x))
# ) %>%
# tidyr::unnest(id, estado, municipio, lon, lat, altitude, inicio, url) %>%
# dplyr::mutate(tipo = "Convencional") %>%
# dplyr::select(id, tipo, dplyr::everything())
#
# dplyr::bind_rows(sonabra_auto_df, sonabra_conv_df)
# }
|
206d9ffb3ff1a6f2e3a051849958c6d353b29913 | cef7c4702e2f8b98c3d5f309c8fe9534591dda78 | /03_observe_update/app.R | 73467ac0a7cb9f4a2057ac6ba68fd77938a229d8 | [
"Apache-2.0"
] | permissive | cdmuhs/gradual_shiny | 617a232e3e06e4b3b036fe4c04036ac554d3d7ca | 9d9f53822ca543b2102b4ef5b27e1b770fa69ff3 | refs/heads/master | 2021-04-26T21:51:00.233923 | 2018-03-07T03:57:14 | 2018-03-07T03:57:14 | 124,164,484 | 0 | 0 | Apache-2.0 | 2018-03-07T02:04:56 | 2018-03-07T02:04:55 | null | UTF-8 | R | false | false | 2,690 | r | app.R | ##03 - Observe/update
# 03 - Observe/update: Shiny app demonstrating observe() + updateSelectInput()
# so the variable pickers track whichever data set is currently loaded.
Sys.setlocale("LC_ALL", "C")
library(fivethirtyeight)
library(shiny)
library(tidyverse)
##Load in the helper functions
source("helper.R")
data(biopics)
# Default data set shown before the user uploads a CSV.
myDataFrame <- biopics
##these functions return the categorical variables and
##the numeric variables, given a data.frame
##They're in helper.R
##Try running all the code before ui
##and look at what categoricalVars and numericVars contains
categoricalVars <- get_category_variables(myDataFrame)
numericVars <- get_numeric_variables(myDataFrame)
# UI: a file-upload control, x/y variable pickers, and the scatter plot.
ui <- shinyUI(
  fluidPage(
    fileInput("file1", "Choose csv file to upload", accept = ".csv"),
    selectInput("x_variable","Select X Variable",numericVars,
                selected=numericVars[1]),
    selectInput("y_variable", "Select Y Variable", numericVars,
                selected = numericVars[2]),
    ##uncomment this code for step 4
    #selectInput("color_variable", "Select Color Variable",
    #            names(categoricalVars),
    #            selected = names(categoricalVars[1])),
    plotOutput("scatter_plot")
  )
)
server <- function(input, output, session) {
  # Reactive data source: the uploaded CSV, or the bundled default data.
  myData <- reactive({
    inFile <- input$file1
    ##need to test whether input$file1 is null or not
    if (is.null(inFile)) {
      d <- myDataFrame
    } else {
      d <- read.csv(inFile$datapath)
    }
    return(d)
  })
  # Scatter plot of the currently selected x/y variables.
  output$scatter_plot <- renderPlot({
    ggplot(myData(), aes_string(y=input$y_variable,
                                x=input$x_variable# uncomment this line and next line for step 4,
                                # color=input$color_variable
    )) + geom_point()
  })
  ##observe runs the code whenever myData() changes
  observe({
    #get the new numeric variables when the data is loaded
    num_vars <- get_numeric_variables(myData())
    ##update the selectInput with choices
    updateSelectInput(session, "x_variable",
                      choices = num_vars,
                      selected = num_vars[1])
    ##make the selected different for y_variable
    updateSelectInput(session, "y_variable",
                      choices=num_vars,
                      selected= num_vars[2])
    ##get the new categorical variables when the data is loaded
    #uncomment here for step 4
    #cat_vars <- names(get_category_variables(myData()))
    ##update selectInput("color_variable") with the new categorical
    ##variables
    #uncomment here for step 4
    #updateSelectInput(session, "color_variable",
    #choices=cat_vars,
    # selected=cat_vars[1])
  })
}
shinyApp(ui, server) |
315bdfdd24af19687976c129288a0c450b2bfa08 | ebe1fdde0f73718e630ad3458cc147256159ed5b | /TweetRating.R | 91ba86d69467e194bed738a24f77f17ea5c3ad0e | [] | no_license | nimishbajaj/ReviewRating | 5e3b5ea6a62a677b9ae95b63945a6f46af20867e | 1b283cf5c93ed02af1ead93a11c8fbbb1ae29751 | refs/heads/master | 2021-01-18T17:26:52.710068 | 2017-04-08T21:11:41 | 2017-04-08T21:11:41 | 86,800,557 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | TweetRating.R | library(tm)
# Build a document-term matrix from tweet text and write train/test CSV
# splits keyed by a discretized average rating.
# NOTE(review): also depends on library(tm) loaded just above this chunk.
library(SnowballC)
library(caTools)
library(e1071)
tweets = read.csv("tweets.csv", stringsAsFactors=FALSE)
# Discretize the average score: scale by 10, shift by 30, round to the
# nearest 10, rescale, then treat the result as a factor label.
round1 = tweets$Avg*10 + 30
round2 = round(round1, digits=-1)/10
tweets$Avg = as.factor(round2)
corpus = Corpus(VectorSource(tweets$Tweet))
# Standard text clean-up: lowercase, strip punctuation, drop "apple" plus
# English stopwords, then stem.
corpus = tm_map(corpus, content_transformer(tolower))
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, c("apple", stopwords("english")))
corpus = tm_map(corpus, stemDocument)
corpus[[1]]$content
frequencies = DocumentTermMatrix(corpus)
# Drop very sparse terms (those absent from more than 99.7% of documents).
sparse = removeSparseTerms(frequencies, 0.997)
#sparse = frequencies
tweetsSparse = as.data.frame(as.matrix(sparse))
# Make term names syntactically valid R column names.
colnames(tweetsSparse) = make.names(colnames(tweetsSparse))
tweetsSparse$Avg = tweets$Avg
write.csv(tweetsSparse, "tweetsSparse.csv")
# Reproducible 70/30 split, stratified on the rating factor.
set.seed(123)
split = sample.split(tweetsSparse$Avg, SplitRatio = 0.7)
train = subset(tweetsSparse, split==TRUE)
test = subset(tweetsSparse, split==FALSE)
write.csv(train, "trainCSV.csv")
write.csv(test, "testCSV.csv")
52272ebae456e55491081f4d43644e824f21a4e9 | 2d0764ef3f329287910c2dbf97b05ab5a690cd2b | /R/collapse.genomecache.R | 16714ce136a4e865ac28bc70638ed9360e18ea14 | [] | no_license | gkeele/miqtl | 2980190c96b6bdf89eb3528981cdf7a4f6c2ea72 | fd5005580e3745d66665a5235def551eee9bc31d | refs/heads/master | 2022-06-23T02:52:02.940857 | 2022-06-08T15:09:38 | 2022-06-08T15:09:38 | 94,711,930 | 8 | 1 | null | 2022-06-02T20:34:40 | 2017-06-18T20:40:19 | R | UTF-8 | R | false | false | 9,890 | r | collapse.genomecache.R | #' Reduce genome caches by averaging together loci that are very similar up to some set
#' tolerance level of a specified criterion
#'
#' This function takes an inpute genome cache directory, and produces a smaller version
#' with similar information. It is particularly useful for large, dense genome caches, with
#' redundant loci.
#'
#' @param original.cache The path of the genome cache to be reduced.
#' @param new.cache The path of the new genome cache to be created.
#' @param subjects DEFAULT: NULL. Allows for the specification of a reduced set of individuals from
#' the original genome cache to be included in the new one. The default includes all individuals.
#' @param criterion DEFAULT: "l2.norm". Option to specify criterion for collapsing loci founder
#' probabilities/dosages. "l2.norm" means that max l2 norm or Euclidean distance is used, which means
#' changes across all categories count. "max.category" means only changes in the max category (founder
#' dosage or diplotype) are used.
#' @param model DEFAULT: "additive". If "additive" is specified, criteria are based on dosages. If
#' "full", probabilities of diplotypes are used.
#' @param proportion.tol DEFAULT: 0.01. If the maximum criterion value at a pair of loci in a data set
#' exceeds this value, the loci are not averaged. When all criterion values are below it, the two loci
#' get averaged. The scale of the parameter is in terms of a proportion, regardless of criteria.
#' @export collapse.genomecache
#' @examples collapse.genomecache()
collapse.genomecache <- function(original.cache,
                                 new.cache, subjects=NULL,
                                 criterion=c("l2.norm", "max.category"),
                                 model=c("additive", "full"),
                                 proportion.tol=0.01){
  # Default-argument idiom: take the first element of the choice vectors.
  criterion <- criterion[1]
  model <- model[1]
  h <- DiploprobReader$new(original.cache)
  # Chromosomes in original cache
  chr <- list.dirs(paste0(original.cache, "/additive/"), full.names=FALSE, recursive=FALSE)
  chr <- chr[grepl(x=chr, pattern="chr")]
  chr <- gsub(x=chr, pattern="chr", replacement="")
  # Matrix that converts full diplotype probabilities into additive founder
  # dosages (used below as bin.matrix %*% full.to.add.matrix).
  full.to.add.matrix <- straineff.mapping.matrix()
  strains <- h$getFounders()
  if(is.null(subjects)){ subjects <- h$getSubjects() }
  for(i in 1:length(chr)){
    # Create the output directory layout for this chromosome.
    dir.create(paste0(new.cache, "/additive", "/chr", chr[i], "/data"), recursive=TRUE, showWarnings=FALSE)
    dir.create(paste0(new.cache, "/full", "/chr", chr[i], "/data"), recursive=TRUE, showWarnings=FALSE)
    dir.create(paste0(new.cache, "/genotype", "/chr", chr[i]), recursive=TRUE, showWarnings=FALSE)
    ## Grab loci from this chr
    load(paste0(original.cache, "/additive", "/chr", chr[i], "/markers.RData"))
    these.loci <- markers
    these.map <- h$getLocusStart(loci=these.loci, scale="cM")
    # Re-ordering based on genetic position
    this.order <- order(these.map)
    these.loci <- these.loci[this.order]
    total.loci <- length(these.loci)
    # Walk loci left to right, accumulating a running "bin" of consecutive
    # loci whose probabilities are similar (per the chosen criterion); each
    # finished bin is averaged and written out as a single pseudo-marker.
    left.marker <- these.loci[1]
    reduced.loci <- reduced.map <- reduced.pos <- NULL
    bin.matrix <- h$getLocusMatrix(locus=left.marker, model="full", subjects=subjects); bin.loci.count <- 1
    for(j in 2:total.loci){
      OUTPUT=FALSE
      right.marker <- these.loci[j]
      locus1 <- h$getLocusMatrix(locus=left.marker, model=model, subjects=subjects)
      locus2 <- h$getLocusMatrix(locus=right.marker, model=model, subjects=subjects)
      ## Check to see if X changes between markers
      ## (test == TRUE means the two loci are similar enough to merge)
      if(criterion == "l2.norm"){
        test <- check.l2.norm(locus1.matrix=locus1, locus2.matrix=locus2, proportion.tol=proportion.tol, model=model)
      }
      else if(criterion == "max.category"){
        test <- check.max.category(locus1.matrix=locus1, locus2.matrix=locus2, proportion.tol=proportion.tol)
      }
      ## Extend bin
      if(test){
        bin.matrix <- bin.matrix + h$getLocusMatrix(locus=right.marker, model="full", subjects=subjects)
        bin.loci.count <- bin.loci.count + 1
        ## CASE: Muli-locus bin at end of chromosome
        if(j == total.loci){
          bin.marker <- paste0(left.marker, "_to_", right.marker) # Bin names
          bin.matrix <- bin.matrix/bin.loci.count # Averaging markers
          OUTPUT=TRUE
        }
      }
      ## End bin and start new one
      if(!test){
        ## Bin is a single marker
        if(bin.loci.count == 1){
          bin.marker <- left.marker
        }
        else{
          bin.marker <- paste0(left.marker, "_to_", these.loci[j-1]) ## Bin names
          bin.matrix <- bin.matrix/bin.loci.count ## Averaging markers
        }
        OUTPUT=TRUE
      }
      #browser()
      ## Adding bin to output: bins are named after the left marker's
      ## position and saved as one RData file per pseudo-marker.
      if(OUTPUT){
        reduced.map <- c(reduced.map, h$getLocusStart(loci=left.marker, scale="cM"))
        reduced.pos <- c(reduced.pos, h$getLocusStart(loci=left.marker, scale="Mb"))
        reduced.loci <- c(reduced.loci, bin.marker)
        # Additive dosages
        bin.dosages <- bin.matrix %*% full.to.add.matrix
        colnames(bin.dosages) <- strains
        rownames(bin.dosages) <- rownames(bin.matrix)
        assign(bin.marker, bin.dosages)
        add.fn <- paste0(new.cache, "/additive/chr", chr[i], "/data/", bin.marker, ".RData")
        save(list=bin.marker, file=add.fn)
        # Full probabilities
        assign(bin.marker, bin.matrix)
        full.fn <- paste0(new.cache, "/full/chr", chr[i], "/data/", bin.marker, ".RData")
        save(list=bin.marker, file=full.fn)
        # Removing locus matrix so memory doesn't get overloaded
        rm(list=bin.marker)
      }
      ## CASE: Last marker is alone in a bin
      if(!test & j == total.loci){
        bin.marker <- right.marker
        bin.matrix <- h$getLocusMatrix(locus=bin.marker, model="full", subjects=subjects)
        # Additive dosages
        bin.dosages <- bin.matrix %*% full.to.add.matrix
        colnames(bin.dosages) <- strains
        rownames(bin.dosages) <- rownames(bin.matrix)
        assign(bin.marker, bin.dosages)
        add.fn <- paste0(new.cache, "/additive/chr", chr[i], "/data/", bin.marker, ".RData")
        save(list=bin.marker, file=add.fn)
        # Full probabilities
        assign(bin.marker, bin.matrix)
        full.fn <- paste0(new.cache, "/full/chr", chr[i], "/data/", bin.marker, ".RData")
        save(list=bin.marker, file=full.fn)
        # Removing locus matrix so memory doesn't get overloaded
        rm(list=bin.marker)
        reduced.map <- c(reduced.map, h$getLocusStart(loci=right.marker, scale="cM"))
        reduced.pos <- c(reduced.pos, h$getLocusStart(loci=right.marker, scale="Mb"))
        reduced.loci <- c(reduced.loci, right.marker)
      }
      # Reset: start a fresh bin at the right marker.
      if(!test & j < total.loci){
        left.marker <- right.marker
        bin.matrix <- h$getLocusMatrix(locus=left.marker, model="full", subjects=subjects)
        bin.loci.count <- 1
      }
    }
    ## Write per-chromosome metadata alongside the marker data.
    ## Markers
    markers <- reduced.loci
    save(list="markers", file=paste0(new.cache, "/full/chr", chr[i], "/markers.RData"))
    save(list="markers", file=paste0(new.cache, "/additive/chr", chr[i], "/markers.RData"))
    save(list="markers", file=paste0(new.cache, "/genotype/chr", chr[i], "/markers.RData"))
    ## Map
    #map <- unique(reduced.map)
    map <- reduced.map
    save(list="map", file=paste0(new.cache, "/full/chr", chr[i], "/map.RData"))
    save(list="map", file=paste0(new.cache, "/additive/chr", chr[i], "/map.RData"))
    save(list="map", file=paste0(new.cache, "/genotype/chr", chr[i], "/map.RData"))
    ## Pos (stored in base pairs: Mb * 1e6)
    #bp <- unique(reduced.pos*1e6)
    bp <- reduced.pos*1e6
    save(list="bp", file=paste0(new.cache, "/full/chr", chr[i], "/bp.RData"))
    save(list="bp", file=paste0(new.cache, "/additive/chr", chr[i], "/bp.RData"))
    save(list="bp", file=paste0(new.cache, "/genotype/chr", chr[i], "/bp.RData"))
    ## Chromosome
    chromosome <- rep(chr[i], length(reduced.loci))
    save(list="chromosome", file=paste0(new.cache, "/full/chr", chr[i], "/chromosome.RData"))
    save(list="chromosome", file=paste0(new.cache, "/additive/chr", chr[i], "/chromosome.RData"))
    save(list="chromosome", file=paste0(new.cache, "/genotype/chr", chr[i], "/chromosome.RData"))
    ## Strains
    save(list="strains", file=paste0(new.cache, "/full/chr", chr[i], "/strains.RData"))
    save(list="strains", file=paste0(new.cache, "/additive/chr", chr[i], "/strains.RData"))
    save(list="strains", file=paste0(new.cache, "/genotype/chr", chr[i], "/strains.RData"))
    ## Subjects
    save(list="subjects", file=paste0(new.cache, "/full/chr", chr[i], "/subjects.RData"))
    save(list="subjects", file=paste0(new.cache, "/additive/chr", chr[i], "/subjects.RData"))
    save(list="subjects", file=paste0(new.cache, "/genotype/chr", chr[i], "/subjects.RData"))
    cat(paste0("Finished Chr", chr[i], "\n"))
  }
}
## Returns TRUE if the normalized L2 norm of the difference between the two
## locus matrices is within `proportion.tol` for every individual (row).
## The distance is divided by the maximum possible L2 distance so that the
## tolerance is a proportion: additive dosages give sqrt(8), full
## probabilities give sqrt(2).
check.l2.norm <- function(locus1.matrix, locus2.matrix, proportion.tol, model){
  dif.matrix <- locus1.matrix - locus2.matrix
  # Scalar condition: use if/else rather than vectorized ifelse().
  max.l2 <- if (model == "additive") sqrt(8) else sqrt(2)
  # Vectorized row-wise Euclidean norm; replaces the per-row apply() call.
  l2.norm <- sqrt(rowSums(dif.matrix^2)) / max.l2
  !any(l2.norm > proportion.tol)
}
## Returns TRUE if, for every individual (row), the value of that
## individual's maximum category at locus 1 differs from the same category at
## locus 2 by no more than `proportion.tol`.
## In practice, a max haplotype dosage or max diplotype
check.max.category <- function(locus1.matrix, locus2.matrix, proportion.tol){
  # Column index of each row's maximum category at the first locus.
  max.category <- apply(locus1.matrix, 1, which.max)
  # Two-column matrix indexing pulls out the (row, max-column) entries in one
  # vectorized step, replacing the sapply(1:length(...)) loop.
  idx <- cbind(seq_along(max.category), max.category)
  cat.dif <- abs(locus1.matrix[idx] - locus2.matrix[idx])
  !any(cat.dif > proportion.tol)
}
|
5c3826533a95cd88fae1d26ff1d42e3e16b9a31e | 3b5cf4528187a6bae64eb6be93fc1323eef97ea8 | /cachematrix.R | e1e1ae82226e58bd95ed154aefaaab373600cae3 | [] | no_license | jlehn/ProgrammingAssignment2 | 36ab870130a753275a79dc2b22af404b6f903af8 | 33527128c3c030b24c8fd27059a8e1f4ef0babbf | refs/heads/master | 2021-01-15T09:46:29.869003 | 2014-04-28T03:47:51 | 2014-04-28T03:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,921 | r | cachematrix.R | ## These functions are used together to cache the inverse of a
# given invertible matrix. makeCacheMatrix takes a given matrix, stores it and
# includes a getter/setter for its inverse if it has already been calculated.
# cacheSolve will take a makeCacheMatrix object, return already calculated inverse if
# one exists; otherwise calculates, stores, and returns the inverse.
##Sample Usage
# m <- matrix(1:4, 2,2) #Creates invertable matrix
# matrixToSolve <- makeCacheMatrix(m) #Initializes makeCacheMatrix object using m
# cacheSolve(matrixToSolve) #First call solves and stores the inverse of the matrix
# cacheSolve(matrixToSolve) #Second call retireves pre-caluculation, prints message and returns inverse
## makeCacheMatrix is a special matrix object that stores a matrix, and calculates, stores, and
# retrieves its inverse.
## Constructor for a matrix wrapper that can memoize its inverse.
## Returns a list of four accessors: set/get for the matrix itself, and
## setinverse/getinverse for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a computed inverse in the cache.
  setinverse <- function(solve) {
    inv <<- solve
  }
  # Return the cached inverse (NULL if not yet computed).
  getinverse <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve checks to see if the inverse of makeCacheMatrix has
# already been computed. If so, it retieves and returns the already
# computed inverse. If not, it computes, caches, and returns the inverse.
## Return the inverse of the special "cache matrix" object `x` created by
## makeCacheMatrix(). The inverse is computed with solve() on the first call
## and served from the object's cache afterwards.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Cache miss: compute the inverse, store it for next time.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    # Cache hit: announce it, exactly as the original implementation does.
    message("getting cached data")
  }
  inverse
}
|
5ebd6a6604b51abb9414a6a6630ac47cab25f9df | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/cellranger/examples/cell_cols.Rd.R | 2e338b843966a8850889a5ad224f93d47d427cc0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 274 | r | cell_cols.Rd.R | library(cellranger)
### Name: cell_cols
### Title: Specify cell limits only for columns
### Aliases: cell_cols
### ** Examples
# Numeric column limits; NA leaves that side of the range unbounded.
cell_cols(c(NA, 3))
cell_cols(c(7, NA))
cell_cols(4:16)
cell_cols(c(3, NA, 10))
# Columns can also be given as spreadsheet letters.
cell_cols("C:G")
cell_cols(c("B", NA))
cell_cols(LETTERS)
|
0ad9497b2a6d43a02df1c57a45e42ec4c87e6f55 | fe70d71c3d5e4fb2db7bcaf21764bad6e0779a88 | /tests/testthat.R | 5e2f6f3e022780bfda3776b6380428586829d548 | [] | no_license | andybega/states | ce9d2ed60b2dff05b8290c5e83cd198055f21d6c | ee601cc1c451a846b3a60aa4722b01bb1e30c19e | refs/heads/master | 2021-06-05T02:20:37.193922 | 2021-04-14T12:20:45 | 2021-04-14T12:20:45 | 99,799,694 | 9 | 0 | null | null | null | null | UTF-8 | R | false | false | 60 | r | testthat.R | library("testthat")
# Standard testthat runner: loads the package and executes its test suite.
library("states")
test_check("states")
|
c9aa8608a43081708d0e3ea4385665004a65ac93 | 5ac0c6a8bfc18e60c203562719b0f5b25a4bb253 | /IST687/finalproject2.R | 7dd62ddd5bc98b3c62d4b52e0f5fc308b5390af8 | [] | no_license | KaiserKyle/Final_Projects_Misc | 494a112cae43d9f9fc42bfb6217f05c23eda9447 | 4f228ee3c5861ec3df2065363349c15792ffff87 | refs/heads/master | 2020-06-04T17:35:04.260056 | 2019-07-08T03:01:10 | 2019-07-08T03:01:10 | 192,126,785 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,696 | r | finalproject2.R | library(e1071)
# Build SVM and naive Bayes models that predict an athlete's sport from
# height, weight, and age (Kaggle Olympic athlete-events data).
# NOTE(review): also depends on library(e1071) loaded just above this chunk.
library(ggplot2)
# Load the data set
rawData <- read.csv("d:\\data\\athlete_events.csv")
# There were a handful of Figure Skating events that took place in the Summer
# For the purpose of this study, we will move these to the Winter, as it is now
# traditionally known as a Winter event.
rawData[which(rawData$Season =="Summer" & rawData$Sport == "Figure Skating"),]$Season <- "Winter"
# Print Summary
print(str(rawData))
# Prep for SVM modeling
# Select 'modern' data, starting in 1970
svmData <- rawData[which(rawData$Year >= 1970),]
svmData <- svmData[c("Height", "Weight", "Age", "Sex", "Sport")]
svmData <- na.omit(svmData)
# Split data into male and female datasets
maleSvmData <- svmData[which(svmData$Sex == "M"),]
femaleSvmData <- svmData[which(svmData$Sex =="F"),]
# (Comment out one of these two lines in order to make a model)
# Create female model
#modelData <- femaleSvmData
# Create male model
modelData <- maleSvmData
# Pick only sports with over 100 participants
sportCounts <- as.data.frame(table(modelData$Sport))
sportCounts <- sportCounts[which(sportCounts$Freq >= 100),]
finalSvmData <- modelData[modelData$Sport %in% sportCounts$Var1,]
finalSvmData$Sport <- droplevels(finalSvmData$Sport)
# Split into test and training data set
# (two-thirds train, one-third test; note the shuffle is not seeded, so the
# split differs between runs)
randIndex <- sample(1:dim(finalSvmData)[1])
cutPoint <- floor(2 * dim(finalSvmData)[1] / 3)
trainData <- finalSvmData[randIndex[1:cutPoint],]
testData <- finalSvmData[randIndex[(cutPoint+1):dim(finalSvmData)[1]],]
# Create the models
svmOutput <- svm(Sport ~ Height + Weight + Age, data=trainData, probability=TRUE)
nbOutput <- naiveBayes(Sport ~ Height + Weight + Age, data=trainData)
# Test the model
svmPrediction <- predict(svmOutput, testData, probability=TRUE)
results <- attr(svmPrediction,"probabilities")
nbPrediction <- predict(nbOutput, testData, "raw")
# Return a logical vector, one element per row of testData, indicating
# whether the row's actual Sport appears among the n most probable sports.
#
# Args:
#   results:  numeric matrix aligned with testData rows; columns named by sport.
#   testData: data frame whose Sport column holds the true labels.
#   n:        how many top-probability sports count as a "match".
getAccuracy <- function(results, testData, n) {
  n.rows <- nrow(testData)
  # Top-n sport names per row, ordered by decreasing probability.
  top.n <- lapply(seq_len(n.rows), function(i) {
    colnames(results)[order(results[i, ], decreasing = TRUE)][1:n]
  })
  # vapply guarantees a logical vector and avoids growing a vector with
  # append() in a loop; the original's unused intermediate data frame
  # (resultTop5) is dropped.
  vapply(seq_len(n.rows), function(i) {
    as.character(testData$Sport[i]) %in% top.n[[i]]
  }, logical(1))
}
# Compare model accuracy when the true sport may be anywhere in the top i
# predictions, for i = 1..44 (the number of candidate sports).
matchSvmPercents <- c()
matchNbPercents <- c()
nonProbMatchSvmPercents <- c()
nonProbMatchNbPercents <- c()
randMatchPercents <- c()
for (i in 1:44) {
  temp <- getAccuracy(results, testData, i)
  # Get the match percent for the TOP i by probability in our SVM model
  matchSvmPercents <- append(matchSvmPercents, length(temp[temp == TRUE]) / length(temp))
  temp <- getAccuracy(nbPrediction, testData, i)
  # Get the match percent for the TOP i by probability in our NB model
  matchNbPercents <- append(matchNbPercents, length(temp[temp == TRUE]) / length(temp))
  # Get the match percent if we just random picked i sports
  randMatchPercents <- append(randMatchPercents, i / 44)
  # The match percent of a non-probabilistic SVM and NB output
  # (top-1 accuracy repeated as a constant baseline line)
  nonProbMatchSvmPercents <- append(nonProbMatchSvmPercents, matchSvmPercents[1])
  nonProbMatchNbPercents <- append(nonProbMatchNbPercents, matchNbPercents[1])
}
accDf <- data.frame(matchSvmPercents, matchNbPercents, randMatchPercents, nonProbMatchSvmPercents, nonProbMatchNbPercents)
# Plot all curves: SVM and NB accuracy (lines + points), the random-chance
# baseline, and horizontal red lines at each model's top-1 accuracy.
matchPlot <- ggplot(data = accDf, aes(x = as.numeric(row.names(accDf)), y = matchSvmPercents))
matchPlot <- matchPlot + geom_line(aes(color="SVM"), size = 2) + geom_point(color = "mediumblue", size = 3)
matchPlot <- matchPlot + geom_line(aes(y=randMatchPercents, color = "Random Chance"), size = 2)
matchPlot <- matchPlot + geom_line(aes(y=nonProbMatchSvmPercents), color = "red", size = 2)
matchPlot <- matchPlot + geom_line(aes(y=matchNbPercents, color = "NB"), size = 2) + geom_point(aes(y=matchNbPercents), color = "darkgreen", size = 3)
matchPlot <- matchPlot + geom_line(aes(y=nonProbMatchNbPercents), color = "red", size = 2)
matchPlot <- matchPlot + xlab("Number of Sports Picked") + ylab("Correctness on Test Data") + ggtitle("Success Rates for NB and SVM Models - Male")
matchPlot <- matchPlot + scale_color_manual("Model", values=c("SVM"="cadetblue", "NB"="darkolivegreen3", "Random Chance"="orange"))
matchPlot <- matchPlot + theme(legend.position="bottom")
print(matchPlot)
eadc51ccd3693d38dd0bbe7894ab5f1a2c639807 | 1252aff04be9cc2c93f9d7cf8ba43274ef2c6c46 | /scripts/programacaoR_exercicio01.R | bb1fbac318d5bc922d312472772d1c14a432dd3a | [] | no_license | sandbergms/etl_com_r | 33130e4783e2b5ee69452f9388b3eac2002a1d14 | 4549da13215b3155e6f7f3e485cf98f25a046808 | refs/heads/main | 2023-07-01T18:27:33.208297 | 2021-08-11T23:43:36 | 2021-08-11T23:43:36 | 355,363,049 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 387 | r | programacaoR_exercicio01.R | nome <- c("Luiz", "Ana", "Amanda", "Pedro", "Maria")
# Blood type and height for each person (parallel to `nome` defined above).
tipoSanguineo <- c("AB-", "B+", "O+", "A-", "O+")
altura <- c(1.70, 1.82, 1.64, 1.90, 1.60)
# boolean vector saying whether the person is shorter than 1.80 m
menorQue1m80 <- altura < 1.80
# Combine the parallel vectors into a single data frame.
informacoes <- data.frame(nome,
                          tipoSanguineo,
                          altura,
                          menorQue1m80)
|
81ca4ed34c621fce23435c3b3a63013350e34410 | a688f8924fc47c86c7f4aa913e4ddfec4df44e1c | /ui.r | f64f7b998ec4033859aaee4e54b7c9d8067b4d3d | [] | no_license | tarkomatas/satRday_contest | eec2b01c8e8ef184e6782f417691070a9948e76c | 49b070ec15f2a9054d406a5eae270828ce51a636 | refs/heads/master | 2020-04-17T23:18:05.752542 | 2016-08-31T15:57:08 | 2016-08-31T15:57:08 | 67,050,804 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,098 | r | ui.r | library(shiny)
library(ggplot2)
library(plotly)
library(openxlsx)
library(shinythemes)
library(DT)
#-------------------------------------
# LOAD DATABASE
#-------------------------------------
# Flight records for Budapest airport, 2007-2012 (first sheet, with header).
database = read.xlsx("BUD flights 2007-2012 v2.xlsx", startRow = 1, colNames = TRUE)
#-------------------------------------
# MODIFY DATABASE
#-------------------------------------
# Working copy with the Excel serial DATE converted to a "YYYY-MM" string
# (convertToDate comes from openxlsx).
g_database = database
g_database$DATE = convertToDate(g_database$DATE)
g_database$DATE = format(g_database$DATE, "%Y-%m")
#-------------------------------------
# SHINY
#-------------------------------------
shinyUI(pageWithSidebar(
### HEADER
tags$div(class ="header",
tags$style(HTML("
.header {
background-color: black;
border-style: solid;
border-radius: 5px;
border: 2px solid #f5f5f5;
height: 165px;
}
")),
a(tags$img(id ="image",
tags$style(HTML("
#image {
width: 150px;
display: block;
margin-top: 10px;
margin-right: 10px;
display: block;
float: right;
}
")),
src="logo.png"),
href = "http://budapest.satrdays.org/"
),
tags$h2(id ="title1", "ANALYZE OF FERENC LISZT INTERNATIONAL AIRPORT DATABASE",
tags$style(HTML("
#title1 {
margin-top: 50px;
margin-left: 10px;
color: white;
text-align: center;
font-size: 250%;
}
"))
),
tags$h4(id ="title2", "DYNAMIC RSTUDIO-SHINY APPLICATION",
tags$style(HTML("
#title2 {
margin-top: 10px;
margin-left: 10px;
color: #5F9AE2;
text-align: center;
font-size: 175%;
}
"))
)
),
### NAVBAR
sidebarPanel(
wellPanel(
icon("wrench"),
HTML("<b>NAVIGATION BAR</b>"),
br(),
br(),
helpText("Use the radio-buttons to choose the right option for You."),
radioButtons("FL_DIR", "Select flight direction:", choices= unique(database$FLIGHT_DIRECTION)),
radioButtons("FL_TYPE", "Select fligt type:", choices= unique(database$FLIGHT_TYPE), selected = "Scheduled")
),
wellPanel(
HTML("<b>ABOUT ME</b>"),
br(),
HTML("Tamás Markó"),
br(a(icon("linkedin-square"),
href="https://www.linkedin.com/in/tam%C3%A1s-mark%C3%B3-b88505b9?trk=hp-identity-name"
),
a(icon("envelope "),
href="mailto:marko.tamas1991@gmail.com"
)
)
),
wellPanel(
HTML("<b>SOURCE CODE</b>"),
br(a(icon("github"),
href="https://github.com/tarkomatas/satRday_contest"
))
),
wellPanel(
HTML("<b>LAST UPDATE</b>"),
br(HTML("31/08/2016"))
),
br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),
br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),
width = 3
),
### MAIN PANEL
mainPanel(
tabsetPanel(type = "pills",
## Country specific aggregation TABPANEL
tabPanel("Country specific aggregation",
icon = icon("map-marker"),
tags$h3(id ="title3", "Country specific aggregation",
tags$style(HTML("
#title3 {
font-weight: bold;
text-align: center;
color: #5F9AE2;
}
"))
),
br(),
fluidRow(
column(4, wellPanel(
selectInput("select_var", "Select target value:", choices= c("Number of passangers" = "NBR_OF_PASSENGERS" , "Number of flights" = "NBR_OF_FLIGHTS", "Sum of cargo Weight" = "CARGO_WEIGHT", "Number of seat capacity" = "SEAT_CAPACITY"))
)),
column(4, wellPanel(
sliderInput("YEAR", "Select year:", min = 2007, max = 2012, value = 2007, sep = "", animate = animationOptions(interval = 3000, loop = F)),
checkboxInput("MONTHLY_FILTER", "Monthly filter"),
conditionalPanel(
condition = "input.MONTHLY_FILTER == true",
selectInput("MONTH", "Select month:", choices = c("January" = "-01", "Februray" = "-02", "March" = "-03", "April" = "-04", "May" = "-05", "June" = "-06", "July" = "-07", "August" = "-08", "September" = "-09", "October" = "-10", "November" = "-11", "December" = "-12"))
)
)),
column(3, wellPanel(
radioButtons("REGION", "Select region:", choices= c("Whole world" = "world", "Europe" = 150))
))
),
tags$h4(id ="title4", "Country map by target value"),
br(),
htmlOutput("gvis"),
htmlOutput("table")
),
## Capacity usage by destination TABPANEL
tabPanel("Capacity usage by destination",
icon = icon("line-chart"),
tags$h3(id ="title3", "Capacity usage by destination"),
br(),
column(4, wellPanel(
selectInput("COUNTRY", "Select country:", choices= sort(unique(database$COUNTRY)), selected = c("Austria"))
)),
column(4, wellPanel(
selectInput("CITY", "Select city:", choices= NULL)
)),
column(4, wellPanel(
selectInput("DEST", "Select destination:", choices= NULL)
)),
fluidRow(
column(12,
tags$h4(id ="title4", "Time series of seat capacity and number of passangers ",
tags$style(HTML("
#title4 {
font-weight: bold;
text-align: center;
}
"))
),
plotlyOutput("phonePlot"),
tags$h4(id ="title4", "Capacity usage per year"),
br(),
htmlOutput("gauge")
)
)
),
## Help TABPANEL
tabPanel("Help",
icon = icon("medkit"),
br(),
tags$h3(id ="title3", "Analyze of Ferenc Liszt International Airport database with RStudio Shiny application"),
tags$div(id ="help_format",
tags$style(HTML("
#help_format {
font-size: 130%;
}
")),
br("This application was built for", a("Budapest satRday Conference", href = "http://budapest.satrdays.org/"), "data visualization challenge, which will take place in 03/09/2016."),
br("The dataset was come from the", a("Hungarian Central Statistical Office", href = "https://www.ksh.hu/?lang=en"), "which includes the flights to and from the Budapest Ferenc Liszt International Airport between 2007 and 2012. You can download the Excel database", a("here", href = "http://budapest.satrdays.org/data/BUD%20flights%202007-2012%20v2.xlsx"), "."),
br("This is an interactive", a("RStudio Shiny", href = "http://shiny.rstudio.com/"), "web-based application so it was written in R language but this also includes some little HTML and CSS tricks."),
tags$img(id ="gif_size", src = "animation1.gif",
tags$style(HTML("
#gif_size {
width: 60%;
height: 60%;
display: block;
margin: 0 auto;
}"
))
),
br("The application has two main parts:"),
tags$ul(
tags$li("You can compare cities according to the key variables on", icon("map-marker") ,tags$b("the country specific report"), ", with a country map, and also with fancy table."),
tags$li("There is a capacity usage report which consists", icon("line-chart") ,tags$b("a year level aggregation"), "(which is a nice", a("googleVis", href = "https://github.com/mages/googleVis#googlevis"), "chart by the way) and also a seat capacity and number of passengers comparison.")
),
br("This application is", tags$b("fully customizable:"),"so for example in the capacity usage report You can select any of the possible destinations with a dynamic list. In the country specific aggregation not only the year is selectable (try to animate it!",icon("play") ,"), You can also filter the data by month (if You want) and It is also possible to narrow the aspect of the map. There are another useful customizable solutions if You check the",icon("wrench") ,"sidebar."),
tags$img(id ="gif_size", src = "animation2.gif"),
br("I think this is a nice tool for quick analysis to discover the key meaning of the data. There is a semi-closed structure: the possible key analysis aspects are seen, but the user can also discover some interesting facts, can check the details.")
)
)
)
)
))
|
dc79d85fca05069b8e3a6324141343d158454abe | 56340b8a4a787556a81c552b313e5ed50b8d38d6 | /uber-nyc/taxi_trip_analysis.R | 8258dab7f366338d2a71f7928b77243195dd3a53 | [] | no_license | andrewflowers/data-analysis | d04888568720605732338fec24c0e110faa88dfb | 87f2dd2d818d2e47f68e3110813a3d52e2e39c84 | refs/heads/master | 2021-05-30T04:07:22.453796 | 2015-12-16T23:28:06 | 2015-12-16T23:28:06 | 37,140,645 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,258 | r | taxi_trip_analysis.R | # Taxi trip analysis
# Andrew Flowers
## NOTE(review): hard-coding the working directory makes the script
## non-portable; kept for compatibility with the original workflow.
setwd("~/data-analysis/uber-nyc/")
## library() (unlike require()) fails immediately with an error when a
## package is missing, instead of returning FALSE and failing later.
library(readr)
library(stringr)
library(lubridate)
library(dplyr)
library(ggplot2)
###########################################
## Yellow and Green Cab Trip Record Data ##
###########################################
## To download directly from TLC website: http://www.nyc.gov/html/tlc/html/about/statistics.shtml
# apr2014yel <- read_csv("https://storage.googleapis.com/tlc-trip-data/2014/yellow_tripdata_2014-04.csv")
## Read for doownloaded csv files
# apr2014 <- read_csv("taxi-trip-data/yellow_tripdata_2014-04.csv", col_types="cccinnnicnncnnnnnn")
# may2014 <- read_csv("taxi-trip-data/yellow_tripdata_2014-05.csv", col_types="cccinnnicnncnnnnnn")
# jun2014 <- read_csv("taxi-trip-data/yellow_tripdata_2014-06.csv", col_types="cccinnnicnncnnnnnn")
# jul2014 <- read_csv("taxi-trip-data/yellow_tripdata_2014-07.csv", col_types="cccinnnicnncnnnnnn")
# aug2014 <- read_csv("taxi-trip-data/yellow_tripdata_2014-08.csv", col_types="cccinnnicnncnnnnnn")
# sep2014 <- read_csv("taxi-trip-data/yellow_tripdata_2014-09.csv", col_types="cccinnnicnncnnnnnn")
## Columns kept from the yellow-cab files: 2, 6 and 7 -- named
## datetime / lon / lat by the names() call below.
vars <- c(2,6:7)
## NOTE(review): apr2014..sep2014 come from the read_csv calls commented
## out above; run those first in an interactive session.
yellow <- rbind(apr2014[,vars], may2014[,vars], jun2014[,vars], jul2014[,vars], aug2014[,vars], sep2014[,vars])
names(yellow) <- c("datetime", "lon", "lat")
## Split "date time" strings into their two parts (part 1 = date).
dateTimeSplit <- str_split_fixed(yellow$datetime, " ", n=2)
yellow$date <- ymd(dateTimeSplit[,1])
yellow$month <- month(yellow$date)
## Trip counts per day and per month.
byDay <- yellow %>% group_by(date) %>% summarize(n=n())
byMonth <- yellow %>% group_by(month) %>% summarize(n=n())
#yellow$time <- hms(dateTimeSplit[,2])
#yellow$type <- 'yellow'
#
#yellow2 <- yellow %>% select(date, time, lon, lat, type)
#write_csv(yellow2, "clean-yellow-cab-rides.csv")
#
# yellow$day <- as.numeric(wday(yellow$date))
# yellow$hour <- as.numeric(hour(yellow$time))
# table(yellow$day)
# table(yellow$hour)
### Green ###
green_apr2014 <- read_csv("green/green_tripdata_2014-04.csv", col_types="cccinnnninnnnnnlninii")
green_may2014 <- read_csv("green/green_tripdata_2014-05.csv", col_types="cccinnnninnnnnnlninii")
green_jun2014 <- read_csv("green/green_tripdata_2014-06.csv", col_types="cccinnnninnnnnnlninii")
green_jul2014 <- read_csv("green/green_tripdata_2014-07.csv", col_types="cccinnnninnnnnnlninii")
green_aug2014 <- read_csv("green/green_tripdata_2014-08.csv", col_types="cccinnnninnnnnnlninii")
green_sep2014 <- read_csv("green/green_tripdata_2014-09.csv", col_types="cccinnnninnnnnnlninii")
## Columns kept from the green-cab files: 1, 5 and 6 (named
## datetime / lon / lat below).
vars2 <- c(1,5:6)
## BUG FIX: the rbind previously indexed with `vars` (the yellow-cab column
## positions c(2, 6:7)), extracting the wrong columns from the green-cab
## data frames; it must use the `vars2` defined just above.
green <- rbind(green_apr2014[,vars2], green_may2014[,vars2], green_jun2014[,vars2],
               green_jul2014[,vars2], green_aug2014[,vars2], green_sep2014[,vars2])
names(green) <- c("datetime", "lon", "lat")
green$type <- 'green'
dateTimeSplit <- str_split_fixed(green$datetime, " ", n=2)
green$date <- ymd(dateTimeSplit[,1])
green$month <- month(green$date)
byDay_gre <- green %>% group_by(date) %>% summarize(n=n())
byMonth_gre <- green %>% group_by(month) %>% summarize(n=n())
ggplot(data=byDay_gre, aes(x=date, n))+geom_point()+geom_smooth()
green$time <- hms(dateTimeSplit[,2])
green <- green %>% select(date, time, lon, lat, type)
green$day <- as.numeric(wday(green$date))
green$hour <- as.numeric(hour(green$time))
table(green$day)
table(green$hour)
|
faf99d8d53009386dc2884b17dbcdc95862ece98 | 829fdac9c9b62a67f6cd5e42ca3336f069c59dd0 | /man/make.genotypes.Rd | 831263827d076482e33c2da148180a5da7c07ea9 | [] | no_license | andrewparkermorgan/genodb | a8bb3f2349a8b5cc3bd6d6d1ceacb272b5db033d | 46da05a188d7ffc82e209f4df677b0a7a505d310 | refs/heads/master | 2021-01-10T13:46:12.959857 | 2017-02-13T02:54:03 | 2017-02-13T02:54:03 | 53,357,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,134 | rd | make.genotypes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intensities.R
\name{make.genotypes}
\alias{make.genotypes}
\title{Create an \code{argyle::genotypes} object via a database query}
\usage{
make.genotypes(ids, snps = NULL, by = c("name", "id"),
keep.intensity = TRUE, rename = TRUE, make.names.markers = FALSE, ...)
}
\arguments{
\item{ids}{vector of sample IDs (names or internal database IDs)}
\item{snps}{a \code{dataframe} containing marker map in \code{argyle} format: columns chromosome, marker name, cM position, physical position, allele 1, allele 2}
\item{by}{search for samples by unique numeric ID (\code{"id"}) or by human-readable sample name (\code{"name"})}
\item{keep.intensity}{logical; if \code{TRUE}, return both genotype calls and hybridization intensities}
\item{make.names.markers}{logical; if \code{TRUE}, sanitize marker names of some non-alphanumeric characters with \code{make.names()}}
\item{...}{other options passed to \code{fetch.intensities()}}
}
\value{
a \code{argyle::genotypes} object
}
\description{
Create an \code{argyle::genotypes} object via a database query
}
|
45e71b0820aa1479dd16129d69a93d36fcf5358a | f03731aae285c071a40b534c0664efee426e32ed | /source/inst/unitTests/test_rebet.R | 61dc6ef1d26c3067894bc203e0427375b9151afb | [] | no_license | binzhulab/REBET | 62a15d0f32309c78e861acbb4defbf9e54d4fc07 | f794c94bd131b57145c25e5489bba1355da3b417 | refs/heads/master | 2020-06-06T00:55:51.803134 | 2019-06-18T19:00:37 | 2019-06-18T19:00:37 | 192,594,314 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 728 | r | test_rebet.R | test_rebet <- function() {
  ## Example data shipped with the REBET package; presumably provides the
  ## `response`, `genotypes` and `subRegions` objects used below -- TODO
  ## confirm against the package's data documentation.
  data(data, package="REBET")
  res <- rebet(response, genotypes, subRegions)
  ## Pull the statistics under test out of the result object.
  meta.beta <- res$Meta$beta
  side1.beta <- res$Subset.1sided$beta
  side1.se <- res$Subset.1sided$sd
  side2.beta1 <- res$Subset.2sided$beta.1
  side2.se1 <- res$Subset.2sided$sd.1
  side2.p2 <- res$Subset.2sided$pval.2
  ## Regression-test each statistic against previously validated reference
  ## values (checkEqualsNumeric -- presumably RUnit, given inst/unitTests).
  checkEqualsNumeric(meta.beta, 4.398939, tolerance=1.0e-4)
  checkEqualsNumeric(side1.beta, 7.037888, tolerance=1.0e-4)
  checkEqualsNumeric(side1.se, 1.339332, tolerance=1.0e-4)
  checkEqualsNumeric(side2.beta1, 7.037888, tolerance=1.0e-4)
  checkEqualsNumeric(side2.se1, 1.348216, tolerance=1.0e-4)
  checkEqualsNumeric(side2.p2, 0.644174, tolerance=1.0e-4)
|
b3501a3137484612fe4a0b23a04095506e48bbc2 | 4932a5a1ec374b0d4bcb076b8c815c8bd85d1afd | /R/readTxtTokenizeAndStore.R | 1fff378fe37150802a4826802e4b7ba3f33b6506 | [] | no_license | robiRagan/prePostChavezTextbooks | 4b48bb7a9c87821f674899e45619192752ab97bf | a107c21e76f890377e4c17d394a7ab9fafa9b282 | refs/heads/master | 2020-12-05T02:48:15.305646 | 2020-05-30T00:34:58 | 2020-05-30T00:34:58 | 231,985,306 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,023 | r | readTxtTokenizeAndStore.R | ################################################################
# readTxtTokenizeAndStore.R
#
# Reads in extracted text files, tokenizes the text and stores the result as a .csv
################################################################
#' @export
readTxtTokenizeAndStore <- function(pathToTxts, pathForOutput){
  # Tokenize the text files of one set and write the combined tokenized
  # data frame to a single CSV.
  #
  # Args:
  #   pathToTxts:    input directory, expected of the form "PDFs/<setName>/"
  #                  (e.g. "PDFs/preChavez/").
  #   pathForOutput: directory where the CSV is written (e.g. "tokenizedText/").
  #
  # Derive the set name by stripping the "PDFs/" prefix and the trailing
  # slash, e.g. "PDFs/preChavez/" -> "preChavez".
  setName <- str_remove(string = pathToTxts, pattern = "PDFs/")
  setName <- str_remove(string = setName, pattern = "/")
  # Output stem: "tokenized" + set name with its first letter capitalized,
  # e.g. "tokenizedPreChavez". paste0() replaces paste(..., sep = "").
  setNameForWrite <- paste0("tokenized", toupper(substring(setName, 1, 1)), substring(setName, 2))
  # Extract and tokenize every file of the set, then persist the combined
  # data frame as <pathForOutput>/<setNameForWrite>.csv.
  setTxtTokensRaw <- extractFromTxtAndTokenizeSet(filePath = pathToTxts, txtSetName = setName)
  write_csv(x = setTxtTokensRaw, path = paste0(pathForOutput, "/", setNameForWrite, ".csv"))
}
|
1db32142533df3c2bc86c764e08547612ce4070f | 24abbc3f99551e2fabb18489bb2a4cca75f39cdb | /R/qr-helpers.R | bddf766bde4c10ce448ae0ebc204c6b6bd711f54 | [] | no_license | bcallaway11/qrme | aa73110a98d58af258f4fd00e8a115fca340d91d | a61590cf83b922368f3e8894c0d58b8858cc8dd4 | refs/heads/master | 2021-07-23T00:00:28.475923 | 2021-06-25T18:03:05 | 2021-06-25T18:03:05 | 133,422,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,033 | r | qr-helpers.R | #' @title betfun
#'
#' @description Turns matrices of QR parameter values into functions.
#' The returned function will be a map from (0,1) to a vector of dimension
#' K where K is the number of elements in X (also it is the number of columns
#' in the passed in matrix of QR parameter values). It works by linear
#' interpolation of parameter values (for interior values of tau) and
#' based on some assumptions to handle values in the tails.
#' @param betmat An L x K matrix of parameter values where
#' L is the number of tau and
#' K is the dimension of X
#' @param tau The particular quantiles that the parameters were estimated at.
#' It should be an L-dimensional vector.
#' @return A function that can be called on any element from 0 to 1 and
#' returns a vector of parameter values
#' @export
betfun <- function(betmat, tau) {
  ## Build one interpolating coefficient function per column of the QR
  ## parameter matrix (L quantiles x K covariates). Column 1 is treated as
  ## the intercept (isconst = TRUE) so its tails are extrapolated rather
  ## than held constant.
  betmat <- as.matrix(betmat)
  per.column <- lapply(seq_len(ncol(betmat)), function(col.idx) {
    betfun.inner(betmat[, col.idx], tau, isconst = (col.idx == 1))
  })
  ## The returned closure evaluates every column's function at u and
  ## concatenates the results into one K-dimensional numeric vector.
  function(u) {
    unlist(lapply(per.column, function(f) f(u)))
  }
}
#' @title betfun.inner
#'
#' @description Does the heavy lifting for betfun. Basically, betfun is just
#' a wrapper for this that can handle a matrix of values of parameters.
#' This function does the work, but only for a single vector of betas.
#' @param betvec vector of parameter values
#' @param tau corresponding vector of quantiles where beta was estimated
#' @param isconst ?? -- what is this doing?
#' @return function that takes argument from (0,1)
#' @keywords internal
#' @export
betfun.inner <- function(betvec, tau, isconst=FALSE) {
  ## Build an interpolating function for a single QR coefficient.
  ##
  ## betvec:  estimated coefficient values at the quantiles in `tau`.
  ## tau:     quantiles (ascending, strictly inside (0,1)) at which the
  ##          coefficient was estimated.
  ## isconst: TRUE for the intercept; its tails are then extrapolated
  ##          logarithmically instead of held constant.
  ##
  ## Returns a function of a single u in [0,1]: linear interpolation of
  ## betvec between the bracketing quantiles, with tail assumptions below
  ## min(tau) and above max(tau).
  bet <- function(u) {
    ## Fail fast with a clear message; the original reached an
    ## `if (argument of length zero)` error for u outside [0, 1], and used
    ## scalar conditions built with elementwise `&`.
    if (length(u) != 1 || is.na(u) || u < 0 || u > 1) {
      stop("u must be a single value in [0, 1]")
    }
    lam1 <- 1 - min(tau)
    lam2 <- max(tau)
    isconst <- 1*isconst
    ## Left tail: constant for slopes; logarithmic term for the intercept.
    if (u < min(tau)) {
      return(betvec[which(tau >= u)[1]] + isconst*log(u/min(tau))/lam1)
    }
    ## Right tail: symmetric treatment above max(tau).
    if (u > max(tau)) {
      return(betvec[tail(which(tau <= u), 1)] + isconst*log((1-u)/(1-max(tau)))/lam2)
    }
    ul.pos <- tail(which(tau <= u), 1) ## position of ul (in text)
    uu.pos <- which(tau >= u)[1]       ## position of uu (in text)
    ul <- tau[ul.pos]
    uu <- tau[uu.pos]
    if (uu == ul) {
      ## u coincides with one of the estimation quantiles.
      betvec[ul.pos]
    } else {
      ## Linear interpolation between the bracketing quantiles.
      betvec[ul.pos] + (u-ul)*(betvec[uu.pos] - betvec[ul.pos])/(uu-ul)
    }
  }
  return(bet)
}
## seems to be unused since we switched to EM algorithm
## #' @title getParams
## #'
## #' @description Helper function to take the result of the optimization routine
## #' and converts back to parameters
## #'
## #' @param optout results of call to optim
## #' @param ksig the dimension of the mixture model for the measurement error
## #'
## #' @return list of parameters
## #'
## #' @export
## getParams <- function(optout, formla, data, tau, nmix) {
## xformla <- formla
## xformla[[2]] <- NULL ## drop y variable
## x <- model.matrix(xformla, data)
## kx <- ncol(x)*length(tau)
## if (class(optout) == "numeric") {
## par <- optout
## } else { ## output of optim
## par <- optout$par
## }
## bet <- par[1:kx]
## k <- kx/length(tau)
## n <- nrow(x)
## ktau <- length(tau)
## bet <- split(bet,ceiling(seq_along(bet)/k))
## kmu <- nmix-1
## if (nmix > 1) {
## pi1 <- par[(kx+1):(kx+kmu)]
## mu1 <- par[(kx+kmu+1):(kx+kmu+kmu)]
## pi <- c(pi1, 1-sum(pi1))
## mu <- c(mu1, -sum(mu1*pi1)/(1-sum(pi1)))
## } else {
## pi <- 1
## mu <- 0
## }
## ksig <- nmix
## sig <- par[(kx+kmu+kmu+1):(kx+kmu+kmu+ksig)]
## out <- list(bet=bet, pi=pi, mu=mu, sig=sig)
## class(out) <- "PARAM"
## out
## }
#' @title makeRQS
#'
#' @description Take the results of the optimization and convert them
#' into a quantile regression object, so we can use all the tools from
#' the quantreg package (e.g. inverting the quantiles). The key step
#' here is rearrangement, because the objective function doesn't impose
#' any ordering -- see the discussion in HLLP. We follow HLLP's
#' recommendation and order the QR parameters by what makes the quantiles
#' to be increasing for the mean values of the x's. This means that
#' for any particular value of the x's, the quantile function may not
#' necessarily be increasing in tau. However, we can separately rearrange
#' those as needed. But this gives a way to report the QR parameters.
#'
#' @param params an LxK matrix of QR parameters where L is the number of
#' quantiles that parameters have been estimated at and K is the dimension
#' of the covariates.
#' @param formla y ~ x, a formula for the outcome on the regressors
#' @param data a data.frame containing y and x
#' @param tau the vector of quantiles where QR was estimated
#'
#' @return rqs object
#'
#' @export
makeRQS <- function(params, formla, data, tau) {
  ## Package estimated QR coefficients as a quantreg-style "rqs" object so
  ## the quantreg toolset (predict, quantile inversion, ...) can be reused.
  ##
  ## params: list with element `bet`, an L x K matrix of QR coefficients
  ##         (L quantiles by K covariates).
  ## formla: y ~ x formula for the outcome on the regressors.
  ## data:   data.frame containing y and x; retained in the signature for
  ##         backward compatibility (no longer used -- see note below).
  ## tau:    vector of quantiles at which the QR was estimated.
  ##
  ## NOTE: the original built the design matrix (x <- model.matrix(...))
  ## only for the since-disabled HLLP-style rearrangement step kept in the
  ## old commented-out code; that dead computation has been removed.
  optout <- list()
  class(optout) <- "rqs"
  optout$terms <- terms(formla)
  optout$formula <- formla
  ## "rqs" stores coefficients as K x L (covariates by quantiles).
  optout$coefficients <- t(params$bet)
  optout$tau <- tau
  optout
}
|
682c8004adb26aca5a11615cf6262a81dbef003c | 7b7b63965ab05eceda7c25ecb41fc2a55579e7d6 | /zad4Plots.R | 1991a0f243fbe50f6e22ec9558e2f8381314ebc2 | [] | no_license | jakub-guner/qap_plots | 3b3671062f0937086ad3bdf4452b7cac20e596f7 | fad91e3fa3aec42e5b36d44a7ad04aec4b9b8d22 | refs/heads/master | 2020-08-02T16:26:10.803931 | 2016-12-04T10:36:56 | 2016-12-04T10:36:56 | 73,560,431 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 884 | r | zad4Plots.R | saveZad4Plots <- function(restartsDF){
restartsClean <- restartsDF %>%
group_by(instance, algorithm) %>%
mutate(noOfRestarts=row_number(),
bestSoFar=rollapply(value, FUN=min, width=noOfRestarts, align="right"),
meanSoFar=rollapply(value, FUN=mean, width=noOfRestarts, align="right")
) %>%
select(instance, algorithm, noOfRestarts, bestSoFar, meanSoFar) %>%
gather(typeOfResult, result, bestSoFar:meanSoFar)
for (qapInstance in levels(restartsClean$instance)){
dfZad4 <- restartsClean %>% filter(instance==qapInstance)
plotZad4 <- ggplot(dfZad4, aes(x=noOfRestarts, y=result, colour=typeOfResult)) +
facet_wrap(~ algorithm, ncol = 1) +
geom_point(size=0.2) +
theme_bw()
fileName <-paste(as.character(qapInstance), "_Restarts.png", sep="")
ggsave(fileName, plot = plotZad4, width = 7, height = 5)
}
} |
bd937c2e4a1eda9efec0eca127b5ad9da27dd6bd | 39dca50416c3e6d7c0f436e9a3a47a91efb8a20e | /man/STIDF-class.Rd | 61d3bdeb3beb10a4f415f6653ea25a26a7382a0f | [] | no_license | mages/spacetime | 5fda7c26023c53fc397b8c6ac3a0369738a3862e | 709d30a7751823aafc6fc77dd9196220edf9ecea | refs/heads/master | 2021-01-18T08:13:56.523213 | 2014-08-22T16:57:22 | 2014-08-22T16:57:22 | 23,264,992 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,136 | rd | STIDF-class.Rd | \name{STIDF-class}
\docType{class}
\alias{STIDF-class}
\alias{STI-class}
\alias{STIDF}
\alias{STI}
\alias{as.data.frame.STIDF}
\alias{as.data.frame.STI}
\alias{[,STIDF-method}
\alias{[,STI-method}
\alias{geometry,STIDF-method}
\alias{geometry,STI-method}
\alias{coerce,STIDF,STSDF-method}
\alias{plot,STI,missing-method}
\alias{rbind.STIDF}
\title{Class "STIDF"}
\description{ A class for unstructured spatio-temporal data; for
n spatial locations and times, n observations are available }
\section{Objects from the Class}{
Objects of this class carry full space/time grid data }
\section{Slots}{
\describe{
\item{\code{sp}:}{Object of class \code{"Spatial"}}
\item{\code{time}:}{Object holding time information, see \link{ST-class}}
\item{\code{data}:}{Object of class \code{data.frame}, which holds
the measured values }
}
}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "STIDF")}: selects spatial-temporal entities, and attributes }
% \item{plot}{\code{signature(x = "Spatial", y = "missing")}: plot method
% for spatial objects; does nothing but setting up a plotting region choosing
% a suitable aspect if not given(see below), colouring the plot background using either a bg= argument or par("bg"), and possibly drawing axes. }
% \item{summary}{\code{signature(object = "Spatial")}: summarize object}
}
}
\usage{
STI(sp, time, endTime)
STIDF(sp, time, data, endTime)
\S4method{[}{STIDF}(x, i, j, ..., drop = FALSE)
\S4method{coerce}{STIDF,STSDF}(from, to, strict=TRUE)
}
\arguments{
\item{sp}{object of class \link{Spatial}}
\item{time}{object holding time information; when STIDF is called,
a non-ordered vector with times, e.g. \link{POSIXct} will also work,
and rearrange the \code{sp} and \code{data} slots
according to the ordering of time; for this to work no
ties should exist. }
\item{endTime}{ vector of class \code{POSIXct}, indicating the end
points of time intervals for the observations. By default, for \code{STI}
objects \code{time} is taken, indicating that time intervals have zero width
(time instances) }
\item{data}{data frame with appropriate number of rows}
\item{x}{an object of class STFDF}
\item{i}{selection of record index (spatial/temporal entities),
or character string with temporal selection }
\item{j}{selection of attributes }
\item{...}{ignored}
\item{drop}{if TRUE and a single spatial entity is selected, an object
of class \link{xts} is returned (NOT yet implemented);
if TRUE and a single temporal entity is
selected, and object of the appropriate \code{Spatial} class is returned;
if FALSE, no coercion to reduced classes takes place}
\item{from}{object of class STFDF}
\item{to}{target class}
\item{strict}{ignored}
}
\note{ arguments \code{sp}, \code{time} and \code{data} need
to have the same number of records,
and regardless of the class of time (xts or POSIXct) have to be
in correspoinding order: the triple \code{sp[i]}, \code{time[i]}
and \code{data[i,]} refer to the same observation }
\author{ Edzer Pebesma, \email{edzer.pebesma@uni-muenster.de} }
\references{ http://www.jstatsoft.org/v51/i07/ }
\keyword{classes}
|
e4cca09a0779bf65fb628e7acebb65aacc56c235 | 9a987d4fe03c1426a442b05252e389f9e171937b | /rankall.R | f759d10fd78820be7de07656f634f6171c66390d | [] | no_license | andre-silva/ProgrammingAssignment3 | 78fece259b2b9803885402895c348c14bd650c0e | e035de0bfd2ba6115b085ae3e7c5dd1efb1ee202 | refs/heads/master | 2016-08-07T01:30:56.946705 | 2014-10-27T00:13:28 | 2014-10-27T00:13:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,550 | r | rankall.R | rankall <- function(outcome, num = "best") {
    ## Read outcome data
    ## (all columns as character; the outcome column is coerced below).
    data <- read.csv("outcome-of-care-measures.csv",colClasses="character")
    ## Check that state and outcome are valid
    if(!is.element(outcome, c("heart attack","heart failure","pneumonia"))) {
        stop("invalid outcome")
    }
    ## For each state, find the hospital of the given rank
    ## Return a data frame with the hospital names and the
    ## (abbreviated) state name
    ## First check relevant column
    ## (columns 11 / 17 / 23 hold the rates for the three outcomes --
    ## TODO confirm against the CSV header).
    if(outcome == "heart attack")
        col <- 11
    if(outcome == "heart failure")
        col <- 17
    if(outcome == "pneumonia")
        col <- 23
    ## Transform outcome to numeric and remove NAs
    ## (suppressWarnings hides the "NAs introduced by coercion" warning
    ## triggered by non-numeric entries such as "Not Available").
    data[,col] <- suppressWarnings(as.numeric(data[,col]))
    data <- data[!is.na(data[,col]),]
    ## Within each state, order hospitals by the outcome value (ties broken
    ## by Hospital.Name) and keep column 2 of each ordered subset.
    rankedList <- by(data,data$State,function(x)
        x[order(x[,col],x$Hospital.Name),2])
    ## getRank() (defined below) picks the entry requested by `num`
    ## ("best", "worst" or a numeric rank) from each state's ranked vector.
    stateRank <- sapply(rankedList,getRank,num)
    data.frame(hospital=stateRank,state=names(stateRank),
               row.names=names(stateRank))
getRank <- function(rankedList, num) {
    ## Pick one entry from a ranked vector of hospital names.
    ##
    ## num: a rank (numeric, or a string coercible to an integer),
    ##      "best" (first entry) or "worst" (last entry).
    ## Returns NA for an out-of-range rank or an unrecognized keyword.
    rank <- suppressWarnings(as.integer(num))
    if (!is.na(rank)) {
        ## Coerce before comparing and indexing: the original compared the
        ## raw `num` with length(rankedList) and indexed with it, which
        ## silently falls back to character comparison and name-based
        ## indexing when `num` is a numeric string such as "10".
        if (rank > length(rankedList))
            NA
        else
            rankedList[rank]
    } else if (num == "best") {
        head(rankedList, 1)
    } else if (num == "worst") {
        tail(rankedList, 1)
    } else {
        NA
    }
}
|
8babdf52938c6cdbc1761edc6a963fb7842f5140 | c6ad1a533669dd72a8b8244d4b42898ec8fabc75 | /rankall.R | 06385f323c7779c4cfad477443223464c056ae10 | [] | no_license | sindhu-1/ProgrammingAssignment3-R | 94d8facb3ad3fb723e759e33cd82aebb7ac0bcb2 | 8a43b97374eed23a5326fa77c8e800660a8c8864 | refs/heads/master | 2021-01-10T03:19:52.504011 | 2015-10-23T18:36:52 | 2015-10-23T18:36:52 | 44,831,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,626 | r | rankall.R | rankall <- function(outcome,num="best"){
  ## Read the full outcome file; only a handful of columns are used below.
  outcomedata <- read.csv("outcome-of-care-measures.csv", stringsAsFactors=FALSE)
  ## Keep hospital name (col 2), state (col 7) and the three outcome
  ## columns (11 = heart attack, 17 = heart failure, 23 = pneumonia --
  ## TODO confirm against the CSV header).
  outcomesubdata <- data.frame(HospitalName=outcomedata[,2],State=outcomedata[,7],heartattack=outcomedata[,11],heartfailure=outcomedata[,17],pneumonia=outcomedata[,23],stringsAsFactors=FALSE)
  ## Rates arrive as text; coerce to numeric, silently turning
  ## non-numeric entries (e.g. "Not Available") into NA.
  outcomesubdata[,3] <- suppressWarnings(as.numeric(outcomesubdata[,3]))
  outcomesubdata[,4] <- suppressWarnings(as.numeric(outcomesubdata[,4]))
  outcomesubdata[,5] <- suppressWarnings(as.numeric(outcomesubdata[,5]))
  ## One-column data frame of the distinct state abbreviations.
  statenames<-unique(outcomesubdata$State)
  statenames <- as.data.frame(statenames)
  names(statenames)[1]<-"State"
  statenames[,1]<-as.character(statenames[,1])
  outc <- c("heart attack","heart failure","pneumonia")
  outcome <- tolower(outcome)
  allstates <-NULL
  stateind<-NULL
  hospital <- NULL
  #outcome<- "pneumonia"
  ## Rank hospitals within every state for the requested outcome.
  ## NOTE(review): the loop bound 54 is hard-coded; nrow(statenames) would
  ## be safer -- TODO confirm it matches the number of states in the data.
  for(x in 1:54){
    statedata <- subset(outcomesubdata,outcomesubdata$State == statenames[x,])
    ## Drop rows with a missing rate for the chosen outcome, order by rate
    ## (ties broken by hospital name, then state), and attach a 1-based
    ## `rank` column.
    if(outcome == outc[1]){
      statedata <-statedata[complete.cases(statedata[,3]),]
      statedata1 <- statedata[order(statedata[,3],statedata[,1],statedata[,2]),]
      statedata1<-cbind(statedata1,rank=seq(1,nrow(statedata1)))
    }else if(outcome == outc[2]){
      statedata <-statedata[complete.cases(statedata[,4]),]
      statedata1 <- statedata[order(statedata[,4],statedata[,1],statedata[,2]),]
      statedata1 <-cbind(statedata1,rank=seq(1,nrow(statedata1)))
    }else if(outcome == outc[3]){
      statedata <-statedata[complete.cases(statedata[,5]),]
      statedata1 <-statedata[order(statedata[,5],statedata[,1],statedata[,2]),]
      statedata1 <-cbind(statedata1,rank=seq(1,nrow(statedata1)))
    }else{
      stop("Invalid outcome")
    }
    ## Accumulate the per-state ranked tables (grows via rbind; acceptable
    ## for a few dozen iterations).
    allstates <- rbind(allstates,statedata1)
  }
  ## Select one hospital per state according to `num`. Merging against
  ## `statenames` with all.x=TRUE keeps states with no qualifying hospital
  ## (their hospital entry becomes NA).
  if(is.numeric(num)){
    hospital1 <- subset(allstates,allstates$rank == num)
    hospital <- merge(statenames,hospital1,by.x="State",all.x=TRUE)
    hospital <- hospital[order(hospital$State),]
    hospitalrank <- cbind(hospital=hospital[,2],state=hospital[,1])
  }else if(num =="worst"){
    ## "worst": the last row of each state's ranked table; `n` is that
    ## state's row count in `allstates`.
    for(i in 1:nrow(statenames)){
      statedataW <- subset(allstates,allstates$State == statenames[i,])
      n <- as.numeric(table(statedataW$State== statenames[i,])["TRUE"])
      hospital1 <- statedataW[n,]
      hospital<- rbind(hospital,hospital1)
    }
    hospital <- merge(statenames,hospital,by.x="State",all.x=TRUE)
    hospital <- hospital[order(hospital$State),]
    hospitalrank<- cbind(hospital=hospital[,2],state=hospital[,1])
  }else if(num =="best"){
    ## "best" is simply rank 1.
    num <- 1
    hospital <- subset(allstates,allstates$rank == num)
    hospital <- merge(statenames,hospital,by.x="State",all.x=TRUE)
    hospital <- hospital[order(hospital$State),]
    hospitalrank<- cbind(hospital=hospital[,2],state=hospital[,1])
  }
  ## Two-column result (hospital, state), one row per state.
  return(as.data.frame(hospitalrank))
}
|
f820558bad03c01b3c26165f4e12c84d6c45e989 | bf9f77e17111b590fe44905ebd9391009a2a1390 | /man/individus.Rd | b9f9f3ace5b2dafa58c397ac1506e26e96609ce7 | [
"MIT"
] | permissive | ove-ut3/apogee | 5cd9fed8e1cb4fc359b824fdb16ff269952d6320 | c08ff84497bbaab4af90a0eeb779a338ff158b87 | refs/heads/master | 2021-06-02T09:03:41.344113 | 2020-05-19T13:22:59 | 2020-05-19T13:22:59 | 115,185,672 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,120 | rd | individus.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{individus}
\alias{individus}
\title{Table individus}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 172625 rows and 17 columns.
}
\usage{
individus
}
\description{
\preformatted{## Rows: 172,625
## Columns: 17
## $ code_etudiant <chr> "20000286", "20000313", "20000351", "20000393", "20000398", ...
## $ ine <chr> "1692154719H", "1693005364X", "1693019580Z", "1693008810T", ...
## $ nom <chr> "ARNOULD", "ESQUERRE", "DONADEL", "CABANAC", "VERGNES", "AZU...
## $ nom_usage <chr> NA, NA, NA, NA, NA, NA, NA, NA, NA, "LABAT ALLÉE", NA, "JAUZ...
## $ prenom <chr> "GUILLAUME", "ARIANE", "LORENE", "GUILLAUME", "VALERIE", "JA...
## $ sexe <chr> "M", "F", "F", "M", "F", "M", "F", "M", "F", "F", "M", "F", ...
## $ code_nationalite <chr> "100", "100", "100", "100", "100", "100", "100", "100", "100...
## $ date_naissance <date> 1980-11-28, 1982-11-08, 1982-10-19, 1982-03-08, 1982-03-04,...
## $ lib_ville_naissance <chr> "PERPIGNAN", "TOULOUSE", "KOUROU", "TOULOUSE", "PARIS", "AUC...
## $ annee_bac <int> 1999, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, ...
## $ code_departement_bac <chr> "065", "031", "031", "031", "012", "032", "031", "065", "081...
## $ code_etab_bac <chr> "0650041S", NA, NA, NA, NA, "0320067Z", "0310024H", "0650026...
## $ code_mention_bac <chr> "P", "B", "AB", "AB", "P", "AB", "B", "B", "AB", NA, "AB", "...
## $ code_bac <chr> "TIEL", "S", "SPC", "SMA", "S", "S", "SPC", "S", "S", "SMA",...
## $ code_type_etab_bac <chr> "LY", "LY", "LY", "LY", "LY", "LY", "LY", "LY", "LY", "LY", ...
## $ email_ups <chr> "guillaume.arnould@univ-tlse3.fr", "ariane.esquerre@univ-tls...
## $ code_departement_naissance <chr> "066", "031", "973", "031", "075", "032", "031", "065", "075...
}
}
\keyword{datasets}
|
341c599263d03c418497f65024a3e0e6c97d9015 | fcd2a5da3d9cc716e68b55ae476edf9432caf5e4 | /4a.R | 2873c2a8927ed703c4bdd4b6b629e9b5aa6b55cd | [] | no_license | bhuvanaLS/DSR | 0704048348adf85bd51a7381a38f2e88f21e18cf | 8b9d79a8dbc55e60dbb5731257895334afb43259 | refs/heads/master | 2020-08-01T06:10:57.170967 | 2019-11-21T05:40:24 | 2019-11-21T05:40:24 | 210,894,412 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 145 | r | 4a.R | install.packages('gcookbook')
## Attach the gcookbook package (example data sets for graphics demos).
library(gcookbook)
## Load the cabbage_exp data set into the workspace.
data('cabbage_exp')
## Print the data set (auto-printed at top level).
cabbage_exp
## Bar chart of the number of observations per cultivar.
barplot(table(cabbage_exp$Cultivar),xlab = 'Cultivar')
|
9b14ca306ebaf0caf5e00b619d5ec72bae25365a | 36f9bcfc3295f5685fa7e852315f79ff7627175d | /trigonometric_functions.R | 3ebd105181b81a620b7151df2393877ffd3948c7 | [] | no_license | Joseworks/Basic-R-Meetup-Demo | b64785895b0ae2b349a56eeece348d8d337daecb | 8610255b4c4aa3099a2ea99d3af66d71948973e1 | refs/heads/master | 2021-01-15T14:01:53.164039 | 2014-11-12T15:39:42 | 2014-11-12T15:39:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 246 | r | trigonometric_functions.R | # R always works with angles in radians!
# Degree/radian conversion helpers, kept for reference:
# deg <- function(radians) 180 * radians / pi
# rad <- function(degrees) degrees * pi / 180

# Single plotting panel (par(mfrow=...) controls the panel layout,
# not the margins as the old comment claimed).
par(mfrow = c(1, 1))

# Sample one full period [0, 2*pi] finely enough for smooth curves.
x <- seq(0, 2 * pi, 0.01)
y <- sin(x)
z <- cos(x)

# Sine curve as a line plot, with the cosine overlaid in red.
# Pass `col` directly to lines() instead of mutating the global
# graphics state via par(col = "red"), which would silently colour
# every subsequent plot in the session as well.
plot(x, y, type = 'l')
lines(x, z, col = "red")
|
ecc858963b8ef4e62eed842647c472d89cab08c8 | def9f15eb43407d1cf27a2ff95bead1a3d03afcc | /0000-00-01ToolBox/updateStock.R | d7445ef3957e5ca25656be3ea7701ad61bff6fd3 | [] | no_license | wangrf/QuantYou | 14df557de4471a7d04901678287518b8acea3a83 | c3484aa7d00733dcc79096e8e1c6c8ed34be1977 | refs/heads/master | 2020-09-28T15:29:29.553764 | 2019-12-24T13:36:21 | 2019-12-24T13:36:21 | 226,805,895 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,577 | r | updateStock.R |
## Fetch daily market data for one security from the Wind terminal (WindR:
## w.wsd / w.tdays / w.tdaysoffset) and cache it as <dataPath>/<sym>.RData,
## storing a single xts object named after the symbol. Three cases:
##   1. no cached file  -> download OHLC + `indicators` over
##      [startDate, endDate] and save a fresh cache;
##   2. cache exists    -> append rows for trading days after the cached end
##      date, and append columns for indicators not yet cached (fetched over
##      the cached date span);
##   3. cache current   -> print a message and do nothing.
##
## Args:
##   sym:        Wind security code; also the saved object and file name.
##   startDate:  first date to fetch ("%Y-%m-%d"); used for first downloads.
##   endDate:    last date to fetch; snapped to a trading day via
##               w.tdaysoffset(0, endDate) when it is not one.
##   indicators: Wind indicator field names to keep cached (open/high/low/
##               close are always added on the first download).
##   dataPath:   cache directory. NOTE(review): the default
##               `dataPath = dataPath` is self-referential and errors if the
##               argument is omitted -- presumably a same-named global was
##               intended; confirm against callers.
##
## Side effects: reads/writes .RData files under dataPath, loads/assigns
## objects in the global environment, prints progress messages; stops with
## an error when multiple cache files match the symbol.
updateStock <-
function(sym,
startDate,
endDate,
indicators,
dataPath = dataPath) {
## Locate an existing cache file for this symbol.
data.list <- dir(pattern = ".RData", path = dataPath)
idMatch <- grep(paste0(sym,".RData"), data.list)
nMatch <- length(idMatch)
if (nMatch == 0) {
## --- Case 1: no cache yet; do a full first-time download. ---
warning(paste0(sym, " No history data and it is the first time"))
startDate <- as.Date(startDate, "%Y-%m-%d")
endDate <- as.Date(endDate, "%Y-%m-%d")
## Always cache OHLC plus whatever the caller asked for.
new.ind <-
union(c("open", "high", "low", "close"), indicators)
## If endDate is not a trading day, move it to one.
w_tdays_data<-w.tdays(endDate,endDate)$Data
if(length(w_tdays_data)==0){
endDate=w.tdaysoffset(0,endDate)$Data[1,1]
}
y <-
w.wsd(sym,
new.ind,
startDate,
endDate,
"Currency=CNY;PriceAdj=F;Fill=Previous")$Data
## First column of the Wind result is the date; the rest become an xts.
y <- as.xts(y[,-1], order.by = as.Date(y[[1]], "%Y-%m-%d"))
indexFormat(y) <- '%Y-%m-%d'
indexTZ(y) <- "UTC"
idx.open <- match.names("OPEN", colnames(y))
idx.high <- match.names("HIGH", colnames(y))
idx.low <- match.names("LOW", colnames(y))
idx.close <- match.names("CLOSE", colnames(y))
## Impute missing prices: a missing close is replaced by the O/H/L mean
## (0 when all are missing), then missing O/H/L fall back to the close.
y[, idx.close] <-
ifelse(is.na(y[, idx.close]), rowMeans(y[, c(idx.open, idx.high, idx.low)], na.rm = T), y[, idx.close])
y[, idx.close] <-
ifelse(is.na(y[, idx.close]), 0, y[, idx.close])
y[, idx.high] <-
ifelse(is.na(y[, idx.high]), y[, idx.close], y[, idx.high])
y[, idx.low] <-
ifelse(is.na(y[, idx.low]), y[, idx.close], y[, idx.low])
y[, idx.open] <-
ifelse(is.na(y[, idx.open]), y[, idx.close], y[, idx.open])
## Column naming convention for the cache: "<sym>.<indicator>".
colnames(y) <- paste0(sym, ".", new.ind)
assign(sym, y)
save(list = c(sym),
file = paste0(dataPath, "/", sym, ".RData"))
print(
paste0(
"Have updated ",
paste0(sym, collapse = ","),
"",
"Have updated the indicators of ",
paste0(new.ind, collapse = ","),
",and update the date to",
endDate
)
)
}else if (nMatch > 1) {
## Ambiguous cache: refuse to guess which file to update.
stop(paste0("Exits multiple RData", "for ", sym))
}else if (nMatch==1){
## --- Case 2: cache exists; work out missing dates and indicators. ---
load(paste0(dataPath, "/", data.list[idMatch]), envir = .GlobalEnv)
x <- NULL
x <- get(sym)
last.date <- as.Date(tail(index(x), 1), "%Y-%m-%d")
## Recover indicator names from cached column names "<sym>.<indicator>":
## take the lower-cased substring after the last ".".
tempS <-
sapply(names(x), function(y)
max(unlist(gregexpr(
"[.]", y, useBytes = T
)))) + 1
tempE <- sapply(names(x), nchar)
idx.name <-
tolower(unlist(mapply(
substr,
x = names(x),
start = tempS,
stop = tempE
)))
old.ind <- idx.name
## Indicators requested but not yet in the cache.
new.ind <- setdiff(indicators, idx.name)
## Snap endDate to a trading day as in the first-download case.
w_tdays_data<-w.tdays(endDate,endDate)$Data
if(length(w_tdays_data)==0){
endDate=w.tdaysoffset(0,endDate)$Data[1,1]
}
if ((last.date < as.Date(endDate, "%Y-%m-%d"))) {
## Append rows for the dates after the cached end date; only the
## already-cached indicators (idx.name) are fetched here.
last.date <- as.Date(tail(index(x), 1), "%Y-%m-%d")
startDate <- as.Date(startDate, "%Y-%m-%d")
endDate <- as.Date(endDate, "%Y-%m-%d")
startDate <- last.date + 1
### update the date
y <-
w.wsd(sym,
idx.name,
startDate,
endDate,
"Currency=CNY;PriceAdj=F")$Data
y <- as.xts(y[,-1], order.by = as.Date(y[[1]], "%Y-%m-%d"))
indexFormat(y) <- '%Y-%m-%d'
indexTZ(y) <- "UTC"
idx.open <- match.names("OPEN", colnames(y))
idx.high <- match.names("HIGH", colnames(y))
idx.low <- match.names("LOW", colnames(y))
idx.close <- match.names("CLOSE", colnames(y))
## Same price-imputation rules as the first-download branch.
y[, idx.close] <-
ifelse(is.na(y[, idx.close]), rowMeans(y[, c(idx.open, idx.high, idx.low)], na.rm = T), y[, idx.close])
y[, idx.close] <-
ifelse(is.na(y[, idx.close]), 0, y[, idx.close])
y[, idx.high] <-
ifelse(is.na(y[, idx.high]), y[, idx.close], y[, idx.high])
y[, idx.low] <-
ifelse(is.na(y[, idx.low]), y[, idx.close], y[, idx.low])
y[, idx.open] <-
ifelse(is.na(y[, idx.open]), y[, idx.close], y[, idx.open])
names(y) <- names(x)
## Stack the new rows under the cached history and save.
x <- rbind(x, y)
assign(sym, x)
save(list = c(sym),
file = paste0(dataPath, "/", sym, ".RData"))
print(
paste0(
"Have updated",
paste0(sym, collapse = ","),
"data",
",Have updated the indicators of ",
paste0(old.ind, collapse = ","),
",and update the date to",
endDate
)
)
}
if (length(new.ind) > 0) {
## Append columns for newly requested indicators over the whole
## cached date span.
## NOTE(review): this load() refreshes the global copy, but the local
## `x` used below is not re-read from it (it is already current if
## the row-append branch ran); the load looks redundant -- confirm.
load(paste0(dataPath, "/", sym, ".RData"), envir = .GlobalEnv)
last.date <- as.Date(tail(index(x), 1), "%Y-%m-%d")
startDate <- as.Date(startDate, "%Y-%m-%d")
endDate <- as.Date(endDate, "%Y-%m-%d")
w_tdays_data<-w.tdays(endDate,endDate)$Data
if(length(w_tdays_data)==0){
endDate=w.tdaysoffset(0,endDate)$Data[1,1]
}
## update the indicators
if (length(new.ind) > 0) {
## Fetch the new indicators across the full cached index range.
sDate <- head(index(x), 1)
eDate <- tail(index(x), 1)
y <-
w.wsd(sym, new.ind, sDate, eDate, "Currency=CNY;PriceAdj=F")$Data
y <-
as.xts(y[,-1], order.by = as.Date(y[[1]], "%Y-%m-%d"))
indexFormat(y) <- '%Y-%m-%d'
indexTZ(y) <- "UTC"
colnames(y) <- paste0(sym, ".", new.ind)
x <- cbind(x, y)
}
assign(sym, x)
save(list = c(sym),
file = paste0(dataPath, "/", sym, ".RData"))
print(
paste0(
"Have updated",
paste0(sym, collapse = ","),
"data",
",Have updated the indicators of ",
paste0(new.ind, collapse = ","),
",and update the date to",
endDate
)
)
}
}else{
## --- Case 3: nothing to do. ---
print(paste0("There is no need to update,the ", sym, " is the newest"))
}
}
|
579520efab7246ceb87ca6fe751f5f8216f94b2b | 6c11f430941d2a0c7cc6a33d843ffa6a95e67068 | /R/colours.R | 2deeb06677ac8cf34170696c248cc212c29b7d3b | [] | no_license | jonathananolan/grattantheme | 183bc038cb55e6b3434159315e64f8e521775fdf | 7184bf045f95c0d536642c973b834da8c7a4ac51 | refs/heads/master | 2020-04-25T07:55:09.692320 | 2019-02-26T00:03:58 | 2019-02-26T00:03:58 | 172,628,526 | 1 | 0 | null | 2019-02-26T03:10:27 | 2019-02-26T03:10:26 | null | UTF-8 | R | false | false | 1,496 | r | colours.R | #' Hex code for the colour: Grattan dark orange (carrot soup)
#'
#' #D4582A
#'
"grattan_darkorange"
#' Hex code for the colour: Grattan dark red (kidney bean soup)
#'
#' #621214
#'
"grattan_darkred"
#' Hex code for the colour: Grattan grey 1 (truffle soup)
#'
#' #D9D9D9
#'
"grattan_grey1"
#' Hex code for the colour: Grattan grey 2 (cream of mushroom soup)
#'
#' #AEAEAE
#'
"grattan_grey2"
#' Hex code for the colour: Grattan grey 3 (morel mushroom soup)
#'
#' #828282
#'
"grattan_grey3"
#' Hex code for the colour: Grattan grey 4 (shiitake mushroom soup)
#'
#' #575757
#'
"grattan_grey4"
#' Hex code for the colour: Grattan grey 5 (blackbean soup)
#'
#' #2B2B2B
#'
"grattan_grey5"
#' Hex code for the colour: Grattan 'forecast shading' grey
#'
#' #E1E3E5
#'
"grattan_grey_alpha"
#' Hex code for the colour: Grattan title grey
#'
#' #6A737B
#'
"grattan_grey_title"
#' Hex code for the grey used for gridlines in Grattan charts
#'
#' #C3C7CB
#'
"grattan_gridlinegrey"
#' Hex code for the colour: Grattan light orange (sweet potato soup)
#'
#' #F68B33
#'
"grattan_lightorange"
#' Hex code for the colour: Grattan light yellow (corn chowder)
#'
#' #FFE07F
#'
"grattan_lightyellow"
#' Hex code for the colour used for Grattan box backgrounds (orange alpha)
#'
#' #FEF0DE
#'
"grattan_orange_alpha"
#' Hex code for the colour: Grattan red (tomato soup)
#'
#' #A02226
#'
"grattan_red"
#' Hex code for the colour: Grattan yellow (butternut pumpkin soup)
#'
#' #FFC35A
#'
"grattan_yellow"
|
36c3b84aaddab290f63348758f7e1e5a454d9b2a | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/bcp/R/bcp.R | 3a3570d092123ae500a90f51e113007ec45e2c14 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 27,645 | r | bcp.R | #
# bcp: an R package for performing a Bayesian analysis
# of change point problems.
#
# Copyright (C) 2011 Chandra Erdman and John W. Emerson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, a copy is available at
# http://www.r-project.org/Licenses/
#
#-------------------
# FILE: bcp.R
#' @title Performs a Bayesian analysis of change point problems
#'
#' @description
#' \code{bcp()} implements the Bayesian change point analysis methods given in Wang and Emerson (2015), of which the Barry and Hartigan (1993) product
#' partition model for the normal errors change point problem is a specific case. 1. Multivariate (or univariate) Bayesian change point analysis: We assume there exists an unknown partition of a data series y
#' into blocks such that the mean is constant within each block. In the multivariate
#' case, a common change point structure
#' is assumed; means are constant within each block of each sequence, but may differ across sequences
#' within a given block. Conditional on the partition, the model assumes that observations are independent, identically distributed normal, with constant means within blocks and
#' constant variance throughout each sequence.
#' 2. Linear regression Bayesian change point analysis: As with the previous model, we assume the observations (x,y), where x may be multivariate, are partitioned into blocks, and that linear models are appropriate within each block.
#'
#' If an adjacency structure is provided, the data are assumed to reside on nodes of a graph with the given adjacency structure; additional parameters are used in this graph change point model. If no adjacency structure is provided, the data are assumed to be sequential and the blocks are forced to be contiguous.
#'
#' @details
#' The primary result is an estimate of the posterior mean (or its distribution if
#' \code{return.mcmc} is \code{TRUE}) at every location. Unlike a frequentist or
#' algorithmic approach to the problem, these estimates will not be constant within
#' regions, and no single partition is identified as best. Estimates of the
#' probability of a change point at any given location are provided, however.
#'
#' The user may set \code{.Random.seed} to control the MCMC iterations.
#'
#' The functions \code{\link{summary.bcp}}, \code{\link{print.bcp}}, and \code{\link{plot.bcp}} are
#' used to obtain summaries of the results; \code{\link{legacyplot}} is included
#' from package versions prior to 3.0.0 and will only work for univariate change
#' point analyses.
#'
#' @param y a vector or matrix of numerical data (with no missing values). For
#' the multivariate change point problems, each column corresponds to a series.
#' @param x (optional) a matrix of numerical data (with no missing values) representing the predicting variables for a linear regression.
#' @param id (optional) a vector of integers specifying the location ID for each observation in \code{y}, starting from location 1.
#' @param adj (optional) an adjacency list. Indexing the observations from 1 to \eqn{n}, the \eqn{i}-th element of the list is a vector of indices (offset by 1) for nodes that share an edge with node \eqn{i}.
#' @param w0 (optional) a single numeric value in the multivariate case or a vector of values in the regression case; in both, the value(s), between 0 and 1, is/are the parameter(s) in the uniform prior(s) on the signal-to-noise
#' ratio(s). If no value is specified, the default value of 0.2 is used, as
#' recommended by Barry and Hartigan (1993).
#' @param p0 (optional) a value between 0 and 1. For sequential data, it is the parameter of the prior on change point probabilities, \eqn{U(0,}\code{ p0}\eqn{)}, on the probability
#' of a change point at each location in the sequence; for data on a graph, it is the parameter in the partition prior, \code{p0}\eqn{^{l(\rho)}}, where \eqn{l(\rho)} is the boundary length of the partition.
#' @param d (optional) a positive number only used for linear regression change point models. Lower \code{d} means higher chance of fitting the full linear model (instead of the intercept-only model); see prior for \eqn{\tau_S} in Wang and Emerson (2015).
#' @param burnin the number of burnin iterations.
#' @param mcmc the number of iterations after burnin.
#' @param return.mcmc logical. If set to \code{TRUE}, the posterior means and the partitions
#' in each iteration are returned.
#' @param boundaryType (optional) only applicable for graph change point analysis. Values can be ``node'' (default) if we count nodes in the boundary length calculation, or ``edge'' if we count edges in the boundary length calculation. See Wang and Emerson (2015) for details.
#' @param p1 (optional) only applicable for graph change point analysis. The proportion of Active Pixel Passes run that are the actual Active Pixel Passes specified in Barry and Hartigan (1994). \code{p1 = 0} corresponds to exclusively using the pseudo-Active Pixel Passes given in Wang and Emerson (2015).
#' @param freqAPP (optional) only applicable for graph change point analysis. A positive integer for the number of Active Pixel Passes run in each step of the MCMC algorithm.
#' @param nreg (optional) only applicable for regression; related to parameter
#' \code{d} describing the minimum number of observations needed in a block to allow for fitting a regression model. Defaults to 2*number of predictors.
#'
#' @return Returns a list containing the following components:
#' \describe{
#' \item{data}{a copy of the data.}
#' \item{return.mcmc}{\code{TRUE} or \code{FALSE} as specified by the user; see the arguments, above.}
#' \item{mcmc.means}{if \code{return.mcmc=TRUE}, \code{mcmc.means} contains the means for each iteration conditional on the state of the partition.}
#' \item{mcmc.rhos}{if \code{return.mcmc=TRUE}, \code{mcmc.rhos} contains the partitions after each iteration. A value of 1 indicates the end of a block.}
#' \item{blocks}{a vector of the number of blocks after each iteration.}
#' \item{posterior.mean}{a vector or matrix of the estimated posterior means. In the regression case, the matrix includes posterior means for the response variable.}
#' \item{posterior.var}{a vector or matrix of the estimated posterior variances. In the regression case, the estimated posterior variances of the response are provided.}
#' \item{posterior.prob}{a vector of the estimated posterior probabilities of changes at each location.}
#' \item{burnin}{the number of burnin iterations.}
#' \item{mcmc}{the number of iterations after burnin.}
#' \item{w0}{see the arguments, above.}
#' \item{p0}{see the arguments, above.}
#' }
#' @references
#' \enumerate{
#' \item J. Bai and P. Perron (2003), Computation and Analysis of Multiple Structural Change Models, \emph{Journal of Applied Econometrics}, \bold{18}, 1-22. \url{http://qed.econ.queensu.ca/jae/2003-v18.1/bai-perron/}.
#'
#' \item Daniel Barry and J. A. Hartigan (1993), A Bayesian Analysis for Change Point Problems, \emph{Journal of The American Statistical Association}, \bold{88}, 309-19.
#'
#' \item Daniel Barry and J. A. Hartigan (1994), A Product Partition Model for Image Restoration, \emph{New Directions in Statistical Data Analysis and Robustness}, (Monte Verita : Proceedings of the Cento Stefano Franscini Ascona), Birkhauser, 9-23.
#'
#' \item Chandra Erdman and John W. Emerson (2008), A Fast Bayesian Change Point Analysis for the Segmentation of Microarray Data, \emph{Bioinformatics}, 24(19), 2143-2148. \url{https://www.ncbi.nlm.nih.gov/pubmed/18667443}.
#'
#' \item Chandra Erdman and John W. Emerson (2007), bcp: An R Package for Performing a Bayesian Analysis of Change Point Problems. \emph{Journal of Statistical Software}, 23(3), 1-13. \url{http://www.jstatsoft.org/v23/i03/}.
#'
#' \item A. B. Olshen, E. S. Venkatraman, R. Lucito, M. Wigler (2004), Circular binary segmentation for the analysis of array-based DNA copy number data, \emph{Biostatistics}, \bold{5}, 557-572. \url{http://www.bioconductor.org/packages/release/bioc/html/DNAcopy.html}.
#'
#' \item Snijders \emph{et al.} (2001), Assembly of microarrays for genome-wide measurement of DNA copy number, \emph{Nature Genetics}, \bold{29}, 263-264.
#'
#' \item Xiaofei Wang and John W. Emerson (2015). Bayesian Change Point Analysis of Linear Models on General Graphs, \emph{Working Paper}.
#'
#' \item Achim Zeileis, Friedrich Leisch, Kurt Hornik, Christian Kleiber (2002), strucchange: An R Package for Testing for Structural Change in Linear Regression Models, \emph{Journal of Statistical Software}, \bold{7}(2), 1-38. \url{http://www.jstatsoft.org/v07/i02/}.
#' }
#' @author Xiaofei Wang, Chandra Erdman, and John W. Emerson
#' @seealso \code{\link{plot.bcp}}, \code{\link{summary.bcp}}, and \code{\link{print.bcp}} for summaries of the results.
#' @import graphics
#' @examples
#'
#' ##### univariate sequential data #####
#' # an easy problem with 2 true change points
#' set.seed(5)
#' x <- c(rnorm(50), rnorm(50, 5, 1), rnorm(50))
#' bcp.1a <- bcp(x)
#' plot(bcp.1a, main="Univariate Change Point Example")
#' legacyplot(bcp.1a)
#'
#' # a hard problem with 1 true change point
#' set.seed(5)
#' x <- rep(c(0,1), each=50)
#' y <- x + rnorm(50, sd=1)
#' bcp.1b <- bcp(y)
#' plot(bcp.1b, main="Univariate Change Point Example")
#'
#' ##### multivariate sequential data #####
#' # an easy problem in k=3 dimensions
#' set.seed(5)
#' x <- rnorm(6, sd=3)
#' y <- rbind(cbind(rnorm(50, x[1]), rnorm(50, x[2]), rnorm(50, x[3])),
#' cbind(rnorm(50, x[4]), rnorm(50, x[5]), rnorm(50, x[6])))
#' bcp.2a <- bcp(y)
#' plot(bcp.2a, main="Multivariate (k=3) Change Point Example")
#' plot(bcp.2a, separated=TRUE, main="Multivariate (k=3) Change Point Example")
#'
#' # a harder problem in k=5 dimensions
#' set.seed(5)
#' means1 <- rep(0, 5)
#' means2 <- rep(1, 5)
#' x <- rbind(matrix(rep(means1, each=50), nrow=50),
#' matrix(rep(means2, each=50), nrow=50))
#' y <- x + rnorm(length(x), sd=1)
#' bcp.2b <- bcp(cbind(y))
#' plot(bcp.2b, main="Multivariate (k=5) Change Point Example")
#'
#' ##### linear models with sequential data #####
#' # 1 true change point at location 50; the predicting variable x is not related to location
#' x <- rnorm(100)
#' b <- rep(c(3,-3), each=50)
#' y <- b*x + rnorm(100)
#' bcp.3a <- bcp(y, x)
#' # in the two plots that follow, the location IDs are used as the plot characters
#' par(mfrow=c(1,2))
#' plot(y ~ x, type="n", main="Linear Regression: Raw Data")
#' text(x, y, as.character(1:100), col=(b/3)+2)
#' plot(y ~ x, type="n", main="Linear Regression: Posterior Means")
#' text(x, bcp.3a$posterior.mean[,1], as.character(1:100), col=(b/3)+2)
#' plot(bcp.3a, main="Linear Regression Change Point Example")
#'
#' # 1 true change point at location 50; the predicting variable x is equal to location
#' x <- 1:100
#' b <- rep(c(3,-3), each=50)
#' y <- b*x + rnorm(100, sd=50)
#' bcp.3b <- bcp(y, x)
#' plot(bcp.3b, main="Linear Regression Change Point Example")
#'
#' ##### univariate data on a grid #####
#' \dontrun{
#' set.seed(5)
#' adj <- makeAdjGrid(20)
#' z <- rep(c(0, 2), each=200)
#' y <- z + rnorm(400, sd=1)
#' out <- bcp(y, adj=adj, burnin=500, mcmc=500)
#'
#' if (require("ggplot2")) {
#' df <- data.frame(mean=z, data = y, post.means = out$posterior.mean[,1],
#' post.probs = out$posterior.prob,
#' i = rep(1:20, each=20), j = rep(1:20, times=20))
#'
#' # visualize the means
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = mean), color='white') +
#' scale_fill_gradientn(limits=range(y), colours=c('white', 'steelblue'))+
#' ggtitle("True Means")
#' print(g)
#'
#' # visualize the data
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = data), color='white') +
#' scale_fill_gradientn(limits=range(y), colours=c('white', 'steelblue'))+
#' ggtitle("Observed Data")
#' print(g)
#'
#' # visualize the posterior means/probs
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = post.means), color='white') +
#' scale_fill_gradientn(limits=range(y), colours=c('white', 'steelblue'))+
#' ggtitle("Posterior Means")
#' print(g)
#'
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = post.probs), color='white') +
#' scale_fill_gradientn(limits=c(0, 1), colours=c('white', 'steelblue'))+
#' ggtitle("Posterior Boundary Probabilities")
#' print(g)
#' }
#' }
#'
#' \dontrun{
#' ##### multivariate data on a grid #####
#' set.seed(5)
#' x <- rnorm(6, sd=3)
#' y <- rbind(cbind(rnorm(50, x[1]), rnorm(50, x[2]), rnorm(50, x[3])),
#' cbind(rnorm(50, x[4]), rnorm(50, x[5]), rnorm(50, x[6])))
#' adj <- makeAdjGrid(10)
#' a <- bcp(y, adj=adj, p0=0.4, burnin=500, mcmc=500)
#'
#' ##### linear models on a grid #####
#' set.seed(5)
#' x <- rnorm(100)
#' b <- rep(c(3,-3), each=50)
#' y <- b*x + rnorm(100)
#' adj <- makeAdjGrid(10)
#' a <- bcp(y,x,adj=adj, p0=0.4, burnin=500, mcmc=500)
#'
#' ##### linear models on a grid using pseudo-APPs #####
#' x <- rnorm(100)
#' b <- rep(c(3,-3), each=50)
#' y <- b*x + rnorm(100)
#' adj <- makeAdjGrid(10)
#' a <- bcp(y,x,adj=adj, p0=0.4, burnin=500, mcmc=500, p1 = 0)}
#'
#' ##### univariate data on a graph #####
#' \dontrun{
#' demo(bcpgraph)
#' }
#'
#' ###### Real Data Examples ######
#' \dontrun{
#' # Coriell chromosome 11: univariate sequential data
#' demo(coriell)
#'
#' # U.S. ex-post interest rate: univariate sequential data
#' demo(RealInt)
#'
#' # Lombard: univariate sequential data (with and without linear models)
#' demo(Lombard)
#'
#' # Quebec rivers: multivariate sequential data
#' demo(QuebecRivers)
#'
#' # New Haven housing: linear models on a graph
#' demo(NewHaven)
#' }
#'
#' @keywords datasets
#' @export
"bcp" <- function(y, x=NULL, id = NULL, adj=NULL, w0=NULL, p0=0.2, d = 10,
burnin=50, mcmc=500, return.mcmc=FALSE,
boundaryType = "node", p1 = 1, freqAPP = 20,
nreg = -1) {
######################################################
########################### BEGIN THE WORKER FUNCTION:
######################################################
"worker.bcp" <- function(mcmc, y, x, id, w0, p0, d, burnin, return.mcmc, membinit,
boundaryType, adj, p1, freqAPP, nreg) {
require(bcp)
# INITIALIZATION
# if (is.data.frame(x)) x <- matrix(as.double(x), nrow=nrow(x), ncol=ncol(x))
# if (is.vector(x)) x <- matrix(as.double(x), ncol=1)
# if (!is.matrix(x)) stop("x must be a vector, matrix or a data frame")
# if (nrow(x)==1) {
# warning("coercing data to a single series")
# x <- matrix(as.vector(x), ncol=1)
# }
if (p0 > 1 | p0 < 0 | p1 > 1 | p1 < 0) stop ("p0 and p1 must each be between 0 and 1.")
if (is.null(id)) {
if (is.matrix(y)) id <- 1:nrow(y)
else id <- 1:length(y)
}
if (min(id) == 1) id <- id - 1
if (is.null(x)) {
# doing multivariate bcp
if (is.null(w0)) w0 <- 0.2
if (w0 > 1 | w0 < 0) stop("w0 must be between 0 and 1.")
if (is.vector(y)) y <- cbind(y)
colnames(y) <- NULL
rownames(y) <- NULL
# Do the work in C:
if (is.null(adj)) {
out <- rcpp_bcpM(
as.matrix(y), as.integer(id),
as.integer(return.mcmc),
as.integer(burnin),
as.integer(mcmc),
as.double(p0),
as.double(w0))
attr(out, "structure") <- "series"
} else {
if (freqAPP < 0) stop("freqAPP must be a positive integer.")
out <- rcpp_ppm(y, as.integer(id),
adj,
as.integer(return.mcmc),
as.integer(burnin),
as.integer(mcmc),
as.double(p0),
as.double(w0),
membinit,
as.integer(boundaryType),
as.double(p1),
as.integer(freqAPP))
attr(out, "structure") <- "graph"
}
attr(out, "model") <- "multivariate"
out$data <- cbind(id+1, y)
if (is.null(colnames(x))) {
colnames(out$posterior.mean) <- paste(rep("X", ncol(out$data)-1),
1:(ncol(out$data)-1), sep="")
} else
colnames(out$posterior.mean) <- colnames(x)
} else {
# doing regression bcp
if (is.vector(x)) x <- cbind(x)
if (sum(x[,1]==1) != nrow(x)) x <- cbind(1, x)
if (!is.double(x)) x <- matrix(as.double(x), nrow(x), ncol(x))
if (is.null(w0)) {
w0 <- rep(0.2, ncol(x))
} else {
if (any(w0 > 1) | any(w0 < 0)) stop("Each element in w0 must be between 0 and 1.")
if (length(w0) != ncol(x)) {
if (length(w0) == 1) {
w0 <- rep(w0, ncol(x))
print("Incorrect length for w0. I'll assume you wanted each error-to-signal ratio to be iid from U(0, w0).")
} else stop("Incorrect length for w0.")
}
}
if (nreg == -1) {
nreg <- 2*(ncol(x)-1)
}
indmat <- t(sapply(unique(id), function(y) id == y)*1)
# Do the work in C:
if (is.null(adj)) {
out <- rcpp_bcpR(
as.double(y),
x,
indmat,
as.integer(id),
as.integer(return.mcmc),
as.integer(burnin),
as.integer(mcmc),
as.double(p0),
as.double(w0),
as.double(d),
as.integer(nreg)
)
out$data = cbind(id+1, y,x[,-1])
attr(out, "structure") <- "series"
} else {
if (freqAPP < 0) stop("freqAPP must be a positive integer.")
out <- rcpp_ppmR(y, x,
indmat,
as.integer(id),
adj,
as.integer(return.mcmc),
as.integer(burnin),
as.integer(mcmc),
as.double(p0),
as.double(w0),
membinit,
as.integer(boundaryType),
as.double(d),
as.double(p1),
as.integer(freqAPP),
as.integer(nreg))
out$data = cbind(id+1, y,x)
attr(out, "structure") <- "graph"
}
attr(out, "model") <- "regression"
out$posterior.mean <- cbind(as.numeric(out$posterior.mean)/table(id))
rownames(out$posterior.mean) <- NULL
colnames(out$posterior.mean) <- "y"
}
if (attr(out, "structure") == "series")
out$posterior.prob[length(out$posterior.prob)] <- NA # Fix up the last position, always NA
# RETURN RESULTS
z <- list(data=out$data,
return.mcmc=return.mcmc,
mcmc.means=out$mcmc.means,
mcmc.rhos=out$mcmc.rhos,
blocks=out$blocks,
posterior.mean=out$posterior.mean,
posterior.var=out$posterior.var,
posterior.prob=out$posterior.prob,
burnin=burnin,
mcmc=mcmc,
p0=p0,
w0=w0)
attr(z, "model") <- attr(out, "model")
attr(z, "structure") <- attr(out, "structure")
class(z) <- "bcp"
return(z)
}
###################################################
########################### END THE WORKER FUNCTION
########################### BEGIN THE MAIN SECTION:
###################################################
# Function header and foreach setup, from above:
#
#"bcp" <- function(x, w0=0.2, p0=0.2, burnin=50, mcmc=500, return.mcmc=FALSE) {
#
if (!is.null(adj)) {
if (boundaryType == "node") {
boundaryType <- 1
} else {
boundaryType <- 2
}
if (is.vector(y)) {
dataToRank <- y
} else if (is.matrix(y)) {
dataToRank <- y[,1]
}
if (!is.null(id)) { # varying num of obs per loc, we'll sample one obs per loc to rank
inds <- sapply(1:max(id), function(g) {
inds <- which(id==g)
if (length(inds)==1) return(inds)
return(sample(inds, 1))
})
dataToRank <- dataToRank[inds]
}
numNodes <- length(dataToRank)
# if (is.null(membinit)) {
Minit <- ceiling(sqrt(numNodes))
o <- rank(dataToRank, ties.method="first")
membinit <- ceiling(o/Minit)
membinit <- pmin(membinit, Minit)-1
# } else if (length(membinit) == 1) {
# Minit <- ceiling(numNodes/membinit)
# o <- rank(dataToRank, ties.method="first")
# membinit <- ceiling(o/Minit)
# membinit <- pmin(membinit, Minit)-1
# } else {
# nComponents <- max(membinit)
# if (length(setdiff(unique(membinit), 1:nComponents))>0) {
# stop("Error in membinit")
# } else {
# membinit <- membinit-1
# }
# }
relabelMap <- order(unique(membinit))
membinit <- relabelMap[membinit+1]-1
}
ans <- worker.bcp(mcmc, y=y, x=x, id=id, w0=w0, p0=p0, d=d,
burnin=burnin, return.mcmc=return.mcmc,
membinit=membinit, boundaryType=boundaryType,
adj=adj, p1=p1, freqAPP=freqAPP, nreg=nreg)
# ==================================
# === Reformat the mcmc.means ===
# ==================================
if (return.mcmc) {
if (ncol(ans$mcmc.means) > 1) {
mcmc.means <- vector('list', ncol(ans$mcmc.means))
for (i in 1:length(mcmc.means)) {
mcmc.means[[i]] <- matrix(ans$mcmc.means[,i], nrow=burnin+mcmc, byrow=TRUE)
}
} else {
mcmc.means <- matrix(ans$mcmc.means, nrow=burnin+mcmc, byrow=TRUE)
}
ans$mcmc.means <- mcmc.means
}
return(ans)
}
#' @title Creating the adjacency structure for grid graphs
#'
#' @description
#' \code{makeAdjGrid()} produces a sparse representation of the adjacency structure for grid graphs, useful as the \code{adj} argument in \code{bcp()}.
#'
#' @param n the number of rows of vertices in the graph data.
#' @param m (optional) the number of column of vertices in the graph data. If not given, we assume \code{m = n}.
#' @param k (optional) the number of neighbors assumed for a typical vertex (see details below), either 4 or 8. Default number of neighbors is assumed to be 8.
#'
#' @author Xiaofei Wang
#' @details
#' \code{makeAdjGrid()} produces a list representation of the adjacency structure for grid graphs. The \eqn{i}-th entry in the list gives a vector of neighbor ids for the \eqn{i}-th node. Note that neighbor ids are offset by 1 because indexing starts at 0 in C++.
#' If \code{k = 8}, then we assume each node is joined via edges to its 8 neighbors in the (top left, top middle, top right, left, right, bottom left, bottom middle, and bottom right) directions, where applicable. If \code{k = 4}, then we assume each node is joined via edges to its 4 neighbors in the (top, right, bottom, left) directions, where applicable.
#' @seealso \code{\link{bcp}} for performing Bayesian change point analysis.
#' @examples
#' # generates an adjacency list for a 10 node by 5 node grid, assuming a maximum of 8 neighbors
#' adj <- makeAdjGrid(10, 5)
#'
#' # generates an adjacency list for a 10 node by 5 node grid, assuming a maximum of 4 neighbors
#' adj4 <- makeAdjGrid(10, 5, 4)
#'
#'
#' ### show a grid example
#' \dontrun{
#' set.seed(5)
#' adj <- makeAdjGrid(20)
#' z <- rep(c(0, 2), each=200)
#' y <- z + rnorm(400, sd=1)
#' out <- bcp(y, adj=adj, burnin=500, mcmc=500)
#'
#' if (require("ggplot2")) {
#' df <- data.frame(mean=z, data = y, post.means = out$posterior.mean[,1],
#' post.probs = out$posterior.prob,
#' i = rep(1:20, each=20), j = rep(1:20, times=20))
#'
#' # visualize the data
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = data), color='white') +
#' scale_fill_gradientn(limits=range(y), colours=c('white', 'steelblue'))+
#' ggtitle("Observed Data")
#' print(g)
#'
#' # visualize the means
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = mean), color='white') +
#' scale_fill_gradientn(limits=range(y), colours=c('white', 'steelblue'))+
#' ggtitle("True Means")
#' print(g)
#'
#' # visualize the posterior means/probs
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = post.means), color='white') +
#' scale_fill_gradientn(limits=range(y), colours=c('white', 'steelblue'))+
#' ggtitle("Posterior Means")
#' print(g)
#'
#' g <- ggplot(df, aes(i,j)) +
#' geom_tile(aes(fill = post.probs), color='white') +
#' scale_fill_gradientn(limits=c(0, 1), colours=c('white', 'steelblue'))+
#' ggtitle("Posterior Boundary Probabilities")
#' print(g)
#' }
#' }
#'
#' @keywords datasets
#' @export
makeAdjGrid <- function(n,m=NULL, k=8) {
  # Build the adjacency list of an n-row by m-column grid graph.
  # Nodes are indexed column-major: node (i, j) -> (j - 1) * n + i (1-based).
  # Each list entry holds that node's neighbour ids, shifted to 0-based
  # (the trailing "- 1") because the consumer is C++ code (see the roxygen
  # details above).
  if (is.null(m)) m <- n
  adj <- vector('list', n*m)
  if (k == 8) {
    # Interior nodes: all 8 surrounding cells.
    for (i in 2:(n-1)) {
      for (j in 2:(m-1)) {
        adj[[(j-1)*n+i]] <- c((j-2)*n+(i-1):(i+1),(j-1)*n+i-c(1,-1), j*n+(i-1):(i+1))-1
      }
    }
    # Edge rows/columns (5 neighbours each).
    i <- 1
    for (j in 2:(m-1)) adj[[(j-1)*n+i]] <- c((j-2)*n+1:2, (j-1)*n+2, (j)*n+1:2)-1
    i <- n
    for (j in 2:(m-1)) adj[[(j-1)*n+i]] <- c((j-1)*n-1:0, j*n-1, (j+1)*n-1:0)-1
    j <- 1
    for (i in 2:(n-1)) adj[[(j-1)*n+i]] <- c(i-1,i+1, n+(i-1):(i+1))-1
    j <- m
    for (i in 2:(n-1)) adj[[(j-1)*n+i]] <- c((m-2)*n+(i-1):(i+1), (m-1)*n+i-1, (m-1)*n+i+1)-1
    # Corners (3 neighbours each).
    adj[[1]] <- c(2, n+1:2)-1
    adj[[n]] <- c((1:2)*n-1,2*n)-1
    adj[[(m-1)*n+1]] <- c((m-2)*n+1:2, (m-1)*n+2)-1
    adj[[n*m]] <- c((m-1)*n-(1:0), n*m-1)-1
  } else if (k == 4) {
    # Interior nodes: up/down within the column plus the same row in the
    # previous and next columns.
    for (i in 2:(n-1)) {
      for (j in 2:(m-1)) {
        adj[[(j-1)*n+i]] <- c((j-2)*n+i,(j-1)*n+i-c(1,-1), j*n+i)-1
      }
    }
    i <- 1
    for (j in 2:(m-1)) adj[[(j-1)*n+i]] <- c((j-2)*n+1, (j-1)*n+2, (j)*n+1)-1
    i <- n
    for (j in 2:(m-1)) adj[[(j-1)*n+i]] <- c((j-1)*n, j*n-1, (j+1)*n)-1
    j <- 1
    for (i in 2:(n-1)) adj[[(j-1)*n+i]] <- c(i-1,i+1, n+i)-1
    j <- m
    # Bug fix: this line previously used (n-2)*m+i, (n-1)*m+i-1 and
    # (n-1)*m+i+1, i.e. n and m swapped. That coincides with the correct
    # indices only on square grids (n == m) and silently produced wrong
    # neighbours otherwise. For the last column the neighbours are:
    # left (m-2)*n+i, up (m-1)*n+i-1, down (m-1)*n+i+1 (compare the k == 8
    # branch above, which was already correct).
    for (i in 2:(n-1)) adj[[(j-1)*n+i]] <- c((m-2)*n+i, (m-1)*n+i-1, (m-1)*n+i+1)-1
    # Corners (2 neighbours each).
    adj[[1]] <- c(2, n+1)-1
    adj[[n]] <- c(n-1,2*n)-1
    adj[[(m-1)*n+1]] <- c((m-2)*n+1, (m-1)*n+2)-1
    adj[[n*m]] <- c((m-1)*n, n*m-1)-1
  } else {
    stop("Error: k must be 4 or 8.")
  }
  return(adj)
}
|
10f75e83bf7e2cd921375b3ac2d0dee640740836 | 8d1e47c19d81d6d71beb96a14e0c322e94321d32 | /R/meta-open_pkgdown.R | 4197565036bd7325066a96e3e0477b9841327eb2 | [
"MIT"
] | permissive | jimbrig/lossrx | 876316f52313351945d8d61b651bec66f3db0184 | cac11bc51886ec01f94178a499c9f92983bc3c8e | refs/heads/main | 2023-08-22T13:02:44.043512 | 2023-08-04T23:49:30 | 2023-08-04T23:49:30 | 421,496,897 | 7 | 2 | NOASSERTION | 2023-09-14T12:05:34 | 2021-10-26T16:14:20 | R | UTF-8 | R | false | false | 368 | r | meta-open_pkgdown.R | #' Open pkgdown site of the package
#'
#' @importFrom utils browseURL
#'
#' @export
#'
#' @examples
#' # open_pkgdown()
open_pkgdown <- function() {
  # Locate the pkgdown site bundled with the installed package;
  # system.file() returns "" when the requested file does not exist.
  site <- system.file('docs/index.html', package = 'lossrx')
  if (identical(site, "")) {
    stop('There is no pkgdown site in ', 'docs/index.html')
  }
  # Hand the local file off to the default browser.
  browseURL(paste0('file://', site))
}
|
7ddf1da7dfd715d12ad2e797cd7a46c875a641bf | 3cfa8a29d52ee4d79b70c3e4e2ef669ae6c27829 | /misc_feb_2021.R | 8bb10412cee7093f3b294a4b429c9c8ff3f14dac | [] | no_license | liam-crow/AFL_scripts | 8fe352ad5b899edee11d61fc874bef30403dd55a | 4787d28e1a86325c7a83d19cdfe298b251100671 | refs/heads/master | 2022-08-21T05:12:48.341377 | 2022-08-17T14:49:00 | 2022-08-17T14:49:00 | 219,985,329 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,842 | r | misc_feb_2021.R | library(dplyr)
library(tidyr)
library(lubridate)
library(fitzRoy)
source("load_afltables.R")
source("fryzigg_data.R")
# --- AFLW data pull -------------------------------------------------------
# Fetch AFLW data via fitzRoy; a session cookie is required by the stats API
# before the player stats can be downloaded.
fitzRoy::get_aflw_detailed_data()
cookie <- get_aflw_cookie()
aflw_player_stats <- get_aflw_player_stats()
glimpse(aflw_player_stats)
# Metres gained per fantasy point (mpf); drop divide-by-zero rows (Inf) and
# very low fantasy scores, then browse interactively.
aflw_player_stats %>%
  select(date, fixture_round, player_name, fantasy_score, metres_gained) %>%
  mutate(mpf = metres_gained/fantasy_score) %>%
  filter(mpf != Inf, fantasy_score > 10) %>%
  View()
# --- Position groups vs height / marks / free kicks (2020) ----------------
# Collapse the specific field positions into Backs / Forwards / Centres;
# anything unmatched falls through to 'other', and interchange ('INT') is
# filtered out below.
tackle_data <- fryzigg_data %>%
  select(season, player_position, intercept_marks, player_height_cm) %>%
  mutate(
    position_grouped = case_when(
      player_position == 'BPL' ~ 'Backs',
      player_position == 'BPR' ~ 'Backs',
      player_position == 'FB' ~ 'Backs',
      player_position == 'CHB' ~ 'Backs',
      player_position == 'HBFL' ~ 'Backs',
      player_position == 'HBFR' ~ 'Backs',
      player_position == 'FPL' ~ 'Forwards',
      player_position == 'FPR' ~ 'Forwards',
      player_position == 'FF' ~ 'Forwards',
      player_position == 'CHF' ~ 'Forwards',
      player_position == 'HFFL' ~ 'Forwards',
      player_position == 'HFFR' ~ 'Forwards',
      player_position == 'C' ~ 'Centres',
      player_position == 'R' ~ 'Centres',
      player_position == 'RK' ~ 'Centres',
      player_position == 'RR' ~ 'Centres',
      player_position == 'WL' ~ 'Centres',
      player_position == 'WR' ~ 'Centres',
      TRUE ~ 'other'
    )
  ) %>%
  drop_na() %>%
  filter(season == 2020, player_position != 'INT')
library(ggplot2)
# Height distribution per intercept-mark count, with a GAM trend overlaid.
ggplot(tackle_data, aes(y = player_height_cm, x = as.character(intercept_marks))) +
  geom_violin() + geom_point() + geom_jitter() +
  geom_smooth(aes(y = player_height_cm, x = intercept_marks), method = 'gam')
# geom_density2d()
# Player height vs free kicks for / against (2020 season only).
ggplot(fryzigg_data %>% filter(season == 2020), aes(x = as.character(free_kicks_for), y = player_height_cm)) +
  geom_violin() + geom_jitter(alpha = 0.1)
ggplot(fryzigg_data %>% filter(season == 2020), aes(x = as.character(free_kicks_against), y = player_height_cm)) +
  geom_violin() + geom_jitter(alpha = 0.1) +
  geom_smooth(inherit.aes = F, aes(x = free_kicks_against, y = player_height_cm), method = 'lm')
# Players whose derived forward-half pressure acts (total minus defensive
# half) exactly equal their defensive-half pressure acts.
fryzigg_data %>%
  select(date,season,match_round,match_winner, player_id, player_first_name, player_last_name, player_position, pressure_acts, def_half_pressure_acts, intercept_marks, turnovers, clangers, one_percenters) %>%
  drop_na() %>%
  mutate(fwd_half_pressure_acts = pressure_acts - def_half_pressure_acts) %>%
  filter(fwd_half_pressure_acts == def_half_pressure_acts) %>% View()
# Games at the Gabba where a team scored exactly 131 points.
afltables %>%
  select(season, round, venue, playing_for, opp, playing_for_score, opp_score) %>% distinct() %>%
  filter(venue == 'Gabba', playing_for_score == 131)
# Frequency of every final team score across the whole afltables history.
afltables %>%
  select(season, round, home_team, away_team, playing_for, playing_for_score, opp_score) %>% distinct() %>%
  group_by(playing_for_score) %>%
  count() %>% View()
#### palindromes ####
# Each query below pastes numeric columns into a single string and keeps the
# rows where that string reads the same backwards (reverse via
# utf8ToInt -> rev -> intToUtf8). rowwise() is needed because the reversal
# works on one string at a time.
# Games whose concatenated home/away final scores form a palindrome.
afltables %>%
  select(date, round, home_team, away_team, home_score, away_score) %>% distinct() %>%
  rowwise() %>%
  mutate(
    combined = paste0(home_score, away_score),
    backwards = intToUtf8(rev(utf8ToInt(combined))),
    sum = sum(home_score, away_score)
  ) %>%
  filter(combined == backwards) %>% View()
# Player games whose kicks+handballs digits form a palindrome (sum > 0
# excludes the trivial "00" case).
afltables %>%
  select(date, round, id, first_name, surname, kicks, handballs) %>% distinct() %>%
  rowwise() %>%
  mutate(
    combined = paste0(kicks, handballs),
    backwards = intToUtf8(rev(utf8ToInt(combined))),
    sum = sum(kicks, handballs)
  ) %>%
  filter(combined == backwards, sum > 0) %>% View()
# Final-quarter goals, behinds and total score forming a palindrome.
afltables %>%
  select(date, round, playing_for, pq_4_g, pq_4_b, playing_for_score) %>% distinct() %>%
  rowwise() %>%
  mutate(
    combined = paste0(pq_4_g, pq_4_b, playing_for_score),
    backwards = intToUtf8(rev(utf8ToInt(combined)))
  ) %>%
  filter(combined == backwards) %>% View()
# Full first-quarter scorelines (both sides) forming a palindrome.
afltables %>%
  select(date, round, playing_for, pq_1_g, pq_1_b, oq_1_g, oq_1_b) %>% distinct() %>%
  rowwise() %>%
  mutate(
    pq_score = pq_1_g*6 + pq_1_b,
    oq_score = oq_1_g*6 + oq_1_b,
    combined = paste0(pq_1_g, pq_1_b, pq_score, oq_1_g, oq_1_b, oq_score),
    backwards = intToUtf8(rev(utf8ToInt(combined)))
  ) %>%
  filter(combined == backwards) %>% View()
# Career disposal totals that are themselves palindromic numbers.
afltables %>%
  select(date, round, id, first_name, surname, disposals) %>%
  group_by(id, first_name, surname) %>%
  summarise(t_disp = sum(disposals), .groups = 'keep') %>%
  rowwise() %>%
  mutate(
    combined = paste0(t_disp),
    backwards = intToUtf8(rev(utf8ToInt(combined)))
  ) %>%
  filter(combined == backwards) %>% View()
# Games where the final goals/behinds are mirrored between the two sides
# (home goals == away behinds and vice versa).
afltables %>%
  select(season, round, home_team, away_team, hq_4_g, hq_4_b, aq_4_g, aq_4_b, home_score, away_score) %>% distinct() %>%
  filter(hq_4_g == aq_4_b, hq_4_b == aq_4_g) %>%
  mutate(margin = home_score - away_score) %>% View()
# Do players with 'foot'/'hand' in their surname kick/handball more? Totals
# since 1965 split by the two surname flags.
afltables %>%
  filter(season >= 1965) %>%
  select(id, first_name, surname, kicks, handballs) %>%
  mutate(
    foot = grepl('foot', surname, ignore.case = T),
    hand = grepl('hand', surname, ignore.case = T)
  ) %>%
  group_by(foot, hand) %>%
  summarise(t_kicks = sum(kicks), t_hballs = sum(handballs))
# Per-quarter scores from the cumulative pq_* columns.
# NOTE(review): the trailing filter() applies no condition and so is a no-op;
# looks like an unfinished exploration.
afltables %>%
  select(season, round, playing_for, starts_with('pq')) %>% distinct() %>%
  mutate(
    q1 = pq_1_g*6 + pq_1_b,
    q2 = pq_2_g*6 + pq_2_b,
    q3 = pq_3_g*6 + pq_3_b,
    q4 = pq_4_g*6 + pq_4_b
  ) %>% #View()
  filter()
# Novelty join: AFL players (2021+) whose height/weight exactly match a
# Star Wars character from dplyr's starwars dataset.
fryzigg_data %>%
  filter(season >= 2021) %>%
  select(player_id, player_first_name, player_last_name, player_height_cm, player_weight_kg) %>% distinct() %>% drop_na() %>%
  inner_join(
    starwars %>% select(name, height, mass),
    by = c('player_height_cm' = 'height', 'player_weight_kg' = 'mass')
  ) %>%
  summarise(
    comb = paste0(player_first_name,' ',player_last_name,' & ',name,': ',player_height_cm,'cm, ',player_weight_kg,'kg', collapse = '')
  ) %>% View()
# How often has each calendar day (day, month) hosted a game, and which
# fixtures were they?
afltables %>%
  select(date, home_team, away_team) %>% distinct() %>%
  mutate(m = month(date), d = day(date)) %>%
  group_by(d, m) %>%
  summarise(n = n(), years = paste(year(date), home_team, away_team, collapse = ', ')) %>%
  arrange(m, d) %>% View()
# Gap (in days) between consecutive appearances, grouped by first name.
afltables %>%
  select(date, first_name, surname) %>% #View()
  group_by(first_name) %>%
  arrange(date) %>%
  summarise(diff = date - lag(date)) #%>% View()
# Quick look at the free-kick columns.
afltables %>%
  select(season, date, round, playing_for, opp, frees_for, frees_against)
# One row per game: aggregate team stats per side, spread home/away into
# columns (suffixes _H/_A), then convert each stat to a home-minus-away
# differential alongside the score margin.
sample_data <- afltables %>%
  select(date, round, home_team, away_team, team = playing_for, kicks, handballs, tackles, inside_50_s, cont_pos = contested_possessions, uncont_pos = uncontested_possessions, home_score, away_score) %>%
  group_by(date, round, home_team, away_team) %>%
  mutate(game_id = cur_group_id()) %>% ungroup() %>%
  mutate(h_a = if_else(home_team == team, "H", "A")) %>%
  group_by(game_id, date, round, team, h_a, home_score, away_score) %>%
  summarise(
    s_kicks = sum(kicks),
    s_handballs = sum(handballs),
    s_tackles = sum(tackles),
    s_inside_50_s = sum(inside_50_s),
    s_cont_pos = sum(cont_pos),
    s_uncont_pos = sum(uncont_pos),
  ) %>% arrange(game_id, desc(h_a)) %>% ungroup() %>%
  pivot_wider(
    names_from = c(h_a),
    values_from = c(team, s_kicks, s_handballs, s_tackles, s_inside_50_s, s_cont_pos, s_uncont_pos)
  ) %>%
  mutate(
    score_diff = home_score - away_score,
    kicks_diff = s_kicks_H - s_kicks_A,
    handballs_diff = s_handballs_H - s_handballs_A,
    tackles_diff = s_tackles_H - s_tackles_A,
    inside_50_s_diff = s_inside_50_s_H - s_inside_50_s_A,
    cont_pos_diff = s_cont_pos_H - s_cont_pos_A,
    uncont_pos_diff = s_uncont_pos_H - s_uncont_pos_A,
    .keep = "unused"
  )
# Games with multiple players sharing a first name on the same team.
afltables %>%
  select(season, round, home_team, away_team, first_name, surname) %>%
  group_by(season, round, home_team, away_team, first_name) %>%
  summarise(n = n()) %>% View()
# Teams whose per-quarter goal tally strictly increased every quarter,
# starting from at least 4 first-quarter goals (g1..g4 are per-quarter
# goals derived from the cumulative pq_* columns).
afltables %>%
  select(season, round, home_team, away_team, playing_for, pq_1_g, pq_2_g, pq_3_g, pq_4_g) %>%
  distinct() %>%
  mutate(g1 = pq_1_g, g2 = pq_2_g - pq_1_g, g3 = pq_3_g - pq_2_g, g4 = pq_4_g - pq_3_g) %>%
  filter(
    g1 < g2,
    g2 < g3,
    g3 < g4,
    g1 >= 4
  ) %>% arrange(season) %>% View()
# Count of players per team per game with at least 9 disposals.
afltables %>%
  select(season, date, round, home_team, away_team, playing_for, kicks, handballs) %>%
  mutate(disp = kicks + handballs) %>%
  filter(disp >= 9) %>%
  group_by(season, date, round, home_team, away_team, playing_for) %>%
  summarise(n = n()) %>% View()
# --- Fresh pull of the 2021 season ---------------------------------------
# Note: this REPLACES the afltables object built by load_afltables.R above
# with 2021-only data; everything below operates on the 2021 season.
library(dplyr)
library(tidyr)
library(snakecase)
library(fitzRoy)
afltables <- fetch_player_stats_afltables(season = 2021)
dim(afltables)
head(afltables)
# Normalise column names to snake_case to match the style used above.
names(afltables) <- to_snake_case(names(afltables))
#team names = Richmond, Carlton, Collingwood, Western Bulldogs, Melbourne,
#Fremantle, Adelaide, Geelong, Essendon, Hawthorn, Brisbane Lions, Sydney
#North Melbourne, Port Adelaide, Greater Western Sydney, St Kilda, West Coast
#Gold Coast, Carlton,
# --- Per-player season summary (2021) -------------------------------------
# GP counts games actually played (time_on_ground > 0); each pcnt_* column
# is the share of games played in which the player reached the given
# disposal/goal threshold, expressed as a percentage.
useful_stats <- afltables %>%
  select(round, first_name, surname, playing_for, kicks, handballs, goals, time_on_ground) %>%
  #mutate(games_played = ) %>%
  group_by(playing_for, first_name, surname) %>%
  summarise(
    GP = sum(time_on_ground > 0),
    avgD = mean(kicks+handballs),
    pcnt_20_plus_D = ((sum((kicks + handballs)>=20))/(sum(time_on_ground > 0)))*100,
    pcnt_25_plus_D = ((sum((kicks + handballs)>=25))/(sum(time_on_ground > 0)))*100,
    avgG = mean(goals),
    pcnt_1_plus_G = ((sum((goals)>=1))/(sum(time_on_ground > 0)))*100,
    # Bug fix: this column was also named pcnt_1_plus_G, duplicating the
    # previous output name although it actually computes the 2+ goal rate.
    pcnt_2_plus_G = ((sum((goals)>=2))/(sum(time_on_ground > 0)))*100,
    .groups = 'drop'
  ) %>%
  arrange(playing_for, surname)
View(useful_stats)
# Games where one team fielded two or more players sharing a first name,
# with their combined disposals.
afltables %>%
  select(season, round, date, home_team, away_team, playing_for, first_name, surname, disposals) %>%
  group_by(season, round, date, home_team, away_team, playing_for, first_name) %>%
  summarise(
    n = n(),
    s_disp = sum(disposals)
  ) %>%
  filter(n >= 2) %>% View()
# Teams that led by a ratio of 2+ at half time (HT_p / HT_o) yet went on to
# lose or draw (w_l in L/D).
afltables %>%
  select(season, date, round, playing_for, opp, pq_2_g, pq_2_b, oq_2_g, oq_2_b, w_l) %>% distinct() %>%
  mutate(
    HT_p = pq_2_g*6 + pq_2_b,
    HT_o = oq_2_g*6 + oq_2_b,
    ratio = HT_p/HT_o
  ) %>%
  filter(ratio >= 2, w_l %in% c('L','D')) %>% View()
afltables
# Quick check of lubridate's weekday labelling for a known date.
lubridate::wday('2021/4/14', label= T) %>% as.character()
# Per-player appearance counts by weekday for Richmond / St Kilda, filtered
# to Thursday games, then written out for sharing.
thu_stk_rich <- afltables %>%
  select(season, date, playing_for, id, first_name, surname) %>%
  filter(playing_for %in% c('Richmond', 'St Kilda')) %>%
  mutate(day_of_week = as.character(lubridate::wday(date, label= T))) %>%
  group_by(day_of_week, id, first_name, surname) %>%
  summarise(
    n = n(),
    teams = paste(unique(playing_for), collapse = ', ')
  ) %>%
  filter(day_of_week == 'Thu')
write.csv(thu_stk_rich, file = 'thu_stk_rich.csv', row.names = F)
# Player games with at least one disposal but zero marks.
afltables %>%
  select(season, round, date, playing_for, id, first_name, surname, disposals, marks) %>%
  filter(marks == 0, disposals >=1) %>% View()
# Margins for Thursday games.
afltables %>%
  select(season, round, date, playing_for, opp, playing_for_score, opp_score) %>% distinct() %>%
  mutate(
    day_of_week = as.character(lubridate::wday(date, label= T)),
    margin = playing_for_score-opp_score
  ) %>%
  filter(day_of_week == 'Thu') %>% View()
# Team-level free-kick totals per game (post-2000, both counts recorded),
# plus the free-kick differential and the score margin for plotting.
frees_data <- afltables %>%
  select(season, round, date, playing_for, frees_for, frees_against, playing_for_score, opp_score) %>%
  group_by(season, round, date, playing_for) %>%
  summarise(
    sfrees_for = sum(frees_for),
    sfrees_against = sum(frees_against),
    playing_for_score = unique(playing_for_score),
    opp_score = unique(opp_score),
    .groups = 'drop'
  ) %>%
  filter(
    season > 2000,
    sfrees_for > 0 & sfrees_against > 0
  ) %>%
  mutate(
    diff = sfrees_for - sfrees_against,
    margin = playing_for_score - opp_score
  )
View(frees_data)
library(ggplot2)
# Does the free-kick count advantage track the final margin?
ggplot(frees_data) +
  geom_point(aes(x = diff, y = margin))
# Games where a team received more free kicks than it scored points.
frees_data %>%
  filter(sfrees_for > playing_for_score) %>% View()
# All game margins.
afltables %>%
  select(season, round, date, playing_for, opp, playing_for_score, opp_score) %>%
  distinct() %>%
  mutate(margin = playing_for_score - opp_score) %>% View()
# Distinct (first name, surname) pairs appearing for more than one team on
# the same date -- i.e. different players sharing a full name.
afltables %>%
  select(season, round, date, playing_for, id, first_name, surname) %>%
  group_by(season, round, date, first_name, surname) %>%
  summarise(
    n = length(unique(playing_for)),
    teams = paste(unique(playing_for), collapse = ', ')
  ) %>%
  filter(n != 1) %>%
  group_by(first_name, surname) %>% count() %>%
  View()
names(fryzigg_data)
# Metres gained per player per match.
fryzigg_data %>%
  select(match_date, match_round, match_home_team, match_away_team, player_id, player_first_name, player_last_name, metres_gained) %>% View()
# How often do teams that LOSE the first quarter go on to win/lose/draw?
afltables %>%
  select(season, round, date, playing_for, opp, pq_1_g, pq_1_b, oq_1_g, oq_1_b, w_l, playing_for_score, opp_score) %>% distinct() %>%
  mutate(
    margin = playing_for_score - opp_score,
    p_q1 = pq_1_g*6 + pq_1_b,
    o_q1 = oq_1_g*6 + oq_1_b,
    q1_w_l = case_when(
      p_q1 > o_q1 ~ "W",
      p_q1 < o_q1 ~ "L",
      T ~ 'D'
    )
  ) %>%
  filter(q1_w_l %in% c("L")) %>%
  group_by(w_l) %>% summarise(n = n())
  # NOTE(review): the summarise() below is not connected to the pipe above
  # (the chain ends on the previous line) and would error if executed on
  # its own -- leftover from editing the chain?
  summarise(avg_marg = mean(margin))
# group_by(season, round) %>% count() %>% View()
|
d26de65825823e55ce5310a93ea992c6ee6d55e5 | 3d6c2b65707eee399fa06975acdc1965092da1d4 | /ui.R | 81eb65e52e1a825b0d6841461aad11045d694001 | [] | no_license | lv100magikarp/Statcalc | 98237c53ccca99fa2929ad69c154e579b519a1b5 | 7fa82ffa695dbacd27eed49b393a29e26c1fe8d0 | refs/heads/master | 2020-12-24T18:32:59.214715 | 2016-04-24T18:59:52 | 2016-04-24T18:59:52 | 56,988,251 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,456 | r | ui.R | library(shiny)
# UI: single page with a sidebar for entering numbers and a main panel that
# echoes the accumulated input vector and the computed summary statistics.
# The output ids used here (inputVector, mean, quart, var) must match the
# corresponding render calls in server.R.
shinyUI(pageWithSidebar(
  headerPanel('Summary statistics calculator'),
  # Input side: free-text field plus buttons to append numbers and to
  # trigger the calculation.
  sidebarPanel(
    width = 5,
    h3('This is a calculator for some of the basic summary statistics'),
    textInput(inputId='num',label='Enter your number(s):'),
    actionButton(inputId='addnum',label='Input number'),
    actionButton(inputId='calc',label='Calculate!'),
    helpText('Note: You could enter a single number at a time or enter a vector',
             'of numbers with individual numbers separated by a comma.',
             'Press the "Input number" button to confirm to input your number(s),',
             'and you may input new numbers afterwards.',
             'All your inputted numbers will be concatenated into a single vector',
             'and will show up on the main panel on the right.',
             'Press the "Calculate!" button to obtain basic summary statistics of the inserted numbers.',
             'Inputs that are not numbers will show up as NA and will not be taken into calculations.')
  ),
  # Output side: the entered vector, then mean, quartiles and variance
  # rendered as verbatim text.
  mainPanel(
    width = 6,
    h3('You have entered:'),
    verbatimTextOutput('inputVector'),
    h3('The mean is:'),
    verbatimTextOutput('mean'),
    h3('The quartiles are:'),
    verbatimTextOutput('quart'),
    h3('The variance is:'),
    h6('At least 2 inputs are required for this'),
    verbatimTextOutput('var')
  )
))
|
76a197eebbaa1fb92dd051068feb0be11dddb3df | e3259d8f489b093b246fe2fd0c4fb6999d6466bf | /R/grafhistbox.comp.r | 48a43ac82dfa93b466db137989d57fa692d1214a | [] | no_license | Franvgls/CampR | 7baf0e8213993db85004b95d009bec33570c0407 | 48987b9f49ea492c043a5c5ec3b85eb76605b137 | refs/heads/master | 2023-09-04T00:13:54.220440 | 2023-08-22T14:20:40 | 2023-08-22T14:20:40 | 93,841,088 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,280 | r | grafhistbox.comp.r | #' Gráficas grafhistbox combinadas biomasa y peso
#'
#' Ver documentación grafhistbox
#' @param gr Grupo de la especie: 1 peces, 2 crustáceos 3 moluscos 4 equinodermos 5 invertebrados 6 para deshechos y otros. 9 incluye todos los grupos a excepción del 6
#' @param esp Codigo de la especie numérico o carácter con tres espacios. 999 para todas las especies del grupo
#' @param camps campañas de la serie de datos a representar en el gráfico de abundancias Demersales Nsh, Porcupine Psh, Arsa primavera As1 y Arsa otoño As2
#' @param dns Elige el origen de las bases de datos: Porcupine "Porc" o "Pnew", Cantábrico "Cant", Golfo de Cádiz "Arsa" (únicamente para sacar datos al IBTS, no gráficos)
#' @param cor.time Si T corrige las abundancias en función de la duración del lance
#' @param kg Si T el gráfico está en kgs, si F en gramos
#' @param ci.lev El intervalo de confianza a representar
#' @param DLS si T dibuja líneas en los valores medios de los últimos dos años y de los cinco previos
#' @param idi Nombre científico de la especie ("l") o nombre común ("e")
#' @param SE Si T dibuja las cajas representando el error estándar, si F no aparecen la cajas
#' @param es Si T ejes y unidades en español, si F en inglés
#' @param excl.sect Sectores a excluir como carácter, se pueden elegir tanto los sectores como estratos
#' @param sector Alternativa a excl.sect para un sólo sector. Si especificado como carácter solo toma el sector elegido
#' @param Nas Si F no representa las cajas en los sectores/estratos en los que algún sector o estrato tiene un sólo lance. Si T utiliza el valor del estrato y evita los NAs
#' @param ymax Valor máximo del eje de las y, tiene doble valor para regular biomasa y número
#' @param ti Permíte sólo el nombre de la especie que aparece en el gráfico superior y sale siempre en cursiva.
#' @param sub Añade un subtítulo con el valor que se le ponga si no F
#' @param mar Si se quiere dejar un margen ya establecido hacerlo igual a F
#' @param tline Si T dibuja una línea de tendencia a traves de un glm con los datos de abundancia. Gráficos evaluación MSFD.
#' @param years Si T saca los años como nombre de campaña en el eje de las equis en vez del nombre de campaña
#' @return Crea una gráfica doble de evolución de las abundancias en biomasa y número.
#' @seealso {\link{grafhistbox}}, {\link{grafhistbox.comp}}
#' @family abunds
#' @examples grafhistbox(1,45,Nsh[7:27],"Cant",DLS=T,es=FALSE,years=TRUE,tline=TRUE,ti=TRUE,sub=TRUE)
#' @export
grafhistbox.comp<-function(gr,esp,camps,dns="Porc",cor.time=TRUE,kg=TRUE,ci.lev=.8,DLS=FALSE,idi="l",SE=TRUE,es=TRUE,sector=NA,
                      Nas=FALSE,excl.sect=NA,ymax=c(NA,NA),tline=FALSE,years=TRUE,ti=TRUE,mar=NA) {
  # Save the complete settable graphics state and restore it on exit, even
  # if one of the two grafhistbox() calls errors part-way through. The
  # previous version restored par only on the success path, which could
  # leave the device stuck in mfrow = c(2, 1).
  op <- par(no.readonly = TRUE)
  on.exit(par(op), add = TRUE)
  par(mfrow = c(2, 1))
  # Top panel: biomass (ind = "p", with title/subtitle); bottom panel:
  # abundance in numbers (ind = "n", title suppressed). The hard-coded
  # margins stack the two panels tightly.
  # NOTE(review): ci.lev, idi, SE and mar are accepted (and documented in
  # the roxygen above) but never forwarded to grafhistbox() -- confirm
  # whether that is intentional.
  grafhistbox(gr=gr,esp=esp,camps=camps,dns=dns,ind="p",cor.time=cor.time,kg=kg,DLS=DLS,es=es,sector=sector,ti=ti,Nas=Nas,excl.sect=excl.sect,
            ymax=ymax[1],mar=c(4, 4, 2.5, 2.5) + 0.1,tline=tline,years=years,sub=TRUE)
  grafhistbox(gr=gr,esp=esp,camps=camps,dns=dns,ind="n",cor.time=cor.time,kg=kg,DLS=DLS,es=es,sector=sector,ti=FALSE,Nas=Nas,excl.sect=excl.sect,
            ymax=ymax[2],mar=c(4, 4, 1, 2.5) + 0.1,tline=tline,years=years,sub=TRUE)
  }
|
ea1a911241facc65e472d4648cbac9df23cbe270 | 9be4049c03dde12e6404aa7bc9e11a2b619998c3 | /CodeLinkR/R/parse_NAICS.R | 048748d48ded9bcf360c3750ca8799367556569d | [] | no_license | HaoxueChang/what-links-to-what | e4d53de7cdadc1e7d14488b69369c5da914a40a1 | 675a6897a09d8ae92627ff599b7804dbd85be128 | refs/heads/master | 2021-06-18T13:46:32.169343 | 2017-06-16T13:51:54 | 2017-06-16T13:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,871 | r | parse_NAICS.R | write_NAICS_to_RDF <- function(ws, codeAbbrev, version, dataDir, turtlePath){
  # Base URI for every concept in this classification version, e.g.
  # http://isdata.org/Classifications/NAICS/2017/
  baseURL = paste0("http://isdata.org/Classifications/",codeAbbrev,"/", version, "/")
  ontStore = initialize_New_OntStore()
  # add rdf:type skos:ConceptScheme
  add_skos_concept_scheme(ontStore, substring(baseURL, 1, nchar(baseURL)-1))
  for (i in c(1:nrow(ws))){
    subjectURL = paste0(baseURL, ws$Code[i])
    # A code's parent is the code with its final character removed (the
    # NAICS hierarchy is prefix-based); "" means no parent row was found.
    higherCodeURL = ""
    loc = which(ws$Code == substr(ws$Code[i], 1, nchar(ws$Code[i])-1))
    # NOTE(review): any(loc) coerces the integer positions from which() to
    # logical; it behaves like length(loc) > 0 only because which() returns
    # positive indices. If several rows matched, ws$Code[loc] would be a
    # vector here -- presumably codes are unique; verify upstream.
    if (any(loc)){
      higherCodeURL = paste0(baseURL, ws$Code[loc])
    }
    # Link child and parent in both directions (skos:broader/narrower).
    if (higherCodeURL != ""){
      add_skos_narrower(ontStore, higherCodeURL, subjectURL)
      add_skos_broader(ontStore, subjectURL, higherCodeURL)
    }
    add_skos_inScheme(ontStore, subjectURL, substring(baseURL, 1, nchar(baseURL)-1))
    add_skos_concept_node(ontStore,
                          conceptId = subjectURL,
                          notation = ws$Code[i],
                          description = ws$Title[i],
                          prefLabel = ws$Code[i])
  }
  # Serialise the full store to Turtle, one file per classification version.
  save.rdf(ontStore, paste0(turtlePath, "/", codeAbbrev, version, ".turtle"), format="TURTLE")
}
parse_NAICS <- function(codeAbbrev = "NAICS", turtlePath = "./data/Turtle"){
  # Ensure the Turtle output directory exists before any version is processed.
  dir.create(turtlePath, recursive=TRUE)
  versions = get_classification_versions(codeAbbrev)
  for (item in versions){
    # Per-version working directory for the downloaded source workbook.
    dataDir = paste0("./data/",codeAbbrev,"/", item$version)
    dir.create(dataDir, recursive=TRUE)
    filePath = paste0(dataDir, "/", item$dataFile)
    # Only download when the workbook is not already cached on disk.
    if (!file.exists(filePath)){
      download.file(item$url, filePath)
    }
    # Read the first worksheet and apply the column names declared for
    # this version (a comma-separated string in the version metadata).
    ws = readWorksheet(loadWorkbook(filePath), 1)
    colnames(ws) = strsplit(item$colnames, ",")[[1]]
    # remove NA rows (1st row)
    ws = ws[which(!is.na(ws[,1])),]
    write_NAICS_to_RDF(ws, codeAbbrev, item$version, dataDir, turtlePath)
  }
}
|
11de2b988ced0106d49fdacacdcf3552dd9cf71e | 5a69a7fa1f5c177b09f3af68640a998c2328f9b7 | /man/as.nm.Rd | 30de63071cde5fdc82d5752e668ccde3e2c46f05 | [] | no_license | metrumresearchgroup/metrumrg | a2cecff3aa8f2a38f17e2d64f7aed6936a2e4844 | 2e5a5411de7768ee9d382b90b15a6b7fe9b210c9 | refs/heads/master | 2021-05-04T07:57:13.829508 | 2017-04-24T14:16:15 | 2017-04-24T14:16:15 | 70,732,920 | 2 | 4 | null | 2018-10-15T14:56:40 | 2016-10-12T19:11:55 | R | UTF-8 | R | false | false | 5,922 | rd | as.nm.Rd | \name{as.nm}
\alias{nm}
\alias{as.nm}
\alias{as.nm.default}
\alias{as.nm.data.frame}
%\alias{merge.nm}
\alias{read.nm}
\alias{write.nm}
\title{Create and Manipulate nm Objects}
\description{
Objects of class \dfn{nm} are intended to support analysis using the software
NONMEM ((c), Icon Development Solutions). \code{nm} gives a zero-row data.frame with
suitable columns and column classes (essentially, a template for dataset construction).
\code{as.nm} and \code{as.nm.data.frame} construct an
nm object from an existing object. The read and write methods are wrappers
for \file{.csv} equivalents. \code{read.nm} reconstitutes classes for flags,
DATETIME, and C.
%\code{merge.nm} coerces its result using \code{as.nm}, guaranteeing
%a consistent left hand object when using \code{Ops.keyed}.
}
\usage{
nm()
\method{as.nm}{data.frame}(x, ...)
read.nm(x,na.strings='.',as.is=TRUE,key=c('SUBJ','TIME','SEQ'),flags=character(0),...)
write.nm(x, file, na = '.', row.names = FALSE, quote = FALSE, ...)
}
\arguments{
\item{x}{data.frame or nm}
\item{na.strings}{passed to \code{read.csv}}
\item{as.is}{passed to \code{read.csv}}
\item{key}{passed to \code{as.keyed}}
\item{flags}{character vector naming colums to convert using \code{as.flag}}
\item{\dots}{extra arguments, ignored or passed to \code{write.csv}}
\item{file}{passed to \code{write.csv}}
\item{na}{passed to \code{write.csv}}
\item{row.names}{passed to \code{write.csv}}
\item{quote}{passed to \code{write.csv}}
}
\details{
\code{as.nm.data.frame} is the principal method that creates an nm classification. It
alone enforces all qualities of class nm. \code{read.nm} is the only other function
that creates an nm classification; use with caution, as it does not enforce all qualities.
Just before \code{as.nm.data.frame} returns, it calls \code{as.nm} on each of
its columns: a non-operation (\code{as.nm.default}) unless the user supplies specialized
classes and methods. Column-specific methods may make use of the passed argument
\code{data}, which contains the data set itself. Any enforced or conditional outputs
(see column summary) have the column name as the first member of the class vector.
\itemize{
\item \code{SUBJ} must be present and defined, even for commented records. ID is (re)calculated as \code{as.numeric(factor(SUBJ))}.
\item \code{C} (class: comment) will be created if not present.
\item \code{NA} \code{C} will be imputed \code{FALSE}.
\item Every active (non-commented) source record should define exactly one of \code{HOUR} or \code{DATETIME}.
\item \code{HOUR} is taken to represent relative accumulation of hours from arbitrary origin.
\item \code{DATETIME} is understood as seconds, coercible to \code{mDateTime}.
\item \code{TIME} is calculated from either \code{HOUR} or \code{DATETIME}.
\item Definition (or not) of \code{HOUR} vs. \code{DATETIME} should be constant within subject (for active records).
\item \code{SEQ} (class \code{flag}) will be created if not present.
\item \code{nm} will be keyed on \code{SUBJ}, \code{TIME}, and \code{SEQ}. \code{SEQ} determines sort order for rows with matching \code{TIME}.
\item Result will be sorted.
\item \code{TIME} will be relativized to earliest extant value, incl. those in comments.
\item \code{TAFD} (time after first non-commented dose), \code{TAD} (time since most recent non-commented dose), and \code{LDOS} (amount of most recent non-commented dose) will be calculated if \code{AMT} is present. \code{TAD} and \code{LDOS} are very literal: they return NA if no dose has been given yet, and will "remember" the time and amount of the most recent dose whether or not it is still conceptually relevant, given study design. If two doses are given at the same time, say in different compartments, LDOS reflects the first AMT with respect to sort order.
\item \code{TAD} will consider \code{ADDL} and \code{II} if present.
\item \code{NA} flags will be imputed as zero.
\item \code{MDV} (missing dependent value) will be calculated if \code{DV} is present, preserving non-NA MDV, if present.
\item resulting column order will lead with \code{C} followed by key columns.
}
Column summary:
\itemize{
\item required inputs: SUBJ; HOUR or DATETIME
\item optional inputs: AMT, ADDL, II, DV
\item enforced outputs: SUBJ, ID, C, TIME, SEQ
\item conditional outputs: TAFD, TAD, LDOS, MDV
}
}
\value{
\code{write.nm} is used for side effects. Others return an object with class
\code{c('nm','keyed','data.frame')}.
}
\references{\url{http://metrumrg.googlecode.com}}
\author{Tim Bergsma}
\note{
Assembly chains, such as \code{nm() + dose + samp | demo}, are no longer
supported. Nor are \code{as.moot} and \code{as.rigged}.
}
\seealso{
\itemize{
\item \code{\link{summary.nm}}
\item \code{\link{Ops.keyed}}
}
}
\examples{
\dontrun{metrumrgURL('example/project/script/assemble.pdf')}
dose <- data.frame(
SUBJ = rep(letters[1:3], each = 2),
HOUR = rep(c(0,20),3),
AMT = rep(c(40,60,80), each = 2)
)
dose <- as.keyed(dose,key=c('SUBJ','HOUR'))
samp <- data.frame(
SUBJ = rep(letters[1:3], each = 4),
HOUR = rep(c(0,10,20,30),3),
DV = signif(rnorm(12),2) + 2
)
samp <- as.keyed(samp,key=c('SUBJ','HOUR'))
demo <- data.frame(
SUBJ = letters[2:5],
RACE = c('asian','white','black','other'),
SEX = c('female','male','female','male'),
WT = c(75, 70, 73, 68)
)
demo <- as.keyed(demo,key=c('SUBJ'))
meds <- as.keyed(
data.frame(
SUBJ=c('a','c'),
HOUR=c(0,15),
STOP=c(10,25),
C3A4=as.flag(c(1,1))
),
key=c('SUBJ','HOUR')
)
nm()
#nm() + dose
as.nm(dose)
as.nm(dose + samp)
as.nm(dose + samp | demo) #as.nm executes once
meds
long <- deranged(meds,start='HOUR',stop='STOP')
long$EVID <- 2
as.nm( dose + samp + long)
data <- as.nm( aug(dose,EVID=1, SEQ=1) + aug(samp,EVID=0, SEQ=0) | demo)
summary(data,by=c('EVID','SEQ'))
\dontrun{index(data)}
}
\keyword{manip}
|
950967649c5c25827d080da709a86df4926c0a37 | c39e2142d4f72c76ca8a36fb592d6a74b7464c62 | /man/update_alpha.Rd | 99237aa17aa29c4960840b84db014c902be58699 | [] | no_license | oslerinhealth-releases/rewind | 5ad098bc45fc0c6e40dcb15d3b46a03aea301738 | 8ea4adc1e55de1876b86f39048ead692a1325514 | refs/heads/master | 2020-08-24T19:14:27.497533 | 2019-10-22T19:04:24 | 2019-10-22T19:04:26 | 216,093,606 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 974 | rd | update_alpha.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample_mcmc.R
\name{update_alpha}
\alias{update_alpha}
\title{sample alpha - hyperparameter for latent state prevalences p_m}
\usage{
update_alpha(H_star, t, M, a = 1, b = 1, show_density = FALSE)
}
\arguments{
\item{H_star}{binary matrix of latent state profiles (row for clusters, column for
latent states)}
\item{t}{number of clusters}
\item{M}{latent state dimension}
\item{a, b}{hyperparameter for Beta distribution over reparameterized alpha.}
\item{show_density}{show the full conditional density of alpha
given other unknown parameters; \code{FALSE} by default.}
}
\value{
an updated alpha value (positive)
}
\description{
This function samples the parameters from a grid (this is used only for model
with pre-specified latent state dimension M);
NB: currently it uses a uniform prior or a Beta prior for the reparameterized
hyperparameter; check whether this can be replaced by a Gamma prior.
}
|
8128cc6b1ce0aeca034d06358fe8449724cc1367 | 82dd388bbb6e26a26145d05024956159419e1efc | /codeFromBook/Chapter6/plotBeta.r | 3e720b6d0e46ece16c67734570536dd53d41b675 | [] | no_license | humanfactors/computational-modelling | 731a2f143d2795cffd9994700c29a9c78928da83 | ff4be7dcc416012bc83098c1cab3e85dcb321273 | refs/heads/master | 2020-03-16T15:02:23.775932 | 2019-08-05T22:53:58 | 2019-08-05T22:53:58 | 132,724,714 | 0 | 1 | null | 2019-08-05T22:54:00 | 2018-05-09T08:17:45 | R | UTF-8 | R | false | false | 817 | r | plotBeta.r | #plot some betas
# Beta-distribution figures for Chapter 6.
# Two priors with the same mean but different certainty ("Johnnie" vs "Jane").
curve(dbeta(x, 2, 4), ylim = c(0, 6), ylab = "Probability Density", las = 1)
curve(dbeta(x, 8, 16), add = TRUE, lty = "dashed")
legend("topright", c("Johnnie", "Jane"), inset = .05, lty = c("solid", "dashed"))
#the remaining lines are not listed in the book but perform some of the computations mentioned there
# Prior probability mass each observer places on theta in [.13, .53].
pbeta(.53, 8, 16) - pbeta(.13, 8, 16)
pbeta(.53, 2, 4) - pbeta(.13, 2, 4)
# Posterior updating of a symmetric Beta(12, 12) prior with growing data sets.
x11(7, 7)
alpha <- beta <- 12
curve(dbeta(x, alpha, beta), ylim = c(0, 40), ylab = "Probability Density", las = 1, lwd = 3)
ns <- c(12, 100, 1000)  # failures at each sample size (renamed from `t`, which masked base::t())
hs <- c(14, 113, 1130)  # successes at each sample size
for (i in seq_along(hs)) {
  h <- hs[i]
  # BUG FIX: the original passed lty = log10(t) + 1, a length-3 non-integer
  # vector of which only the first element is used, so all three posterior
  # curves were drawn with the same line type and did not match the legend
  # (which promises lty 2:4). lty = i + 1 gives each curve its own type.
  curve(dbeta(x, alpha + h, beta + ns[i]), add = TRUE, lty = i + 1)
  # posterior mean vs maximum-likelihood estimate h / (h + n)
  print(c((alpha + h) / (alpha + h + beta + ns[i]), h / (h + ns[i])))
}
legend("topright", c("{14, 26}", "{113, 213}", "{1130, 2130}"),
       inset = .05, lty = c(2:4))
abline(v = 0.5, col = "red")
# posterior probability that theta <= .5, and CDF value at ~the posterior mean
pbeta(.5, 1130, 1000)
pbeta(.5305164, 1130, 1000)
|
4e60d3bd94bdd124a1ba84f44ed4ac49b07bb9cc | 9de3b2b8b28f89cfb13723b6be99f157fc13a313 | /2_Functions/1_Aggregate_wrangle/Function_str_clean.R | 8e91bfec5abcbd66d4dd049d929a1990ce2bbe2a | [] | no_license | WWF-ConsEvidence/MPAMystery | 0e730dd4d0e39e6c44b36d5f9244a0bfa0ba319b | 6201c07950206a4eb92531ff5ebb9a30c4ec2de9 | refs/heads/master | 2023-06-22T04:39:12.209784 | 2021-07-20T17:53:51 | 2021-07-20T19:34:34 | 84,862,221 | 8 | 1 | null | 2019-07-24T08:21:16 | 2017-03-13T18:43:30 | R | UTF-8 | R | false | false | 362 | r | Function_str_clean.R |
# Function to remove leading/trailing white space from each element of a
# character vector. Uses the base helper trimws() instead of the original
# hand-rolled gsub("^\\s+|\\s+$", "", x); the explicit whitespace class keeps
# behaviour identical (TRE's \s is [[:space:]]). NA elements stay NA.
trim <- function(x) trimws(x, whitespace = "[[:space:]]")
# Function to clean string variables (lower case, remove punctuation).
#
# Normalises a character vector for matching/joining: lower-cases it, removes
# punctuation (including intra-word dashes), collapses runs of internal
# whitespace, and trims leading/trailing whitespace via trim() above.
#
# Args:
#   strings: character vector to clean.
# Returns a character vector of the same length as `strings`.
str_clean <- function(strings) {
  # Fail loudly if the text-mining dependency is missing. The original used
  # require(dplyr); require(tm), which returns FALSE on a missing package and
  # attaches whole packages as a side effect of calling this helper; with
  # qualified tm:: calls no attachment (and no dplyr pipe) is needed.
  if (!requireNamespace("tm", quietly = TRUE)) {
    stop("Package 'tm' is required by str_clean()", call. = FALSE)
  }
  cleaned <- tm::removePunctuation(tolower(strings),
                                   preserve_intra_word_dashes = FALSE)
  trim(tm::stripWhitespace(cleaned))
}
|
732b9c0f336fe32165ed443d8e447c00e3f51586 | 1b73390902afc90781b72e7cf49f08878ddd4a08 | /R/rtutor_events.r | b5e414ecf7b31f8ae88ea89112512fad5b78f74d | [] | no_license | skranz/RTutor2 | cf018a114a724a666672399e73b8da38c58bd522 | cb19b69a6d11d2f957dad72df56a52680c2ff353 | refs/heads/master | 2021-01-17T13:24:57.440277 | 2017-10-07T01:51:22 | 2017-10-07T01:51:22 | 51,748,016 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,325 | r | rtutor_events.r |
# Add javascript to deal with clicks on free html area,
# i.e. not on inputs, buttons, links or images
# can be used to proceed with slides
#
# Builds a single document-level jQuery click handler (returned wrapped via
# bottomScript) and registers the matching server-side events. Depending on
# the flags, clicks are forwarded to Shiny as:
#   buttonHandlerEvent        clicks on a <button> (or a direct child of one)
#   imageClickEvent           clicks on an <img>, with coordinates rescaled to
#                             the image's natural size
#   documentClickHandlerEvent clicks on "free" page area (anything that is not
#                             a button/image/input/link/radio/checkbox and not
#                             inside the ACE code editor)
# NOTE(review): the `opts` argument is accepted but never used in the body.
rtutorClickHandler = function(button.handler=TRUE, image.handler = TRUE, use.frame.click = TRUE,opts = rt.opts()) {
restore.point("rtutorClickHandler")
# open the JS click handler; snippets are appended below and the handler is
# closed again just before bottomScript() is called
code = '$(document).on("click", function (e) {'
# Variable definitions
code = paste0(code,'
var tag = e.target.nodeName;
var eclass = e.target.className;
var pn = e.target.parentNode;
var gpn = pn.parentNode;
')
if (button.handler) {
eventId = "buttonHandlerEvent"
# forward clicks on buttons, or on an element directly inside a button; the
# random nonce forces Shiny to treat repeated clicks as new input values.
# NOTE(review): `ptag === "BUTTON" || ptag ==="BUTTON"` repeats the same
# comparison twice — the second operand was probably meant to test another
# tag name; confirm against the upstream RTutor sources.
code = paste0(code,'
if (tag === "BUTTON") {
Shiny.onInputChange("',eventId,'", {eventId: "',eventId,'", id: e.target.id, tag: tag, nonce: Math.random(), pageX: e.pageX, pageY: e.pageY});
return;
} else {
var ptag = e.target.parentNode.nodeName;
if (ptag === "BUTTON" || ptag ==="BUTTON") {
Shiny.onInputChange("',eventId,'", {eventId: "',eventId,'", id: e.target.parentNode.id, tag: ptag, nonce: Math.random(), pageX: e.pageX, pageY: e.pageY});
return;
}
}
')
}
if (image.handler) {
imageEventId = "imageClickEvent"
# forward image clicks with (x, y) rescaled from the displayed size to the
# image's natural resolution
code = paste0(code,'
if (tag === "IMG") {
var img = $(e.target);
var offset = img.offset();
var oimg = document.getElementById(img.attr("id"));
var xratio = oimg.naturalWidth / img.width();
var yratio = oimg.naturalHeight / img.height();
//alert("Image click: offset= "+JSON.stringify(offset));
var x = (e.pageX - offset.left)*xratio;
var y = (e.pageY - offset.top)*yratio;
Shiny.onInputChange("',imageEventId,'", {eventId: "',imageEventId,'", id: e.target.id, x: x, y: y, tag: tag, nonce: Math.random(), pageX: e.pageX, pageY: e.pageY});
return;
}
')
registerEvent("imageClickEvent", jscript="", overwrite=TRUE)
}
if (use.frame.click) {
# General no click handler
# first bail out for elements that have their own handlers or semantics
code = paste0(code,'
if (tag === "BUTTON" || tag === "IMG" || tag === "INPUT" || tag === "A") {
return;
}
if (pn.className === "radio" || pn.className === "checkbox") {
return;
}
if (gpn.className === "radio" || gpn.className === "checkbox") {
return;
}
')
# don't handle clicks from ACE editor
code = paste0(code,'
if (tag === "DIV") {
if (eclass === "ace_content" || eclass === "ace_scroller" || eclass === "ace_gutter" || pn.className === "ace_scroller" || pn.className === "ace_gutter" || gpn.className === "ace_gutter" || gpn.className === "ace_scroller") {
return;
}
}
')
# if not returned, register doc_click handler
code = paste0(code,'
Shiny.onInputChange("documentClickHandlerEvent", {id: e.target.id, tag: tag, class: eclass, pClass: pn.className, gpClass: gpn.className, nonce: Math.random(), pageX: e.pageX, pageY: e.pageY});
')
}
# close the JS handler opened at the top of the function
code = paste0(code,'
});
')
res = bottomScript(HTML(code))
# register the events server-side regardless of which flags were set
registerEvent("documentClickHandlerEvent", jscript="", overwrite=TRUE)
registerEvent("buttonHandlerEvent", jscript="", overwrite=TRUE)
#classEventHandler("StopClickPropagation", function(...) {cat("\nStopClickPropagation clicked")}, stop.propagation = TRUE)
return(res)
}
|
91779418f2fb10ff5b1ed7927ce7a76a227b1ed2 | dc71e5bcc01f820eb2c4f5149a6fa7c01674ab6a | /process_outputs.R | 5c57a3bd7d7973fc9bd7aae2c5d82079ec560567 | [] | no_license | kevinvzandvoort/covid_svk | 9433027f6a180c9893f24c6dc1daf125eaf23546 | 5c3bc50ec9516286f164677095411541ed0fb017 | refs/heads/main | 2023-02-21T05:15:57.655028 | 2021-01-20T18:32:10 | 2021-01-20T18:32:10 | 314,217,980 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 15,490 | r | process_outputs.R | library("qs")
library("data.table")
library("ggplot2")
library("patchwork")
# NOTE(review): hard-coded working directory makes this script non-portable;
# all paths below (out_folder, ggsave targets) are relative to it.
setwd("~/workspace/covid_svk")
#' TODO: still need to clean this script
# colour palette used throughout the figures (indexed as lshcols[i] below)
lshcols <- c("#000000", "#0D5257", "#00BF6F", "#00AEC7", "#A7A8AA", "#32006E", "#1E22AA", "#FE5000", "#FFB81C")
# directory holding the per-run model outputs (run_<i>.csv and *.qs files)
out_folder <- "./out/"
# maximum number of stochastic runs to read; runs without output are skipped
runs <- 400
#how many weeks to plot before the first test and after the last test?
plotdata_weeks <- c(3,2)
#' summarize output data for one run
#' returns incidence and prevalence over time in long format, tagged with the
#' run/scenario identifiers; times are reported relative to the first (pilot)
#' test day, test_times[1]
#'
#' @param data line list with columns status, infectious_at, recovered_at
#'   (recovered_at == -1 is treated as "not recovered within the simulation")
#' @param plot_times days to summarise
#' @param test_times days of the mass tests
#' @param r,scen,test,compliance identifiers copied into the output columns
#' @return data.table with columns time, relative_time, value, type
#'   ("prevalence"/"incidence") plus run, scen, test, compliance
processOutput <- function(data, plot_times, test_times, r, scen, test, compliance){
  # vapply instead of sapply: guarantees an integer vector even when
  # plot_times is empty (sapply would silently return an empty list)
  prevalence <- vapply(
    plot_times,
    function(x) data[status > 1 & infectious_at <= x & (recovered_at > x | recovered_at == -1), .N],
    integer(1)
  )
  incidence <- vapply(
    plot_times,
    function(x) data[infectious_at == x, .N],
    integer(1)
  )
  out <- rbindlist(list(
    data.table(
      time=plot_times, relative_time=plot_times - test_times[1],
      value=prevalence, type="prevalence"
    ),
    data.table(
      time=plot_times, relative_time=plot_times - test_times[1],
      value=incidence, type="incidence"
    )
  ))[, run := r][, scen := scen][, test := test][, compliance := compliance]
  return(out)
}
#' get observed summary model results
#' observed prevalence during test
#' return median and 95% interval
# For every run that produced a summary csv: compute the test-positive
# proportion among attendees (plus the true prevalence for reference), stack
# all runs, and summarise the observed prevalence per
# scenario/test/compliance combination as median and 2.5%/97.5% quantiles.
summary_data_obs <- rbindlist(lapply(1:runs, function(x){
if(!file.exists(sprintf("%s/run_%s.csv", out_folder, x))) return(NULL)
smrydata <- fread(sprintf("%s/run_%s.csv", out_folder, x))
smrydata[, prevalence := test_positive/test_attend][, actual_prevalence := infectious/popsize]
return(smrydata[, run := x])
}))[!is.na(prevalence), .(median=median(prevalence), low95=quantile(prevalence, 0.025), high95=quantile(prevalence, 0.975)), by=c("scenario","test","compliance")]
#' add actual model results
#' actual prevalence at time test would be implemented
#' for scenarios without testing (baseline, lockdown)
#' returns prevalence ratio between test 2 and 1 (t2t1) and test 3 and 1 (t3t1)
# Per run: use observed (test-based) prevalence where a test happened and fall
# back to the true prevalence otherwise, reshape to one column per test date,
# compute between-round prevalence ratios, then summarise across runs.
summary_data <- rbindlist(lapply(1:runs, function(x){
if(!file.exists(sprintf("%s/run_%s.csv", out_folder, x))) return(NULL)
smrydata <- fread(sprintf("%s/run_%s.csv", out_folder, x))
smrydata[, prevalence := test_positive/test_attend][, actual_prevalence := infectious/popsize][is.na(prevalence), prevalence := actual_prevalence]
# columns after dcast are ordered by t_start, i.e. pilot, round 1, round 2
rundata = dcast(smrydata, scenario+compliance~t_start, value.var = "prevalence")
colnames(rundata) <- c("scenario", "compliance", "test1", "test2", "test3")
return(melt(rundata[, t2t1 := test2/test1][, t3t1 := test3/test1][, -c("test1","test2","test3")],
measure.vars=c("t2t1","t3t1"))[, run := x])
}))[, .(median=median(value), low95=quantile(value, 0.025), high95=quantile(value, 0.975)), by=c("scenario","compliance","variable")]
#' add observed reduction between tests
# hand-coded observed reductions (56% by round 1, 82% by round 2, with their
# intervals) expressed as prevalence ratios and added as pseudo-scenario "o"
summary_data <- rbindlist(
list(summary_data, data.table(
scenario=c("o","o"),
compliance=c(NA,NA),
variable=c("t2t1","t3t1"),
median=c(1-0.56, 1-0.82),
low95=c(1-0.58, 1-0.83),
high95=c(1-0.54, 1-0.81)
))
)
#' what scenarios are available
# Each entry gives the .qs file name template (filled in with out_folder and
# the run id), the scenario id, the number of mass-test rounds kept, and the
# household compliance setting. Scenario ids used below: 0 = baseline,
# 1 = testing without lockdown, 2 = lockdown (with or without testing),
# 3 = stricter lockdown without testing.
# NOTE(review): the scen1/test1/compl_full entry appears twice — probably a
# copy-paste slip; identical duplicated rows leave the quantile summaries
# unchanged but double that scenario's rows in plotdata. Confirm and dedupe.
scenarios <- list(
list(name = "%s/run_%s_scen0_baseline.qs",
scen = 0, test = NA, compliance = NA),
list(name = "%s/run_%s_scen1_test1_compl_full.qs",
scen = 1, test = 1, compliance = "full"),
list(name = "%s/run_%s_scen1_test1_compl_full.qs",
scen = 1, test = 1, compliance = "full"),
list(name = "%s/run_%s_scen1_test2_compl_full.qs",
scen = 1, test = 2, compliance = "full"),
list(name = "%s/run_%s_scen1_test3_compl_full.qs",
scen = 1, test = 3, compliance = "full"),
list(name = "%s/run_%s_scen1_test1_compl_none.qs",
scen = 1, test = 1, compliance = "none"),
list(name = "%s/run_%s_scen1_test2_compl_none.qs",
scen = 1, test = 2, compliance = "none"),
list(name = "%s/run_%s_scen1_test3_compl_none.qs",
scen = 1, test = 3, compliance = "none"),
list(name = "%s/run_%s_scen2_test0.qs",
scen = 2, test = 0, compliance = NA),
list(name = "%s/run_%s_scen2_test1_compl_full.qs",
scen = 2, test = 1, compliance = "full"),
list(name = "%s/run_%s_scen2_test2_compl_full.qs",
scen = 2, test = 2, compliance = "full"),
list(name = "%s/run_%s_scen2_test3_compl_full.qs",
scen = 2, test = 3, compliance = "full"),
list(name = "%s/run_%s_scen2_test1_compl_none.qs",
scen = 2, test = 1, compliance = "none"),
list(name = "%s/run_%s_scen2_test2_compl_none.qs",
scen = 2, test = 2, compliance = "none"),
list(name = "%s/run_%s_scen2_test3_compl_none.qs",
scen = 2, test = 3, compliance = "none"),
list(name = "%s/run_%s_scen3_test3_compl_none.qs",
scen = 3, test = 3, compliance = "none")
)
#read output data files
# For each stochastic run: read the run summary (it carries the three
# mass-test dates), derive the window of days to plot, then load every
# scenario's individual-level line list (.qs) and reduce it to daily
# prevalence/incidence with processOutput(). Runs without a csv are skipped.
rundata <- list()
for(r in 1:runs){
message(sprintf("run: %s/%s", r, runs))
if(!file.exists(sprintf("%s/run_%s.csv", out_folder, r))) next;
smrydata <- fread(sprintf("%s/run_%s.csv", out_folder, r))
# test days taken from the baseline (scenario 0) rows of the summary
test_times <- smrydata[scenario == 0, t_start]
# plotdata_weeks[1] weeks before the pilot test up to plotdata_weeks[2]
# weeks after the last test
plot_times <- (test_times[1]-plotdata_weeks[1]*7):(test_times[3]+plotdata_weeks[2]*7)
plotdata <- rbindlist(lapply(scenarios, function(x){
data <- qs::qread(sprintf(x[["name"]], out_folder, r))
out <- processOutput(data, plot_times, test_times, r, x[["scen"]], x[["test"]], x[["compliance"]])
}))
plotdata[, "popsize"] <- unique(smrydata$popsize)
rundata[[length(rundata)+1]] <- plotdata
}
plotdata <- rbindlist(rundata)
setorder(plotdata, test)
#colours to use in plot
# one colour per plotted series; the names must match the collab labels
# assigned to plotdata2 further down
testcols <- c(
"Baseline - No test" = lshcols[1],
"Lockdown (Re: 1) - No test" = lshcols[5],
"Lockdown (Re: 0.6) - No test" = lshcols[6],
"Only pilot round" = lshcols[2],
"Pilot and 1st round" = lshcols[3],
"Pilot, 1st, and 2nd round" = lshcols[4]
)
#observed prevalence data
# observed (test-positive) prevalence from the full-compliance summaries,
# positioned at days 0 / 7 / 14 relative to the pilot test so it can be
# overlaid on the modelled trajectories
observed_prevalence <- summary_data_obs[compliance == "full"]
observed_prevalence[test==1, relative_time := 0]
observed_prevalence[test==2, relative_time := 7]
observed_prevalence[test==3, relative_time := 14]
observed_prevalence[, scen := scenario]
#summarize data across runs
# median and 50%/95% quantile bands of prevalence/incidence per 1000
# population across runs, restricted to the series shown in the figures
# (full household compliance, the no-test counterfactuals, and scenario 3)
plotdata2 <- plotdata[compliance == "full" | is.na(compliance) | scen==3, .(
median=median(value/popsize*1000),
low95=quantile(value/popsize*1000, 0.025),
high95=quantile(value/popsize*1000, 0.975),
low50=quantile(value/popsize*1000, 0.25),
high50=quantile(value/popsize*1000, 0.75)
), by=c("relative_time", "type", "scen", "test", "compliance")]
# human-readable series labels; must match names(testcols) exactly
plotdata2[scen==0, collab := "Baseline - No test"]
plotdata2[scen==2 & test==0, collab := "Lockdown (Re: 1) - No test"]
plotdata2[scen==3, collab := "Lockdown (Re: 0.6) - No test"]
plotdata2[scen!=3 & test==1, collab := "Only pilot round"]
plotdata2[scen!=3 & test==2, collab := "Pilot and 1st round"]
plotdata2[scen!=3 & test==3, collab := "Pilot, 1st, and 2nd round"]
# fix the factor level order so the legend follows the testcols ordering
plotdata2[, "collab"] <- factor(plotdata2[, collab], levels=names(testcols), labels=names(testcols))
#plot prevalence over time
#TODO ggplot is not plotting in correct order - currently specifying lines layer by layer
# Modelled prevalence per 1000 (median line + 95% ribbon) faceted by lockdown
# status and household compliance, with vertical lines at the three test days
# (0, 7, 14) and lockdown start (-7), and the observed mass-test prevalence
# overlaid as diamonds. One geom_line/geom_ribbon pair is added per
# scenario/test-round series to control the drawing order (see TODO above).
plot_prevtime <- ggplot(
data=NULL,
aes(x = relative_time, y = median, group=test)
)+facet_grid(
factor(
as.character(scen),
as.character(1:2),
c("No lockdown", "Lockdown")
)~paste0("Compliance household: ", compliance),
scales="free_y"
)+geom_vline(
data=data.table(
relative_time=c(0,7,14)
), aes(xintercept=relative_time)
)+geom_vline(
data=data.table(
relative_time=c(-7),
scen=2
), aes(xintercept=relative_time), linetype=2
# modelled trajectories, one layer per scenario/test-round combination
)+geom_line(
data=plotdata2[type=="prevalence" & scen==3][, -c("compliance")][, test := 3][, scen := 2],
aes(colour=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen==3][, -c("compliance")][, test := 3][, scen := 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="prevalence" & scen > 0 & scen < 3 & test == 3],
aes(colour=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen > 0 & scen < 3 & test == 3],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="prevalence" & scen > 0 & scen < 3 & test == 2],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen > 0 & scen < 3 & test == 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="prevalence" & scen > 0 & scen < 3 & test == 1],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen > 0 & scen < 3 & test == 1],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="prevalence" & scen==2 & test == 0][, -c("compliance", "scen")][, test := 0][, scen := 2],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen==2 & test == 0][, -c("compliance", "scen")][, test := 0][, scen := 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="prevalence" & scen==0][, -c("compliance", "scen")][, test := 0][, scen := 1],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen==0][, -c("compliance", "scen")][, test := 0][, scen := 1],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="prevalence" & scen==0 & relative_time <= -7][, -c("compliance", "scen")][, test := 0][, scen := 2],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="prevalence" & scen==0 & relative_time <= -7][, -c("compliance", "scen")][, test := 0][, scen := 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
# observed mass-test prevalence overlaid as diamonds with 95% error bars
)+geom_errorbar(
data = observed_prevalence[scen==1],
aes(x=relative_time, ymin=low95*1000, ymax=high95*1000),
colour=lshcols[8], width=1
)+geom_point(
data = observed_prevalence[scen==1],
aes(x=relative_time, y=median*1000),
fill=lshcols[8],
colour="#000000",
shape=23,
size=3
)+geom_errorbar(
data = observed_prevalence[scen==2],
aes(x=relative_time, ymin=low95*1000, ymax=high95*1000),
colour=lshcols[9], width=1
)+geom_point(
data = observed_prevalence[scen==2],
aes(x=relative_time, y=median*1000),
fill=lshcols[9],
colour="#000000",
shape=23,
size=3
)+theme_classic(
)+scale_colour_manual(
values=testcols
)+scale_fill_manual(
values=testcols
)+labs(
colour = "Mass tests",
fill = "Mass tests",
x = "time relative to pilot testing (days)",
y = "prevalence (per 1000)"
)+theme(legend.position = "bottom")
# Incidence counterpart of plot_prevtime: same facetting, reference lines and
# layer-by-layer construction, but without the observed-prevalence overlay
# (incidence was not directly observed in the mass tests).
plot_inctime <- ggplot(
data=NULL,
aes(x = relative_time, y = median, group=test)
)+facet_grid(
factor(
as.character(scen),
as.character(1:2),
c("No lockdown", "Lockdown")
)~paste0("Compliance household: ", compliance),
scales="free_y"
)+geom_vline(
data=data.table(
relative_time=c(0,7,14)
), aes(xintercept=relative_time)
)+geom_vline(
data=data.table(
relative_time=c(-7),
scen=2
), aes(xintercept=relative_time), linetype=2
)+geom_line(
data=plotdata2[type=="incidence" & scen==3][, -c("compliance")][, test := 3][, scen := 2],
aes(colour=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen==3][, -c("compliance")][, test := 3][, scen := 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="incidence" & scen > 0 & scen < 3 & test == 3],
aes(colour=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen > 0 & scen < 3 & test == 3],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="incidence" & scen > 0 & scen < 3 & test == 2],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen > 0 & scen < 3 & test == 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="incidence" & scen > 0 & scen < 3 & test == 1],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen > 0 & scen < 3 & test == 1],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="incidence" & scen==2 & test == 0][, -c("compliance", "scen")][, test := 0][, scen := 2],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen==2 & test == 0][, -c("compliance", "scen")][, test := 0][, scen := 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="incidence" & scen==0][, -c("compliance", "scen")][, test := 0][, scen := 1],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen==0][, -c("compliance", "scen")][, test := 0][, scen := 1],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+geom_line(
data=plotdata2[type=="incidence" & scen==0 & relative_time <= -7][, -c("compliance", "scen")][, test := 0][, scen := 2],
aes(colour=collab,fill=collab)
)+geom_ribbon(
data=plotdata2[type=="incidence" & scen==0 & relative_time <= -7][, -c("compliance", "scen")][, test := 0][, scen := 2],
colour=NA, alpha=0.5,
aes(ymin=low95, ymax=high95, fill=collab)
)+theme_classic(
)+scale_colour_manual(
values=testcols
)+scale_fill_manual(
values=testcols
)+labs(
colour = "Mass tests",
fill = "Mass tests",
x = "time relative to pilot testing (days)",
y = "incidence (per 1000)"
)+theme(legend.position = "bottom")
#colours for prevalence plot
# colours for the effectiveness figure; names must match scenario_labels
prevcols = c(
"Observed" = lshcols[7],
"Baseline - No test" = lshcols[1],
"Lockdown (Re: 0.6) - No test" = lshcols[6],
"No lockdown - Mass testing" = lshcols[8],
"Lockdown (Re: 1)\n+ Mass-testing" = lshcols[9],
"Lockdown (Re: 1) - No test" = lshcols[5]
)
#plot effectiveness
# keep full-compliance testing scenarios plus the no-test counterfactuals and
# the observed pseudo-scenario "o"
summary_data2 <- summary_data[compliance != "none" | scenario %in% c(0,3,"o")]
# NOTE(review): this compares compliance to the empty string; the upstream
# NA compliance for the lockdown-without-test rows may have been read back
# as "" — verify that the "2o" relabelling actually matches those rows.
summary_data2[scenario == 2 & compliance == "", scenario := "2o"]
# display labels per scenario id, reversed so coord_flip() reads top-down
scenario_labels <- rev(c(
"o" = "Observed",
"0" = "Baseline - No test",
"3" = "Lockdown (Re: 0.6) - No test",
"2o" = "Lockdown (Re: 1) - No test",
"1" = "No lockdown - Mass testing",
"2" = "Lockdown (Re: 1)\n+ Mass-testing"
))
# Prevalence-ratio comparison (round 1 vs pilot, round 2 vs round 1) per
# scenario: point = median, segment = 95% interval, on a log10 axis with a
# reference line at 1 (no change) and dashed lines at the observed ratios.
plot_effectiveness <- ggplot(
summary_data2,
aes(
x=factor(
scenario,
names(scenario_labels),
scenario_labels
),
colour=factor(
scenario,
names(scenario_labels),
scenario_labels
),
y=median
)
)+geom_segment(
aes(
x=factor(
scenario,
names(scenario_labels),
scenario_labels
),
xend=factor(
scenario,
names(scenario_labels),
scenario_labels
),
y=low95, yend=high95
),
size=1
)+geom_point()+facet_grid(
.~factor(
variable,
c("t2t1","t3t1"),
c("Test round 1 vs pilot round", "Test round 2 vs test round 1")
)
)+theme_classic(
)+geom_hline(
yintercept=1,
size=0.25,
colour="#777777"
# dashed reference at the observed ("o") median ratio
)+geom_hline(
data=summary_data2[scenario=="o"],
linetype=2,
aes(yintercept=median, colour=factor(
scenario,
names(scenario_labels),
scenario_labels
))
)+labs(
x="",
y="prevalence ratio"
)+scale_y_continuous(
trans="log10", breaks=c(0, 0.1, 0.2, 0.3, 0.5, 0.75, 1, 1.5, 2)
)+theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position="none")+coord_flip(
)+scale_colour_manual(values=prevcols)
# compose the main figure with patchwork: effectiveness panel (A) stacked
# above the prevalence time series (B), 2:3 height ratio
(plot_effectiveness)/(plot_prevtime)+ plot_annotation(tag_levels = 'A')+plot_layout(
heights = c(2,3)
)
ggsave("./results20201227.png", units="mm", width=200, height=200)
# ggsave() saves the last displayed plot, so print the incidence figure first
plot_inctime
ggsave("./results_incidence20201227.png", units="mm", width=200, height=200)
|
c97712b79743821d14ded936e64977f30c16bd6d | 17e75933bd6fc146e60eddd4abcdda275ddb1128 | /man/ds08_IQ_and_Brain_Size.Rd | fc8cb3a57e1cdc12924b59f3d1f7468c873a434c | [] | no_license | ngay/kustats | ca6d65094c4147d2aede6211c323c5703933e6e3 | 7e699753597d0cde0e2eab7a209cd032d40bb2de | refs/heads/master | 2021-01-09T20:08:21.539585 | 2018-04-10T01:53:45 | 2018-04-10T01:53:45 | 63,873,690 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,000 | rd | ds08_IQ_and_Brain_Size.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds08_IQ_and_Brain_Size.R
\name{ds08_IQ_and_Brain_Size}
\alias{ds08_IQ_and_Brain_Size}
\title{Data Set 8: IQ and Brain Size}
\format{A data frame with 20 rows and 9 variables}
\source{
Triola, M. 2018. Elementary Statistics, 13th ed. Boston, MA: Pearson.
}
\description{
The data are obtained from monozygotic (identical) twins. Data provided by M.J.Tramo,
W.C. loftus, T.A. Stukel, J.B. Weaver, M.S. Gazziniga. See "Brain Size, Head Size, and IQ
in Monozygotic Twins, " Neurology, Vol. 50.
\itemize{
\item PAIR identifies the set of twins
\item SEX is the gender of the subject (1 = male, 2 = female)
\item ORDER is the birth order
\item IQ is full-scale IQ
\item VOL is total brain volume (cm^3)
\item AREA is total brain surface area (cm^2)
\item CCSA is corpus callosum (fissure connecting left and right cerebral hemispheres)
surface area (cm^2)
\item CIRC is head circumference (cm)
\item WT is body weight (kg)
}
}
|
951d539f1e0798fd2f1243fd811c105ed1e11170 | 5351411609cb6532f704238a9150804c1fa29dd5 | /R/dsfcruesexample.R | 4fd54cf9bacf7f965b7e42bcbf67c15b98c99530 | [] | no_license | cran/ipptoolbox | feb124cfbc455352ba95f44c8bda65ead40ad956 | eb75273821367bff40eee09e9430c2797558aa2b | refs/heads/master | 2021-01-01T16:31:03.079564 | 2019-01-07T16:10:09 | 2019-01-07T16:10:09 | 17,718,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,238 | r | dsfcruesexample.R | #' @import evd triangle
#' @export
#' Worked flood-risk ("crue") example: propagates Dempster-Shafer structures
#' (p-boxes) for discharge Q, Strickler coefficient Ks and bed/levee levels
#' Zm/Zv through a Manning-type water-level formula Zc = f(Q, Ks, Zm, Zv).
#' Called for its side effects: it opens a series of plots and prints summary
#' quantities (quantiles, Bel/Pl bounds, moments, sensitivity indices).
`dsfcruesexample` <-
function(){
#Load necessary packages (please install if missing)
#library(evd)
#library(triangle)
#Data on Q:
Qdonnes=c(3854, 1256, 1649, 1605, 341, 1149, 868, 1148, 1227, 1991, 1255,
1366, 1100, 1837, 351, 1084, 1924, 843, 2647, 1248, 2417, 1125,
903, 1462, 378, 1230, 1149, 1400, 2078, 1433, 917, 1530, 2442,
2151, 1909, 630, 2435, 1920, 1512, 1377, 3330, 1858, 1359, 714,
1528, 1035, 1026, 1127, 1839, 771, 1730, 1889, 3320, 352, 885,
759, 731, 1711, 1906, 1543, 1307, 1275, 2706, 582, 1260, 1331,
1283, 1348, 1048, 1348, 383, 1526, 789, 811, 1073, 965, 619,
3361, 523, 493, 424, 2017, 1958, 3192, 1556, 1169, 1511, 1515,
2491, 881, 846, 856, 1036, 1830, 1391, 1334, 1512, 1792, 136,
891, 635, 733, 758, 1368, 935, 1173, 547, 669, 331, 227, 2037,
3224, 1525, 766, 1575, 1695, 1235, 1454, 2595, 706, 1837, 1629,
1421, 2204, 956, 971, 1383, 541, 703, 2090, 800, 651, 1153, 704,
1771, 1433, 238, 122, 1306, 733, 793, 856, 1903, 1594, 740, 3044,
1128, 522, 642)
# observed upstream (Zm) and downstream (Zv) water-level data
Zmdonnes=c(55.09, 55, 54.87, 54.28, 54.74, 55.48, 55.36, 55.39, 54.8,
55.18, 54.94, 54.42, 55.34, 55.3, 54.31, 55.57, 54.28, 55.49,
54.49, 55.11, 55.15, 54.4, 55.87, 55.63, 54.93, 55.61, 54.95,
55.38, 54.57)
Zvdonnes=c(50.39, 50.28, 50.23, 49.92, 50.51, 50.42, 50.16, 50.16, 49.76,
50.17, 50.71, 50.08, 49.95, 50.63, 49.51, 50.77, 49.98, 50.3,
50.1, 50.12, 50.54, 49.21, 50.55, 50.67, 50, 50.7, 50.27, 50.06,
49.49)
# hard physical bounds used to truncate the uncertain inputs
Qlimits=c(10,10000)
Kslimits=c(5,60)
Zvlimits=c(48,52.8)
Zmlimits=c(53.2,58)
#Define Pbox with imprecise Gumbel parameter a.
a=dsstruct(c(1000,1040,1));
b=558
#Sample 10000 points from the modified Gumbel distribution
Q=dsadf('qgumbel',10000,a,b)
#Cutting can be performed using Intersection rule
QlimitsBPA=dsstruct(c(Qlimits,1))
Q=dsdempstersrule(Q,QlimitsBPA)
#plot Q
dscdf(Q,xlab="Q",ylab="")
#plot Q versus data in a qq plot
dev.new()
dsqqplot(Q,Qdonnes)
#What is the Kolmogorov-Smirnov probability for the best distribution in Q?
p=dskstest(Qdonnes,Q)
print("K-S probability is:")
print(p)
#Quantify Ks, no distribution, no data. Estimations
#Mean: 30, Min: 5, Max: 60
#5% quantile at 20, 95% quantile at 40
Ks1=dsminmeanmax(1000,5,30,60)
Ks2=dsstruct(rbind(c(5,20,0.05),c(20,40,0.9),c(40,60,0.05)));
dev.new()
dscdf(Ks1,xlab="Ks")
dev.new()
dscdf(Ks2,xlab="Ks")
dev.new()
# combine the two expert structures by intersection
Ks=dsintersect(Ks1,Ks2)
dscdf(Ks,xlab="Ks",ylab="")
#Quantify Zv
#Laplace fitting from data
Zv=dslapconf(Zvdonnes,Zvlimits)
dev.new()
dscdf(Zv,xlab="Zv")
#Quantify Zm
#Kolmogorov-Smirnov 50% fitting from data
Zm=dsksconf(Zmdonnes,conf=0.5,Zmlimits)
dev.new()
dscdf(Zm,xlab="Zm")
#Define the function fcrue
# x is a matrix with one column per input (Q, Ks, Zm, Zv); pmax(., 0) guards
# against negative sampled values; const/length/B are fixed channel constants
# (note: `length` here is a local numeric, shadowing base::length inside fcrue)
fcrue=function(x,...){
Q=x[,1];
Ks=x[,2];
Zm=x[,3];
Zv=x[,4];
const=1;
length=5000;
B=300;
result=Zv+(pmax(Q,0)*const/(pmax(Ks,0)*B*sqrt((Zm - Zv)/length)))^0.6;
}
#Evaluate the function, propagate the uncertainty
#If function is nonmonotonous, choose 'dsopt' as internal optimizer (slow)
#If function is monotonously increasing, choose 'dsbound' (faster)
#If function is monotonous, but you don't know if increasing or decreasing, choose 'dsmonotonous' (medium)
#10000 samples
temp=dsevalmc(fcrue,list(Q,Ks,Zm,Zv),10000,dsmonotonous);
Zc=temp[[1]]
dev.new()
dscdf(Zc,xlab="Zc",ylab="",xrange=c(48,65))
# summary quantities of the resulting water level Zc
print("Median")
print(dsconf(Zc,0.5))
print("Q99 with 95% Wilks bounds")
print(dsconf(Zc,0.99,0.95))
print("Bel/Pl(Zc<=55.5)")
print(dsbelpl(Zc,c(-Inf,55.5)))
print("Bel/Pl(Zc>=55.5)")
print(dsbelpl(Zc,c(55.5,Inf)))
print("Exp. value")
print(dsexpect(Zc))
print("Variance, standard deviation")
print(dsvariance(Zc))
print(sqrt(dsvariance(Zc)))
print("Aggregated width")
print(dsaggwidth(Zc))
#Do sensitivity analysis
# pinching each input in turn and measuring the change in aggregated width
sens=(dssensitivity(list(Q,Ks,Zm,Zv),c(1,2,3,4),fcrue,dsaggwidth,mcIT=20,pinch_samples=20,pinch_type='distribution'));
dev.new()
barplot(sens,beside=TRUE,names=list("Q","Ks","Zm","Zv"))
title("Sensitivity on aggregated width")
#Study with correlation between Zm and Zv
# corr gives the upper triangle of the input correlation matrix; the last
# entry (0.66) is the Zm-Zv pair
temp=dsevalmc(fcrue,list(Q,Ks,Zm,Zv),10000,dsmonotonous,corr=c(0,0,0,0,0,0.66));
Zccorr=temp[[1]]
dev.new()
dscdf(Zccorr,xlab="Zccorr",ylab="",xrange=c(48,65))
}
|
48c78163ba54d60103f9ce8602fcf12be31c33d4 | 9f4c069e7a174b333bf7b315853e0040eedc1eb5 | /man/Fallow.Postseason.Daily.ET.Calc.Rd | 869b8e8dbe8e8419d2adc700756bcc07b4b0addc | [] | no_license | amitghosh-ag/CropWatR | 6fe4ecbf7212773243e9f7413503407acc5724d7 | 90ce6155a54d0434b0e13064f194b71e962539de | refs/heads/master | 2020-05-07T08:35:53.860173 | 2019-04-09T09:58:09 | 2019-04-09T09:58:09 | 180,337,562 | 0 | 1 | null | 2019-04-09T09:56:53 | 2019-04-09T09:56:53 | null | UTF-8 | R | false | false | 17,202 | rd | Fallow.Postseason.Daily.ET.Calc.Rd | \name{Fallow.Postseason.Daily.ET.Calc}
\alias{Fallow.Postseason.Daily.ET.Calc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Fallow.Postseason.Daily.ET.Calc(Croplayer, Overwrite = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Croplayer}{
%% ~~Describe \code{Croplayer} here~~
}
\item{Overwrite}{
%% ~~Describe \code{Overwrite} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (Croplayer, Overwrite = FALSE)
{
load(paste0(Intermediates, paste("Fallow.Saved", Croplayer,
"Rdata", sep = ".")))
Post.ETo <- Fallow.File[[2]]
Post.Precip <- Fallow.File[[4]]
Post.Few <- Fallow.File[[6]]
Post.ROi <- Fallow.File[[8]]
Qfc.minus.Qwp <- Fallow.File[[9]]
Post.Dei <- Fallow.File[[11]]
TAW <- Fallow.File[[12]]
TEW <- Fallow.File[[13]]
REW <- Fallow.File[[14]]
Post.Kr <- Post.Precip
Post.Ke <- Post.Precip
Post.Dei <- Post.Precip
Post.DPei <- Post.Precip
Post.Kcb.tot <- Post.Precip
Post.E <- Post.Precip
Post.Fw <- Post.Precip
Post.Dr <- Post.Precip
Post.DP <- Post.Precip
Post.Ks <- Post.Precip
Post.Kcb.tot <- Post.Precip
Post.Pval <- Post.Precip
Post.TAW <- Post.Precip
Post.RAW <- Post.Precip
Post.Kcb <- Post.Precip
if (file.exists(paste0(Intermediates, paste("KcMax.Fallow",
Croplayer, "Rdata", sep = "."))) == FALSE) {
KcMAX.fallow(Croplayer)
}
load(paste0(Intermediates, paste("KcMax.Fallow", Croplayer,
"Rdata", sep = ".")))
KcMax <- lapply(KcMax, function(x) x[, (grep("layer", names(x)))])
DaysRow <- lapply(Post.Precip, function(x) as.numeric(gsub("layer.",
"", names(x))))
Cuts <- lapply(DaysRow, function(x) which(diff(x) > 1))
Cuts <- sapply(Cuts, function(x) replace(x, length(x) ==
0, 0))
LengthCheck <- unlist(lapply(DaysRow, length))
CutCheck <- unlist(Cuts)
for (i in 1:length(KcMax)) {
if (length(CutCheck) == 0) {
KcMax[[i]] <- KcMax[[i]][1:length(Post.Precip[[i]])]
}
if (length(Cuts[[i]]) == 0) {
KcMax[[i]] <- KcMax[[i]][1:length(KcMax[[i]])]
}
else {
KcMax[[i]] <- KcMax[[i]][, Cuts[[i]]:length(KcMax[[i]])]
}
while (length(KcMax[[i]]) > length(Post.Precip[[i]])) {
KcMax[[i]] <- KcMax[[i]][, 1:length(KcMax[[i]]) -
1]
}
}
print("Post Season KcMax layer lengths equal?:")
print(all.equal(lapply(KcMax, length), lapply(Post.Precip,
length)))
Kcb <- 0.55
load(paste0(Intermediates, paste("Growing.Season", Croplayer,
"Precip_", "Rdata", sep = ".")))
Precip <- Growing.Season
rm(Growing.Season)
Qfc.minus.Qwp <- lapply(Precip, function(x) x$Qfc.minus.Qwp)
root.depth <- 0.1
TAW <- lapply(Qfc.minus.Qwp, function(x) 1000 * (x[] * root.depth))
TEW <- lapply(Precip, function(x) x$ave_TEW)
Dei <- TEW
REW <- lapply(Precip, function(x) x$ave_REW)
if (!file.exists(paste0(Intermediates, paste("Postseason_Deep.Percolation",
Croplayer, "Rdata", sep = "."))) | Overwrite == TRUE) {
Others <- c("switchgrass", "miscanthus", "idle_cropland",
"pasture_grass", "silage")
load("Vars.Rdata")
if (Croplayer \%in\% Vars) {
setwd(paste0(Path, "/CropWatR/Intermediates/"))
load(paste("Growing.Season_Root.Zone.Depletion",
Croplayer, "Rdata", sep = "."))
load(paste("Growing.Season_Deep.Percolation", Croplayer,
"Rdata", sep = "."))
load(paste("Growing.Season_Runoff", Croplayer, "Rdata",
sep = "."))
load(paste("Growing.Season_Soil.Evaporation", Croplayer,
"Rdata", sep = "."))
load(paste("Growing.Saved", Croplayer, "Rdata", sep = "."))
load(paste("Growing.Season_Soil.Water.Balance", Croplayer,
"Rdata", sep = "."))
DPe <- local(get(load(file = paste("Growing.Season.Root.Zone.Percolation.Loss",
Croplayer, "Rdata", sep = "."))))
load(file = paste("Growing.Season.Evaporation.Fractions",
Croplayer, "Rdata", sep = "."))
setwd(paste0(Path, "/CropWatR/Data"))
}
if (Croplayer \%in\% Others) {
setwd(paste0(Path, "/CropWatR/Intermediates/"))
load(paste("Growing.Season_Root.Zone.Depletion",
Croplayer, "Rdata", sep = "."))
load(paste("Growing.Season_Deep.Percolation", Croplayer,
"Rdata", sep = "."))
load(paste("Growing.Season_Runoff", Croplayer, "Rdata",
sep = "."))
load(paste("Growing.Season_Soil.Evaporation", Croplayer,
"Rdata", sep = "."))
load(paste("Growing.Saved", Croplayer, "Rdata", sep = "."))
load(paste("Growing.Season_Soil.Water.Balance", Croplayer,
"Rdata", sep = "."))
load(file = paste("Growing.Season.Root.Zone.Percolation.Loss",
Croplayer, "Rdata", sep = "."))
load(file = paste("Growing.Season.Evaporation.Fractions",
Croplayer, "Rdata", sep = "."))
setwd(paste0(Path, "/CropWatR/Data"))
}
ETo <- Growing.Files[[1]]
Precip <- Growing.Files[[2]]
ROi <- Growing.Files[[3]]
Irr <- Growing.Files[[4]]
Fw <- Growing.Files[[5]]
print("starting calculation of post season")
for (i in 1:length(Post.Precip)) {
for (j in 1:length(Post.Precip[[i]])) {
Kcb <- 0.75
if (j == 1) {
Post.Fw[[i]][, j] <- Few[[i]][, length(Few[[i]])]
Post.Kr[[i]][, j][De[[i]][, length(De[[i]])] >
REW[[i]]] <- (TEW[[i]][De[[i]][, length(De[[i]])] >
REW[[i]]] - De[[i]][, length(De[[i]])][De[[i]][,
length(De[[i]])] > REW[[i]]])/(TEW[[i]][De[[i]][,
length(De[[i]])] > REW[[i]]] - REW[[i]][De[[i]][,
length(De[[i]])] > REW[[i]]])
Post.Kr[[i]][, j][De[[i]][, length(De[[i]])] <=
REW[[i]]] <- 1
Post.Kr[[i]][, j][Post.Kr[[i]][, j] < 0] <- 0
Post.Ke[[i]][, j] <- pmin.int(Post.Kr[[i]][,
j] * (KcMax[[i]][, j] - Kcb), Post.Few[[i]][,
j] * KcMax[[i]][, j])
Post.Ke[[i]][, j][Post.Ke[[i]][, j] < 0] <- 0
Post.E[[i]][, j] <- Post.Ke[[i]][, j] * Post.ETo[[i]][,
j]
Post.DPei[[i]][, j] <- (Post.Precip[[i]][,
j] - Post.ROi[[i]][, j]) - De[[i]][, length(De[[i]])]
Post.DPei[[i]][, j][Post.DPei[[i]][, j] < 0] <- 0
Post.Dei[[i]][, j] <- De[[i]][, length(De[[i]])] -
(Post.Precip[[i]][, j] - Post.ROi[[i]][,
j]) + (Post.E[[i]][, j]/Post.Few[[i]][,
j]) + DPe[[i]][, length(DPe[[i]])]
Post.Dei[[i]][, j][Post.Dei[[i]][, j] < 0] <- 0
Post.Dei[[i]][, j][Post.Dei[[i]][, j] > TEW[[i]]] <- TEW[[i]][Post.Dei[[i]][,
j] > TEW[[i]]]
Post.Kcb[[i]][, j] <- (Kcb + Post.Ke[[i]][,
j]) * Post.ETo[[i]][, j]
Post.Kcb.tot[[i]][, j] <- (Kcb) * Post.ETo[[i]][,
j]
P.value <- 0.1
Post.Pval[[i]][, j] <- P.value + 0.02 * (5 -
(Post.Kcb.tot[[i]][, j]))
Post.Pval[[i]][, j][Post.Pval[[i]][, j] < 0.1] <- 0.1
Post.Pval[[i]][, j][Post.Pval[[i]][, j] > 0.8] <- 0.8
Root.depth <- 0.1 + 0.002 * j
Post.TAW[[i]][, j] <- TAW[[i]] * Root.depth
Post.RAW[[i]][, j] <- Post.Pval[[i]][, j] *
Post.TAW[[i]][, j]
Per.of.field.capacity <- 0.2
Post.Dr[[i]][, j] <- Post.TAW[[i]][, j] * Per.of.field.capacity
Post.Dr[[i]][, j] <- Post.Dr[[i]][, j] - (Post.Precip[[i]][,
j] - Post.ROi[[i]][, j]) + Post.Kcb.tot[[i]][,
j] + Post.DP[[i]][, j]
Post.Dr[[i]][, j][Post.Dr[[i]][, j] < 0] <- 0
Post.Dr[[i]][, j][Post.Dr[[i]][, j] > Post.TAW[[i]][,
j]] <- Post.TAW[[i]][, j][Post.Dr[[i]][,
j] > Post.TAW[[i]][, j]]
Post.Ks[[i]][, j][Post.Dr[[i]][, j] > Post.RAW[[i]][,
j]] <- ((Post.TAW[[i]][, j] - Post.Dr[[i]][,
j])[Post.Dr[[i]][, j] > Post.RAW[[i]][, j]])/((1 -
Post.Pval[[i]][, j][Post.Dr[[i]][, j] > Post.RAW[[i]][,
j]]) * Post.TAW[[i]][, j][Post.Dr[[i]][,
j] > Post.RAW[[i]][, j]])
Post.Ks[[i]][, j][Post.Dr[[i]][, j] <= Post.RAW[[i]][,
j]] <- 1
Post.DP[[i]][, j] <- (Post.Precip[[i]][, j] -
Post.ROi[[i]][, j]) - Post.Kcb.tot[[i]][,
j] - Dr[[i]][, length(Dr[[i]])]
Post.DP[[i]][, j][Post.Dr[[i]][, j] > 0] <- 0
Post.DP[[i]][, j][Post.DP[[i]][, j] < 0] <- 0
Post.Kcb.tot[[i]][, j] <- (Post.Ks[[i]][, j] *
Post.Kcb.tot[[i]][, j]) * Post.ETo[[i]][,
j]
Post.Kcb[[i]][, j] <- (Post.Ks[[i]][, j] *
Post.Kcb[[i]][, j] + Post.Ke[[i]][, j]) *
Post.ETo[[i]][, j]
Post.DPei[[i]][, j] <- (Post.Precip[[i]][,
j] - Post.ROi[[i]][, j]) - De[[i]][, length(De[[i]])]
Post.DPei[[i]][, j][Post.DPei[[i]][, j] < 0] <- 0
}
else {
Kcb <- Kcb - 0.003 * j
Kcb[Kcb < 0.005] <- 0.005
Post.Fw[[i]][, j] <- Post.Few[[i]][, j - 1]
Post.Few[[i]][, j] <- pmin.int(Post.Few[[i]][,
j], Post.Fw[[i]][, j])
Post.Kr[[i]][, j][Post.Dei[[i]][, (j - 1)] >
REW[[i]]] <- (TEW[[i]][Post.Dei[[i]][, (j -
1)] > REW[[i]]] - Post.Dei[[i]][, (j - 1)][Post.Dei[[i]][,
(j - 1)] > REW[[i]]])/(TEW[[i]][Post.Dei[[i]][,
(j - 1)] > REW[[i]]] - REW[[i]][Post.Dei[[i]][,
(j - 1)] > REW[[i]]])
Post.Kr[[i]][, j][Post.Dei[[i]][, (j - 1)] <=
REW[[i]]] <- 1
Post.Kr[[i]][, j][Post.Kr[[i]][, j] < 0] <- 0
Post.Ke[[i]][, j] <- pmin.int(Post.Kr[[i]][,
j] * (KcMax[[i]][, j] - Kcb), Post.Few[[i]][,
j] * KcMax[[i]][, j])
Post.Ke[[i]][, j][Post.Ke[[i]][, j] < 0] <- 0
Post.E[[i]][, j] <- Post.Ke[[i]][, j] * Post.ETo[[i]][,
j]
if (length(Post.E[[i]][, j][Post.E[[i]][, j] >
5]) > 0) {
print("Evaporation triggered:")
print("day col:")
print(j)
print("State code")
print(names(Post.Precip[i]))
print("Evap profile")
print(Post.E[[i]][, j][Post.E[[i]][, j] >
5])
print("ETo profile")
print(Post.ETo[[i]][, j][Post.E[[i]][, j] >
5])
print("Ke profile")
print(Post.Ke[[i]][, j][Post.E[[i]][, j] >
5])
}
Post.DPei[[i]][, j] <- (Post.Precip[[i]][,
j] - Post.ROi[[i]][, j]) - Post.Dei[[i]][,
(j - 1)]
Post.DPei[[i]][, j][Post.DPei[[i]][, j] < 0] <- 0
Post.Dei[[i]][, j] <- Post.Dei[[i]][, (j -
1)] - (Post.Precip[[i]][, j] - Post.ROi[[i]][,
j]) + (Post.E[[i]][, j]/Post.Few[[i]][, j]) +
Post.DPei[[i]][, j]
Post.Dei[[i]][, j][Post.Dei[[i]][, j] < 0] <- 0
Post.Dei[[i]][, j][Post.Dei[[i]][, j] > TEW[[i]]] <- TEW[[i]][Post.Dei[[i]][,
j] > TEW[[i]]]
Post.Kcb[[i]][, j] <- (Kcb + Post.Ke[[i]][,
j]) * Post.ETo[[i]][, j]
Post.Kcb.tot[[i]][, j] <- (Kcb) * Post.ETo[[i]][,
j]
P.value <- 0.05
Post.Pval[[i]][, j] <- P.value + 0.04 * (5 -
(Post.Kcb.tot[[i]][, j]))
Post.Pval[[i]][, j][Post.Pval[[i]][, j] < 0.1] <- 0.1
Post.Pval[[i]][, j][Post.Pval[[i]][, j] > 0.8] <- 0.8
Root.depth <- 0.05 + 0.002 * j
Post.TAW[[i]][, j] <- TAW[[i]] * Root.depth
Post.RAW[[i]][, j] <- Post.Pval[[i]][, j] *
Post.TAW[[i]][, j]
Post.Dr[[i]][, j] <- Post.Dr[[i]][, (j - 1)] -
(Post.Precip[[i]][, j] - Post.ROi[[i]][,
j]) + Post.Kcb.tot[[i]][, j] + Post.DP[[i]][,
(j - 1)]
Post.Dr[[i]][, j][Post.Dr[[i]][, j] < 0] <- 0
Post.Dr[[i]][, j][Post.Dr[[i]][, j] > Post.TAW[[i]][,
j]] <- Post.TAW[[i]][, j][Post.Dr[[i]][,
j] > Post.TAW[[i]][, j]]
Post.Ks[[i]][, j][Post.Dr[[i]][, j] > Post.RAW[[i]][,
j]] <- ((Post.TAW[[i]][, j] - Post.Dr[[i]][,
j])[Post.Dr[[i]][, j] > Post.RAW[[i]][, j]])/((1 -
Post.Pval[[i]][, j][Post.Dr[[i]][, j] > Post.RAW[[i]][,
j]]) * Post.TAW[[i]][, j][Post.Dr[[i]][,
j] > Post.RAW[[i]][, j]])
Post.Ks[[i]][, j][Post.Dr[[i]][, j] <= Post.RAW[[i]][,
j]] <- 1
Post.DP[[i]][, j] <- (Post.Precip[[i]][, j] -
Post.ROi[[i]][, j]) - Post.Kcb.tot[[i]][,
j] - Post.Dr[[i]][, j - 1]
Post.DP[[i]][, j][Post.Dr[[i]][, j] > 0] <- 0
Post.DP[[i]][, j][Post.DP[[i]][, j] < 0] <- 0
Post.Kcb.tot[[i]][, j] <- (Post.Ks[[i]][, j] *
Post.Kcb.tot[[i]][, j]) * Post.ETo[[i]][,
j]
Post.Kcb[[i]][, j] <- (Post.Ks[[i]][, j] *
Post.Kcb[[i]][, j] + Post.Ke[[i]][, j]) *
Post.ETo[[i]][, j]
Post.DPei[[i]][, j] <- (Post.Precip[[i]][,
j] - Post.ROi[[i]][, j]) - Post.Dei[[i]][,
j - 1]
Post.DPei[[i]][, j][Post.DPei[[i]][, j] < 0] <- 0
print(mean(Post.E[[i]][, j], na.rm = TRUE))
print(mean(Post.Kcb.tot[[i]][, j], na.rm = TRUE))
}
}
}
print("Calculation of Postseason daily soil water balance, deep percolation, and evaporation complete")
setwd(paste0(Path, "/CropWatR/Intermediates/"))
save(Post.Dei, file = paste("Postseason_Soil.Water.Balance",
Croplayer, "Rdata", sep = "."))
save(Post.DP, file = paste("Postseason_Deep.Percolation",
Croplayer, "Rdata", sep = "."))
save(Post.ROi, file = paste("Postseason_Runoff", Croplayer,
"Rdata", sep = "."))
Post.KeETo <- Post.E
save(Post.KeETo, file = paste("Postseason_Soil.Evaporation",
Croplayer, "Rdata", sep = "."))
save(Post.Kcb.tot, file = paste("Postseason_Weed.Transpiration",
Croplayer, "Rdata", sep = "."))
setwd(paste0(Path, "/CropWatR/Data"))
print("Postseason files saved")
}
if (file.exists(paste0(Intermediates, paste("Postseason_Deep.Percolation",
Croplayer, "Rdata", sep = "."))) == TRUE && Overwrite ==
FALSE) {
print(paste("Post Season already estimated for", Croplayer))
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
bbc5ff057ca45ffcf0f52476670f8828b53b9e9a | e9ac96c253d4d49e7253783ce51f4c03f990f93c | /src/sp_final_ensemble/submissions/sp_01_splsda.R | d0ebbf5cb46ce7833d9585cac64394874d40459e | [] | no_license | zztopper/lung_cancer_ds_bowl | 730cc893fd23a69c70fecb6e2658eaa674958293 | 5dd769af04089fe117e7d53c0dbbf46b50ff4799 | refs/heads/master | 2020-06-20T14:47:28.022422 | 2017-09-23T11:43:49 | 2017-09-23T11:43:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,806 | r | sp_01_splsda.R | library(data.table)
library(pls)
library(spls)
# library(mixOmics)
library(plyr)
library(dplyr)
library(caret)
# library(mlr)
library(Metrics)
set.seed(1)
# submission df
# patient format is bad - will be improved
df_submit <- read.csv("../data/stage1_sample_submission.csv", stringsAsFactors = FALSE)
colnames(df_submit) <- c("patientid", "cancer")
# labels
df_cancer <- read.csv("../data/stage1_labels.csv", stringsAsFactors = FALSE)
colnames(df_cancer) <- c("patientid", "cancer")
# df_nodule <- fread("../data/output_dl_example_augmented.csv")
df_nodule <- fread("../data/nodules_v06_score07_augmented.csv")
df_nodule[, `Unnamed: 0` := NULL]
df_nodule$patientid <- gsub("(.+)(dsb_|luna_)([[:alnum:]]+)(\\.npz)", "\\3", df_nodule$patientid)
# aggregate patients
df_nodule %<>%
group_by(patientid) %>%
summarise_each(funs(length, min, median, mean, max, sd)) %>%
as.data.table
# any missing patient?
sum(df_submit$patientid %in% df_nodule$patientid)
dim(df_submit)
# background proportion for missing patient imputation
NA_prop <- mean(df_cancer$cancer)
df_submit$cancer <- NA_prop
# remove features that will have no variance
col_length <- grep(".+_length$", colnames(df_nodule), value = TRUE)
df_nodule$n_patches <- df_nodule[[col_length[1]]]
df_nodule[, (col_length) := NULL]
df_nodule[, c("10_lungmask_min", "10_lungmask_median", "10_lungmask_max", "10_lungmask_sd") := NULL]
# some sd can be na: collapse them to 0
sum(is.na(df_nodule))
df_nodule[is.na(df_nodule)] <- 0
# save features
write.csv(df_nodule, file = "submissions/sp_01_features.csv", row.names = F, quote = F)
# inner join, add cancer column
df_model <- df_nodule[df_cancer, on = "patientid", nomatch = 0]
#df_model$cancer <- as.factor(ifelse(df_model$cancer, "yes", "no"))
# Split train-test
set.seed(2)
inTraining <- caret::createDataPartition(df_model$cancer, p = .85, list = FALSE)
# lapply(row_train, length)
training <- df_model[ inTraining, !("patientid"), with = FALSE]
testing <- df_model[-inTraining, !("patientid"), with = FALSE]
# To assess performance, cv in train
set.seed(3)
mod_splscv <- spls::cv.splsda(
x = as.matrix(training[, !"cancer"]),
y = training$cancer,
K = 1:8,
eta = seq(from = .6, to = .95, by = .05),
classifier = "logistic",
n.core = 4
)
mod_spls <- spls::splsda(
x = as.matrix(training[, !"cancer"]),
y = training$cancer,
K = mod_splscv$K.opt,
eta = mod_splscv$eta.opt,
classifier = "logistic")
mod_spls
# predict test set
ypred <- spls::predict.splsda(
mod_spls,
testing[, !"cancer"],
fit.type = "response"
)
# quality metrics
Metrics::logLoss(testing$cancer, ypred)
Metrics::auc(testing$cancer, ypred)
# Final model
# Train with all the samples (cv for param optim)
set.seed(4)
mod_splscv_final <- spls::cv.splsda(
x = as.matrix(df_model[, !c("cancer", "patientid")]),
y = df_model$cancer,
K = 1:8,
eta = seq(from = .6, to = .95, by = .05),
classifier = "logistic",
n.core = 4
)
mod_spls_final <- spls::splsda(
x = as.matrix(df_model[, !c("cancer", "patientid")]),
y = df_model$cancer,
K = mod_splscv_final$K.opt,
eta = mod_splscv_final$eta.opt,
classifier = "logistic")
mod_spls_final
# save results
df_eval <- df_nodule[patientid %in% df_submit$patientid]
pred_out <- spls::predict.splsda(
mod_spls_final,
newx = df_eval[, !"patientid"],
fit.type = "response"
)
names(pred_out) <- df_eval$patientid
pat_na <- setdiff(df_submit$patientid, df_eval$patientid)
v_na <- setNames(rep(NA_prop, length(pat_na)), pat_na)
v_out <- c(pred_out, v_na)
df_out <- data.frame(
id = names(v_out),
cancer = v_out,
stringsAsFactors = FALSE
)
write.csv(df_out, file = "submissions/sp_01_splsda.csv", row.names = FALSE, quote = FALSE)
# SANITY CHECKS and stuff
#
# x <- read.csv("~/Downloads/05_submission.csv", stringsAsFactors = FALSE)
# all(df_out$id %in% x$id)
# all(dim(df_out) == dim(x))
#
#
#
# # See used features when pls is forced to sparsify
# set.seed(3)
# mod_splscv <- spls::cv.splsda(
# x = as.matrix(training[, !"cancer"]),
# y = training$cancer,
# K = 1:8,
# eta = .9,
# classifier = "logistic",
# n.core = 4
# )
# mod_spls <- spls::splsda(
# x = as.matrix(training[, !"cancer"]),
# y = training$cancer,
# K = mod_splscv$K.opt,
# eta = mod_splscv$eta.opt,
# classifier = "logistic")
# mod_spls
#
# ypred <- spls::predict.splsda(
# mod_spls,
# testing[, !"cancer"],
# fit.type = "response"
# )
#
# Metrics::logLoss(testing$cancer, ypred)
# Metrics::auc(testing$cancer, ypred)
# mixomics
#
# PETA!
#
# library(mixOmics)
#
# mo_splsda <- mixOmics::tune.splsda(
# X = as.matrix(training[, !"cancer"]),
# Y = ifelse(training$cancer == "yes", 1, 0),
# ncomp = 3,
# test.keepX = c(5, 10),
# folds = 3,
# progressBar = TRUE)
|
3ae494a9cae9ee37466e7d612dd8d69a8c9f8aa2 | 570285c78d1be229cedb297004ff0cef4b9b849f | /R/case_anarbe.R | cc347c94a3a9a339310ffd271b0eb6823784ab96 | [] | no_license | Yuanchao-Xu/hyfo | d2a7067fcea4d119bf8e269bc69d74bf0e7895d7 | 1895ea5dd0e8819f26f5a8b0a1bd045e2c83b41c | refs/heads/master | 2023-08-18T03:12:27.278809 | 2023-08-15T15:36:14 | 2023-08-15T15:36:14 | 37,417,362 | 18 | 5 | null | null | null | null | UTF-8 | R | false | false | 12,790 | r | case_anarbe.R | #' Collect data from csv for Anarbe case.
#'
#' Collect data from the gauging stations in spain, catchement Anarbe
#'
#' @param folderName A string showing the path of the folder holding different csv files.
#' @param output A boolean showing whether the output is given, default is T.
#' @return The collected data from different csv files.
#' @examples
#'
#' #use internal data as an example.
#' file <- system.file("extdata", "1999.csv", package = "hyfo")
#' folder <- strsplit(file, '1999')[[1]][1]
#' a <- collectData_csv_anarbe(folder)
#'
#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/
#'
#' @references
#'
#' \itemize{
#' \item http://meteo.navarra.es/estaciones/mapadeestaciones.cfm
#' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for
#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
#' }
#'
#' @source http://meteo.navarra.es/estaciones/mapadeestaciones.cfm
#' @export
#' @importFrom utils tail
#' @importFrom data.table rbindlist
collectData_csv_anarbe <- function(folderName, output = TRUE){
fileNames <- list.files(folderName, pattern='*.csv', full.names = TRUE)
data <- lapply(fileNames, readColumn_csv_anarbe)
data <- rbindlist(data)
data <- data[, 1:2]
# cus the special structure of data.tables, here should be data[[1]], instead of data[, 1]
data[, 1] <- as.Date(data[[1]], format = '%d/%m/%Y')
#newFileName <- file.choose(new = T)
#write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',')
a <- unlist(strsplit(folderName, '\\\\|/'))
tarName <- tail(a, 2)[1]
colnames(data) <- c('Date', tarName)
if (output) return(data)
}
readColumn_csv_anarbe <- function(fileName){
data <- read.csv(fileName, skip = 4)
endIndex <- which(data == '', arr.ind = TRUE)[1]-1
data <- data[1:endIndex, ]
if (!is.null(levels(data[, 2]))) {
data[, 2] <- as.numeric(levels((data[, 2])))[data[, 2]]
}
colnames(data) <- c('Date', 'target')
message(fileName)
return(data)
}
#' Collect data from different excel files
#'
#' @param folderName A string showing the folder path.
#' @param keyword A string showing the extracted column, e.g., waterLevel, waterBalance.
#' @param output A boolean showing whether the output is given.
#' @return The collected data from different excel files.
#' @export
#' @references
#'
#' \itemize{
#' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for
#' Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.
#' }
# @importFrom utils write.table
collectData_excel_anarbe <- function(folderName, keyword = NULL, output = TRUE){
message('In order to make "hyfo" easier to be installed, this part is commented,
check original R file in your computer or go to
https://github.com/Yuanchao-Xu/hyfo/blob/master/R/collectData_excel.R
for ideas.')
# newFileName <- file.choose(new = TRUE)
# message ('new file should be located a different location than the excel folder,
# in order to avoid error.
# At least 2 excels should be in the folder\n')
#
# message ('this function only applies to strange spain dem operation record file, and this strange file changes
# its format in the middle of the record. For other applications, some tiny changes needs to be made.')
# if (is.null(keyword)) stop('key word is needed, e.g."waterLevel".')
#
# fileNames <- list.files(folderName, pattern = '*.xls', full.names = TRUE)
# data <- lapply(fileNames, FUN = readColumn_excel_anarbe, keyword = keyword)
# checkBind(data, 'rbind')
# data <- do.call('rbind', data)
#
# data_new <- data.frame(data)
#
# data_new <- data_new[order(data_new[, 1]), ]
#
#
# startDate <- data_new[1, 1]
# endDate <- data_new[length(data_new[, 1]), 1]
#
# Date <- as.factor(seq(startDate, endDate, by = 1))
#
# if (length(Date) != length(data_new[, 1])) stop('check if the excel files are continuous')
#
# colnames(data_new) <- c('Date', keyword)
#
# write.table(data_new, file = newFileName,
# row.names = FALSE, col.names = TRUE, sep = ',')
# if(output == TRUE) return(data_new)
}
#
# @importFrom xlsx read.xlsx
# readTable_excel_anarbe <- function(fileName){
#
# index <- tail(strsplit(fileName, '\\.|\\ ')[[1]], 3)
# raw_year <- index[1]
# raw_mon <- index[2]
#
# raw <- read.xlsx(fileName, sheetName='A')
# startRow <- which(raw == 'COTA', arr.ind = TRUE)[1]+4
# startCol <- which(raw == 'COTA',arr.ind = TRUE)[2]-1
# stopRow <- which(raw =='TOTAL',arr.ind = TRUE)[1]-1
# stopCol1 <- startCol + 17
# stopCol2 <- which(raw == 'SUPERFICIE', arr.ind = TRUE)[2]
# data <- cbind(raw[startRow:stopRow,startCol:stopCol1], raw[startRow:stopRow,stopCol2])
#
#
# yearIndex <- rep(raw_year, stopRow-startRow+1)
# monIndex <- rep(raw_mon, stopRow-startRow+1)
#
# data <- cbind(yearIndex, monIndex, data)
# return(data)
# }
# #
# @importFrom utils tail
# readColumn_excel_anarbe <- function(fileName, keyword = NULL){
#
# index <- tail(strsplit(fileName, '\\.|\\ ')[[1]],3)
# year <- as.numeric(index[1])
# mon <- as.numeric(index[2])
#
# if (year == 99) {
# year = year + 1900
# } else year = year + 2000
#
# word = c('COTA', 'Cota\n(m)', 'TOTAL', ' TOTAL')
#
# if (keyword == 'waterLevel') {
# searchWord <- c('COTA', 'Cota\n(m)')
# } else if (keyword == 'discharge_ERE') {
# searchWord <- c('AF.ERE-', 'Caudal\n(m??/s)')
# } else if (keyword == 'waterBalance') {
# searchWord <- c('INCREMENTO', 'al Canal Bajo', 'AFORO',
# 'Variaci??n\nvolumen embalsado')
# } else if (keyword == 'surfaceArea') {
# searchWord <- c('SUPERFICIE', 'SUPERFICIE')
# } else if (keyword == 'volume') {
# searchWord <- c('EMBALSADO', 'Volumen\n(m????)')
# }
#
#
# if (year == 1999 | year < 2009 | (year == 2009 & mon < 5)) {
# raw <- xlsx::read.xlsx(fileName, sheetName = 'A')
# startIndex <- which(raw == word[1], arr.ind = TRUE)
# endIndex <- which(raw == word[3], arr.ind = TRUE)
# startRow <- startIndex[1]+4
# endRow <- endIndex[1]-1
#
# dayCol <- endIndex[2]
# day <- raw[startRow:endRow, dayCol]
#
# targetCol <- which(raw == searchWord[1], arr.ind = TRUE)[2]
#
# if (is.na(targetCol)) stop(sprintf('capture nothing in %s', fileName))
#
# if (keyword == 'waterBalance') {
# targetStart <- targetCol
# targetEnd <- which(raw == searchWord[3], arr.ind = TRUE)[2]
# a <- raw[startRow:endRow, targetStart:targetEnd]
# a <- sapply(a, function(x) as.numeric(levels(x)[x]))
#
# if (year == 1999 & mon == 4) {
#
# target <- data.frame(a[, 2] * 86.4, a[, 5] * 86.4, rep(NA, dim(a)[1]), a[, 6] * 86.4,
# a[, 4] * 86.4, a[, 11] * 86.4, a[, 3], a[, 7], rep(NA, dim(a)[1]), a[, 1])
# } else {
# target <- data.frame(a[, 2] * 86.4, a[, 5] * 86.4, a[, 6] * 86.4, a[, 7] * 86.4,
# a[, 4] * 86.4, a[, 12] * 86.4, a[, 3], a[, 8], rep(NA, dim(a)[1]), a[, 1])
# }
#
# } else {
# target <- raw[startRow:endRow, targetCol]
# if (keyword == 'discharge_ERE') target <- as.numeric(levels(target))[target]/1000
# }
#
# } else {
# raw <- read.xlsx(fileName,sheetName = 'parte del embalse')
# startIndex <- which(raw == word[2], arr.ind = TRUE)
# endIndex <- which(raw == word[4], arr.ind = TRUE)
# startRow <- startIndex[1]+1
# endRow <- endIndex[1]-2
#
# dayCol <- endIndex[2]
# day <- raw[startRow:endRow, dayCol]
# targetCol <- which(raw == searchWord[2], arr.ind=TRUE)[2]
# if (is.na(targetCol)) stop(sprintf('capture nothing in %s', fileName))
#
# if (keyword == 'waterBalance') {
# targetStart <- targetCol
# targetEnd <- which(raw == searchWord[4], arr.ind=TRUE)[2]
# target <- raw[startRow:endRow, targetStart:targetEnd]
#
# } else {
# target <- raw[startRow:endRow, targetCol]
# }
#
# }
#
#
# startDate <- as.Date(paste(year, mon, day[1], sep = '-'))
# endDate <- as.Date(paste(year, mon, tail(day,1), sep = '-'))
#
# Date <- seq(startDate, endDate, 1)
# output <- data.frame(Date, as.vector(target))
# colnames(output) <- c('Date', seq(1, dim(output)[2] - 1))
# message(fileName)
# return(output)
#
# }
#
#' collect data from different txt.
#'
#' @param folderName A string showing the folder path.
#' @param output A boolean showing whether the result is given.
#' @param rangeWord A list containing the keyword and the shift.
#' defaut is set to be used in spain gauging station.
#' @examples
#'
#' #use internal data as an example.
#'
#' \dontrun{
#' file <- system.file("extdata", "1999.csv", package = "hyfo")
#' folder <- strsplit(file, '1999')[[1]][1]
#' a <- collectData_txt_anarbe(folder)
#' }
#'
#'
#' # More examples can be found in the user manual on https://yuanchao-xu.github.io/hyfo/
#'
#' @references
#'
#' \itemize{
#' \item http://www4.gipuzkoa.net/oohh/web/esp/02.asp
#' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for
#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
#' }
#'
#'
#' @source http://www4.gipuzkoa.net/oohh/web/esp/02.asp
#' @return The collected data from different txt files.
#' @export
#' @importFrom utils tail
#' @importFrom data.table rbindlist
collectData_txt_anarbe <- function(folderName, output = TRUE, rangeWord = c('Ene ', -1,
'Total ', -6)){
#All the code should be ASCII encode, so there should be no strange symbol.
if (is.null(rangeWord)) {
stop('rangeWord consists of 4 elements:
1. start word which program can recognise.
2. shift1, the shift needs to be made. E.g. start word is in line 7, and program
should read file from line 9, then shift is 9-7 = 2.
3. end word, as start word
4. shift2, same as shift1, sometimes can be negative
E.g. rangeWord=c(\"aaa\",2,\"bbb\",-2)
if no rangeWord, just input c(NULL,NULL,NULL,NULL)')
}
fileNames <- list.files(folderName, pattern = '*.TXT', full.names = TRUE)
data <- lapply(fileNames, FUN = readColumn_txt_anarbe, rangeWord = rangeWord)
data <- rbindlist(data)
a <- unlist(strsplit(folderName, '\\\\|/'))
tarName <- tail(a, 2)[1]
colnames(data) <- c('Date', tarName)
#newFileName <- file.choose(new = T)
message('new file should be located a different location than the excel folder,
in order to avoid error.
At least 2 excels should be in the folder')
#write.table(data_new,file=newFileName,row.names = F, col.names = F,sep=',')
if (output == TRUE) return(data)
}
anarbe_txt <- function(dataset, x1, x2){
data <- as.matrix(dataset[x1:x2, 2:13])
startYear <- data[1, 6]
data <- data[5:35, ]
date <- which(data != ' ', arr.ind = TRUE)
startDate <- date[1, ]
endDate <- date[length(date[, 1]), ]
startDate <- as.Date(paste(startYear, startDate[2], startDate[1], sep = '-'))
endDate <- as.Date(paste(startYear, endDate[2], endDate[1], sep = '-'))
Date <- as.factor(seq(startDate, endDate, 1))
dim(data) <- c(length(data), 1)
data <- as.numeric(data[which(data != ' '), ])
if (length(data) != length(Date)) {
stop('check original txt file. for missing value, the symbol is "--", check
if this symbol is missing somewhere')
}
output <- data.frame(Date = Date, target = data)
return(output)
}
#' @references
#'
#' \itemize{
#' \item R Core Team (2015). R: A language and environment for statistical computing. R Foundation for
#' Statistical Computing, Vienna, Austria. URL https://www.R-project.org/.
#' }
#'
#' @importFrom utils read.fwf
readColumn_txt_anarbe <- function(fileName, keyword = NULL, rangeWord = NULL){
a <- read.fwf(fileName, widths = rep(10,13))#read file with fixed width
startRow <- which(a == rangeWord[1], arr.ind = TRUE)[, 1]
startRow <- startRow + as.numeric(rangeWord[2])
endRow <- which(a == rangeWord[3], arr.ind = TRUE)[, 1]
endRow <- endRow + as.numeric(rangeWord[4])
data <- mapply(FUN = function(x1, x2) anarbe_txt(dataset = a, x1, x2), startRow, endRow)
data_new <- data.frame(Data = unlist(data[1, ]), target = unlist(data[2, ]))
message(fileName)
return(data_new)
}
|
0ec4da734e5a405e2fa49fb1a60c4fbeb81da337 | fc41ba72f92ef5a75539784f2d55c268a769dcde | /respirometry_0.1.2/respirometry.Rcheck/00_pkg_src/respirometry/R/Q10.R | 8bb339c50dd6e2dc9664782f77d0f734543d3336 | [] | no_license | matthewabirk/respirometry | 3573d578be80758fda72e795e7a4977a2a68e48d | 6f5f65a4086db65c1fe86f17cd61455a21dc38c5 | refs/heads/master | 2023-07-20T00:32:29.078249 | 2023-07-06T17:09:57 | 2023-07-06T17:09:57 | 53,635,934 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,493 | r | Q10.R | #' @title Parameters of Q10 Temperature Coefficient
#'
#' @description Returns the unknown parameter given 4 of 5 parameters from Q10 temperature coefficient calculation for chemical or biological systems.
#'
#' @details
#' Given four parameters, the fifth parameter will be returned.
#'
#' @param Q10 factor by which rate changes due to 10 °C increase in temperature.
#' @param R2 rate 2.
#' @param R1 rate 1.
#' @param T2 temperature 2 (in °C).
#' @param T1 temperature 1 (in °C).
#'
#' @author Matthew A. Birk, \email{matthewabirk@@gmail.com}
#'
#' @examples
#' Q10(R2 = 10, R1 = 5, T2 = 20, T1 = 10) # Returns Q10; = 2
#' Q10(Q10 = 2.66, R1 = 5, T2 = 20, T1 = 10) # Returns R2; = 13.3
#'
#' @encoding UTF-8
#' @export
Q10 = function(Q10, R2, R1, T2, T1){
q10 = methods::hasArg(Q10)
r2 = methods::hasArg(R2)
r1 = methods::hasArg(R1)
t2 = methods::hasArg(T2)
t1 = methods::hasArg(T1)
if(sum(q10, r2, r1, t2, t1) < 4) stop('Four parameters are needed')
if(sum(q10, r2, r1, t2, t1) == 5) stop('All parameters already provided. Nothing to calculate...')
if(q10 == F){
Q10 = list(Q10 = (R2 / R1)^(10 / (T2 - T1)))
return(Q10)
}
if(r2 == F){
R2 = list(R2 = Q10^((T2 - T1) / 10) * R1)
return(R2)
}
if(r1 == F){
R1 = list(R1 = Q10^((T1 - T2) / 10) * R2)
return(R1)
}
if(t2 == F){
T2 = list(T2 = 10 / log(Q10, base = R2 / R1) + T1)
return(T2)
}
if(t1==F){
T1 = list(T1 = 10 / log(Q10, base = R1 / R2) + T2)
return(T1)
}
} |
06d9806eee5a51d115f05609cc48242603a6b7ab | a209b8668b2b860f7faefc537faaeb787d170e2b | /get_dataCLUBS_THROGHT ALL SEASONS.R | b02434e9f67050383b40497f16241debdda3f8a0 | [] | no_license | Kpavicic00/visualization-of-data-for-football-soccer | 9b296a5555ad0e9890e7f048d43665b7e1df3acb | 009976fc4298074db8ae5254f634333dd4d61dd8 | refs/heads/master | 2020-05-23T02:28:13.696919 | 2019-05-26T23:10:52 | 2019-05-26T23:10:52 | 186,604,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | get_dataCLUBS_THROGHT ALL SEASONS.R | #library
library(ggplot2)
library(maps)
library(ggridges)
library(dplyr)
library(ggthemes)
library(extrafont)
liga_throught <- read.csv("/home/kristijan/github/FootballEvolcion/Datas/SaveData/save_csv_GetDate_for_Clubs_throught_all_seasons.csv")
colnames(liga_throught) <- c("Order_of_Expend","Club","State","Competition","Expenditures",
"Income","Arrivals","Departures","Balance",
"inflation_Expenditure","inflation_Income","inflation_Balance")
View(liga_throught)
petica_La_liga <- liga_throught %>%
filter(Competition == "LaLiga")
View(petica_La_liga)
fill <- "#4271AE"
ggplot(petica_La_liga, aes(x = Club, y = Departures)) +
geom_boxplot(fill='#A4A4A4', color="black")+
theme_classic()
|
370779fcc6ce64b053e05862c90bbaa46a0cac07 | 5ec0f0a09965352c3b7266abaf8c3a47375dbd15 | /plot2.R | b2be321d1fa0d08f6ab98888a27ee1f89944f0d9 | [] | no_license | marisha-t/ExData_Plotting1 | 110cbe0bfeb79952bc0f345de79f3e136b0e2d3a | 573eee85659b860aa0164996c872fb663ebd3368 | refs/heads/master | 2021-05-27T21:11:58.550512 | 2014-06-08T15:41:19 | 2014-06-08T15:41:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 718 | r | plot2.R | plot2 <- function(){
png(file = "plot2.png")
# read data file
data <- read.table("household_power_consumption.txt", sep = ";", nrow = 2880, skip = 66637)
# add column headers
colnames(data) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# convert date and time columns into date/time classes in R
data$Date <- as.Date(data$Date, "%d/%m/%Y")
date <- data$Date
time <- data$Time
x <- paste(date, time)
data$Time <- strptime(x, "%Y-%m-%d %H:%M:%S")
# plot graph
plot(data$Time, data$Global_active_power, type="l", xlab="", ylab = "Global Active Power (killowatts)")
dev.off()
}
|
494a87658ba2cf9adbad7297e5aeaf938b75004a | e95d58d7c98f17bdb63f2dc1ea863aff8ffabf46 | /R/utils.R | c1010e5341b9f6804556fd2b3bb4fd596c6b1675 | [] | no_license | cran/ph2rand | 02311955c015294de07888ae80f2d079605ddc35 | 3af611dd2131f9b494ede839c646882bd7448510 | refs/heads/master | 2023-03-14T03:30:27.899725 | 2021-03-02T18:10:03 | 2021-03-02T18:10:03 | 343,990,892 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,350 | r | utils.R | build_des_one_stage_output <- function(alpha, beta, boundaries, delta, feasible,
                                       nC, nCmax, nE, opchar, Pi0, Pi1, ratio,
                                       summary, type) {
  # Collect the supplied single-stage design components into a named list,
  # recording the number of stages explicitly as J = 1L.
  output        <- list(alpha      = alpha,
                        beta       = beta,
                        boundaries = boundaries,
                        delta      = delta,
                        feasible   = feasible,
                        J          = 1L,
                        nC         = nC,
                        nCmax      = nCmax,
                        nE         = nE,
                        opchar     = opchar,
                        Pi0        = Pi0,
                        Pi1        = Pi1,
                        ratio      = ratio,
                        summary    = summary,
                        type       = type)
  # Prepend the "ph2rand_des" S3 class (the implicit "list" class is kept) so
  # the result is returned as a classed design object for S3 dispatch.
  class(output) <- c("ph2rand_des", class(output))
  output
}
# Assemble the return object for a two-stage design.
#
# Pure constructor: packages the design inputs, stopping boundaries, and
# operating characteristics into one named list (with J fixed at 2L) and
# tags it with class "ph2rand_des" so the package's S3 methods dispatch
# on it. No computation is performed here.
build_des_two_stage_output <- function(alpha, beta, boundaries, delta, efficacy,
                                       efficacy_param, efficacy_type, equal,
                                       feasible, futility, futility_param,
                                       futility_type, nC, nCmax, nE, opchar,
                                       Pi0, Pi1, piO, ratio, summary, w, type) {
  # Build the list and attach the class in a single expression; element
  # names and their order match the one-stage builder plus the two-stage
  # specific fields (efficacy*, futility*, piO, w).
  structure(
    list(alpha          = alpha,
         beta           = beta,
         boundaries     = boundaries,
         delta          = delta,
         efficacy       = efficacy,
         efficacy_param = efficacy_param,
         efficacy_type  = efficacy_type,
         equal          = equal,
         feasible       = feasible,
         futility       = futility,
         futility_param = futility_param,
         futility_type  = futility_type,
         J              = 2L,  # always a two-stage design here
         nC             = nC,
         nCmax          = nCmax,
         nE             = nE,
         opchar         = opchar,
         Pi0            = Pi0,
         Pi1            = Pi1,
         piO            = piO,
         ratio          = ratio,
         summary        = summary,
         type           = type,
         w              = w),
    class = c("ph2rand_des", "list")
  )
}
# Pre-compute the search space used when optimising a design.
#
# For each admissible control-arm size nC (and the implied experimental-arm
# size nE = nC*ratio), this builds the grid of possible response counts and
# the derived test-statistic values needed by the chosen test `type`
# ("fisher", "barnard", or otherwise, e.g. "binomial"/"sat").
#
# Arguments (as used below; semantics inferred from usage -- confirm against
# package docs):
#   J     - number of stages (accepted but not used in this function body)
#   type  - design/test type; changes which auxiliary tables are built
#   nCmax - maximum control-arm sample size to consider
#   ratio - allocation ratio nE/nC; only nC with integer nC*ratio are kept
search_parameters <- function(J, type, nCmax, ratio) {
  poss_nC <- 1:nCmax
  poss_nE <- poss_nE_orig <-
    poss_nC*ratio
  # Keep only sample sizes where nC*ratio is a whole number of patients.
  keep <- which(poss_nE%%1 == 0)
  poss_nC <- poss_nC[keep]
  poss_nE <- poss_nE[keep]
  len_poss_nC <- length(poss_nC)
  max_poss_nC <- max(poss_nC)
  if (type == "fisher") {
    # Table of binomial coefficients choose(n, 0:n) for n = 1..maxima,
    # used by Fisher's exact test computations elsewhere.
    maxima <- max(max_poss_nC, poss_nE)
    choose_mat <- matrix(0, maxima,
                         maxima + 1)
    for (n in 1:maxima) {
      choose_mat[n, 1:(n + 1)] <- choose(n, 0:n)
    }
  } else {
    choose_mat <- NULL
  }
  poss_x <- poss_y <- poss_z <- poss_B <- unique_B <- list()
  # Full grid of (control responses, experimental responses) for the largest
  # sample sizes; smaller (nC, nE) pairs subset this grid below.
  all_x <-
    as.matrix(expand.grid(0:poss_nC[len_poss_nC], 0:poss_nE[len_poss_nC]))
  all_y <- all_x[, 2] - all_x[, 1]   # difference in responses (E - C)
  all_z <- all_x[, 1] + all_x[, 2]   # total responses (used by Fisher)
  for (n in 1:len_poss_nC) {
    nC <- poss_nC[n]
    nE <- poss_nE[n]
    # List slot encoding the (nC, nE) pair as a single index.
    index <- nC + max_poss_nC*(nE - 1)
    keep <- which(all_x[, 1] <= nC &
                    all_x[, 2] <= nE)
    poss_x[[index]] <- all_x[keep, ]
    if (type != "barnard") {
      poss_y[[index]] <- all_y[keep]
      if (type == "fisher") {
        poss_z[[index]] <- all_z[keep]
      }
    } else {
      # Barnard's test: pooled-proportion Z statistic for every outcome.
      denom_fact <-
        (poss_x[[index]][, 1] + poss_x[[index]][, 2])/(nC + nE)
      poss_B_index <-
        (poss_x[[index]][, 2]/nE - poss_x[[index]][, 1]/nC)/
        sqrt(denom_fact*(1 - denom_fact)*(1/nC + 1/nE))
      # 0/0 cases (all responses or none) are defined as statistic 0.
      poss_B_index[is.nan(poss_B_index)] <- 0
      poss_B[[index]] <- matrix(0, nC + 1, nE + 1)
      for (i in 1:((nC + 1)*(nE + 1))) {
        poss_B[[index]][poss_x[[index]][i, 1] + 1,
                        poss_x[[index]][i, 2] + 1] <- poss_B_index[i]
      }
      unique_B[[index]] <-
        sort(unique(as.vector(poss_B_index)))
      len_unique_B_index <- length(unique_B[[index]])
      # Drop near-duplicate statistic values (within 1e-15 of each other).
      keep <-
        !logical(len_unique_B_index)
      for (i in 2:len_unique_B_index) {
        if (unique_B[[index]][i] - unique_B[[index]][i - 1] <= 1e-15) {
          keep[i] <- FALSE
        }
      }
      unique_B[[index]] <- unique_B[[index]][keep]
      # Pad with sentinel values one unit beyond the extremes so boundary
      # searches can always bracket the observed statistic.
      unique_B[[index]] <-
        c(unique_B[[index]][1] - 1, unique_B[[index]],
          unique_B[[index]][length(unique_B[[index]])] + 1)
    }
  }
  # Return all pre-computed tables; unused slots stay NULL/empty by type.
  list(choose_mat = choose_mat,
       max_poss_nC = max_poss_nC,
       poss_nC = poss_nC,
       poss_nE = poss_nE,
       poss_nE_orig = poss_nE_orig,
       poss_B = poss_B,
       poss_x = poss_x,
       poss_y = poss_y,
       poss_z = poss_z,
       unique_B = unique_B)
}
# Simulate a (one- or two-stage) randomised phase II design and return its
# empirical operating characteristics at response probabilities `pi`.
#
# Arguments (semantics inferred from usage -- confirm against package docs):
#   pi                   - length-2 vector of response rates (control, experimental)
#   completed_replicates - replicates already done (for progress reporting)
#   des                  - a "ph2rand_des" design object (boundaries, nC, nE, type)
#   k                    - stage(s) to condition on; if length 1, results are
#                          conditioned on stopping in stage k
#   replicates           - number of simulated trials in this call
#   summary              - logical; print progress messages?
#   total_replicates     - grand total across all calls (for progress scaling)
sim_internal <- function(pi, completed_replicates, des, k,
                         replicates, summary, total_replicates) {
  # Replicate indices at which a ~10%-step progress message is emitted.
  summary_i <-
    round(seq(1, total_replicates, length.out = 11)[-c(1, 11)])
  J <- des$J
  nC <- des$nC
  nE <- des$nE
  # For barnard/binomial designs the per-stage boundaries are scalars,
  # collected into lists indexed by stage.
  if (des$type %in% c("barnard", "binomial")) {
    e <- list(des$boundaries$e1, des$boundaries$e2)
    f <- list(des$boundaries$f1, des$boundaries$f2)
  }
  cum_nC <- cumsum(nC)
  cum_nE <- cumsum(nE)
  seq_J <- 1:J
  # E/Fu accumulate counts of trials stopping for efficacy/futility per stage.
  E <- Fu <- numeric(J)
  numeric_2 <- numeric(2)
  for (i in 1:replicates) {
    # x = cumulative (control, experimental) responses; z = per-stage totals.
    x <- z <- numeric_2
    for (j in seq_J) {
      # Draw this stage's responses in each arm.
      x_iterate <- stats::rbinom(2, c(nC[j], nE[j]), pi)
      z[j] <- x_iterate[1] + x_iterate[2]
      x <- x + x_iterate
      # Compute the test statistic(s) required by the design type.
      if (des$type %in% c("binomial", "fisher", "sat")) {
        tD <- x[2] - x[1]
        if (des$type == "sat") {
          tS <- x[2]
        }
      } else if (des$type == "barnard") {
        # Degenerate tables (all zeros or all responses) get statistic 0.
        if (any(all(x == 0), all(x == c(cum_nC[j], cum_nE[j])))) {
          tB <- 0
        } else {
          fact <- (x[1] + x[2])/(cum_nC[j] + cum_nE[j])
          tB <-
            (x[2]/cum_nE[j] - x[1]/cum_nC[j])/
            sqrt(fact*(1 - fact)*(1/cum_nC[j] + 1/cum_nE[j]))
        }
      }
      # Apply the stopping rule; continue == FALSE ends this trial early.
      continue <- TRUE
      if (des$type == "barnard") {
        if (tB >= e[[j]]) {
          E[j] <- E[j] + 1
          continue <- FALSE
        } else if (tB <= f[[j]]) {
          Fu[j] <- Fu[j] + 1
          continue <- FALSE
        }
      } else if (des$type == "binomial") {
        if (tD >= e[[j]]) {
          E[j] <- E[j] + 1
          continue <- FALSE
        } else if (tD <= f[[j]]) {
          Fu[j] <- Fu[j] + 1
          continue <- FALSE
        }
      } else if (des$type == "fisher") {
        # Fisher boundaries are conditional on the stage totals z[1] (and
        # z[2] at stage 2), hence the matrix/vector indexing.
        if (j == 1) {
          if (tD >= des$boundaries$e1[z[1] + 1]) {
            E[1] <- E[1] + 1
            continue <- FALSE
          } else if (tD <= des$boundaries$f1[z[1] + 1]) {
            Fu[1] <- Fu[1] + 1
            continue <- FALSE
          }
        } else {
          if (tD >= des$boundaries$e2[z[1] + 1, z[2] + 1]) {
            E[2] <- E[2] + 1
          } else {
            Fu[2] <- Fu[2] + 1
          }
        }
      } else if (des$type == "sat") {
        # "sat" designs require both the difference (tD) and the single-arm
        # (tS) statistics to cross their respective boundaries.
        if (j == 1) {
          if (all(tD >= des$boundaries$eT1, tS >= des$boundaries$eS1)) {
            E[1] <- E[1] + 1
            continue <- FALSE
          } else if (all(tD <= des$boundaries$fT1, tS <= des$boundaries$fS1)) {
            Fu[1] <- Fu[1] + 1
            continue <- FALSE
          }
        } else {
          if (all(tD >= des$boundaries$eT2, tS >= des$boundaries$eS2)) {
            E[2] <- E[2] + 1
            continue <- FALSE
          } else {
            Fu[2] <- Fu[2] + 1
          }
        }
      }
      if (!continue) {
        break
      }
    }
    # Emit a progress message at roughly every 10% of total_replicates.
    if (all((completed_replicates + i) %in% summary_i, summary)) {
      message("..approximately ",
              10*which(summary_i == (completed_replicates + i)),
              "% through the required simulations..")
    }
  }
  # Convert counts to empirical probabilities.
  E <- E/replicates
  Fu <- Fu/replicates
  if (J == 1) {
    c(pi, E[1])
  } else {
    S <- E + Fu   # probability of stopping at each stage
    if (length(k) == 1) {
      # Condition on stopping in stage k: renormalise and zero other stages.
      E <- E/S[k]
      Fu <- Fu/S[k]
      E[-k] <- 0
      Fu[-k] <- 0
      S <- E + Fu
    }
    # Sample-size distribution: stage-1 size for early stoppers, total size
    # otherwise. NOTE(review): rep() truncates non-integer `times`, so
    # replicates*S[j] is implicitly floored here -- confirm this is intended.
    n <- c(rep(nC[1] + nE[1], replicates*S[1]),
           rep(cum_nC[2] + cum_nE[2], replicates*S[2]))
    # Return: pi, power/ESS summaries (mean, sd, median of n), per-stage
    # efficacy/futility/stopping probabilities, and the maximal sample size.
    c(pi, sum(E), sum(n)/replicates, stats::sd(n), stats::quantile(n, 0.5), E,
      Fu, S, cum_nC[2] + cum_nE[2])
  }
}
# Custom ggplot2 theme used for ph2rand plots.
#
# Starts from theme_grey() and overrides selected elements: white panel with
# light grey gridlines and border, grey70 facet strips with white text, a
# centred plot title, and a bottom-positioned legend without a title.
#
# @param base_size Base font size, passed through to theme_grey().
# @param base_family Base font family, passed through to theme_grey().
# @return A complete ggplot2 theme object.
theme_ph2rand <- function(base_size = 11, base_family = "") {
  base_theme <- ggplot2::theme_grey(base_family = base_family,
                                    base_size = base_size)
  overrides <- ggplot2::theme(
    axis.ticks       = ggplot2::element_line(colour = "grey70", size = 0.25),
    complete         = TRUE,
    legend.key       = ggplot2::element_rect(fill = "white", colour = NA),
    legend.position  = "bottom",
    legend.title     = ggplot2::element_blank(),
    panel.background = ggplot2::element_rect(fill = "white", colour = NA),
    panel.border     = ggplot2::element_rect(fill = NA, colour = "grey70",
                                             size = 0.5),
    panel.grid.major = ggplot2::element_line(colour = "grey87", size = 0.25),
    panel.grid.minor = ggplot2::element_line(colour = "grey87", size = 0.125),
    plot.margin      = ggplot2::unit(c(0.3, 0.5, 0.3, 0.3), "cm"),
    plot.title       = ggplot2::element_text(hjust = 0.5),
    strip.background = ggplot2::element_rect(fill = "grey70", colour = NA),
    strip.text       = ggplot2::element_text(colour = "white",
                                             size = ggplot2::rel(0.8))
  )
  base_theme + overrides
}
|
08e9c14bb302a189aeaff15761267b96613ac6a9 | d08874fad547ed79b237a0ac69576c09066d19f9 | /analiza_R_mutacje.R | d1c0f2ca1ef91bf01e0846e2941bbf708eed0ce8 | [] | no_license | mggrami/skladowisko_rozwiazan | 777d5af6404759b718a30e7f57dda77bede70f0a | 98f3f9f64d33e78c9c373a29c91a36049d723def | refs/heads/master | 2021-01-22T20:19:11.668661 | 2017-04-22T19:53:03 | 2017-04-22T19:53:03 | 85,313,943 | 0 | 0 | null | 2017-04-22T19:53:04 | 2017-03-17T13:22:28 | R | UTF-8 | R | false | false | 1,339 | r | analiza_R_mutacje.R | ##zabawy z danymi
### Exploratory plots (orig.: "Proby" = trials)
# NOTE(review): this script assumes data frames such as `AZBIORCZA` and the
# `1_VS_*_USUNIETE_ODCZYTY_R` tables are already loaded in the workspace;
# `AZBIORCZA` is only created at the bottom of this script, so the plotting
# section at the top must be run on a second pass -- confirm intended order.
library("ggplot2")
# Scatter plot of log2 fold change per gene, coloured by gene name and
# shaped by mutant.
wykresy<-ggplot(`AZBIORCZA`, aes(x= log2.fold.change, y= Name, shape=mutant, color= Name))+
  geom_point()
wykresy + scale_shape_discrete(solid=F)
### Heatmap
# Data preparation
HM<- as.data.frame(AZBIORCZA)
HM<- select(HM, Name, log2.fold.change, mutant)
HM<- as.data.frame(HM)
# Drop rows whose Name contains a hyphen.
HM<- HM[!grepl("-", HM$Name),]
HM<- as.matrix(HM)
# Heatmap of products
ggplot(AZBIORCZA, aes(mutant, Name))+
  geom_tile(aes(fill = log2.fold.change))
## Data processing
library(plyr)
library(dplyr)
library(tidyr)
## Changes to the data
### PNP mutant: normalise column names and tag rows with the mutant label.
PNP.WT<- rename(`1_VS_2_16_USUNIETE_ODCZYTY_R`, c("fold_change_PNP.WT"="fold.change","log2_fold_change_PNP.WT"="log2.fold.change"))
PNP<- c(rep("PNP", 16))
PNP.WT$mutant<- PNP
### RNB mutant: same treatment (47 rows).
RNB.WT<- rename(`1_VS_3_47_USUNIETE_ODCZYTY_R`, c("fold_change_RNB.WT"="fold.change", "log2_fold_change_RNB.WT"="log2.fold.change"))
RNB<-c(rep("RNB", 47))
RNB.WT$mutant<-RNB
### RNR mutant: columns renamed via base R names() instead of plyr::rename.
names(`1_VS_4_62_USUNIETE_ODCZYTY_R`)[names(`1_VS_4_62_USUNIETE_ODCZYTY_R`)=="fold_change_RNR.WT"]<-"fold.change"
names(`1_VS_4_62_USUNIETE_ODCZYTY_R`)[names(`1_VS_4_62_USUNIETE_ODCZYTY_R`)=="log2_fold_change_RNR.WT"]<-"log2.fold.change"
RNR.WT<-`1_VS_4_62_USUNIETE_ODCZYTY_R`
RNR<-c(rep("RNR", 62))
RNR.WT$mutant<-RNR
## Merge the three mutant tables into one combined data frame.
MUTACJE<- full_join(PNP.WT,RNB.WT)
AZBIORCZA<- full_join(MUTACJE, RNR.WT)
d92bee6904f674335710863d0f382b84b4a1ffc8 | df43b5b655ad126f833cd101be478840a0af4bc5 | /R/get_post_links.R | c91be812b4fc69aa5672dce93863aaac2668e68d | [
"MIT"
] | permissive | rpodcast/shinycontestscrape | 5c392f07814eea0cbf44ecfa25bcc4391845818b | 0b4f1315df473504fb1a8bceb4e76df19778cea9 | refs/heads/master | 2023-05-02T07:32:27.842067 | 2021-05-17T14:07:28 | 2021-05-17T14:07:28 | 367,966,865 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 803 | r | get_post_links.R | #' Extract all links contained in a discourse post
#'
#' @param url character string of the URL for a post.
#' @param add_sleep boolean to add a simple sleep before
#' function completes.
#' @param pb progress bar object. If no progress output is
#' needed, keep this value as `NULL`.
#'
#' @return character vector of all links
#' @import httr
#' @import rvest
#' @import progress
#' @export
get_post_links <- function(url, add_sleep = TRUE, pb = NULL) {
links <- NULL
if (add_sleep) Sys.sleep(0.4)
if (!is.null(pb)) {
pb$tick()
}
req <- httr::GET(paste0(url, ".json"))
httr::stop_for_status(req)
con <- httr::content(req)
post_text <- con$post_stream$posts[[1]]$cooked
links <- read_html(post_text) %>%
html_elements("a") %>%
html_attr("href")
return(links)
}
|
4ba839420d1135ad3af8fd516b0f0f396114c893 | f042fbdf31a2106bfbe298b32dc0aa551bd3ae84 | /man/exclude_incomplete_years.Rd | 06258a30042d558b53e6330b313682a902acfffa | [] | no_license | danielbonhaure/weather-generator | c76969967c3a60500a6d90d5931a88fb44570eba | 6a207415fb53cca531b4c6be691ff2d7d221167d | refs/heads/gamwgen | 2023-01-21T17:38:46.102213 | 2020-12-04T21:59:05 | 2020-12-04T21:59:05 | 286,565,700 | 0 | 0 | null | 2020-12-01T13:19:05 | 2020-08-10T19:50:16 | R | UTF-8 | R | false | true | 274 | rd | exclude_incomplete_years.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{exclude_incomplete_years}
\alias{exclude_incomplete_years}
\title{Exclude incomplete years}
\usage{
exclude_incomplete_years(climate_data)
}
\description{
Exclude incomplete years.
}
|
55021063e679d9159480be1ee035709b7d1fed33 | ac78b04fe8fdfcf8e09a7764825184474bb583fa | /man/bootstrapNull.Rd | f2f0f6b30a40ed580fc17b4bef64fce7c487bd6a | [
"MIT"
] | permissive | kbroman/mbmixture | 5440d845ea9188f41be03bcfa25b664c3d3ac655 | e930074fd09391dfa01268fe846d6b6e5641be76 | refs/heads/main | 2023-05-12T08:28:10.816396 | 2023-04-27T13:02:21 | 2023-04-27T13:02:21 | 144,390,106 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,652 | rd | bootstrapNull.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrapNull.R
\name{bootstrapNull}
\alias{bootstrapNull}
\title{Bootstrap to assess significance}
\usage{
bootstrapNull(
tab,
n_rep = 1000,
interval = c(0, 1),
tol = 0.000001,
check_boundary = TRUE,
cores = 1,
return_raw = TRUE
)
}
\arguments{
\item{tab}{Dataset of read counts as 3d array of size 3x3x2, genotype in first sample x genotype in second sample x allele in read.}
\item{n_rep}{Number of bootstrap replicates}
\item{interval}{Interval to which each parameter should be constrained}
\item{tol}{Tolerance for convergence}
\item{check_boundary}{If TRUE, explicitly check the boundaries of \code{interval}.}
\item{cores}{Number of CPU cores to use, for parallel calculations.
(If \code{0}, use \code{\link[parallel:detectCores]{parallel::detectCores()}}.)
Alternatively, this can be links to a set of cluster sockets, as
produced by \code{\link[parallel:makeCluster]{parallel::makeCluster()}}.}
\item{return_raw}{If TRUE, return the raw results. If FALSE, just return the p-value.
Unlike \code{\link[=bootstrapSE]{bootstrapSE()}}, here the default is TRUE.}
}
\value{
If \code{return_raw=FALSE}, a single numeric value (the p-value). If
\code{return_raw=TRUE}, a vector of length \code{n_rep} with the LRT statistics from each
bootstrap replicate.
}
\description{
Perform a parametric bootstrap to assess whether there is significant evidence that a sample is a mixture.
}
\examples{
data(mbmixdata)
# just 100 bootstrap replicates, as an illustration
bootstrapNull(mbmixdata, n_rep=100)
}
\seealso{
\code{\link[=bootstrapSE]{bootstrapSE()}}
}
|
88dc41194ca744cfbf374bba811448fe7bd2a987 | c42728240c58d1b8f56b698c42af112def62553d | /extsyms/rankings.R | 05f4c6c52e75002413c8ef5fc4ef6ac77b3d9ed4 | [
"Unlicense"
] | permissive | razvanm/fs-expedition | 7e5c6b8c74b832598129733300360efe4b998b19 | 7d1f3d45b5b9ed1e6ddb5af40ee9491fd287657d | refs/heads/master | 2016-09-06T12:34:50.487833 | 2014-08-31T09:00:13 | 2014-08-31T09:00:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,957 | r | rankings.R | d <- read.table('extsyms', head=T)
dd <- tapply(d$extsyms, d$fs, function(x) { length(x) } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-longevity.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(-5,31), ann=F, axes=F, xaxs='i', yaxs='i')
#abline(h=unique(dd), lwd=0.5, col='gray')
#abline(v=1:n, lwd=0.5, col='gray')
points(dd, pch=20, cex=2)
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
#axis(1, at=1:length(dd), labels=rownames(dd), lwd=0, lwd.tick=2, las=2)
axis(2, at=1:30, labels=F, lwd=0, lwd.tick=1)
p <- 1
for (i in unique(dd)) {
count <- length(dd[dd == i])
p <- p + count
segments(0, i, p-1, i, lwd=0.5, lty=3)
axis(2, at=i, labels=paste(i, '(', count, ')', sep=''), las=2, lwd=0, lwd.tick=2)
}
par(ps=10)
title(ylab='Number of releases (number of file systems)', line=4)
dev.off()
# ---- Minimum symbol count per file system --------------------------------
dd <- tapply(d$extsyms, d$fs, function(x) { min(x) } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-min.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(-35,max(dd)+20), ann=F, axes=F, xaxs='i', yaxs='i')
points(dd, pch=20, cex=2)
# Name below the point (rotated), numeric value above it in gray.
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
text(1:length(dd), dd, paste(' ', dd, sep=''), adj=c(0, 0.3), srt=90, col='gray')
axis(2, at=seq(0, max(dd), 10), lwd=0, lwd.tick=2, las=2)
par(ps=10)
title(ylab='Minimum number of symbols', line=4)
dev.off()
# ---- Maximum symbol count per file system --------------------------------
dd <- tapply(d$extsyms, d$fs, function(x) { max(x) } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-max.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(-35,max(dd)+30), ann=F, axes=F, xaxs='i', yaxs='i')
points(dd, pch=20, cex=2)
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
text(1:length(dd), dd, paste(' ', dd, sep=''), adj=c(0, 0.3), srt=90, col='gray')
axis(2, at=seq(0, max(dd), 20), lwd=0, lwd.tick=2, las=2)
par(ps=10)
title(ylab='Maximum number of symbols', line=4)
dev.off()
# ---- Range (max - min) of symbol counts ----------------------------------
dd <- tapply(d$extsyms, d$fs, function(x) { max(x)-min(x) } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-range.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(-35,max(dd)+20), ann=F, axes=F, xaxs='i', yaxs='i')
points(dd, pch=20, cex=2)
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
text(1:length(dd), dd, paste(' ', dd, sep=''), adj=c(0, 0.3), srt=90, col='gray')
axis(2, at=seq(0, max(dd), 10), lwd=0, lwd.tick=2, las=2)
par(ps=10)
title(ylab='Range in number of symbols', line=4)
dev.off()
# ---- Net change between first and last release ---------------------------
dd <- tapply(d$extsyms, d$fs, function(x) { x[length(x)]-x[1] } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-last-first.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(-35,max(dd)+20), ann=F, axes=F, xaxs='i', yaxs='i')
points(dd, pch=20, cex=2)
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
text(1:length(dd), dd, paste(' ', dd, sep=''), adj=c(0, 0.3), srt=90, col='gray')
axis(2, at=seq(0, max(dd), 10), lwd=0, lwd.tick=2, las=2)
par(ps=10)
title(ylab='Number of symbols between the last and first appearance', line=4)
dev.off()
# ---- Largest increase between consecutive releases -----------------------
# NOTE(review): x[1:length(x)-1] relies on `:` binding tighter than `-`;
# it evaluates as x[0:(length(x)-1)], and since index 0 is dropped this
# equals x[1:(length(x)-1)], giving consecutive differences as intended.
dd <- tapply(d$extsyms, d$fs, function(x) { if (length(x) > 1) max(x[-1] - x[1:length(x)-1]) else 0 } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-max-jump.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(-12,max(dd)+5), ann=F, axes=F, xaxs='i', yaxs='i')
points(dd, pch=20, cex=2)
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
text(1:length(dd), dd, paste(' ', dd, sep=''), adj=c(0, 0.3), srt=90, col='gray')
axis(2, at=seq(0, max(dd)+1, 5), lwd=0, lwd.tick=2, las=2)
par(ps=10)
title(ylab='Biggest increase in the number of external symbols', line=4)
title(ylab='between two consecutive releases', line=3)
dev.off()
# ---- Largest decrease between consecutive releases (as a positive value) --
dd <- tapply(d$extsyms, d$fs, function(x) { if (length(x) > 1) -min(x[-1] - x[1:length(x)-1]) else 0 } )
dd <- sort(dd)
n <- length(dd)
png('extsyms-min-jump.png', width=900, height=500, antialias='subpixel')
par(mar=c(1,6,1,1))
plot(dd, type='n', xlim=c(0, n+0.5), ylim=c(min(dd)-4,max(dd)+2), ann=F, axes=F, xaxs='i', yaxs='i')
points(dd, pch=20, cex=2)
text(1:length(dd), dd, paste(rownames(dd), ' ', sep=''), adj=c(1, 0.3), srt=90)
text(1:length(dd), dd, paste(' ', dd, sep=''), adj=c(0, 0.3), srt=90, col='gray')
axis(2, at=seq(0, max(dd), 1), lwd=0, lwd.tick=2, las=2)
par(ps=10)
title(ylab='Biggest decrease in the number of external symbols', line=4)
title(ylab='between two consecutive releases', line=3)
dev.off()
# Draw the values in `d`, rescaled to [0, 1], as a row of points at height
# `y` on the current plot. Uses the global `n` (number of file systems) for
# the point count, exactly like the surrounding script expects.
my.points <- function(d, y) {
  scaled <- (d - min(d)) / (max(d) - min(d))
  points(scaled, rep(y, n), pch = 20, cex = 2)
}
# Rank every file system on seven metrics (ties broken by first occurrence),
# then draw a parallel-coordinates style "bump chart" connecting each file
# system's rank across the seven metric columns.
d.len <- rank(tapply(d$extsyms, d$fs, function(x) { length(x) } ), ties.method='first')
d.min <- rank(tapply(d$extsyms, d$fs, function(x) { min(x) } ), ties.method='first')
d.max <- rank(tapply(d$extsyms, d$fs, function(x) { max(x) } ), ties.method='first')
d.range <- rank(tapply(d$extsyms, d$fs, function(x) { max(x)-min(x) } ), ties.method='first')
d.fl <- rank(tapply(d$extsyms, d$fs, function(x) { x[length(x)]-x[1] } ), ties.method='first')
d.maxjump <- rank(tapply(d$extsyms, d$fs, function(x) { if (length(x) > 1) max(x[-1] - x[1:length(x)-1]) else 0 } ), ties.method='first')
d.minjump <- rank(tapply(d$extsyms, d$fs, function(x) { if (length(x) > 1) -min(x[-1] - x[1:length(x)-1]) else 0 } ), ties.method='first')
n <- length(d.len)
png('extsyms-rankings.png', width=900, height=1000, antialias='subpixel')
par(mar=c(1,1,1,1))
# Empty canvas: x = the 7 metric columns, y = rank (1..n).
plot(c(1-0.3,7.3), c(0,n+1), type='n', ann=F, axes=F, xaxs='i', yaxs='i')
for (i in names(d.len)) {
  # This file system's rank in each of the 7 columns.
  y <- c(d.len[i], d.min[i], d.max[i], d.range[i], d.fl[i], d.maxjump[i], d.minjump[i])
  # Endpoints of the connecting curves, inset by half the label width so
  # curves start/end beside the text label rather than under it.
  leftx <- 1:6+strwidth(i)/2+0.1
  lefty <- y[1:6]
  rightx <- 2:7-strwidth(i)/2-0.1
  righty <- y[2:7]
  points(leftx, lefty, pch=20, cex=1.6)
  points(rightx, righty, pch=20, cex=1.6)
  #segments(leftx, lefty, rightx, righty)
  # Connect adjacent columns with a spline through two interior control
  # points, so each link leaves/enters its endpoint almost horizontally.
  for (k in 1:6) {
    incx <- 0.05
    incy <- (righty[k] - lefty[k])/40
    s <- spline(c(leftx[k], leftx[k]+incx, rightx[k]-incx, rightx[k]),
                c(lefty[k], lefty[k]+incy, righty[k]-incy, righty[k]), n=30)
    #points(leftx[k]+incx, lefty[k]+incy, pch=20, cex=0.2)
    #points(rightx[k]-incx, righty[k]-incy, pch=20, cex=0.2)
    #lines(c(leftx[k], rightx[k]), c(lefty[k], righty[k]))
    #cat(leftx[k], rightx[k], lefty[k], righty[k], fill=T)
    # De-emphasise links that jump more than 10 ranks (thin, translucent).
    if (max(s$y)-min(s$y) > 10) {
      lines(s$x, s$y, lwd=1, col=rgb(0,0,0,0.4))
    } else {
      lines(s$x, s$y, lwd=2)
    }
    #cat(max(s$y)-min(s$y), ' ')
  }
  # Label the file system at its rank position in every column.
  text(1:7, y, rep(i, 7))
}
par(ps=10)
axis(3, at=1:7, labels=c('Length', 'Min', 'Max', 'Range', 'First-Last', 'Max Jump', 'Min Jump'), font=2, tick=F, line=-1)
dev.off()
e2889a5b8b32e8880eb5b841bc82834c7228557e | db12b990924703cd74748d8585cd9c11fafa6746 | /h2o-r/tests/testdir_misc/runit_logloss.R | d4028ecb476c46f6160ced9cd9a75efc0f3209b4 | [
"Apache-2.0"
] | permissive | h2oai/h2o-3 | 919019a8f297eec676011a9cfd2cc2d97891ce14 | d817ab90c8c47f6787604a0b9639b66234158228 | refs/heads/master | 2023-08-17T18:50:17.732191 | 2023-08-17T16:44:42 | 2023-08-17T16:44:42 | 17,371,412 | 6,872 | 2,345 | Apache-2.0 | 2023-09-14T18:05:40 | 2014-03-03T16:08:07 | Jupyter Notebook | UTF-8 | R | false | false | 2,323 | r | runit_logloss.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
# H2O runit test: verifies that the logloss reported by h2o.performance()
# on the training and test sets matches a logloss computed by hand from the
# model's predictions, for both binomial and multinomial GBM models.
test.logloss <- function() {
Log.info("Testing binomial logloss")
# Train and "test" frames are the same file, so all three logloss values
# below should agree.
train = h2o.uploadFile(locate("smalldata/logreg/prostate.csv"), destination_frame="train")
test = h2o.uploadFile(locate("smalldata/logreg/prostate.csv"), destination_frame="test")
## Compute LogLoss explicitly from predictions on the test set
# act: frame whose first column is the integer class label; pred: per-class
# predicted probabilities. Builds one-hot indicator columns, then averages
# -sum(indicator * log(p)) over rows.
LogLoss <- function(act, pred) {
for (i in 0:max(act)) {
act[,2+i] = act[,1]==i
}
ll <- act[,-1]*log(pred)
ll <- -sum(ll)/nrow(act)
ll
}
### BINOMIAL
predictors = 3:9
response = 2
train[,response] <- as.factor(train[,response])
model = h2o.gbm(x=predictors,y=response,distribution = "bernoulli",training_frame=train,
                ntrees=2,max_depth=3,min_rows=1,learn_rate=0.01,nbins=20)
## Get LogLoss from the model on training set
ll1 <- h2o.performance(model, train)@metrics$logloss
## Get LogLoss from model metrics after predicting on test set (same as training set)
ll2 <- h2o.performance(model, test)@metrics$logloss
# Fresh upload so the response column is still numeric (not a factor).
test3 = h2o.uploadFile(locate("smalldata/logreg/prostate.csv"), destination_frame="test3")
actual <- as.numeric(test3[,response])
pred <- predict(model,test3)
# pred[,-1] drops the predicted-label column, keeping class probabilities.
ll3 <- LogLoss(actual, pred[,-1])
print(ll1)
print(ll2)
print(ll3)
expect_true(abs(ll1-ll2)<1e-6)
expect_true(abs(ll1-ll3)<1e-6)
### MULTINOMIAL
predictors = c(2:3,5:9)
response = 4
train[,response] <- as.factor(train[,response])
model = h2o.gbm(x=predictors,y=response,distribution = "multinomial",training_frame=train,
                ntrees=2,max_depth=3,min_rows=1,learn_rate=0.01,nbins=20)
## Get LogLoss from the model on training set
ll1 <- h2o.performance(model, train)@metrics$logloss
## Get LogLoss from model metrics after predicting on test set (same as training set)
ll2 <- h2o.performance(model, test)@metrics$logloss
test3 = h2o.uploadFile(locate("smalldata/logreg/prostate.csv"), destination_frame="test3")
actual <- as.numeric(test3[,response])
pred <- predict(model,test3)
ll3 <- LogLoss(actual, pred[,-1])
print(ll1)
print(ll2)
print(ll3)
expect_true(abs(ll1-ll2)<1e-6)
expect_true(abs(ll1-ll3)<1e-6)
}
doTest("Test logloss computation", test.logloss)
|
f33c9299f7c08f92f1bf16aef36d575adbfccd1c | bc42c76a961ef56d4d08a714c0eaabb4366a36a1 | /R/NHDaux.R | 85e3681c1de0f7dd077f9937e732b34626042413 | [] | no_license | cran/IndTestPP | 593ab1dc0ddb6addd008e80aed948d88058a240c | a628d5be9c314513541656d6e2ea28dd9bc91cee | refs/heads/master | 2021-06-28T21:12:36.085070 | 2020-08-28T18:00:03 | 2020-08-28T18:00:03 | 64,703,962 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 772 | r | NHDaux.R | NHDaux <-
function(r,lambdaC, lambdaD,posC,typeC, posD,typeD, T)
{
posWC<-posC[(posC>=r)&(posC<=(T-r))]
typeWC<-typeC[(posC>=r)&(posC<=(T-r))]
lambdaWC<-lambdaC[cbind(ceiling(posWC),typeWC)] # the result is a vector
lWnuC<-sum(1/lambdaWC, na.rm=TRUE)
L1D<-1-min(lambdaD)/lambdaD #it is a matrix
#if processes in D are homogeneous, L1D=0 all values; in that way
#the product in pag 13 can be only 0 or 1, depending if the point in C
#has the nearest neighbourg in D at a distance <=r or not
#the expression in pp13 must count the number of points in C with the nearest
# the nearest neighbourg in D at a distance >r
L1C0<-sapply(posWC, FUN = prodN2, r=r,L1D=L1D,posD=posD, typeD=typeD)
NHD<-sum(L1C0/lambdaWC)
return(c(lWnuC,NHD))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.