blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a78119ca6303fe2108f08c58ab44a4a00caf0abf
|
0a12b0865fb849e3b1f639edccccd69b9468a844
|
/R/flatpack.R
|
8505538e0b241199062461c28af82e7c15daf1aa
|
[] |
no_license
|
CopenhagenCenterForGlycomics/glycodomainR
|
ef6d768ec2d75f853dc3b2cccf55836ac0db4ee3
|
2fc52e92d6f1cfc4874e7a11c9ba396a048f02c3
|
refs/heads/master
| 2022-04-11T14:40:43.253890
| 2020-04-08T10:08:22
| 2020-04-08T10:08:22
| 104,898,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 467
|
r
|
flatpack.R
|
#' Combine 'msdata' datasets from installed gator packages
#'
#' Loads the \code{data} object from every installed package whose name
#' starts with \code{"gator."}, keeps the loaded objects whose \code{type}
#' attribute equals \code{"msdata"}, and row-binds them into one data frame
#' with an extra \code{dataset} column naming the source object.
#'
#' @return A data.frame stacking all msdata datasets (or NULL if none found).
#' @export
flatpack <- function() {
  targetenv <- new.env()
  # Fix: match package *names* only. The original grepped the whole
  # installed.packages() matrix (every cell, including library paths),
  # and the unescaped dot in "^gator." matched any character.
  pkgs <- grep("^gator\\.", rownames(installed.packages()), value = TRUE)
  for (pkg in pkgs) {
    # NOTE(review): each package's data set is named 'data'; presumably the
    # loaded objects carry distinct names inside targetenv -- confirm.
    data("data", package = pkg, envir = targetenv)
  }
  # identical() is FALSE for a missing 'type' attribute, avoiding the
  # logical(0)/NA results the original produced via attributes()$type == ...
  are_msdata <- vapply(
    ls(targetenv),
    function(x) identical(attr(targetenv[[x]], "type"), "msdata"),
    logical(1)
  )
  keep <- names(are_msdata)[are_msdata]
  flatpack <- do.call(rbind, lapply(keep, function(x) {
    cbind(targetenv[[x]], dataset = rep(x, nrow(targetenv[[x]])))
  }))
  flatpack
}
|
6754551506ecca24d922d29926f997839dad2036
|
f243a77fdd193583b937b620861fc042661db0cf
|
/DH_QMEE_JAGSFinal .R
|
583b581f7d9f14447b0d08d7b5ce6f8364802fe2
|
[] |
no_license
|
darcyh13/QMEE
|
f087d116d0025ed99b4e3537f83c8f279599a1cf
|
b81a43a18a05baa67c66b024393c5788a29af8ee
|
refs/heads/master
| 2020-04-15T19:17:18.906612
| 2019-05-03T03:16:22
| 2019-05-03T03:16:22
| 164,943,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,636
|
r
|
DH_QMEE_JAGSFinal .R
|
## Bayesian (JAGS) analysis of Drosophila vein-length phenotypes across
## genetic backgrounds and vein-mutant interactions. Data prep section.
library("tidyverse")
## BMB: you missed these, your code's not reproducible ...
library("R2jags")
library("broom.mixed")
library("lattice")
library("emdbook")
library("dotwhisker")
Genetic_Background_Interactions <- read_csv("GeneticBackgroundVeinMutantInteractionsFinal.csv")
# Change second gene to say "wt" if it does not have a second mutation,
# i.e. get rid of NA in this column
Genetic_Background_Interactions$gene2[is.na(Genetic_Background_Interactions$gene2)] <- "wt"
## BMB: could also tidyverse this
## %>% mutate(gene2=replace_na(gene2,"wt"))
# Only want to deal with two genes for now, not three: combine gene1 and
# rhomboid status into a single 'genotype' factor
Genetic_Background_Interactions <- mutate(Genetic_Background_Interactions,
genotype=interaction(gene1, rhomboid))
## BMB: use pipe to avoid repetitively typing Genetic_Background_Interactions
# Change relevant columns to factors
columns_to_factors <- c("background", "gene1", "gene2", "rhomboid", "sex", "individual", "genotype")
Genetic_Background_Interactions[,columns_to_factors] <- lapply(Genetic_Background_Interactions[,columns_to_factors], as.factor)
## BMB: mutate_at(columns_to_factors,as.factors)
# Omit remaining NAs (drops whole rows with any missing value)
Genetic_Background_Interactions <- na.omit(Genetic_Background_Interactions)
# Subset the Oregon-R ("Ore") background only for the analysis below
Genetic_Ore_Interactions <-Genetic_Background_Interactions %>%
filter(background == "Ore")
# Build a list from ... whose elements are named after the expressions that
# produced them; explicitly supplied names take precedence over deparsed ones.
named_list <- function(...) {
  values <- list(...)
  # Deparse each argument expression (drop the leading `list` symbol)
  deparsed <- sapply(as.list(substitute(list(...)))[-1], deparse)
  labels <- names(values)
  if (is.null(labels)) {
    # No names at all: use the deparsed expressions
    labels <- deparsed
  } else {
    # Fill only the blank slots, keeping user-supplied names
    blank <- labels == ""
    labels[blank] <- deparsed[blank]
  }
  setNames(values, labels)
}
# Model L3 (length of vein 3) with effects of genotype and sex.
# JAGS needs the data as a named list; factors must be converted to numeric codes.
OREdat2<-with(Genetic_Ore_Interactions,
named_list(N=nrow(Genetic_Ore_Interactions),
ngenotype=length(levels(genotype)),
genotype=as.numeric(genotype),
sex=as.numeric(sex),
L3))
# JAGS model, written as an R function and translated by R2jags
genomodel3 <- function() {
for (i in 1:N) {
## Poisson model
logmean[i] <- b_genotype[genotype[i]] ## predicted log(counts)
sexeff[i] <- b_sex*(sex[i]-1)
## BMB: assuming equal sex effect across all genotypes?
pred[i] <- exp(logmean[i] + sexeff[i]) ## predicted counts
L3[i] ~ dnorm(pred[i], 0.001)
}
## define priors in a loop
for (i in 1:ngenotype) {
b_genotype[i] ~ dnorm(0,0.001)
}
b_sex ~ dnorm(0,0.001)
}
## BMB: this isn't a poisson model because you didn't use ~ dpois() ...
## it's a log-link Gaussian model
## main difference is that it will assume a *multiplicative* effect of sex
# Fit: 4 chains, 50000 iterations, thinned by 100, flat inits
j3 <- jags(data=OREdat2,
inits=NULL,n.iter = 50000, n.thin = 100,
parameters=c("b_genotype", "b_sex"),
model.file=genomodel3, n.chains = 4)
tidy(j3,conf.int=TRUE, conf.method="quantile")
bb2 <- j3$BUGSoutput ## extract the "BUGS output" component
mm2 <- as.mcmc.bugs(bb2) ## convert it to an "mcmc" object that coda can handle
plot(j3)
xyplot(mm2) # I think this looks ok - doesn't look like burn-in needs to be adjusted
densityplot(mm2) # This doesn't look perfect but I think it looks OK
## BMB: these do look a *little* dicey to me -- would expect these to
## be smoother -- would probably run longer/thin in a real application
print(dwplot(j3))
## BMB: these confidence intervals are incredibly wide. I suspect
## something's wrong (but can't see anything obvious) ...
# Discuss prior assumptions:
# priors were set to b_sex ~ dnorm(0,0.001) and b_genotype[i] ~ dnorm(0,0.001),
# assuming a normal distribution
## BMB: this is a different model from what you fitted above
lm_geno_L3 <- lm(L3 ~ genotype + sex , data= Genetic_Ore_Interactions)
plot(lm_geno_L3)
# Scanned literature and could not find research that would allow me to make sensible priors. Therefore, I chose priors that would have minimal impact. With little knowledge about previous work I think these are safe priors. Better to be safe than assume wrong.
# Plots from the linear model look like a good fit, and although the Bayesian approach is powerful I don't think I used it in a powerful sense. For example, priors were not specific but rather safe choices. To my understanding, a Bayesian model with uninformative priors will not differ drastically (if at all) from maximum likelihood because the data are not being strongly influenced by the prior. I think this approach is a better fit when you have reason for strong priors.
## BMB: there are also complex-modeling cases where it's useful (e.g. mixed models)
## score: 2.25
|
e702e461a3e33b5b8538aa36c9084d45722d25e7
|
b310f8412ea3cfef7be133959c331d029bd1899f
|
/man/compareDatabases.Rd
|
62a0b738f40710ec3c82fbf7872936fe75fb3751
|
[] |
no_license
|
clbustos/rcebn
|
9444f387b4b46e5870f140f40ec90ba6a43f87db
|
7173891eae8b10ab93087a229aa677ff2674c004
|
refs/heads/master
| 2023-05-10T20:08:26.821587
| 2023-04-28T17:17:17
| 2023-04-28T17:17:17
| 29,193,481
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 512
|
rd
|
compareDatabases.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compareDatabase.r
\name{compareDatabases}
\alias{compareDatabases}
\title{Compare differences between two databases}
\usage{
compareDatabases(x1, x2, vars.rel = colnames(x1), vars.report = x1[, 1])
}
\arguments{
\item{x1}{First database}
\item{x2}{Second database}
\item{vars.rel}{Names of the columns to report}
\item{vars.report}{Vector or matrix of ids to keep}
}
\description{
Compare differences between two databases
}
|
c1ea30cc7aa01ef2482d8de76a21645236f41ac3
|
a5a5c0abfa9a7cb2e18a20d706ebf23d2fa990b8
|
/ndata/R/keel_an_nn_5.R
|
041e61f19f74678e461a19d9eee0b6d4e6a7982e
|
[] |
no_license
|
noeliarico/ndata
|
9242dc7fb4d8e14d3de450a0ec740c70deeb0f0d
|
7842de743552a133caa652ba64d74676d5f14f1a
|
refs/heads/master
| 2020-06-18T19:19:28.540199
| 2020-03-13T13:34:10
| 2020-03-13T13:34:10
| 196,415,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,647
|
r
|
keel_an_nn_5.R
|
# Shared post-processing for the KEEL noisy datasets (5% attribute noise, nn):
# read the .dat file, optionally fix the levels of the class column, coerce
# every other column to numeric, rename the class column to "class" and move
# it to the front. (Replaces five copy-pasted blocks of the same pipeline.)
prepare_keel_nn <- function(path, class_col, class_levels = NULL) {
  df <- read.keel(path)
  if (!is.null(class_levels)) {
    # Re-level the class factor in the requested order
    df[[class_col]] <- factor(as.character(df[[class_col]]), levels = class_levels)
  }
  feature_cols <- setdiff(colnames(df), class_col)
  # Factor -> character -> numeric, as in the original mutate_at() call
  df[feature_cols] <- lapply(df[feature_cols], function(x) as.numeric(as.character(x)))
  colnames(df)[colnames(df) == class_col] <- "class"
  # Class column first, features in their original order
  df[, c("class", feature_cols)]
}
# ecoli_5an --------------------------------------------------------------
ecoli_5an_nn <- prepare_keel_nn(
  "files/keel/noisy/an/nn/nn5/ecoli-5an-nn.dat", "Site",
  class_levels = c("cp", "im", "imL", "imS", "imU", "om", "omL", "pp"))
save(ecoli_5an_nn, file = "data/ecoli_5an_nn.RData")
# glass_5an --------------------------------------------------------------
glass_5an_nn <- prepare_keel_nn("files/keel/noisy/an/nn/nn5/glass-5an-nn.dat", "TypeGlass")
save(glass_5an_nn, file = "data/glass_5an_nn.RData")
# ionosphere_5an --------------------------------------------------------------
ionosphere_5an_nn <- prepare_keel_nn("files/keel/noisy/an/nn/nn5/ionosphere-5an-nn.dat", "Class")
save(ionosphere_5an_nn, file = "data/ionosphere_5an_nn.RData")
# sonar_5an --------------------------------------------------------------
sonar_5an_nn <- prepare_keel_nn("files/keel/noisy/an/nn/nn5/sonar-5an-nn.dat", "Type")
# "R" before "M", matching the level order used downstream
sonar_5an_nn <- sonar_5an_nn %>% mutate(class = fct_relevel(class, "R", "M"))
save(sonar_5an_nn, file = "data/sonar_5an_nn.RData")
# wine_5an --------------------------------------------------------------
wine_5an_nn <- prepare_keel_nn("files/keel/noisy/an/nn/nn5/wine-5an-nn.dat", "Class")
save(wine_5an_nn, file = "data/wine_5an_nn.RData")
|
12cdeb2d91d65013f18901cf69d6095376c80274
|
d0dfea53a44e40ce7e5155e03f8b072e23fcc7ba
|
/R/teste.R
|
2cf619e6decafc06af6c3c5f6bcdf2bededbd48c
|
[
"MIT"
] |
permissive
|
jjesusfilho/tjac
|
8e76f9cfa0955700e37e8a3245fb6c17addf000f
|
62e83dd12c149a98bc1edffea2f47e3caca88e46
|
refs/heads/master
| 2020-07-17T00:21:50.032338
| 2020-01-30T08:20:24
| 2020-01-30T08:20:24
| 205,899,922
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,329
|
r
|
teste.R
|
## Find the highest existing case (processo) number of a given year using a
## binary search over sequential CNJ case numbers.
##
## fim:         upper bound; any number large enough to exceed the total number
##              of cases distributed in the year
## inicio:      lower bound (default 0); the answer is accurate to within ~5
## ano, segmento, uf, distribuidor: components of the CNJ numbering scheme,
##              forwarded to cnj_sequencial()
## funcao:      download function taking a vector of case ids and a
##              'diretorio' in which to write the html files
## expr:        name (string) of a function applied to each downloaded html;
##              assumed to return 1 for an empty/nonexistent case and 0
##              otherwise -- TODO confirm against the sp_vazio implementation
function(fim,
         inicio = 0,
         ano,
         segmento,
         uf,
         distribuidor,
         funcao,
         expr = "sp_vazio") {
  while (fim - inicio > 5) {
    # Midpoint of the current search interval (no need to round here)
    inicio <- mean(c(inicio, fim))
    # Temporary directory to hold the downloaded html files
    temporario <- tempdir()
    # Probe a window of numbers around the midpoint so that at least one of
    # them exists whenever the last case number is >= inicio
    intervalo <- round(inicio + -4:4) %>%
      range()
    # Build the CNJ case numbers and download them.
    # Fix: the original piped cnj_sequencial() into funcao() while ALSO
    # passing the still-undefined 'l' as an argument, and hard-coded
    # distribuidor = 0001 instead of using the function argument.
    l <- cnj_sequencial(intervalo[1], intervalo[2], ano, segmento, uf,
                        distribuidor = distribuidor)
    funcao(l, diretorio = temporario)
    arquivos <- list.files(temporario, full.names = TRUE, pattern = "html")
    # Count the empty probes. If all checked cases are empty (soma == 5),
    # the last existing case number lies below 'inicio'.
    # Fix: 'soma' was never assigned in the original, so the test below
    # failed with "object 'soma' not found".
    # NOTE(review): eval(parse(...)) kept for flexibility; a plain function
    # name in 'expr' could use match.fun(expr) instead.
    soma <- purrr::map_dbl(arquivos, eval(parse(text = expr))) %>%
      sum()
    unlink(temporario, recursive = TRUE)  # discard the downloaded files
    if (soma == 5) {
      # Overshot: step the lower bound back, then shrink the upper bound to
      # the midpoint of the widened interval. Otherwise 'fim' is kept and
      # 'inicio' stays at the midpoint computed at the top of the loop.
      inicio <- inicio - (fim - inicio)
      fim <- mean(c(inicio, fim))
    }
  }
  return(inicio)
}
|
fe62fb859850752952dd92c9e7d0598a79cfea04
|
f7a4328c2c33fe5c1a4fc7895cffabac75f3d40c
|
/man/parseFeatures.Rd
|
de69f537e0e7ab81e2453e5a81443f6db338dd04
|
[] |
no_license
|
Frederic-bioinfo/MetaFeatures
|
447834d966c4f7ec140a366666e8de7878f34f94
|
69a625852597ab72ebc339da98b2d11d36770440
|
refs/heads/master
| 2021-01-18T12:45:11.243150
| 2014-06-18T19:25:08
| 2014-06-18T19:25:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,752
|
rd
|
parseFeatures.Rd
|
\name{parseFeatures}
\alias{parseFeatures}
\title{
Parse an experiment using a list of features
}
\description{
This function produces the list object that contains all the
information necessary to produce a metagene-like plot with the
\code{plotGraphic} function.
}
\usage{
parseFeatures(
bamFiles,
features=NULL,
specie="human",
maxDistance=5000,
design=NULL,
cores=1,
debug=FALSE)
}
\arguments{
\item{bamFiles}{A \code{vector} of bamFile to plot.}
\item{features}{Either a filename or a \code{vector} of filenames.
Supported features: ensembl_gene_id.
The file must contain a header that corresponds to the name of the
group.
If the value is NULL, all known RefSeq genes will be used.}
\item{specie}{human: Homo sapiens (default). mouse: Mus musculus.}
\item{maxDistance}{The distance around feature to include
in the plot.}
\item{design}{A \code{matrix} explaining the relationship
between multiple samples. One line per samples. One column
per group of samples. For example, biological replicates and
corresponding controls are in the same group.
1: treatment file(s). 2: control file(s).}
\item{cores}{Number of cores for parallel processing.
Require parallel package.}
\item{debug}{Keep the intermediate files (can use a lot of memory).
TRUE or FALSE.}
}
\details{
This function will extract the read density from alignment
files (bam) in the vicinity of the transcription start sites of
one or multiple lists of genes.
The values are normalized as reads per million aligned (RPM).
It is possible to parse multiple groups of genes by saving each
list in a separate file and by listing the file names in a
vector as the \code{features} parameter.
By using the \code{design} parameter, the \code{parseFeatures}
function will deal with more complex experimental designs such
as the use of replicates and/or controls. The values of the
controls are subtracted from every replicate.
}
\value{
\code{parseFeatures} returns a list that contains the data
necessary to produce a plot.
The data structure is a list of lists.
The first level contain the following fields:
* design: The information from the design file.
* param: The values of the argument used with parseFeatures.
* bamFilesDescription: A \code{data.frame} with the
following columns: the names of the original bam files,
the names of the sorted bam files and the number of
aligned reads.
* matrix: A list of matrices that will be used to produce the
plot. One element per combination of features/design
groups.
}
\author{
Charles Joly Beauparlant
<Charles.Joly-Beauparlant@crchul.ulaval.ca>
}
|
afec1ec916d4904bf79b9a840bd8fab848830047
|
a29b0b2ec374909fe1087099c309989dcd9d0a8b
|
/R/getVerticalLevelPars.ECOMS.R
|
f67ffe76e9bf9c115599feaca208d1e1edf3f16d
|
[] |
no_license
|
jbedia/loadeR.ECOMS
|
729bab9a41074234bc4b9d3e4473cc25bf411018
|
84528ab325dba57d2af34e1b71ce032830ee7ac9
|
refs/heads/master
| 2021-01-24T23:58:02.464171
| 2018-02-12T12:04:11
| 2018-02-12T12:04:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
getVerticalLevelPars.ECOMS.R
|
#' Definition of vertical dimension slices
#'
#' Returns the selected level value (if any) and a suitable java structure. This is a subroutine
#' of \code{loadGridDataset}, whose output is passed to \code{makeSubset}.
#'
#' @param grid An object of the java class \sQuote{GeoGrid}; its coordinate system is queried
#' for a vertical axis. (Note: previously mis-documented as \code{gcs}.)
#' @param dataset dataset name; for System4 datasets the vertical index is forced to 0
#' @param level Vertical level. Passed by \code{loadGridDataset}, obtained via \code{findVerticalLevel}
#' @return A list with the level value and either a java Range or a java null reference
#' defining the index of the vertical axis (null if no vertical levels exist)
#' @author J. Bedia
getVerticalLevelPars.ECOMS <- function(grid, dataset, level) {
gcs <- grid$getCoordinateSystem()
if (gcs$hasVerticalAxis()) {
# A vertical axis exists, so a '@level' specification is mandatory
if (is.null(level)) {
stop("Variable with vertical levels: '@level' following the variable name is required.\nGo to <http://meteo.unican.es/ecoms-udg/dataserver/catalog> for details", call. = FALSE)
}
levelInd <- gcs$getVerticalAxis()$findCoordElement(level)
# System4 datasets always use vertical index 0 (presumably a single-level
# axis -- confirm); for all others a negative index means "not found"
if (grepl("System4", dataset)) {
levelInd <- 0L
} else if (levelInd < 0) {
stop("Vertical level not found.\nGo to <http://meteo.unican.es/ecoms-udg/dataserver/catalog> for valid vertical level values", call. = FALSE)
}
zRange <- .jnew("ucar/ma2/Range", levelInd, levelInd)
} else {
# 2D variable: ignore any '@level' given by the user
if (!is.null(level)) {
warning("The variable selected is 2D: the '@level' specification was ignored", call. = FALSE)
level <- NULL
}
zRange <- .jnull()
}
return(list("level" = level, "zRange" = zRange))
}
# End
|
79961f4daec8cd54524331b6df64fafbee49b19b
|
fac56cba38e4919be45519605837165d85835fff
|
/man/closed_check.Rd
|
ef931836f6443f7c44e41b9d1f11b1a08d2f3708
|
[] |
no_license
|
mmparker/to1check
|
28cb87273ce41e442edc674fd70d4b842052afdd
|
808d1bc447760c618af7747d9cee1206ecb43333
|
refs/heads/master
| 2016-08-06T07:19:13.231958
| 2014-05-29T15:43:52
| 2014-05-29T15:43:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 677
|
rd
|
closed_check.Rd
|
\name{closed_check}
\alias{closed_check}
\title{Check that participants were closed correctly.}
\usage{
closed_check(cleanlist)
}
\arguments{
\item{cleanlist}{The list of cleaned TO1 data generated
by \code{\link{clean_to1}}}
}
\value{
This function returns a data.frame of participants whose
records aren't consistent with their closing reason. The
data.frame includes study ID, participant status, visit
date, and test result indicators.
}
\description{
This function checks that participants were closed
correctly (currently, just that those who are closed as
triple-negatives actually were).
}
\seealso{
\code{\link{to_close}} for participants who should be
closed.
}
|
bb50be9b182ff4ee35317446a6c09d7a37e614dc
|
cfc95853b9b474864a71e5f20bb54005e8582854
|
/best.R
|
73bc23cd7be19350502640868386af7a32ff35e1
|
[] |
no_license
|
tdetzel/CompDataAnalysisR
|
4a069cc11ab920cb0f8e896e505fcad8687cd210
|
faee83cc38b3ce676e5e41a5e94e0166fbdc501c
|
refs/heads/master
| 2016-09-05T17:25:53.095760
| 2014-04-26T15:06:07
| 2014-04-26T15:06:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
best.R
|
## Find the hospital with the lowest 30-day mortality rate for an outcome.
##
## state:   two-letter state abbreviation (must appear in the data file)
## outcome: one of "heart attack", "heart failure", "pneumonia"
## Returns the hospital name; ties are broken alphabetically by name.
## Stops with "invalid outcome" / "invalid state" for bad input.
best <- function(state, outcome) {
  ## Valid outcomes mapped (by position) to their mortality-rate columns
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  columns <- c("Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
               "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
               "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  ## Validate the outcome BEFORE the expensive file read (fix: the original
  ## read the whole CSV first); same error message as before
  if (!(outcome %in% outcomes)) {
    stop("invalid outcome")
  }
  ## Read outcome data (all columns as character; rates coerced below)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  if (!(state %in% data$State)) {
    stop("invalid state")
  }
  ## Restrict to the requested state and outcome column
  col <- columns[match(outcome, outcomes)]
  data <- subset(data, data$State == state)
  ## "Not Available" entries coerce to NA (with a warning, as before)
  data[, col] <- as.numeric(data[, col])
  data <- subset(data, !is.na(data[, col]))
  ## Lowest rate first; ties broken by hospital name
  sorted.data <- data[order(data[, col], data$Hospital.Name), ]
  sorted.data$Hospital.Name[1]
}
|
f04295f7b35e7c95cc3c076e90b0a7f7718a3dbd
|
850fb65a0435402667af1475072f36391fcb1627
|
/server.R
|
6cb7c54bef44977f4b5f21f17ce58be268b3ccf9
|
[] |
no_license
|
HunterRatliff1/Texas_Health_Professions
|
04cd84fa659128b34459071db5d414dbe4d2304d
|
739f8848a0e16dca2cf3716d7322e58700b436f8
|
refs/heads/master
| 2021-01-10T17:55:41.100355
| 2017-05-10T22:03:45
| 2017-05-10T22:03:45
| 49,854,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,273
|
r
|
server.R
|
# Shiny server for a Texas health-professions choropleth map: counts (or
# per-100k rates) of various health professions per county, rendered with
# leaflet, plus a histogram and an interactive table.
# # # # # # # # # # # # # # # # # # # # # # # # # #
# REQUIRED PACKAGES #
# # # # # # # # # # # # # # # # # # # # # # # # # #
library(magrittr)
library(shiny)
library(dplyr)
library(reshape2)
library(leaflet)
library(scales)
library(RColorBrewer)
library(ggplot2)
# # # # # # # # # # # # # # # # # # # # # # # # # #
# Load Required Data #
# # # # # # # # # # # # # # # # # # # # # # # # # #
# Counts per county (drop the CSV row-number column X) and county shapes
HPOs <- read.csv("Data/HPO.csv") %>% select(-X)
TX_Counties <- readRDS("Data/TX_Counties.RDS")
# # # # # # # # # # # # # # # # # # # # # # # # # #
# SHINY SERVER #
# # # # # # # # # # # # # # # # # # # # # # # # # #
shinyServer(function(input, output) {
# --- # --- # --- # --- # --- # --- # --- # --- #
# REACTIVE FXN #
# Filter by 'measurement' & 'HP_type' input #
# --- # --- # --- # --- # --- # --- # --- # --- #
filteredData <- reactive({
## Reactive expression for the data subsetted based
## on the user input. Specifically, this looks at
## the 'measurement' input
# If user selected 'per100k', convert every count column to a
# rate per 100,000 population (melt/recompute/dcast round trip)
if(input$measurement=="per100k") {
df <- HPOs %>%
melt(id.vars = c("County", "Population", "sqmi")) %>%
mutate(value = round(value / (Population/100000), 2)) %>%
dcast(County+Population+sqmi ~ variable)
}
else(
df <- HPOs # Otherwise just use the raw counts in 'HPOs'
)
# This line of code sets the selected 'HP_type' column as 'Value'
df$Value <- df[,c(input$HP_type)]
# Finally, merge the data.frame with the 'TX_Counties' shapefile
# (returns a spatial object; county names must match between the two)
tigris::geo_join(
spatial_data = TX_Counties,
data_frame = df,
by_sp = "NAME",
by_df = "County")
})
# --- # --- # --- # --- # --- # --- # --- # --- #
# REACTIVE FXN #
# Define the palette to be used with the map #
# --- # --- # --- # --- # --- # --- # --- # --- #
pal <- reactive({
## This reactive expression represents the palette function,
## which changes as the user makes selections in UI.
colorNumeric(input$colors, domain = NULL)
})
# --- # --- # --- # --- # --- # --- # --- # --- #
# OUTPUT: leaf_map #
# Output the static portion of the map #
# --- # --- # --- # --- # --- # --- # --- # --- #
output$leaf_map <- renderLeaflet({
## Use leaflet() here, and only include aspects of the map that
## won't need to change dynamically (at least, not unless the
## entire map is being torn down and recreated).
leaflet() %>%
addProviderTiles("OpenStreetMap.Mapnik") %>%
addPolygons(data = TX_Counties)
})
# --- # --- # --- # --- # --- # --- # --- # --- #
# OBSERVER FUNCTION #
# Change fill color & legend based on 'HP_type' #
# --- # --- # --- # --- # --- # --- # --- # --- #
observe({
## Incremental changes to the map (in this case, replacing the
## fill color when a new 'HP_type' is chosen) should be performed
## in an observer. Each independent set of things that can change
## should be managed in its own observer
# Call the palette function which is reactive
pal <- pal()
# Get merged shapefile + data.frame
merged_df <- filteredData()
# Define the HTML pop-up shown when a county is clicked
popup <- paste0(
"County: <b>", merged_df$County, "</b><br>",
"Citizen Population: ", comma(merged_df$Population), "<hr>",
"Veterinarians: <code>", merged_df$DVM, "</code><br>",
"Pharmacists: <code>", merged_df$PharmD, "</code><br>",
"Social Workers: <code>", merged_df$SWs, "</code><br>",
"Physical Therapists: <code>", merged_df$PTs, "</code><br>",
"Physician Assistants: <code>", merged_df$PAs, "</code><br>",
"Primary Care Physicians: <code>", merged_df$PCPs, "</code><br>",
"Psychiatrists: <code>", merged_df$Psych, "</code><br>",
"Family Medicine: <code>", merged_df$FM, "</code><br>",
"General Practice: <code>", merged_df$GPs, "</code><br>",
"Geriatrics: <code>", merged_df$Gers, "</code><br>",
"Internal Medicine: <code>", merged_df$IM, "</code><br>",
"Women's Health: <code>", merged_df$OB.GYN, "</code><br>",
"Pediatrics: <code>", merged_df$Peds, "</code><br>")
# Call the 'leaf_map' that was outputted earlier, and make adjustments
leafletProxy("leaf_map") %>%
## Clear any old controls
clearControls() %>%
## Clear any old polygons
clearShapes() %>%
## Add the counties polygon based on the merged (filtered) data
addPolygons(data = merged_df,
fillColor = ~pal(Value), popup=popup,
color="#b2aeae", fillOpacity=0.5, weight=1, smoothFactor=0.2) %>%
## Add a new legend
addLegend(pal = pal, values = merged_df$Value, position="bottomright")
})
# --- # --- # --- # --- # --- # --- # --- # --- #
# OUTPUT: plot #
# Output plot that fits in the control panel #
# --- # --- # --- # --- # --- # --- # --- # --- #
output$plot <- renderPlot({
merged_df <- filteredData()
# Histogram of the selected profession's value across counties
# (@data: extract the data slot of the spatial object)
merged_df@data %>%
dplyr::select(County:Peds) %>%
melt(id.vars = c("County", "Population")) %>%
filter(variable==input$HP_type) %>%
qplot(data=., x=value) + labs(y="Number of counties")
# layer_points()
})
# --- # --- # --- # --- # --- # --- # --- # --- #
# OUTPUT: table #
# Output an interactive data table #
# --- # --- # --- # --- # --- # --- # --- # --- #
output$table <- DT::renderDataTable({
# Counties and professions chosen in the UI, wide format
HPOs %>%
filter(County %in% input$County) %>%
melt(id.vars = c("County", "Population")) %>%
filter(variable %in% input$profession) %>%
dcast(County+Population ~ variable) %>%
DT::datatable()
})
})
|
b73ed1411a472fccc8ffb309ebb74f68ef5a4795
|
437ea30837d0068b8bca815f500396f30cd2ff74
|
/R/Plot.genes.R
|
4d0a70cd8fd51fc0068f375cd2de79cf8a51586d
|
[] |
no_license
|
hummelma/GlobalAncova
|
0d51390638a353a3d2732a962f05b9a2a73606fc
|
f2512c80850a0b0ebb6d5ee53c6ed9228b85b74c
|
refs/heads/master
| 2021-06-15T19:42:03.969139
| 2021-01-31T09:39:31
| 2021-01-31T09:39:31
| 123,418,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,394
|
r
|
Plot.genes.R
|
# Generic for gene-wise bar plots of the reduction in residual sum of squares
# between a full and a reduced linear model (GlobalAncova-style diagnostics).
# Methods below dispatch on which of formula.full/formula.red/group/test.terms
# are supplied.
setGeneric("Plot.genes", function(xx,formula.full,formula.red,model.dat,group,covars=NULL,
test.terms,test.genes,Colorgroup=NULL,legendpos="topright",returnValues=FALSE,bar.names,...)
standardGeneric("Plot.genes"))
# xx: expression matrix (rows=genes, columns=subjects)
# formula.full: model formula for the full model
# formula.red: model formula for the reduced model
# model.dat: data frame that contains the group and covariable information
# group: group variable
# covars: covariate information
# test.terms: character vector of terms of interest
# test.genes: may define the gene set to be plotted
# Colorgroup: character variable giving the group that specifies coloring
# legendpos: position of the legend
# returnValues: shall gene-wise reduction in sum of squares = bar heights be returned?
# bar.names: user specified bar names; if missing names of 'test.genes' or row names of 'xx' are taken
# ...: additional graphical parameters
############################# general function #################################
# Method for the general case: both the full and the reduced model are given
# explicitly as formulas evaluated against 'model.dat'.
setMethod("Plot.genes", signature(xx="matrix",formula.full="formula",formula.red="formula",
model.dat="ANY",group="missing",covars="missing",test.terms="missing"),
definition = function(xx,formula.full,formula.red,model.dat,test.genes,Colorgroup=NULL,
legendpos="topright",returnValues=FALSE,bar.names,...){
# test for model.dat
if(!is.data.frame(model.dat))
stop("'model.dat' has to be a data frame")
# test for test.genes (i.e. only one gene set can be given and not a list of gene sets)
if(!missing(test.genes)){
if(!(data.class(test.genes) %in% c("numeric","character")))
stop("'test.genes' has to be a vector of gene names or indices")
}
# get gene set (restrict the expression matrix to the requested genes)
if(!missing(test.genes))
xx <- xx[test.genes,,drop=FALSE]
# bar names default to row names (or indices if those are absent)
if(is.null(rownames(xx)))
rownames(xx) <- 1:nrow(xx)
if(missing(bar.names))
bar.names <- rownames(xx)
if(length(bar.names) != nrow(xx))
stop("length of 'bar.names' not equal to size of gene set")
# basic analysis: gene-wise reduction in sum of squares and MSE
res <- reduSQ(xx=xx,formula.full=formula.full,formula.red=formula.red,model.dat=model.dat)
redu.SSQ.Genes <- res$redu.genes
msE.genes <- res$mse
# plot
plotgenes(xx=xx,model.dat=model.dat,Colorgroup=Colorgroup,redu.SSQ.Genes=redu.SSQ.Genes,msE.genes=msE.genes,
legendpos=legendpos,returnValues=returnValues,bar.names=bar.names,...)
}
)
########################## function for 2 groups ###############################
# Convenience method: instead of formulas, a group variable (and optional
# covariates) is given; the formulas are built by 'group2formula'. Uses
# substitute() to recover the variable names, so it must run before any
# evaluation of 'group'/'covars'.
setMethod("Plot.genes", signature(xx="matrix",formula.full="missing",formula.red="missing",
model.dat="missing",group="ANY",test.terms="missing"),
definition = function(xx,group,covars=NULL,test.genes,Colorgroup=NULL,
legendpos="topright",returnValues=FALSE,bar.names,...){
# test for test.genes (i.e. only one gene set can be given and not a list of gene sets)
if(!missing(test.genes)){
if(!(data.class(test.genes) %in% c("numeric","character")))
stop("'test.genes' has to be a vector of gene names or indices")
}
# get gene set
if(!missing(test.genes))
xx <- xx[test.genes,,drop=FALSE]
# 'group' is assumed to be the variable relevant for coloring
if(is.null(Colorgroup))
Colorgroup <- deparse(substitute(group))
# group name
group.name <- deparse(substitute(group))
if(is.null(dim(covars)))
covar.names <- deparse(substitute(covars))
else
covar.names <- colnames(covars)
# get formulas and 'model.dat' out of 'group' and 'covars'
res <- group2formula(group=group, group.name=group.name, covars=covars, covar.names=covar.names)
formula.full <- res$formula.full
formula.red <- res$formula.red
model.dat <- res$model.dat
# bar names
if(is.null(rownames(xx)))
rownames(xx) <- 1:nrow(xx)
if(missing(bar.names))
bar.names <- rownames(xx)
if(length(bar.names) != nrow(xx))
stop("length of 'bar.names' not equal to size of gene set")
# basic analysis
res <- reduSQ(xx=xx,formula.full=formula.full,formula.red=formula.red,model.dat=model.dat)
redu.SSQ.Genes <- res$redu.genes
msE.genes <- res$mse
# plot
plotgenes(xx=xx,model.dat=model.dat,Colorgroup=Colorgroup,redu.SSQ.Genes=redu.SSQ.Genes,
msE.genes=msE.genes,legendpos=legendpos,returnValues=returnValues,bar.names=bar.names,...)
}
)
############################# with 'test.terms' ################################
# Method for testing specific model terms: only the full model formula is
# given; the reduced design matrix is obtained by dropping the columns named
# in 'test.terms' from the full design matrix.
setMethod("Plot.genes", signature(xx="matrix",formula.full="formula",formula.red="missing",
model.dat="ANY",group="missing",covars="missing",test.terms="character"),
definition = function(xx,formula.full,model.dat,test.terms,test.genes,Colorgroup=NULL,
legendpos="topright",returnValues=FALSE,bar.names,...){
# test for model.dat
if(!is.data.frame(model.dat))
stop("'model.dat' has to be a data frame")
# test for test.genes (i.e. only one gene set can be given and not a list of gene sets)
if(!missing(test.genes)){
if(!(data.class(test.genes) %in% c("numeric","character")))
stop("'test.genes' has to be a vector of gene names or indices")
}
# get gene set
if(!missing(test.genes))
xx <- xx[test.genes,,drop=FALSE]
# test for 'test.terms': every tested term must be a column of the full
# design matrix. (Fix: removed a dead assignment 'terms.all <- test.terms'
# that was immediately overwritten by the line below.)
D.full <- model.matrix(formula.full, model.dat)
terms.all <- colnames(D.full)
# are all terms variables compatible with 'model.dat'?
if(!all(test.terms %in% terms.all))
stop("'test.terms' are not compatible with the specified models")
# reduced design = full design without the tested columns
D.red <- D.full[,!(colnames(D.full) %in% test.terms), drop=FALSE]
# bar names
if(is.null(rownames(xx)))
rownames(xx) <- 1:nrow(xx)
if(missing(bar.names))
bar.names <- rownames(xx)
if(length(bar.names) != nrow(xx))
stop("length of 'bar.names' not equal to size of gene set")
# basic analysis (reduced model passed as a design matrix, not a formula)
res <- reduSQ(xx=xx,formula.full=formula.full,D.red=D.red,model.dat=model.dat)
redu.SSQ.Genes <- res$redu.genes
msE.genes <- res$mse
# plot
plotgenes(xx=xx,model.dat=model.dat,Colorgroup=Colorgroup,redu.SSQ.Genes=redu.SSQ.Genes,
msE.genes=msE.genes,legendpos=legendpos,returnValues=returnValues,bar.names=bar.names,...)
}
)
################################################################################
################################################################################
# main function: draw the gene-wise bar plot of the reduction in sum of
# squares, overlay the gene-wise MSE as a step line, and optionally return
# the bar heights. Temporarily replaces the global color palette and resets
# it to "default" before returning.
plotgenes <- function(xx, model.dat, Colorgroup, redu.SSQ.Genes, msE.genes, legendpos, returnValues=FALSE, bar.names, col, xlab, ylab, ...){
if(!is.character(Colorgroup) && !is.null(Colorgroup))
stop("'Colorgroup' has to be a character")
N.Genes <- dim(xx)[1]
if(missing(col))
# default color palette
palette(c("#931638",rgb(1,.95,0.1),"lightblue","NavyBlue","#F7B50C","lightgreen","grey","mistyrose","#008751",rgb(1,.2,.2)))
else if(is.numeric(col))
palette(palette()[rep(col,2)])
else
palette(rep(col,2))
# if a Colorgroup variable is given and if it is not continuous
colorgroup.vector <- as.numeric(model.dat[,Colorgroup])
N.groups <- length(unique(colorgroup.vector))
if(N.groups > 0 && N.groups <= 10){
# in which group has a gene the highest expression
means <- NULL
for(elt in unique(colorgroup.vector))
means <- cbind(means, apply(xx, 1, function(x) mean(x[colorgroup.vector==elt])))
up <- apply(means, 1, function(x) unique(colorgroup.vector)[which(x == max(x))])
# colors and labels for the legend: color index i marks genes whose
# maximum mean expression falls in the i-th (sorted) group
color <- numeric(length(up))
colind <- numeric(0)
label <- numeric(0)
for(i in 1:N.groups){
if(sort(unique(colorgroup.vector))[i] %in% up){
color[up == sort(unique(colorgroup.vector))[i]] <- i
colind <- c(colind, i)
label <- c(label, paste("max. expression in",Colorgroup,"=",sort(unique(model.dat[,Colorgroup]))[i]))
}
}
}
# for a continuous group variable
else
color <- 1
# plotting results
if(missing(xlab))
xlab <- "Reduction in Sum of Squares"
if(missing(ylab))
ylab <- "Genes"
# bars (reversed so the first gene appears at the top)
horizontal.bars(
x = rev(redu.SSQ.Genes),
xlab = xlab,
ylab = ylab,
color = rev(color),
bar.names = rev(bar.names), ...
)
# MSE-line: step function with one horizontal segment per gene
pp <- sort(c(-.5+(1:N.Genes),.5+(1:N.Genes)))
vv <- rev(rep(msE.genes,rep(2,N.Genes)))
lines(vv,pp,type="s",lwd=2)
# legend
if(N.groups > 0 && N.groups <= 10)
legend(legendpos, label, col=colind, pch=15)
palette("default")
# return bar heights
if(returnValues){
names(redu.SSQ.Genes) <- rownames(xx)
return(redu.SSQ.Genes)
}
}
################################################################################
# function for plotting horizontal bars with labels added at right margin
# bars are determined by value of x which is assumed to be a vector
# no formal check of variables performed
# setting the plot region
# (also used in Plot.subjects')
# Draw horizontal bars for the values of 'x' with labels at the right margin.
# Bars are determined by the value of x, which is assumed to be a vector;
# no formal check of the arguments is performed.
# (Also used in 'Plot.subjects'.)
#
# Args:
#   x          numeric vector of bar lengths, one bar per element
#   labelsize  character expansion used for the right-margin labels
#   bar.names  labels printed at the right margin, one per bar
#   color      fill color(s) for the bars
#   xlim       x-axis limits; derived from range(x) when missing
#   ...        further arguments passed on to plot()
horizontal.bars <- function(x, labelsize=.75, bar.names=NULL, color, xlim,...){
  if(missing(xlim)){
    # pad the data range by 5% and make sure 0 is included on the left
    xlim <- 0.05*c(-1,1)*range(x)+c(min(x),max(x))
    xlim[1] <- min(0,xlim[1])
  }
  n <- length(x)
  ylim <- c(0,n+1)
  # enlarging right margin for bar.names
  names <- TRUE
  # plot.new() must run before strwidth() can measure the labels
  plot.new()
  w <- max(1.5 * max(strwidth(bar.names, "inches", labelsize)), .6) # !!
  oldmai <- par("mai")
  # widen only the right margin, keep the other three as they were
  par(mai=c(oldmai[1:3],max(w,oldmai[4])), new=T)
  # plotting bars with border=F nothing appears color is NULL
  plot(0,type="n", xlim=xlim, ylim=ylim, yaxt="n",...)
  rect(rep(0,n),(1:n)-.3,x,(1:n)+.3, col=color, border="white")
  box()
  # adding bar.names at right margin
  if(names){
    axis(4,at=1:n,bar.names,cex.axis=labelsize,las=2)
    # restore the margins changed above
    par(mai=oldmai)
  }
}
################################################################################
# computes the reduction in sum of squares for genes and subjects and the MSE
# (also used in Plot.subjects' and 'Plot.sequential')
# Compute, for every gene and every subject, the reduction in the residual
# sum of squares when moving from the reduced to the full model, together
# with the per-gene mean squared error of the full model.
# (Also used in 'Plot.subjects' and 'Plot.sequential'.)
#
# Args:
#   xx           expression matrix, genes in rows and subjects in columns
#   formula.full model formula of the full model
#   formula.red  model formula of the reduced model (ignored if D.red given)
#   D.red        optional precomputed design matrix of the reduced model
#   model.dat    data frame in which the formulas are evaluated
#
# Returns a list with components 'redu.genes' (per-gene reduction, scaled by
# the difference in model dimension), 'redu.subjects' (per-subject reduction)
# and 'mse' (per-gene mean squared error of the full model).
reduSQ <- function(xx, formula.full, formula.red=NULL, D.red=NULL, model.dat){
  n.subjects <- ncol(xx)
  # design matrices of the two competing models
  design.full <- model.matrix(formula.full, data=model.dat)
  if(is.null(D.red))
    D.red <- model.matrix(formula.red, data=model.dat)
  dim.full <- ncol(design.full)
  dim.red <- ncol(D.red)
  # residual matrices under each model; row.orth2d is a project helper that
  # presumably projects each row of xx onto the orthogonal complement of the
  # design's column space -- confirm against its definition
  resid.full <- row.orth2d(xx, design.full)
  resid.red <- row.orth2d(xx, D.red)
  # element-wise gain in squared residuals from using the full model
  sq.gain <- resid.red^2 - resid.full^2
  redu.genes <- rowSums(sq.gain) / (dim.full - dim.red)
  redu.subjects <- colSums(sq.gain)
  # mean squared error of the full model, per gene
  mse.genes <- rowSums(resid.full^2) / (n.subjects - dim.full)
  list(redu.genes=redu.genes, redu.subjects=redu.subjects, mse=mse.genes)
}
|
dee5353a763bb570688f13fa72c02899d1307147
|
8dac5359ccc3874c11c2f32e5276da785f1bcf5c
|
/scripts/Normalizacao.R
|
b550191fec432222bcde6652cce627158cc83e0b
|
[] |
no_license
|
wellrangel/MDM-Predict-5p
|
018fe3c560d37ca70aa0c89edc6f1eac77dd782d
|
fdf092ed17177576aab5c3365832657a025f3bcf
|
refs/heads/master
| 2020-12-02T19:30:26.272118
| 2017-07-05T18:23:35
| 2017-07-05T18:23:35
| 96,348,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,234
|
r
|
Normalizacao.R
|
# Build a filtered player dataset ("bancoatletasProdutivo.csv") from the raw
# per-game records: keep only players with enough games per season, a
# minimum productivity, and enough seasons played.
library(sqldf)
atletasJogo <- read.csv("data/bancoatletasORIGINAL.csv", sep = ";" ,stringsAsFactors=FALSE)
# drop players with too few games per season (more than 5 required)
atletas <- sqldf("SELECT id_jogador, nome_temporada, count(id_partida) as totalpartida from atletasJogo group by nome_temporada, id_jogador")
atletas <- atletas[atletas$totalpartida > 5,]
atletas10 <- sqldf("SELECT * from atletasJogo aj inner join atletas a on aj.id_jogador = a.id_jogador and aj.nome_temporada = a.nome_temporada")
# drop players with low productivity (at least 3 points and 1 rebound)
atletasProdutividade <- atletas10[atletas10$pontos_total_jogador >=3,]
atletasProdutividade <- atletasProdutividade[atletasProdutividade$rebotes_total_jogador >=1,]
#atletasProdutividade <- atletasProdutividade[atletasProdutividade$faltas_cometidas_jogador >=1,]
# drop players who played too few seasons (more than 3 required)
# NOTE(review): the alias 'totalpartida' below actually holds id_jogador, and
# count() is not loaded by sqldf -- presumably plyr::count is intended; confirm
# that the required package is attached before running this script.
atletas <- sqldf("SELECT id_jogador as totalpartida from atletasJogo group by nome_temporada, id_jogador")
atletas <- count(atletas)
atletas <- atletas[atletas$freq > 3,]
atletasIDJ <-atletas$totalpartida
atletasProdutividadeFinal <- subset(atletasProdutividade, id_jogador %in% atletasIDJ)
write.csv(atletasProdutividadeFinal, file = "data/bancoatletasProdutivo.csv")
|
06c1a80f3fc83fdd856c09a892d015dc9b05b941
|
11b0786b49fd68edf0b78c1a7d6fbdfb97887985
|
/cachematrix.R
|
9b784987fe5326e58784da0122b28fd4a33e31cf
|
[] |
no_license
|
etbear/ProgrammingAssignment2
|
475f10dde805cd2efa971f0efd3de1fe160e724b
|
696550765dc7d1efff7d8eea96cbac7902ecf2ac
|
refs/heads/master
| 2021-01-20T16:24:19.837455
| 2015-05-23T14:24:34
| 2015-05-23T14:24:34
| 36,124,237
| 0
| 0
| null | 2015-05-23T13:23:30
| 2015-05-23T13:23:30
| null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
cachematrix.R
|
## the two functions allow us to solve a matrix and store the solution for future use
## when called the first time, cacheSolve will solve the matrix
## calling cacheSolve later will retrieve previously stored solution, eliminating the need to re-compute
## makeCacheMatrix returns a list of 4 objects, set, get, setinv and getinv
## set is used to store matrix value; get returns the matrix
## setinv is used to store inverse matrix; getinv returns the inverse matrix (or null)
# Build a cache-aware matrix wrapper: a list of four closures that share
# the matrix 'x' and its (lazily supplied) inverse through their enclosing
# environment.
#
#   set(y)    - store a new matrix and invalidate any cached inverse
#   get()     - return the stored matrix
#   setinv(i) - cache the inverse
#   getinv()  - return the cached inverse, or NULL when nothing is cached
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # a new matrix makes the previously cached inverse invalid
      cached.inverse <<- NULL
    },
    get = function() {
      x
    },
    setinv = function(solve) {
      cached.inverse <<- solve
    },
    getinv = function() {
      cached.inverse
    }
  )
}
## cacheSolve takes a list as argument
## it checks if inverse of a matrix has been previously computed
## if so, the function retrieves the stored value without further computation
## otherwise, the function solves the matrix and set the value at the same time
# Return the inverse of the special "matrix" produced by makeCacheMatrix.
# A previously cached inverse is returned directly (with a message);
# otherwise the inverse is computed with solve(), stored in the cache,
# and returned.
#
# Args:
#   x   list created by makeCacheMatrix (provides get/getinv/setinv)
#   ... further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
|
be7517456d2b398976e558e9a99362830230156c
|
deb839b1a41d68e91f72e18bddfef82da62216c5
|
/server.R
|
883ffa4729975c6efa7cc3387002e13ae17a204c
|
[
"Apache-2.0"
] |
permissive
|
smeebear/3220-Final-Project
|
9ec14b19e432489f7798627f6b3ec8ee8d7d6063
|
7121213ba105f965fe37a4b539134729ed6e0010
|
refs/heads/master
| 2020-05-09T11:28:39.942391
| 2019-04-22T21:26:17
| 2019-04-22T21:26:17
| 181,081,700
| 1
| 0
|
Apache-2.0
| 2019-04-22T19:26:27
| 2019-04-12T20:50:45
|
R
|
UTF-8
|
R
| false
| false
| 3,414
|
r
|
server.R
|
library(class)
library(dplyr)
library(C50)
library(printr)
library(shiny)
credit = "good"
err = 0
# Shiny server: on the 'eval' button press, runs the user's inputs through a
# hard-coded decision tree (thresholds transcribed from a fitted C5.0 model,
# shown as tree.png) and reports a "good"/"bad" credit opinion plus a
# thumbs-up/thumbs-down image.
server <- function(input, output) {
  output$text = renderPrint({credit})
  #output$credit = "bad"
  observeEvent(input$eval, {
    # put in the stuff to check input against model
    # NOTE(review): 'credit' is assigned with '=' inside this handler, so it
    # is local to the handler; the render functions created below capture it
    # via closure, while the renderPrint above sees the file-level default.
    if(input$education <= 2)
    {
      if(as.double(input$ctg_ratio) <= 1.145)
      {
        credit = "good"
      }
      else
      {
        if(as.double(input$annuity_length) > 21.45468)
        {
          credit = "good"
        }
        else
        {
          # days_birth is entered in years and scaled to (negative) days here
          if(as.double(input$days_birth) *-365 <= -12851)
          {
            credit = "good"
          }
          else
          {
            credit = "bad"
          }
        }
      }
    }
    else
    {
      if(as.double(input$days_birth) *-365 <= -14788)
      {
        if(as.double(input$annuity_length) <= 10.77844)
        {
          credit = "good"
        }
        else
        {
          if(as.double(input$annuity_length) <= 12.6609)
          {
            credit = "bad"
          }
          else
          {
            if(input$name_prod_type_min <= 1)
            {
              if(as.double(input$days_credit_mean) *-1 > -854.5555)
              {
                credit = "bad"
              }
              else
              {
                if(input$name_contract_status_mean <= 1.814815)
                {
                  credit = "good"
                }
                else credit = "bad"
              }
            }
            else
            {
              if(as.double(input$ctg_ratio) <= 1.1584)
              {
                credit = "good"
              }
              else
              {
                if(input$code_gender <= 1)
                {
                  credit = "good"
                }
                else credit = "bad"
              }
            }
          }
        }
      }
      else
      {
        if(as.double(input$ctg_ratio) > 1.211197)
        {
          credit = "bad"
        }
        else
        {
          if(input$days_employed <= -1785)
          {
            if(input$name_contract_status_mean <= 1.636364) credit = "good"
            else credit = "bad"
          }
          else
          {
            if(input$payment_perc_mean <= 0.9294118) credit = "bad"
            else
            {
              if(input$name_contract_status_mean > 1.78125) credit = "bad"
              else
              {
                if(as.double(input$days_credit_mean) * -1 <= -914.0714) credit = "good"
                else credit = "bad"
              }
            }
          }
        }
      }
    }
    # re-bind the text output to the freshly computed decision
    output$text = renderPrint({credit})
    # show the image that matches the decision
    output$credit_opinion <- renderImage({
      if (is.null(credit))
        return(NULL)
      if (credit == "good") {
        return(list(
          src = "thumbsup.jpg",
          contentType = "image/jpg",
          alt = "good"
        ))
      } else if (credit == "bad") {
        return(list(
          width = 300,
          height = 300,
          src = "thumbsdown.jpg",
          filetype = "image/jpg",
          alt = "bad"
        ))
      }
    }, deleteFile = FALSE)
  })
  # static image of the fitted decision tree, always shown
  output$tree = renderImage({
    return(list(
      width = 600,
      height = 500,
      src = "tree.png",
      filetype = "image/png",
      alt = "tree"
    ))
  }, deleteFile = FALSE)
}
|
05417cf8e7a7fc3a7a961515a104f32a13d131d4
|
e5a9f6ab465cd0f28c26f95fc781ba59927edf8b
|
/man/boxplot.OutCoe.Rd
|
2c61b70fb24a4cdd74e914ff013a81923eceba9a
|
[] |
no_license
|
stas-malavin/Momocs
|
550266555ab7724a01ca3b58777bb623de433566
|
44789d9bce9fe923a9af128f0493d16a73fe9fdd
|
refs/heads/master
| 2020-12-26T15:49:20.330404
| 2017-03-08T20:43:13
| 2017-03-08T20:43:13
| 59,151,808
| 0
| 1
| null | 2016-05-18T21:15:59
| 2016-05-18T21:15:58
| null |
UTF-8
|
R
| false
| true
| 852
|
rd
|
boxplot.OutCoe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gr-Coe.R
\name{boxplot.OutCoe}
\alias{boxplot.Coe}
\alias{boxplot.OutCoe}
\title{Boxplot of morphometric coefficients}
\usage{
\method{boxplot}{OutCoe}(x, retain = 6, drop = 0, center.y = TRUE, ...)
}
\arguments{
\item{x}{the \link{Coe} object}
\item{retain}{numeric the number of harmonics to retain}
\item{drop}{numeric the number of harmonics to drop}
\item{center.y}{logical whether to center the y-axis}
\item{...}{useless here but maintain the consistency with generic boxplot}
}
\value{
a ggplot2 object
}
\description{
Explores the distribution of coefficient values.
}
\examples{
data(bot)
bot.f <- efourier(bot, 24)
boxplot(bot.f)
data(olea)
op <- opoly(olea)
boxplot(op)
}
\seealso{
Other Coe_graphics: \code{\link{hcontrib}},
\code{\link{hist.OutCoe}}
}
|
6a4b829c38384cfa8e5d659eadd9d3791ca511d1
|
07abd435b05bd123827286cb23bc130f1648cf4f
|
/Problem 2.R
|
16e8813d0c45df9e856189e8349bcb8436b52abe
|
[] |
no_license
|
reakain/DataAnalysisProject2
|
02cdc5e98e60fe99cbdd7ad4c3d56b8b30db1d2a
|
7fb5ea2c1621d6a8f670425f03e6e10e2ea518a8
|
refs/heads/master
| 2021-05-29T15:56:25.063372
| 2014-06-20T17:38:26
| 2014-06-20T17:38:26
| 20,963,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 829
|
r
|
Problem 2.R
|
## Problem 2
## Have total emissions from PM2.5 decreased in the
## Baltimore City, Maryland (fips == "24510") from
## 1999 to 2008? Use the base plotting system to make
## a plot answering this question.
# Get to my working directory and read in the data
setwd("~/git_repos/DataAnalysisProject2")
NEI <- readRDS("summarySCC_PM25.rds")
# source classification codes; loaded with the assignment data but not used below
SCC <- readRDS("Source_Classification_Code.rds")
# Get specifically the baltimore data
baltem <- NEI[NEI[,"fips"]=="24510",]
#Sum the baltimore emissions by year
balty <- aggregate(Emissions~year,data=baltem,FUN=sum)
# write the plot to a png
png(file="plot2.png",height=450,width=450)
# one bar per measurement year, in the order produced by aggregate()
barplot(height=balty[,"Emissions"],names.arg=c(1999,2002,2005,2008),
xlab="Year", ylab="Total Emissions from PM2.5 in Baltimore",
main="Emissions from 1999 to 2008",log="")
dev.off()
|
f38c004db8a824d444b5a0d57bba441dd04a8644
|
67aceca655f47d98d8e62e484cd4e606b72c8870
|
/Project Euler/018 - Maximum path sum I.R
|
013a01d01d4fa9e552e5a0a70c04b0a0ce6ea92f
|
[] |
no_license
|
QiliWu/leetcode-and-Project-Euler
|
130cf8e2d50707687b8a4c97426dbdaddeca902b
|
ade9a06861ec97ffb5ad33d83c9d9e93b043abcd
|
refs/heads/master
| 2021-09-21T16:49:31.539051
| 2018-08-29T05:32:48
| 2018-08-29T05:32:48
| 103,649,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
018 - Maximum path sum I.R
|
#Maximum path sum I
#Problem 18
#By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
# fix: the example triangle was pasted as bare code, which is a syntax error
# in R (e.g. '7 4'); it is commentary and belongs in comments:
#       3
#      7 4
#     2 4 6
#    8 5 9 3
#That is, 3 + 7 + 4 + 9 = 23.
#Find the maximum total from top to bottom of the triangle below:
# read the triangle (one row per line, no header) from the local data file
con <- file('/home/wuqili/R practise/Project Euler/018.csv')
triangle <- read.csv(con, header = FALSE)
# Collapse the triangle from the bottom row upwards: every cell is replaced
# by its value plus the larger of its two children, so after the sweep the
# apex holds the maximum top-to-bottom path sum.
#
# Args:
#   grid  data frame holding the triangle; row i uses columns 1..i and the
#         remaining cells of the row are ignored
#
# Returns the maximum path sum (the collapsed apex value).
maxpathsum <- function(grid){
  bottom <- length(grid) - 1
  for(row in bottom:1){
    for(col in seq_len(row)){
      best.child <- max(grid[row + 1, col], grid[row + 1, col + 1])
      grid[row, col] <- grid[row, col] + best.child
    }
  }
  grid[1, 1]
}
maxpathsum(triangle)
|
63822c56e2d10314df08ffc8ef5d9d76547a0a32
|
cfdbca2e80b393dee23647a41810b7e0827c5359
|
/Z Old Tex Files/program R/Multivariada/Ejemplo student.r
|
bbffd78aae288fd4c08be4397378753ed6eac878
|
[] |
no_license
|
cualquiercosa327/2ed-team
|
99e1d67f861785ef6ec003901e9869525128178f
|
cd596e8ff5a657a3c0118763d1cd6a7fbc4bfc69
|
refs/heads/master
| 2022-01-06T05:38:44.019913
| 2019-06-25T20:54:11
| 2019-06-25T20:54:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 464
|
r
|
Ejemplo student.r
|
# Sedative data for two treatments: QQ-plots to assess normality, followed
# by a likelihood-ratio test of independence between the two variables.
a<-c(0.7,-1.6,-0.2,-1.2,-1,3.4,3.7,0.8,0,2)
b<-c(1.9,0.8,1.1,0.1,-0.1,4.4,5.5,1.6,4.6,3.4)
length(a)
length(b)
# normal QQ-plots side by side, one per sedative
par(mfrow=c(1,2))
qqnorm(a,main="Sedante A")
qqline(a)
qqnorm(b,main="Sedante B")
qqline(b)
# arrange both samples as columns of one matrix
x<-matrix(c(a,b),10,2)
var(x)
cor(x)
## Independence between the two variables:
## likelihood-ratio statistic -n*log|R| against a chi-square distribution
## with p*(p+1)/2 - p degrees of freedom
# NOTE(review): alpha = 0.5 looks unusual for a significance level --
# was 0.05 intended? confirm
alpha<-0.5
p<-2
n<-10
esta<--n*log(det(cor(x)))
perce<-qchisq(1-alpha,p*(p+1)/2-p)
p.val<-pchisq(esta,p*(p+1)/2-p,lower.tail=F)
esta
perce
p.val
|
6d2b86e3668c95cb9688d18f0f8256ccb3fcefb1
|
167237832367e3413ba2d5beb78c5780df1a6c82
|
/DTtoPostgres.R
|
78d12fe202d97f32f00d8f867b2ff1258a2d9e56
|
[] |
no_license
|
mplunney/R
|
15a5f42d4498b9ddaefd67db717ecb3244baa74f
|
0376b425b4a846c11466aee37e3a6a57d1e27284
|
refs/heads/master
| 2021-01-19T13:46:53.627341
| 2018-10-09T16:27:05
| 2018-10-09T16:27:05
| 82,419,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
DTtoPostgres.R
|
#optional pre-function DT check
#View(Derived)
#Trim column names into postgres-suitable format
# Convert arbitrary column names into PostgreSQL-friendly identifiers:
# lower-cased, runs of non-alphanumeric characters collapsed to '_',
# made syntactically valid and unique with make.names(), and any dots
# that make.names() introduced turned into underscores.
dbSafeNames <- function(names) {
  safe <- tolower(names)
  safe <- gsub('[^a-z0-9]+', '_', safe)
  safe <- make.names(safe, unique = TRUE, allow_ = TRUE)
  gsub('.', '_', safe, fixed = TRUE)
}
#append DT column names
colnames(Derived) = dbSafeNames(colnames(Derived))
#optional data check
#summary(Derived)
#load dependencies
library(RPostgreSQL)
pg = dbDriver("PostgreSQL")
#instantiate db connection
# NOTE(review): credentials are hard-coded here; prefer reading them from
# environment variables or a config file.
con = dbConnect(pg, user="Mike", password="***",
host="localhost", port=5432, dbname="ProTECT")
# write the 'Derived' table into postgres table 'derived', keeping row names
dbWriteTable(con,'derived',Derived, row.names=TRUE)
|
8b2215ab83b941068257b35a908bb1a7cf371266
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610383009-test.R
|
7227ee1847f8b03df6800b371753be7a51b50108
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
1610383009-test.R
|
# Fuzzer-generated regression input: exercise grattan::IncomeTax with
# extreme and non-finite arguments and print the structure of the result.
testlist <- list(rates = c(NaN, NaN, NaN, 1.42991672770726e-308, -8.52118900406582e+245, -4.78904768543923e+54, -5.50000973208021e+303, -5.18847371542227e+245, -7.02630600428121e-260, 1.04294967003024e+40, 5.43412073090628e-311, NaN, Inf, 7.29111854342396e-304, 0), thresholds = NaN, x = NaN)
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
604cfd45d65f1d1920e9de95bc4065c731d9c803
|
ea2608ba2fa5fcb272e476b848256220c95a33ab
|
/plot3.R
|
54a295854009de88798b5b385d76842f26c85af4
|
[] |
no_license
|
sweetpickles/eda2
|
deccff4202b3227a29a675664a8f62c4e2c8bcb3
|
34d1ab762623619577d17124c392f09dca7a58ae
|
refs/heads/master
| 2021-01-10T03:11:11.992440
| 2015-10-24T00:02:45
| 2015-10-24T00:02:45
| 44,680,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
plot3.R
|
# Plot Baltimore City (fips 24510) PM2.5 emissions by year, one line per
# source type.
# fix: qplot() is a ggplot2 function but ggplot2 was never loaded
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
# source classification codes; loaded with the assignment data but not used below
SCC <- readRDS("Source_Classification_Code.rds")
ds3 <- NEI[NEI$fips == '24510',]
# total emissions per year and source type
p3 <- aggregate(Emissions ~ year + type, data = ds3, sum)
with(p3, qplot(year,Emissions,color=type,geom='line'))
|
17318df463ab30bea83b0fdb7c1afe767184ec1d
|
32a708b019e1d12870c82f23dc33e204b145518c
|
/ui.R
|
146f8d9db29175c2a2a2ae146a7ffe658e6c6e11
|
[] |
no_license
|
jpacilo/shiny_application
|
0a90c9e618e4f74e11aedb2b5b54ea8d94606667
|
9dcae29334e22e7121ccb84cd59bd37fdd597a73
|
refs/heads/master
| 2020-07-01T19:04:43.021885
| 2019-08-08T13:41:42
| 2019-08-08T13:41:42
| 201,265,896
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 549
|
r
|
ui.R
|
# author : jpacilo
# date : 8/8/19
# Shiny UI: sidebar layout for k-means clustering of the mtcars dataset --
# selectors for the x/y variables and the number of clusters, with the
# scatter plot in the main panel.
pageWithSidebar(
  headerPanel('K-Means Clustering x 1974 Motor Trend US Dataset'),
  sidebarPanel(
    selectInput('xcol', 'x-variable?', names(mtcars)),
    # default the y variable to the second mtcars column
    selectInput('ycol', 'y-variable?', names(mtcars),
      selected = names(mtcars)[[2]]),
    numericInput('clusters', 'how many clusters?', 3,
      min = 1, max = 12)
  ),
  mainPanel(
    # filled by output$plot_me -- presumably defined in the server script; confirm
    plotOutput('plot_me')
  )
)
|
0d51ceaf49e5c2e5e493424b82f577b6bf9b1243
|
c9042b30fd26d875436d32362e0d7aa765c59389
|
/01_Demo/Demo_01.R
|
4f05c7ba0eab0cac3ed13ad34a6ce1391c47ffb7
|
[] |
no_license
|
rkaysen63/R_Analysis
|
cd0a3fd4bddde355a46f501f609884dfba6d3e48
|
a9ef717133cff210b1a6d7aa67cf89df1840ea1b
|
refs/heads/main
| 2023-05-27T14:59:16.032427
| 2021-06-11T22:35:31
| 2021-06-11T22:35:31
| 374,245,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,797
|
r
|
Demo_01.R
|
x <- 3
numlist <- c(0,1,2,3,4,5,6,7,8,9)
# 15.2.3: Read and Write Using R ----------------------------------------------
demo_table <- read.csv(file='Data/demo.csv',check.names=F,stringsAsFactors = F)
# fix: install.library() does not exist -- packages are installed with
# install.packages("name") (run once; commented out here) and loaded with library()
#install.packages("jsonlite")
library(jsonlite)
demo_table2 <- fromJSON(txt='Data/demo.json')
demo_table3 <- read.csv(file='Data/demo2.csv',check.names=F,stringsAsFactors = F)
# fix: "fromJSON(txt='Data/demo.json')nrow()" was a syntax error; the intent
# (count the rows of the freshly read table) is expressed directly:
nrow(demo_table2)
# 15.2.4: Select Data in R -----------------------------------------------------
x <- c(3,3,2,2,5,5,8,8,9)
x[3]
demo_table[3,"Year"]
demo_table[3,3]
demo_table$"Vehicle_Class"
demo_table$"Vehicle_Class"[2]
# filter ----------------------------------------------------------------------
filter_table <- demo_table2[demo_table2$price > 10000,]
filter_table2 <- subset(demo_table2, price > 10000 & drive == "4wd" & "clean" %in% title_status) #filter by price and drivetrain
filter_table3 <- demo_table2[("clean" %in% demo_table2$title_status) & (demo_table2$price > 10000) & (demo_table2$drive == "4wd"),]
# sample function --------------------------------------------------------------
sample(c("cow", "deer", "pig", "chicken", "duck", "sheep", "dog"),4)
num_rows <- 1:nrow(demo_table)
sample_rows <- sample(num_rows, 3)
demo_table[sample_rows,]
# combine all three
demo_table[sample(1:nrow(demo_table), 3),]
# 15.2.5: Transform, Group, Reshape --------------------------------------------
# tidyverse package contains dplyr (need this to use pipe operator %>%), tidyr, ggplot2
library(tidyverse)
# pipe operator chains together functions
# mutate
demo_table <- demo_table %>% mutate(Mileage_per_Year=Total_Miles/(2020-Year),IsActive=TRUE) #add columns to original data frame
# group_by and summarize
summarize_demo <- demo_table2 %>% group_by(condition) %>% summarize(Mean_Mileage=mean(odometer), .groups = 'keep') #create summary table
summarize_demo <- demo_table2 %>% group_by(condition) %>% summarize(Mean_Mileage=mean(odometer),Maximum_Price=max(price),Num_Vehicles=n(), .groups = 'keep') #create summary table with multiple columns
# gather to reshape dataset (shrink in)
demo_table3 <- read.csv('01_Demo/demo2.csv',check.names = F,stringsAsFactors = F)
long_table <- gather(demo_table3,key="Metric",value="Score",buying_price:popularity)
long_table <- demo_table3 %>% gather(key="Metric",value="Score",buying_price:popularity)
# spread to expand back out
wide_table <- long_table %>% spread(key="Metric",value="Score")
# fix: 'table' was passed to all.equal() before it had been created; build the
# reordered copies first, then compare. (Naming a variable 'table' also shadows
# base::table -- kept here for continuity with the lesson text.)
table <- demo_table3[,order(colnames(wide_table))]
table <- demo_table3[,(colnames(wide_table))]
all.equal(wide_table,table)
# ------------------------------------------------------------------------------
# 15.3.1: ggplot2
# mpg is a built in R dataset
head(mpg)
# 15.3.2: bar plots ----------------------------------------------
plt <- ggplot(mpg,aes(x=class)) #import dataset into ggplot2
plt + geom_bar() #plot a bar plot
mpg_summary <- mpg %>% group_by(manufacturer) %>% summarize(Vehicle_Count=n(), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary,aes(x=manufacturer,y=Vehicle_Count)) #import dataset into ggplot2
plt + geom_col() #plot a bar plot
plt + geom_col() + xlab("Manufacturing Company") + ylab("Number of Vehicles in Dataset") #plot bar plot with labels
plt + geom_col() + xlab("Manufacturing Company") + ylab("Number of Vehicles in Dataset") + #plot a boxplot with labels
theme(axis.text.x=element_text(angle=45,hjust=1)) #rotate the x-axis label 45 degrees
# 15.3.4: line plots -----------------------------------------------------------------
mpg_summary <- subset(mpg,manufacturer=="toyota") %>% group_by(cyl) %>% summarize(Mean_Hwy=mean(hwy), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary,aes(x=cyl,y=Mean_Hwy)) #import dataset into ggplot2
plt + geom_line()
plt + geom_line() + scale_x_discrete(limits=c(4,6,8)) + scale_y_continuous(breaks = c(15:30)) #add line plot with labels
# point plots ------------------------------------------------------------------
plt <- ggplot(mpg,aes(x=displ,y=cty)) #import dataset into ggplot2
plt + geom_point() + xlab("Engine Size (L)") + ylab("City Fuel-Efficiency (MPG)") #add scatter plot with labels
plt <- ggplot(mpg,aes(x=displ,y=cty,color=class)) #import dataset into ggplot2
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class") #add scatter plot with labels
plt <- ggplot(mpg,aes(x=displ,y=cty,color=class,shape=drv)) #import dataset into ggplot2
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class",shape="Type of Drive") #add scatter plot with multiple aesthetics
plt <- ggplot(mpg,aes(x=displ,y=cty,color=class,shape=drv,size=cty)) #import dataset into ggplot2
plt + geom_point() + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class",shape="Type of Drive", size="City Fuel-Efficiency (MPG)") #add scatter plot with multiple aesthetics
plt + geom_point(aes(size=cty)) + labs(x="Engine Size (L)", y="City Fuel-Efficiency (MPG)", color="Vehicle Class",shape="Type of Drive",size="City Fuel-Efficiency (MPG)") #add scatter plot with multiple aesthetics
# 15.3.5: box plots --------------------------------------------------------------------
plt <- ggplot(mpg,aes(y=hwy)) #import dataset into ggplot2
plt + geom_boxplot() #add boxplot
plt <- ggplot(mpg,aes(x=manufacturer,y=hwy)) #import dataset into ggplot2
plt + geom_boxplot(fill="lightblue", color = "darkgreen", lty="dashed") + theme(axis.text.x=element_text(angle=45,hjust=1)) #add boxplot and rotate x-axis labels 45 degrees
plt <- ggplot(mpg,aes(x=manufacturer,y=hwy)) #import dataset into ggplot2
plt + geom_boxplot(fill="lightblue") + #add boxplot and fill color
theme(axis.text.x=element_text(angle=45,hjust=1)) + #rotate x-axis labels 45 degrees
geom_point() #overlay scatter plot on top
plt + geom_boxplot(fill="white",aes(color=manufacturer),linetype="dashed") + theme(axis.text.x=element_text(angle=45,hjust=1)) #add boxplot and rotate x-axis labels 45 degrees
# 15.3.6: heat maps --------------------------------------------------------------------
mpg_summary <- mpg %>% group_by(class,year) %>% summarize(Mean_Hwy=mean(hwy), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary, aes(x=class,y=factor(year),fill=Mean_Hwy))
plt + geom_tile() + labs(x="Vehicle Class",y="Vehicle Year",fill="Mean Highway (MPG)") #create heatmap with labels
mpg_summary <- mpg %>% group_by(model,year) %>% summarize(Mean_Hwy=mean(hwy), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary, aes(x=model,y=factor(year),fill=Mean_Hwy)) #import dataset into ggplot2
plt + geom_tile() + labs(x="Model",y="Vehicle Year",fill="Mean Highway (MPG)") + #add heatmap with labels >
theme(axis.text.x = element_text(angle=90,hjust=1,vjust=.5)) #rotate x-axis labels 90 degrees
# 15.3.7: Add Layers to Plots ---------------------------------------------------------
plt <- ggplot(mpg,aes(x=manufacturer,y=hwy)) #import dataset into ggplot2
plt + geom_boxplot(fill="white",aes(color=manufacturer)) + theme(axis.text.x=element_text(angle=45,hjust=1)) + #add boxplot and rotate x-axis labels 45 degrees
geom_point() #overlay scatter plot on top
# mapping argument uses aes() to identify variables, data argument can be used to provide new input data-------
mpg_summary <- mpg %>% group_by(class) %>% summarize(Mean_Engine=mean(displ), .groups = 'keep') #create summary table
plt <- ggplot(mpg_summary,aes(x=class,y=Mean_Engine)) #import dataset into ggplot2
plt + geom_point(size=4) + labs(x="Vehicle Class",y="Mean Engine Size") #add scatter plot
# overlay with error bars ------------------------------------------------------------
mpg_summary <- mpg %>% group_by(class) %>% summarize(Mean_Engine=mean(displ),SD_Engine=sd(displ), .groups = 'keep')
plt <- ggplot(mpg_summary,aes(x=class,y=Mean_Engine)) #import dataset into ggplot2
plt + geom_point(size=4) + labs(x="Vehicle Class",y="Mean Engine Size") + #add scatter plot with labels
geom_errorbar(aes(ymin=Mean_Engine-SD_Engine,ymax=Mean_Engine+SD_Engine)) #overlay with error bars
# Faceting ---- added to the end of plotting statement -------------------------------
mpg_long <- mpg %>% gather(key="MPG_Type",value="Rating",c(cty,hwy)) #convert to long format
head(mpg_long)
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + theme(axis.text.x=element_text(angle=45,hjust=1)) #add boxplot with labels rotated 45 degrees
# now add facet_wrap ------
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(MPG_Type)) + #create multiple boxplots, one for each MPG type
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
# Skill Drill --- use a different variable for the facet_wrap -----------------------------------------------
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(class)) + #create multiple boxplots, one for each class
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(year)) + #create multiple boxplots, one for each year
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
# Skill Drill --- use two different variables for the facet_wrap -------------------------------------------
plt <- ggplot(mpg_long,aes(x=manufacturer,y=Rating,color=MPG_Type)) #import dataset into ggplot2
plt + geom_boxplot() + facet_wrap(vars(year,MPG_Type)) + #create multiple boxplots, one for each year & MPG type
theme(axis.text.x=element_text(angle=45,hjust=1),legend.position = "none") + xlab("Manufacturer") #rotate x-axis labels
# 15.4.4: Qualitative Test for Normality --------------------------------------------------------------------
head(mtcars)
ggplot(mtcars,aes(x=wt)) + geom_density() #visualize distribution using density plot
# 15.6.1: random sample from population ---- sample_n() function --------------------------------------------
population_table <- read.csv(file='Data/used_car_data.csv',check.names=F,stringsAsFactors = F)
plt <- ggplot(population_table,aes(x=log10(Miles_Driven))) #import dataset into ggplot2
plt + geom_density() #visualize distribution using density plot
sample_table <- population_table %>% sample_n(50) #randomly sample 50 data points
plt <- ggplot(sample_table, aes(x=log10(Miles_Driven))) #import dataset into ggplot2
plt + geom_density() #visualize distribution using density plot
# 15.6.2: One-Sample t-Test -----------------------------------------------------------------------------------
t.test(log10(sample_table$Miles_Driven),mu=mean(log10(population_table$Miles_Driven))) #compare sample versus population means
# 15.6.3: Two-sample t-test -----------------------------------------------------------------------------------
sample_table <- population_table %>% sample_n(50) #generate 50 randomly sampled data points
sample_table2 <- population_table %>% sample_n(50) #generate another 50 randomly sampled data points
t.test(log10(sample_table$Miles_Driven),log10(sample_table2$Miles_Driven)) #compare means of two samples
# 15.6.4: Two-sample t-test (paired) --------------------------------------------------------------------------
mpg_data <- read.csv('Data/mpg_modified.csv') #import dataset
mpg_1999 <- mpg_data %>% filter(year==1999) #select only data points where the year is 1999
mpg_2008 <- mpg_data %>% filter(year==2008) #select only data points where the year is 2008
t.test(mpg_1999$hwy,mpg_2008$hwy,paired=T) #compare the mean difference between two samples
# 15.6.5 ANOVA test -------------------------------------------------------------------------------------------
# clean data - convert numeric column to factor
head(mtcars)
mtcars_filt <- mtcars[,c("hp","cyl")] #filter columns from mtcars dataset
head(mtcars_filt)
# use factor() to convert numeric to categorical
mtcars_filt$cyl <- factor(mtcars_filt$cyl) #convert numeric column to factor
# compare means
aov(hp ~ cyl,data=mtcars_filt) #compare means across multiple levels
# compare means w p-values - use summary()
summary(aov(hp ~ cyl,data=mtcars_filt)) #compare means across multiple levels
# 15.7.1 ------ correlation, variance, covariance (matrices) -------------------------------------------------
# example 1:
head(mtcars)
plt <- ggplot(mtcars,aes(x=hp,y=qsec)) #import dataset into ggplot2
plt + geom_point() #create scatter plot
cor(mtcars$hp,mtcars$qsec) #calculate correlation coefficient, "r"
# example 2:
used_cars <- read.csv('Data/used_car_data.csv', stringsAsFactors = F) #read in dataset
head(used_cars)
plt <- ggplot(used_cars,aes(x=Miles_Driven,y=Selling_Price)) #import dataset into ggplot2
plt + geom_point() #create a scatter plot
cor(used_cars$Miles_Driven,used_cars$Selling_Price) #calculate correlation coefficient
# example 3: correlation matrix
used_matrix <- as.matrix(used_cars[,c("Selling_Price","Present_Price","Miles_Driven")]) #convert data frame into numeric matrix
cor(used_matrix)
# 15.7.2: Single Linear Regression ---------------------------------------------------------------
lm(qsec ~ hp, mtcars) #create linear model lm (y ~ x, data)
summary(lm(qsec~hp,mtcars)) #summarize linear model
# Create linear model and plot
model <- lm(qsec ~ hp, mtcars) # Create linear model
yvals <- model$coefficients['hp']*mtcars$hp +
model$coefficients['(Intercept)'] #determine y-axis values from linear model
plt <- ggplot(mtcars,aes(x=hp,y=qsec)) #import dataset into ggplot2
plt + geom_point() + geom_line(aes(y=yvals), color = "red") #plot scatter and linear model
# 15.7.3: Multiple Linear Regression ---------------------------------------------------------------
lm(qsec ~ mpg + disp + drat + wt + hp,data=mtcars) #generate multiple linear regression model
summary( lm(qsec ~ mpg + disp + drat + wt + hp,data=mtcars)) #generate summary statistics
# 15.8.1: chi-square test to compare distribution of frequencies across two groups -----------------
# First generate a contingency (frequency) table
head(mpg)
table(mpg$class,mpg$year) #generate contingency table
tbl <- table(mpg$class,mpg$year) #generate contingency table
chisq.test(tbl) #compare categorical distributions
# practice
head(mtcars)
tbl2 <- table(mtcars$cyl,mtcars$qsec) #generate contingency table
chisq.test(tbl2) #compare categorical distributions
|
ddd94bd146e9c91ae3ed2acec04330c66f71dfaf
|
962142a98fafed07eefa031d35d7a1f3293c1ac1
|
/man/already_uploaded.Rd
|
5f293ff2b2d582825cd7616a791ef3c113f95d25
|
[] |
no_license
|
zevross-spatial/rpackage-sensorDataImport
|
057b6e87e1ad5d19028b9f5bd5a991076477f385
|
81febbeff8c8e71796dad175c594c4ed2b6aea25
|
refs/heads/master
| 2022-02-15T05:18:06.647276
| 2019-08-16T14:25:29
| 2019-08-16T14:25:29
| 35,230,643
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,399
|
rd
|
already_uploaded.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postgresql.R
\name{already_uploaded}
\alias{already_uploaded}
\title{Tests if data has already been uploaded}
\usage{
already_uploaded(tablename, filename, con = ".connection")
}
\arguments{
\item{tablename}{name of the database table to check.}
\item{filename}{name of the data file whose presence is tested.}
\item{con}{name of the database connection object; defaults to ".connection".}
}
\value{
A logical indicating whether \code{filename} has already been uploaded to \code{tablename}.
\description{
This function tests whether the filename exists in the given table
there is no check to see if the date or data are the same -- based only on
filename
}
\examples{
try(already_uploaded("gps", "BIKE0001_GPS01_S01_150306.gpx"), silent=TRUE)
}
\seealso{
Other postgresql functions: \code{\link{add_tables_db}},
\code{\link{agg_unit_ok}}, \code{\link{aggregate_data}},
\code{\link{backup_database}},
\code{\link{column_exists}}, \code{\link{column_types}},
\code{\link{create_database}}, \code{\link{delete_data}},
\code{\link{get_column_names}},
\code{\link{get_connection}},
\code{\link{get_filenames_forSubject}},
\code{\link{get_filenames}}, \code{\link{get_row_count}},
\code{\link{get_sensor_data}},
\code{\link{get_subjectid}},
\code{\link{kill_pg_connections}},
\code{\link{list_tables}},
\code{\link{process_hexoskin}},
\code{\link{restore_database}},
\code{\link{table_exists}},
\code{\link{upload_postgres}},
\code{\link{valid_connection}}
}
|
338cbdcc5e81d1b1ca474201d8583a3e8af74cd3
|
88b5925e6cd7c318e82cbedd429bdad8d6229c72
|
/Rpackage/scrbook/man/simSCR0.Rd
|
08878dc1dddf5cc258d7048d02c8a2fe19c081e4
|
[] |
no_license
|
jaroyle/scrbook
|
290a6055cbcc6a34b2915a6267cbddbfab632a06
|
6554f7cf3af819870a001022a25c020379db475f
|
refs/heads/master
| 2021-03-19T16:02:26.707679
| 2017-12-19T15:05:35
| 2017-12-19T15:05:35
| 2,988,377
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,910
|
rd
|
simSCR0.Rd
|
\name{simSCR0}
\alias{simSCR0}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Simulate some SCR data under a simple Bernoulli encounter model.
%% ~~function to do ... ~~
}
\description{
Right now this is a simple R script that provides very little
flexibility. At some point, we'll generalize the function but, until
then, you should be able to easily modify this script to suit
your own needs.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
simSCR0(N=100,K=20,alpha0=-2.5, sigma = .5, discard0 = TRUE, array3d = FALSE, rnd = 2013)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{N}{ Population size }
\item{K}{ Number of sampling occasions (nights of camera trapping effort)}
\item{alpha0}{ Intercept of the Gaussian encounter probability model:
probcap<-plogis(alpha0)*exp(-alpha1*D*D)
}
\item{sigma}{
The scale parameter of the Gaussian encounter probability model,
i.e., alpha1 = (1/(2*sigma*sigma)).
}
\item{discard0}{
If TRUE, only the encountered individuals are returned; if FALSE, the
encounter history array or matrix is returned for all N individuals,
whether encountered or not.
}
\item{array3d}{
If TRUE, returns the full 3-d encounter history array having
dimensions nind x ntraps x K (n occasions)
If FALSE, summarizes over noccasions and returns the total number of
encounters out of K.
}
\item{rnd}{
Random number seed. rnd=2013 should reproduce the results in the SCR
book.
}
}
\details{
This function is meant as a learning tool and a template to modify to
suit your own needs. It is not a general purpose function.
It simulates Bernoulli encounters of N individuals by a 5x5 array of
camera traps or similar. Trap coordinates are the integers
(1,2,3,4,5)x(1,2,3,4,5). The state-space is defined by a buffer
extending 2 units beyond the trap array, for a total size of 8x8
units. i.e., the square [-1,7]x[-1,7].
The encounter probability model is the Gaussian ("half-normal") model in which, for p[i,j] = encounter probability of individual i at trap j:
p[i,j] = p0*exp(-alpha1*d[i,j]^2).
Default values are logit(p0) = -2.5 and alpha1 = 2 which corresponds
to sigma = .5 where alpha1=(1/(2*sigma*sigma))
%% ~~ If necessary, more details than the description above ~~
}
\value{
Returns a data list suitable for use in SCR0bayes and other functions. It
has elements:
Y = nind x ntraps (array3d=FALSE) of encounter frequencies or N x
ntraps if discard0=FALSE. If array3d=TRUE, returns nind x K x ntraps
array which must be summarized to nind x ntraps to use SCR0bayes and
some other functions.
traplocs = ntraps x 2 matrix of trap coordinates
xlim=ylim = vectors of lower and upper x- and y-coordinates of the state-space
N = true value of N
alpha0 = true value of logit(p0)
alpha1 = true value of alpha1 = 1/(2*sigma*sigma)
sigma = true value of sigma
K = number of sampling occasions
}
\references{
SCR book, Chapter 5. %% ~put references to the literature/web site here ~
}
\author{
Andy Royle, aroyle@usgs.gov %% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
SCR0bayes, SCR0bayesDss%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
library("scrbook")
## Simulate a data set for a population of N = 100 individuals
data<-simSCR0(N=100, K=20, alpha0= -2.5, sigma = 0.5, discard0=TRUE,rnd=2013)
##out1<-SCR0bayes(data,M=200,engine="jags",ni=2000,nb=1000)
# Fit the model using WinBUGS:
out2<-SCR0bayes(data,M=200,engine="winbugs",ni=2000,nb=1000)
## Inspect the results....
print(out2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
1253e16d191e79f6661ab3b44aed0adee786f902
|
c76d4dc9a4d693c49631f28fe5dd5d4394275f93
|
/man/CalculateTimeLag.Rd
|
d80e923bbe407390b9d4b29c12123da8cc3c3a86
|
[] |
no_license
|
cran/RHRV
|
570b16a6e5c88bf9039699431213d2ead2a8971d
|
4700c6ba5a1600f6a5356e25fd4499f7be48bc4f
|
refs/heads/master
| 2022-11-08T23:46:30.640056
| 2022-10-31T19:05:02
| 2022-10-31T19:05:02
| 17,682,624
| 12
| 6
| null | 2017-10-19T05:42:46
| 2014-03-12T20:21:26
|
R
|
UTF-8
|
R
| false
| true
| 4,204
|
rd
|
CalculateTimeLag.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalculateNonLinearParameters.R
\name{CalculateTimeLag}
\alias{CalculateTimeLag}
\title{Estimate an appropriate time lag for the Takens' vectors}
\usage{
CalculateTimeLag(HRVData, technique = c("acf", "ami"),
method = c("first.e.decay", "first.zero", "first.minimum", "first.value"),
value = 1/exp(1), lagMax = NULL, doPlot = TRUE, ...)
}
\arguments{
\item{HRVData}{Data structure that stores the beats register and information related to it.}
\item{technique}{The technique that we shall use to estimate the time lag.
Allowed values are \emph{"acf"} and \emph{"ami"}.}
\item{method}{The method that we shall use to select the time lag (see the Details section). Available methods
are \emph{"first.zero"}, \emph{"first.e.decay"}, \emph{"first.minimum"} and \emph{"first.value"}.}
\item{value}{Numeric value indicating the value that the autocorrelation/AMI function must cross in order to
select the time lag. It is used only with the "first.value" method.}
\item{lagMax}{Maximum lag at which to calculate the acf/AMI.}
\item{doPlot}{Logical value. If TRUE (default value), a plot of the autocorrelation/AMI function is shown.}
\item{...}{Additional parameters for the \emph{acf} or the
\emph{mutualInformation} functions (see \code{\link[nonlinearTseries]{mutualInformation}}).}
}
\value{
The estimated time lag.
}
\description{
Given a time series (timeSeries), an embedding dimension (m) and a
time lag (timeLag), the \eqn{n^{th}}
Takens' vector is defined as
\deqn{T[n]={timeSeries[n], timeSeries[n+ timeLag],...timeSeries[n+m*timeLag]}.}
This function estimates an appropriate time lag by using the autocorrelation or the
average mutual information (AMI) function.
}
\details{
A basic criteria for estimating a proper time lag is based on the following reasoning:
if the time lag used to build the Takens' vectors is too small, the coordinates will
be too highly temporally correlated and the embedding will tend to cluster around
the diagonal in the phase space. If the time lag is chosen too large, the resulting
coordinates may be almost uncorrelated and the resulting embedding will be very complicated.
Thus, the autocorrelation function can be used for estimating an appropriate time lag of
a time series. However, it must be noted that the autocorrelation is a linear statistic,
and thus it does not take into account nonlinear dynamical correlations. To take into
account nonlinear correlations the average mutual information (AMI) can be used.
Independently of the technique used to compute the correlation, the time lag can
be selected in a variety of ways:
\itemize{
\item Select the time lag where the autocorrelation/AMI function decays to 0
(\emph{first.zero} method). This
method is not appropriate for the AMI function, since it only takes positive values.
\item Select the time lag where the autocorrelation/AMI function decays to
1/e of its value at zero (\emph{first.e.decay} method).
\item Select the time lag where the autocorrelation/AMI function reaches
its first minimum (\emph{first.minimum} method).
\item Select the time lag where the autocorrelation/AMI function decays to
the value specified by the user (\emph{first.value} method and
\emph{value} parameter).
}
}
\note{
If the autocorrelation/AMI function does not cross the specified value, an error is thrown. This may be solved
by increasing the lag.max or selecting a higher value to which the autocorrelation/AMI function may decay.
This function is based on the \code{\link[nonlinearTseries]{timeLag}} function from the
nonlinearTseries package.
}
\examples{
\dontrun{
data(HRVProcessedData)
HRVData = HRVProcessedData
HRVData = SetVerbose(HRVData,T)
timeLag = CalculateTimeLag(HRVData,technique = "ami")
embeddingDim = CalculateEmbeddingDim(HRVData,
timeLag = timeLag,
maxEmbeddingDim = 15)
}
}
\references{
H. Kantz and T. Schreiber: Nonlinear Time series Analysis (Cambridge university press)
}
\seealso{
\code{\link[nonlinearTseries]{timeLag}},\code{\link[nonlinearTseries]{mutualInformation}} .
}
|
3145c3bf2e0cf9e8d6d9ec99b1b89da054b86754
|
6158b9b89a1446b6a7079735fcef295b412ab752
|
/R/sttc.R
|
4da889a4c3ab45c1f678723eee67876bc22a506e
|
[] |
no_license
|
sje30/test1
|
6c201c07a1740a9981f98284aeaec728dfcf33df
|
6126745eb42296d12fb2bf08da1e10a2a8ac8bb4
|
refs/heads/master
| 2021-01-01T04:22:43.081032
| 2017-07-17T09:02:17
| 2017-07-17T09:02:17
| 97,163,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,226
|
r
|
sttc.R
|
##' Compute STTC for a pair of spike trains
##'
##' The Spike Time Tiling correlation (STTC) is computed for a pair
##' of spike trains, following the definition of Cutts and Eglen (2014).
##'
##' @title Compute STTC for a pair of spike trains
##' @param a first spike train
##' @param b second spike train
##' @param dt bin size in seconds
##' @param rec_time 2-element vector: start and end time; when NULL, the
##' range of all spikes from both trains is used.
##' @return STTC, a scalar bounded between -1 and +1.
##' @author Stephen J Eglen
##' @examples
##' a = c(1, 2, 3, 4, 5)
##' b = a+0.01
##' c = a+0.5
##' sttc(a, b)==1
##' sttc(a, c)==0
sttc <- function(a, b, dt = 0.05, rec_time = NULL) {
  ## Fall back to the joint spike range when no recording window is given.
  window <- if (is.null(rec_time)) range(c(a, b)) else rec_time
  run_TMcpp(dt, window[1], window[2], a, b)  # C++ backend computes the STTC
}
##' Compute STTC profile for a pair of spike trains
##'
##' We extend the STTC to a profile (or correlogram) by shifting one
##' spike train by amount tau, where tau varies in [-tau_max, +tau_max]
##' in steps of tau_step.
##'
##' @title Compute STTC profile for a pair of spike trains
##' @param a spike train 1
##' @param b spike train 2
##' @param dt time window for STTC
##' @param tau_max maximum time shift
##' @param tau_step step size in tau
##' @param beg start of recording. When NULL use the minimum spike time from
##' the two trains.
##' @param end end of recording. When NULL use the maximum spike time from
##' the two trains.
##' @return An object of class "sttcp": a list with elements \code{x}
##' (the tau values) and \code{y} (the STTC at each tau).
##' @author Stephen Eglen
##' @examples
##' t1 <- -cumsum(log(runif(1000)) / 2)
##' t2 <- -cumsum(log(runif(1000)) / 2)
##' corr <- sttcp(t1, t2)
##' plot(corr, main="cross correlation")
##' autocorr <- sttcp(t1, t1)
##' plot(autocorr, main="auto correlation")
sttcp <- function(a, b, dt = 0.05, tau_max = 5, tau_step = 0.1,
                  beg = NULL, end = NULL) {
  ## Default recording window: the span of all spikes from both trains.
  spikes <- c(a, b)
  if (is.null(beg))
    beg <- min(spikes)
  if (is.null(end))
    end <- max(spikes)
  ## (Removed the unused locals `nspikes` and `first_spike` from the
  ## original; they were computed but never read.)
  y <- sttcp_ab(a, b, beg, end, dt, tau_step, tau_max)  # C++ backend
  taus <- seq(from = -tau_max, to = tau_max, by = tau_step)
  object <- list(x = taus, y = y)
  class(object) <- "sttcp"
  object
}
##' Plot method for "sttcp" objects (STTC profiles)
##'
##' Draws the STTC value (\code{x$y}) against the time shift tau
##' (\code{x$x}) as a line.
##'
##' @param x an object of class "sttcp", as returned by \code{sttcp}.
##' @param ... further graphical arguments passed on to \code{plot}.
plot.sttcp <- function(x, ...) {
  plot(x$x, x$y, xlab="tau (s)", ylab='STTC', type='l', ...)
}
|
ee8cf5025181de2a665721c8273038d35cbda701
|
c117e26f3e8d61b07eb4ede0f48619a8504ae3d5
|
/HW2pt3.R
|
188f1b43a8734acf2951f921076fda26cc389c76
|
[] |
no_license
|
1123581321345589/R-Projects
|
0fbe5304c81bd12f98d7ea1707172ad5027d9be4
|
4ea3c0d7f76b2b042c2befcaa1bbd63b4c90c965
|
refs/heads/main
| 2023-06-28T13:29:50.843042
| 2021-07-22T17:36:18
| 2021-07-22T17:36:18
| 388,542,928
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,277
|
r
|
HW2pt3.R
|
# HW2pt3: find the decision-stump split on antenna length that minimises
# the weighted Gini impurity of the two resulting leaves.
# NOTE(review): hard-coded working directory -- adjust for your machine.
setwd("C:/Users/dunnn2/Desktop/R Working Directory")
data <- read.csv("insects_data.csv")
antLength <- data$Antenna_Length
Ant_data <- data[order(data[, 3]), ]  # sort by column 3 (assumed to be Antenna_Length -- confirm)

# Candidate split points are the midpoints between consecutive *sorted*
# antenna lengths.  (Bug fix: the original took midpoints of consecutive
# values in file order, which are only valid split candidates if the file
# happens to be pre-sorted.)
sortedLengths <- sort(antLength)
avgs <- (sortedLengths[-length(sortedLengths)] + sortedLengths[-1]) / 2

ginis <- numeric(length(avgs))  # preallocated: weighted Gini per candidate split
min_gini <- Inf
split <- 0

for (j in seq_along(avgs)) {
  # Partition the observations at the candidate threshold.
  below <- Ant_data$Antenna_Length < avgs[j]
  lesser <- sum(below)
  greater <- sum(!below)
  # Degenerate split (all observations on one side): skip to avoid 0/0 -> NaN,
  # which would also break the `ginis[j] < min_gini` comparison below.
  if (lesser == 0 || greater == 0) {
    ginis[j] <- NA
    next
  }
  # Class counts in each leaf (class 1 vs everything else, as in the original).
  c1bel <- sum(Ant_data$c[below] == 1)
  c2bel <- lesser - c1bel
  c1abv <- sum(Ant_data$c[!below] == 1)
  c2abv <- greater - c1abv
  # Gini impurity of each leaf, then their size-weighted average.
  less_leaf_gini <- 1 - (c1bel / lesser)^2 - (c2bel / lesser)^2
  great_leaf_gini <- 1 - (c1abv / greater)^2 - (c2abv / greater)^2
  n <- lesser + greater
  ginis[j] <- (lesser / n) * less_leaf_gini + (greater / n) * great_leaf_gini
  # Track the best (lowest-impurity) split seen so far.
  if (ginis[j] < min_gini) {
    min_gini <- ginis[j]
    split <- avgs[j]
  }
}

gini_index <- min(ginis, na.rm = TRUE)
print(avgs)
print(Ant_data)
print(min_gini)
print(split)
# NOTE(review): the conclusion below was written for the original data set;
# re-check the numbers if insects_data.csv changes.
print("If the length of the antenna is less than 6.7, there is an 83.33% or 5/6 probability the observation is class 1, if it is greater than that there is a 100% or 4/4 probability that the observation is class 2")
|
3cd04a4e95adb80d971e4ebe0a6c8d994c92c385
|
8a2f8858d883c226d73e978c7ee8623e3b704e5a
|
/code/ann_train.R
|
334e229a1ff62f3cd643e3edea9cb9d2355918ac
|
[
"MIT"
] |
permissive
|
UTD-DOES/M3
|
2348c52cafbc6ccc705b05ac97cb2f8c03368d8e
|
a08b2a496cc451dd7362872372d29f13893b4811
|
refs/heads/master
| 2020-03-31T18:45:26.991870
| 2019-03-14T01:57:09
| 2019-03-14T01:57:09
| 152,471,280
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,804
|
r
|
ann_train.R
|
#-------------------------------- NOTE ----------------------------------------
# 1 This code is to train the ann model for the 1st layer;
# 2 Coder: Cong Feng  Date: 2016/06/24 @ DOES Lab, UTD
#--------------------------------------------------------------------------------

#' Train an artificial neural network on the supplied training data.
#'
#' @param training_data data.frame/matrix whose last column is the target
#'   and all preceding columns are the model inputs.
#' @param learning_function RSNNS learning function name (used by 'type1' only).
#' @param act_function RSNNS hidden-layer activation function name ('type1' only).
#' @param version_ctrl which trainer to use: 'type1' (RSNNS::mlp),
#'   'type2' (caret::train with method "nnet") or 'type3' (RSNNS::rbf).
#' @return the fitted model object.
ann_train <- function(training_data, learning_function, act_function, version_ctrl) {
  library(caret)
  library(RSNNS)
  library(nnet)
  # x is the model inputs and y is the model target (last column).
  x <- training_data[, 1:(ncol(training_data) - 1)]
  y <- training_data[, ncol(training_data)]
  # Dispatch on the requested trainer.  switch() matches exactly, and the
  # trailing stop() turns an unknown version_ctrl into a clear error instead
  # of the original "object 'model_ann' not found".
  model_ann <- switch(version_ctrl,
    type1 = mlp(x, y, size = c(30), maxit = 1000,
                initFunc = "Randomize_Weights", initFuncParams = c(-0.3, 0.3),
                learnFunc = learning_function, learnFuncParams = c(0.2, 0),
                updateFunc = "Topological_Order", updateFuncParams = c(0),
                hiddenActFunc = act_function, shufflePatterns = TRUE, linOut = FALSE,
                inputsTest = NULL, targetsTest = NULL, pruneFunc = NULL,
                pruneFuncParams = NULL),
    type2 = caret::train(x, y,
                         method = "nnet",
                         preProcess = "range", # scales the data to be within [0,1]
                         tuneLength = 5,
                         trace = FALSE,
                         maxit = 100),
    type3 = rbf(x, y, size = 5, maxit = 1000,
                initFuncParams = c(0, 1, 0, 0.01, 0.001),
                learnFuncParams = c(1e-8, 0, 1e-8, 0.1, 0.8), linOut = FALSE),
    stop("Unknown version_ctrl: ", version_ctrl, call. = FALSE)
  )
  model_ann
}
|
8b01f744b1057a94b36034b6eec7834379520179
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/inst/Assessments/LFA41Assessment/4.figureSexRatios.r
|
d16014dae26f52e0673269e930dbcc96bb86f6df
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 7,208
|
r
|
4.figureSexRatios.r
|
#Figure sex ratios
# all sizes combined
require(bio.lobster)
p = bio.lobster::load.environment()
p$libs = NULL
fp = file.path(project.datadirectory('bio.lobster'),"analysis",'lfa41Assessment')
la()
a = c(file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','sexLFA41polygonSummerRV.rdata ') ,
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','sexLFA41dfogeorges.rdata '),
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','sexLFA41NEFSCfallrestratified.rdata '),
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','sexLFA41NEFSCspringrestratified.rdata ') )
for(i in 1:length(a)) {
load(a[i])
ap = aggregate(n.yst~yr,data=aa,FUN=sum)
af = aggregate(n.yst~yr,data=subset(aa,sex=='femberr'),FUN=sum)
apf = merge(ap,af,by='yr')
apf$pFem = apf$n.yst.y / apf$n.yst.x
p$add.reference.lines = F
p$time.series.start.year = min(aa$yr)
p$time.series.end.year = max(aa$yr)
p$metric = 'sexRatio' #weights
p$measure = 'stratified.mean' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$ylim=c(0,1)
p$file.name = paste(strsplit(strsplit(a[i],"/")[[1]][7],"\\.")[[1]][1],'png',sep=".")
print(p$file.name)
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.numbers = F #if using ymax and want to show the numbers that are cut off as values on figure
p$legend = FALSE
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
p$ylim2 = c(0,500)
xx = aggregate(ObsLobs~yr,data=aa,FUN=sum)
names(xx) =c('x','y')
ref.out= figure.stratified.analysis(x=apf[,c('yr','pFem')],out.dir = 'bio.lobster', x2 = xx, p=p,sampleSizes=T)
}
####################################################
#Just >95mm
# Same workflow as the all-sizes section above, restricted to mature
# (>95mm) lobsters via the 'maturesex*' result files.
a = c(file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','maturesexLFA41polygonSummerRV.rdata ') ,
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','maturesexLFA41dfogeorges.rdata '),
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','maturesexLFA41NEFSCfallrestratified.rdata '),
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','maturesexLFA41NEFSCspringrestratified.rdata ') )
for(i in 1:length(a)) {
load(a[i]) # loads `aa` for this survey
# Proportion female per year (sex=='femberr' assumed to code females -- confirm).
ap = aggregate(n.yst~yr,data=aa,FUN=sum)
af = aggregate(n.yst~yr,data=subset(aa,sex=='femberr'),FUN=sum)
apf = merge(ap,af,by='yr')
apf$pFem = apf$n.yst.y / apf$n.yst.x
# Plotting options consumed by figure.stratified.analysis.
p$add.reference.lines = F
p$time.series.start.year = min(aa$yr)
p$time.series.end.year = max(aa$yr)
p$metric = 'sexRatio' #weights
p$measure = 'stratified.mean' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$ylim=c(0,1)
p$file.name = paste(strsplit(strsplit(a[i],"/")[[1]][7],"\\.")[[1]][1],'png',sep=".")
print(p$file.name)
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.numbers = F #if using ymax and want to show the numbers that are cut off as values on figure
p$legend = FALSE
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
p$ylim2 = c(0,500)
# Secondary series: observed lobster counts (sample sizes) per year.
xx = aggregate(ObsLobs~yr,data=aa,FUN=sum)
names(xx) =c('x','y')
ref.out= figure.stratified.analysis(x=apf[,c('yr','pFem')],out.dir = 'bio.lobster', x2 = xx, p=p,sampleSizes=T)
}
####################################################
#Just <95mm
# Same workflow as the all-sizes section above, restricted to immature
# (<95mm) lobsters via the 'immaturesex*' result files.
a = c( file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','immaturesexLFA41polygonSummerRV.rdata ') ,
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','immaturesexLFA41dfogeorges.rdata '),
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','immaturesexLFA41NEFSCfallrestratified.rdata '),
file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment','immaturesexLFA41NEFSCspringrestratified.rdata '))
for(i in 1:length(a)) {
load(a[i]) # loads `aa` for this survey
# Proportion female per year (sex=='femberr' assumed to code females -- confirm).
ap = aggregate(n.yst~yr,data=aa,FUN=sum)
af = aggregate(n.yst~yr,data=subset(aa,sex=='femberr'),FUN=sum)
apf = merge(ap,af,by='yr')
apf$pFem = apf$n.yst.y / apf$n.yst.x
# Plotting options consumed by figure.stratified.analysis.
p$add.reference.lines = F
p$time.series.start.year = min(aa$yr)
p$time.series.end.year = max(aa$yr)
p$metric = 'sexRatio' #weights
p$measure = 'stratified.mean' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$ylim=c(0,1)
p$file.name = paste(strsplit(strsplit(a[i],"/")[[1]][7],"\\.")[[1]][1],'png',sep=".")
print(p$file.name)
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.numbers = F #if using ymax and want to show the numbers that are cut off as values on figure
p$legend = FALSE
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
p$ylim2 = c(0,500)
# Secondary series: observed lobster counts (sample sizes) per year.
xx = aggregate(ObsLobs~yr,data=aa,FUN=sum)
names(xx) =c('x','y')
ref.out= figure.stratified.analysis(x=apf[,c('yr','pFem')],out.dir = 'bio.lobster', x2 = xx, p=p,sampleSizes=T)
}
|
484e420cd7e3f5236b455aa63a07c6a87350ce5d
|
f4d0a736e7786b7bf9fa9b20a4ca489daf1f22de
|
/FEV_7_4_wc/FEV_prediction_wc.R
|
1422a06b0e6296f47457e1fc8ca145ad35b9c07d
|
[] |
no_license
|
jakrotgithub/lungfunction
|
6825a0ea1f89a677be0df942d6941324561b2e76
|
ed9aed0a606cf27caa6c63f634577cb2e1a910d7
|
refs/heads/master
| 2021-08-24T12:30:35.321064
| 2017-12-09T21:48:21
| 2017-12-09T21:48:21
| 103,309,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,689
|
r
|
FEV_prediction_wc.R
|
# Predict 20-year FEV1 trajectories for one subject under two smoking
# scenarios (quit today vs continue), conditioning on observed baseline FEV1.
#
# @param lmer_object fitted lmer model (random intercept + slope on year).
# @param predictors one-row data frame of the subject's baseline predictors,
#   on the raw (uncentered) scale; some variables may be NA.
# @return data.frame with year, smk scenario, cum_smoke, (centered) fev1_0,
#   back-transformed prediction (pred3), its SE and 95% prediction interval.
#
# NOTE(review): the original body did not parse (`is.na!(...)`, unbalanced
# parentheses) and referenced the free names `full_file_name` and `lmfin`
# instead of its parameters.  This version binds the parameters to those
# names and keeps the remaining logic; confirm against the author's intent.
make_predictions <- function(lmer_object, predictors) {
  full_file_name <- predictors
  lmfin <- lmer_object
  # The prediction data should be one row per subject; add a RANDOMID.
  full_file_name$RANDOMID <- 1
  # Create age category (from raw age, before centering below).
  full_file_name$agecat[full_file_name$age >= 65] <- 4
  full_file_name$agecat[full_file_name$age < 65 & full_file_name$age >= 50] <- 3
  full_file_name$agecat[full_file_name$age < 50 & full_file_name$age >= 35] <- 2
  full_file_name$agecat[full_file_name$age < 35 & full_file_name$age >= 20] <- 1
  full_file_name$agecat <- as.factor(full_file_name$agecat)
  # Center continuous predictors with the training means/SDs.  NA propagates
  # through the arithmetic, so no explicit NA guard is needed (the original
  # `[is.na!(...)]` subscripts did not parse).
  full_file_name$fev1_0 <- (full_file_name$fev1_0 - 2.979447188) / 0.794445308
  full_file_name$trig <- (full_file_name$trig - 93.02984434) / 79.628844
  full_file_name$hema <- (full_file_name$hema - 42.83871875) / 3.770632403
  full_file_name$alb <- (full_file_name$alb - 46.7343477) / 3.259360147
  full_file_name$glob <- (full_file_name$glob - 25.90719409) / 3.530116396
  full_file_name$alk_phos <- (full_file_name$alk_phos - 56.64908166) / 16.30523751
  full_file_name$white_bc <- (full_file_name$white_bc - 61.5919838) / 16.32819272
  full_file_name$qrs <- (full_file_name$qrs - 7.903884425) / 0.784763186
  # CENTERED alcohol index = (0.570*HIGHBALLS + 0.444*BEERS + 0.400*WINE) per week.
  full_file_name$alcohol <- ((full_file_name$beer * 0.444 +
                              full_file_name$cocktail * 0.570 +
                              full_file_name$wine * 0.400) - 3.681324783) / 4.781456965
  full_file_name$wine <- (full_file_name$wine - 1.532559397) / 3.13716088
  full_file_name$cocktail <- (full_file_name$cocktail - 2.749524582) / 5.049623158
  full_file_name$height2 <- (full_file_name$height^2 - 28422.20329) / 3185.597537 # centered height squared
  # Derived cumulative pack-years (needs smoke_year and daily_cigs; not centered).
  full_file_name$cum_smoke <- round((full_file_name$smoke_year * full_file_name$daily_cigs) / 20, 0)
  full_file_name$age <- (full_file_name$age - 36.61082037) / 9.249913362
  # Make sure categorical variables are factors.
  full_file_name$sex <- as.factor(full_file_name$sex)
  # Generate follow-up years 0..20 (and year^2) for prediction.
  year <- 0:20
  year <- cbind(year = year, year2 = year^2)
  data_pred <- merge(full_file_name, year, all = TRUE)
  # Two scenarios: smk = 0 quit today, smk = 1 continue at current rate.
  # (Fixed: merging a bare vector names the column "y"; use a named data.frame.)
  data_pred <- merge(data_pred, data.frame(smk = c(0, 1)), all = TRUE)
  # If continuing to smoke, accumulate pack-years over follow-up.
  # (Fixed: subset both sides to avoid recycling a full-length RHS.)
  cont <- data_pred$smk == 1
  data_pred$cum_smoke[cont] <- data_pred$cum_smoke[cont] +
    data_pred$daily_cigs[cont] * data_pred$year[cont] / 20
  ##############################################################
  # When data is ready, prediction begins here                 #
  ##############################################################
  # Variance components of the random intercept/slope model.
  vc <- as.data.frame(VarCorr(lmfin))
  v.int <- vc$vcov[1]       # random-intercept variance
  v.yr <- vc$vcov[2]        # random-slope (year) variance
  cov.int.yr <- vc$vcov[3]  # intercept-slope covariance
  v.err <- vc$vcov[4]       # residual variance
  # Population-level (fixed-effects only) prediction.
  pred <- predict(lmfin, data_pred, re.form = NA, allow.new.levels = TRUE)
  data_pred2 <- cbind(data_pred, pred)
  # Predicted FEV1 at baseline (year 0) for each subject.
  pfev0 <- subset(data_pred2, year == 0, select = c(RANDOMID, pred))
  colnames(pfev0)[2] <- "pfev0"
  data_pred2 <- join(data_pred2, pfev0, by = 'RANDOMID', type = 'right', match = 'all') # plyr::join
  # Bivariate covariance between baseline and future FEV1.
  cov11 <- v.int + 2 * data_pred2$year * cov.int.yr + data_pred2$year2 * v.yr + v.err
  cov12 <- v.int + data_pred2$year * cov.int.yr
  cov22 <- v.int + v.err
  data_pred2 <- cbind(data_pred2, cov11, cov12)
  data_pred2$cov22 <- cov22  # scalar, recycled to all rows
  # Condition the prediction on the observed (centered) baseline FEV1.
  pred2 <- data_pred2$pred + data_pred2$cov12 * (data_pred2$fev1_0 - data_pred2$pfev0) / data_pred2$cov22
  se2 <- sqrt(data_pred2$cov11 - data_pred2$cov12 * data_pred2$cov12 / data_pred2$cov22)
  # VERY IMPORTANT: back-transform prediction onto the original FEV1 scale.
  pred3 <- pred2 * 0.794445308 + 2.979447188
  se3 <- se2 * 0.794445308
  lower3 <- pred3 - 1.960463534 * se3 # lower 95% prediction interval
  upper3 <- pred3 + 1.960463534 * se3 # upper 95% prediction interval
  # (Fixed: cbind() of numeric vectors yields a matrix, on which `$`
  # subsetting fails; build a data.frame instead.)
  data_pred_fin <- data.frame(year = data_pred2$year, smk = data_pred2$smk,
                              cum_smoke = data_pred2$cum_smoke,
                              fev1_0 = data_pred2$fev1_0,
                              pred3, se3, lower3, upper3)
  # Baseline FEV1 is observed, so report it directly with zero uncertainty.
  # NOTE(review): fev1_0 here is the *centered* value -- confirm whether it
  # should be back-transformed before substitution.
  base <- data_pred_fin$year == 0
  data_pred_fin$pred3[base] <- data_pred_fin$fev1_0[base]
  data_pred_fin$se3[base] <- 0
  data_pred_fin$lower3[base] <- data_pred_fin$pred3[base]
  data_pred_fin$upper3[base] <- data_pred_fin$pred3[base]
  data_pred_fin
}
|
77f9a9564c81b5d1b72a793f681c6428505c0c1b
|
28c3f73a6d70c2fed4b2d2011bd1d9416a293b0e
|
/R/performance.metrics.R
|
389bbf42b18de6ec4697e14545b429ff0cf5eca8
|
[] |
no_license
|
cdeterman/OmicsMarkeR
|
a8cbe69bc26f98db69b89c02949a3a4d0ab2d8a1
|
f9a0f3dfd067c0a0beb9ad421982ad86e63914cf
|
refs/heads/master
| 2021-01-10T20:58:11.111636
| 2017-01-10T15:14:14
| 2017-01-10T15:14:14
| 13,659,839
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,576
|
r
|
performance.metrics.R
|
#' @title Performance Metrics of fs.stability or fs.ensembl.stability object
#' @description This will provide a concise data.frame of confusion matrix
#' and ROC statistics from the results of \code{fs.stability} or
#' \code{fs.ensembl.stability}.
#' @param fit.model An fs.stability or fs.ensembl.stability object
#' @param digits How many digits to round values
#' @return Dataframe of performance statistics by model
#' @author Charles E. Determan Jr.
#' @example inst/examples/performance.metrics.R
#' @export
performance.metrics <- function(fit.model, # fs.stability object
digits = max(3, getOption("digits") - 3))
{
# need to set class of performance statistics in fs.stability object
# instead, probably be easiest to simply use the fs.stability results and
# draw performance with generic performance.metrics function
assert_is_numeric(digits)
# Per-model performance tables and the vector/list of fitted model methods
perf <- fit.model$performance
fit.models <- fit.model$methods
if(length(fit.models) >= 2){
# Each model carries a tuning-parameter column (e.g. "mtry") that should
# not appear in the output; collect the parameter name per model ...
mult.param <-
sapply(fit.models,
function(x) as.character(params(x)[[1]]$parameter))
# ... then drop each model's own parameter column from its table.
# NOTE(review): with FUN given by name, `perf` and `y` are the two
# vectorised arguments of mapply(), pairing perf[[i]] with mult.param[i].
out <-
mapply(perf,
FUN = function(x,y) x[,!colnames(x) %in% y], y = mult.param)
out <- as.data.frame(out)
out
}else{
# Single model: stack the per-resample rows and drop the parameter column
perf <- do.call(rbind, perf)
param <- as.character(params(fit.models)[[1]]$parameter)
tmp <- perf[,!colnames(perf) %in% param]
out <- as.data.frame(tmp)
colnames(out) <- fit.models
out
}
}
|
87ff1b32ad37acf1590f7e6d88158455e189c734
|
70baf75d407baa766ae885e63aa23dd27a24decc
|
/review_notifications_foreign.r
|
a5a3812d963b7cc71401d72ca1d57606e53c7909
|
[] |
no_license
|
hazimtimimi/data_review
|
94d5f745758f0cb42cdb2297aa78900de2cf3669
|
9c707bd150a2cd2a94634666140636af9c78d626
|
refs/heads/master
| 2023-08-24T18:12:12.192591
| 2023-08-14T10:02:12
| 2023-08-14T10:02:12
| 39,380,950
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,662
|
r
|
review_notifications_foreign.r
|
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Look at trends in number of notifications among foreigners
# Hazim Timimi, June 2017
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set up the running environment ----
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# This depends on the person, location, machine used etc.and populates the following:
#
# file_name: Name of the PDF output file
#
# The next two are set using set_environment.r
#
# outfolder: Folder containing output subfolders for tables and figures
# connection_string: ODBC connection string to the global TB database
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
source("set_environment.r") #particular to each person so this file is in the ignore list
source("set_plot_themes.r")
# Define list of regions in SQL format if we don't want to plot all countries
# (If not keep it as an empty string)
# region_filter <- "AND iso2 IN (SELECT iso2 FROM view_TME_master_report_country
#                   WHERE g_whoregion IN ('AFR', 'EMR','SEA', 'WPR'))"
region_filter <- ""
# Output files carry the run date so repeated runs do not overwrite each other
file_name <- paste0(outfolder, "foreigner_graphs_", Sys.Date(), ".pdf")
file_name_pcnt <- paste0(outfolder, "foreigner_pcnt_graphs_", Sys.Date(), ".pdf")
# load packages ----
library(RODBC)
library(ggplot2)
library(dplyr)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the data ----
#
# I prefer to do this via SQL, but could be done of course with the pure views
# and some R jiggery pokery
#
# The query combines data from the master notification view with latest data
# reported as retrieved from the dcf views (dcf = data collection form)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
sql <- "SELECT country, year, notif_foreign, c_newinc FROM dcf.latest_notification
WHERE notif_foreign IS NOT NULL
UNION ALL
SELECT country, year, notif_foreign, c_newinc FROM view_TME_master_notification
WHERE year BETWEEN 2006 AND (SELECT MAX(year - 1) FROM dcf.latest_notification) AND
iso2 IN (SELECT iso2 from dcf.latest_notification WHERE notif_foreign IS NOT NULL)
ORDER BY country,year"
# Extract data from the database
channel <- odbcDriverConnect(connection_string)
data_to_plot <- sqlQuery(channel,sql)
# get list of countries (only those that reported notif_foreign)
countries <- sqlQuery(channel, paste("SELECT country FROM dcf.latest_notification",
"WHERE notif_foreign IS NOT NULL",
region_filter,
"ORDER BY country"))
close(channel)
# Calculate pcnt foreigners ----
# pcnt_foreign is NA when c_newinc is zero or missing (avoids division by zero)
data_to_plot <- data_to_plot %>%
mutate( pcnt_foreign = ifelse(c_newinc > 0, notif_foreign * 100 / c_newinc, NA))
# Round to the nearest whole number and format with a space as the thousands
# separator, per WHO standard. NA inputs stay NA. Vectorised via ifelse().
rounder <- function(x) {
  formatted <- formatC(round(x, 0), big.mark = " ", format = "d")
  ifelse(is.na(x), NA, formatted)
}
# Define graph layout ----
# - - - - - - - - - - -
plot_faceted <- function(df){
  # One panel per country:
  #   blue line  = new and relapse cases (c_newinc)
  #   green dots = cases among foreigners (notif_foreign)
  # qplot() is deprecated since ggplot2 3.4.0, so the plot is built with
  # ggplot() directly; the output is identical to the old qplot() call.
  graphs <- ggplot(df, aes(year, c_newinc)) +
    geom_line(colour = "blue") +
    geom_point(aes(year, notif_foreign), colour = "green") +
    # Use space separators for the y axis
    scale_y_continuous(name = "New and relapse cases (blue) and cases among foreigners (green dots) (number)",
                       labels = rounder) +
    scale_x_continuous(name="", breaks = c(2005, 2010, 2015, 2020)) +
    facet_wrap(~country,
               scales="free_y",
               # Use the labeller function to make sure long country names are wrapped in panel headers
               labeller = label_wrap_gen(width = 23)) +
    expand_limits(y=0) +
    theme_gtbr_2021(base_size=8, axis_text_size = 6)
  # note that inside a function the print() command is needed to paint to the canvass
  #(see http://stackoverflow.com/questions/19288101/r-pdf-usage-inside-a-function)
  print(graphs)
}
plot_faceted_pcnt <- function(df){
  # One panel per country: blue line + points of the percentage of new and
  # relapse cases occurring among foreigners.
  # qplot() is deprecated since ggplot2 3.4.0; ggplot() produces the same plot.
  graphs <- ggplot(df, aes(year, pcnt_foreign)) +
    geom_line(colour = "blue") +
    geom_point(aes(year, pcnt_foreign), colour = "blue") +
    scale_y_continuous(name = "% of new and relapse cases that are in foreigners") +
    scale_x_continuous(name="", breaks = c(2005, 2010, 2015, 2020)) +
    facet_wrap(~country,
               scales="free_y",
               # Use the labeller function to make sure long country names are wrapped in panel headers
               labeller = label_wrap_gen(width = 23)) +
    expand_limits(y=0) +
    theme_gtbr_2021(base_size=8, axis_text_size = 6)
  # print() is required so the plot is drawn when called inside a function
  print(graphs)
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plot the graphs to PDF -------
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# plot_blocks_to_pdf() paginates the faceted country plots into multi-page PDFs
source("plot_blocks_to_pdf.r")
plot_blocks_to_pdf(data_to_plot, countries, file_name, plot_function = plot_faceted)
plot_blocks_to_pdf(data_to_plot, countries, file_name_pcnt, plot_function = plot_faceted_pcnt)
|
c52c982fc6e2dcbcad1e49a1158676db6b2659cd
|
e6a26c0ec2e8b97f24a02da1877c1bcc6f233bcc
|
/R/spectrum.fnc.R
|
a3b3937184f143770c3bf0a3783a3c6eb7c22da7
|
[] |
no_license
|
cran/languageR
|
4733c665fee1186e562d13434d75bbc8cf3ee5d2
|
76ffa66a386d2e3df4947ce10c48741bfcaee0fa
|
refs/heads/master
| 2020-05-05T05:06:14.673115
| 2019-01-30T07:20:03
| 2019-01-30T07:20:03
| 17,696,980
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 201
|
r
|
spectrum.fnc.R
|
# Frequency spectrum of a text: for each token frequency observed in `text`,
# count how many distinct tokens occur with exactly that frequency.
spectrum.fnc <- function(text) {
  freq_of_freq <- table(table(text))
  data.frame(
    frequency = as.numeric(names(freq_of_freq)),
    freqOfFreq = as.numeric(freq_of_freq)
  )
}
|
a6d0fdd5054760acc198e2d45248d89571b4343e
|
b3bf7b8c56b2f3e8d8594cccce6f65981c9514e5
|
/R/internal.R
|
1ccfd2d3154db65df3e7fd5c289c2ab6faeb9348
|
[] |
no_license
|
faustovrz/bugcount
|
055ee388bcf9049e5d01cf3ad19898220f7787a2
|
f3fbb7e9ed5cecae78fdfaa1035e2a87e072be2d
|
refs/heads/master
| 2021-03-27T15:43:12.992541
| 2018-05-04T22:17:49
| 2018-05-04T22:17:49
| 104,142,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,238
|
r
|
internal.R
|
# Compatibility of graphic device with windows OS.
# Windows has no quartz() device, so alias it to windows() so that
# platform-agnostic plotting code can always call quartz().
if (.Platform$OS.type == "windows") {
quartz <- function() windows()
}
# Basic Function Initialization ################################################
# Make vectors of descriptors from the expected column names of the whitefly
# data frame
# Experiment-level descriptor column names: every "exp_"-prefixed column,
# plus "experiment" itself when no such column is present.
# Fixes two bugs in the original: the prepended vector used the function
# object `exp_descriptor` instead of the local `descriptor`, and the result
# of c() was discarded (never assigned), so "experiment" was never added.
# The condition is also corrected to test for the ABSENCE of an
# "experiment" column, which is what the prepend branch implies.
exp_descriptor <- function(wf){
  descriptor <- grep("exp_", colnames(wf), value = TRUE)
  if (!any(grepl("experiment", colnames(wf)))) {
    descriptor <- c("experiment", descriptor)
  }
  descriptor
}
# Formula-style string of the experiment-level descriptors,
# e.g. "experiment + exp_site".
# Fix: the original called exp.descriptor(), which does not exist -- the
# helper defined above is named exp_descriptor().
exp_criteria <- function(wf){
  paste(exp_descriptor(wf), collapse = " + ")
}
# Fixed plant-level grouping columns. `wf` is unused but kept so the
# signature matches the other *_descriptor helpers.
plant_descriptor <- function(wf){
  cols <- c("group", "propagation", "substrate", "clone", "rep", "plant")
  cols
}
# Formula-style string of experiment- plus plant-level descriptor names.
plant_criteria <- function(wf){
  all_cols <- c(exp_descriptor(wf), plant_descriptor(wf))
  paste(all_cols, collapse = " + ")
}
# Leaf-level descriptors: the plant-level descriptors plus the leaf column.
leaf_descriptor <- function(wf){
  append(plant_descriptor(wf), "leaf")
}
# Formula-style string of experiment- plus leaf-level descriptor names.
leaf_criteria <- function(wf){
  all_cols <- c(exp_descriptor(wf), leaf_descriptor(wf))
  paste(all_cols, collapse = " + ")
}
# fix aggregate column names
# aggregate() can return matrix-valued columns; splicing the object through
# cbind.data.frame flattens them into ordinary data.frame columns with
# properly combined names.
fix_ag_colnames <- function(ag){
  do.call(what = cbind.data.frame, args = ag)
}
#' Histogram panel for use in a pairs() scatterplot matrix: draws the
#' histogram of `x` in the current diagonal panel (pass as diag.panel).
panel_hist <- function(x, ...){
# Remember the panel's user coordinates and restore them on exit so the
# histogram does not disturb the other panels.
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
# Rescale bar heights to [0, 1] so every panel shares the same vertical scale
y <- h$counts; y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col = "cyan", ...)
}
#' Correlation panel for use in a pairs() scatterplot matrix: prints the
#' absolute pairwise correlation, with text size scaled to fill the panel,
#' plus significance stars from cor.test().
panel_cor <- function(x, y, digits=2, prefix="")
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
# Absolute correlation over all pairwise-complete observations
r <- abs(cor(x, y, use = "pairwise.complete.obs"))
txt <- format(c(r, 0.123456789), digits = digits)[1]
txt <- paste(prefix, txt, sep = "")
# Scale the label so it roughly fills the panel width
cex <- 0.4/strwidth(txt)
test <- cor.test(x,y)
# borrowed from printCoefmat: map the p-value to significance symbols
Signif <- symnum(test$p.value, corr = FALSE, na = FALSE,
cutpoints = c(0, 0.001, 0.01, 0.05, 0.1, 1),
symbols = c("***", "**", "*", ".", " "))
text(0.5, 0.5, txt, cex = cex, col = "darkgrey")
text(.65, .6, Signif, cex = cex , col = "darkgrey", pos = 4)
}
# Scatter panel for use in a pairs() scatterplot matrix:
# plots the points and overlays the least-squares fit line in red.
panel_points <- function(x,y){
points(x,y)
abline(stats::lm(y ~ x), col = "red")
}
# Variance decomposition, Heritability, BLUP ###################################
# Sorted vector of the experiments in which the given clone appears.
# Fix: the original defined `wf_exp` but then referenced `wf.exp` (leftover
# from a dot-to-underscore rename), which is an undefined object.
clone_exp <- function(clone,wf){
  wf_exp <- unique(wf[,c("clone","experiment")])
  sort(as.vector(wf_exp$experiment[wf_exp$clone == clone]))
}
# Sorted vector of the experiments associated with the given cross.
cross_exp <- function(cross,wf){
  pairs_tab <- unique(wf[, c("exp_cross", "experiment")])
  hits <- pairs_tab$experiment[pairs_tab$exp_cross == cross]
  sort(as.vector(hits))
}
# Clones that appear in EVERY experiment belonging to at least one cross.
# Fix: the original tested `length(complete.exp) > 0` but the variable is
# named `complete_exp` (another dot-vs-underscore typo), so the function
# always errored. Also starts the accumulator as character(0) so the
# degenerate no-match case returns an empty character vector.
select_complete_clones <- function(wf) {
  clones <- unique(as.character(wf$clone))
  experiments <- unique(as.character(wf$experiment))
  complete <- character(0)
  for (cross in unique(as.character(wf$exp_cross))) {
    # Experiments whose name contains this cross identifier
    cross_experiments <- grep(cross, experiments, value = TRUE)
    if (length(cross_experiments) > 0) {
      # TRUE for clones present in every experiment of this cross
      is_complete <- vapply(clones, function(cl) {
        all(cross_experiments %in% clone_exp(cl, wf))
      }, logical(1))
      complete <- unique(c(complete, clones[is_complete]))
    }
  }
  sort(complete)
}
|
a99caeaadf40a0455d5f6476487ad4b58c7e759e
|
105801668ac9b45d0530c7cc1deebec6433db08e
|
/Lexical Scoping
|
d4b030b992aaf8d07d7a0f96762390dc7156b521
|
[] |
no_license
|
ebenezerbediam89/ProgrammingAssignment2
|
1a598fbf15a20423ae0342f4f6ab938529fc5ecd
|
b94d285c9cc2322bf10682640d775f2815f83d72
|
refs/heads/master
| 2020-05-29T09:04:42.916753
| 2019-07-05T08:46:27
| 2019-07-05T08:46:27
| 189,050,788
| 1
| 0
| null | 2019-05-28T15:03:06
| 2019-05-28T15:03:05
| null |
UTF-8
|
R
| false
| false
| 932
|
Lexical Scoping
|
## makeCacheMatrix builds a cache container for a matrix and its inverse.
## It returns a list of accessor closures that share this call's
## environment: set/get the matrix, set/get the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # Replace the stored matrix and drop any stale cached inverse
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setmatrix <- function(solve) inv <<- solve
  getmatrix <- function() inv
  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}
## cacheSolve returns the inverse of the matrix wrapped by `x` (an object
## made by makeCacheMatrix). When a cached inverse exists it is returned
## directly after printing "getting cached matrix"; otherwise the inverse
## is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached matrix")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setmatrix(inv)
  inv
}
|
|
1a597437bd89b762a2605f4c2e47fa55ba9663c1
|
484657c476431839017a4c91d1754d6ba18da9a7
|
/area_curva.R
|
0197ef69786a82dda4b64db95702259d44275449
|
[] |
no_license
|
saracebu/TFG
|
8b6253db1a80dfa7f9958546b137d0e533c1a0d1
|
7882195bd14f56c160d33681f52832e33e06e378
|
refs/heads/main
| 2023-07-17T08:37:54.410981
| 2021-09-08T08:51:35
| 2021-09-08T08:51:35
| 399,497,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
area_curva.R
|
#' @title Calculate the area under a polygonal line.
#'
#' @description Computes the area under the piecewise-linear curve defined
#' by the points (x, y) using the trapezoidal rule: for each consecutive
#' pair of points, the trapezoid with parallel sides y[i-1], y[i] and width
#' x[i] - x[i-1] contributes delta_x * (y[i-1] + y[i]) / 2. A point (0, 0)
#' is prepended so the curve is anchored at the origin.
#'
#' @param x Numeric vector of x coordinates (non-negative)
#'
#' @param y Numeric vector of y coordinates (non-negative), same length as x
#'
#' @return Numeric vector of length 1 with the area under the curve. If all
#' y values are numerically zero, 1 is returned.
#'
#' @examples
#' area(c(0, 1, 2, 3), c(4, 3, 5, 1))
#'
area <- function(x, y){
  if(length(x) != length(y)){
    stop("The length of x and y must be the same.")
  }
  if(any(x < 0) | any(y < 0)){
    stop("Data must be positive.")
  }
  # NOTE(review): returning 1 for an all-zero curve looks like a deliberate
  # special case (perhaps to avoid division by zero downstream) -- confirm.
  if (max(y) < .Machine$double.eps) return(1)
  # Anchor the curve at the origin, then apply the trapezoidal rule
  x <- c(0, x)
  y <- c(0, y)
  delta_x <- diff(x)
  semisum_y <- 0.5 * (y[-1] + y[-length(y)])
  A <- delta_x * semisum_y
  area <- sum(A)
  return(area)
}
|
8b70cee1fab6d59f4be338a82e484a4b797800c3
|
7ebca7fbe69d3a74bd250a716ceaac569b53e94f
|
/shiny_web_apps/HouseHunt/server.R
|
cba8df96004789a4bff893d7edaccc38389ff8fd
|
[] |
no_license
|
mrinaltrikha/dso545_uber_movement_plus_drone_delivery
|
2a938a338975fcc4fd67af1cc39413d756161e6c
|
ce6fff8619524dc6a372fb4eff498623694ecab0
|
refs/heads/master
| 2020-03-16T14:11:03.344210
| 2018-05-09T08:00:01
| 2018-05-09T08:00:01
| 132,709,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,642
|
r
|
server.R
|
# Load Library: shiny - Web Application Framework for R
library(shiny)
# Load Library: dplyr - A Grammar of Data Manipulation
library(dplyr)
# Load Library: ggmap - Spatial Visualization with ggplot2
library(ggmap)
# Load Library: viridis - Default Color Maps from 'matplotlib'
library(viridis)
# Define Shiny Server
# NOTE(review): this server relies on objects created elsewhere (global.R?):
# dfUberCensusTracts, dfUberHourlyAggregate, SanFransisco_map -- confirm they
# are loaded before the app starts.
shinyServer(function(input, output) {
# All Locations in San Fransisco
dfUniqueMovementIds = dfUberCensusTracts %>%
select(MovementId, DisplayName) %>%
distinct()
# Pre-Populate Location Selection Drop-Down
movementIds <- setNames(dfUniqueMovementIds$MovementId, dfUniqueMovementIds$DisplayName)
# Reactive Dataset - Average of Travel Times to/from Work
dataset <- reactive({
# Work Location Selected by User
selectedDestinationId = input$destinationId
# Preferred Start of Travel Time to Work
hourOfDayForTravelToDestination = input$hourOfDayForTravelToDestination
# Maximum Travel Time to Work Acceptable to User (in mins)
# (slider is in minutes; * 60 suggests the data is in seconds -- confirm)
maxTravelTimeToDestination = input$maxTravelTimeToDestination * 60
# Preferred Start of Travel Time from Work
hourOfDayForTravelFromDestination = input$hourOfDayForTravelFromDestination
# Maximum Travel Time from Work Acceptable to User (in mins)
maxTravelTimeFromDestination = input$maxTravelTimeFromDestination * 60
# Get Average Travel Times to Work from All Locations in San Fransisco:
# keep rows for the selected destination whose travel time at either the
# to-work or from-work hour is within the user's limits, then average
# (converted to minutes via / 60)
dfAvgTravelTimesToDestination =
dfUberHourlyAggregate %>%
filter(dstid == selectedDestinationId &
((hod == hourOfDayForTravelToDestination &
mean_travel_time <= maxTravelTimeToDestination) |
(hod == hourOfDayForTravelFromDestination &
mean_travel_time <= maxTravelTimeFromDestination))) %>%
select(dstid, sourceid, hod, mean_travel_time, standard_deviation_travel_time) %>%
group_by(dstid, sourceid) %>%
summarise(mean_travel_time = mean(mean_travel_time) / 60)
# Add Geospatial Data to Average Travel Times
dfOriginsWithLeastTravelToDestination =
right_join(dfUberCensusTracts,
dfAvgTravelTimesToDestination,
by = c("MovementId" = "sourceid"),
copy = FALSE,
suffix = c(".census_tracts", ".travel_times"))
})
# Reactive Dataset - Summary of Average Travel Times to/from Work
datasetSummaryTable <- reactive({
dfSummary = dataset() %>%
group_by(DisplayName) %>%
summarise(MeanTravelTime = mean(mean_travel_time)) %>%
arrange(MeanTravelTime)
names(dfSummary) <- c("Housing Option", "Mean Travel Time (in mins)")
dfSummary
})
# Render Plot - Average of Travel Times to/from Work
output$plot <- renderPlot({
ggmap(SanFransisco_map) +
# NOTE(review): aes(x = Lat, y = Long) puts latitude on the x axis; maps
# normally use x = longitude, y = latitude -- confirm whether the columns
# in dfUberCensusTracts are simply mis-named.
geom_polygon(aes(x = Lat,
y = Long,
group = MovementId,
fill = mean_travel_time),
size = .2,
color = 'black',
data = dataset(),
alpha = 0.7) +
scale_fill_viridis(direction = -1,
name="Avg. Travel Time (in minutes)") +
# Hide axis titles/labels/ticks and place the legend under the map
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
legend.position = "bottom",
legend.direction = "horizontal")
})
# Summary Table - Average of Travel Times to/from Work
output$table <- renderTable({
datasetSummaryTable()
})
})
|
36e22654628f54395e2268733fd79891b10bb4f8
|
f9f42e00cdc3ddb75f4496733fbba28ca1f740b0
|
/tests/testthat.R
|
e0c2bb30f2b0cd860f076a45e462b65f0c49f496
|
[] |
no_license
|
richardsprague/actino
|
20f36f951cb7a45294225005de8c1e69fc4a7fdd
|
f4bbaa1c7cf3f520de34e53ef340877ebc9ee07d
|
refs/heads/master
| 2021-12-09T21:08:21.017115
| 2021-12-03T23:37:40
| 2021-12-03T23:37:40
| 80,489,917
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
testthat.R
|
# All test scripts are called from here: loading testthat plus the package
# under test, then running the package's test suite.
library(testthat)
library(actino)
test_check("actino")
# The lines below are leftover interactive/debugging scaffolding kept for
# reference (fixture loading and targeted test runs); they are intentionally
# commented out.
# print("running testthat.R")
#
# c <- just_json_files_in("./data/kombucha")
#
# data("kombucha.csv")
# data("kombucha")
# k = kombucha.csv
# k$ssr <- 0000
# mapfile <- data.frame(ssr = c(0), Username = c("testPerson"))
#
# p <- kombucha
#
# #print(getwd())
# #test_file("./tests/testthat/test-reading.R")
# test_dir("./tests/testthat/")
|
6ebf26c121b50a06ddd54fe12a565c7209d0e274
|
b9d4fdc5b544ffb6158705d1c8e81f670a2931f1
|
/man/plot_subgroups.Rd
|
5838f919ee87825dad4fd90b84e67ba9fe9f7245
|
[] |
no_license
|
cran/nph
|
284e6c2ad631edfd62f46a3540c499d26c287476
|
e2fa54d4e719b85ad2c40e9ce8b6b0010bea4f1c
|
refs/heads/master
| 2022-06-17T07:47:58.897005
| 2022-05-16T22:20:05
| 2022-05-16T22:20:05
| 236,633,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,115
|
rd
|
plot_subgroups.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additional_functions_1-1.R
\name{plot_subgroups}
\alias{plot_subgroups}
\title{Draw a population composition plot}
\usage{
plot_subgroups(
A,
B,
colors = "default",
max_time = max(A$Tint),
position = c("stack", "fill"),
title = ""
)
}
\arguments{
\item{A}{An object of class \code{mixpch}, resembling the survival function in treatment group 0}
\item{B}{An object of class \code{mixpch}, resembling the survival function in treatment group 1}
\item{colors}{Either a vector of length four with colors for A and B and subgroup 1 and 2, or "default".}
\item{max_time}{the maximum value for the x-axis.}
\item{position}{Either "stack" or "fill". By default (stack), the total population decreases through time. If position="fill", the size of the population is rescaled to show conditional percentages.}
\item{title}{The text for the title.}
}
\description{
A figure that shows the composition of the population under study though time
}
\examples{
A <- pop_pchaz(Tint = c(0, 90, 365),
lambdaMat1 = matrix(c(0.2, 0.1, 0.4, 0.1), 2, 2) / 365,
lambdaMat2 = matrix(c(0.5, 0.2, 0.6, 0.2), 2, 2) / 365,
lambdaProg = matrix(c(0.5, 0.5, 0.4, 0.4), 2, 2) / 365,
p = c(0.8, 0.2),
timezero = FALSE, discrete_approximation = TRUE)
B <- pop_pchaz(Tint = c(0, 90, 365),
lambdaMat1 = matrix(c(0.2, 0.1, 0.4, 0.1), 2, 2) / 365,
lambdaMat2 = matrix(c(0.5, 0.1, 0.6, 0.1), 2, 2) / 365,
lambdaProg = matrix(c(0.5, 0.5, 0.04, 0.04), 2, 2) / 365,
p = c(0.8, 0.2),
timezero = FALSE, discrete_approximation = TRUE)
plot_subgroups(A, B, title = "position='stack'")
plot_subgroups(A, B, position='fill', title = "position='fill'")
}
\references{
Robin Ristl, Nicolas Ballarini, Heiko Götte, Armin Schüler, Martin Posch, Franz König. Delayed treatment effects, treatment switching and
heterogeneous patient populations: How to design and analyze RCTs in oncology. Pharmaceutical statistics. 2021; 20(1):129-145.
}
\seealso{
\code{\link{pop_pchaz}}
}
\author{
Robin Ristl, \email{robin.ristl@meduniwien.ac.at}, Nicolas Ballarini
}
|
6d3178eb57b0ceadd2e3f560743875970d149289
|
21ddf3011b05427cd69fa66e0321929107bfd58e
|
/man/formatTree.Rd
|
3b8c545d3edae9192e11d88392f1587a59ed6ee9
|
[] |
no_license
|
luisrizzardi/TreeAndLeaf
|
9293fcfbb2723566f8bd516e026b74dfdb7951f8
|
e5fe966e07c5941c6214586d9361e5e21aa66518
|
refs/heads/master
| 2020-05-17T11:06:43.091842
| 2019-11-04T18:46:48
| 2019-11-04T18:46:48
| 183,676,588
| 2
| 0
| null | 2019-04-26T18:29:49
| 2019-04-26T18:29:48
| null |
UTF-8
|
R
| false
| true
| 1,369
|
rd
|
formatTree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formatTree.R
\name{formatTree}
\alias{formatTree}
\title{A theme function for formatting of an igraph to be shown in RedeR}
\usage{
formatTree(gg, theme = 1, cleanalias = FALSE)
}
\arguments{
\item{gg}{An igraph object generated by either \code{\link{hclust2igraph}}
or \code{\link{phylo2igraph}}<igraph>.}
\item{theme}{An integer ranging from 1 to 5 with desired theme. \cr
Options are: \cr
1- A clean black and blue theme, great for higher levels of user
customization. \cr
2- A theme with a palette of greens. \cr
3- A theme with a palette of blues. \cr
4- A theme with a palette of purples. \cr
5- A theme with a palette of reds. \cr
For custom formatting, see \code{\link[RedeR]{addGraph}} for accepted
parameters <integer>. \cr}
\item{cleanalias}{A logical that removes the node aliases when set to
TRUE (default = FALSE) <logical>.}
}
\value{
An igraph object with standard formatting for RedeR application.
}
\description{
Applies formatting patterns to an igraph object
according to predefined themes.
This formatting is used for plotting on the RedeR app interface.
}
\examples{
hc <- hclust(dist(USArrests), "ave")
gg <- hclust2igraph(hc)
gg <- formatTree(gg = gg,
theme = 5)
}
\seealso{
\code{\link[RedeR]{addGraph}}
\code{\link{treeAndLeaf}}
}
|
e6c0b6b1a748911e093f410352f67571e209de99
|
0746beaacc333c85756fc1b4f281bc09da014a89
|
/R/add_metadata.R
|
29fd42e38eea128fb8066e250a013e94b10d38cf
|
[] |
no_license
|
wenwenmin/Project-NBT2017-recount
|
e6d935cd4fae27c15e7e3354df3010d8430ddaa4
|
50ea27f0c5899e088801f696f60f05b95d250f18
|
refs/heads/master
| 2020-03-27T06:53:32.349037
| 2018-07-28T06:20:41
| 2018-07-28T06:20:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,143
|
r
|
add_metadata.R
|
#' Add additional curated metadata to a recount rse object
#'
#' This function appends sample metadata information to a
#' \link[SummarizedExperiment]{RangedSummarizedExperiment-class} from the
#' recount2 project. The sample metadata comes from curated efforts
#' independent from the original recount2 project. Currently the only
#' information comes from the recount_brain project described in more detail
#' at \url{http://lieberinstitute.github.io/recount-brain/}.
#'
#'
#' @param rse A \link[SummarizedExperiment]{RangedSummarizedExperiment-class}
#' object as downloaded with \link{download_study}. If this argument is
#' not specified, the function will return the raw metadata table.
#' @param source A valid source name. The only supported options at this
#' moment are \code{recount_brain_v1} and \code{recount_brain_v2}.
#' @param is_tcga Set to \code{TRUE} only when \code{rse} is from TCGA.
#' Otherwise set to \code{FALSE} (default).
#' @param verbose If \code{TRUE} it will print a message of where the
#' predictions file is being downloaded to.
#'
#' @return A \link[SummarizedExperiment]{RangedSummarizedExperiment-class}
#' object with the sample metadata columns appended to the \code{colData()}
#' slot.
#' For \code{source = "recount_brain_v1"}, the metadata columns are
#' described at \url{http://lieberinstitute.github.io/recount-brain/}.
#' For \code{source = "recount_brain_v2"}, the metadata columns are
#' described at
#' \url{http://lieberinstitute.github.io/recount-brain/cross_studies_metadata/cross_studies_metadata.html}.
#'
#' @details If you use the recount_brain data please cite the Razmara et al
#' bioRxiv pre-print available at
#' (TODO update URL once it's available). See citation
#' details with citation('recount').
#'
#' @references
#' Razmara et al, in prep, 2018.
#'
#' @author Leonardo Collado-Torres
#' @export
#'
#' @import downloader
#' @import SummarizedExperiment
#'
#' @examples
#'
#' ## Add the sample metadata to an example rse_gene object
#' rse_gene <- add_metadata(rse_gene_SRP009615, 'recount_brain_v1')
#'
#' ## Explore the metadata
#' colData(rse_gene)
#'
#' ## For a list of studies present in recount_brain_v1 check
#' ## http://lieberinstitute.github.io/recount-brain/. Note that it only
#' ## includes studies from SRA, so no TCGA or GTEx (those have great
#' ## sample metadata already available).
#' ## recount_brain_v2 includes GTEx and TCGA brain samples in addition to the
#' ## recount_brain_v1 data.
#'
#' \dontrun{
#' ## Example project that is present in recount_brain_v2.
#'
#' ## Download and load the data
#' download_study('ERP001304')
#' load(file.path('ERP001304', 'rse_gene.Rdata'))
#'
#' ## Add the sample metadata from recount_brain_v2
#' rse_gene <- add_metadata(rse_gene, source = 'recount_brain_v2')
#'
#' ## Check the metadata
#' colData(rse_gene)
#' }
#'
#' ## Obtain all the recount_brain_v2 metadata
#' recount_brain_v2 <- add_metadata(source = 'recount_brain_v2')
#'
add_metadata <- function(rse, source = 'recount_brain_v1', is_tcga = FALSE,
verbose = TRUE) {
stopifnot(length(source) == 1)
## Table of supported metadata sources; defined inline (rather than at the
## top level) to avoid a NOTE in R CMD check
valid_sources <- data.frame(
name = c('recount_brain_v1', 'recount_brain_v2'),
url = c(
'https://github.com/LieberInstitute/recount-brain/blob/master/merged_metadata/recount_brain_v1.Rdata?raw=true', 'https://github.com/LieberInstitute/recount-brain/blob/master/cross_studies_metadata/recount_brain_v2.Rdata?raw=true'),
object = c('recount_brain', 'recount_brain'),
sample_id = c('run_s', 'run_s'),
stringsAsFactors = FALSE
)
stopifnot(tolower(source) %in% tolower(valid_sources$name))
## Row of valid_sources matching the requested source (case-insensitive)
to_use <- valid_sources[tolower(valid_sources$name) == tolower(source), ]
## Download the curated-metadata .Rdata file to a temporary location
destfile <- file.path(tempdir(), paste0(to_use$name, '.Rdata'))
if(verbose) message(paste(Sys.time(), 'downloading the', to_use$object, 'metadata to', destfile))
downloader::download(to_use$url, destfile = destfile, mode = 'wb')
## Load the .Rdata inside a local scope and return the object it defines
## (whose name is given by to_use$object)
load_meta <- function() {
load(destfile, verbose = verbose)
get(to_use$object)
}
new_meta <- load_meta()
## With no rse supplied, return the raw metadata table
if(missing(rse)) return(new_meta)
## Match rse samples to metadata rows: TCGA keys on gdc_file_id, SRA on run
if(is_tcga) {
map <- match(colData(rse)$gdc_file_id, new_meta[, to_use$sample_id])
} else {
map <- match(colData(rse)$run, new_meta[, to_use$sample_id])
}
if(verbose) {
message(paste(Sys.time(), 'found', sum(!is.na(map)), 'out of', length(map), 'samples in the', to_use$object, 'metadata'))
}
## Make a dummy table with the new metadata to be added: all-NA rows for
## unmatched samples, with the sample_id column dropped
dummy <- as.data.frame(matrix(NA, nrow = ncol(rse),
ncol = ncol(new_meta) - 1))
cols_to_drop <- which(colnames(new_meta) == to_use$sample_id)
colnames(dummy) <- colnames(new_meta)[- cols_to_drop]
## In case new data is present, fill the rows for matched samples
if(any(!is.na(map))){
dummy[!is.na(map), ] <- new_meta[map[!is.na(map)], - cols_to_drop]
}
rownames(dummy) <- NULL
## Merge new metadata and return the rse
colData(rse) <- cbind(colData(rse), dummy)
return(rse)
}
|
94e13df694c0bf4b04ffdbcf19a65ac7e8d39e25
|
d27c606c99b41e697b2aae73863900e5cf17b59d
|
/plot1.R
|
f4982ed4f4221a2170e34a67aaee18abfae70590
|
[] |
no_license
|
theam14/ExData_Plotting1
|
c526105155b1d6216ef4373a14b94dfd96f68929
|
3a48b4adbf816c9636f86e78c16bd38ce537c83f
|
refs/heads/master
| 2021-01-12T22:11:24.500047
| 2015-06-07T22:44:12
| 2015-06-07T22:44:12
| 36,842,515
| 0
| 0
| null | 2015-06-04T02:14:43
| 2015-06-04T02:14:42
| null |
UTF-8
|
R
| false
| false
| 294
|
r
|
plot1.R
|
# Build 'plot1.png' containing Plot 1 from the project 1 assignment of the
# Exploratory Data Analysis course. Relies on helpers defined in run_plot.R.
source("run_plot.R")

# Load the data set
data <- loadData()

# Render Plot 1 into the PNG device and close it
png("plot1.png")
runPlot1(data)
dev.off()
|
38928ab66c960e7a5bdecaf01a9a7af445d98d4b
|
44abd544104271bc9bd73faf892aebe75642152b
|
/PCA_EXAMPLES.R
|
f39448ad10c17db303a638b63c5d83be7f896028
|
[] |
no_license
|
JHubb565/R-Code
|
a50c49535f1501b9ec8aae9844921130feca1086
|
fa42a8cb41f1d99fcbdc0df18a71f190a697002f
|
refs/heads/master
| 2021-05-14T03:53:24.448658
| 2019-07-10T14:22:58
| 2019-07-10T14:22:58
| 116,629,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,711
|
r
|
PCA_EXAMPLES.R
|
################################################################################
# Justin Hubbard
################################################################################
# load in data
getwd()
# NOTE(review): setwd() to a user-specific absolute path makes this script
# non-portable; consider a relative path or a project-based workflow.
setwd("C:\\Users\\malan\\Dropbox\\_Purdue\\_Teaching\\DM\\6_Dimension Reduction")
# Book Example - p.96 1990 California census housing data
# Each record corresponds to a census block - each block has on avg 1425 people
# Y = "median_house_value" : income in thousands of dollars
# X1 = "median_income" : income scaled between 0 and 15
# X2 ="housing_median_age" : age in years of house
# X3 ="total_rooms" : # of rooms in block
# X4 ="total_bedrooms" : # bedrooms in block
# X5 ="population" : # of people living in block
# X6 ="households" : # of households in block
# X7 ="latitude" : center latitude of block
# X8 ="longitude" : center longitude of block
# Read the 1990 CA housing data. Fix: myUrl was commented out while the
# read.table() call below still referenced it, so this line failed with
# "object 'myUrl' not found". The URL is now assigned before use.
myUrl <- "https://raw.githubusercontent.com/MatthewALanham/Datasets/master/1990CAHousingData.csv"
df <- read.table(file = myUrl, header = TRUE, sep = ",")
str(df)
head(df, n=2)
names(df)
# Shorter working names for the nine columns (Y = value, then 8 predictors)
names(df) <- c("value","income","homeage","rooms","beds","pop","homes","lat","long")
################################################################################
# summarize features
library(psych)
stats <- describe(df)
# distribution of each variable
par(mfrow=c(3,3))
for (i in 1:9) {
hist(df[,i], xlab=names(df)[i], main=names(df)[i], col="blue", prob=T)
lines(density(df[,i]), col="red", lwd=2) #adds density line
}
# distribution and correlation of specific variables
library(GGally)
ggpairs(df[,2:9])
# correlation matrix
corMatrix <- cor(df)
corMatrix
# Looking at the correlation among the numeric features
library(corrgram)
corrgram(df
, order=F
, lower.panel=panel.shade
, upper.panel=panel.shade
, text.panel=panel.txt
, main="Correlogram of California House Data"
)
# ignore Y response variable
df <- df[,2:9]
################################################################################
# easy way to standardize variables
dfz <- data.frame(scale(df))
# change variable names so they have a "_z" after them
for (i in 1:8) {
names(dfz)[i] <- paste0(names(dfz)[i],"_z")
}
names(dfz)
# check that we get mean of 0 and sd of 1
summary(dfz) # faster version of apply(scaled.dat, 2, mean)
apply(dfz, 2, sd) # standard deviations for each standardized variable
# distribution of each variable
par(mfrow=c(3,3))
for (i in 1:8) {
hist(dfz[,i], xlab=names(dfz)[i], main=names(dfz)[i], col="blue", prob=T)
lines(density(dfz[,i]), col="red", lwd=2) #adds density line
}
# distribution and correlation of specific variables
library(GGally)
ggpairs(dfz[,1:8])
# correlation matrix of the standardized data
corMatrixz <- cor(dfz)
corMatrixz
# Looking at the correlation among the numeric features
library(corrgram)
corrgram(dfz
, order=F
, lower.panel=panel.shade
, upper.panel=panel.shade
, text.panel=panel.txt
, main="Correlogram of California House Data"
)
################################################################################
# randomly choose a training and testing set (~90/10 split)
set.seed(1234) # use this to replicate results
choose <- runif(nrow(dfz))
train <- dfz[which(choose > 0.1),] # this is the data set we do PCA on
test <- dfz[which(choose <= 0.1),] # this is our evaluation set (like new data)
# pca using the psych package
# be aware that if you get an error its likely due to multicollinearity
library(psych)
names(train)
pca1 <- principal(train
, nfactors = 8 # number of components to extract
, rotate = "none" # can specify different rotations
, scores = T # find component scores or not
)
# eigenvalues
pca1$values
# loadings matrix, variance explained
# Cumulative Var is the proportion of total variability explained in Z that is
# explained by the cumulative of ith principal components
pca1$loadings
# Here we see that the first component accounts for nearly half (49.0%) of
# the total variance in the dataset.
# PC1 PC2 PC3 PC4 PC5 PC6 PC7 PC8
#SS loadings 3.922 1.909 1.073 0.818 0.137 0.081 0.047 0.014
#Proportion Var 0.490 0.239 0.134 0.102 0.017 0.010 0.006 0.002
#Cumulative Var 0.490 0.729 0.863 0.965 0.982 0.992 0.998 1.000
# the sum of the eigenvalues always equals the total number of m features
sum(pca1[[1]])
# Using the eigenvalue criterion - how many components to keep?
length(pca1[[1]][pca1[[1]] > 1])
# scree plot
par(mfrow=c(1,1))
plot(pca1$values, type="b", main="Scree plot"
, col="blue", xlab="Component #", ylab="Eigenvalues", pch=19)
# plot factor scores from 3rd PC
pairs(cbind(train[,c("income_z", "homeage_z")], pca1$scores[,3])
, labels = c("Median income","Home Age","PC 3 Scores"))
# plot factor scores from 4th PC
pairs(cbind(train[,c("income_z", "homeage_z")], pca1$scores[,4])
, labels = c("Median income","Home Age","PC 4 Scores"))
# calculate communalities for income_z (row 2 of the loadings matrix):
# com3 uses the first 3 components, com4 adds the 4th component's loading
com3 <- loadings(pca1)[2,1]^2 + loadings(pca1)[2,2]^2 + loadings(pca1)[2,3]^2
com4 <- loadings(pca1)[2,1]^2 + loadings(pca1)[2,2]^2 + loadings(pca1)[2,3]^2 +
loadings(pca1)[2,4]^2
com3; com4
# validation of pca: repeat on the held-out set with 4 components
pca2 <- principal(test
, nfactors = 4 # number of components to extract
, rotate = "none" # can specify different rotations
, scores = T # find component scores or not
)
pca2$loadings
|
8bb703b64659e69850593113b20df4597e8688f2
|
880251c02cfe2a75061745308404f967246c31ad
|
/man/generateHazardMatrix.Rd
|
77676719c586f8adeee4014226864bf86f73b3b1
|
[] |
no_license
|
blasern/gems
|
efe0094764c3e21dee5c10a9521b9f16ad85983d
|
12cfd4cc6858a7aa72cd42e88a6b867db0d17f78
|
refs/heads/master
| 2022-07-12T15:28:51.889025
| 2022-06-08T07:54:10
| 2022-06-08T07:54:10
| 35,159,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,040
|
rd
|
generateHazardMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gems.r
\name{generateHazardMatrix}
\alias{generateHazardMatrix}
\title{generate template for transition functions}
\usage{
generateHazardMatrix(statesNumber)
}
\arguments{
\item{statesNumber}{the number of states to be considered.}
}
\value{
a \code{transition.structure} of dimension \eqn{N \times N}{N x N},
where \eqn{N} is the number of states and with value "impossible" for all
potential transitions.
}
\description{
This function simplifies generating the matrix of transition functions.
}
\author{
Luisa Salazar Vizcaya, Nello Blaser, Thomas Gsponer
}
\references{
Nello Blaser, Luisa Salazar Vizcaya, Janne Estill, Cindy Zahnd,
Bindu Kalesan, Matthias Egger, Olivia Keiser, Thomas Gsponer (2015). gems:
An R Package for Simulating from Disease Progression Models. Journal of
Statistical Software, 64(10), 1-22. URL http://www.jstatsoft.org/v64/i10/.
}
\seealso{
\code{\link{transition.structure}},
\code{\link{simulateCohort}}
}
\keyword{utilities}
|
d119c89d9a2c1a7bab54f6a3c9c0ceacf481b3ff
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.s3/man/delete_bucket_cors.Rd
|
d900a65dcb987d4374fe62259830058ff6594fd3
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 608
|
rd
|
delete_bucket_cors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.s3_operations.R
\name{delete_bucket_cors}
\alias{delete_bucket_cors}
\title{Deletes the CORS configuration information set for the bucket}
\usage{
delete_bucket_cors(Bucket)
}
\arguments{
\item{Bucket}{[required]}
}
\description{
Deletes the CORS configuration information set for the bucket.
}
\section{Accepted Parameters}{
\preformatted{delete_bucket_cors(
Bucket = "string"
)
}
}
\examples{
# The following example deletes CORS configuration on a bucket.
\donttest{delete_bucket_cors(
Bucket = "examplebucket"
)}
}
|
e6e01d7f1792c8161826128d5306b5da3ce2a3f8
|
a1405253a430168fe1b412defcec2d70271d65bd
|
/Re3-readability/src/get_readability_stats/readability-snippets-r/snippet80.R
|
25498f2f291c6c205f718368d7a05f05d09d131e
|
[
"MIT"
] |
permissive
|
Re3-reproducibility-readability/21-05-Re3
|
4ccb75f671ff8c2c905cd823068c99de9f5aacc2
|
8c35318fabcffa2fb83a588b91b45d0cdd5aa04e
|
refs/heads/main
| 2023-04-20T23:18:33.066671
| 2021-05-12T18:07:28
| 2021-05-12T18:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
r
|
snippet80.R
|
# bind income to the quantiles
lpr_ci_med = cbind(jew_seq, lpr_ci_med)
#plot
plot_pred_LPR_both = ggplot(lpr_ci) +
geom_line(aes(x=jew_seq, y = mid, color = 'black')) +
geom_ribbon(aes(x=jew_seq, ymin=lower, ymax=upper), fill = 'red', alpha = .25) +
xlab('pctJews 1931') +
ylab('Predicted pctLPR Vote') +
geom_line(data = lpr_ci_med, aes(x=jew_seq, y = mid, group = "X at Observed", color = 'blue')) +
geom_ribbon(data = lpr_ci_med, aes(x=jew_seq, ymin=lower, ymax=upper), alpha = .25, fill = 'cyan') +
scale_color_discrete(labels = c("X at Observed", "X at Median"))
plot_pred_LPR_both
|
7c48d1e969f5d67bb80daa82fb184f2bae16d0d0
|
45990fe7a844e0f18fc983238b3aaf88d4d83f60
|
/plot1.R
|
a6b4ce8517bf2a567b9ea33a5ddaa17dd71c3154
|
[] |
no_license
|
ClairexwWang/ExData_Plotting1
|
91e780a39855c805e4019b0f4f6b1f93908565e2
|
0788a3d70691dafe1019f958b7a70e1fbfe3d230
|
refs/heads/master
| 2020-12-30T20:48:22.645628
| 2015-04-12T18:36:47
| 2015-04-12T18:36:47
| 33,823,850
| 0
| 0
| null | 2015-04-12T16:13:51
| 2015-04-12T16:13:51
| null |
UTF-8
|
R
| false
| false
| 361
|
r
|
plot1.R
|
# Exploratory Data Analysis - Plot 1: histogram of Global Active Power for
# 2007-02-01 and 2007-02-02 from the household power consumption data.
# Fixes: the file was pasted from an interactive session, so every line began
# with a ">" console prompt (making the script non-executable), and dev.off
# was referenced without parentheses, so the PNG device was never closed.
mydata2 <- read.table("household_power_consumption.txt", na.strings = "?",
                      header = TRUE, sep = ";")
# Keep only the two target dates (dd/m/yyyy as stored in the raw file).
mydata_sub <- mydata2[mydata2$Date == "1/2/2007" | mydata2$Date == "2/2/2007", ]
png(filename = "plot1.png", width = 480, height = 480)
hist(mydata_sub$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
c57e29307acc718ecd852be58e4da3445099ba83
|
84b5ba1d2ae5d41f0ff9ba9f9b793366d0207f0b
|
/man/read.zTree.Rd
|
532400eab58d9201211ecb5961d477470760c719
|
[] |
no_license
|
cran/SciencesPo
|
f09bc2d2a23cd15b380892643825d43fee764794
|
b957c971d0f740318e4a6327459a3627b519fa51
|
refs/heads/master
| 2020-12-19T19:01:50.770536
| 2016-08-05T00:24:29
| 2016-08-05T00:24:29
| 17,693,640
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 627
|
rd
|
read.zTree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zTree.R
\name{read.zTree}
\alias{read.zTree}
\title{Reads zTree output files}
\usage{
read.zTree(object, tables = c("globals", "subjects"))
}
\arguments{
\item{object}{a zTree file or a list of files.}
\item{tables}{the tables of intrest.}
}
\value{
A list of dataframes, one for each table
}
\description{
Extracts variables from a zTree output file.
}
\examples{
\dontrun{
zTables <- read.zTree( "131126_0009.xls" , "contracts" )
zTables <- read.zTree( c("131126_0009.xls",
"131126_0010.xls"), c("globals","subjects", "contracts" ))
}
}
|
73a0a8fa835a6938e8288093467b5c57262e9cbe
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/pangoLayoutLineGetXRanges.Rd
|
d6376e9a115d7d19c7eca7c5b5d92dd85d01e941
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 1,610
|
rd
|
pangoLayoutLineGetXRanges.Rd
|
\alias{pangoLayoutLineGetXRanges}
\name{pangoLayoutLineGetXRanges}
\title{pangoLayoutLineGetXRanges}
\description{Gets a list of visual ranges corresponding to a given logical range.
This list is not necessarily minimal - there may be consecutive
ranges which are adjacent. The ranges will be sorted from left to
right. The ranges are with respect to the left edge of the entire
layout, not with respect to the line.}
\usage{pangoLayoutLineGetXRanges(object, start.index, end.index)}
\arguments{
\item{\verb{object}}{[\code{\link{PangoLayoutLine}}] a \code{\link{PangoLayoutLine}}}
\item{\verb{start.index}}{[integer] Start byte index of the logical range. If this value
is less than the start index for the line, then
the first range will extend all the way to the leading
edge of the layout. Otherwise it will start at the
leading edge of the first character.}
\item{\verb{end.index}}{[integer] Ending byte index of the logical range. If this value
is greater than the end index for the line, then
the last range will extend all the way to the trailing
edge of the layout. Otherwise, it will end at the
trailing edge of the last character.}
}
\value{
A list containing the following elements:
\item{\verb{ranges}}{[integer] An array of ranges; each range is given as a pair of start and end x coordinates, relative to the left edge of the entire layout.}
\item{\verb{n.ranges}}{[integer] The number of ranges stored in \code{ranges}.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
944d4f79321ca5718ceaebfcd5a6a51033c9e064
|
6ae574fc7fa9b720c361b9a47c51684cdd302f96
|
/man/summary.CADFtest.Rd
|
6bf0c0c79cab5c05619e9df386a4c23e6981b39d
|
[] |
no_license
|
cran/CADFtest
|
7e498795d51f7bf72394d634daf327426c68dde8
|
405d9d90df237f6c5dbaa4da644172c178c1f8ef
|
refs/heads/master
| 2021-01-21T12:23:33.687891
| 2017-06-02T16:10:31
| 2017-06-02T16:10:31
| 17,678,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,583
|
rd
|
summary.CADFtest.Rd
|
\name{summary.CADFtest}
\alias{summary.CADFtest}
\title{Function to print a summary of CADFtest objects}
\description{
This function conveniently prints the detailed results of the Covariate-Augmented Dickey Fuller
test carried out in \code{CADFtest}.
}
\usage{
\method{summary}{CADFtest}(object, ...)
}
\arguments{
\item{object}{an object belonging to the class \code{CADFtest}.}
\item{...}{currently not used.}
}
\value{
The function returns an object of class \code{CADFtestsummary} containing the main
results of the test.
\item{test.summary}{a matrix, containing the t-test statistic, the estimated value of \eqn{\rho^2},
the p-value of the test, the max lag used for the differenced dependent variable,
the max lag of the stationary covariate, the max lead of the stationary covariate.
When a standard ADF test is performed, only the t-test statistic, the p-value and
the max lag of the differenced dependent variable are reported.}
\item{model.summary}{the summary of the test model, in the usual form. However, note that the
p-value of the lagged dependent is computed under the null of a unit root. Furthermore,
differently from the common practice, the F-statistic refers to the joint significance of
the stationary regressors. If no stationary regressors are used (no lagged differences
of the dependent, no stationary covariates) then the F-statistic is not computed and a
\code{NA} value is returned.}
}
\author{Claudio Lupi}
\examples{
data(npext, package="urca")
ADFt <- CADFtest(npext$realgnp, type="trend")
summary(ADFt)
}
|
88b6b946ffa872365c494391193a6d4c906930f7
|
f9241a98cbb5bd22735f605b8e9194229100ec7c
|
/code/PBMC_extract.R
|
6139cab78f39b37c40e1f454703d5db7e31bd023
|
[] |
no_license
|
gvrocha/scVI-container
|
a5fc776d5e244589633a4a61f82a8bbe8c23de50
|
c8ab76574d0716e13af1010b3fbfd6e2b6c3c911
|
refs/heads/master
| 2020-12-29T06:41:30.112122
| 2020-02-29T01:42:57
| 2020-02-29T01:42:57
| 238,496,713
| 1
| 1
| null | 2020-02-29T01:12:36
| 2020-02-05T16:30:43
|
Dockerfile
|
UTF-8
|
R
| false
| false
| 1,240
|
r
|
PBMC_extract.R
|
# Extract the cleaned PBMC data (barcodes, QC metrics, batch labels, gene
# names, design matrix and Seurat-derived biology labels) from the saved scone
# objects and write each piece out as a CSV for downstream use.
library(Matrix)
library(scone)
library(plyr)

originals_dir <- "/base/scVI-data/reorganized/originals"
# NOTE(review): processed_dir points at the *same* "originals" directory as
# originals_dir — this looks like a copy-paste slip; confirm whether the
# outputs were meant to go to a separate ".../processed" path. The value is
# left unchanged here to preserve behavior.
processed_dir <- "/base/scVI-data/reorganized/originals"

# get cleaned data by merging genes from original scone pipeline and microarray
load(file.path(originals_dir, "PBMC/scVI_scone.rda"))  # provides scone_obj

barcodes <- scone_obj@colData@rownames
# The first nine colData entries are the per-cell QC metrics; bind them into
# one data frame keyed by metric name.
list_qc <- scone_obj@colData@listData[names(scone_obj@colData@listData)[1:9]]
qc.df <- do.call("cbind", lapply(list_qc, as.data.frame))
colnames(qc.df) <- names(scone_obj@colData@listData)[1:9]
batch <- get_batch(scone_obj)
gene_names <- scone_obj@NAMES
design <- get_design(scone_obj, method = "none,fq,qc_k=8,no_bio,no_batch")

write.csv(barcodes, file.path(processed_dir, "PBMC/barcodes.csv"))
write.csv(batch, file.path(processed_dir, "PBMC/batch.csv"))
write.csv(qc.df, file.path(processed_dir, "PBMC/full_qc.csv"))
write.csv(gene_names, file.path(processed_dir, "PBMC/gene_names.csv"))
write.csv(design, file.path(processed_dir, "PBMC/design.csv"))

# load cells information from SEURAT, included in the original scone object
# (this reloads scone_obj from a different .rda file)
load(file.path(originals_dir, "PBMC/scone_all_wposcon_extendim_biostrat2_q.rda"))
bio <- get_bio(scone_obj)
write.csv(bio, file.path(processed_dir, "PBMC/bio.csv"))
|
7e2d382c37ca1c677fbe589b83bad03425dbf2c3
|
987a8f2bad88f95ccc761c5a4a399eec60171123
|
/man/is_cpdag.Rd
|
6d9a1518797fb975ca06fc5d5521a464c27be4d0
|
[] |
no_license
|
annennenne/causalDisco
|
defe81fe780c4e23eb0de5a9aa5b418816ce7bfa
|
ab7c97e22dc611e436c65ec3cd492f9fd802160e
|
refs/heads/master
| 2022-07-10T02:53:20.375785
| 2022-06-27T10:15:17
| 2022-06-27T10:15:17
| 192,750,826
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 374
|
rd
|
is_cpdag.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{is_cpdag}
\alias{is_cpdag}
\title{Check for CPDAG}
\usage{
is_cpdag(amat)
}
\arguments{
\item{amat}{An adjacency matrix}
}
\value{
A logical.
}
\description{
Check for CPDAG
}
\details{
Check: Is adjacency matrix proper CPDAG? See \code{\link[pcalg]{isValidGraph}} for
definition.
}
|
205b093281de7d5dde11e5e053cb17af7f7ee37f
|
d5d52840065492b2b73172be6bd0738e6a708dff
|
/package/fwPackage_1.0_source/R/ClarkWest.R
|
a5b68b33746d5e9a31dded981e505fcbf53010d2
|
[] |
no_license
|
grayclhn/oos-overfit
|
9f3e1aca769d4bbba45b84044e609b37a843b9a0
|
779f466158fa4254a8845b47c5b003dedab69ccc
|
refs/heads/master
| 2021-01-17T08:20:04.806990
| 2017-05-05T16:18:09
| 2017-05-05T21:27:21
| 73,234,970
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
ClarkWest.R
|
# Clark-West test statistic(s) for comparing nested forecasting models.
# For each pair of forecast series, the adjusted loss differential
#   e_null^2 - e_alt^2 + (f_null - f_alt)^2
# is computed elementwise, and a one-sample t statistic is taken over it.
# Relies on model.null(), model.alt(), forecasts() and ForecastErrors()
# defined elsewhere in this package. Extra arguments are forwarded to t.test().
ClarkWest <- function(x, ...) {
  fit.null <- model.null(x)
  fit.alt <- model.alt(x)
  # Adjusted squared-error differentials, one entry per forecast series.
  adj.loss <- mapply(
    function(f.null, e.null, f.alt, e.alt) {
      e.null^2 - e.alt^2 + (f.null - f.alt)^2
    },
    f.null = forecasts(fit.null),
    f.alt = forecasts(fit.alt),
    e.null = ForecastErrors(fit.null),
    e.alt = ForecastErrors(fit.alt)
  )
  # One t statistic per series; drop names for a plain numeric result.
  unname(sapply(adj.loss, function(d) t.test(d, ...)$statistic))
}
|
8cc6ee771ba058e2d22e2c61736cdd97149b1bc8
|
036983f65dc517593f84d44cb14a712ea0687225
|
/homeworks/homework_10/Sinski_Bartosz/server.R
|
99a4a05fe08700c96238d710b730defc9590fa18
|
[] |
no_license
|
iketutg/2021Z-DataVisualizationTechniques
|
027c0228ed95e37ddd73500909117449f6b87e25
|
125077c8b02426b779e351324c3c6d4e96ad64c1
|
refs/heads/master
| 2023-03-19T08:59:42.177496
| 2021-02-23T20:08:31
| 2021-02-23T20:08:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,032
|
r
|
server.R
|
library(plotly)
library(shiny)
library(dplyr)
library(ggplot2)
library(ggthemes)

source("readingData.R")  # provides Mt (maths) and St (science) data frames

# Build the achievement bar chart for one subject's data set.
# data         -- long-format data with columns Country, year, grade, value,
#                 where grade is "value4" (4th grade) or "value8" (8th grade)
# grade_choice -- UI selection: 1 = 4th grade only, 2 = 8th grade only,
#                 anything else = both grades side by side
# country_sel  -- selected country name
# title        -- plot title (subject name)
# The maths and science render blocks previously duplicated this logic
# verbatim; it is factored out here once.
build_achievement_plot <- function(data, grade_choice, country_sel, title) {
  if (grade_choice == 1) {
    p <- ggplot(filter(data, Country == country_sel, grade == "value4")) +
      geom_bar(aes(factor(year, levels = sort(year, decreasing = TRUE)), value),
               fill = "darkorange3", stat = "identity")
  } else if (grade_choice == 2) {
    p <- ggplot(filter(data, Country == country_sel, grade == "value8")) +
      geom_bar(aes(factor(year, levels = sort(year, decreasing = TRUE)), value),
               fill = "steelblue", stat = "identity")
  } else {
    # Both grades: dodge the bars and map fill to the grade column.
    p <- ggplot(filter(data, Country == country_sel)) +
      geom_bar(aes(x = factor(year, levels = sort(unique(year), decreasing = TRUE)),
                   y = value, fill = grade),
               stat = "identity", position = "dodge") +
      scale_fill_manual(labels = c("4th", "8th"),
                        values = c("darkorange3", "steelblue"))
  }
  p +
    ggtitle(title) +
    labs(x = "Year", y = "Average Scale Score", fill = "Grade") +
    theme_bw() +
    theme(legend.position = "bottom")
}

# Shiny server function: this must remain the last expression in the file.
function(input, output, session) {
  output$plot1 <- renderPlot({
    build_achievement_plot(Mt, input$grade, input$Countries,
                           "Mathematics Achievement")
  })
  output$plot2 <- renderPlot({
    build_achievement_plot(St, input$grade, input$Countries,
                           "Science Achievement")
  })
}
|
72252116a50c6d36075c9c3d454d47c8bd9339c0
|
2d2b923fb73cbe10dc958f0f44f278f607b05e94
|
/tests/testthat/test-DataAccess_CompaniesService.R
|
ca092149bced611e0c043997efcf90daa415a68f
|
[
"MIT"
] |
permissive
|
nik01010/rBuildReleaseTest
|
7483f23a7615fca4ea45b5bd11a41b7271d09638
|
f4b7fb6ba2fa9a9aa4d12892daa4c1bf51d85bfa
|
refs/heads/develop
| 2021-06-28T16:46:53.062162
| 2021-06-25T18:36:01
| 2021-06-25T18:36:01
| 219,200,692
| 1
| 0
|
NOASSERTION
| 2020-04-13T12:30:07
| 2019-11-02T19:04:18
|
R
|
UTF-8
|
R
| false
| false
| 9,190
|
r
|
test-DataAccess_CompaniesService.R
|
# Integration tests for CompaniesService, run against a live MongoDB instance.
# The connection string comes from the MONGO_CS_UNIT environment variable, so
# these are not pure unit tests: a reachable database is required.
options(stringsAsFactors = FALSE)

context("CompaniesService")

# Shared test collection; each test drops and repopulates it, so these tests
# must not run concurrently against the same database.
mongoConnectionString <- Sys.getenv("MONGO_CS_UNIT")
testContext <- rBuildReleaseTest::ApplicationDbContext$new(
  connectionString = mongoConnectionString,
  database = "IntegrationTest",
  collection = "Companies",
  verbose = FALSE
)

# Service under test, wired to the test context and the schema validator.
companiesSchemaValidator <- rBuildReleaseTest::CompaniesSchemaValidator()
testCompaniesService <- rBuildReleaseTest::CompaniesService$new(
  dbContext = testContext,
  schemaValidator = companiesSchemaValidator
)
# --- Read operations ---------------------------------------------------------

# The count should equal the number of rows inserted into the collection.
test_that("GetCompaniesCount_ShouldReturnCorrectCount_WhenCalled", {
  # Arrange: start from an empty collection and seed ten rows.
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c(letters[1:10]),
    value = c(1:10)
  )
  testContext$DbConnection$insert(data = companies)
  expectedCount <- nrow(companies)
  # Act
  result <- testCompaniesService$GetCompaniesCount()
  # Assert
  expect_equal(expectedCount, result)
})

# GetCompanies should round-trip the inserted data frame unchanged.
test_that("GetCompanies_ShouldReturnCorrectListOfCompanies_WhenCalled", {
  # Arrange
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c(letters[1:10]),
    value = c(1:10)
  )
  testContext$DbConnection$insert(data = companies)
  # Act
  result <- testCompaniesService$GetCompanies()
  # Assert
  expect_equal(companies, result)
})

# GetCompany should return only the single row whose name matches.
test_that("GetCompany_ShouldReturnCorrectCompany_IfCalledWithValidName", {
  # Arrange
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c("Company1", "Company2", "Company3"),
    address = c("Address1", "Address2", "Address3"),
    value = c(1, 2, 3)
  )
  testContext$DbConnection$insert(data = companies)
  companyName <- "Company2"
  expectedCompany <- companies %>%
    dplyr::filter(name == companyName)
  # Act
  result <- testCompaniesService$GetCompany(companyName = companyName)
  # Assert
  expect_equal(expectedCompany, result)
})
# GetCompanyId should resolve an existing company name to a non-null id.
test_that("GetCompanyId_ShouldReturnId_IfCalledWithValidName", {
  # Arrange
  testContext$DbConnection$drop()
  companyToFind <- "Company2"
  companies <- data.frame(
    name = c("Company1", companyToFind, "Company3"),
    address = c("Address1", "Address2", "Address3"),
    value = c(1, 2, 3)
  )
  testContext$DbConnection$insert(data = companies)
  # Act
  result <- testCompaniesService$GetCompanyId(companyName = companyToFind)
  # Assert: only existence is checked, since MongoDB generates the id value.
  expect_true(!is.null(result))
})

# Oldest companies are the `limit` rows with the smallest founded_year.
test_that("GetOldestCompanies_ShouldReturnCorrectList_IfCalledWithValidLimit", {
  # Arrange
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c("Company1", "Company2", "Company3", "Company4"),
    address = c("Address1", "Address2", "Address3", "Address4"),
    founded_year = c(2005, 2004, 1999, 2019)
  )
  testContext$DbConnection$insert(data = companies)
  limit <- 2
  expectedCompanies <- companies %>%
    dplyr::arrange(founded_year) %>%
    dplyr::slice(1:limit)
  # Act
  result <- testCompaniesService$GetOldestCompanies(limit = limit)
  # Assert
  expect_equal(expectedCompanies, result)
})

# Rows with a missing founded_year must be excluded from the result entirely.
test_that("GetOldestCompanies_ShouldFilterOutNulls_WhenCalled", {
  # Arrange: every founded_year is NA, so nothing should come back.
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c("Company1", "Company2", "Company3", "Company4"),
    address = c("Address1", "Address2", "Address3", "Address4"),
    founded_year = c(NA, NA, NA, NA)
  )
  limit <- nrow(companies)
  testContext$DbConnection$insert(data = companies)
  expectedCompanies <- data.frame()
  # Act
  result <- testCompaniesService$GetOldestCompanies(limit = limit)
  # Assert
  expect_equal(expectedCompanies, result)
})

# The aggregation should count companies grouped by founded_year.
test_that("GetNumberOfCompaniesFoundedPerYear_ShouldReturnCorrectCounts_WhenCalled", {
  # Arrange
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c("Company2007A", "Company2007B", "Company2009A", "Company2009B", "Company2009C"),
    founded_year = c(2007, 2007, 2009, 2009, 2009),
    another_value = c(1, 2, 3, 4, 5)
  )
  testContext$DbConnection$insert(data = companies)
  expectedCounts <- companies %>%
    dplyr::group_by(founded_year) %>%
    dplyr::count(name = "count") %>%
    dplyr::ungroup() %>%
    as.data.frame()
  # Act
  result <- testCompaniesService$GetNumberOfCompaniesFoundedPerYear()
  # Assert
  expect_equal(expectedCounts, result)
})
# --- Create ------------------------------------------------------------------

# A valid JSON payload should insert exactly one document with no write errors.
test_that("CreateCompany_ShouldCreateNewCompany_WhenCalledWithValidJson", {
  # Arrange (glue doubles the braces, so {{ }} emits literal JSON braces)
  testContext$DbConnection$drop()
  companyName <- "TestCompany"
  companyFoundedYear <- 2000
  newCompany <- glue::glue('{{
    "name": "{companyName}",
    "founded_year": {companyFoundedYear}
  }}')
  expectedCompanies <- data.frame(
    name = companyName,
    founded_year = companyFoundedYear
  )
  # Act
  result <- testCompaniesService$CreateCompany(companyDetails = newCompany)
  # Assert
  expect_equal(nrow(expectedCompanies), result$nInserted)
  expect_equal(list(), result$writeErrors)
  actualCompanies <- testCompaniesService$GetCompanies()
  expect_equal(expectedCompanies, actualCompanies)
})

# JSON with wrong key casing must be rejected by the schema validator, and
# nothing should be written to the collection.
test_that("CreateCompany_ShouldThrowError_IfCalledWithInvalidJsonKeys", {
  # Arrange
  testContext$DbConnection$drop()
  companyName <- "TestCompany"
  companyFoundedYear <- 2000
  invalidCompany <- glue::glue('{{
    "Name": "{companyName}",
    "FoundedYear": {companyFoundedYear}
  }}')
  # Act
  result <- expect_error(testCompaniesService$CreateCompany(companyDetails = invalidCompany))
  # Assert
  expect_s3_class(result, "error")
  expect_equal("New company is not valid.", result$message)
  actualCompanies <- testCompaniesService$GetCompanies()
  expect_equal(0, nrow(actualCompanies))
})

# Creating a company whose name already exists must fail and leave the single
# original row untouched.
test_that("CreateCompany_ShouldThrowError_IfCompanyAlreadyExists", {
  # Arrange
  testContext$DbConnection$drop()
  companyName <- "TestCompany"
  companyFoundedYear <- 2000
  company <- glue::glue('{{
    "name": "{companyName}",
    "founded_year": {companyFoundedYear}
  }}')
  testCompaniesService$CreateCompany(companyDetails = company)
  duplicateCompany <- company
  expectedErrorMessage <- glue::glue("Company {companyName} already exists.")
  # Act
  result <- expect_error(testCompaniesService$CreateCompany(companyDetails = duplicateCompany))
  # Assert
  expect_s3_class(result, "error")
  expect_equal(expectedErrorMessage, result$message)
  actualCompanies <- testCompaniesService$GetCompanies()
  expect_equal(1, nrow(actualCompanies))
})
# --- Edit --------------------------------------------------------------------

# Editing an existing company should update it in place (matched and modified,
# with no upsert).
test_that("EditCompany_ShouldUpdateCompanyDetails_IfCalledWithCorrectDetails", {
  # Arrange
  testContext$DbConnection$drop()
  companyName <- "CompanyToEdit"
  oldCompanyFoundedYear <- 2019
  oldCompanyDetails <- glue::glue('{{
    "name": "{companyName}",
    "founded_year": {oldCompanyFoundedYear}
  }}')
  testCompaniesService$CreateCompany(companyDetails = oldCompanyDetails)
  newCompanyFoundedYear <- 2020
  newCompanyDetails <- glue::glue('{{
    "name": "{companyName}",
    "founded_year": {newCompanyFoundedYear}
  }}')
  expectedCompany <- data.frame(
    name = companyName,
    founded_year = newCompanyFoundedYear
  )
  # Act
  result <- testCompaniesService$EditCompany(companyName = companyName, newCompanyDetails = newCompanyDetails)
  # Assert
  expect_equal(nrow(expectedCompany), result$modifiedCount)
  expect_equal(nrow(expectedCompany), result$matchedCount)
  expect_equal(0, result$upsertedCount)
  actualCompany <- testCompaniesService$GetCompany(companyName = companyName)
  expect_equal(expectedCompany, actualCompany)
})

# Editing a non-existent company must raise a descriptive error.
test_that("EditCompany_ShouldThrowError_IfCalledForInvalidCompany", {
  # Arrange
  testContext$DbConnection$drop()
  invalidCompany <- "TestCompany"
  expectedError <- glue::glue('Company with name {invalidCompany} does not exist.')
  # Act
  result <- expect_error(
    testCompaniesService$EditCompany(
      companyName = invalidCompany,
      newCompanyDetails = "{}"
    )
  )
  # Assert
  expect_s3_class(result, "error")
  expect_equal(expectedError, result$message)
})

# An ambiguous name (duplicate rows) must abort the edit with an error.
test_that("EditCompany_ShouldThrowError_IfMultipleCompaniesExistWithSameName", {
  # Arrange
  testContext$DbConnection$drop()
  duplicateCompanyName <- "Company1"
  companies <- data.frame(
    name = c(duplicateCompanyName, duplicateCompanyName),
    founded_year = c(2018, 2019)
  )
  testContext$DbConnection$insert(data = companies)
  expectedError <- glue::glue('Multiple companies exist with the name {duplicateCompanyName}.')
  # Act
  result <- expect_error(
    testCompaniesService$EditCompany(
      companyName = duplicateCompanyName,
      newCompanyDetails = "{}"
    )
  )
  # Assert
  expect_s3_class(result, "error")
  expect_equal(expectedError, result$message)
})
# --- Delete ------------------------------------------------------------------

# Deleting by name should remove only the matching row.
test_that("DeleteCompany_ShouldRemoveRecord_IfCalledForExistingCompany", {
  # Arrange
  testContext$DbConnection$drop()
  companies <- data.frame(
    name = c("Company1", "Company2", "Company3"),
    founded_year = c(2017, 2018, 2019)
  )
  testContext$DbConnection$insert(data = companies)
  companyToDelete <- "Company2"
  expectedCompanies <- companies %>%
    dplyr::filter(name != companyToDelete)
  # Act
  result <- testCompaniesService$DeleteCompany(companyName = companyToDelete)
  # Assert: remaining rows are compared; the delete result itself is unused.
  actualCompanies <- testCompaniesService$GetCompanies()
  expect_equal(expectedCompanies, actualCompanies)
})

# Teardown: release the connection and service objects created at the top of
# the file.
rm(testContext)
rm(testCompaniesService)
|
3788931187a9a990709277d3e38fe6a08b3b2390
|
1d2128e15f37b04e0d5ac7ec25458b852ad08b16
|
/plot2.R
|
362ecf4840a8990a3c2135d624bec34eebb953cf
|
[] |
no_license
|
jjaimon/ExData_Plotting1
|
0ec86cef5af68453df2bbdf8ad9fa701d8ea13d9
|
bc507f4ac1a7c0cceba24c026896ade05970b069
|
refs/heads/master
| 2021-01-23T23:38:57.611425
| 2014-06-05T19:10:32
| 2014-06-05T19:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
plot2.R
|
#
# Course Project 1: Exploratory Data Analysis
# Plot 2: Global Active Power over time, drawn as a line chart.
# (The original header said "Plot 1. Histogram", copied from plot1.R.)
#
# data.prep.R is expected to create data.df with DTime and Global_active_power.
source("data.prep.R")
#
# Draw directly to the file
#
png(filename="plot2.png", width=480, height=480, units = "px")
#
# Draw a line chart: empty frame first (type="n"), then the line
#
plot(data.df$DTime, data.df$Global_active_power, type="n", ylab="Global Active Power (kilowatts)", xlab="")
lines(data.df$DTime, data.df$Global_active_power)
dev.off()
|
803ab271f7fa25e7a1df5ec1a7669a2a74ae3eb9
|
de98b3b57a8d2c588f8d78117626cf77a0e61400
|
/elasticities.R
|
654b5a6c7bd16b8f8066384c118da40c46e416c4
|
[] |
no_license
|
rgr02/Microeconometrics
|
8c09972f9bb7994d53cd547e3073d662f95cc89d
|
cbb69824e734df582b5c90c3b6a011d18bb57f6d
|
refs/heads/master
| 2021-07-09T13:33:54.169864
| 2017-10-06T09:00:12
| 2017-10-06T09:00:12
| 105,984,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,624
|
r
|
elasticities.R
|
# ------------------------------------------------------------------------------
# Nonparametric expenditure elasticities of budget shares
# ------------------------------------------------------------------------------
# Kernel bandwidth selection. Two alternatives were tried and are kept for
# reference; the selector in use is density()'s default bandwidth plus a
# smoothing margin of 0.1.
# kernel.bandwidth <- ((4*sd(trimmed.final$lpcexp)/(3*nrow(trimmed.final)))^5)^(1/5)  # Silverman's rule of thumb
# gridsize <- nrow(trimmed.final)
# kernel.bandwidth <- dpik(trimmed.final$lpcexp, scalest = "stdev", level = 2L, kernel = "normal", gridsize = gridsize) + 0.01
density.lpcexp <- density(trimmed.final$lpcexp)
kernel.bandwidth <- density.lpcexp$bw + 0.1

# Fit a local-polynomial regression of a budget share on log per-capita
# expenditure and compute the pointwise elasticity 1 + w'(x)/w(x).
# Returns the fitted curve (est), its first derivative (deriv), and the
# elasticity, all on locpoly's default evaluation grid.
np_share_fit <- function(share) {
  est <- locpoly(trimmed.final$lpcexp, share, degree = 2,
                 bandwidth = kernel.bandwidth)
  deriv <- locpoly(trimmed.final$lpcexp, share, drv = 1, degree = 2,
                   bandwidth = kernel.bandwidth)
  list(est = est, deriv = deriv, elasticity = 1 + (deriv$y / est$y))
}

# Nonparametric estimate: food share
fit.fdsh <- np_share_fit(trimmed.final$adj.fdsh)
np.est.fdsh <- fit.fdsh$est
np.deriv.fdsh <- fit.fdsh$deriv
fdsh.elasticity <- fit.fdsh$elasticity

# Nonparametric estimate: milk share
fit.mksh <- np_share_fit(trimmed.final$adj.mksh)
np.est.mksh <- fit.mksh$est
np.deriv.mksh <- fit.mksh$deriv
mksh.elasticity <- fit.mksh$elasticity

# Nonparametric estimate: adult-goods share
fit.adsh <- np_share_fit(trimmed.final$adj.adsh)
np.est.adsh <- fit.adsh$est
np.deriv.adsh <- fit.adsh$deriv
adsh.elasticity <- fit.adsh$elasticity

# Nonparametric estimate: child-goods share
fit.chsh <- np_share_fit(trimmed.final$adj.chsh)
np.est.chsh <- fit.chsh$est
np.deriv.chsh <- fit.chsh$deriv
chsh.elasticity <- fit.chsh$elasticity

summary(chsh.elasticity)

# Scatter plot of a pointwise elasticity curve against log expenditure,
# with a horizontal reference line at an elasticity of one.
np_elast_plot <- function(est, elasticity, colour, ylims, title) {
  ggplot(as.data.frame(est)) +
    geom_point(aes(est$x, elasticity), col = colour) +
    geom_hline(yintercept = 1) +
    xlim(4, 7.15) +
    ylim(ylims[1], ylims[2]) +
    xlab("Log Per Capita Expenditure") +
    ylab("") +
    ggtitle(title) +
    theme_minimal() +
    theme(plot.title = element_text(hjust = 0.5))
}

np.elast.fdsh <- np_elast_plot(np.est.fdsh, fdsh.elasticity, "blue",
                               c(0.3, 1),
                               "Nonparametric Elasticity of Food Expenditure")
np.elast.mksh <- np_elast_plot(np.est.mksh, mksh.elasticity, "green",
                               c(0, 4),
                               "Nonparametric Elasticity of Milk Expenditure")
np.elast.adsh <- np_elast_plot(np.est.adsh, adsh.elasticity, "red",
                               c(-0.5, 1.5),
                               "Elasticity of Adult Good Expenditure")
np.elast.chsh <- np_elast_plot(np.est.chsh, chsh.elasticity, "orange",
                               c(-3, 3),
                               "Elasticity of Child Good Expenditure")

multiplot(np.elast.fdsh, np.elast.chsh,
          np.elast.mksh, np.elast.adsh, cols = 2)
|
23b1071180cc085bc07f8c05c5669139b7dba5d7
|
1424a179e9f5695a4ce645073391ca4c2cad9c18
|
/wilke-purl/wilke-multi-panel_figures.R
|
ee54c34be37e81eb69d89276dc52354812b6428e
|
[
"MIT"
] |
permissive
|
datalorax/psych-seminar21
|
229f439ad09f8f156bf53e68d2ef874cb856fc37
|
ab6d504737e862eeaddf035048283f8cc9dd231d
|
refs/heads/main
| 2023-04-07T08:48:13.773563
| 2021-04-07T23:22:10
| 2021-04-07T23:22:10
| 353,508,145
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,049
|
r
|
wilke-multi-panel_figures.R
|
## ----echo = FALSE, message = FALSE, warning = FALSE----------------------
# run setup script
source(here::here("wilke-purl", "_common.R"))
library(forcats)
library(stringr)
library(ggridges)
## ----titanic-passenger-breakdown, fig.width = 5, fig.asp = 3/4, fig.cap = '(ref:titanic-passenger-breakdown)'----
# Faceted bar chart of Titanic passenger counts by sex, with class on the
# facet rows and died/survived on the columns. theme_dviz_hgrid() comes from
# the support code loaded via _common.R (not defined in this file).
titanic %>% mutate(surv = ifelse(survived == 0, "died", "survived")) %>%
  ggplot(aes(sex, fill = sex)) + geom_bar() +
  facet_grid(class ~ surv, scales = "free_x") +
  scale_x_discrete(name = NULL) +
  scale_y_continuous(limits = c(0, 195), expand = c(0, 0)) +
  scale_fill_manual(values = c("#D55E00D0", "#0072B2D0"), guide = "none") +
  theme_dviz_hgrid(rel_small = 1, font_family = "Roboto Light") +
  theme(axis.line = element_blank(),
        axis.ticks.length = grid::unit(0, "pt"),
        axis.ticks = element_blank(),
        axis.text.x = element_text(margin = margin(7, 0, 0, 0)),
        strip.text = element_text(margin = margin(3.5, 3.5, 3.5, 3.5)),
        strip.background = element_rect(fill = "grey80", colour = "grey80",
                                        linetype = 1, size = 0.25),
        panel.border = element_rect(colour = "grey80", fill = NA, linetype = 1,
                                    size = 1.))
## ----movie-rankings, fig.width = 8.5, fig.asp = 1, fig.cap = '(ref:movie-rankings)'----
# Small multiples: average movie rating vs. number of votes (log x axis),
# one panel per year after 1905, each with its own linear trend line.
library(ggplot2movies)
ggplot(filter(movies, year > 1905), aes(y = rating, x = votes)) +
  geom_point(color = "#0072B250", size = 0.1) +
  geom_smooth(method = 'lm', se = FALSE, size = 1.25, color = '#D55E00',
              fullrange = TRUE) +
  scale_x_log10(labels = label_log10, name = "number of votes") +
  scale_y_continuous(limits = c(0, 10), expand = c(0, 0),
                     breaks = c(0, 5, 10), name = "average rating") +
  facet_wrap(~year, ncol = 10) +
  theme_dviz_grid(10, rel_small = 1, line_size = 0.25, font_family = "Roboto Light") +
  theme(strip.text = element_text(margin = margin(3.5, 3.5, 3.5, 3.5)),
        panel.border = element_rect(colour = "grey80", fill = NA, linetype = 1,
                                    size = 1.))
## ----BA-degrees-variable-y-lims, fig.width = 8.5, fig.asp = 0.8, fig.cap = '(ref:BA-degrees-variable-y-lims)'----
# Bad example: line charts of degree share by field with per-panel ("free")
# y limits, which makes the panels incomparable. stamp_bad() marks the
# figure as a negative example (helper from _common.R).
BA_degrees %>%
  mutate(field = ifelse(field == "Communication, journalism, and related programs",
                        "Communication, journalism, and related", field)) -> BA_df
BA_df %>% group_by(field) %>%
  summarize(mean_perc = mean(perc)) %>%
  arrange(desc(mean_perc)) -> BA_top
# Keep only the fields whose mean share exceeds 4%.
top_fields <- filter(BA_top, mean_perc>0.04)$field
BA_top_degrees <- filter(BA_df, field %in% top_fields) %>%
  mutate(field = factor(field, levels = top_fields)) %>%
  arrange(field)
p <- ggplot(BA_top_degrees, aes(year, perc)) +
  geom_line(color = "#0072B2") +
  facet_wrap(~field, labeller = label_wrap_gen(width = 25), ncol = 3,
             scales = "free") +
  ylab("percent") +
  theme_dviz_hgrid(font_family = "Roboto Light") +
  theme(strip.text = element_text(margin = margin(7, 7, 3, 7)),
        panel.spacing.x = grid::unit(14, "pt"),
        plot.margin = margin(3.5, 14, 3.5, 0))
stamp_bad(p)
## ----BA-degrees-fixed-y-lims, fig.width = 8.5, fig.asp = 0.8, fig.cap = '(ref:BA-degrees-fixed-y-lims)'----
# Good version of the same figure: shared, fixed y limits across panels.
ggplot(BA_top_degrees, aes(year, perc)) +
  geom_line(color = "#0072B2") +
  facet_wrap(~field, labeller = label_wrap_gen(width = 25), ncol = 3,
             scales = "free") +
  scale_y_continuous(limits = c(0, 0.241), expand = c(0, 0),
                     name = "percent") +
  theme_dviz_hgrid(font_family = "Roboto Light") +
  theme(strip.text = element_text(margin = margin(7, 7, 3, 7)),
        panel.spacing.x = grid::unit(14, "pt"),
        plot.margin = margin(3.5, 14, 3.5, 0))
## Always arrange the panels in a small multiples plot in a meaningful and logical order.
## ----BA-degrees-compound, fig.asp = 0.4, fig.width = 8.5, fig.cap = '(ref:BA-degrees-compound)'----
# Compound figure: (a) total degrees awarded over time, (b) slope chart of
# degree shares between 1970-71 and 2014-15 for the largest fields.
BA_degrees %>%
  mutate(field = ifelse(field == "Communication, journalism, and related programs",
                        "Communication, journalism, and related", field)) -> BA_df
BA_df %>% group_by(year) %>%
  summarize(total = sum(count)) -> BA_totals
textcol <- "gray30"
p1 <- ggplot(BA_totals, aes(year, total/1e6)) +
  geom_density_line(stat = "identity", color = "#0072B2",
                    fill = desaturate(lighten("#0072B280", .3), .4)) +
  scale_y_continuous(limits = c(0, 2.05), expand = c(0, 0),
                     name = "degrees awarded (millions)") +
  scale_x_continuous(limits = c(1970, 2016), expand = c(0, 0), name = NULL) +
  theme_dviz_hgrid(font_family = "Roboto Light") +
  theme(axis.title = element_text(color = textcol),
        axis.text = element_text(color = textcol),
        plot.margin = margin(3, 7, 3, 0))
BA_df %>% group_by(field) %>%
  summarize(mean_perc = mean(perc)) %>%
  arrange(desc(mean_perc)) -> BA_top
# Slope chart uses only fields with a mean share above 5.5%.
top_fields <- filter(BA_top, mean_perc>0.055)$field
BA_top_pairs <- filter(BA_df, field %in% top_fields,
                       year %in% c(1971, 2015)) %>%
  mutate(field_wrapped = str_wrap(field, 25))
p2 <- ggplot(BA_top_pairs, aes(x = year, y = perc)) +
  geom_line(aes(group = field), color = "gray60") +
  geom_point(fill = "#0072B2", color = "white", shape = 21, size = 3, stroke = 1.5) +
  scale_x_continuous(limits = c(1971, 2015), breaks = c(1971, 2015),
                     labels = c("1970-71", "2014-15"),
                     expand = expand_scale(mult = c(0.1, 0.04)),
                     name = NULL,
                     position = "top") +
  # Secondary axis places the field labels next to the 2015 endpoints.
  scale_y_continuous(limits = c(0.02, 0.22), expand = c(0, 0), name = "percent of degrees",
                     sec.axis = dup_axis(breaks = filter(BA_top_pairs, year == 2015)$perc + 0.0001,
                                         labels = filter(BA_top_pairs, year == 2015)$field_wrapped,
                                         name = NULL)) +
  theme_dviz_open(font_family = "Roboto Light") +
  theme(axis.line.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.title.y = element_text(color = textcol),
        axis.text.y = element_text(color = textcol),
        axis.line.y.left = element_line(color = textcol),
        axis.text.y.right = element_text(hjust = 0, vjust = .5,
                                         margin = margin(0, 0, 0, 0),
                                         color = "black",
                                         lineheight = 0.8
        ),
        axis.line.y.right = element_blank(),
        axis.ticks.y.right = element_blank(),
        plot.margin = margin(3, 7, 3, 0))
plot_grid(p1, p2, labels = "auto", rel_widths = c(1.2, 1), align = 'h')
## ----BA-degrees-compound-bad, fig.asp = 0.4, fig.width = 8.5, fig.cap = '(ref:BA-degrees-compound-bad)'----
# Ugly variant: oversized, mispositioned panel labels in a decorative font.
stamp_ugly(plot_grid(p1, p2, labels = "AUTO", rel_widths = c(1.2, 1), align = 'h',
                     label_fontfamily = "Palatino", label_fontface = "bold",
                     label_x = 0.8,
                     label_y = 0.2,
                     label_size = 23))
## ----athletes-composite-inconsistent, fig.asp = 0.75, fig.cap = '(ref:athletes-composite-inconsistent)'----
# Bad example: a three-panel figure whose panels use inconsistent colors and
# legend treatments for the same sex variable.
# Restrict to sports practiced by both sexes, merging the two track events.
male_sport <- unique(filter(Aus_athletes, sex=="m")$sport)
female_sport <- unique(filter(Aus_athletes, sex=="f")$sport)
both_sport <- male_sport[male_sport %in% female_sport]
athletes_df <- filter(Aus_athletes, sport %in% both_sport) %>%
  mutate(sport = case_when(sport == "track (400m)" ~ "track",
                           sport == "track (sprint)" ~ "track",
                           TRUE ~ sport),
         sex = factor(sex, levels = c("f", "m")))
p1 <- ggplot(athletes_df, aes(x = sex)) +
  geom_bar(fill = "#56B4E9E0") +
  scale_y_continuous(limits = c(0, 95), expand = c(0, 0), name = "number") +
  scale_x_discrete(name = NULL, labels = c("female", "male")) +
  theme_dviz_hgrid(12, rel_small = 1, font_family = "Roboto Light") +
  theme(axis.ticks.x = element_blank(),
        #axis.ticks.length = grid::unit(0, "pt"),
        plot.margin = margin(3, 6, 0, 0))
p2 <- ggplot(athletes_df, aes(x = rcc, y = wcc, shape = sex, color = sex, fill = sex)) +
  geom_point(size = 2.5) +
  scale_x_continuous(limits = c(3.8, 6.75), name = NULL) +
  scale_y_continuous(limits = c(2.2, 11.), expand = c(0, 0), name = "WBC count") +
  scale_shape_manual(values = c(21, 22),
                     labels = c("female ", "male"), name = NULL,
                     guide = guide_legend(direction = "horizontal")) +
  scale_color_manual(values = c("#CC79A7", "#56B4E9"), name = NULL,
                     labels = c("female ", "male"),
                     guide = guide_legend(direction = "horizontal")) +
  scale_fill_manual(values = c("#CC79A780", "#56B4E980"), name = NULL,
                    labels = c("female ", "male"),
                    guide = guide_legend(direction = "horizontal")) +
  theme_dviz_hgrid(12, rel_small = 1, font_family = "Roboto Light") +
  theme(legend.position = c(1, .1),
        legend.justification = "right",
        legend.box.background = element_rect(fill = "white", color = "white"),
        plot.margin = margin(3, 0, 0, 0))
# Shared x-axis title drawn manually across the first row of panels.
p_row <- plot_grid(p1, p2, labels = "auto", align = 'h', rel_widths = c(0.7, 1)) +
  draw_text("RBC count", x = 1, y = 0, size = 12, hjust = 1, vjust = -0.02,
            family = dviz_font_family) +
  theme(plot.margin = margin(0, 0, 6, 0))
p3 <- ggplot(athletes_df, aes(x = sport, y = pcBfat, color = fct_relevel(sex, "m"),
                              fill = fct_relevel(sex, "m"))) +
  geom_boxplot(width = 0.5) +
  scale_color_manual(values = c("#009E73", "#56B4E9"), name = NULL,
                     labels = c("male", "female")) +
  scale_fill_manual(values = c("#009E7340", "#56B4E940"), name = NULL,
                    labels = c("male", "female")) +
  scale_x_discrete(name = NULL) +
  scale_y_continuous(name = "% body fat") +
  theme_dviz_hgrid(12, rel_small = 1, font_family = "Roboto Light") +
  theme(axis.line.x = element_blank(),
        axis.ticks.x = element_blank(),
        #axis.ticks.length = grid::unit(0, "pt")
  )
stamp_bad(plot_grid(p_row, p3, ncol = 1, labels = c("", "c")) +
            theme(plot.margin = margin(6, 12, 0, 0)))
## ----athletes-composite-good, fig.asp = 0.75, fig.cap = '(ref:athletes-composite-good)'----
# Good version of the athletes composite: one consistent color mapping for
# sex across all three panels and a single shared legend.
# Note: reuses athletes_df defined in the previous chunk.
p1 <- ggplot(athletes_df, aes(x = sex, fill = sex)) +
  geom_bar() +
  scale_y_continuous(limits = c(0, 95), expand = c(0, 0), name = "number") +
  scale_x_discrete(name = NULL, labels = c("female", "male")) +
  scale_fill_manual(values = c("#D55E00D0", "#0072B2D0"), guide = "none") +
  theme_dviz_hgrid(12, rel_small = 1, font_family = "Roboto Light") +
  theme(#axis.line.x = element_blank(),
    axis.ticks.x = element_blank(),
    #axis.ticks.length = grid::unit(0, "pt"),
    plot.margin = margin(3, 6, 0, 0))
p2 <- ggplot(athletes_df, aes(x = rcc, y = wcc, fill = sex)) +
  geom_point(pch = 21, color = "white", size = 2.5) +
  scale_x_continuous(limits = c(3.8, 6.75), name = NULL) +
  scale_y_continuous(limits = c(2.2, 11.), expand = c(0, 0), name = "WBC count") +
  scale_fill_manual(values = c("#D55E00D0", "#0072B2D0"), guide = "none") +
  theme_dviz_hgrid(12, rel_small = 1, font_family = "Roboto Light") +
  theme(plot.margin = margin(3, 0, 0, 0))
p_row <- plot_grid(p1, p2, labels = "auto", align = 'h', rel_widths = c(0.7, 1)) +
  draw_text("RBC count", x = 1, y = 0, size = 12, hjust = 1, vjust = -0.02,
            family = dviz_font_family) +
  theme(plot.margin = margin(0, 0, 6, 0))
# Patch the boxplot geom so its legend key is drawn as a filled polygon.
GeomBP <- GeomBoxplot
GeomBP$draw_key <- draw_key_polygon
p3 <- ggplot(athletes_df, aes(x = sport, y = pcBfat, color = sex, fill = sex)) +
  stat_boxplot(width = 0.5, geom = GeomBP) +
  scale_color_manual(values = c("#D55E00", "#0072B2"), name = NULL,
                     labels = c("female ", "male")) +
  scale_fill_manual(values = c("#D55E0040", "#0072B240"), guide = "none") +
  scale_x_discrete(name = NULL) +
  scale_y_continuous(name = "% body fat") +
  guides(color = guide_legend(override.aes = list(fill = c("#D55E00D0", "#0072B2D0"),
                                                  color = "white", size = 2),
                              direction = "horizontal")) +
  theme_dviz_hgrid(12, rel_small = 1, font_family = "Roboto Light") +
  theme(axis.line.x = element_blank(),
        axis.ticks.x = element_blank(),
        #axis.ticks.length = grid::unit(0, "pt"),
        legend.position = c(1., 0.9),
        legend.justification = "right")
plot_grid(p_row, p3, ncol = 1, labels = c("", "c")) +
  theme(plot.margin = margin(6, 12, 0, 0))
## ----athletes-composite-misaligned, fig.asp = 0.75, fig.cap = '(ref:athletes-composite-misaligned)'----
# Ugly variant: the same composite but with the panels deliberately
# misaligned (note the NULL spacer panel and unaligned axes).
# Reuses athletes_df defined two chunks earlier.
p1 <- ggplot(athletes_df, aes(x = sex, fill = sex)) +
  geom_bar() +
  scale_y_continuous(limits = c(0, 95), expand = c(0, 0), name = "number") +
  scale_x_discrete(name = NULL, labels = c("female", "male")) +
  scale_fill_manual(values = c("#D55E00D0", "#0072B2D0"), guide = "none") +
  theme_dviz_open(12, rel_small = 1, font_family = "Roboto Light") +
  background_grid(major = "y") +
  theme(#axis.line.x = element_blank(),
    #axis.ticks.x = element_blank(),
    #axis.ticks.length = grid::unit(0, "pt"),
    plot.margin = margin(3, 6, 6, 0))
p2 <- ggplot(athletes_df, aes(x = rcc, y = wcc, fill = sex)) +
  geom_point(pch = 21, color = "white", size = 2.5) +
  scale_x_continuous(limits = c(3.8, 6.75), name = "RBC count") +
  scale_y_continuous(limits = c(2.2, 11.), expand = c(0, 0), name = "WBC count") +
  scale_fill_manual(values = c("#D55E00D0", "#0072B2D0"), guide = "none") +
  theme_dviz_open(12, rel_small = 1, font_family = "Roboto Light") +
  background_grid(major = "y") +
  theme(plot.margin = margin(3, 18, 0, 0))
p_row <- plot_grid(NULL, p1, p2, labels = c("", "a", "b"), nrow = 1,
                   rel_widths = c(0.03, 0.7, 1))
GeomBP <- GeomBoxplot
GeomBP$draw_key <- draw_key_polygon
p3 <- ggplot(athletes_df, aes(x = sport, y = pcBfat, color = sex, fill = sex)) +
  stat_boxplot(width = 0.5, geom = GeomBP) +
  scale_color_manual(values = c("#D55E00", "#0072B2"), name = NULL,
                     labels = c("female ", "male")) +
  scale_fill_manual(values = c("#D55E0040", "#0072B240"), guide = "none") +
  scale_x_discrete(name = NULL) +
  scale_y_continuous(name = "% body fat") +
  guides(color = guide_legend(override.aes = list(fill = c("#D55E00D0", "#0072B2D0"),
                                                  color = "white", size = 2),
                              direction = "horizontal")) +
  theme_dviz_open(12, rel_small = 1, font_family = "Roboto Light") +
  background_grid(major = "y") +
  theme(#axis.line.x = element_blank(),
    #axis.ticks.x = element_blank(),
    #axis.ticks.length = grid::unit(0, "pt"),
    legend.position = c(1., 0.9),
    legend.justification = "right")
stamp_ugly(plot_grid(p_row, p3, ncol = 1, labels = c("", "c")) +
             theme(plot.margin = margin(6, 12, 0, 0)))
|
d500a5a96008469654779b4abfd7d59cd4b4e731
|
1ce3e0ae1c9bd1eba75e4a88dca1a2dab2900945
|
/plot4.R
|
e952eded6e48c3abf15416a8b4a178f8ad2a3147
|
[] |
no_license
|
drleyhe/ExData_Plotting1
|
eb0e7cb212bcb487664fe356bccbeb0eb8c21601
|
a60d99c958dfa185646d95186c5b84aa0f4dbe19
|
refs/heads/master
| 2021-05-14T16:47:51.287220
| 2018-01-03T20:44:12
| 2018-01-03T20:44:12
| 116,029,726
| 0
| 0
| null | 2018-01-02T15:33:15
| 2018-01-02T15:33:14
| null |
UTF-8
|
R
| false
| false
| 1,153
|
r
|
plot4.R
|
# Exploratory Data Analysis - Course Project 1, plot 4:
# 2x2 panel of line charts for 1-2 Feb 2007.
#
# NOTE(review): the UCI "Individual household electric power consumption"
# file codes missing measurements as "?". Without na.strings the affected
# columns load as character and the plots fail; declaring it is harmless
# when the plotted subset has no missing values.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ';',
                   na.strings = "?", stringsAsFactors = FALSE)
# Keep only the two target days (dates are stored as d/m/Y strings).
datasubset <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
# Combine Date + Time into a single plottable date-time column.
datasubset <- data.frame(datasubset,
                         DateTime = strptime(paste(datasubset$Date, datasubset$Time),
                                             "%d/%m/%Y %H:%M:%S"))
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Top-left: global active power over time.
plot(datasubset$DateTime, datasubset$Global_active_power, type = 'l'
     , xlab = ""
     , ylab = "Global Active Power"
)
# Top-right: voltage over time.
plot(datasubset$DateTime, datasubset$Voltage, type = 'l'
     , xlab = "datetime"
     , ylab = "Voltage"
)
# Bottom-left: the three sub-metering series overlaid, with a legend.
plot(datasubset$DateTime, datasubset$Sub_metering_1, type = 'l'
     , xlab = ""
     , ylab = "Energy sub metering"
     , col = "black"
)
lines(datasubset$DateTime, datasubset$Sub_metering_2, col = "red")
lines(datasubset$DateTime, datasubset$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
       , lty = par("lty")
       , col = c("black", "red", "blue")
       , bty = "n"
)
# Bottom-right: global reactive power over time.
plot(datasubset$DateTime, datasubset$Global_reactive_power, type = 'l'
     , xlab = "datetime"
     , ylab = "Global_reactive_power"
)
dev.off()
|
cf7e1f9fd1d660fbe91ebf17d64ae0eb09339989
|
9b0e9c1e919b9abc7e1b321cc5d7e2a14451037b
|
/man/set_theme.Rd
|
2bd2415e9a0404112dee449efcdd66a25231a129
|
[] |
no_license
|
atlas-aai/ratlas
|
abbcf9050885c6189e8bec2d6dfa42bff301664b
|
7e88f6a826b6e0d855d49e960c84ef3182b64aa8
|
refs/heads/main
| 2023-07-24T20:21:59.886545
| 2023-06-29T21:41:00
| 2023-06-29T21:41:00
| 150,454,937
| 30
| 10
| null | 2023-06-29T21:41:01
| 2018-09-26T16:14:04
|
R
|
UTF-8
|
R
| false
| true
| 1,115
|
rd
|
set_theme.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set-theme.R
\name{set_theme}
\alias{set_theme}
\title{Set default ggplot2 theme}
\usage{
set_theme(
font = "Arial Narrow",
discrete = c("okabeito", "atlas", "ggplot2"),
continuous = c("viridis", "magma", "inferno", "plasma", "cividis", "ggplot2"),
...
)
}
\arguments{
\item{font}{The base font family to be used in plots.}
\item{discrete}{Color palette for discrete colors. One of "okabeito"
(default), "atlas", or "ggplot2".}
\item{continuous}{Color palette for continuous scales. One of "magma",
"inferno", "plasma", "viridis" (default), or "cividis", or "ggplot2".}
\item{...}{Additional arguments to pass to theme functions.}
}
\description{
Sets the default color schemes, fonts, and theme for ggplot2 plots. The
default color scheme for continuous variables is the
\href{https://cran.r-project.org/web/packages/viridis/index.html}{viridis}
color palette, and the default color scheme for discrete variables is the
\href{http://jfly.iam.u-tokyo.ac.jp/color/}{Okabe Ito} palette.
}
\examples{
set_theme("Arial Narrow")
}
|
f4d39dceff5516b8a8a2c71cc3aa253b46c24f16
|
88442e1b9eb811b5bbbbe2a50c159bb197a21c90
|
/Subspace_v0.1.R
|
5e8d0cd8e4c0d8a65d2c8438675fb146aa7f6a83
|
[] |
no_license
|
prashantde/CAS_RPM_2017_Public
|
0ff1cf92fd8c90ea94080263bbf67e483deead21
|
cda11d0fb3edac073de2a350a02f63b4b7afd63d
|
refs/heads/master
| 2021-01-23T01:17:17.920604
| 2017-03-27T21:14:16
| 2017-03-27T21:14:16
| 85,892,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
Subspace_v0.1.R
|
# One-off install helpers (run manually if the packages are missing):
# install.packages("ElemStatLearn")
# install.packages("HighDimOut")
# install.packages("scales")  # only if v0.40 needs to be updated

# Section 1: import high-dimensional data ------------------------------------
library(ElemStatLearn)
library(readr)
library(HighDimOut)
library(ggplot2)

# Prostate cancer data set from Elements of Statistical Learning,
# tab-delimited, fetched directly from the book's website.
prostate <- read_delim("https://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/prostate.data",
                       "\t", escape_double = FALSE, trim_ws = TRUE)

# Section 2: outlier analysis -------------------------------------------------
# Considerations and findings:
# - The number of variables outweighs the number of records: the data are sparse.
# - Euclidean distance-based outlier detection struggles in such sparse spaces
#   (curse of dimensionality), so outlier scores are hard to quantify.
# - Subspace outlier detection (SOD) first identifies relevant subspaces and
#   then scores outliers within them, which works in high dimensions.
# - Some records appear to be possible outliers.
# Source: "Outlier Detection in Axis-Parallel Subspaces of High Dimensional
# Data", Hans-Peter Kriegel, Peer Kroeger, Erich Schubert, Arthur Zimek.

# Run SOD on the sparse, high-dimensional data and inspect the scores.
res.SOD <- Func.SOD(data = prostate, k.nn = 10, k.sel = 5, alpha = 0.8)
plot(res.SOD, main = "Scatterplot of outliers by record index")
qplot(res.SOD, main = "Subspace Outlier records in data. Distance measured")
|
6114ad26721b81f5e961d95ec8a5d1fb13669c99
|
c4bd48cc8156e85212ca2d6ef522a8ea7b318aeb
|
/man/rec_skogsfugl.Rd
|
3b00dca35a465d5aa2d41f2dab0ac5bf4884a815
|
[] |
no_license
|
ErlendNilsen/HFP
|
9af4007e8baf2bb5587ddf3c96fdfa93ea822592
|
8ceb7415fcb2d264edea0be17fb6037dba887042
|
refs/heads/master
| 2020-07-09T02:57:43.875307
| 2019-08-23T07:14:23
| 2019-08-23T07:14:23
| 203,856,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
rec_skogsfugl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rec_skogsfugl.R
\name{rec_skogsfugl}
\alias{rec_skogsfugl}
\title{Estimate brood size - forest birds}
\usage{
rec_skogsfugl(strat)
}
\arguments{
\item{strat}{Level of stratification - 'No', 'OmradeNavn' or 'Year'}
}
\description{
Estimates brood size, i.e. the number of chicks per adult female,
based on the level of stratification.
NB! Only for use with forest birds, i.e. Capercaillie and Black grouse.
}
\examples{
rec_skogsfugl(strat = "Year")
}
\keyword{birds}
\keyword{brood}
\keyword{forest}
\keyword{size}
|
5a2ef99dc26cbe25031a56216604def0e0296fb8
|
c15420ec3bb8809329419e97f7d274c81b11ff5d
|
/R/qpcr_curve.R
|
43dd294a5a558d93c16e3ed67cfd3caf49a27360
|
[] |
no_license
|
infotroph/Prairie_seq
|
378196db4762ac1a2a39a31713fbe77156b7728f
|
b93c01edcf5a5f3f45a4bb8b455856cffec37c2e
|
refs/heads/master
| 2021-05-01T01:01:43.808133
| 2017-12-18T07:47:23
| 2017-12-18T07:47:23
| 29,328,771
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,475
|
r
|
qpcr_curve.R
|
# Convert melting curves from a Bio-Rad QX200 droplet PCR analyzer into melting peak form for analysis of melting temperature heterogeneity.
# Usage: qpcr_curve.R datafile.csv idfile.csv
library(qpcR)
source("R/order.plate.R") # for correct sorting of "A1, A10, A11, A12, A2..."
extract.meltcurve = function(mc){
  # qpcR::meltcurve() returns each curve as a ragged data frame that crams
  # two unrelated structures into one table:
  #
  # 1. Per-temperature columns (one value per row):
  #    Temp     = temperature
  #    Fluo     = normalized fluorescence
  #    df.dT    = derivative of fluorescence
  #    baseline = estimated baseline fluorescence
  #               (NA at temperatures outside a fitted peak)
  #
  # 2. Sparse columns holding only a few values each, padded with NA and NOT
  #    aligned with the temperature on the row where they happen to sit:
  #    Pars = optimized values for span.smooth and span.peaks
  #    RSS  = residual sum of squares vs. Tm.opt (all NA when Tm.opt is NULL)
  #    Tm   = estimated melting temperature, one value per fitted peak
  #    Area = peak area within Tm.border degrees of Tm, one value per peak
  #
  # This helper unpacks that table into a tidy, classed list.
  structure(
    list(
      fitted = mc[,c("Temp", "Fluo", "df.dT", "baseline")],
      pars = c(na.omit(mc$Pars)),
      RSS = c(na.omit(mc$RSS)),
      peaks = data.frame(
        Tm = c(na.omit(mc$Tm)),
        Area = c(na.omit(mc$Area))),
      quality = attr(mc, "quality")),
    # Bug fix: `class` used to be an element *inside* list(), so structure()
    # received no attributes and the result never carried the "meltcurve"
    # class. It is now passed to structure() as intended.
    class = c("meltcurve", "list"))
}
get_peaks <- function(mc) {
  # Per-peak summary (Tm and Area) of a fitted melt curve.
  # Refuses curves that were not flagged "good" by the fitting step.
  stopifnot(mc$quality == "good")
  mc$peaks
}
get_fitted <- function(mc) {
  # Per-temperature data of a fitted melt curve: temperature, smoothed
  # normalized fluorescence, its derivative, and the baseline estimate.
  # Refuses curves that were not flagged "good" by the fitting step.
  stopifnot(mc$quality == "good")
  mc$fitted
}
# ---- Script body: read inputs, fit melt curves, write summaries ------------
# Expects two command-line arguments: the fluorescence data file and the
# well-ID file (see header comment of this script).
args = commandArgs(trailingOnly=TRUE)
curvedat = read.csv(args[1])
iddat = read.csv(args[2], stringsAsFactors=FALSE)
# column 1 is empty, column 2 is Temperature
wellnames=colnames(curvedat)[-c(1,2)]
# For a full plate, expect a 98-col datafile and a 96-line ID file.
# Any line of the ID file where "Sample" is blank will be skipped in the analysis.
# If you also want to skip samples where "Censor" is *not* blank, uncomment the second set_id assignment.
stopifnot(
  nrow(iddat) == ncol(curvedat)-2
  && all(c("Well", "Sample", "Censor") %in% colnames(iddat)))
ids = iddat$Sample
set_ids = seq_along(ids)[ids != ""]
# set_ids = seq_along(ids)[ids != "" & iddat$Censor == ""]
# Normalize, smooth, and fit melting peaks all in one step. See the qpcR::meltcurve() documentation for details on how it does the fitting.
# The plot is pretty ugly and we'll make a prettier version in a later step, but let's save this one too for diagnostic purposes.
pdf(
  file="figs/multi_ctab_peakfit-20150421.pdf", #FIXME FIXME FIXME: let Makefile specify path!
  width=45,
  height=30,
  pointsize=24)
curves = meltcurve(
  data=curvedat,
  temps=rep(2, length(set_ids)), # expects one column of temperatures for every column of fluorescence, so pass the same column repeatedly.
  fluos=2+set_ids, # column numbers to analyze (we're skipping empty wells).
  window=c(80,90), # Picked these values for 2015-03-19 run, check before trusting on others.
  norm=TRUE, # scale so max fluorescence in each curve = 1, min ditto = 0
  plot=TRUE,
  Tm.opt=NULL,
  Tm.border=c(0.5,0.5)) # compute "baseline" from Tm +/- this many °C
# (Usually far above the actual bottom of the peak!)
dev.off()
names(curves) = wellnames[set_ids]
# Unpack each ragged qpcR result into a tidy "meltcurve" list (helper above).
curves = lapply(curves, extract.meltcurve)
# Collect summary statistics for the whole dataset:
# npeaks = How many peaks were found in each sample
# main_tm = Melting temperature of the peak with the largest area
#   N.B. this isn't really the same as "average melting temperature"
#   and may or may not actually be a useful metric.
peaks = lapply(curves, get_peaks)
peak_summary = data.frame(
  npeak=sapply(peaks, nrow),
  main_tm=sapply(peaks, function(x)with(x, Tm[which.max(Area)])))
peak_summary$Well = wellnames[set_ids]
peak_summary = merge(peak_summary, iddat, all.x=TRUE)
# Fix ordering by forcing "A2" to sort before "A10" instead of after
# (order.plate comes from R/order.plate.R, sourced in the header).
peak_summary = peak_summary[order.plate(peak_summary$Well),]
# OK, summary is done. Write it out and move on.
#FIXME FIXME FIXME: let Makefile specify path!
write.csv(peak_summary, file="data/multi_ctab_melt_summary-20150421.csv", quote=FALSE, row.names=FALSE)
# Extract normalized fluorescence data, add sample ID info,
# save as one long dataframe.
fitted = lapply(curves, get_fitted) # returns a named list of dataframes
fitted = mapply( # add name to each dataframe in list as a new column
  FUN=function(x,n){x$Well=n;x},
  fitted,
  names(fitted), SIMPLIFY=FALSE)
fitted_summary = do.call("rbind", fitted)
fitted_summary = merge(fitted_summary, iddat, all.x=TRUE)
# Sort in plate order (TODO: maybe should sort by sample instead?)
fitted_summary = fitted_summary[order.plate(fitted_summary$Well),]
# FIXME FIXME FIXME: let Makefile specify path!
write.csv(fitted_summary, file="data/multi_ctab_melt_curves-20150421.csv", quote=FALSE, row.names=FALSE)
|
63a3a4542614f77b2c60a3b4b85267b60e73d2b8
|
8fc163a7184ca1481f617f1986da01be36974b4f
|
/R/RefactorBD.R
|
eeece145376e67572fad41a5384c10d67a32c387
|
[] |
no_license
|
TomasSG/Parkinson-BecaCIN
|
474ac46663572e4104f54ef5a3af0c5d6b7f41ec
|
384881e0cd37577a7fdcf166836dcb06c8b8f49d
|
refs/heads/master
| 2023-07-14T19:52:48.091136
| 2021-08-27T00:12:42
| 2021-08-27T00:12:42
| 295,257,148
| 0
| 0
| null | 2021-01-26T19:38:11
| 2020-09-13T23:37:14
|
HTML
|
UTF-8
|
R
| false
| false
| 2,403
|
r
|
RefactorBD.R
|
# Vamos a hacer un refactor de las variables categóricas
library(tidyverse)
PATH_DATOS_IN <- "./data/bd_final.csv"
PATH_DATOS_OUT <- "./data/bd_final_refactor.csv"
# Load a semicolon-separated table from `path`.
# Expects a header row and "." as the decimal mark; returns a base data.frame.
leer_archivo <- function(path) {
  read.table(file = path, header = TRUE, sep = ";", dec = ".")
}
# Persist `df` to `path` as a semicolon-separated table with "." decimals.
# Mirrors the read conventions of leer_archivo(); row names are written
# (write.table's default), matching the original behaviour.
escribir_archivo <- function(df, path) {
  write.table(x = df, file = path, sep = ";", dec = ".")
}
# Load the raw data and inspect its structure before refactoring.
df_ini <- leer_archivo(PATH_DATOS_IN)
glimpse(df_ini)
# Convert every character column to a factor so level counts can be inspected.
df_fct <- df_ini %>%
map_df(~ if(is.character(.x)) { as.factor(.x)} else {.x})
glimpse(df_fct)
# How are the categorical variables distributed?
df_fct %>%
select(where(is.factor)) %>%
walk(~ print(table(.x)))
# Collapse the categorical levels so each variable has a more balanced distribution.
# Collapse educ into Postgraduate - College - HighSchool.
df_fct <- df_fct %>%
mutate(educ = fct_collapse(educ,
"Postgraduate" = c("Doctoral Degree", "Masters Degree"),
"College" = c("2-year college degree", "4-year college degree", "Some graduate school"),
"HighSchool" = c("High School Diploma/GED", "Some college", "Some high school")))
# Collapse employment into Working - Not Working.
df_fct <- df_fct %>%
mutate(empleo = fct_collapse(empleo,
"Working" = c("A homemaker", "Employment for wages", "Self-employed"),
"Not Working" = c("A student", "Out of work", "Retired", "Unable to work" )))
# Collapse marital status into Married - Not Married.
df_fct <- df_fct %>%
mutate(estado_marital = fct_collapse(estado_marital,
"Married" = c("Married or domestic partnership"),
"Not Married" = c("Divorced", "Other", "Separated", "Single never married", "Widowed")))
# Collapse phone-ease-of-use into Easy - Not Easy.
df_fct <- df_fct %>%
mutate(facilidad_celular = fct_collapse(facilidad_celular,
"Not Easy" = c("Difficult", "Neither easy nor difficult", "Very Difficult"),
"Easy" = c("Easy", "Very easy")))
# Check how the collapsed categories are distributed now.
df_fct %>%
select(where(is.factor)) %>%
walk(~ print(table(.x)))
# Save the refactored data.
escribir_archivo(df_fct, PATH_DATOS_OUT)
|
94f43d2cbecb8cec458f6c1bdf630903a511090c
|
a5d59c05c906769461b849c605a00b5d521f4052
|
/lab3/lab3_p2/generatePlot.R
|
4d4ce50dc24ebe7ce58a94c39d4248e94e1f2ce9
|
[] |
no_license
|
Caio-Batista/operational-systems
|
36a23685ff4abb2bd14ffc13d917d6dccc74f14d
|
e59434de6f10c32dd72ff5c8233161a859957da2
|
refs/heads/master
| 2021-03-22T04:48:54.132157
| 2017-04-05T19:52:08
| 2017-04-05T19:52:08
| 75,005,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
r
|
generatePlot.R
|
# Read the timing results and draw four boxplots (one metric each) against
# the instance type, arranged in a single 2x2 PNG.
mydata4 <- read.csv("exec.csv")
png('plot_lab3_p2.png')
# NOTE: attach(mydata4) was removed -- every boxplot() call below resolves its
# formula variables through data=mydata4, so attaching the data frame to the
# search path was redundant (and it was never detach()ed).
par(mfrow=c(2,2))
boxplot(Runtime~Instancia, data=mydata4, main="Instancia X Runtime",
        xlab="Instancia", ylab="Runtime")
boxplot(Switches~Instancia, data=mydata4, main="Instancia X Switches",
        xlab="Instancia", ylab="Switches")
boxplot(Average.delay~Instancia, data=mydata4, main="Instancia X Average delay",
        xlab="Instancia", ylab="Average delay")
boxplot(Wall.clock.time~Instancia, data=mydata4, main="Instancia X Wall clock time s",
        xlab="Instancia", ylab="Wall clock time")
dev.off()
|
b7ad1671225cb7805e630a61ac5fa63168b2019b
|
a54a6aae7c7294612882cac46b1d530b6b89570e
|
/man/spline_hematocrit.Rd
|
0c74d6cebb455e7e9d4836ed89d05e1617f0ad28
|
[] |
no_license
|
HQData/httk
|
a4a5f3ff8db6a6f7a73cb35db69300e438595205
|
844e5e32865b3e8920a0a5d1b365e4f81bb62e64
|
refs/heads/master
| 2020-04-21T03:12:06.086486
| 2019-02-04T19:33:21
| 2019-02-04T19:33:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,178
|
rd
|
spline_hematocrit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{spline_hematocrit}
\alias{spline_hematocrit}
\title{Smoothing splines for log hematocrit vs. age in months, and KDE residuals, by
race and gender.}
\format{A data.table with 6 variables: \describe{ \item{\code{gender}}{Gender:
Male or Female} \item{\code{reth}}{Race/ethnicity: Mexican American, Other
Hispanic, Non-Hispanic White, Non-Hispanic Black, Other}
\item{\code{hct_spline}}{A list of smooth.spline objects, each giving a
smoothed relationship between log hematocrit and age in months}
\item{\code{hct_kde}}{A list of kde objects; each is a KDE of the
distribution of residuals about the smoothing spline.}}}
\usage{
spline_hematocrit
}
\description{
Smoothing splines and KDE residuals pre-calculated from NHANES hematocrit and
age data by race/ethnicity and gender.
}
\references{
Ring, Caroline L., et al. "Identifying populations sensitive to
environmental chemicals by simulating toxicokinetic variability." Environment
International 106 (2017): 105-118
}
\author{
Caroline Ring
}
\keyword{data}
\keyword{httk-pop}
|
168ee776918ddde0a7d5a418df6e8285d06096be
|
caea54b3de3c4373bedffbd9d09e47e244636019
|
/tests/testthat/test_rinstall.r
|
e7a0ae549176af0216cc3ea02ebef35364ff6167
|
[] |
no_license
|
ashiklom/rtcl
|
66ce642dce90727c40f066b0ee8a2016da315e41
|
995519859544900ba2056a711cec8371da5a4bf0
|
refs/heads/master
| 2023-01-31T00:14:05.680419
| 2020-12-03T22:19:13
| 2020-12-03T22:19:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
test_rinstall.r
|
context("rinstall")

test_that("rinstall works for remote packages", {
  skip_on_cran()
  # Each entry pins one supported remote-spec format (GitHub shorthand with
  # and without a ref, full git URLs with and without a ref); all point at
  # the same test package stored inside this repository.
  # (Top-level assignment uses <-, not =, per R style.)
  pkgs <- list(
    list(str = "rdatsci/rtcl/tests/testthat/assets/package", name = "testpkg", type = "GitHub"),
    list(str = "rdatsci/rtcl/tests/testthat/assets/package@master", name = "testpkg", type = "GitHub"),
    list(str = "https://github.com/rdatsci/rtcl.git/tests/testthat/assets/package", name = "testpkg", type = "Git"),
    list(str = "https://github.com/rdatsci/rtcl.git/tests/testthat/assets/package@master", name = "testpkg", type = "Git")
  )
  for (pkg in pkgs) {
    test_basic_rinstall(pkg$str, pkg$name, info = pkg$str)
  }
})

test_that("rinstall works for local packages", {
  skip_on_cran()
  test_basic_rinstall("./assets/package", "testpkg")
})

test_that("rinstall works for local packages on windows", {
  # Windows-only: exercises backslash path separators.
  skip_on_os(c("mac", "linux", "solaris"))
  skip_on_cran()
  test_basic_rinstall(".\\assets\\package", "testpkg")
})
|
a3b6471b795baa4be260142990067098e99ae6af
|
0a6027e3a34be0436e9322e7fbb80ab7e2580ff0
|
/man/gen_axis_y_scale.Rd
|
e33a5fd150ba2a360d2eeca5083de95baf045814
|
[
"MIT"
] |
permissive
|
leofmr/survey.analysis
|
9ffb0a626dfc5bfad9b6ba10922cae71272b4455
|
71c5c4d0f331c22775daf1278b32b89ae2ea41f7
|
refs/heads/master
| 2022-12-26T20:45:58.032005
| 2020-10-13T21:57:49
| 2020-10-13T21:57:49
| 301,246,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 512
|
rd
|
gen_axis_y_scale.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_bar_graph_aux.R
\name{gen_axis_y_scale}
\alias{gen_axis_y_scale}
\title{Gerar rótulos do eixo y}
\usage{
gen_axis_y_scale(negative_col, intervals = 0.25)
}
\arguments{
\item{negative_col}{Character. Nome da coluna que é negativa}
\item{intervals}{Numeric. Tamanho do intervalo da escala de valores continuos do eixo y}
}
\value{
Escala de valores para o eixo y do gráfico de barras
}
\description{
Gerar rótulos do eixo y
}
|
325fa3ef3644dc65b320025f6894545315335d21
|
0700dab5e1804ed4d40a368555792037fbe58417
|
/Code/feature_EDA.R
|
a6d0eedacca094bd29ed1f9fe9aeaebab1d9175a
|
[] |
no_license
|
amytildazhang/DeanonymizationProject
|
4fc58ce7ab7e06f85937670cc9f96cbf5d424554
|
551b9f57147c2c01ef00769b22255bb35ce71de3
|
refs/heads/master
| 2021-08-28T15:14:51.405334
| 2017-12-12T15:08:30
| 2017-12-12T15:08:30
| 103,578,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,128
|
r
|
feature_EDA.R
|
library(readr)
library(data.table)
library(dplyr)
library(lubridate)
library(tidyr)
library(ggplot2)
library(binr)
library(purrr)
library(magrittr)
file <- "RC_2017-02"
folder <- "../Data/"
# n_authors <- 5000
#scores are considered reliable if the comment was collected after allowed_gap
options(tibble.print_max = Inf)
#------------------------EDA--------------------------------
features <- do.call("rbind", lapply(1:3, function(set) {
read_csv(paste0("../Data/RC_2017-02_combinedFeaturesSubset", set, ".csv")) #%>%
# filter(id %in% metadata$id) %>%
# inner_join(metadata, by = c("id", "author", "subreddit"))
}))
#include authors that are in the main subreddit (1st) and at least one of the others
# authors <- metadata %>%
# group_by(author) %>%
# summarise(
# in_main = main_subreddit %in% subreddit,
# in_others = any(setdiff(subreddits, main_subreddit) %in% subreddit),
# subreddits = paste(unique(subreddit), collapse = " "),
# n_posts = n()
# ) %>%
# filter(in_main, in_others) %>%
# arrange(desc(n_posts)) %T>%
# write_csv(paste0(folder, file, "_shared_authors.csv"))
#
# by_subreddit <- metadata %>%
# group_by(subreddit_id) %>%
# summarise(
# n_names = length(unique(subreddit)),
# n_posts = n(),
# n_posters = length(unique(author)),
# avg_posts = n_posts/n_posters,
# names = paste(unique(subreddit), collapse = " ")
# ) %>%
# filter(n_posters > 2000) %>%
# arrange(desc(avg_posts))
#
# by_author <- metadata %>%
# group_by(author, subreddit) %>%
# summarise(
# n_posts = n()
# ) %>%
# ungroup() %>% group_by(author) %>%
# summarise(
# avg_posts = mean(n_posts),
# med_posts = median((n_posts)),
# n_posts = sum(n_posts),
# n_subreddits = length(unique(subreddit))
# ) %>%
# filter(n_posts > 20, n_subreddits > 1) %>%
# arrange(desc(avg_posts))
# metadata <- filter(metadata, author %in% authors$author)
# Reload the feature matrix and keep only rows present in the metadata.
# NOTE(review): `metadata` is built in the commented-out block above -- this
# line fails unless that code (or an equivalent) has run; confirm.
features <- read_csv(paste0(folder, file, "_features.csv")) %>%
filter(id %in% metadata$id)
# Identifier columns are everything that is not a stylometric feature.
id_cols <- c("id", "subreddit_id", "author", "plot", "subreddit", "V1")
feature_cols <- colnames(features) %>% setdiff(id_cols)
#replace all NAs in feature_cols with 0s
# toreplace is a named list mapping every feature column to the fill value 0,
# in the shape tidyr::replace_na() expects.
toreplace <- setNames(lapply(vector("list", length(feature_cols)),
function(x) x <- 0), feature_cols)
features <- features %>%
replace_na(toreplace) %>%
mutate_at(feature_cols, as.numeric)
# plotprob <- min(30000/nrow(metadata), 1) #maximum number of points that can reasonably be plotted
# cat("% of points that will be plotted is", plotprob, "\n")
# training_set <- features %>%
# filter(subreddit %in% main_subreddit) %>%
# group_by(author) %>%
# mutate(
# plot = rbinom(1, n(), plotprob)
# ) %>%
# ungroup()
# features <- features %>%
# mutate(
# plot = rbinom(1, n(), plotprob)
# )
#select top 20 features by information gain using shannon entropy
#plot EDA for these, rather htan all 300...
# Discretize the feature in column `ftidx` of `df` into ~20 bins of roughly
# equal occupancy (via binr::bins), returning a two-column data frame:
# `id` plus a column (renamed to the feature's name) holding the bin label.
# If the feature is constant, binning is impossible, so return id + raw values.
shannon_bin <- function(ftidx, df) {
  ft <- as.numeric(df %>% pull(ftidx))
  if (length(unique(ft)) == 1) return(df %>% select(id, ftidx))
  # Renamed local from `bins` to avoid shadowing binr::bins().
  bin_spec <- bins(ft, 20, max.breaks = 20)
  # Keep ids only, sorted by feature value so rows line up with the bin counts.
  newdf <- df %>% select(ftidx, id) %>%
    arrange(df[[ftidx]]) %>%
    mutate(bin = NA) %>%
    select(-1)
  j <- 0
  letter <- 1
  for (i in bin_spec$binct) {
    # Assign exactly rows (j+1)..(j+i) to this bin. The previous indexing,
    # newdf$bin[j:(j+i)], was off by one: it relied on R silently dropping
    # index 0 on the first pass and then overwrote each bin-boundary row
    # with the NEXT bin's label. seq_len() also handles empty bins (i == 0).
    newdf$bin[j + seq_len(i)] <- letters[letter]
    letter <- letter + 1; j <- j + i
  }
  colnames(newdf)[2] <- colnames(df)[ftidx]
  newdf
}
# Shannon entropy (in bits) of a categorical vector, with a small additive
# smoothing count (+0.01 per observed level) to avoid log2(0).
shannon_entropy <- function(fct) {
  smoothed_counts <- table(fct) + 0.01
  p <- smoothed_counts / sum(smoothed_counts)
  -sum(p * log2(p))
}
# Information gain of a feature with respect to authorship, computed as
# mutual information I(bin; author) = H(bin) + H(author) - H(bin, author).
# `ftidx` is the feature's column index in `df`; `df` must carry `author`
# and `id` columns (ids let the binned rows be matched back to authors).
# Returns a single numeric value.
info_gain <- function(ftidx, df) {
  binned <- shannon_bin(ftidx, df)
  labelled <- full_join(binned, df %>% select(author, id), by = "id")
  feature_bins <- labelled %>% pull(2)
  authors <- labelled %>% pull(author)
  shannon_entropy(feature_bins) +
    shannon_entropy(authors) -
    shannon_entropy(interaction(feature_bins, authors))
}
cat("Calculating information gain of feature in column 'x' of the training subreddit: \n")
#calculate information gain for all columns specified as feature_cols, save as CSV file
# tibble() evaluates its arguments sequentially, so `idx` is available to the
# later columns. print(ftidx) is progress output only.
feature_infogain <- tibble(
idx = which(colnames(features) %in% feature_cols),
info_gain = map_dbl(idx, function(ftidx) {
print(ftidx)
info_gain(ftidx, features)
}),
feature = feature_cols[idx],
n_unique = map_dbl(idx, function(ftidx) features %>% pull(ftidx) %>% unique %>% length)
) %>%
arrange(desc(info_gain))
# Persist the ranking so the top features can be reused without recomputing.
feature_infogain %>%
write_csv(paste0("../Output/Feature Infogain/", file, "_feature_infogain.csv"))
# Print a quick author/row summary for every file in the working directory.
# NOTE(review): the loop variable reuses (and clobbers) the global `file`
# defined at the top of this script -- after the loop, `file` holds the last
# filename, not "RC_2017-02". Rename the loop variable if the global matters.
for (file in list.files(".")) {
authors <- fread(file, select = "author")
authors <- authors$author
print(paste("filename:", file))
print(paste("# authors:", length(unique(authors))))
print(paste("# rows:", length(authors)))
}
#
# #------------------------------EDA
# #compare feature distribution across authors, and across subreddits
# #provides support that features help distinguish authors and also transfer across subreddits
#
# #dataframe that calculates mean value of all feature_cols, grouped by author
# authorsumm <- training_set %>%
# group_by(author) %>%
# summarise_at(feature_cols, mean, na.rm = TRUE) %>%
# ungroup()
#
# for (i in 1:20){ #save plots of the top 20 features according to information gain
# print(i)
# ft <- feature_infogain$feature[i]
#
# #sort authors so they are in ascending order based on their mean valuefor feature of interest
# authorsumm <- authorsumm %>% arrange_(ft) %>% mutate(order = 1:n())
# training_set <- training_set %>%
# left_join(authorsumm[,c("author", "order")], by = "author")
#
#
# #compare distribution of features across authors within the training set
# ggplot() +
# geom_point(data = training_set %>% filter(plot != 0), aes_string(x = "order", y = ft),
# alpha = 0.1, size = 1) +
# geom_line(data = authorsumm, aes_string(x = "order", y = ft),
# color = "#C4A20A", alpha = 0.7, size = 1) +
# theme_minimal() +
# labs(title = paste("Distribution of", ft, "across Reddit users"),
# subtitle = "Training set",
# xlab = "Reddit user id #",
# ylab = ft) +
# ylim(0, 2 * max(authorsumm$ft, na.rm = TRUE)) +
# ggsave(paste0("../Output/EDA_authors_", ft, ".png"))
#
#
# #compare distribution of features across authors and subreddits
# #provides support that features are topic-independent
# features <- features %>% left_join(authorsumm %>% select(author, order), by = "author")
# subredditsumm <- features %>%
# group_by(author, subreddit, order) %>%
# summarise_at(feature_cols, mean, na.rm = TRUE) %>%
# ungroup()
#
# ggplot() +
# geom_point(data = features %>% filter(plot != 0),
# aes_string(x = "order", y = ft),
# alpha = 0.1, size = 1) +
# geom_line(data = subredditsumm, aes_string(x = "order", y = ft),
# color = "#0980B2", size = 0.8) +
# geom_line(data = authorsumm, aes_string(x = "order", y = ft),
# color = "#C4A20A", size = 0.8) +
# theme_minimal() +
# labs(title = paste("Distribution of", ft, "across subreddits and users"),
# xlab = "Reddit user id #",
# ylab = ft) +
# facet_grid(subreddit ~ ., scales = "free_y") +
# ggsave(paste0("../Output/EDA_subreddits_", ft, ".png"),
# height = 10, width = 7, units = "in")
#
# training_set <- training_set %>% select(-order)
# features <- features %>% select(-order)
# }
|
f06f6fda3d436a45dedfb08460c104068c7c5851
|
e2988ee8e9e4d954fb8c9b450d900f74fcffcc17
|
/man/Example6.6.3.Rd
|
67bd31402b4aa74bec95531ce55e3b8c011d9cac
|
[] |
no_license
|
Allisterh/CGE
|
64e3ee2d8e60017d8feb2c02c85dd3330158ac87
|
49dcff5060be56a5c6a19a261ad573eedad8cd90
|
refs/heads/master
| 2023-03-21T18:10:14.964009
| 2020-05-24T04:00:11
| 2020-05-24T04:00:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 473
|
rd
|
Example6.6.3.Rd
|
\name{Example6.6.3}
\alias{Example6.6.3}
\title{Third Part of Example 6.6 in Li (2019)}
\description{This is the third part of Example 6.6 in Li (2019),which illustrates the second country of a two-country economy.}
\usage{Example6.6.3()}
\author{
LI Wu <liwu@staff.shu.edu.cn>
}
\references{
LI Wu (2019, ISBN: 9787521804225) General Equilibrium and Structural Dynamics: Perspectives of New Structural Economics. Beijing: Economic Science Press. (In Chinese)
}
|
b7aebee89bc01e4b5122065878d919ce3252a01a
|
c4f065fcdc73605b9944b69bc981b339d8356ef4
|
/R/INDperform.R
|
565de68d3b6b9f031b20d6fef30a45bed7116b43
|
[] |
no_license
|
saskiaotto/INDperform
|
a9c566a774bdfd58480e0493eb564761abef8621
|
dffa5a2af4f2a10ba53e2622da9e540fd9dbcdde
|
refs/heads/master
| 2021-11-11T11:26:54.013687
| 2021-10-23T19:13:44
| 2021-10-23T19:13:44
| 106,686,773
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,828
|
r
|
INDperform.R
|
#' INDperform: Performance validation of ecological state indicators
#'
#' INDperform provides an implementation of the 7-step approach suggested by
#' Otto \emph{et al.} (2018) to validate ecological state indicators
#' and to select a suite of complementary and well-performing indicators.
#' This suite can be then used to assess the current state of the system
#' in comparison to a reference period. See also the website:
#' https://saskiaotto.github.io/INDperform/
#'
#' The package builds upon the tidy data principles and offers functions to
#' \itemize{
#' \item identify temporal indicator changes,
#' \item model relationships to pressures while taking non-linear responses
#' and temporal autocorrelation into account, and to
#' \item quantify the robustness of these models.
#' }
#'
#' These functions can be executed on any number of indicators and pressures.
#' Based on these analyses and a scoring scheme for selected criteria the
#' individual performances can be quantified, visualized, and compared. The
#' combination of tools provided in this package can significantly help making
#' state indicators operational under given management schemes such as the
#' EU Marine Strategy Framework Directive.
#'
#' @section Usage:
#' INDperform offers function that can be applied individually to some
#' extent but mostly build upon each other to follow the 7-step approach.
#' They can be grouped into 3 broad categories. For demonstration purposes
#' the package provides a dataset of food web indicators and pressure
#' variables in the Central Baltic Sea (modified from Otto \emph{et al.},
#' 2018).
#'
#' @section 1. Validation of IND performances:
#' The following functions implement the first five steps of the 7-step
#' validation approach and model each IND as a function of time or a
#' single pressure variable using Generalized Additive Models (GAMs)
#' (based on the \code{\link{mgcv}} package):
#' \itemize{
#' \item \code{\link{model_trend}}
#' \item \code{\link{ind_init}}
#' \item \code{\link{model_gam}}
#' \item \code{\link{model_gamm}}
#' \item \code{\link{select_model}}
#' \item \code{\link{merge_models}}
#' \item \code{\link{calc_deriv}}
#' \item \code{\link{select_interaction}}
#' \item \code{\link{test_interaction}}
#' }
#'
#' To show the model diagnostics or complete model results use the functions:
#' \itemize{
#' \item \code{\link{plot_diagnostics}}
#' \item \code{\link{plot_trend}}
#' \item \code{\link{plot_model}}
#' }
#'
#'
#' @section 2. Scoring IND performance based on model output:
#' Among the 16 common indicator selection criteria, five criteria relate
#' to the indicators` performances and require time series for their
#' evaluation, i.e.
#'
#' 8. Development reflects ecosystem change caused by variation in
#' manageable pressure(s)
#'
#' 9. Sensitive or responsive to pressures
#'
#' 10. Robust, i.e. responses in a predictive fashion, and statistically sound
#'
#' 11. Links to management measures (responsiveness and specificity)
#'
#' 12. Relates where appropriate to other indicators but is not redundant
#'
#' In this package, the scoring scheme for these criteria as proposed by
#' Otto \emph{et al.} (2018) serves as basis for the quantification
#' of the IND performance (see the scoring template table
#' \code{\link{crit_scores_tmpl}}). Sensitivity (criterion 9) and
#' robustness (criterion 10) are specified into more detailed sub-criteria
#' to allow for quantification based on statistical models and rated
#' individually for every potential pressure that might affect the IND
#' directly or indirectly.
#'
#' However, the scoring template can easily be adapted to any kind of state
#' indicator and management scheme by modifying the scores, the weighting
#' of scores or by removing (sub)criteria.
#'
#' The following functions relate to the indicator performance scoring
#' (used in this order):
#' \itemize{
#' \item \code{\link{scoring}}
#' \item \code{\link{expect_resp}}
#' \item \code{\link{summary_sc}}
#' \item \code{\link{plot_spiechart}}
#' }
#'
#' For examining redundancies and selecting robust indicator suites use
#' (in that order):
#' \itemize{
#' \item \code{\link{dist_sc}}
#' \item \code{\link{dist_sc_group}}
#' \item \code{\link{clust_sc}}
#' \item \code{\link{plot_clust_sc}}
#' }
#'
#'
#' @section 3. Assessment of current state status:
#' Two approaches based on trajectories in state space to determine
#' the current state of the system in comparison to an earlier period
#' as reference using the selected IND suite (state space =
#' n-dimensional space of possible locations of IND variables)
#'
#' 1. Calculation of the Euclidean distance in state space of any
#' dimensionality between each single year (or any other time step
#' used) and a defined reference year:
#' \itemize{
#' \item \code{\link{statespace_ed}}
#' \item \code{\link{plot_statespace_ed}}
#' }
#'
#' 2. Given the identification of a reference domain in state space,
#' more recent observations might lie within or outside this domain.
#' The convex hull is a multivariate measure derived from
#' computational geometry representing the smallest convex set
#' containing all the reference points in Euclidean plane or space.
#' For visualization, only 2 dimensions considered (dimension
#' reduction through e.g. Principal Component Analysis suggested).
#' \itemize{
#' \item \code{\link{statespace_ch}}
#' \item \code{\link{plot_statespace_ch}}
#' }
#'
#'
#' @references
#' To learn more about the framework, see
#'
#' Otto, S.A., Kadin, M., Casini, M., Torres, M.A., Blenckner, T. (2018)
#' A quantitative framework for selecting and validating food web indicators.
#' \emph{Ecological Indicators}, 84: 619-631,
#' doi: https://doi.org/10.1016/j.ecolind.2017.05.045
#'
#'
"_PACKAGE"
|
34bc464587c3bcf89b1b5a8ea8de4e55ad5638cb
|
51f26f6e159c49fc4e4488c5689bf7c7c3a79f21
|
/prob_Distributions/HyperGeomWithbigSample.R
|
f461f1ca85ffb3b1545add68cdad9816ffd61fb0
|
[] |
no_license
|
michaelgobz/probabilityDistribtions
|
db041392f5fcd58bb0361585edf9ee7d54ffd258
|
35493324e92a725f70bd78651a5fd63fc8d93e26
|
refs/heads/master
| 2021-04-14T15:21:37.990384
| 2020-03-22T18:06:02
| 2020-03-22T18:06:02
| 249,241,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 657
|
r
|
HyperGeomWithbigSample.R
|
N <- 10000 # number of hypergeometric variates to draw (rhyper's first argument is the number of draws, not a population size)
n <- 5 # NOTE(review): assigned but never used anywhere below -- candidate for removal
x <- rhyper(N, 300, 100, 100) # N draws from an urn of 300 successes and 100 failures, sampling 100 each time
# Plot the empirical distribution; one histogram class per integer outcome,
# scaled to a probability density.
hist(x, xlim = c(min(x), max(x)), probability = TRUE, nclass = max(x)-min(x)+1, col ='lightblue', main = 'Hypergeometric Distribution, n=400, p=.75, k=100')
# Overlay a kernel density estimate to show the overall shape of the distribution.
lines(density(x, bw=1), col ='red', lwd = 2)
|
6a5776014dd3b6519a23e5672e77655b69a331f2
|
f9f143594922d6dfc386d86e1f1584d98008ccb1
|
/bin/plot_venn.R
|
b43300ce43be7991c16bcfb2c2eb301c4d4a77ad
|
[] |
no_license
|
rwtaylor/nf-maf-snps
|
7da4e06aa7d8180211ff7623fed405e013137d89
|
443bc1abd7d84f21f700530523c558397239249e
|
refs/heads/master
| 2021-01-22T23:26:15.755813
| 2017-03-22T00:29:04
| 2017-03-22T00:29:04
| 85,637,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
plot_venn.R
|
#!/bin/Rscript
# Plots a venn diagram of which populations are polymorphic at each SNP site.
# argument 1: cpus
# argument 2: comma separated list of populations
# argument 3: path to sample_groups
# argument 4: path to *.traw file
# argument 5: output prefix
library(readr)
library(tidyr)
library(dplyr)
library(gplots)
library(foreach)
library(doMC)

args <- commandArgs(trailingOnly = TRUE)

## For testing
#args <- 1
#args[1] <- 8
#args[2] <- "jacksoni,sumatrae,altaica,tigris"
#args[3] <- "../../sample_groups.tsv"
#args[4] <- "fb-170208-1-snp-qq30-gq30-hwe-ss0.001-ldp.traw"
#args[5] <- "fb-170208-1-snp-qq30-gq30-hwe-ss0.001-ldp"
###

# commandArgs() yields character strings; registerDoMC() expects an integer
# worker count, so coerce explicitly (previously the raw string was passed).
registerDoMC(cores = as.integer(args[1]))

pops <- strsplit(args[2], ',')[[1]]

genotypes <- read_tsv(args[4], col_names=TRUE)
sample_groups <- read_tsv(args[3], col_names=c("ID", "pop"))
sample_groups <- sample_groups %>% filter(pop %in% pops)
# Genotype columns arrive with suffixes after the sample ID; strip everything
# from the first underscore onward so they match the IDs in sample_groups.
colnames(genotypes) <- gsub("_.*", "", colnames(genotypes))
genotypes_cols <- colnames(genotypes)
# Keep only samples that actually appear in the genotype table, and record
# each sample's column position for fast subsetting below.
sample_groups <- sample_groups[sample_groups$ID %in% genotypes_cols, ]
sample_groups$idcol <- match(sample_groups$ID, genotypes_cols)

# For each population (in parallel), flag the sites that are polymorphic
# within it: a heterozygote (1) is present, or both homozygote classes
# (0 and 2) are. Result: one logical column per population, one row per site.
pop_polymorphic_sites <- foreach(group.i = unique(sample_groups$pop), .combine = cbind) %dopar% {
  sg.i <- sample_groups %>% filter(pop == group.i)
  genotypes.i <- genotypes[ ,sg.i$idcol]
  out <- data_frame(pop = apply(genotypes.i, 1, function(x) any(x == 1, na.rm=TRUE) || (any(x == 0, na.rm=TRUE) & any(x == 2, na.rm=TRUE))))
  names(out) <- group.i
  out
}

# Render the venn diagram once, then write it to both PDF and PNG.
venn_diagram <- venn(pop_polymorphic_sites[ , pops], show.plot = FALSE)
pdf(file = paste(args[5], ".venn.pdf", sep = ''))
plot(venn_diagram)
dev.off()
png(file = paste(args[5], ".venn.png", sep = ''))
plot(venn_diagram)
dev.off()
|
aa33e05944694a5ea754f76256971960ec205316
|
9fdc16616cfcc329f6915b2c8ebea8a14ff9cdfb
|
/results/5-projectivity-no-fact/rscripts/analysis.R
|
546bd2758cf5d8e706703be8e698979f6cf32476
|
[] |
no_license
|
judith-tonhauser/projective-probability
|
7bf703a53d4e4cff62732ddbcfef5d783f6eca5c
|
8d8658b31cc679ba8cc0c62053876016367070bc
|
refs/heads/master
| 2022-12-22T04:40:02.482838
| 2022-12-15T09:12:59
| 2022-12-15T09:12:59
| 219,491,209
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,899
|
r
|
analysis.R
|
# Factives paper
# 5-projectivity-no-fact (certainty ratings, continuous task)
# analysis.R
# set working directory to directory of script
# NOTE(review): requires an RStudio session; fails under plain Rscript
this.dir <- dirname(rstudioapi::getSourceEditorContext()$path)
setwd(this.dir)
source('helpers.R')
# load required packages
library(tidyverse)
library(tidybayes)
library(dplyr)
library(dichromat)
library(forcats)
library(ggrepel)
library(brms)
library(knitr)
library(emmeans)
library(lme4)
library(padr)
library(performance)
library(MuMIn)
theme_set(theme_bw())
# load clean data ----
cd = read.csv("../data/cd.csv")
nrow(cd) #6916
# create item as combination of predicate and complement clause
cd$item = as.factor(paste(cd$verb,cd$content))
# LME model predicting rating from predicate
table(cd$verb)
# reference level: main-clause (MC) controls
cd$verb <- relevel(cd$verb, ref = "MC")
# by-subject random slopes for verb, random intercepts for content; ML fit (REML=F)
m = lmer(response ~ verb + (1+verb|workerid) + (1|content), data = cd, REML=F)
summary(m)
# reorder verb by mean
means = cd %>%
group_by(verb) %>%
summarize(Mean = mean(response)) %>%
mutate(verb = fct_reorder(as.factor(verb),Mean))
means
levels(means$verb)
cd$verb <- factor(cd$verb, levels = unique(levels(means$verb)))
levels(cd$verb)
# pairwise comparison
str(cd$response)
str(cd$verb)
cd$verb <- as.factor(cd$verb)
str(cd$workerid)
cd$workerid <- as.factor(cd$workerid)
# simpler random-effects structure (intercepts only) for the pairwise contrasts
model = lmer(response ~ verb + (1|workerid) + (1|item), data=cd, REML=F)
summary(model)
# Tukey-adjusted pairwise contrasts between all predicates
comparison = lsmeans(model, pairwise~verb,adjust="tukey")
options(max.print=10000)
comparison
# $contrasts
# contrast estimate SE df t.ratio p.value
# MC - pretend -0.042020639 0.01916977 49.97 -2.192 0.8098
# MC - be_right -0.071607572 0.01916501 50.13 -3.736 0.0542
# MC - think -0.083783582 0.01916492 50.07 -4.372 0.0089
# MC - suggest -0.111608001 0.01912903 50.69 -5.834 0.0001
# MC - say -0.133890950 0.01912899 50.72 -6.999 <.0001
# MC - prove -0.193307193 0.01914129 50.50 -10.099 <.0001
# MC - confirm -0.231812154 0.01917434 50.04 -12.090 <.0001
# MC - establish -0.251595754 0.01913785 50.56 -13.146 <.0001
# MC - demonstrate -0.381538510 0.01913109 50.66 -19.943 <.0001
# MC - announce -0.471695123 0.01912825 50.73 -24.660 <.0001
# MC - confess -0.527021744 0.01916157 50.13 -27.504 <.0001
# MC - admit -0.544807503 0.01912812 50.71 -28.482 <.0001
# MC - reveal -0.592362151 0.01912462 50.78 -30.974 <.0001
# MC - acknowledge -0.613560616 0.01916529 50.13 -32.014 <.0001
# MC - hear -0.634910506 0.01912981 50.72 -33.190 <.0001
# MC - discover -0.667312302 0.01913513 50.63 -34.874 <.0001
# MC - inform -0.693266524 0.01915643 50.26 -36.190 <.0001
# MC - see -0.697664502 0.01914503 50.43 -36.441 <.0001
# MC - know -0.752791183 0.01913805 50.55 -39.335 <.0001
# MC - be_annoyed -0.773522289 0.01915574 50.30 -40.381 <.0001
# pretend - be_right -0.029586933 0.02175318 352.08 -1.360 0.9989
# pretend - think -0.041762942 0.02175315 351.11 -1.920 0.9404
# pretend - suggest -0.069587361 0.02172153 363.43 -3.204 0.1553
# pretend - say -0.091870311 0.02172148 363.91 -4.229 0.0051
# pretend - prove -0.151286554 0.02173230 359.55 -6.961 <.0001
# pretend - confirm -0.189791515 0.02176139 349.96 -8.721 <.0001
# pretend - establish -0.209575115 0.02172928 360.73 -9.645 <.0001
# pretend - demonstrate -0.339517871 0.02172331 362.84 -15.629 <.0001
# pretend - announce -0.429674484 0.02172086 363.99 -19.782 <.0001
# pretend - confess -0.485001104 0.02175018 352.32 -22.299 <.0001
# pretend - admit -0.502786864 0.02172074 363.80 -23.148 <.0001
# pretend - reveal -0.550341512 0.02171763 365.20 -25.341 <.0001
# pretend - acknowledge -0.571539977 0.02175347 351.98 -26.274 <.0001
# pretend - hear -0.592889867 0.02172222 363.73 -27.294 <.0001
# pretend - discover -0.625291662 0.02172689 361.93 -28.780 <.0001
# pretend - inform -0.651245884 0.02174563 354.69 -29.948 <.0001
# pretend - see -0.655643862 0.02173562 358.16 -30.164 <.0001
# pretend - know -0.710770543 0.02172942 360.62 -32.710 <.0001
# pretend - be_annoyed -0.731501649 0.02174507 355.26 -33.640 <.0001
# be_right - think -0.012176009 0.02174894 353.91 -0.560 1.0000
# be_right - suggest -0.040000428 0.02171732 366.37 -1.842 0.9600
# be_right - say -0.062283378 0.02171730 366.84 -2.868 0.3344
# be_right - prove -0.121699621 0.02172813 362.42 -5.601 <.0001
# be_right - confirm -0.160204582 0.02175720 352.72 -7.363 <.0001
# be_right - establish -0.179988182 0.02172509 363.62 -8.285 <.0001
# be_right - demonstrate -0.309930938 0.02171914 365.74 -14.270 <.0001
# be_right - announce -0.400087551 0.02171664 366.95 -18.423 <.0001
# be_right - confess -0.455414171 0.02174594 355.15 -20.942 <.0001
# be_right - admit -0.473199931 0.02171652 366.75 -21.790 <.0001
# be_right - reveal -0.520754579 0.02171338 368.20 -23.983 <.0001
# be_right - acknowledge -0.541953044 0.02174927 354.77 -24.918 <.0001
# be_right - hear -0.563302934 0.02171802 366.67 -25.937 <.0001
# be_right - discover -0.595704730 0.02172270 364.85 -27.423 <.0001
# be_right - inform -0.621658951 0.02174146 357.50 -28.593 <.0001
# be_right - see -0.626056929 0.02173140 361.04 -28.809 <.0001
# be_right - know -0.681183610 0.02172529 363.46 -31.354 <.0001
# be_right - be_annoyed -0.701914716 0.02174087 358.10 -32.285 <.0001
# think - suggest -0.027824419 0.02171720 365.42 -1.281 0.9995
# think - say -0.050107369 0.02171718 365.89 -2.307 0.7508
# think - prove -0.109523612 0.02172804 361.46 -5.041 0.0001
# think - confirm -0.148028572 0.02175712 351.80 -6.804 <.0001
# think - establish -0.167812173 0.02172500 362.66 -7.724 <.0001
# think - demonstrate -0.297754928 0.02171905 364.77 -13.709 <.0001
# think - announce -0.387911542 0.02171658 365.95 -17.862 <.0001
# think - confess -0.443238162 0.02174589 354.19 -20.383 <.0001
# think - admit -0.461023921 0.02171643 365.78 -21.229 <.0001
# think - reveal -0.508578569 0.02171334 367.18 -23.422 <.0001
# think - acknowledge -0.529777035 0.02174921 353.82 -24.358 <.0001
# think - hear -0.551126924 0.02171787 365.74 -25.377 <.0001
# think - discover -0.583528720 0.02172260 363.89 -26.863 <.0001
# think - inform -0.609482942 0.02174134 356.58 -28.033 <.0001
# think - see -0.613880920 0.02173129 360.11 -28.249 <.0001
# think - know -0.669007601 0.02172518 362.52 -30.794 <.0001
# think - be_annoyed -0.689738707 0.02174078 357.16 -31.726 <.0001
# suggest - say -0.022282950 0.02168553 379.04 -1.028 1.0000
# suggest - prove -0.081699192 0.02169639 374.37 -3.766 0.0285
# suggest - confirm -0.120204153 0.02172556 364.09 -5.533 <.0001
# suggest - establish -0.139987753 0.02169334 375.64 -6.453 <.0001
# suggest - demonstrate -0.269930509 0.02168737 377.89 -12.446 <.0001
# suggest - announce -0.360087123 0.02168490 379.13 -16.605 <.0001
# suggest - confess -0.415413743 0.02171425 366.68 -19.131 <.0001
# suggest - admit -0.433199502 0.02168475 378.95 -19.977 <.0001
# suggest - reveal -0.480754150 0.02168168 380.41 -22.173 <.0001
# suggest - acknowledge -0.501952616 0.02171755 366.31 -23.113 <.0001
# suggest - hear -0.523302505 0.02168625 378.86 -24.131 <.0001
# suggest - discover -0.555704301 0.02169097 376.91 -25.619 <.0001
# suggest - inform -0.581658523 0.02170974 369.17 -26.793 <.0001
# suggest - see -0.586056501 0.02169969 372.89 -27.008 <.0001
# suggest - know -0.641183182 0.02169352 375.49 -29.556 <.0001
# suggest - be_annoyed -0.661914288 0.02170912 369.83 -30.490 <.0001
# say - prove -0.059416243 0.02169635 374.86 -2.739 0.4254
# say - confirm -0.097921204 0.02172555 364.55 -4.507 0.0016
# say - establish -0.117704804 0.02169334 376.11 -5.426 <.0001
# say - demonstrate -0.247647560 0.02168737 378.36 -11.419 <.0001
# say - announce -0.337804173 0.02168484 379.66 -15.578 <.0001
# say - confess -0.393130793 0.02171422 367.16 -18.105 <.0001
# say - admit -0.410916553 0.02168471 379.46 -18.950 <.0001
# say - reveal -0.458471201 0.02168165 380.91 -21.146 <.0001
# say - acknowledge -0.479669666 0.02171752 366.79 -22.087 <.0001
# say - hear -0.501019556 0.02168617 379.41 -23.103 <.0001
# say - discover -0.533421352 0.02169092 377.43 -24.592 <.0001
# say - inform -0.559375573 0.02170970 369.66 -25.766 <.0001
# say - see -0.563773551 0.02169966 373.39 -25.981 <.0001
# say - know -0.618900232 0.02169349 375.98 -28.529 <.0001
# say - be_annoyed -0.639631338 0.02170909 370.31 -29.464 <.0001
# prove - confirm -0.038504961 0.02173640 360.15 -1.771 0.9731
# prove - establish -0.058288561 0.02170419 371.50 -2.686 0.4651
# prove - demonstrate -0.188231317 0.02169820 373.73 -8.675 <.0001
# prove - announce -0.278387930 0.02169568 374.99 -12.831 <.0001
# prove - confess -0.333714550 0.02172508 362.71 -15.361 <.0001
# prove - admit -0.351500310 0.02169558 374.77 -16.201 <.0001
# prove - reveal -0.399054958 0.02169251 376.20 -18.396 <.0001
# prove - acknowledge -0.420253423 0.02172836 362.36 -19.341 <.0001
# prove - hear -0.441603313 0.02169707 374.69 -20.353 <.0001
# prove - discover -0.474005109 0.02170182 372.75 -21.842 <.0001
# prove - inform -0.499959330 0.02172053 365.19 -23.018 <.0001
# prove - see -0.504357308 0.02171049 368.84 -23.231 <.0001
# prove - know -0.559483989 0.02170436 371.35 -25.777 <.0001
# prove - be_annoyed -0.580215095 0.02171994 365.81 -26.713 <.0001
# confirm - establish -0.019783600 0.02173331 361.39 -0.910 1.0000
# confirm - demonstrate -0.149726356 0.02172736 363.49 -6.891 <.0001
# confirm - announce -0.239882969 0.02172486 364.68 -11.042 <.0001
# confirm - confess -0.295209590 0.02175419 352.98 -13.570 <.0001
# confirm - admit -0.312995349 0.02172471 364.51 -14.407 <.0001
# confirm - reveal -0.360549997 0.02172167 365.86 -16.599 <.0001
# confirm - acknowledge -0.381748462 0.02175744 352.66 -17.546 <.0001
# confirm - hear -0.403098352 0.02172625 364.40 -18.554 <.0001
# confirm - discover -0.435500148 0.02173092 362.60 -20.041 <.0001
# confirm - inform -0.461454369 0.02174966 355.33 -21.217 <.0001
# confirm - see -0.465852348 0.02173966 358.81 -21.429 <.0001
# confirm - know -0.520979028 0.02173343 361.29 -23.971 <.0001
# confirm - be_annoyed -0.541710134 0.02174904 355.95 -24.907 <.0001
# establish - demonstrate -0.129942756 0.02169515 375.00 -5.989 <.0001
# establish - announce -0.220099369 0.02169266 376.24 -10.146 <.0001
# establish - confess -0.275425990 0.02172202 363.93 -12.680 <.0001
# establish - admit -0.293211749 0.02169254 376.04 -13.517 <.0001
# establish - reveal -0.340766397 0.02168946 377.49 -15.711 <.0001
# establish - acknowledge -0.361964862 0.02172535 363.54 -16.661 <.0001
# establish - hear -0.383314752 0.02169403 375.97 -17.669 <.0001
# establish - discover -0.415716548 0.02169871 374.07 -19.159 <.0001
# establish - inform -0.441670769 0.02171751 366.39 -20.337 <.0001
# establish - see -0.446068748 0.02170745 370.08 -20.549 <.0001
# establish - know -0.501195428 0.02170132 372.61 -23.095 <.0001
# establish - be_annoyed -0.521926534 0.02171688 367.05 -24.033 <.0001
# demonstrate - announce -0.090156613 0.02168671 378.48 -4.157 0.0068
# demonstrate - confess -0.145483234 0.02171609 366.04 -6.699 <.0001
# demonstrate - admit -0.163268993 0.02168659 378.27 -7.529 <.0001
# demonstrate - reveal -0.210823641 0.02168350 379.75 -9.723 <.0001
# demonstrate - acknowledge -0.232022106 0.02171937 365.68 -10.683 <.0001
# demonstrate - hear -0.253371996 0.02168808 378.19 -11.683 <.0001
# demonstrate - discover -0.285773792 0.02169275 376.29 -13.174 <.0001
# demonstrate - inform -0.311728014 0.02171154 368.55 -14.358 <.0001
# demonstrate - see -0.316125992 0.02170150 372.26 -14.567 <.0001
# demonstrate - know -0.371252672 0.02169533 374.84 -17.112 <.0001
# demonstrate - be_annoyed -0.391983778 0.02171092 369.20 -18.055 <.0001
# announce - confess -0.055326620 0.02171357 367.26 -2.548 0.5718
# announce - admit -0.073112380 0.02168406 379.57 -3.372 0.0981
# announce - reveal -0.120667028 0.02168102 381.01 -5.566 <.0001
# announce - acknowledge -0.141865493 0.02171682 366.93 -6.533 <.0001
# announce - hear -0.163215383 0.02168556 379.49 -7.526 <.0001
# announce - discover -0.195617179 0.02169026 377.55 -9.019 <.0001
# announce - inform -0.221571400 0.02170904 369.77 -10.206 <.0001
# announce - see -0.225969378 0.02169903 373.47 -10.414 <.0001
# announce - know -0.281096059 0.02169282 376.11 -12.958 <.0001
# announce - be_annoyed -0.301827165 0.02170843 370.42 -13.904 <.0001
# confess - admit -0.017785759 0.02171345 367.07 -0.819 1.0000
# confess - reveal -0.065340407 0.02171039 368.45 -3.010 0.2478
# confess - acknowledge -0.086538873 0.02174624 355.04 -3.979 0.0134
# confess - hear -0.107888762 0.02171498 366.96 -4.968 0.0002
# confess - discover -0.140290558 0.02171962 365.17 -6.459 <.0001
# confess - inform -0.166244780 0.02173840 357.79 -7.648 <.0001
# confess - see -0.170642758 0.02172837 361.32 -7.853 <.0001
# confess - know -0.225769439 0.02172222 363.77 -10.393 <.0001
# confess - be_annoyed -0.246500545 0.02173779 358.41 -11.340 <.0001
# admit - reveal -0.047554648 0.02168088 380.81 -2.193 0.8226
# admit - acknowledge -0.068753113 0.02171675 366.70 -3.166 0.1709
# admit - hear -0.090103003 0.02168545 379.27 -4.155 0.0068
# admit - discover -0.122504799 0.02169011 377.37 -5.648 <.0001
# admit - inform -0.148459021 0.02170898 369.52 -6.839 <.0001
# admit - see -0.152856999 0.02169890 373.28 -7.044 <.0001
# admit - know -0.207983680 0.02169272 375.88 -9.588 <.0001
# admit - be_annoyed -0.228714786 0.02170827 370.26 -10.536 <.0001
# reveal - acknowledge -0.021198465 0.02171367 368.09 -0.976 1.0000
# reveal - hear -0.042548355 0.02168234 380.77 -1.962 0.9275
# reveal - discover -0.074950151 0.02168705 378.81 -3.456 0.0768
# reveal - inform -0.100904373 0.02170584 370.99 -4.649 0.0009
# reveal - see -0.105302351 0.02169580 374.73 -4.854 0.0003
# reveal - know -0.160429031 0.02168964 377.33 -7.397 <.0001
# reveal - be_annoyed -0.181160138 0.02170524 371.63 -8.346 <.0001
# acknowledge - hear -0.021349890 0.02171823 366.62 -0.983 1.0000
# acknowledge - discover -0.053751686 0.02172293 364.79 -2.474 0.6289
# acknowledge - inform -0.079705907 0.02174171 357.42 -3.666 0.0400
# acknowledge - see -0.084103885 0.02173165 360.97 -3.870 0.0199
# acknowledge - know -0.139230566 0.02172545 363.46 -6.409 <.0001
# acknowledge - be_annoyed -0.159961672 0.02174108 358.05 -7.358 <.0001
# hear - discover -0.032401796 0.02169165 377.25 -1.494 0.9963
# hear - inform -0.058356018 0.02171044 369.47 -2.688 0.4634
# hear - see -0.062753996 0.02170037 373.22 -2.892 0.3186
# hear - know -0.117880676 0.02169422 375.79 -5.434 <.0001
# hear - be_annoyed -0.138611782 0.02170981 370.14 -6.385 <.0001
# discover - inform -0.025954222 0.02171514 367.61 -1.195 0.9998
# discover - see -0.030352200 0.02170507 371.33 -1.398 0.9985
# discover - know -0.085478881 0.02169893 373.88 -3.939 0.0154
# discover - be_annoyed -0.106209987 0.02171446 368.32 -4.891 0.0003
# inform - see -0.004397978 0.02172387 363.74 -0.202 1.0000
# inform - know -0.059524659 0.02171765 366.27 -2.741 0.4237
# inform - be_annoyed -0.080255765 0.02173323 360.85 -3.693 0.0366
# see - know -0.055126681 0.02170760 369.95 -2.540 0.5784
# see - be_annoyed -0.075857787 0.02172323 364.40 -3.492 0.0691
# know - be_annoyed -0.020731106 0.02171705 366.90 -0.955 1.0000
# JD CODE STARTS HERE ----
# TL;DR: all verbs are different from main clause (non-projecting) controls (called "control")
# re-load the clean data from scratch for this section
cd <- read.csv(file="../data/cd.csv", header=TRUE, sep=",")
table(cd$verb)
# Bayesian mixed effects regression to test whether ratings differ by predicate from good controls
cd$workerid = as.factor(as.character(cd$workerid))
cd$item = as.factor(paste(as.character(cd$verb),as.character(cd$content)))
cd$content = as.factor(as.character(cd$content))
# flags: main-clause control trials, and responses at the scale endpoints
cd$isMC = cd$verb == "MC"
cd$isZeroOne = (cd$response == 0 | cd$response == 1)
# three-way predicate classification; MC controls are grouped with the non-factives
cd = cd %>%
mutate(predicate_type_ternary = as.factor(case_when(verb %in% c("know", "reveal","see","discover","be_annoyed") ~ "factive",
verb %in% c("think","say","pretend","suggest","MC") ~ "non-factive",
# verb == "MC" ~ "control",
TRUE ~ "opt-factive")))
# make "non-factive" the reference level
cd = cd %>%
mutate(predicate_type_ternary = fct_relevel(predicate_type_ternary,"non-factive","opt-factive")) %>%
droplevels()
# per-subject response histograms, MC controls highlighted, to eyeball subject variability
p = ggplot(cd, aes(x=response,fill=isMC)) +
geom_histogram() +
facet_wrap(~workerid)
ggsave(p, file="../graphs/subject_variability.pdf",width=25,height=25)
# set reference level to main clause controls
d = cd %>%
droplevels() %>%
mutate(verb = fct_relevel(verb,"MC"))
table(d$verb)
# run beta regression
# first, because response assumes values of 0 and 1, which beta regression cannot handle, transform: (Smithson & Verkuilen 2006)
# y'' = (y' * (n - 1) + 0.5) / n
# note: first rescaling of y'=(y-a)/(b-a) not necessary because highest and lowest value are 0 and 1 already
d$betaresponse = (d$response*(nrow(d)-1) + .5)/nrow(d)
prior = get_prior(betaresponse ~ verb + (1|workerid) + (1|item),family = Beta(),data=d)
prior
# model both the mean and the precision (phi) of the beta distribution by verb
betamodel = bf(betaresponse ~ verb + (1|workerid) + (1|item),
phi ~ verb + (1|workerid) + (1|item), # beta distribution's precision )
family = Beta())
m.b = brm(formula = betamodel,
family=Beta(),
data=d,
cores = 4,
control = list(adapt_delta = .95,max_treedepth=15))
summary(m.b)
saveRDS(m.b,file="../data/beta-model-mixed.rds")
# to load model
m.b = readRDS(file="../data/beta-model-mixed.rds")
# to get stan code
stancode(m.b)
summary(m.b)
fixef(m.b) # does the same thing
# create LaTeX table
# NOTE(review): mcmcReg is not provided by the packages loaded above -- confirm its source is attached
mcmcReg(m.b, pars = "b_", file="../models/brm_output.tex")
# hypothesis-testing, probing posterior model
# posterior test: does pretend differ from the MC intercept (on the response scale)?
q = c(q_pretend_MC = "plogis(verbpretend-Intercept) = plogis(Intercept)")
q_answer = hypothesis(m.b, q)
q_answer
plot(q_answer)
prop.table(table(q_answer$samples$H1 > 0))
# posterior_samples(m.b, pars = "b_") %>%
# mutate_at(c("b_phi_Intercept",paste("b_",c(dimnames(fixef(m.b))[[1]])[23:42],sep="")), exp) %>%
# mutate_at(c("b_Intercept",paste("b_",c(dimnames(fixef(m.b))[[1]])[3:22],sep="")), plogis) %>%
# posterior_summary() %>%
# as.data.frame() %>%
# rownames_to_column("Parameter") %>%
# kable(digits = 2)
# run alternate beta model with factivity fixed effect
# create factivity variable -- classify MC controls as "non-factive" to stack deck against yourself
d = d %>%
mutate(predicate_type = as.factor(case_when(verb %in% c("know", "reveal","see","discover","be_annoyed") ~ "factive",
# verb == "MC" ~ "control",
TRUE ~ "non-factive")))
d = d %>%
mutate(predicate_type = fct_relevel(predicate_type,"non-factive")) %>%
droplevels()
# binary factivity predictor, otherwise the same structure as the by-verb beta model
betamodel.fact = bf(betaresponse ~ predicate_type + (1|workerid) + (1|item),
phi ~ predicate_type + (1|workerid) + (1|item), # beta distribution's precision )
family = Beta())
m.b.fact = brm(formula = betamodel.fact,
family=Beta(),
data=d,
cores = 4,
control = list(adapt_delta = .95,max_treedepth=15))
summary(m.b.fact)
saveRDS(m.b.fact,file="../data/beta-model-fact-mixed.rds")
# to load model
m.b.fact = readRDS(file="../data/beta-model-fact-mixed.rds")
# ternary factivity predictor model
betamodel.fact.ternary = bf(betaresponse ~ predicate_type_ternary + (1|workerid) + (1|item),
phi ~ predicate_type_ternary + (1|workerid) + (1|item), # beta distribution's precision )
family = Beta())
m.b.fact.ternary = brm(formula = betamodel.fact.ternary,
family=Beta(),
data=d,
cores = 4,
control = list(adapt_delta = .95,max_treedepth=15))
summary(m.b.fact.ternary)
saveRDS(m.b.fact.ternary,file="../data/beta-model-fact-ternary-mixed.rds")
# to load model
m.b.fact.ternary = readRDS(file="../data/beta-model-fact-ternary-mixed.rds")
# model comparison between binary factivity and predicate-specific predictor model.
# attach WAIC to each fit so loo_compare can rank them
m.b.fact.ternary = add_criterion(m.b.fact.ternary, "waic")
m.b.fact = add_criterion(m.b.fact, "waic")
m.b = add_criterion(m.b, "waic")
# look at absolute waic
waic(m.b.fact) # -18643.9, higher -> worse
waic(m.b.fact.ternary) # -
waic(m.b) # -18971, lower -> better
loo_compare(m.b.fact, m.b, criterion = "waic")
##################################
# fit linear model, first Bayesian then frequentist -- same qualitative result (except pretend's lower bound is now included in 95% credible interval)
summary(d %>% select(response,verb,workerid,item))
str(d %>% select(response,verb,workerid,item))
# Bayesian linear (Gaussian) mixed model on the raw responses
m <- brm(
formula = response ~ verb + (1|workerid) + (1|item),
data = d,
cores = 4,
control = list(adapt_delta = .95)
# file = here::here("zoib-ex")
)
# no need to run this multiple times:
saveRDS(m,file="../data/linear-model-mixed.rds")
# load linear model ----
m <- readRDS(file="../data/linear-model-mixed.rds")
summary(m) # see summary printed below
# let's look at pretend in particular
q = c(q_pretend_MC_mean = "Intercept + verbpretend = Intercept")
q_answer = hypothesis(m, q)
q_answer
plot(q_answer)
prop.table(table(q_answer$samples$H1 > 0)) # prob (pretend > MC) = .97
# fit frequentist linear model for comparison
m = lmer(response ~ verb + (1|workerid) + (1|item), data = d)
summary(m)
check_model(m) # shows some non-normality of residuals as well as violation of homoscedasticity assumption
# refit via lmerTest to obtain p-values in the summary
m = lmerTest::lmer(response ~ verb + (1|workerid) + (1|item), data = d)
summary(m)
# fit frequentist linear models with verb (m.verb) and factivity (m.fact) predictor, compare
d$verb_noMC = fct_relevel(d$verb,"pretend")
# drop the MC controls for the verb-vs-factivity model comparison
d_noMC = d %>%
filter(verb != "MC") %>%
mutate(predicate_type = fct_relevel(predicate_type,"non-factive")) %>%
droplevels()
m.verb = lmer(response ~ verb_noMC + (1|workerid) + (1|item), data = d_noMC)
summary(m.verb)
m.fact = lmer(response ~ predicate_type + (1|workerid) + (1|item), data = d_noMC)
summary(m.fact)
check_model(m.fact)
# rank models by two different measures of model quality -- in both cases, verb model is better than binary factivity model
# rank models by BIC
model.sel(m.verb,m.fact, rank = BIC)
# rank models by AIC
model.sel(m.verb,m.fact, rank = AIC)
# To compute marginal R^2 (variance explained by fixed effects) and conditional R^2 (variance explained by fixed and random effects):
r.squaredGLMM(m.verb)
# marginal R^2 (variance explained by fixed effects): .47
# conditional R^2 (variance explained by fixed and random effects jointly): .54
r.squaredGLMM(m.fact)
# marginal R^2 (variance explained by fixed effects): .18
# conditional R^2 (variance explained by fixed and random effects jointly): .54
|
c09dda8641b86057e46763c7396c11f603561aac
|
8aee8bed5a2828acfcbc27e0de2df4240ec7aac8
|
/gbm/gbm_func.R
|
e38aa6276bbef72bb8b6a8be610d4e431a62f57d
|
[] |
no_license
|
melinkoh/IowaHousing
|
51effaadc441667d825be7caabd1af47769ef308
|
3abf3e4bfd87427b4222f099047ce728d33a405d
|
refs/heads/master
| 2021-01-16T19:10:36.897650
| 2017-08-20T19:41:55
| 2017-08-20T19:41:55
| 100,147,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,001
|
r
|
gbm_func.R
|
gbm_func <- function(datalist) {
  # Fit a gradient-boosted model on the training split carried by `datalist`
  # and return back-transformed SalePrice predictions for the test split.
  #
  # `datalist` is the output of the preprocessing/transform step; the splits
  # are stored as attributes "train" and "test".
  train_set <- attr(datalist, "train")
  test_set <- attr(datalist, "test")

  # Boosted regression with a laplace loss; the first column (an ID) is
  # dropped before fitting.
  fitted_gbm <- gbm(
    SalePrice ~ .,
    data = train_set[, -1],
    distribution = "laplace",
    shrinkage = 0.05,
    interaction.depth = 5,
    bag.fraction = 0.66,
    n.minobsinnode = 1,
    cv.folds = 100,
    keep.data = FALSE,
    verbose = FALSE,
    n.trees = 1000
  )

  # Predict on the test split, then invert the log1p transform
  # (presumably applied to SalePrice upstream -- confirm in the transform step).
  predictions <- predict(fitted_gbm, newdata = test_set)
  exp(predictions) - 1
}
|
660862ac3abdc61bb7bdbe1f41c8c159bbf12124
|
0e311f3197baac2efea92039be9fce5d648a8969
|
/hello.r
|
8049e5b959fb187adcfbd836b6efef81012e2278
|
[
"MIT"
] |
permissive
|
iajzenszmi/CodeCode
|
dbd6e600e14886879836259fab7bed1fc63578f9
|
b3c58357f346c49025291f28b94fee2666523b55
|
refs/heads/master
| 2023-02-16T23:03:40.956645
| 2023-02-08T08:05:17
| 2023-02-08T08:05:17
| 108,805,048
| 2
| 1
| null | 2023-02-08T08:05:20
| 2017-10-30T05:18:40
|
Fortran
|
UTF-8
|
R
| false
| false
| 77
|
r
|
hello.r
|
# My first program: assemble a greeting string and print it.
myString <- paste("Hello", "from", "Ian")
print(myString)
|
d833b9928eb057f5a1e29cf3c568d311a52e5a29
|
233b586d268d93a39ab6ebe9bd63fe7c750a3648
|
/R/User.R
|
587fc98ac4b747fbbc9c71267689f0f936cb051f
|
[
"MIT"
] |
permissive
|
Glewando/polished
|
1fa7d26216208c1759f16190dbe351ad1362ab79
|
de5f9e4bc10f4ffc3e8ae61eec9f5e592c4876bf
|
refs/heads/master
| 2020-07-13T00:58:07.184702
| 2019-08-28T03:01:21
| 2019-08-28T03:01:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,041
|
r
|
User.R
|
#' User
#'
#' R6 class to track the polish user. An instance of this class named `polish__user` should
#' be created in "global.R" of the Shiny app.
#'
#' @export
#'
#' @importFrom R6 R6Class
#' @importFrom httr GET content stop_for_status
#' @importFrom jsonlite fromJSON
#'
#'
#'
#'
User <- R6::R6Class(
classname = "User",
public = list(
# Construct a user and immediately authenticate it against the Firebase
# backend; construction fails (stopifnot) if authentication does not succeed.
initialize = function(firebase_functions_url, firebase_auth_token, app_name, polished_session) {
stopifnot(length(firebase_functions_url) == 1 && is.character(firebase_functions_url))
stopifnot(length(firebase_auth_token) == 1 && is.character(firebase_auth_token))
stopifnot(length(app_name) == 1 && is.character(app_name))
stopifnot(length(polished_session) == 1)
self$firebase_functions_url <- firebase_functions_url
self$token <- firebase_auth_token
self$app_name <- app_name
# populates the private auth fields below
self$sign_in_with_token(firebase_auth_token)
stopifnot(isTRUE(private$is_authed))
private$polished_session <- polished_session
invisible(self)
},
# base URL of the deployed Firebase cloud functions
firebase_functions_url = character(0),
# Firebase auth token supplied at construction
token = character(0),
app_name = character(0),
# Exchange the auth token for a user record via the "signInWithToken"
# cloud function and cache the result in the private fields. A NULL
# response leaves the object in a signed-out state.
sign_in_with_token = function(firebase_auth_token) {
# firebase function callable via url
url_out <- paste0(self$firebase_functions_url, "signInWithToken")
response <- httr::GET(
url_out,
query = list(
token = firebase_auth_token,
app_name = self$app_name
)
)
httr::warn_for_status(response)
user_text <- httr::content(response, "text")
user <- jsonlite::fromJSON(user_text)
if (is.null(user)) {
# authentication failed: reset all user state
private$is_authed <- FALSE
private$email <- character(0)
private$is_admin <- FALSE
private$role <- character(0)
private$email_verified <- FALSE
private$uid <- character(0)
} else {
private$is_authed <- TRUE
private$email <- user$email
private$is_admin <- user$is_admin
private$role <- user$role
private$email_verified <- user$email_verified
private$uid <- user$uid
}
invisible(self)
},
# Record another user that this user is viewing the app as.
# Silently ignored for non-admin users.
set_signed_in_as = function(user_to_sign_in_as) {
if (private$is_admin) {
private$signed_in_as <- user_to_sign_in_as
}
invisible(self)
},
# Re-query the "getUser" cloud function for this uid and update the
# cached email-verification flag.
refreshEmailVerification = function(firebase_auth_token) {
url_out <- paste0(self$firebase_functions_url, "getUser")
response <- httr::GET(
url_out,
query = list(
uid = private$uid,
token = firebase_auth_token
)
)
httr::warn_for_status(response)
user_text <- httr::content(response, "text")
user <- jsonlite::fromJSON(user_text)
private$email_verified <- user$emailVerified
invisible(self)
},
# --- read-only accessors for public/private state ---
get_token = function() {
self$token
},
get_is_authed = function() {
private$is_authed
},
get_email = function() {
private$email
},
get_is_admin = function() {
private$is_admin
},
get_role = function() {
private$role
},
get_email_verified = function() {
private$email_verified
},
get_uid = function() {
private$uid
},
get_signed_in_as = function() {
private$signed_in_as
},
clear_signed_in_as = function() {
private$signed_in_as <- NULL
},
get_polished_session = function() {
private$polished_session
}
),
private = list(
email = character(0),
is_admin = FALSE,
role = character(0),
# "authorizing" until sign_in_with_token() sets TRUE/FALSE
is_authed = "authorizing",
email_verified = FALSE,
uid = character(0),
# optional use to sign in as
signed_in_as = NULL,
polished_session = numeric(0)
)
)
#' Users
#'
#' R6 class to track the polish user. An instance of this class named `polish__user` should
#' be created in "global.R" of the Shiny app.
#'
#' @export
#'
#' @importFrom R6 R6Class
#' @importFrom httr GET content
#' @importFrom jsonlite fromJSON
#'
Users <- R6::R6Class(
  classname = "Users",
  public = list(

    # Registry of active `User` objects.
    users = vector("list", length = 0),

    # Append a user to the registry; returns self invisibly for chaining.
    add_user = function(user) {
      self$users <- c(self$users, list(user))
      invisible(self)
    },

    # Return the last registered user matching both uid and session, or
    # NULL when the registry is empty, uid is NULL, or nothing matches.
    find_user_by_uid = function(uid, polished_session) {
      if (length(self$users) == 0 || is.null(uid)) return(NULL)
      match_out <- NULL
      for (candidate in self$users) {
        if (candidate$get_uid() == uid &&
            candidate$get_polished_session() == polished_session) {
          match_out <- candidate
        }
      }
      match_out
    },

    # Remove the first registered user matching both uid and session.
    # Returns NULL early for an empty registry or NULL uid, otherwise
    # returns self invisibly.
    remove_user_by_uid = function(uid, polished_session) {
      if (length(self$users) == 0 || is.null(uid)) return(NULL)
      idx <- Position(
        function(u) {
          u$get_uid() == uid && u$get_polished_session() == polished_session
        },
        self$users
      )
      if (!is.na(idx)) {
        # Assigning NULL to a list element deletes it from the list.
        self$users[[idx]] <- NULL
      }
      invisible(self)
    }
  )
)
# Module-level singleton registry.
.global_users <- Users$new()
|
501191d7a009d71ed31678fbf7e955a4129e02b5
|
e869eeeeda90f5da4e0454b77d3a4e3fc4ae5392
|
/cachematrix.R
|
acfb9ff3b776d67927899d833413e751d892a0ad
|
[] |
no_license
|
Tuliro725/tulirorprogramming
|
cb449cd9d7b3d730c4f278ae458d1e2380414aa3
|
e679f5f582a4f5bc14846d29bb423e3f04f18b59
|
refs/heads/master
| 2021-01-20T20:08:00.463377
| 2016-05-29T15:54:51
| 2016-05-29T15:54:51
| 59,949,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,106
|
r
|
cachematrix.R
|
# makeCacheMatrix: wrap a matrix together with a cache for its inverse.
# Returns a list of accessor functions:
#   set(y)          -- replace the stored matrix and clear the cached inverse
#   get()           -- return the stored matrix
#   setinverse(inv) -- store a computed inverse in the cache
#   getinverse()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
# cacheSolve: return the inverse of the special "matrix" created by
# makeCacheMatrix. If the inverse has already been computed, the cached
# value is returned and the computation is skipped; otherwise the inverse
# is computed with solve(), stored via setinverse(), and returned.
cacheSolve <- function(x, ...) {
  var1 <- x$getinverse()
  if (!is.null(var1)) {
    message("getting cached data.")
    # Bug fix: the original returned `inv`, an undefined object, so every
    # cache hit raised "object 'inv' not found". Return the cached value.
    return(var1)
  }
  data <- x$get()
  # Forward any extra arguments to solve().
  var1 <- solve(data, ...)
  x$setinverse(var1)
  var1
}
|
69d33d04129989717d9196c3a8109df54ce71674
|
5e1dcc20e409a5513796c387065fd1859acc46c9
|
/utils-r/.local/bin/rnewpkg
|
eb2aa93cebc136513cec2b192bb7a541d95af474
|
[] |
no_license
|
ashiklom/dotfiles
|
da2ef5ec85c4bd4e959d7fa20a9a35ae12f8dabe
|
e64c06724827008ca64b7076bfae9d446ba663cc
|
refs/heads/master
| 2023-08-03T23:31:25.499631
| 2023-07-25T19:49:01
| 2023-07-25T19:49:01
| 36,020,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
rnewpkg
|
#!/usr/bin/env Rscript
# Create a skeleton R package at the path given on the command line.
# Usage: rnewpkg <path>
# Bug fix: trailingOnly must be TRUE -- with FALSE, commandArgs() returns the
# full invocation (R binary, --file=..., etc.), so devtools::create() received
# a vector of bogus paths instead of the requested one.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1) {
  stop("usage: rnewpkg <path>", call. = FALSE)
}
devtools::create(path = args[1])
# vim: set filetype=r:
|
|
c773e1b721c9936da7633f2f1daf1a8068389f38
|
6c785f33479f50f79ab63514c241e0ea95115418
|
/analysis/scripts/results/table_all_models (Conflit de casse (1)).r
|
5f20057b557ce0fae355b155f15577fa3ce29055
|
[] |
no_license
|
DominiqueGravel/ms_probaweb
|
ad3b43c844ec5e1d67bf1abbe473ca70837c470d
|
6d37a7e330043af92f0fe357bf5195fd77d962bc
|
refs/heads/master
| 2020-03-27T16:35:57.598866
| 2018-08-30T19:07:20
| 2018-08-30T19:07:20
| 146,794,173
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,288
|
r
|
table_all_models (Conflit de casse (1)).r
|
##################################################################
# Compute the likelihood for each model and each species pair
# Record the results in a summary table
# Dominique Gravel
##################################################################
rm(list = ls())
# Source the functions
source("./scripts/functions/species_models.r")
source("./scripts/functions/interactions_models.r")
source("./scripts/functions/get_probs.r")
source("./scripts/functions/get_LL.r")
source("./scripts/functions/fit_models.r")
# Load & prepare the data
load("./data/expand_data.Rdata")
load("./data/DF_split.Rdata")
load("./data/pairs.Rdata")
IDi = data$pairs.IDi
IDj = data$pairs.IDj
Si = length(unique(IDi))
Sj = length(unique(IDj))
unique_IDi = unique(IDi)
unique_IDj = unique(IDj)
np = length(DF_split)
type = numeric(np)
# Matrices to store the results: one row per species pair,
# columns = (log-likelihood, number of parameters).
# Fix: use full argument names — `nr`/`nc` relied on partial argument
# matching, which lintr flags and which is fragile.
C2_L0 = matrix(nrow = np, ncol = 2)
C2_L1 = matrix(nrow = np, ncol = 2)
C2_L2 = matrix(nrow = np, ncol = 2)
C0_L2 = matrix(nrow = np, ncol = 2)
C1_L2 = matrix(nrow = np, ncol = 2)
C3_L2 = matrix(nrow = np, ncol = 2)
# Loop around all pairs of species: fit the six candidate models to each
# pair and record their log-likelihoods and parameter counts.
count = 1
# Fix: seq_len(np) instead of 1:np so the loop is skipped when np == 0
# (1:0 would iterate over c(1, 0) and index out of bounds).
for(x in seq_len(np)) {
  sub_data = DF_split[[x]]
  # First row, first two columns hold the pair's species IDs.
  IDs = as.matrix(sub_data[1,1:2])
  # Look the pair up in `pairs` in either orientation (i-j or j-i).
  test = which(pairs[,2] == as.character(IDs[1]) & pairs[,3]==as.character(IDs[2]) | pairs[,2]==as.character(IDs[2]) & pairs[,3]==as.character(IDs[1]))
  # NOTE(review): assigning a character here silently coerces the whole
  # `type` vector (initialised as numeric) to character — works, but subtle.
  if(length(test)!=0) type[x] = unique(as.character(pairs[test,4]))
  # Fit each candidate model (C* = species model, L* = interaction model;
  # defined in the sourced function files).
  models_C2_L0 = fit_models(sub_data, selection = FALSE, funC = C2, funL = L0)
  models_C2_L1 = fit_models(sub_data, selection = FALSE, funC = C2, funL = L1)
  models_C2_L2 = fit_models(sub_data, selection = FALSE, funC = C2, funL = L2)
  models_C0_L2 = fit_models(sub_data, selection = FALSE, funC = C0, funL = L2)
  models_C1_L2 = fit_models(sub_data, selection = FALSE, funC = C1, funL = L2)
  models_C3_L2 = fit_models(sub_data, selection = FALSE, funC = C3, funL = L2)
  # Record (log-likelihood, n parameters) for each model.
  C2_L0[x,] = get_LL(models_C2_L0, sub_data)
  C2_L1[x,] = get_LL(models_C2_L1, sub_data)
  C2_L2[x,] = get_LL(models_C2_L2, sub_data)
  C0_L2[x,] = get_LL(models_C0_L2, sub_data)
  C1_L2[x,] = get_LL(models_C1_L2, sub_data)
  C3_L2[x,] = get_LL(models_C3_L2, sub_data)
  # Print progress roughly every 10 pairs.
  if(count >= 10) {
    cat(x,'\n')
    count = 1
  }
  else count = count + 1
}
# Sum log-likelihoods and parameter counts by interaction type
# (SH = seed/host?, HP = host/parasitoid — see pairs metadata), across the
# six candidate models. Refactored: the six near-identical sum() blocks per
# quantity are replaced by one helper applied over a list of result
# matrices, preserving the original model order.
model_list = list(C2_L0, C2_L1, C2_L2, C0_L2, C1_L2, C3_L2)
# Column 1 of each matrix = log-likelihood, column 2 = number of parameters.
sum_by_type = function(int_type, col) {
  vapply(model_list, function(m) sum(m[type == int_type, col]), numeric(1))
}
sumLL_SH = sum_by_type("SH", 1)
npars_SH = sum_by_type("SH", 2)
sumLL_HP = sum_by_type("HP", 1)
npars_HP = sum_by_type("HP", 2)
# AIC = -2 * logLik + 2 * k
AIC_SH = -2*sumLL_SH + 2*npars_SH
AIC_HP = -2*sumLL_HP + 2*npars_HP
comparison_SH = cbind(sumLL_SH,npars_SH,AIC_SH)
comparison_HP = cbind(sumLL_HP,npars_HP,AIC_HP)
write.table(comparison_SH,"./tables/Table_model_comparison_SH.txt")
write.table(comparison_HP,"./tables/Table_model_comparison_HP.txt")
|
b872b448772a100b3b0c9494b7942b0ccd2eeb39
|
eed0f06b17a05967f7cb474f9ec0a64da3563be4
|
/trees/celltypes_hclust_hvc_ra_x_gaba_int_sub2_regress_zf_ortho.R
|
3c2328331881c21161a918fd91f5f5e93fa5f6ac
|
[] |
no_license
|
bradleycolquitt/songbird_cells
|
7c4a4b00893eb8324499b84a2e656ecb92d01d6d
|
c5f2513015af8ac6244309bc6c306de11d5fa148
|
refs/heads/master
| 2023-04-15T22:31:26.496099
| 2022-02-10T18:32:30
| 2022-02-10T18:53:30
| 262,400,801
| 2
| 3
| null | 2020-05-12T19:59:28
| 2020-05-08T18:37:10
|
R
|
UTF-8
|
R
| false
| false
| 6,994
|
r
|
celltypes_hclust_hvc_ra_x_gaba_int_sub2_regress_zf_ortho.R
|
# Hierarchical clustering of HVC/RA/Area X GABA interneuron cell types
# (zebra finch orthologs). Inputs: a Seurat object saved with qs; outputs:
# a pvclust tree and a circular dendrogram plot.
library(Seurat)
library(tidyverse)
library(qs)
library(pvclust)
library(future)
library(dendextend)
library(ggdendro)
library(ggraph)
library(circlize)
library(cowplot)
# Start with sequential execution; a parallel plan is set later only for
# the expensive SCTransform step.
plan(sequential)
# Project-local helpers (not part of this file).
source("~/data2/rstudio/birds/utils/scRNA.R")
source("~/data2/rstudio/birds/utils/common_aesthetics.R")
# Parameters ---------------------------------------------------------------
# Linkage method for hclust/pvclust, and number of top markers per cluster.
hclust_method = "average"
nsig = 50
sub_dir = sprintf("%s_%s", hclust_method, nsig)
# Directories -------------------------------------------------------------
dir_root = "~/sdd/data2/rstudio/birds/scRNA"
dev_dir = file.path(dir_root, "devin_combined", "finch_cells")
dev_data_dir = file.path(dev_dir, "export")
data_fname= file.path(dev_data_dir, "HVC_RA_X.qs")
## Output dir
dir_out_root = "~/data2/rstudio/birds/scRNA"
dev_out_dir = file.path(dir_out_root, "devin_combined", "songbird_cells")
tree_dir = file.path(dev_out_dir, "trees")
script_name = "celltypes_hclust_hvc_ra_x_gaba_int_sub2_regress_zf_ortho"
tree_dir = file.path(tree_dir, script_name)
tree_sub_dir = file.path(tree_dir, sub_dir)
# NOTE(review): `recursive = T` uses the bare T alias; prefer TRUE.
dir.create(tree_sub_dir, recursive = T)
# Cached intermediate results (qs-serialised).
data_out_obj_fname = file.path(tree_dir, "obj_integrated_subclustered_gaba.qs")
data_out_avg_fname = file.path(tree_dir, "average_expr.qs")
data_out_pv_fname = file.path(tree_sub_dir, "pvclust.qs")
# Load finch data ---------------------------------------------------------
# Map the 23 original cluster ids (0-22) to human-readable cell-type labels.
x_key = data.frame(cluster_orig = seq(0,22),
                   label = c("MSN (1)", "MSN (2)", "MSN (3)", "MSN (4)", "MSN (5)",
                             "Astro (6)", "Astro (7)",
                             "GLUT (8)",
                             "Oligo (9)",
                             "PN (10)", "GLUT (11)", "Unk PPP1R1B (12)", "GABA SST (13)", "GABA PVALB (14)", "GABA PVALB (15)", "Micro (16)",
                             "GABA (17)", "Endo (18)", "GABA (19)", "OPC (20)", "GABA (21)", "GABA (22)", "GABA CHAT (23)"))
# Keep only the MSN / GABA / PN clusters for this analysis.
x_key_to_use = x_key %>% filter(grepl("MSN|GABA|PN", label))
# Fix: spell out FALSE — bare `F` is an ordinary, re-assignable variable.
# Set to TRUE to rebuild the filtered/normalised object from the raw input;
# otherwise the cached qs file is reused.
redo = FALSE
if (redo) {
  #load(data_fname)
  #obj_int = HVCX_seurat_REGION_res1.1
  obj_int = qread(data_fname)
  # Cells in the selected clusters, plus any subclustered "GABA-<n>" cells.
  cells = Cells(obj_int)[obj_int$cluster_orig %in% x_key_to_use$cluster_orig]
  cells2 = Cells(obj_int)[grepl("GABA-[0-9]", obj_int$cluster_orig)]
  cells = union(cells, cells2)
  obj_int_filt = subset(obj_int, cells=cells)
  table(obj_int_filt$cluster_orig)
  # NOTE(review): future::multiprocess is deprecated in recent `future`
  # releases (multisession is the replacement) — confirm installed version.
  plan(multiprocess(workers=10))
  # Normalise while regressing out mitochondrial fraction and dataset batch.
  obj_int_filt = SCTransform(obj_int_filt, vars.to.regress = c("percent.mito", "dataset"))
  qsave(obj_int_filt, data_out_obj_fname)
} else {
  obj_int_filt = qread(data_out_obj_fname)
}
# Average data ------------------------------------------------------------
# Collapse subcluster suffixes ("A-B-C" -> "A-B") into `cluster_orig1`,
# attach the result as metadata, and compute per-(region, cluster)
# average expression.
#obj_int_filt$region_cluster_orig = paste(obj_int_filt$region, obj_int_filt$cluster_orig, sep="_")
md = FetchData(obj_int_filt, c("cluster_orig")) %>%
  distinct(cluster_orig)
# Split each cluster id on "-"; keep 1- and 2-part ids as-is, truncate
# 3-part ids to their first two parts.
md = md %>% mutate(s = map(cluster_orig, function(x) unlist(str_split(x, "-")))) %>%
  mutate(s1 = map_chr(s, function(x) {
    case_when(length(x) == 1 ~ paste(x, collapse = "-"),
              length(x) == 2 ~ paste(x, collapse = "-"),
              length(x) == 3 ~ paste(x[1:2], collapse='-'))
  })) %>%
  mutate(cluster_orig1 = s1)
# Re-expand to one row per cell so AddMetaData can match on rownames.
md_full = FetchData(obj_int_filt, c("cluster_orig")) %>%
  rownames_to_column()
md_full = md_full %>% left_join(md) %>%
  select(rowname, cluster_orig, cluster_orig1) %>%
  column_to_rownames()
obj_int_filt = AddMetaData(obj_int_filt, md_full)
obj_int_filt$region_cluster_orig = paste(obj_int_filt$region, obj_int_filt$cluster_orig1, sep="_")
qsave(obj_int_filt, data_out_obj_fname)
# Average raw counts per region_cluster identity; work on log1p(SCT counts).
Idents(obj_int_filt) = FetchData(obj_int_filt, "region_cluster_orig")
obj_int_filt_avg = AverageExpression(obj_int_filt, assays = c("SCT", "RNA"), slot="counts")
obj_int_filt_avg1 = log1p(obj_int_filt_avg[["SCT"]])
# Marker ID --------------------------------------------------------------
# Fix: dropped the pointless sprintf() with no format placeholders.
markers_fname = file.path(tree_dir, "hvc_ra_x_gaba_rna_markers.rds")
# Fix: spell out TRUE/FALSE — bare T/F are re-assignable variables.
redo_markers = TRUE
if (redo_markers) {
  Idents(obj_int_filt) = obj_int_filt$cluster_orig1
  # Wilcoxon rank-sum test, positive markers only, expressed in >= 20% of cells.
  markers = FindAllMarkers(obj_int_filt,
                           assay="RNA",
                           test.use = "wilcox",
                           min.pct = .2, only.pos=TRUE)
  saveRDS(markers, markers_fname)
} else {
  markers = readRDS(markers_fname)
}
write.table(markers, file.path(tree_dir, "hvc_ra_x_gaba_rna_markers.txt"), quote=FALSE, col.names=TRUE, sep="\t")
# Cluster -----------------------------------------------------------------
# NOTE(review): nsig = 50 re-assigns the same value set in the Parameters
# section above; keep the two in sync if either changes.
nsig = 50
# Top `nsig` markers per cluster by log fold-change (deduplicated on gene).
markers_int_top = markers %>%
  mutate(sign = avg_logFC>0) %>%
  #group_by(cluster, sign) %>%
  group_by(cluster) %>%
  #top_n(-1 * nsig, p_val_adj) %>%
  #top_n(nsig, abs(avg_logFC)) %>%
  top_n(nsig, avg_logFC) %>%
  distinct(gene, .keep_all=T)
# Restrict the averaged expression matrix to the selected marker genes,
# then bootstrap-cluster the region_cluster columns.
obj_int_avg_filt = obj_int_filt_avg1[markers_int_top$gene,]
pv = pvclust(obj_int_avg_filt, method.hclust=hclust_method, nboot = 100, parallel=4L)
plot(pv)
qsave(pv, data_out_pv_fname)
# dendextend ------------------------------------------------------------------
# Quick base-graphics preview of the tree with 5 colored branches.
dend = pv$hclust %>% as.dendrogram %>%
  set("branches_k_color", k=5) #%>%
#set("branches_lwd", c(1.5,1,1.5)) %>%
#set("branches_lty", c(1,1,3,1,1,2)) %>%
#set("labels_colors") %>% set("labels_cex", c(.9,1.2)) %>%
#set("nodes_pch", 19) %>% set("nodes_col", c("orange", "black", "plum", NA))
# plot the dend in usual "base" plotting engine:
plot(dend)
# circlize ----------------------------------------------------------------
# Publication figure: circular dendrogram with region-colored leaf labels.
#plot(pv)
dend = as.dendrogram(pv)
dend = dend %>%
  dendextend::set("branches_k_color", value=c( "#2c9f58", "black"), k = 2)
dend_gg = as.ggdend(dend)
dend_seg = dend_gg$segments %>%
  mutate(col = if_else(is.na(col), "black", col))
cols = unique(dend_seg$col)
# Visually "pop" the first-colored subtree outward by 2 units (except the root).
pop_factor = 2
dend_seg = dend_seg %>%
  mutate(is_root = y==max(y)) %>%
  mutate(y = if_else(col==cols[1] & !is_root, y - pop_factor, y),
         yend = if_else(col==cols[1], yend - pop_factor, yend))
# Drop the root segment.
dend_seg = dend_seg[-1,]
# Leaf metadata: region colors (position_pretty_colors2 comes from the
# sourced common_aesthetics.R) and readable cell-type labels.
md = FetchData(obj_int_filt, c("region", "cluster_orig1", "region_cluster_orig")) %>%
  distinct(region, cluster_orig1, .keep_all=T) %>%
  mutate(region = sub("X", "Area X", region)) %>%
  mutate(region_color = position_pretty_colors2[region])
dend_leaves <- dend_gg$labels %>%
  rename(region_cluster_orig = label) %>%
  left_join(md) %>%
  left_join(x_key %>% mutate(cluster_orig1 = as.character(cluster_orig))) %>%
  mutate(label = ifelse(is.na(label), cluster_orig1, as.character(label))) %>%
  # filter(region!="RA") %>%
  mutate(label1 = if_else(grepl("MSN|PN|GABA-1", label), label, ""))
# Draw segments in polar coordinates; only MSN/PN/GABA-1 leaves are labelled.
gg = ggplot(data = dend_seg, aes(x=x, y=y, xend=xend, yend=yend, color=col)) +
  geom_segment(size=.25, linejoin="mitre") +
  geom_text(data=dend_leaves, aes(x=x, y=min(dend_seg$y)-1, label=label1, color=region_color), inherit.aes = F, size=6/2.8) +
  scale_y_reverse() +
  coord_polar(theta = "x") +
  scale_color_identity() +
  theme_void()
gg
save_plot(file.path(tree_sub_dir, "hclust_circ.pdf"), gg, base_height=2, base_width=2)
|
6d34ba08018092974c08d35822e474d17db4254b
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/diegovalle/Age-Cohort-Violence/male-female.R
|
033b0b7dac07d5544731f8520f1d66636a15bd0f
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
male-female.R
|
# Male vs female homicide rates in Mexico, indexed to 1985 (= 100%).
mf <- read.csv("data/male-female-rates.csv", skip = 6)
mf <- na.omit(mf)
# NOTE(review): this uses the pre-0.9.2 ggplot2 API — `format = "percent"`
# in scale_y_continuous() and opts(title = ...) were removed in later
# ggplot2 releases (modern equivalents: labels = scales::percent and
# labs(title = ...)). Confirm the pinned ggplot2 version before running.
ggplot(mf, aes(1985:2009, Female.Per, color = "Females")) +
    geom_line(size = 1.2) +
    geom_line(aes(y = Male.Per, color = "Males"),
              size = 1.2) +
    scale_color_manual("Sex", values = c("pink", "#ADD8E6")) +
    xlab("") + ylab("homicide rate as a percentage of that in 1985") +
    scale_y_continuous(format = "percent", limits = c(0, 1.2)) +
    opts(title = "Homicide rates for males and females in Mexico (1985 = 100%)")
# SavePlot() is a project-local helper (not defined in this file).
SavePlot("male-female")
# Extrapolate the population from the 1980 Census
# (na.spline presumably comes from the zoo package — verify; results are
#  printed, not stored, so this looks like interactive inspection).
na.spline(c(33039307, rep(NA, 9), mf$Male.Population))
na.spline(c(33807526, rep(NA, 9), mf$Female.Population))
|
9c034d0858916f23e461faa32670509ebbc646f8
|
06e626755e1abc5f0e4d51610b63388a2ddb9815
|
/ui.R
|
07787c0b464c3b5e591daf681151fda87c151486
|
[] |
no_license
|
SYAN83/teamGenerator
|
ac334ead3b67866b9b8746b505f6085c38d36601
|
92951693ff75d73a960e2fbfdb09df1e55a0cf09
|
refs/heads/master
| 2021-06-21T15:40:34.032232
| 2017-08-15T02:50:20
| 2017-08-15T02:50:20
| 100,331,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,976
|
r
|
ui.R
|
library(DT)
library(shiny)
library(shinyBS)
library(shinydashboard)
# Wrap an actionButton in an inline-block div so several buttons can sit
# side by side in the same row. All extra arguments are forwarded to
# shiny::actionButton().
actionButtonRow <- function (inputId, label, icon = NULL, width = NULL, ...) {
  button <- actionButton(inputId, label, icon = icon, width = width, ...)
  div(style="display:inline-block", button)
}
# Dashboard UI: two tabs — a team generator (random team assignment) and a
# group manager (add/edit/delete users). NYCICON and studentdf are globals,
# presumably defined in global.R — verify before running standalone.
shinyUI(dashboardPage(
  dashboardHeader(title = "TEAMUP"),
  dashboardSidebar(
    sidebarUserPanel("NYC DSA",
                     image = NYCICON),
    sidebarMenu(
      menuItem("Team Generator", tabName = "team_generator", icon = icon("group")),
      menuItem("Group Manager", tabName = "group_manager", icon = icon("cog"))
    )
  ),
  dashboardBody(
    tags$head(
      tags$link(rel = "stylesheet", type = "text/css", href = "custom.css")
    ),
    tabItems(
      # Tab 1: generated teams table plus team-size / seed / shuffle controls.
      tabItem(tabName = "team_generator",
              fluidRow(
                box(title = h4("TEAMS"), status = "info",
                    DT::dataTableOutput("teamUp"),
                    width = 12)
              ),
              fluidRow(
                box(
                  fluidRow(
                    column(4,
                           numericInput("teamSize",
                                        "Team Size",
                                        value = 3,
                                        min = 2, max = ceiling(nrow(studentdf)/2))),
                    # Seed 0 means "no fixed seed" (handled server-side).
                    column(4, numericInput("seedNum", "Random Seed (0 for NONE)", value = 0)),
                    column(4, br(), actionButton("sfl", "Shuffle", icon = icon("random"))))
                )
              )
      ),
      # Tab 2: CRUD boxes for the user list, plus the list itself.
      tabItem(tabName = "group_manager",
              fluidRow(
                box(title = "Add User", status = "success",
                    solidHeader = TRUE,
                    collapsible = TRUE,
                    collapsed = TRUE,
                    fluidRow(
                      column(9,
                             textInput("nameToAdd", "Name",
                                       placeholder = "Add to group..."),
                             textInput("urlToAdd", "GitHub Link",
                                       placeholder = "GitHub link...")),
                      column(3, br(),
                             actionButton("addUser", "",
                                          icon = icon("user-plus")))
                    ),
                    width = 4),
                box(title = "Edit User", status = "warning",
                    solidHeader = TRUE,
                    collapsible = TRUE,
                    collapsed = TRUE,
                    fluidRow(
                      column(9, selectizeInput("userToEdit", "Name",
                                               choice = c("", studentdf$NAME)),
                             textInput("urlToEdit", "GitHub Link",
                                       placeholder = "GitHub link...")),
                      column(3, br(),
                             actionButton("editUser", "",
                                          icon = icon("user-md")))
                    ),
                    width = 4),
                box(title = "Delete User", status = "danger",
                    solidHeader = TRUE,
                    collapsible = TRUE,
                    collapsed = TRUE,
                    fluidRow(
                      column(9, selectizeInput("userToDel", "Name",
                                               choice = c("", studentdf$NAME),
                                               multiple = TRUE)),
                      column(3, br(),
                             actionButton("delUser", "",
                                          icon = icon("user-times")))
                    ),
                    width = 4)
              ),
              fluidRow(box(title = "User List", status = "info",
                           solidHeader = TRUE,
                           collapsible = TRUE,
                           # NOTE(review): newer shinyBS versions use
                           # `anchorId` rather than `inputId` here — confirm
                           # the installed shinyBS version.
                           bsAlert(inputId = "alert"),
                           DT::dataTableOutput("table"),
                           column(4,
                                  actionButtonRow("reloadTbl", "Reload",
                                                  icon = icon("repeat")),
                                  actionButtonRow("saveTbl", "Save",
                                                  icon = icon("save"))),
                           width = 12))
      )
    )
  )
))
|
6ae061c39b68985fa867b3f5a81472bfb01a96a8
|
9bf55d06d4c0deff0d753463018a3300e79199d2
|
/BreakPoint.R
|
fef2a7ce0c6fe74e2fea09a0e776cfe78f871745
|
[] |
no_license
|
claschumer/Project-SCV-
|
46e6592e55ddba93f87bb93047c18285b401bd32
|
a872b09a90ce802a2840e05e8815885b968e5a2f
|
refs/heads/main
| 2023-08-29T05:24:47.919641
| 2021-10-27T20:38:52
| 2021-10-27T20:38:52
| 411,320,177
| 1
| 2
| null | 2021-10-15T13:07:16
| 2021-09-28T14:39:09
|
R
|
UTF-8
|
R
| false
| false
| 513
|
r
|
BreakPoint.R
|
# Load ATP match data (absolute local paths — adjust per machine).
tennis2021 <- read.csv("C:/Users/ilanp/Documents/EPFL/Mathématiques/MA master sem 3 automne 2021/Statistical Computation and Visualization/atp_matches_2021.csv")
# NOTE(review): tennis2021 is loaded but never used below.
tennis2019 <- read.csv("C:/Users/ilanp/Documents/EPFL/Mathématiques/MA master sem 3 automne 2021/Statistical Computation and Visualization/tennis_atp-master/atp_matches_2019.csv")
# Break points the match winner faced / saved in each 2019 match.
winnerFacedBreakPoint <- tennis2019$w_bpFaced
winnerSavedBreakPoint <- tennis2019$w_bpSaved
# Share of faced break points that were saved; yields NaN (0/0) for
# matches where the winner faced no break points.
winnerRatioSavedBreakPoint <- winnerSavedBreakPoint / winnerFacedBreakPoint
|
df0ab6a8bb0871ba65df46f2fd14d5a8c9d5d828
|
d56b256ad3bbca6d79b9979d5646f52b1d025c0a
|
/NBA_analysis.R
|
40b2aa1d474c1f7aa637b5064e3e407e025a691a
|
[] |
no_license
|
DMaya2/Reproducible-Data-Analysis-Project
|
49c05c920a1358817bc264957dc9b4b0844e39f4
|
42cb4db6fd556af7aa0de6c13e8dd9c50f45fca3
|
refs/heads/main
| 2023-04-19T17:48:08.170758
| 2021-05-09T11:03:19
| 2021-05-09T11:03:19
| 364,214,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,042
|
r
|
NBA_analysis.R
|
#Packages
#These are the packages to be used
library(tidyverse)
library(broom)
library(ggcorrplot)
#Datasets
#Reading the datasets into R (paths are relative to the working directory).
player_salaries <- read_csv("2018-19_nba_player-salaries.csv")
player_stats <- read_csv("2018-19_nba_player-statistics.csv")
# NOTE(review): the three team-level datasets are read but never used below.
team_stats_1 <- read_csv("2018-19_nba_team-statistics_1.csv")
team_stats_2 <- read_csv("2018-19_nba_team-statistics_2.csv")
team_payroll <- read_csv("2019-20_nba_team-payroll.csv")
#Datasets description
#Two datasets are used
#2018-19_nba_player-statistics.csv: This data file provides total statistics for
#Individual NBA players during the 2018-19 season.
#2018-19_nba_player-salaries.csv: This data file contains the salary for
#individual players during the 2018-19 NBA season.
#Data pre-processing
#Interactive inspection of the two player-level datasets
#(printed, not stored).
head(player_salaries)
tail(player_salaries)
str(player_salaries)
dim(player_salaries)
head(player_stats)
tail(player_stats)
str(player_stats)
dim(player_stats)
#Missing values Handling
#Total count of missing values in each dataset.
player_salaries %>% summarise(sum(is.na(.)))
#The player salaries data has no missing values
player_stats %>% summarise(sum(is.na(.)))
#The player statistics data has 117 missing values
#Columns containing missing values.
#Fix: colnames(is.na(player_stats)) returned EVERY column name (is.na()
#preserves the dim/colnames, so colnames() on it is just colnames of the
#whole table); filter to columns that actually contain NAs instead.
names(player_stats)[colSums(is.na(player_stats)) > 0]
#we can see that 3P%, 2P%, FT% are columns with missing values
colSums(is.na(player_stats)) %>% as.data.frame()
#Seems like these missing values are from players who played fewer games,
#therefore we will drop them
#dropping missing values
player_stats_clean <- player_stats %>% drop_na()
#Data tidying
#Merging player salaries data and player statistics clean data.
player_data <- full_join(player_salaries, player_stats_clean, by = "player_name")
#Duplicates handling
#Inspect duplicate players (printed for review).
player_data$player_name[duplicated(player_data$player_id)] %>% as.data.frame()
unique(player_data$player_name[duplicated(player_data$player_id)]) %>% as.data.frame()
#There are a lot of players duplicated, probably due to mid-season trading.
#Removing duplicates from the data (keeps each player's first row).
player_data <- player_data[!duplicated(player_data$player_id), ]
colSums(is.na(player_data))
#we can see that there are missing values introduced after the
#two datasets are merged.
#The missing values are mostly from players with no defined position
#so we will also drop them
player_data_tidy <- player_data %>% drop_na()
#saving the tidy data
write.csv(player_data_tidy, "tidy_data.csv")
#Data exploration
#Salaries distribution
ggplot(data = player_data_tidy, aes(x = salary)) +
  geom_histogram( color = "black", fill = "blue") + ggtitle("salary distribution")
#from the salary distribution plot, the histogram is skewed to the right.
#We can see that most of the players' salaries are less than a million
#salary summary
summary(player_data_tidy$salary)
#The median salary is $3258539
#The mean salary is $6967454
#Do players with higher game points have higher salaries?
#Does playing position affect a player's salary?
#points against salary, colored by position
ggplot(data = player_data_tidy, aes(x = PTS, y = salary, color = Pos)) +
  geom_point() + ggtitle("player points against salary")
#According to this graph, players that have higher points tend to have high salaries
#regardless of the position they play in.
#Bar plot of positions
ggplot(data = player_data_tidy, aes(x = Pos, fill = Pos)) + geom_bar() +
  ggtitle("Number of players for each position in the season 2018-19")
#There is a high number of Shooting Guard players
#in the 2018-19 season compared to the other positions.
#Center position has the least number of players
#Games started by a player against points
#looking if games started by a player increases the player's points.
ggplot(data = player_data_tidy, aes(x = GS, y = PTS)) + geom_point() +
  ggtitle("Games started by a player against Points") + geom_smooth(method = lm)
#This does not give a proper picture,
#because there are players that started fewer games but still have high
#points
#Minutes played
#Does a player who played more minutes have higher points?
ggplot(data = player_data_tidy, aes(x = MP, y = PTS)) + geom_point() +
  ggtitle("Minutes played by a player vs Points") +geom_smooth(method = lm)
#This plot tells us that the more minutes a player is given,
#the more points per game that player can produce, but it is not clear-cut as
#there are players who played more minutes but still have low points.
#variable correlation
#removing variables that are not numeric for correlation
cor_data <- player_data_tidy %>% select(-c(player_name, Pos, Tm))
str(cor_data)
round(x = cor(cor_data), digits = 3)
ggcorrplot(cor(cor_data)) +ggtitle("Correlation heatmap of the variables")
#We can see that salary is correlated with the other variables but not highly.
#Points, though, are highly correlated with Turnovers, Steals, Free Throw Attempts,
#Free Throws, 2-point Field Goal Attempts, 2-Point Field Goals, 3-Point Field Goal Attempts,
#3-Point Field Goals, Field Goals, Minutes Played, Games Started, Total Rebounds, Defensive Rebounds.
#A player's Age has no effect on points but has on salary.
#we can also see a high correlation among some of the variables.
#Features extraction
#extracting the variables to be used for the modeling
model_data <- player_data_tidy %>%
  select(c(player_name,salary,Pos,Age,Tm,G,GS,MP,FG,FGA,"3P","3PA","2P","2PA",FT,FTA,DRB,TRB,TOV,PTS))
#Data Modeling
#testing if players' points explain salary
model1 <- lm(salary~PTS, data = model_data)
summary(model1)
tidy(model1, conf.int = TRUE)
#looking at the model summary, a player's points do not explain salary well.
#makes a bit of sense since scoring high points is contributed to by other factors.
#for example, if a player played more minutes, there is a chance he can score more points.
#so points alone do not give a clear explanation,
#looking at other variables
#whether players' points can be explained by other variables
model_data2 <- player_data_tidy %>%
  select(c(Pos,Age,G,GS,MP,FG,FGA,FT,FTA,DRB,TRB,TOV,PTS))
model2 <- lm(PTS~., data = model_data2)
summary(model2)
tidy(model2, conf.int = TRUE)
#By looking at this model, we can see that other variables
#(for example Minutes Played) contribute to a player having high points,
#as this model has achieved an R-squared of 0.9979.
#we can say that if a player has better statistics, the player
#tends to have high points.
#Players' points from highest to lowest.
Rec_data <- player_data_tidy %>%
  select(c(player_name,salary,Pos,G,GS,MP,PTS)) %>% arrange(desc(PTS))
#Finding players by positions
#Conditions used to filter players: a player must at least have played a minimum of 50 games,
#1000 minutes played, salary between 1 Million and 10 Million, and minimum points of 1000
#Point guard players:
pos_pg <- Rec_data %>% filter(Pos == "PG") %>%
  filter(G >=50, MP>=1000, salary>=1000000, salary<=10000000, PTS>=1000)
pos_pg
#Points per minute played (pmp)
pos_pg %>% mutate(pmp = PTS/MP) %>%
  arrange(desc(pmp))
#The player to buy for the position of Point Guard is
#D'Angelo Russell: salary 7019698, games 81, games started 81, minutes played 2448,
#points 1712 and points per minute played ratio 0.699.
#He could be a value-for-money player for this position. He has been undervalued
#in the market but with a great points to minutes played ratio.
#The other alternative player to consider for the position is
#Spencer Dinwiddie, salary 1656092, games 68, games started 4, minutes played 1914,
#points 1143 and points per minute played 0.597. He costs less compared to other players
#whose stats are not as good as his.
#Shooting guard
pos_sg <- Rec_data %>% filter(Pos == "SG") %>%
  filter(G >=50, MP>=1000, salary>=1000000, salary<=10000000, PTS>=1000)
pos_sg
#Points per minute played (pgmp)
pos_sg%>% mutate(pmp = PTS/MP) %>%
  arrange(desc(pmp))
#The good player to buy for the position of shooting guard is
#Devin Booker: salary 3314365, games 64, games started 64, minutes played 2242,
#points 1700 and points to minutes played ratio 0.758. He is undervalued
#in the market.
#small forward
pos_sf <- Rec_data %>% filter(Pos == "SF") %>%
  filter(G >=50, MP>=1000, salary>=1000000, salary<=10000000, PTS>=1000)
pos_sf
#Points per minute played (pgmp)
pos_sf %>% mutate(pmp = PTS/MP) %>%
  arrange(desc(pmp))
#The good player to buy for the position of small forward is
#Jayson Tatum: salary 6700800, games 79, games started 79, minutes played 2455,
#points 1243 and points to minutes played ratio 0.506.
#power forward
pos_pf <- Rec_data %>% filter(Pos == "PF") %>%
  filter(G >=50, MP>=1000, salary>=1000000, salary<=10000000, PTS>=1000)
pos_pf
#Points per minute played (pgmp)
pos_pf %>% mutate(pmp = PTS/MP) %>%
  arrange(desc(pmp))
#player to buy for the position of power forward is
#Julius Randle: salary 8641000, games 73, games started 49, minutes played 2232,
#points 1565 and points per minute played ratio 0.701.
#Alternatively we can buy a cheaper option,
#John Collins: salary 2299080, games 61, games started 59, minutes played 1829,
#points 1188 and points per minute played ratio 0.650.
#center
pos_c <- Rec_data %>% filter(Pos == "C") %>%
  filter(G >=50, MP>=1000, salary>=1000000, salary<=10000000, PTS>=1000)
pos_c
#Points per minute played (pgmp)
pos_c %>% mutate(pmp = PTS/MP) %>%
  arrange(desc(pmp))
#Player to buy for the position of center is
#Karl-Anthony Towns: salary 7839435, games 77, games started 77, minutes played 2545,
#points 1880 and points to minutes played ratio 0.739.
#Recommendations
#Top five starting players for Chicago to buy
recomend<- rbind(pos_c, pos_pf)
recomended <- rbind(recomend, pos_pg)
recomended_pl <- rbind(recomended,pos_sf)
recomended_players <- rbind(recomended_pl, pos_sg) %>% mutate(pmp = PTS/MP)
#NOTE(review): hard-coded row indices pick the five chosen players; this is
#fragile — it breaks silently if the filters or sort order above change.
R <- recomended_players[c(1,6,12,19,21),] %>% arrange(desc(pmp))
#Budget remaining out of a $118M payroll after the five purchases (printed).
118000000-sum(R$salary)
#D'Angelo Russell
#Karl-Anthony Towns
#Julius Randle
#Jayson Tatum
#Devin Booker
|
9932456867535175b5d90a7f7f003a24d3c74cce
|
76e9697fe875b50c92a30da09dd918aaef9d1d32
|
/code/intro_survey/evaluate_response.R
|
d7cf3a5f1171c5a0b8f5827b10ea773bc07c082a
|
[
"MIT"
] |
permissive
|
joachim-gassen/sposm
|
17821aef4e6c540c593ce145ca743c3173ebbd45
|
238ee27ecf5c1fa8ae3861b23ebb1fe6835152ce
|
refs/heads/master
| 2022-09-09T17:31:19.481064
| 2022-09-02T16:04:39
| 2022-09-02T16:04:39
| 213,951,838
| 32
| 47
|
MIT
| 2020-01-11T23:36:54
| 2019-10-09T15:15:13
|
R
|
UTF-8
|
R
| false
| false
| 2,326
|
r
|
evaluate_response.R
|
library(tidyverse)
library(DBI)
library(scales)
#' Read all survey answers from the SQLite database.
#'
#' `path_dbase` is expected to be defined by the calling environment.
#' Fix: the original leaked the connection (and result set) if
#' dbSendQuery()/dbFetch() failed; cleanup now runs via on.exit().
#'
#' @return A data frame with one row per answer.
read_response_from_db <- function() {
  con <- dbConnect(RSQLite::SQLite(), path_dbase)
  on.exit(dbDisconnect(con), add = TRUE)
  res <- dbSendQuery(con, "SELECT * FROM answers")
  # Clear the result set before the connection is closed.
  on.exit(dbClearResult(res), add = TRUE, after = FALSE)
  dbFetch(res)
}
#' Stack the three repeated (language_i, usability_i, ease_i) answer
#' triplets of the survey into one long data frame with columns
#' language / usability / ease.
#'
#' Refactor: the three copy-pasted select/rename blocks are collapsed into
#' a loop over the triplet index; output rows keep the original order
#' (all first answers, then second, then third).
#'
#' @param df Raw survey responses with columns language_1..3, usability_1..3,
#'   ease_1..3.
#' @return Long data frame; ratings <= 0 (the survey's "not answered"
#'   encoding) are recoded as NA.
create_languages_reponse_df <- function (df) {
  pick_triplet <- function(i) {
    block <- df %>%
      select(all_of(paste0(c("language_", "usability_", "ease_"), i)))
    names(block) <- c("language", "usability", "ease")
    block
  }
  do.call(rbind, lapply(seq_len(3), pick_triplet)) %>%
    # The survey stores "no language given" as the literal string "NA".
    filter(language != "NA") %>%
    mutate(
      usability = ifelse(usability > 0, usability, NA),
      ease = ifelse(ease > 0, ease, NA)
    )
}
#' Summarise responses per language: number of mentions and average
#' usability/ease ratings, most-mentioned language first.
#'
#' @param lr_df Long response data frame from create_languages_reponse_df().
table_language <- function(lr_df) {
  per_language <- lr_df %>%
    group_by(language) %>%
    summarise(
      nobs = n(),
      mean_usability = mean(usability, na.rm = TRUE),
      mean_ease = mean(ease, na.rm = TRUE)
    )
  per_language %>% arrange(desc(nobs))
}
#' Bar chart of a 1-10 rating column, always showing all ten bars.
#'
#' @param df Data frame containing the rating column.
#' @param var Column name (string) of the rating to plot.
#' trr266_yellow is a project-global color constant (defined elsewhere).
plot_bar_graph <- function(df, var) {
  # Fixing levels 1:10 + drop = FALSE keeps empty rating bars visible.
  df[, var] <- factor(df[, var], levels = 1:10)
  # NOTE(review): aes_string() is deprecated in current ggplot2; the modern
  # form is aes(x = .data[[var]]) — confirm the pinned ggplot2 version.
  ggplot(data = df, aes_string(x = var)) +
    geom_bar(fill = trr266_yellow) +
    scale_x_discrete(drop = FALSE) +
    labs(x = sprintf("Assessment for %s \n(1: bad, 10: good)",
                     str_to_title(var))) +
    theme_minimal()
}
#' Rating bar chart for one language.
#'
#' @param lr_df Long response data frame.
#' @param lang_str Language to filter on (exact match).
#' @param var Rating column name (string), e.g. "usability" or "ease".
#'
#' Fix: `select(language, !!var)` unquoted a character string inside
#' select(), which dplyr deprecates as ambiguous; all_of() is the
#' documented way to select by a character vector.
plot_bar_graph_language <- function(lr_df, lang_str, var) {
  df <- lr_df %>%
    filter(language == lang_str) %>%
    select(language, all_of(var))
  plot_bar_graph(df, var)
}
#' Bar chart of the share of respondents who know each surveyed term.
#'
#' Fix: the original's first line `df <- raw_response` overwrote the
#' parameter with a global, so the `df` argument was silently ignored —
#' a leftover interactive-debugging line. The function now actually uses
#' its argument. `test_items` and `trr266_yellow` are project globals.
#'
#' @param df Raw survey responses; the know-term indicator columns are
#'   assumed to start at column 12, one per entry of `test_items`.
plot_know_terms_graph <- function(df) {
  df <- df %>% select(12:(11 + length(test_items))) %>%
    pivot_longer(everything(), names_to = "term", values_to = "know") %>%
    group_by(term) %>%
    summarise(pct_know = sum(know)/n())
  # Column names are the sanitized (lowercase, alphanumeric-only) versions
  # of the test items; relabel them with the readable originals.
  df$term <- factor(df$term,
                    levels = tolower(str_replace_all(test_items, "[^[:alnum:]]", "")),
                    labels = test_items)
  ggplot(df, aes(x = term, y = pct_know)) +
    geom_col(fill = trr266_yellow) +
    labs(x = "Term", y = "Share that knows the term") +
    scale_y_continuous(labels = percent) +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))
}
|
6da499679acec2f2694e703e09743fa84cd27d26
|
5478b961fdc92d5ec80fca9291077a0f2a51ce88
|
/app.R
|
7d60583b354549b39d300edca42f58f4e4471640
|
[] |
no_license
|
pmacnaughton/hai_epimodel
|
7aee1379ee67103f094bd1eeac79a609fef467c6
|
1959c121c0c7b923d6753b5bb4aa5fe2c42a9599
|
refs/heads/master
| 2022-11-23T13:41:38.838846
| 2020-07-27T02:25:33
| 2020-07-27T02:25:33
| 282,774,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,195
|
r
|
app.R
|
## Piers MacNaughton - 2020
#VRE Colonization - Six Compartment Deterministic Model
#Based on paper by Wolkewitz et al. 2008
#With additional term for daylight decontamination
library(shiny)
library(gridExtra)
library(tidyverse)
library(deSolve)
library(EpiModel)
library(ggplot2)
## Configuration
theme_set(theme_minimal(base_size = 18))
# Derivative function for the six-compartment VRE model (EpiModel dcm).
# Compartments: c./u. = contaminated/uncontaminated, .p/.s/.e =
# patients / staff (HCW) / environment. `alpha` is the extra daylight
# decontamination rate added to the Wolkewitz et al. 2008 model.
hai_fun <- function(t, t0, parms) {
  with(as.list(c(t0, parms)), {
    ##main ODEs
    # Population sizes (constant within each compartment pair).
    n.p <- c.p + u.p
    n.s <- c.s + u.s
    n.e <- c.e + u.e
    # Contaminated Patients: admissions (fraction phi of discharges are
    # replaced by colonized admissions) + acquisition from staff and
    # environment - discharge of colonized patients.
    dc.p <- (lam.u*n.p+(lam.c-lam.u)*c.p)*phi + (beta.sp*c.s/n.s+beta.ep*c.e/n.e)*(n.p-c.p) - lam.c*c.p
    # Uncontaminated Patients: the mirror-image flows (total patients conserved).
    du.p <- (lam.u*n.p+(lam.c-lam.u)*c.p)*(1-phi) - lam.u*(n.p-c.p) - (beta.sp*c.s/n.s+beta.ep*c.e/n.e)*(n.p-c.p)
    # Contaminated HCW: acquisition from patients/environment - hand hygiene (mu).
    dc.s <- (beta.ps*c.p/n.p+beta.es*c.e/n.e)*(n.s-c.s) - mu*c.s
    # Uncontaminated HCW.
    du.s <- mu*c.s - (beta.ps*c.p/n.p+beta.es*c.e/n.e)*(n.s-c.s)
    # Contaminated Environment: deposition from staff/patients - cleaning
    # (kappa) - daylight decontamination (alpha).
    dc.e <- (beta.se*c.s/n.s+beta.pe*c.p/n.p)*(n.e-c.e) - kappa*c.e - alpha*c.e
    # Uncontaminated Environment.
    du.e <- kappa*c.e + alpha*c.e - (beta.se*c.s/n.s+beta.pe*c.p/n.p)*(n.e-c.e)
    ## Output: derivatives in state order, plus tracked quantities
    ## (patient census and colonization prevalence).
    list(c(dc.p, du.p, dc.s, du.s, dc.e, du.e), n.p=n.p, Prevalence=(c.p/n.p))
  })
}
# Run the deterministic compartment model over 120 days (hourly steps) for
# three daylight decontamination rates: none, "low" and "high".
# Rates are supplied in /day and converted to /hour; all other parameters
# match Wolkewitz et al. 2008. Returns a long data frame with a `Day` column.
solve_ode <- function(alpha_one, alpha_two){
  decon_rates <- c(0, alpha_one, alpha_two) / 24
  parms <- param.dcm(phi=0.1, lam.u=0.1/24, lam.c=0.05/24, mu=24/24, kappa=1/24,
                     alpha=decon_rates,
                     beta.sp=0.3/24, beta.se=2/24, beta.ps=2/24, beta.pe=2/24,
                     beta.es=2/24, beta.ep=0.3/24)
  inits <- init.dcm(c.p=1, u.p=19, c.s=0, u.s=5, c.e=0, u.e=100)
  ctrl <- control.dcm(nsteps=120*24, new.mod=hai_fun)
  sim <- dcm(parms, inits, ctrl)
  sim_df <- as.data.frame(sim)
  # Convert the hourly time index to days, starting at day 1.
  sim_df$Day <- (sim_df$time + 24)/24
  sim_df
}
#Plot of Prevalence of VRE Colonization over time, one colored series per
#decontamination scenario, truncated at max_time days.
#Fix: the original called solve_ode(alpha_one, alpha_two) and DISCARDED the
#result, re-solving the entire ODE system on every plot refresh for nothing.
#alpha_one/alpha_two stay in the signature for caller compatibility but are
#no longer used.
plot_result <- function(mod_output, alpha_one, alpha_two, max_time) {
  pp <- ggplot(data=mod_output, aes(x=Day, y=Prevalence)) + geom_point(aes(color=as.factor(run))) + xlim(c(0,max_time)) +
    scale_color_manual(name="Decontamination Rate", labels=c("None", "Low", "High"), values=c("#000000", "#EBB172", "#EB9234")) +
    theme(legend.title = element_text(size=16))
  print(pp)
}
# Shiny UI: three sliders (max days shown, low/high decontamination rates)
# and the prevalence plot.
# Fix: removed trailing commas after the last argument of sidebarPanel()
# and mainPanel() — trailing empty arguments are fragile in R calls and can
# raise "argument is empty" errors depending on the htmltools version.
ui <- fluidPage(
  titlePanel("VRE Transmission in Hospital - Deterministic Compartment Model"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("x_max",
                  "Max days for the model",
                  min = 3,
                  max = 120,
                  value = 30,
                  step = 1),
      hr(),
      br(),
      sliderInput("alpha_one",
                  "Daylight Decontamination Rate (/day) - Low",
                  min = 0,
                  max = 1.0,
                  value = 0.5,
                  step = 0.1),
      sliderInput("alpha_two",
                  "Daylight Decontamination Rate (/day) - High",
                  min = 1,
                  max = 10,
                  value = 5,
                  step = 1),
      br(),
      hr(),
      tags$div("Piers MacNaughton, 2020")
    ),
    mainPanel(
      # p("Note: This tool is not intended to create a prediction."),
      plotOutput("chart", height = "500px")
    )
  )
)
# Shiny server: re-solve the compartment model whenever either
# decontamination slider changes, then re-render the prevalence plot
# from the cached reactive result.
server <- function(input, output) {
  simulation <- reactive({
    solve_ode(alpha_one = input$alpha_one, alpha_two = input$alpha_two)
  })
  output$chart <- renderPlot({
    plot_result(simulation(), input$alpha_one, input$alpha_two, input$x_max)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
35a94490e12a3f957bd5182d294839f91f29ce13
|
05678f03a83ce73472b1473f2d0743c9f015f2b8
|
/tests/testthat/test_event.R
|
19996d68124d7fefd7a785ed2398d0337038eaa5
|
[] |
no_license
|
Breeding-Insight/brapi-r-v2
|
3a7b4168c6d8516eb1128445a2f281d1199668a3
|
5cfa7453947121496780b410661117639f09c7ff
|
refs/heads/main
| 2023-03-14T22:20:29.331935
| 2021-03-17T01:31:11
| 2021-03-17T01:31:11
| 348,535,689
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,064
|
r
|
test_event.R
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Auto-generated testthat stubs for the BrAPI `Event` model: each test is a
# placeholder — the expectation is commented out until an expected value is
# filled in, so every test currently passes vacuously.
context("Test Event")
# Shared model instance under test (no per-test isolation; fine while the
# expectations are inert, revisit if tests start mutating it).
model.instance <- Event$new()
test_that("additionalInfo", {
  # tests for the property `additionalInfo` (map(character))
  # Additional arbitrary info
  # uncomment below to test the property
  #expect_equal(model.instance$`additionalInfo`, "EXPECTED_RESULT")
})
test_that("date", {
  # tests for the property `date` (array[character])
  # A list of dates when the event occurred  MIAPPE V1.1 (DM-68) Event date - Date and time of the event.
  # uncomment below to test the property
  #expect_equal(model.instance$`date`, "EXPECTED_RESULT")
})
test_that("eventDbId", {
  # tests for the property `eventDbId` (character)
  # Internal database identifier
  # uncomment below to test the property
  #expect_equal(model.instance$`eventDbId`, "EXPECTED_RESULT")
})
test_that("eventDescription", {
  # tests for the property `eventDescription` (character)
  # A detailed, human-readable description of this event  MIAPPE V1.1 (DM-67) Event description - Description of the event, including details such as amount applied and possibly duration of the event.
  # uncomment below to test the property
  #expect_equal(model.instance$`eventDescription`, "EXPECTED_RESULT")
})
test_that("eventParameters", {
  # tests for the property `eventParameters` (array[EventEventParameters])
  # A list of objects describing additional event parameters. Each of the following accepts a human-readable value or URI
  # uncomment below to test the property
  #expect_equal(model.instance$`eventParameters`, "EXPECTED_RESULT")
})
test_that("eventType", {
  # tests for the property `eventType` (character)
  # General category for this event (e.g. Sowing, Watering, Rain). Each eventType should correspond to exactly one eventTypeDbId, if provided. MIAPPE V1.1 (DM-65) Event type - Short name of the event.
  # uncomment below to test the property
  #expect_equal(model.instance$`eventType`, "EXPECTED_RESULT")
})
test_that("eventTypeDbId", {
  # tests for the property `eventTypeDbId` (character)
  # An identifier for this event type, in the form of an ontology class reference  MIAPPE V1.1 (DM-66) Event accession number - Accession number of the event type in a suitable controlled vocabulary (Crop Ontology).
  # uncomment below to test the property
  #expect_equal(model.instance$`eventTypeDbId`, "EXPECTED_RESULT")
})
test_that("observationUnitDbIds", {
# tests for the property `observationUnitDbIds` (array[character])
# A list of the affected observation units. If this parameter is not given, it is understood that the event affected all units in the study
# uncomment below to test the property
#expect_equal(model.instance$`observationUnitDbIds`, "EXPECTED_RESULT")
})
test_that("studyDbId", {
# tests for the property `studyDbId` (character)
# The study in which the event occurred
# uncomment below to test the property
#expect_equal(model.instance$`studyDbId`, "EXPECTED_RESULT")
})
|
965898ec1fb83f92e5e19217722404a4d478cc17
|
db8846bf9f77fedf42bfd39f8788f4080904b8ec
|
/bioticInteractions/recruitment/analyses/seed_abund_mod_LT.R
|
6ad688258ec5fa83d4c3a270aacfcd8e134378e4
|
[] |
no_license
|
fja062/FunCaB
|
e3d8dbdc96533bd46f4f4822f80b419707a2b9bd
|
c164c3ec9161ed698cd4e8f801c3759a5c7d826e
|
refs/heads/master
| 2023-01-19T20:52:21.347347
| 2020-11-27T10:18:44
| 2020-11-27T10:18:44
| 75,606,325
| 2
| 0
| null | 2019-07-12T08:19:46
| 2016-12-05T08:54:06
|
R
|
UTF-8
|
R
| false
| false
| 11,886
|
r
|
seed_abund_mod_LT.R
|
#### Load packages and data ####
library("rjags")
library("R2jags")
library("tidyverse")
library("tidybayes")
library("DHARMa")
# library("nimble")
#library("lme4")
#library("broom.mixed")
# data
load(file = "~/OneDrive - University of Bergen/Research/FunCaB/Data/secondary/cleanedAbundData.RData")
# 1)
#### data preparation ####
abdat <- rc_rtcSumAv %>%
filter(Treatment %in% c("Intact", "Gap")) %>%
mutate(stempLevel = as.vector(scale(tempLevel, scale = FALSE, center =TRUE)),
stemp7010 = as.vector(scale(temp7010, scale = FALSE, center =TRUE)),
precipDiv7010 = precip7010/1000,
sprecip7010 = as.vector(scale(precipDiv7010, scale = FALSE, center =TRUE)),
Treatment = factor(Treatment, levels = c("Intact", "Gap")),
monthN = factor(monthN, levels = c("spr", "aut")),
precipDiv = precipLevel/1000,
sprecipLevel = as.vector(scale(precipDiv, scale = FALSE, center = TRUE)),
soilTs = as.vector(scale(soilT, scale = FALSE, center = TRUE)),
soilMs = as.vector(scale(soilM, scale = FALSE, center = TRUE)),
) %>%
filter(!is.na(soilM),
!is.na(soilT))
#MASS::fitdistr(abdat %>% pull(seed), densfun = "negative binomial")
#### Non-bayesian analysis ####
# Negative-binomial GLMM of seedling counts with a site random intercept.
# BUG FIX: glmer.nb() fixes the response family to negative binomial
# internally, so passing `family = "gamma"` is invalid and conflicts with
# the NB family; the argument is removed.
# NOTE(review): sqrt(seed) yields non-integer responses, which is dubious
# for a count-family model -- consider modelling `seed` directly; confirm
# with the analysis author.
nbGlmerAb <- abdat %>%
  glmer.nb(sqrt(seed) ~ monthN + Treatment + soilTs + I(soilTs^2) + soilMs + I(soilMs^2) + soilTs:Treatment + I(soilTs^2):Treatment + I(soilMs^2):Treatment + soilMs:Treatment + (1|siteID), data = .)
simOut <- simulateResiduals(fittedModel = nbGlmerAb, n = 250)
plot(simOut)
testResiduals(simOut)
testZeroInflation(simOut)
summary(nbGlmerAb)
nbGlmerAb %>%
tidy() %>%
mutate(lower = (estimate - std.error*1.96),
upper = (estimate + std.error*1.96))%>%
ggplot(aes(x = estimate, y = term, xmin = lower, xmax = upper)) +
geom_errorbarh() +
geom_point() +
geom_vline(xintercept = 0)
#### Bayesian analysis ####
# i) set up a model matrix to feed directly into the model. This avoids potential coding errors. -1 removes the intercept, which I set separately so it can be drawn from a normal distribution.
matab.t <- model.matrix(~ monthN + Treatment + tAnom + pAnom + tAnom:Treatment + pAnom:Treatment + monthN:pAnom:Treatment + monthN:tAnom:Treatment + stemp7010 + sprecip7010, data = abdat)[,-1]
##########~~~~~~~~ interactions with seasonality???
# fake data for predictions
abdatY <- crossing(Treatment = unique(abdat$Treatment), # rep is slowest on inside
monthN = unique(abdat$monthN),
temp7010 = unique(abdat$tempLevel),
precip7010 = quantile(abdat$precipDiv7010, prob = c(0.4, 0.5)),
pAnom = seq(min(abdat$pAnom), max(abdat$pAnom), length = 50),
tAnom = quantile(abdat$tAnom, prob = c(0.25, 0.75))) %>%
mutate(stemp7010 = as.vector(scale(temp7010, scale = FALSE, center =TRUE))
)
abdatY
# model matrix for fake data predictions
matab.tY <- model.matrix(~ monthN + Treatment + tAnom + pAnom + tAnom:Treatment + pAnom:Treatment + monthN:pAnom:Treatment + monthN:tAnom:Treatment + stemp7010 + precip7010, data = abdatY)
# remove intercept
matab.tY <- matab.tY[,-1]
# ii) model
cat("model {
# Likelihood
for (i in 1:n.dat) {
y[i] ~ dnegbin(p[i], r)
p[i] <- r / (r + mu[i])
log(mu[i]) <- beta.intercept + inprod(beta, matX[i, ]) + beta.site[siteID[i]]
# predictions for model validation, using original data
yPred[i] ~ dnegbin(p[i], r)
}
# derived predictions
for (k in 1:n.datY){
#Pred[k] ~ dnegbin(pPred[k], r) # new data for each MCMC iteration
pPred[k] <- r / (r + muPred[k])
log(muPred[k]) <- beta.intercept + inprod(beta, matY[k, ])
}
# Priors
r ~ dgamma(0.01, 0.01) # prior for the precision of the survival probability
beta.intercept ~ dnorm(0, 0.001) # intercept prior
for (b in 1:nEff) {
beta[b] ~ dnorm(0, 0.001) # priors for the betas
}
# priors random effects
randTau ~ dgamma(0.01, 0.01)
for (m in 1:n.site) {
beta.site[m] ~ dnorm(0, randTau)
}
}
", fill = TRUE, file = "~/Documents/FunCaB/analyses/seedAbund_tAnom.txt")
# specify the parameters to watch
paraNames.ab <- c("beta.intercept", "beta", "beta.site", "r", "yPred", "muPred", "mu")
# iii) Set up a list that contains all the necessary data
n.treat <- nlevels(factor(abdat$Treatment))
n.season <- nlevels(factor(abdat$monthN))
abDat <- list(y = abdat$seed,
n.dat = nrow(abdat),
n.datY = nrow(abdatY),
matX = matab.t,
matY = matab.tY,
nEff = ncol(matab.t),
siteID = as.numeric(factor(abdat$siteID)),
n.site = nlevels(factor(abdat$siteID)))
# iv) Compile the model and run the MCMC for an adaptation/burn-in phase and sample from the posteriors
AbundtAnom.mod <- jags(
model.file = "~/Documents/FunCaB/analyses/seedAbund_tAnom.txt",
data = abDat,
n.iter = 20000,
n.chains = 4,
parameters.to.save = paraNames.ab,
progress.bar = "text"
)
# vi) diagnostics
# create variables for model checking
simulations.abT <- AbundtAnom.mod$BUGSoutput$sims.list$yPred
predictions.abT <- apply(AbundtAnom.mod$BUGSoutput$sims.list$mu, 2, median)
jagsModabT.meanlist <- AbundtAnom.mod$BUGSoutput$mean
jagsModabT.paramlist <- AbundtAnom.mod$BUGSoutput$sims.list
drawsAB <- AbundtAnom.mod$BUGSoutput$sims.list$pPred %>% as.data.frame()
dim(simulations.abT)
# extract names from matrix
rNames.t <- colnames(matab.t) %>%
enframe(name = "i", value = "term") %>%
mutate(i = paste0("beta[",i,"]"))
sim.abT <- createDHARMa(
simulatedResponse = t(simulations.abT),
observedResponse = abdat$seed,
fittedPredictedResponse = predictions.abT,
integerResponse = TRUE
)
# check model fit
plot(sim.abT) # looks ok, slightly downturned
testResiduals(sim.abT) # looks good, no outliers, no dispersion problems
testZeroInflation(sim.abT) # no zero-inflation problems
plot(AbundtAnom.mod) # I think this looks alright...
testTemporalAutocorrelation(sim.abT)
testSpatialAutocorrelation(sim.abT)
traceplot(AbundtAnom.mod, match.head = TRUE, varname = "beta", mfrow = c(3,3))
traceplot(AbundtAnom.mod, match.head = TRUE, varname = "r")
#AbundtAnom.mod$BUGSoutput$sims.list$beta.intercept %>% as_data_frame() %>% as_tibble() %$% acf(V1)
source(file = "~/Documents/FunCaB/figures/plotting_dim.R")
# coefficients plot
modCoefPlot <- AbundtAnom.mod$BUGSoutput$summary %>%
as.data.frame() %>%
as_tibble(rownames = "term") %>%
filter(grepl("beta\\[", term)) %>%
full_join(rNames.t, by = c(term = "i")) %>%
mutate(term = if_else(!is.na(term.y), term.y, term)) %>%
select(-term.y) %>%
mutate(term = case_when(
term == "stemp7010" ~ "t",
term == "sprecip7010" ~ "P",
term == "TreatmentGap" ~ "Gap",
term == "monthNaut" ~ "Autumn",
term == "tAnom" ~ "t∆",
term == "pAnom" ~ "SM∆",
term == "monthNaut:TreatmentGap:tAnom" ~ "Autumn:Gap:t∆",
term == "monthNaut:TreatmentGap:pAnom" ~ "Autumn:Gap:SM∆",
term == "monthNaut:TreatmentIntact:tAnom" ~ "Autumn:Intact:t∆",
term == "monthNaut:TreatmentIntact:pAnom" ~ "Autumn:Intact:SM∆",
term == "TreatmentGap:tAnom" ~ "Gap:t∆",
term == "TreatmentGap:pAnom" ~ "Gap:SM∆"
),
term = factor(term, levels = rev(c("Gap", "t∆", "Gap:t∆", "SM∆", "Gap:SM∆", "Autumn","Autumn:Gap:t∆", "Autumn:Gap:SM∆", "Autumn:Intact:t∆", "Autumn:Intact:SM∆", "t", "P"))))
# Coefficient plot: posterior means with 50% and 95% credible intervals.
# BUG FIXES: (1) removed the trailing comma in the second
# geom_pointintervalh() call; (2) theme_cowplot() is a *complete* theme
# that replaces everything set before it, so it must be added before the
# theme()/axis.dimLarge tweaks, not after (otherwise the blanked y-axis
# title and the axis sizing were silently discarded).
modCoefPlot %>% ggplot(aes(x = mean, y = term)) +
  geom_vline(xintercept = 0, colour = "grey50", size = 0.4) +
  geom_pointintervalh(aes(xmin = `2.5%`, xmax = `97.5%`), size = 0.4) +
  geom_pointintervalh(aes(xmin = `25%`, xmax = `75%`), size = 4) +
  geom_errorbarh(aes(xmin = `2.5%`, xmax = `97.5%`), height = 0.4) +
  geom_hline(yintercept = c(3.5, 5.5, 7.5), colour = "grey80", size = 0.4) +
  xlab("Effect size") +
  theme_cowplot() +
  axis.dimLarge +
  theme(axis.title.y = element_blank())
ggsave(filename = "~/OneDrive - University of Bergen/Research/FunCaB/paper 4/figures/fig7.jpg", dpi = 300, height = 5, width = 5)
# predictions plot II
AbundtAnom.mod$BUGSoutput$summary %>%
as.data.frame() %>%
as_tibble(rownames = "term") %>%
filter(grepl("muPred", term)) %>%
bind_cols(abdatY) %>%
mutate(stemp7010 = as.character(round(stemp7010, digits = 1)),
tempLevel = temp7010) %>%
filter(precip7010 > 1.34,
tAnom > 0) %>%
ggplot(aes(x = pAnom, y = mean)) +
geom_vline(xintercept = 0, colour = "grey80", linetype = "dashed") +
geom_hline(yintercept = 0, colour = "grey80", linetype = "dashed") +
geom_point(data = abdat, aes(y = seed, x = pAnom, colour = Treatment), shape = 21, alpha = 0.4) +
geom_ribbon(alpha = 0.2, aes(ymax = `97.5%`, ymin = `2.5%`, fill = Treatment)) +
geom_line(aes(colour = Treatment)) +
facet_grid(monthN~tempLevel) +
scale_color_manual(values = c("grey60", "Black")) +
scale_fill_manual(values = c("grey60", "Black")) +
labs(y = "seedling abundance") +
theme_classic() +
axis.dimLarge
ggsave(filename = "~/OneDrive - University of Bergen/Research/FunCaB/paper 4/figures/fig10.jpg", dpi = 300, height = 5.5, width = 9)
abdat %>% ggplot(aes(x = soilT, y = tAnom, size = seed, colour =factor(year))) +
geom_hline(yintercept = 0, colour = "grey50") +
geom_point(alpha = 0.2) +
facet_grid(Treatment~tempLevel) +
theme_classic() +
labs(x = "Air temperature at 2 m (ºC)") +
axis.dimLarge
abdat %>% ggplot(aes(x = soilM, y = pAnom, size = seed, colour =factor(year))) +
geom_hline(yintercept = 0, colour = "grey50") +
geom_point(alpha = 0.2) +
facet_grid(Treatment~precipLevel) +
theme_classic() +
axis.dimLarge
# make plots
precipdat <- abdat %>% distinct(precip7010, sprecip7010, precipLevel, precipLevelPlot) %>% as_tibble()
drawsNew <- add_draws(data = abdatY, draws = drawsAB) %>%
ungroup() %>%
filter(.draw < 200) %>%
left_join(precipdat)
drawsNew %>%
#median_qi(.width = c(0.5, 0.7, 0.975)) %>%
ggplot(aes(x = pAnom, y = .value, colour = ordered(Treatment))) +
geom_line(aes(group = paste(monthN, .draw)), alpha = .1) +
#geom_point(data = abdat) +
scale_color_brewer(palette = "Dark2") +
scale_fill_brewer(palette = "Dark2") +
facet_grid(precipLevelPlot~monthN)
# soil moisture deviation
abdat %>%
mutate(monthN = case_when(
monthN == "spr" ~ "early",
monthN == "aut" ~ "late"
)) %>%
ggplot(aes(x = pAnom, y = seed, colour = Treatment)) +
geom_vline(xintercept = 0, colour = "grey70") +
geom_hline(yintercept = 0, colour = "grey70") +
geom_point(shape = 21, size = 3) +
geom_smooth(method = "lm") +
facet_grid(.~monthN) +
scale_color_manual(values = c("Black", "grey60")) +
labs(x = "soil moisture deviation from 2009-2018 mean",
y = "seedling number") +
theme_classic() +
axis.dim +
theme(legend.title = element_blank())
ggsave(filename = "~/OneDrive - University of Bergen/Research/FunCaB/paper 4/figures/fig9.jpg", dpi = 300, width = 7.5, height = 4.5)
# Temperature deviation: seedling counts against the temperature anomaly
# (tAnom), faceted by early/late census.
abdat %>%
  mutate(monthN = case_when(
    monthN == "spr" ~ "early",
    monthN == "aut" ~ "late"
  )) %>%
  ggplot(aes(x = tAnom, y = seed, colour = Treatment)) +
  geom_vline(xintercept = 0, colour = "grey70") +
  geom_hline(yintercept = 0, colour = "grey70") +
  geom_point(shape = 21, size = 3) +
  geom_smooth(method = "lm") +
  facet_grid(.~monthN) +
  scale_color_manual(values = c("Black", "grey60")) +
  # BUG FIX: the x axis plots the temperature anomaly, but the label was
  # copy-pasted from the soil-moisture plot above.
  labs(x = "temperature deviation from 2009-2018 mean",
       y = "seedling number") +
  theme_classic() +
  axis.dim +
  theme(legend.title = element_blank())
ggsave(filename = "~/OneDrive - University of Bergen/Research/FunCaB/paper 4/figures/fig9b.jpg", dpi = 300, width = 7.5, height = 4.5)
|
66b976d61a87aac4ef96219b71db60d773156dc0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NHLData/examples/Sch6263.Rd.R
|
b8b236aa5c4b1a78309203a78aa0184eb5e688b1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
Sch6263.Rd.R
|
library(NHLData)
### Name: Sch6263
### Title: 1962-1963 Season Results
### Aliases: Sch6263
### Keywords: datasets
### ** Examples
# Load the 1962-63 NHL season results data frame into the workspace.
data(Sch6263)
## This command will show the results for the first game of the season.
Sch6263[1,]
|
a4fbe66f657cf070ce28b9dc697ebac5a44d61b5
|
eb6fad9bee922702d9857bab56ea818126145806
|
/R/HulC.R
|
9a0705f91137fb4ed1efc2fdc2ddeb469e3ed117
|
[] |
no_license
|
CodingMyLife/HulC
|
ee8530eea7d8f20479b45dedfee3c5789944671d
|
20ceb936057d52d438907531db5eaf147537e745
|
refs/heads/main
| 2023-05-31T07:08:18.395157
| 2021-06-20T15:45:52
| 2021-06-20T15:45:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,605
|
r
|
HulC.R
|
source("auxiliary_functions.R")
## HulC1d() uses asymptotic median bias value to construct
## convex hull confidence interval for a univariate
## parameter. This is Algorithm 1 of the paper.
## data is a data frame.
## estimate is a function that takes a data frame as input
## and returns a one-dimensional estimate.
## alpha is the level.
## Delta is the median bias of the estimate().
## randomize is a logical. If TRUE then the number of splits
## is randomized. If FALSE, then the larger number of
## splits is used.
HulC1d <- function(data, estimate, alpha = 0.05, Delta = 0, randomize = TRUE){
  data <- as.matrix(data)
  nn <- nrow(data)
  # Randomly permute the rows so the B folds below are exchangeable.
  data <- data[sample(nn),,drop=FALSE]
  # Smallest number of splits B1 satisfying the coverage bound for the
  # given level and median bias (solve_for_B() is defined in
  # auxiliary_functions.R; t = 0 means no interval inflation).
  B1 <- solve_for_B(alpha = alpha, Delta = Delta, t = 0)
  B <- B1
  if(randomize){
    # Randomize between B1 - 1 and B1 splits so the achieved miscoverage
    # equals alpha exactly instead of being conservative.
    # p_B = (1/2 + Delta)^B + (1/2 - Delta)^B is the miscoverage with B splits.
    p1 <- (1/2 + Delta)^B1 + (1/2 - Delta)^B1
    B0 <- B1 - 1
    p0 <- (1/2 + Delta)^B0 + (1/2 - Delta)^B0
    U <- runif(1)
    # Choose B0 with probability tau, B1 otherwise, so the mixture hits alpha.
    tau <- (alpha - p1)/(p0 - p1)
    B <- B0*(U <= tau)+ B1*(U > tau)
  }
  # Cannot form more folds than there are observations.
  if(B > nn){
    print(paste0("Delta = ", Delta, ", No. of splits = ", B, ", Sample size = ", nn))
    stop("Error: not enough samples for splitting!")
  }
  # One estimate per (near-)equal-sized fold; the confidence interval is
  # the range (convex hull) of the fold estimates.
  ci_est <- rep(0, B)
  TMP <- split(1:nn, sort((1:nn)%%B))
  for(idx in 1:B){
    ci_est[idx] <- estimate(data[TMP[[idx]],,drop=FALSE])
  }
  CI <- range(ci_est)
  names(CI) <- c("lwr", "upr")
  ret <- list(CI = CI, median.bias = Delta, B = B)
  return(ret)
}
## HulC() uses asymptotic median bias value to construct
## rectangular hull confidence region. This is Algorithm 1
## of the paper with union bound.
## data is a data frame.
## estimate is a function that takes a data frame as input
## and returns a one-dimensional estimate. If multivariate,
## union will be used to obtain the confidence region.
## alpha is the level.
## Delta is the median bias of the estimate(). It can be a
## vector. If a scalar is given, then it will repeated to
## form a vector of same length as dim.
## dim is the dimension of the output of estimate().
## randomize is a logical. If TRUE then the number of splits
## is randomized. If FALSE, then the larger number of
## splits is used.
# Coordinate-wise HulC: applies HulC1d() to each coordinate of a
# multivariate estimator at level alpha/dim (union bound) and stacks the
# resulting intervals into a rectangular confidence region.
# Interface unchanged; returns list(CI, median.bias, B) as before.
HulC <- function(data, estimate, alpha = 0.05, Delta = 0, dim = 1, randomize = TRUE){
  data <- as.matrix(data)
  if(length(Delta) == 1){
    # Recycle a scalar median bias across all coordinates.
    Delta <- Delta*rep(1, dim)
  }
  CI <- matrix(0, nrow = dim, ncol = 2)
  B <- rep(0, dim)
  colnames(CI) <- c("lwr", "upr")
  for(idx in seq_len(dim)){
    # Projection of the estimator onto coordinate idx.
    foo <- function(dat){
      estimate(dat)[idx]
    }
    # BUG FIX: the tryCatch() result must be assigned. Previously the
    # error handler assigned `hulc_idx` only inside its own frame, so on
    # failure the loop either crashed on an undefined object (idx == 1)
    # or silently reused the previous coordinate's interval.
    hulc_idx <- tryCatch(
      HulC1d(data, foo, alpha = alpha/dim, Delta = Delta[idx], randomize = randomize),
      error = function(e){
        list(CI = c(NA, NA), B = NA)
      }
    )
    CI[idx,] <- hulc_idx$CI
    B[idx] <- hulc_idx$B
  }
  ret <- list(CI = CI, median.bias = Delta, B = B)
  return(ret)
}
## Adaptive_HulC() estimates the median bias of the estimator
## and construct the rectangular confidence region. This is
## Algorithm 2 of the paper with union bound.
## data is a data frame.
## estimate is a function that takes a data frame as input
## and returns a one-dimensional estimate. If multivariate,
## union will be used to obtain the confidence region.
## alpha is the level.
## dim is the dimension of the output of estimate().
## subsamp_exp is the exponent of sample size
## nsub is the number of subsamples.
## randomize is a logical. If TRUE then the number of splits
## is randomized. If FALSE, then the larger number of
## splits is used.
# Adaptive HulC: estimates the median bias of each coordinate by
# subsampling (subsamp_median_bias(), from auxiliary_functions.R) and then
# runs HulC1d() per coordinate at level alpha/dim (union bound).
# Interface unchanged; returns list(CI, median.bias, B) as before.
adaptive_HulC <- function(data, estimate, alpha = 0.05, dim = 1, subsamp_exp = 2/3, nsub = 1000, randomize = TRUE){
  data <- as.matrix(data)
  CI <- matrix(0, nrow = dim, ncol = 2)
  colnames(CI) <- c("lwr", "upr")
  B <- rep(0, dim)
  Delta <- rep(0, dim)
  for(idx in seq_len(dim)){
    # Projection of the estimator onto coordinate idx.
    foo <- function(dat){
      estimate(dat)[idx]
    }
    # Subsample estimate of the coordinate's median bias.
    Delta[idx] <- subsamp_median_bias(data, foo, subsamp_exp = subsamp_exp, nsub = nsub)
    # BUG FIX: assign the tryCatch() result; the old code's handler set
    # `hulc_idx` only locally, leaving the loop to crash or reuse a stale
    # interval on failure.
    hulc_idx <- tryCatch(
      HulC1d(data, foo, alpha = alpha/dim, Delta = Delta[idx], randomize = randomize),
      error = function(e){
        list(CI = c(NA, NA), B = NA)
      }
    )
    CI[idx,] <- hulc_idx$CI
    B[idx] <- hulc_idx$B
  }
  ret <- list(CI = CI, median.bias = Delta, B = B)
  return(ret)
}
## unimodal_HulC1d() uses asymptotic median bias and unimodality to
## construct an inflated convex hull confidence interval for a
## univariate parameter. This is Algorithm 3 of the paper.
## data is a data frame.
## estimate is a function that takes a data frame as input and returns
## a one-dimensional estimate.
## alpha is the level.
## Delta is the asymptotic median bias of the estimate().
## t is the inflation factor.
## randomize is a logical. If TRUE then the number of splits
## is randomized. If FALSE, then the larger number of
## splits is used.
unimodal_HulC1d <- function(data, estimate, alpha = 0.05, Delta = 1/2, t = 0.1, randomize = TRUE){
  data <- as.matrix(data)
  nn <- nrow(data)
  # Randomly permute the rows so the B folds below are exchangeable.
  data <- data[sample(nn),,drop=FALSE]
  # Smallest number of splits satisfying the (inflated, t > 0) coverage
  # bound under unimodality; solve_for_B() is from auxiliary_functions.R.
  B1 <- solve_for_B(alpha = alpha, Delta = Delta, t = t)
  B <- B1
  if(randomize){
    # Randomize between B1 - 1 and B1 splits so the miscoverage equals
    # alpha exactly; the (1 + t)^(-B + 1) factor accounts for interval
    # inflation under unimodality.
    p1 <- ((1/2 - Delta)^B1 + (1/2 + Delta)^B1)*(1 + t)^(-B1 + 1)
    B0 <- B1 - 1
    p0 <- ((1/2 - Delta)^B0 + (1/2 + Delta)^B0)*(1 + t)^(-B0 + 1)
    U <- runif(1)
    tau <- (alpha - p1)/(p0 - p1)
    B <- B0*(U <= tau)+ B1*(U > tau)
  }
  # Cannot form more folds than there are observations.
  if(B > nn){
    print(paste0("Delta = ", Delta, ", No. of splits = ", B, ", Sample size = ", nn))
    stop("Error: not enough samples for splitting!")
  }
  # One estimate per fold; the interval is the range of the fold
  # estimates, inflated on each side by t times its width.
  ci_est <- rep(0, B)
  TMP <- split(1:nn, sort((1:nn)%%B))
  for(idx in 1:B){
    ci_est[idx] <- estimate(data[TMP[[idx]],,drop=FALSE])
  }
  CI <- range(ci_est)
  CI <- CI + t*diff(CI)*c(-1, 1)
  names(CI) <- c("lwr", "upr")
  ret <- list(CI = CI, median.bias = Delta, B = B)
  return(ret)
}
## unimodal_HulC() uses asymptotic median bias and unimodality to
## construct an inflated rectangular hull confidence region for a
## multivariate parameter. This is Algorithm 3 of the paper with
## union bound.
## data is a data frame.
## estimate is a function that takes a data frame as input and returns
## a one-dimensional estimate. If multivariate, union bound is used.
## alpha is the level.
## Delta is the asymptotic median bias of the estimate(). It is allowed
## to be a vector. If a scalar is given, then it will repeated to
## form a vector of same length as dim.
## t is the inflation factor.
## randomize is a logical. If TRUE then the number of splits
## is randomized. If FALSE, then the larger number of
## splits is used.
# Coordinate-wise unimodal HulC: applies unimodal_HulC1d() to each
# coordinate at level alpha/dim (union bound) and stacks the inflated
# intervals into a rectangular confidence region.
# Interface unchanged; returns list(CI, median.bias, B) as before.
unimodal_HulC <- function(data, estimate, alpha = 0.05, Delta = 1/2, t = 0.1, dim = 1, randomize = TRUE){
  data <- as.matrix(data)
  if(length(Delta) == 1){
    # Recycle a scalar median bias across all coordinates.
    Delta <- Delta*rep(1, dim)
  }
  CI <- matrix(0, nrow = dim, ncol = 2)
  colnames(CI) <- c("lwr", "upr")
  B <- rep(0, dim)
  for(idx in seq_len(dim)){
    # Projection of the estimator onto coordinate idx.
    foo <- function(dat){
      estimate(dat)[idx]
    }
    # BUG FIX: assign the tryCatch() result; the old handler assigned
    # `hulc_idx` only in its own frame, so failures crashed the loop or
    # silently reused the previous coordinate's interval.
    hulc_idx <- tryCatch(
      unimodal_HulC1d(data, foo, alpha = alpha/dim, Delta = Delta[idx], t = t, randomize = randomize),
      error = function(e){
        list(CI = c(NA, NA), B = NA)
      }
    )
    CI[idx,] <- hulc_idx$CI
    B[idx] <- hulc_idx$B
  }
  ret <- list(CI = CI, median.bias = Delta, B = B)
  return(ret)
}
## adaptive_unimodal_HulC() uses estimated median bias and unimodality to
## construct an inflated rectangular hull confidence region for a
## multivariate parameter. This is Algorithm 3 of the paper with
## subsample estimate of median bias and union bound.
## data is a data frame.
## estimate is a function that takes a data frame as input and returns
## a one-dimensional estimate. If multivariate, union bound is used.
## alpha is the level.
## t is the inflation factor.
## dim is the dimension of the output of estimate().
## subsamp_exp is the exponent of sample size
## nsub is the number of subsamples.
## randomize is a logical. If TRUE then the number of splits
## is randomized. If FALSE, then the larger number of
## splits is used.
# Adaptive unimodal HulC: estimates the per-coordinate median bias by
# subsampling, then runs unimodal_HulC1d() per coordinate at level
# alpha/dim (union bound) with interval inflation t.
# Interface unchanged; returns list(CI, median.bias, B) as before.
adaptive_unimodal_HulC <- function(data, estimate, alpha = 0.05, t = 0.1, dim = 1, subsamp_exp = 2/3, nsub = 1000, randomize = TRUE){
  data <- as.matrix(data)
  CI <- matrix(0, nrow = dim, ncol = 2)
  colnames(CI) <- c("lwr", "upr")
  Delta <- rep(0, dim)
  B <- rep(0, dim)
  for(idx in seq_len(dim)){
    # Projection of the estimator onto coordinate idx.
    foo <- function(dat){
      estimate(dat)[idx]
    }
    # Subsample estimate of the coordinate's median bias.
    Delta[idx] <- subsamp_median_bias(data, foo, subsamp_exp = subsamp_exp, nsub = nsub)
    # BUG FIX: assign the tryCatch() result; the old handler assigned
    # `hulc_idx` only locally, leaving the loop to crash or reuse a stale
    # interval when unimodal_HulC1d() errored.
    hulc_idx <- tryCatch(
      unimodal_HulC1d(data, foo, alpha = alpha/dim, Delta = Delta[idx], t = t, randomize = randomize),
      error = function(e){
        list(CI = c(NA, NA), B = NA)
      }
    )
    CI[idx,] <- hulc_idx$CI
    B[idx] <- hulc_idx$B
  }
  ret <- list(CI = CI, median.bias = Delta, B = B)
  return(ret)
}
|
578c4266892205ff8db41c06cc8968f70c76c6cd
|
e25265fc84148575a244882e07f2eafd3a3ef06f
|
/primeirospassos.R
|
1b84c5e2332e96ebc9003a6df7571804fdb10dad
|
[
"MIT"
] |
permissive
|
Lucianea/blog_Luciane
|
0261ac8f5236a0bcfee16fcaaefde276fd9ea6f0
|
a25d9db0d387af0b6eb1ff7740617b4194767141
|
refs/heads/master
| 2020-04-02T17:41:45.111432
| 2018-11-01T15:15:57
| 2018-11-01T15:15:57
| 154,668,256
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
r
|
primeirospassos.R
|
# Steps after creating the repository on GitHub
# and opening a new project in RStudio
# 1
devtools::install_github("rstudio/blogdown")
# 2
blogdown::install_hugo()
# 3
blogdown::new_site(theme = "gcushen/hugo-academic")
# alternative theme:
# jchatkinson/HugoMDL
blogdown::new_site(theme = "djuelg/Shapez-Theme")
# preview the site locally in RStudio
blogdown::serve_site()
# Tweaks for the landing page:
# see hero.md (texts, image, etc.)
# the image used by hero.md lives in static/img/headers
# icon: could not find where the options live, so a plain word was used instead
# The site structure is under content/
|
cf5b1a564a09b1921c1cb5a6303ee97aaf07b49a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pgdraw/examples/pgdraw.moments.Rd.R
|
f9bff4c3db535c42ded770ecff2bf453ed2bfb65
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
pgdraw.moments.Rd.R
|
library(pgdraw)
### Name: pgdraw.moments
### Title: Compute exact first and second moments for the Polya-Gamma
### distribution, PG(b, c)
### Aliases: pgdraw.moments
### ** Examples
# -----------------------------------------------------------------
# Example: Simulated vs exact moments
# Draw 1e6 samples from PG(1, 0.5) and compare the empirical mean and
# variance against the closed-form moments from pgdraw.moments().
u = matrix(1,1e6,1)
x = pgdraw(1,0.5*u)
mean(x)
var(x)
pgdraw.moments(1,0.5)
# Same comparison for PG(2, 2).
x = pgdraw(2,2*u)
mean(x)
var(x)
pgdraw.moments(2,2)
|
8baa481d010f5ac8739a35c4a2edad90ef5d4fdc
|
ef01b6aae7a1ac7a3e5422090d97889e3c5c47a8
|
/src/dotplot.R
|
02c25401154fd6f9f296b28cffca46d6962d40d4
|
[] |
no_license
|
chitrita/single_cell_analysis
|
26241f606c954491aed71cbdc4e740ff46f5be4e
|
d09397a6795e9ff2c4cb89ed5d935f0f229d853d
|
refs/heads/master
| 2020-04-10T22:43:22.282724
| 2017-02-05T02:37:10
| 2017-02-05T02:37:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,036
|
r
|
dotplot.R
|
# The follow code is taken from the Github Repo BipolarCell2016 written by Karthik Shekhar
# All credit goes to Karthik
# For the original code please see
# https://github.com/broadinstitute/BipolarCell2016/blob/master/class.R
library(ggplot2)
library(reshape)
# Dot plot of gene expression per cell cluster (adapted from Karthik
# Shekhar's BipolarCell2016 `class.R`).
# Dot size   = percentage of cells in the group expressing the gene above
#              `thresh.use`.
# Dot colour = mean transcript count over the expressing cells only.
#
# Arguments:
#   seurat.obj   - (old-style) Seurat object with @data, @raw.data, @ident,
#                  @data.info slots.
#   features.use - gene names to plot (silently filtered to those present).
#   group.use    - grouping: "ident" (default) or a column of @data.info.
#   subset.group - optional subset of group labels to plot, in order.
#   thresh.use   - expression threshold for "expressing".
#   max.val.perc / max.val.exp - caps on percentage / expression values.
#   max.size     - maximum dot size; min.perc - drop genes never expressed
#                  in at least this fraction of any group.
#   ...          - accepted for call-compatibility, currently unused.
#
# BUG FIX: all matrix subsets now use drop = FALSE, so a single gene (or a
# single gene surviving the min.perc filter) no longer collapses the
# matrices to vectors, which broke apply()/colnames()/melt() downstream.
dot.plot <- function(seurat.obj,
                     features.use,
                     group.use=NULL,
                     subset.group=NULL,
                     thresh.use=0,
                     max.val.perc=NULL,
                     max.val.exp=NULL,
                     max.size=10,
                     min.perc=0, ...) {
  # Only use the provided names found in the seurat object
  features.use <- features.use[features.use %in% rownames(seurat.obj@data)]
  # If no grouping is given, default to the cluster identities
  if (is.null(group.use)) group.use <- "ident"
  # Set the group labels
  group.labels <- seurat.obj@ident
  names(group.labels) <- names(seurat.obj@ident)
  if (group.use != "ident"){
    if (group.use %in% colnames(seurat.obj@data.info)){
      group.labels <- seurat.obj@data.info[[group.use]]
      names(group.labels) <- rownames(seurat.obj@data.info)
    } else {
      print("Could not find the group in the seurat object. Please check your group.use value.")
      return("Error, did not complete. Code 1")
    }
  }
  # Initialize matrix of percent expressing cells (genes x 0 groups)
  PercMat <- matrix(0, nrow = length(features.use), ncol = 0)
  rownames(PercMat) <- features.use
  # Initialize matrix of average transcript levels
  ExpMat <- PercMat
  # Raw counts, restricted to cells present in @data
  # (drop = FALSE keeps a matrix when only one gene is requested)
  Count.mat <- seurat.obj@raw.data[features.use, colnames(seurat.obj@data), drop = FALSE]
  # Define the cell groupings
  groupings <- unique(group.labels)
  if (!is.null(subset.group)){
    additional.labels <- setdiff(subset.group, group.labels)
    if (length(additional.labels) > 0){
      print("Some of the groups in subset.group were not in grouping found for group.use.")
      print(paste(additional.labels, collapse = ","))
      print("Please check your group.use and subset.group values.")
      return("Error, did not complete. Code 2")
    }
    groupings <- subset.group
  }
  # In each group, for each gene find:
  #   - the fraction of cells expressed over the threshold
  #   - the mean of the non-zero raw counts
  for (label in groupings){
    cells.in.cluster <- names(group.labels[group.labels == label])
    vec.exp <- apply(seurat.obj@data[features.use, cells.in.cluster, drop = FALSE], 1,
                     function(x) sum(x > thresh.use) / length(x))
    PercMat <- cbind(PercMat, vec.exp)
    vec.exp <- apply(Count.mat[features.use, cells.in.cluster, drop = FALSE], 1,
                     function(x) if (sum(x > 0) > 1) { mean(x[x > 0]) } else { sum(x) })
    ExpMat <- cbind(ExpMat, vec.exp)
  }
  # Set the column names as the groups/labels
  colnames(ExpMat) <- groupings
  colnames(PercMat) <- groupings
  # Keep genes that meet the min percent threshold in at least one group
  # (drop = FALSE so a single surviving gene stays a 1-row matrix)
  rows.use <- rownames(PercMat)[apply(PercMat, 1, function(x) max(x) >= min.perc)]
  PercMat <- PercMat[rows.use, , drop = FALSE]
  ExpMat <- ExpMat[rows.use, , drop = FALSE]
  features.use <- rows.use
  # Optional caps on the displayed values
  if (!is.null(max.val.perc)) PercMat[PercMat > max.val.perc] <- max.val.perc
  if (!is.null(max.val.exp)) ExpMat[ExpMat > max.val.exp] <- max.val.exp
  # Prep for ggplot: long format with one row per (gene, cluster)
  ExpVal <- melt(ExpMat)
  PercVal <- melt(PercMat)
  colnames(ExpVal) <- c("gene", "cluster", "nTrans")
  ExpVal$percExp <- PercVal$value * 100
  # Plot
  ExpVal$gene <- factor(ExpVal$gene, levels = features.use)
  ExpVal$cluster <- factor(ExpVal$cluster, levels = rev(groupings))
  p <- ggplot(ExpVal,
              aes(y = factor(cluster),
                  x = factor(gene))) +
    geom_point(aes(colour = nTrans, size = percExp)) +
    scale_color_gradient(low = "blue", high = "red", limits = c(1, max(ExpVal$nTrans))) +
    scale_size(range = c(0, max.size)) +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
  p <- p +
    ylab("Cluster") +
    xlab("Gene") +
    theme(axis.text.x = element_text(size = 12, face = "italic", angle = 45, hjust = 1)) +
    theme(axis.text.y = element_text(size = 12, face = "italic"))
  print(p)
}
|
08c0cc8af2ac92f8bd9db5471046a2baae3f9cb1
|
279277403782c464d08a388e114fa28a41c2bb7c
|
/man/PAYF.Rd
|
7aa7ae0c1d452e718f38cf83d74fb28b7cb60aed
|
[] |
no_license
|
nenaoana/SetMethods
|
44be0a8331e51faaff72bd67ad05e1133520646b
|
eae2232c57c8db822c3cc6c0741d23b16249f0dc
|
refs/heads/master
| 2023-04-07T12:52:41.675428
| 2023-03-31T08:40:13
| 2023-03-31T08:40:13
| 61,364,860
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
rd
|
PAYF.Rd
|
\name{PAYF}
\alias{PAYF}
\docType{data}
\title{Paykani et al. (2018)}
\description{The \code{PAYF} data frame has 131 rows and 9 sets. The data is calibrated into fuzzy sets.}
\usage{data(PAYF)}
\format{
A data frame with 131 observations on the following 9 sets.
\describe{
\item{\code{COUNTRY}}{Country}
\item{\code{REGION}}{Region the country belongs to.}
\item{\code{HE}}{Condition: healthy education system}
\item{\code{GG}}{Condition: good governance}
\item{\code{AH}}{Condition: affluent health system}
\item{\code{HI}}{Condition: high income inequality}
\item{\code{HW}}{Condition: high wealth}
\item{\code{HL}}{Outcome: high life expectancy}
\item{\code{LL}}{Negated Outcome: low life expectancy}
}
}
\references{
Paykani, Toktam, Rafiey, Hassan, and Sajjadi, Homeira. 2018. A fuzzy set qualitative comparative analysis of 131 countries: which configuration of the structural conditions can explain health better? International journal for equity in health, 17(1), 10.
}
\examples{data(PAYF)}
\keyword{datasets}
|
a65398af454de229e5e2c57f5e10b712806559cc
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/sourcecode/FreeAssoc.R
|
a820fbb74892b5771a31452c7f406ecc2440f821
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,755
|
r
|
FreeAssoc.R
|
#
# FreeAssoc.R, 28 May 20
# Data from:
# The University of {South Florida} Word Association, Rhyme and Word Fragment Norms
# Douglas L. Nelson and Cathy L. McEvoy and Thomas A. Schreiber
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG word_association experiment_human memory_association
source("ESEUR_config.r")
library("plyr")
par(mar=MAR_default+c(0.0, 0.7, 0, 0))
# Words only given by one subject don't appear in the data.
# But the total number of subjects is given.
# Percentage of responses missing from the data because only one subject
# gave them: the gap between the number of subjects shown the cue
# (df$X.G, constant within a cue) and the responses accounted for
# (the sum of df$X.P), expressed as a percentage of X.G.
percent_one_use <- function(df) {
  n_subjects <- df$X.G[1]
  accounted <- sum(df$X.P)
  100 * (n_subjects - accounted) / n_subjects
}
# For one cue's response table, return a length-100 vector giving, per
# integer percentage of subjects, the probability that that many subjects
# chose the same response word.
# NOTE(review): `df$X.G` is used as if it were constant within a cue
# (recycled in the indexing below) -- verify that each CUE group has a
# single X.G value.
percent_usage=function(df)
{
usage=rep(0, 1000)
# The probability that 2:10 subjects will choose the same word.
# In the data, many words are only ever chosen by two people,
# assume that this number scales with sample size (the contribution
# from these cases is tiny anyway).
L10=100*sapply(2:10, function(X) sum((df$X.P/df$X.G)^X))
# Approximate the probability for each percentage likelihood.
usage[round(1000*(2:10)/df$X.G)]=L10
# Using 1,000 bins improves resolution, need to reduce it
# to 100 bins (for a percentage).
u100=sapply(0:99, function(X) sum(usage[(1:10)+X*10]))
return(u100)
}
# Column meanings in the free-association data:
# CUE: word subjects see
# TARGET: Response word given
# X.G: Number of subjects seeing CUE
# X.P: Number of subjects responding with TARGET
FA=read.csv(paste0(ESEUR_dir, "sourcecode/FreeAssoc.csv.xz"), as.is=TRUE)
# Per-cue probability profile over "percentage of subjects" bins,
# then averaged over all cues.
t=daply(FA, .(CUE), percent_usage)
mean_use=colMeans(t)
# point_col and other plotting defaults come from ESEUR_config.r.
plot(mean_use, log="y", col=point_col,
	xaxs="i",
	xlim=c(1, 12),
	xlab="Subjects (percentage)", ylab="Same word (probability)\n\n")
# one_use=ddply(FA, .(CUE), percent_one_use)
# mean(one_use$V1)
# sd(one_use$V1)
98cf42d03c445841ce579940609f81a3a5248b70
|
dd40d9e174598437cb543b68dde983a5fb9163ac
|
/man/inference.Rd
|
94849ab34f39bcc402ebe2a26889fc70e4e42deb
|
[
"Apache-2.0"
] |
permissive
|
lananhle/VERSO
|
afde8f0f11664719192433f86cc6956918c19e33
|
6a87f85aa85270d91677c8620589a7bedfa70026
|
refs/heads/master
| 2023-05-07T07:22:29.579378
| 2021-04-21T10:47:05
| 2021-04-21T10:47:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 452
|
rd
|
inference.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{inference}
\alias{inference}
\title{Results obtained running VERSO on the provided input dataset.}
\format{
results obtained running VERSO on the provided input dataset
}
\usage{
data(inference)
}
\value{
results obtained running VERSO on the provided input dataset
}
\description{
Results obtained running VERSO on the provided input dataset.
}
|
5ed79969ab16892606a6a5cd0ed5ba8afd76a971
|
eba6506142f820e3d6f77c415504cbb2ff7e5da3
|
/code/CART.R
|
410588c42b60dc29992c20e2e1dd259e8a616c84
|
[] |
no_license
|
stewarthkerr/causal992_project
|
2a5bf17d63c4501e878d23cfb27bcfc8f42f15f7
|
d360556d1addb61811eb8b90ca0c442972912657
|
refs/heads/master
| 2021-07-16T13:23:13.603407
| 2020-10-29T02:08:27
| 2020-10-29T02:08:27
| 220,862,789
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,748
|
r
|
CART.R
|
# Subgroup discovery with the CART algorithm (rpart package): fit a
# classification tree to matched treated/control pairs and use it to
# identify subgroups of interest.

# Covariates on which pairs are required to match exactly.
# Good candidates are RSHLT, RSMOKEN, RHLTHLM.
exact_covariates <- c("RSMOKEN", "RAGENDER_1.Male", "INITIAL_INCOME", "RSHLT")

# Load libraries/helper functions and the matching results.
source("helpers.R")
results_final <- read.csv('../data/results-final.csv')

### Split the results into the treated and the control member of each pair,
### sorted so that row i of each data frame belongs to the same pair.
treated <- results_final %>%
  filter(treated == 1) %>%
  dplyr::select(-treated) %>%
  arrange(pair_ID)
control <- results_final %>%
  filter(treated == 0) %>%
  dplyr::select(-treated) %>%
  arrange(pair_ID)

#################################################################################
################################ Initial Tree ###################################
#################################################################################

# Covariates (columns 7 onward) that happen to match exactly in every pair.
treated_covariates <- treated[7:ncol(treated)]
control_covariates <- control[7:ncol(control)]
exact_matches <- names(which(colSums(abs(treated_covariates - control_covariates)) == 0))

# Pairs that match exactly on the chosen covariates.
exact_covariates_treated <- dplyr::select(treated_covariates, one_of(exact_covariates))
exact_covariates_control <- dplyr::select(control_covariates, one_of(exact_covariates))
exact_obs <- which(rowSums(abs(exact_covariates_treated - exact_covariates_control)) == 0)

# For those pairs, assemble the CART input: pair id, outcome and every
# exactly matched covariate.
treated_CART <- treated %>%
  filter(pair_ID %in% exact_obs) %>%
  dplyr::select(pair_ID, outcome,
                one_of(exact_matches),
                one_of(exact_covariates))
control_CART <- control %>%
  filter(pair_ID %in% exact_obs) %>%
  dplyr::select(pair_ID, outcome,
                one_of(exact_matches),
                one_of(exact_covariates))
matched_pairs <- inner_join(treated_CART, control_CART,
                            by = c("pair_ID", "outcome", exact_matches, exact_covariates),
                            suffix = c(".treated", ".control"))

### The tree predicts the magnitude of the outcome, not its direction.
CART_input <- matched_pairs %>%
  mutate(CART_outcome = abs(outcome)) %>%
  dplyr::select(-pair_ID, -outcome)
### Treat every column (outcome included) as a factor.
CART_input <- mutate_all(CART_input, factor)

# Preliminary tree, used only to discover which covariates matter.
pre_tree <- rpart(CART_outcome ~ ., data = CART_input)

#################################################################################
################################### Final Tree ##################################
#################################################################################

# Dropping exact-matching covariates the preliminary tree never used can
# admit additional matched pairs.
CART_covariates <- names(pre_tree$variable.importance)
CART_covariates_treated <- dplyr::select(treated_covariates, one_of(CART_covariates))
CART_covariates_control <- dplyr::select(control_covariates, one_of(CART_covariates))
CART_exact_obs <- which(rowSums(abs(CART_covariates_treated - CART_covariates_control)) == 0)

# Rebuild the CART input from the (possibly larger) set of exact pairs.
treated_CART <- treated %>%
  filter(pair_ID %in% CART_exact_obs) %>%
  dplyr::select(pair_ID, outcome,
                one_of(exact_matches),
                one_of(CART_covariates))
control_CART <- control %>%
  filter(pair_ID %in% CART_exact_obs) %>%
  dplyr::select(pair_ID, outcome,
                one_of(exact_matches),
                one_of(CART_covariates))
matched_pairs <- inner_join(treated_CART, control_CART,
                            by = c("pair_ID", "outcome", exact_matches, CART_covariates),
                            suffix = c(".treated", ".control"))
write.csv(matched_pairs, "../data/matched-pairs.CART.csv", row.names = FALSE)

### Directionless outcome again, everything as factors.
CART_input <- matched_pairs %>%
  mutate(CART_outcome = abs(outcome)) %>%
  dplyr::select(-pair_ID, -outcome)
CART_input <- mutate_all(CART_input, factor)

### Fit and plot the final tree (model = TRUE keeps the model frame around).
final_tree <- rpart(CART_outcome ~ ., data = CART_input, model = TRUE)
rpart.plot(final_tree)

### Leaf node reached by each pair.
final_tree$frame$node <- rownames(final_tree$frame)
leaves <- final_tree$frame[final_tree$where, "node"]
### Data frame the tree was built from.
tree_df <- final_tree$model
### Decision path leading to each leaf.
path.rpart(final_tree, unique(leaves))

### Record the covariates the final tree used.
CART_covariates <- names(final_tree$variable.importance)
write.csv(CART_covariates, "../data/CART-covariates.csv", row.names = FALSE)

# ###################################EXTRA###################################
#
# ##note initial income level one only has 0 for response only one pair with this
# x <- subset(CART_input, INITIAL_INCOME==1)
# length(x$INITIAL_INCOME)
#
# library(ggplot2)
# CART_input$CART_outcome <- as.factor(CART_input$CART_outcome)
# g1 <- ggplot(data=CART_input, aes(INITIAL_INCOME)) +
#   geom_bar(aes(fill=CART_outcome, group=CART_outcome, color=CART_outcome))
# g2 <- ggplot(data=CART_input, aes(RAGENDER_1.Male)) +
#   geom_bar(aes(fill=CART_outcome, group=CART_outcome, color=CART_outcome))
# g3 <- ggplot(data=CART_input, aes(RSMOKEV)) +
#   geom_bar(aes(fill=CART_outcome, group=CART_outcome, color=CART_outcome))
# g4 <- ggplot(data=CART_input, aes(RACE_ETHN_NonHispOther)) +
#   geom_bar(aes(fill=CART_outcome, group=CART_outcome, color=CART_outcome))
# g5 <- ggplot(data=CART_input, aes(RMSTAT_2.Married.spouse.absent)) +
#   geom_bar(aes(fill=CART_outcome, group=CART_outcome, color=CART_outcome))
# g6 <- ggplot(data=CART_input, aes(RMSTAT_6.Separated.divorced)) +
#   geom_bar(aes(fill=CART_outcome, group=CART_outcome, color=CART_outcome))
#
# g1
# g2
# g3
# g4
# g5
# g6
|
ef981ebe4619d8e23b71cf5b9f1f31e21f3c1aef
|
1d4f89dd72bb30591464c2c52b4e264512ea04a4
|
/assets/rcode/2014_03_30.r
|
e15033d73182ef192f0c7eda0ec7c5bb6c63247c
|
[
"MIT"
] |
permissive
|
TongZZZ/TongZZZ.github.io
|
00c253dcd70caddd3f56770c0716300a24a31136
|
1ba49eddbf2211dde61cea63b1f1a8376a464f42
|
refs/heads/master
| 2020-06-06T18:23:45.891899
| 2014-10-18T21:27:35
| 2014-10-18T21:27:35
| 18,046,709
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,307
|
r
|
2014_03_30.r
|
#### Scenario I: every participant answers independently at random.
# For each group size Q (1..Chi), run P experiments.  In each experiment the
# Q participants each answer "A" with probability 2/3 and "B" otherwise.
# COMB1[Q] is how often the group's majority answer is "A" (combined
# prediction accuracy); INDI1[Q] is the fraction of individual "A" answers.
Chi <- 100   # Number of Participants (maximum group size)
P <- 1000    ### Number of Experiments per group size
# Preallocate results (originally grown one cell at a time with cbind).
COMB1 <- numeric(Chi)
INDI1 <- numeric(Chi)
for (Q in 1:Chi)
{
  ASUM1 <- 0   # experiments with an "A" majority
  BSUM1 <- 0   # experiments with a "B" majority
  AIND1 <- 0   # individual "A" answers across all experiments
  BIND1 <- 0   # individual "B" answers across all experiments
  for (p in 1:P)
  {
    # One uniform draw per participant; runif(Q) consumes the RNG stream
    # exactly as Q successive runif(1) calls did in the original loop.
    E <- ifelse(runif(Q) >= 1/3, "A", "B")   ### Probability of random draws
    nA <- sum(E == "A")
    nB <- Q - nA
    AIND1 <- AIND1 + nA
    BIND1 <- BIND1 + nB
    # Tally the experiment's majority; exact ties count for neither side.
    if (nA > nB) {
      ASUM1 <- ASUM1 + 1
    } else if (nB > nA) {
      BSUM1 <- BSUM1 + 1
    }
    # The original's manual p=p+1 / q=q+1 counters were redundant: for()
    # already advances its index.
  }
  COMB1[Q] <- ASUM1 / (ASUM1 + BSUM1)
  INDI1[Q] <- AIND1 / (AIND1 + BIND1)
}
COMB1 ### Combined prediction accuracy in Scenario I
INDI1 ### Individual prediction accuracy in Scenario I
#### Scenario II: participants answer sequentially and follow a clear majority.
# Participant n sees all previous answers.  If "A" leads by 2 or more the
# participant answers "A"; if "B" leads by 2 or more, "B"; otherwise the
# answer is random with P("A") = 2/3, exactly as in Scenario I.
Nu <- 100    ### Number of Participants (maximum group size)
M <- 1000    ### Number of Experiments per group size
# Preallocate results (originally grown one cell at a time with cbind).
COMB2 <- numeric(Nu)
INDI2 <- numeric(Nu)
for (N in 1:Nu)
{
  ASUM2 <- 0   # experiments with an "A" majority
  BSUM2 <- 0   # experiments with a "B" majority
  AIND2 <- 0   # individual "A" answers across all experiments
  BIND2 <- 0   # individual "B" answers across all experiments
  for (m in 1:M)
  {
    D <- character(N)   # answers, preallocated (was grown with cbind)
    # Running count of (#"A" - #"B") so far; replaces the original's O(n)
    # recomputation sum((D=="A")-(D=="B")) at every step.
    lead <- 0
    for (n in 1:N)
    {
      if (lead <= -2) {
        D[n] <- "B"   # follow the established "B" majority
      } else if (lead >= 2) {
        D[n] <- "A"   # follow the established "A" majority
      } else {
        # No clear majority (lead in -1..1): answer at random.  The
        # original's three byte-identical branches for SUM == -1, 0, 1
        # collapse into this one; the RNG draw sequence is unchanged.
        D[n] <- if (runif(1) >= 1/3) "A" else "B"   ### Probability of random draws
      }
      lead <- lead + (if (D[n] == "A") 1 else -1)
    }
    nA <- sum(D == "A")
    nB <- N - nA
    AIND2 <- AIND2 + nA
    BIND2 <- BIND2 + nB
    # Tally the experiment's majority; exact ties count for neither side.
    if (nA > nB) {
      ASUM2 <- ASUM2 + 1
    } else if (nB > nA) {
      BSUM2 <- BSUM2 + 1
    }
  }
  COMB2[N] <- ASUM2 / (ASUM2 + BSUM2)
  INDI2[N] <- AIND2 / (AIND2 + BIND2)
}
COMB2 ### Combined prediction accuracy in Scenario II
INDI2 ### Individual prediction accuracy in Scenario II
###Plots
# Overlay the four accuracy curves against group size (1..100 participants):
# solid = Scenario I (independent answers), dashed = Scenario II (sequential,
# majority-following); "Combined" = majority vote, "Individual" = per-answer.
plot(1:100,COMB1,xlab="# of Participants", ylab="Prediction accuracy", main="Individual-Group accuracy compromise",type="o",
col="red",ylim=c(0.6,1))
lines(1:100,INDI1,type="o", col="yellow")
lines(1:100,COMB2,type="o", pch=22, lty=2, col="blue")
lines(1:100,INDI2,type="o", pch=22, lty=2, col="green")
legend(80,0.98, c("Combined I","Individual I","Combined II","Individual II"),lty=c(1,1,2,2),pch=c(21,21,22,22), lwd=c(1,1,1,1),col=c("red","yellow","blue","green"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.